
Commit 54c44b9

optuna: multifidelity/rung study skeleton

Signed-off-by: Øyvind Harboe <oyvind.harboe@zylin.com>
1 parent: dd1d375

2 files changed: +98 / -40 lines

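The rung schedule introduced in optuna/optimize_dse.py below is a successive-halving style budget: each slower, more accurate rung keeps roughly keep_percent of the previous rung's trials. A worked sketch of that arithmetic (n_trials=100 is an assumed example value; keep_percent=20 and the rung names match the script):

    # Illustrative only, not part of the commit.
    n_trials = 100  # assumed example; the script takes this from args.n_trials
    keep_percent = 20
    for rung_number, rung in enumerate(["synth", "place", "grt"]):
        trials_per_rung = max(n_trials // ((100 // keep_percent) ** rung_number), 1)
        print(rung, trials_per_rung)  # synth 100, place 20, grt 4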

optuna/BUILD
Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ exports_files([
 # DSE-specific flow configuration with tighter constraints
 [orfs_flow(
     name = "mock-cpu",
-    abstract_stage = "cts",
+    abstract_stage = STAGES[-1],
     arguments = {
         "CORE_ASPECT_RATIO": "0.5",
         "PDN_TCL": "$(PLATFORM_DIR)/openRoad/pdn/BLOCK_grid_strategy.tcl",

optuna/optimize_dse.py
Lines changed: 97 additions & 39 deletions
@@ -44,7 +44,7 @@ def find_workspace_root() -> str:
 PARALLEL_RUNS = 8
 # synth, fastest. Later use multifidelity optimizations with rungs
 # from least accurate, fastest builds to most accurate, slowest builds.
-STAGE = "synth" # synth, place, grt
+STAGE = "synth"  # synth, place, grt
 
 
 def build_designs(
@@ -55,27 +55,30 @@ def build_designs(
     num_cores: List[int],
     pipeline_depth: List[int],
     work_per_stage: List[int],
+    rung: str,
 ) -> List[dict]:
     """Build design with given parameters and extract PPA metrics."""
-    cmd = ["bazelisk", "build"] + list(
-        itertools.chain.from_iterable(
-            [
+    beggars_provisioning = ["--jobs", "1"] if rung != "synth" else []
+    cmd = (
+        ["bazelisk", "build", "--keep_going"]
+        + beggars_provisioning
+        + list(
+            itertools.chain.from_iterable(
                 [
-                    f"--//optuna:density{i}={place_density[i]:.4f}",
-                    f"--//optuna:utilization{i}={core_util[i]}",
-                    f"--//optuna:params{i}=NUM_CORES {num_cores[i]} PIPELINE_DEPTH {pipeline_depth[i]} WORK_PER_STAGE {work_per_stage[i]}",
-                    f"//optuna:mock-cpu_{i}_{STAGE}_ppa",
+                    [
+                        f"--//optuna:density{i}={place_density[i]:.4f}",
+                        f"--//optuna:utilization{i}={core_util[i]}",
+                        f"--//optuna:params{i}=NUM_CORES {num_cores[i]} PIPELINE_DEPTH {pipeline_depth[i]} WORK_PER_STAGE {work_per_stage[i]}",
+                        f"//optuna:mock-cpu_{i}_{rung}_ppa",
+                    ]
+                    for i in range(len(trials))
                 ]
-                for i in range(len(trials))
-            ]
+            )
         )
     )
     print(subprocess.list2cmdline(cmd))
     result = subprocess.run(
         cmd,
-        capture_output=True,
-        text=True,
-        timeout=300,
         cwd=workspace_root,  # Run in workspace root
     )
 
@@ -98,42 +101,77 @@ def build_designs(
     metrics_list = []
     for i in range(len(trials)):
         # Parse PPA metrics - use absolute path from workspace root
-        ppa_file = os.path.join(workspace_root, f"bazel-bin/optuna/mock-cpu_{i}_ppa.txt")
+        ppa_file = os.path.join(
+            workspace_root, f"bazel-bin/optuna/mock-cpu_{i}_{rung}_ppa.txt"
+        )
         metrics = {}
-        with open(ppa_file) as f:
-            for line in f:
-                if ":" in line and not line.startswith("#"):
-                    key, value = line.split(":", 1)
-                    metrics[key.strip()] = float(value.strip())
-
-        area = metrics.get("cell_area", 1e9)
-        power = metrics.get("estimated_power_uw", 1e9)
-        slack = metrics.get("slack", -1e9)
-        freq = metrics.get("frequency_ghz", 0.0)
-
-        meets_timing = slack >= 0
-        print(f"{'✓' if meets_timing else '✗'} Slack: {slack:.2f} ps")
-        print(f" Area: {area:.3f} um², Power: {power:.1f} uW, Freq: {freq:.2f} GHz")
+        if os.path.exists(ppa_file):
+            with open(ppa_file) as f:
+                for line in f:
+                    if ":" in line and not line.startswith("#"):
+                        key, value = line.split(":", 1)
+                        metrics[key.strip()] = float(value.strip())
+            area = metrics.get("cell_area", 1e9)
+            power = metrics.get("estimated_power_uw", 1e9)
+            freq = metrics.get("frequency_ghz", 0.0)
+        else:
+            print("Beggars pruning - optuna multiobjective pruning not supported")
+            area = 1e9
+            power = 1e9
+            freq = 1000
 
         compute = freq * num_cores[i] * work_per_stage[i]
         energy = power / freq
 
         metrics_list.append([metrics["cell_area"], compute / energy, compute])
 
         trials[i].set_user_attr("area", area)
-        trials[i].set_user_attr("compute_per_energy", compute/energy)
+        trials[i].set_user_attr("compute_per_energy", compute / energy)
         trials[i].set_user_attr("compute_per_time", compute)
+        print(
+            f"CORE_UTILIZATION={core_util[i]}%, "
+            f"PLACE_DENSITY={place_density[i]:.3f}, "
+            f"NUM_CORES={num_cores[i]}, "
+            f"PIPELINE_DEPTH={pipeline_depth[i]}, "
+            f"WORK_PER_STAGE={work_per_stage[i]}"
+        )
+        print(
+            f"AREA={area:.2f}, "
+            f"COMPUTE/ENERGY={compute/energy:.2f}, "
+            f"COMPUTE/TIME={compute:.2f}"
+        )
 
     return metrics_list
 
 
-def objective_multi(study : optuna.Study, trials: List[optuna.Trial], args, workspace_root: str) -> tuple:
+def objective_multi(
+    study: optuna.Study,
+    trials: List[optuna.Trial],
+    args,
+    workspace_root: str,
+    rung: str,
+    previous_study: optuna.Study,
+) -> tuple:
     """Multi-objective: Minimize area and power."""
     core_util = []
     place_density = []
     num_cores = []
     pipeline_depth = []
     work_per_stage = []
+
+    # 1. SETUP: Warm-start if previous study exists
+    if previous_study is not None:
+        # Get the Top 5 Pareto optimal trials
+        best_trials = previous_study.best_trials[:5]
+
+        # Register (queue) them into the current study
+        for t in best_trials:
+            # This tells Optuna: "For the next trial, force these parameters"
+            study.enqueue_trial(t.params)
+
+    # 2. EXECUTION: Standard loop for ALL trials
+    # Optuna automatically checks the queue first. If the queue has items (from step 1),
+    # it uses them. If the queue is empty, it samples new values from the ranges below.
     for trial in trials:
         core_util.append(
             trial.suggest_int("CORE_UTILIZATION", args.min_util, args.max_util)
@@ -153,6 +191,7 @@ def objective_multi(study : optuna.Study, trials: List[optuna.Trial], args, workspace_root: str) -> tuple:
             num_cores,
             pipeline_depth,
             work_per_stage,
+            rung,
         )
 
     # Store metrics
@@ -224,16 +263,35 @@ def main():
     storage_url = f"sqlite:///{os.path.join(workspace_root, "optuna/results/dse.db")}"
     print(f"Using storage: {storage_url}")
 
-    study = optuna.create_study(
-        directions=["minimize", "maximize", "maximize"],  # Search for pareto front
-        sampler=optuna.samplers.TPESampler(seed=args.seed),
-        # storage=storage_url,
-        load_if_exists=True,
-    )
-    for i in range(0, (args.n_trials + PARALLEL_RUNS - 1) // PARALLEL_RUNS):
-        trials = [study.ask() for _ in range(min(PARALLEL_RUNS, args.n_trials - i))]
+    previous_study = None
+    for rung_number, rung in enumerate(["synth", "place", "grt"]):
+        study = optuna.create_study(
+            directions=["minimize", "maximize", "maximize"],  # Search for pareto front
+            sampler=optuna.samplers.TPESampler(seed=args.seed),
+            # storage=storage_url,
+            load_if_exists=True,
+        )
+        keep_percent = 20
+        trials_per_rung = max(
+            args.n_trials // ((100 // keep_percent) ** rung_number), 1
+        )
+        print(f"Study {rung} with {trials_per_rung} trials")
+        for i in range(
+            0, (trials_per_rung + PARALLEL_RUNS - 1) // PARALLEL_RUNS, PARALLEL_RUNS
+        ):
+            trials = [
+                study.ask() for _ in range(min(PARALLEL_RUNS, trials_per_rung - i))
+            ]
 
-        objective_multi(study, trials, args, workspace_root)
+            objective_multi(
+                study,
+                trials,
+                args,
+                workspace_root,
+                rung,
+                previous_study if i == 0 else None,
+            )
+        previous_study = study
 
     # Print results
     print(f"\n{'=' * 70}\nResults\n{'=' * 70}")
