33 changes: 33 additions & 0 deletions tools/AutoTuner/src/autotuner/distributed.py
Contributor:
In terms of the valid-config check, would you prefer to fail fast? I.e., fail at the argparse stage, or wait until the Ray servers are started?

Contributor (Author):
The failure would need to be at the individual run level, not at argparse. In other words, someone could do:

global: 0, 5
detailed: 0, 5

then, AT chooses the following for a specific run:

global: 2
detailed: 4

which is invalid and we shouldn't even bother running OR.
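
For illustration, a hedged sketch of that situation in Python (only the parameter names come from the real config; the dict layout and values are just assumptions for readability):

# Hypothetical layout of the user's ranges (parameter names are real,
# the (min, max) tuple representation is only for illustration).
ranges = {
    "CELL_PAD_IN_SITES_GLOBAL_PLACEMENT": (0, 5),
    "CELL_PAD_IN_SITES_DETAIL_PLACEMENT": (0, 5),
}

# One trial's config, with each parameter sampled independently:
sampled = {
    "CELL_PAD_IN_SITES_GLOBAL_PLACEMENT": 2,
    "CELL_PAD_IN_SITES_DETAIL_PLACEMENT": 4,
}

# Detail padding (4) exceeds global padding (2), violating the dependency,
# so this trial should be rejected before OpenROAD is ever invoked.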

I've got a checker that I will commit at some point; it handles what we can check at the config-file level.
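
A hedged sketch of what such a config-file-level check could look like, assuming each parameter is given as a (min, max) range (the helper name and schema below are hypothetical):

def ranges_allow_valid_padding(params):
    """Hypothetical config-file-level check: the padding dependency is
    satisfiable only if the smallest detail padding does not exceed the
    largest global padding allowed by the ranges."""
    glob = params.get("CELL_PAD_IN_SITES_GLOBAL_PLACEMENT")
    detail = params.get("CELL_PAD_IN_SITES_DETAIL_PLACEMENT")
    if glob is None or detail is None:
        return True  # dependency does not apply
    _, glob_max = glob
    detail_min, _ = detail
    return detail_min <= glob_max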

@@ -100,11 +100,18 @@ def setup(self, config):
        )
        self.step_ = 0
        self.variant = f"variant-{self.__class__.__name__}-{self.trial_id}-or"
        # Do a valid config check here, since we still have the config in a
        # dict vs. having to scan through the parameter string later.
        self.is_valid_config = self._is_valid_config(config)

    def step(self):
        """
        Run step experiment and compute its score.
        """

        # If the config is not valid, skip the run and report an error result.
        if not self.is_valid_config:
            return {METRIC: ERROR_METRIC, "effective_clk_period": "-", "num_drc": "-"}
        self._variant = f"{self.variant}-{self.step_}"
        metrics_file = openroad(
            args=args,
@@ -142,6 +149,32 @@ def evaluate(self, metrics):
            score = score * (100 / self.step_) + gamma * num_drc
        return (score, effective_clk_period, num_drc)

    def _is_valid_config(self, config):
        """
        Checks dependent parameters and returns False if any dependency is
        violated, so that we do not launch an incompatible run.
        """

        ret_val = True
        ret_val &= self._is_valid_padding(config)
        return ret_val

    def _is_valid_padding(self, config):
        """Returns True if global padding >= detail padding."""

        if (
            "CELL_PAD_IN_SITES_GLOBAL_PLACEMENT" in config
            and "CELL_PAD_IN_SITES_DETAIL_PLACEMENT" in config
        ):
            global_padding = config["CELL_PAD_IN_SITES_GLOBAL_PLACEMENT"]
            detail_padding = config["CELL_PAD_IN_SITES_DETAIL_PLACEMENT"]
            if global_padding < detail_padding:
                print(
                    f"[WARN TUN-0032] CELL_PAD_IN_SITES_DETAIL_PLACEMENT cannot be greater than CELL_PAD_IN_SITES_GLOBAL_PLACEMENT: {detail_padding} {global_padding}"
                )
                return False
        return True


class PPAImprov(AutoTunerBase):
    """