13 changes: 13 additions & 0 deletions docs/user/InstructionsForAutoTuner.md
@@ -145,6 +145,19 @@ python3 -m autotuner.distributed --design gcd --platform sky130hd \
sweep
```

#### Plot images

After running an AutoTuner experiment, you can generate graphs to better understand the results. Each graph shows the progression of one of the following metrics over the course of the experiment:

- QoR
- Runtime per trial
- Clock Period
- Worst slack

```shell
python3 tools/AutoTuner/src/autotuner/utils/plot.py \
  --platform <platform> --design <design> --experiment <experiment>
```
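The plots are saved under `flow/reports/images/<platform>/<design>/<experiment>/`: for each metric, a scatter plot over time with a linear fit (`<metric>.png`) and a box plot (`<metric>-boxplot.png`). For example, listing the output of a hypothetical `smoke-test-tune` experiment:

```shell
ls flow/reports/images/sky130hd/gcd/smoke-test-tune/
# clk_period.png    clk_period-boxplot.png
# metric.png        metric-boxplot.png
# runtime.png       runtime-boxplot.png
# worst_slack.png   worst_slack-boxplot.png
```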

### Google Cloud Platform (GCP) distribution with Ray

15 changes: 10 additions & 5 deletions flow/test/test_autotuner.sh
@@ -8,11 +8,6 @@ cd ../
./tools/AutoTuner/installer.sh
. ./tools/AutoTuner/setup.sh

# remove dashes and capitalize platform name
PLATFORM=${PLATFORM//-/}
# convert to uppercase
PLATFORM=${PLATFORM^^}

echo "Running Autotuner smoke tune test"
python3 -m unittest tools.AutoTuner.test.smoke_test_tune.${PLATFORM}TuneSmokeTest.test_tune

@@ -30,4 +25,14 @@ if [ "$PLATFORM" == "asap7" ] && [ "$DESIGN_NAME" == "gcd" ]; then
python3 -m unittest tools.AutoTuner.test.resume_check.ResumeCheck.test_tune_resume
fi

echo "Running Autotuner plotting smoke test"
all_experiments=$(ls -d ./flow/logs/${PLATFORM}/${DESIGN_NAME}/smoke-test-tune*)
all_experiments=$(basename -a $all_experiments)
for expt in $all_experiments; do
python3 tools/AutoTuner/src/autotuner/utils/plot.py \
--platform ${PLATFORM} \
--design ${DESIGN_NAME} \
--experiment $expt
done

exit $ret
5 changes: 4 additions & 1 deletion flow/util/genReport.py
@@ -19,6 +19,7 @@
METRICS_CHECK_FMT = "{}/metadata-{}-check.log"
REGEX_ERROR = re.compile(r"^\[error ?(\w+-\d+)?\]", re.IGNORECASE)
REGEX_WARNING = re.compile(r"^\[warning ?(\w+-\d+)?\]", re.IGNORECASE)
SKIPPED_FLOW_VARIANT_KEYWORDS = ["test", "tune"]
STATUS_GREEN = "Passing"
STATUS_RED = "Failing"

@@ -248,7 +249,9 @@ def write_summary():
dir_list = log_dir.split(os.sep)
# Handles autotuner folders, which do not have `report.log` natively.
# TODO: Can we log something for autotuner?
if len(dir_list) != 4 or "test-" in dir_list[-1]:
if len(dir_list) != 4 or any(
word in dir_list[-1] for word in SKIPPED_FLOW_VARIANT_KEYWORDS
):
continue
report_dir = log_dir.replace(LOGS_FOLDER, REPORTS_FOLDER)

1 change: 1 addition & 0 deletions tools/AutoTuner/requirements.txt
@@ -9,3 +9,4 @@ tensorboard>=2.14.0,<=2.16.2
protobuf==3.20.3
SQLAlchemy==1.4.17
urllib3<=1.26.15
matplotlib==3.10.0
195 changes: 195 additions & 0 deletions tools/AutoTuner/src/autotuner/utils/plot.py
@@ -0,0 +1,195 @@
import glob
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
import os
import argparse
import sys

# Only plot results from AutoTunerBase variants
AT_REGEX = r"variant-AutoTunerBase-([\w-]+)-\w+"
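# For example, a params.json path containing "variant-AutoTunerBase-CTS_CLUSTER_SIZE-30-abcd"
# (hypothetical directory name) would yield the trial_id "CTS_CLUSTER_SIZE-30".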

# TODO: keep this in sync with the METRIC variable in distributed.py (single source of truth).
METRIC = "metric"

# Resolve the repository root relative to this file
# (tools/AutoTuner/src/autotuner/utils sits five levels below the root)
cur_dir = os.path.dirname(os.path.abspath(__file__))
root_dir = os.path.join(cur_dir, "../../../../../")
os.chdir(root_dir)


def load_dir(dir: str) -> pd.DataFrame:
"""
Load and merge progress, parameters, and metrics data from a specified directory.
This function searches for `progress.csv`, `params.json`, and `metrics.json` files within the given directory,
concatenates the data, and merges them into a single pandas DataFrame.
Args:
dir (str): The directory path containing the subdirectories with `progress.csv`, `params.json`, and `metrics.json` files.
Returns:
pd.DataFrame: A DataFrame containing the merged data from the progress, parameters, and metrics files.
"""

# Concatenate progress DFs
progress_csvs = glob.glob(f"{dir}/*/progress.csv")
if len(progress_csvs) == 0:
print("No progress.csv files found.")
sys.exit(1)
progress_df = pd.concat([pd.read_csv(f) for f in progress_csvs])

# Concatenate params.json & metrics.json file
params = []
failed = []
for params_fname in glob.glob(f"{dir}/*/params.json"):
metrics_fname = params_fname.replace("params.json", "metrics.json")
try:
with open(params_fname, "r") as f:
_dict = json.load(f)
_dict["trial_id"] = re.search(AT_REGEX, params_fname).group(1)
with open(metrics_fname, "r") as f:
metrics = json.load(f)
ws = metrics["finish"]["timing__setup__ws"]
metrics["worst_slack"] = ws
_dict.update(metrics)
params.append(_dict)
        except Exception:
            # Record trials whose params/metrics files are missing or malformed
            failed.append(metrics_fname)
            continue

    # Merge all DataFrames
params_df = pd.DataFrame(params)
try:
progress_df = progress_df.merge(params_df, on="trial_id")
    except KeyError:
        print(
            "Unable to merge DataFrames: trial_id is missing from params.json "
            "(possibly due to failed trials)."
        )
        sys.exit(1)

# Print failed, if any
if failed:
failed_files = "\n".join(failed)
print(f"Failed to load {len(failed)} files:\n{failed_files}")
return progress_df


def preprocess(df: pd.DataFrame) -> pd.DataFrame:
"""
Preprocess the input DataFrame by renaming columns, removing unnecessary columns,
filtering out invalid rows, and normalizing the timestamp.
Args:
df (pd.DataFrame): The input DataFrame to preprocess.
Returns:
pd.DataFrame: The preprocessed DataFrame with renamed columns, removed columns,
filtered rows, and normalized timestamp.
"""

cols_to_remove = [
"done",
"training_iteration",
"date",
"pid",
"hostname",
"node_ip",
"time_since_restore",
"time_total_s",
"iterations_since_restore",
]
rename_dict = {
"time_this_iter_s": "runtime",
"_SDC_CLK_PERIOD": "clk_period", # param
}
    try:
        df = df.rename(columns=rename_dict)
        df = df.drop(columns=cols_to_remove)
        # Drop failed trials, which report the 9e99 error sentinel as their metric
        df = df[df[METRIC] != 9e99]
        # Normalize timestamps so the experiment starts at t=0
        df["timestamp"] -= df["timestamp"].min()
        return df
    except KeyError as e:
        print(f"KeyError: {e}. The DataFrame does not contain the necessary columns.")
        sys.exit(1)


def plot(df: pd.DataFrame, key: str, dir: str):
"""
Plots a scatter plot with a linear fit and a box plot for a specified key from a DataFrame.
Args:
df (pd.DataFrame): The DataFrame containing the data to plot.
key (str): The column name in the DataFrame to plot.
dir (str): The directory where the plots will be saved. The directory must exist.
Returns:
None
"""

assert os.path.exists(dir), f"Directory {dir} does not exist."
    # Scatter plot of key over time, with a linear fit; box plot follows below
fig, ax = plt.subplots(1, figsize=(15, 10))
ax.scatter(df["timestamp"], df[key])
ax.set_xlabel("Time (s)")
ax.set_ylabel(key)
ax.set_title(f"{key} vs Time")

try:
coeff = np.polyfit(df["timestamp"], df[key], 1)
poly_func = np.poly1d(coeff)
ax.plot(
df["timestamp"],
poly_func(df["timestamp"]),
"r--",
label=f"y={coeff[0]:.2f}x+{coeff[1]:.2f}",
)
ax.legend()
except np.linalg.LinAlgError:
print("Cannot fit a line to the data, plotting only scatter plot.")

fig.savefig(f"{dir}/{key}.png")

plt.figure(figsize=(15, 10))
plt.boxplot(df[key])
plt.ylabel(key)
plt.title(f"{key} Boxplot")
plt.savefig(f"{dir}/{key}-boxplot.png")


def main(platform: str, design: str, experiment: str):
"""
Main function to process results from a specified directory and plot the results.
Args:
platform (str): The platform name.
design (str): The design name.
experiment (str): The experiment name.
Returns:
None
"""

results_dir = os.path.join(
root_dir, f"./flow/logs/{platform}/{design}/{experiment}"
)
img_dir = os.path.join(
root_dir, f"./flow/reports/images/{platform}/{design}/{experiment}"
)
print("Processing results from", results_dir)
os.makedirs(img_dir, exist_ok=True)
df = load_dir(results_dir)
df = preprocess(df)
keys = [METRIC] + ["runtime", "clk_period", "worst_slack"]

    # Plot only if there is more than one entry
    if len(df) < 2:
        print("Fewer than 2 entries, skipping plotting.")
sys.exit(0)
for key in keys:
plot(df, key, img_dir)


if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Plot AutoTuner results.")
parser.add_argument("--platform", type=str, help="Platform name.", required=True)
parser.add_argument("--design", type=str, help="Design name.", required=True)
parser.add_argument(
"--experiment", type=str, help="Experiment name.", required=True
)
args = parser.parse_args()
main(platform=args.platform, design=args.design, experiment=args.experiment)
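
# A sketch of a typical invocation (experiment name is illustrative):
#   python3 tools/AutoTuner/src/autotuner/utils/plot.py \
#       --platform sky130hd --design gcd --experiment smoke-test-tune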
2 changes: 1 addition & 1 deletion tools/AutoTuner/test/resume_check.py
@@ -51,7 +51,7 @@ def setUp(self):
f" --platform {self.platform}"
f" --config {self.config}"
f" --jobs {self.jobs}"
f" --experiment test_resume"
f" --experiment test-resume"
f" tune --iterations {self.iterations} --samples {self.samples}"
f" --resources_per_trial {res_per_trial}"
f" {c}"
6 changes: 3 additions & 3 deletions tools/AutoTuner/test/smoke_test_algo_eval.py
@@ -53,17 +53,17 @@ def test_algo_eval(self):
self.assertTrue(successful)


class ASAP7AlgoEvalSmokeTest(BaseAlgoEvalSmokeTest):
class asap7AlgoEvalSmokeTest(BaseAlgoEvalSmokeTest):
platform = "asap7"
design = "gcd"


class IHPSG13G2AlgoEvalSmokeTest(BaseAlgoEvalSmokeTest):
class ihpsg13g2AlgoEvalSmokeTest(BaseAlgoEvalSmokeTest):
platform = "ihp-sg13g2"
design = "gcd"


class SKY130HDAlgoEvalSmokeTest(BaseAlgoEvalSmokeTest):
class sky130hdAlgoEvalSmokeTest(BaseAlgoEvalSmokeTest):
platform = "sky130hd"
design = "gcd"

6 changes: 3 additions & 3 deletions tools/AutoTuner/test/smoke_test_sample_iteration.py
@@ -36,17 +36,17 @@ def test_sample_iteration(self):
self.assertTrue(successful)


class ASAP7SampleIterationSmokeTest(BaseSampleIterationSmokeTest):
class asap7SampleIterationSmokeTest(BaseSampleIterationSmokeTest):
platform = "asap7"
design = "gcd"


class SKY130HDSampleIterationSmokeTest(BaseSampleIterationSmokeTest):
class sky130hdSampleIterationSmokeTest(BaseSampleIterationSmokeTest):
platform = "sky130hd"
design = "gcd"


class IHPSG13G2SampleIterationSmokeTest(BaseSampleIterationSmokeTest):
class ihpsg13g2SampleIterationSmokeTest(BaseSampleIterationSmokeTest):
platform = "ihp-sg13g2"
design = "gcd"

6 changes: 3 additions & 3 deletions tools/AutoTuner/test/smoke_test_sweep.py
@@ -48,17 +48,17 @@ def test_sweep(self):
self.assertTrue(successful)


class ASAP7SweepSmokeTest(BaseSweepSmokeTest):
class asap7SweepSmokeTest(BaseSweepSmokeTest):
platform = "asap7"
design = "gcd"


class SKY130HDSweepSmokeTest(BaseSweepSmokeTest):
class sky130hdSweepSmokeTest(BaseSweepSmokeTest):
platform = "sky130hd"
design = "gcd"


class IHPSG13G2SweepSmokeTest(BaseSweepSmokeTest):
class ihpsg13g2SweepSmokeTest(BaseSweepSmokeTest):
platform = "ihp-sg13g2"
design = "gcd"

6 changes: 3 additions & 3 deletions tools/AutoTuner/test/smoke_test_tune.py
@@ -32,17 +32,17 @@ def test_tune(self):
self.assertTrue(successful)


class ASAP7TuneSmokeTest(BaseTuneSmokeTest):
class asap7TuneSmokeTest(BaseTuneSmokeTest):
platform = "asap7"
design = "gcd"


class SKY130HDTuneSmokeTest(BaseTuneSmokeTest):
class sky130hdTuneSmokeTest(BaseTuneSmokeTest):
platform = "sky130hd"
design = "gcd"


class IHPSG13G2TuneSmokeTest(BaseTuneSmokeTest):
class ihpsg13g2TuneSmokeTest(BaseTuneSmokeTest):
platform = "ihp-sg13g2"
design = "gcd"
