
Commit 939fa9c

Implemented new function to get performance scores per strategy by running the passed experiments file
1 parent b0954f5

File tree: 1 file changed (+57 -0 lines)

src/autotuning_methodology/report_experiments.py

Lines changed: 57 additions & 0 deletions
@@ -10,6 +10,7 @@
     RandomSearchCalculatedBaseline,
 )
 from autotuning_methodology.curves import Curve, StochasticOptimizationAlgorithm
+from autotuning_methodology.experiments import execute_experiment
 from autotuning_methodology.searchspace_statistics import SearchspaceStatistics
 
 
@@ -189,3 +190,59 @@ def get_strategies_aggregated_performance(
         strategies_aggregated_upper_err,
         strategies_aggregated_real_stopping_point_fraction,
     )
+
+
+def get_strategy_scores(experiment_filepath: str, use_strategy_as_baseline=None):
+    """Function to get performance scores per strategy by running the passed experiments file.
+
+    Args:
+        experiment_filepath: the path to the experiment-filename.json to run.
+        use_strategy_as_baseline: whether to use an executed strategy as the baseline. Defaults to None.
+
+    Returns:
+        a dictionary of the strategies, with the performance score and error for each strategy.
+    """
+    # execute the experiment if necessary, else retrieve it
+    experiment, strategies, results_descriptions = execute_experiment(experiment_filepath, profiling=False)
+    experiment_folderpath = Path(experiment_filepath).parent
+
+    # get the settings
+    minimization: bool = experiment.get("minimization", True)
+    cutoff_percentile: float = experiment["cutoff_percentile"]
+    cutoff_percentile_start: float = experiment.get("cutoff_percentile_start", 0.01)
+    time_resolution: float = experiment.get("resolution", 1e4)
+    confidence_level: float = experiment["plot"].get("confidence_level", 0.95)
+
+    # aggregate the data
+    aggregation_data = get_aggregation_data(
+        experiment_folderpath,
+        experiment,
+        strategies,
+        results_descriptions,
+        cutoff_percentile,
+        cutoff_percentile_start,
+        confidence_level,
+        minimization,
+        time_resolution,
+        use_strategy_as_baseline,
+    )
+
+    # get the aggregated performance per strategy
+    (
+        strategies_performance,
+        strategies_lower_err,
+        strategies_upper_err,
+        strategies_real_stopping_point_fraction,
+    ) = get_strategies_aggregated_performance(list(aggregation_data.values()), confidence_level)
+
+    # calculate the average performance score and error per strategy
+    results: dict[str, dict[str, float]] = dict()
+    for strategy_index, strategy_performance in enumerate(strategies_performance):
+        performance = round(np.mean(strategy_performance), 3)
+        error = round(np.std(strategy_performance), 3)
+        strategy_name = strategies[strategy_index]["name"]
+        results[strategy_name] = {
+            "score": performance,
+            "error": error,
+        }
+    return results

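For reference, a minimal usage sketch of the new function follows. The experiments-file path and the strategy name in the comment are hypothetical placeholders, not part of this commit; the shape of the returned dictionary follows directly from the code above.

from autotuning_methodology.report_experiments import get_strategy_scores

# hypothetical path; replace with an actual experiments .json file
scores = get_strategy_scores("experiments/example_experiment.json")

# 'scores' maps each strategy name to its rounded mean performance score and standard deviation,
# e.g. {"random_sample": {"score": 0.83, "error": 0.02}} (values illustrative)
for strategy_name, result in scores.items():
    print(f"{strategy_name}: score={result['score']}, error={result['error']}")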