
Commit bae800e

Basic implementation to use Searchspace in Bayesian Optimization
1 parent: faeb52e

File tree

2 files changed: +57 -28 lines

kernel_tuner/strategies/bayes_opt.py

Lines changed: 56 additions & 27 deletions
@@ -1,4 +1,5 @@
 """Bayesian Optimization implementation from the thesis by Willemsen."""
+
 import itertools
 import time
 import warnings
@@ -13,6 +14,7 @@
 # BO imports
 from kernel_tuner.searchspace import Searchspace
 from kernel_tuner.strategies.common import CostFunc
+from kernel_tuner.util import StopCriterionReached
 
 try:
     from sklearn.gaussian_process import GaussianProcessRegressor
@@ -22,9 +24,7 @@
 except ImportError:
     bayes_opt_present = False
 
-from kernel_tuner import util
-
-supported_methods = ["poi", "ei", "lcb", "lcb-srinivas", "multi", "multi-advanced", "multi-fast"]
+supported_methods = ["poi", "ei", "lcb", "lcb-srinivas", "multi", "multi-advanced", "multi-fast", "multi-ultrafast"]
 
 
 def generate_normalized_param_dicts(tune_params: dict, eps: float) -> Tuple[dict, dict]:
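
The supported_methods list above gains "multi-ultrafast". A user-supplied "method" option is checked against this list by the get_hyperparam helper shown further down; a small self-contained sketch of that kind of guard (hypothetical standalone code, mirroring the ValueError wording used in set_acquisition_function):

supported_methods = ["poi", "ei", "lcb", "lcb-srinivas", "multi", "multi-advanced", "multi-fast", "multi-ultrafast"]

# Hypothetical guard; in the real code this check lives in get_hyperparam /
# set_acquisition_function inside the BayesianOptimization class.
method = "multi-ultrafast"
if method not in supported_methods:
    raise ValueError("Acquisition function must be one of {}, is {}".format(supported_methods, method))
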
@@ -93,9 +93,6 @@ def tune(searchspace: Searchspace, runner, tuning_options):
 
     """
     max_fevals = tuning_options.strategy_options.get("max_fevals", 100)
-    # limit max_fevals to max size of the parameter space
-    max_fevals = min(searchspace.size, max_fevals)
-
     prune_parameterspace = tuning_options.strategy_options.get("pruneparameterspace", True)
     if not bayes_opt_present:
         raise ImportError(
@@ -108,19 +105,8 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     _, _, eps = cost_func.get_bounds_x0_eps()
 
     # compute cartesian product of all tunable parameters
-    parameter_space = itertools.product(*tune_params.values())
-
-    # check for search space restrictions
-    if searchspace.restrictions is not None:
-        tuning_options.verbose = False
-        parameter_space = filter(lambda p: util.config_valid(p, tuning_options, runner.dev.max_threads), parameter_space)
-    parameter_space = list(parameter_space)
-    if len(parameter_space) < 1:
-        raise ValueError("Empty parameterspace after restrictionscheck. Restrictionscheck is possibly too strict.")
-    if len(parameter_space) == 1:
-        raise ValueError(
-            f"Only one configuration after restrictionscheck. Restrictionscheck is possibly too strict. Configuration: {parameter_space[0]}"
-        )
+    # TODO actually use the Searchspace object properly throughout Bayesian Optimization
+    parameter_space = searchspace.list
 
     # normalize search space to [0,1]
     normalize_dict, denormalize_dict = generate_normalized_param_dicts(tune_params, eps)
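
Here the pre-built Searchspace replaces the manual itertools.product enumeration and util.config_valid filtering: searchspace.list already contains only restriction-satisfying configurations. A minimal sketch of constructing and querying such an object; the constructor arguments and example parameters are assumptions, while .list, .size, and .restrictions appear in this diff:

from kernel_tuner.searchspace import Searchspace

# Hypothetical tunable parameters and restriction (not part of this commit).
tune_params = {"block_size_x": [32, 64, 128, 256], "tile_size": [1, 2, 4]}
restrictions = ["block_size_x * tile_size <= 512"]

# Assumed constructor arguments; the exact signature may differ per version.
searchspace = Searchspace(tune_params, restrictions, max_threads=1024)

parameter_space = searchspace.list  # restriction-checked tuples, replaces itertools.product + filtering
print(searchspace.size == len(parameter_space))
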
@@ -138,18 +124,19 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     # initialize and optimize
     try:
         bo = BayesianOptimization(
-            parameter_space, removed_tune_params, tuning_options, normalize_dict, denormalize_dict, cost_func
+            parameter_space, searchspace, removed_tune_params, tuning_options, normalize_dict, denormalize_dict, cost_func
         )
-    except util.StopCriterionReached as e:
-        print(
+    except StopCriterionReached:
+        warnings.warn(
             "Stop criterion reached during initialization, was popsize (default 20) greater than max_fevals or the alotted time?"
         )
-        raise e
+        return cost_func.results
+        # raise e
     try:
         if max_fevals - bo.fevals <= 0:
             raise ValueError("No function evaluations left for optimization after sampling")
         bo.optimize(max_fevals)
-    except util.StopCriterionReached as e:
+    except StopCriterionReached as e:
         if tuning_options.verbose:
             print(e)
 
@@ -165,7 +152,7 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     covariancelengthscale=("The covariance length scale", 1.5),
     method=(
         "The Bayesian Optimization method to use, choose any from " + ", ".join(supported_methods),
-        "multi-advanced",
+        "multi-ultrafast",
     ),
     samplingmethod=(
         "Method used for initial sampling the parameter space, either random or Latin Hypercube Sampling (LHS)",
@@ -179,6 +166,7 @@ class BayesianOptimization:
     def __init__(
         self,
         searchspace: list,
+        searchspace_obj: Searchspace,
         removed_tune_params: list,
         tuning_options: dict,
         normalize_dict: dict,
@@ -202,7 +190,7 @@ def get_hyperparam(name: str, default, supported_values=list()):
         # get hyperparameters
         cov_kernel_name = get_hyperparam("covariancekernel", "matern32", self.supported_cov_kernels)
         cov_kernel_lengthscale = get_hyperparam("covariancelengthscale", 1.5)
-        acquisition_function = get_hyperparam("method", "multi-advanced", self.supported_methods)
+        acquisition_function = get_hyperparam("method", "multi-ultrafast", self.supported_methods)
         acq = acquisition_function
         acq_params = get_hyperparam("methodparams", {})
         multi_af_names = get_hyperparam("multi_af_names", ["ei", "poi", "lcb"])
@@ -256,6 +244,7 @@ def get_hyperparam(name: str, default, supported_values=list()):
 
         # set remaining values
         self.__searchspace = searchspace
+        self.__searchspace_obj = searchspace_obj
         self.removed_tune_params = removed_tune_params
         self.searchspace_size = len(self.searchspace)
         self.num_dimensions = len(self.dimensions())
@@ -345,6 +334,8 @@ def set_acquisition_function(self, acquisition_function: str):
             self.optimize = self.__optimize_multi_advanced
         elif acquisition_function == "multi-fast":
             self.optimize = self.__optimize_multi_fast
+        elif acquisition_function == "multi-ultrafast":
+            self.optimize = self.__optimize_multi_ultrafast
         else:
             raise ValueError(
                 "Acquisition function must be one of {}, is {}".format(self.supported_methods, acquisition_function)
@@ -461,7 +452,7 @@ def evaluate_objective_function(self, param_config: tuple) -> float:
         """Evaluates the objective function."""
         param_config = self.unprune_param_config(param_config)
         denormalized_param_config = self.denormalize_param_config(param_config)
-        if not util.config_valid(denormalized_param_config, self.tuning_options, self.max_threads):
+        if not self.__searchspace_obj.is_param_config_valid(denormalized_param_config):
             return self.invalid_value
         val = self.cost_func(param_config)
         self.fevals += 1
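
Validity of a candidate configuration is now delegated to the Searchspace object instead of util.config_valid. A small sketch of that call in isolation; is_param_config_valid comes from this diff, the construction is the same assumption as in the earlier sketch:

from kernel_tuner.searchspace import Searchspace

tune_params = {"block_size_x": [32, 64, 128], "tile_size": [1, 2]}
searchspace = Searchspace(tune_params, ["block_size_x * tile_size <= 128"], max_threads=1024)  # assumed signature

print(searchspace.is_param_config_valid((64, 2)))   # True: in the space and 64 * 2 <= 128
print(searchspace.is_param_config_valid((128, 2)))  # False: 128 * 2 violates the restriction
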
@@ -846,6 +837,44 @@ def __optimize_multi_fast(self, max_fevals):
                 self.update_after_evaluation(observation, candidate_index, candidate_params)
             self.fit_observations_to_model()
 
+    def __optimize_multi_ultrafast(self, max_fevals, predict_eval_ratio=5):
+        """Optimize with a portfolio of multiple acquisition functions. Predictions are only taken once, or fewer if predictions take too long.
+
+        The `predict_eval_ratio` denotes the ratio between the duration of the predictions and the duration of evaluations, as updating the prediction every evaluation is not efficient when evaluation is quick.
+        Predictions are only updated if the previous evaluation took more than `predict_eval_ratio` * the last prediction duration, or the last prediction is more than `predict_eval_ratio` evaluations ago.
+        """
+        last_prediction_counter = 0
+        last_prediction_time = 0
+        last_eval_time = 0
+        while self.fevals < max_fevals:
+            aqfs = self.multi_afs
+            # if we take the prediction only once, we want to go from most exploiting to most exploring, because the more exploiting an AF is, the more it relies on non-stale information from the model
+            fit_observations = last_prediction_time * predict_eval_ratio <= last_eval_time or last_prediction_counter >= predict_eval_ratio
+            if fit_observations:
+                last_prediction_counter = 0
+                pred_start = time.perf_counter()
+                if last_eval_time > 0.0:
+                    self.fit_observations_to_model()
+                predictions, _, std = self.predict_list(self.unvisited_cache)
+                last_prediction_time = time.perf_counter() - pred_start
+            else:
+                last_prediction_counter += 1
+            eval_start = time.perf_counter()
+            hyperparam = self.contextual_variance(std)
+            if self.__visited_num >= self.searchspace_size:
+                raise ValueError(self.error_message_searchspace_fully_observed)
+            for af in aqfs:
+                if self.__visited_num >= self.searchspace_size or self.fevals >= max_fevals:
+                    break
+                list_of_acquisition_values = af(predictions, hyperparam)
+                best_af = self.argopt(list_of_acquisition_values)
+                del predictions[best_af]  # to avoid going out of bounds
+                candidate_params = self.unvisited_cache[best_af]
+                candidate_index = self.find_param_config_index(candidate_params)
+                observation = self.evaluate_objective_function(candidate_params)
+                self.update_after_evaluation(observation, candidate_index, candidate_params)
+                last_eval_time = time.perf_counter() - eval_start
+
     def af_random(self, predictions=None, hyperparam=None) -> list:
         """Acquisition function returning a randomly shuffled list for comparison."""
         list_random = range(len(self.unvisited_cache))
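
The gate in the docstring above, refit and re-predict only when predictions are cheap relative to the last evaluation or when the cached predictions are predict_eval_ratio evaluations old, can be checked in isolation. A standalone sketch with illustrative names (not part of kernel_tuner):

def should_refit(last_prediction_time, last_eval_time, evals_since_prediction, predict_eval_ratio=5):
    """Mirror of the gating condition used in __optimize_multi_ultrafast."""
    return (last_prediction_time * predict_eval_ratio <= last_eval_time
            or evals_since_prediction >= predict_eval_ratio)

# Slow evaluations (1.5 s) versus cheap predictions (0.2 s): refit every iteration.
print(should_refit(0.2, 1.5, 1))   # True, since 0.2 * 5 = 1.0 <= 1.5
# Cheap evaluations (0.05 s): reuse cached predictions until they are 5 evaluations old.
print(should_refit(0.2, 0.05, 3))  # False
print(should_refit(0.2, 0.05, 5))  # True, cache considered stale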

test/strategies/test_bayesian_optimization.py

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@
 pruned_parameter_space, removed_tune_params = bayes_opt.prune_parameter_space(normalized_parameter_space, tuning_options, tune_params, original_to_normalized)
 
 # initialize BO
-BO = BayesianOptimization(pruned_parameter_space, removed_tune_params, tuning_options, original_to_normalized, normalized_to_original, cost_func)
+BO = BayesianOptimization(pruned_parameter_space, searchspace, removed_tune_params, tuning_options, original_to_normalized, normalized_to_original, cost_func)
 predictions, _, std = BO.predict_list(BO.unvisited_cache)
 
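
Since the test passes the Searchspace object positionally, the change relies on the order of the first constructor arguments. A hypothetical supplementary check of that order (not part of this commit; it only uses the parameter names shown in the __init__ hunk above):

import inspect

from kernel_tuner.strategies.bayes_opt import BayesianOptimization


def test_constructor_takes_searchspace_object_second():
    params = list(inspect.signature(BayesianOptimization.__init__).parameters)
    assert params[1:3] == ["searchspace", "searchspace_obj"]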
