@@ -28,19 +28,10 @@

 # _options dict is used for generating documentation, but is not used to check for unsupported strategy_options in bayes_opt
 _options = dict(
-    covariancekernel=(
-        'The Covariance kernel to use, choose any from "constantrbf", "rbf", "matern32", "matern52"',
-        "matern32",
-    ),
+    covariancekernel=('The Covariance kernel to use, choose any from "constantrbf", "rbf", "matern32", "matern52"', "matern32"),
     covariancelengthscale=("The covariance length scale", 1.5),
-    method=(
-        "The Bayesian Optimization method to use, choose any from " + ", ".join(supported_methods),
-        "multi-ultrafast",
-    ),
-    samplingmethod=(
-        "Method used for initial sampling the parameter space, either random or Latin Hypercube Sampling (LHS)",
-        "lhs",
-    ),
+    method=("The Bayesian Optimization method to use, choose any from " + ", ".join(supported_methods), "multi-ultrafast"),
+    samplingmethod=("Method used for initial sampling the parameter space, either random or Latin Hypercube Sampling (LHS)", "lhs"),
     popsize=("Number of initial samples", 20),
 )

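Each `_options` entry is a `(description, default)` pair, and the keys double as the strategy options a user can pass in. A sketch of how they might be set when selecting this strategy through kernel_tuner's public API (the vector-add kernel, problem size, and `tune_params` below are illustrative, not taken from this PR):

```python
import numpy as np
import kernel_tuner

# hypothetical CUDA kernel and tuning setup, for illustration only
kernel_source = """
__global__ void vector_add(float *c, const float *a, const float *b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        c[i] = a[i] + b[i];
    }
}
"""

size = 10_000_000
a = np.random.randn(size).astype(np.float32)
b = np.random.randn(size).astype(np.float32)
c = np.zeros_like(a)
n = np.int32(size)
tune_params = {"block_size_x": [64, 128, 256, 512]}

# each key below corresponds to an entry in _options (or a generic budget
# option such as max_fevals); values override the documented defaults
strategy_options = {
    "covariancekernel": "matern52",  # default: "matern32"
    "samplingmethod": "lhs",         # "random" or "lhs"
    "popsize": 20,                   # number of initial samples
    "max_fevals": 100,               # evaluation budget
}

results, env = kernel_tuner.tune_kernel(
    "vector_add", kernel_source, size, [c, a, b, n], tune_params,
    strategy="bayes_opt", strategy_options=strategy_options,
)
```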
@@ -110,7 +101,7 @@ def tune(searchspace: Searchspace, runner, tuning_options): |
     :rtype: list(dict()), dict()

     """
-    # we don't actually use this for Bayesian Optimization, but it is used to check for unsupported options
+    # we don't actually use this for Bayesian Optimization, but it is used to check for unsupported options
     get_options(tuning_options.strategy_options, _options, unsupported=["x0"])

     max_fevals = tuning_options.strategy_options.get("max_fevals", 100)
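As the comment notes, the `get_options` return value is unused here; the call serves to validate the user-supplied options. A rough sketch of what such a validator could look like, matching the call signature above (this is an assumption about its behavior, not the actual kernel_tuner implementation):

```python
# illustrative sketch, not the real kernel_tuner helper: every user-supplied
# key must be documented in the options dict (or be a generic budget option),
# and explicitly unsupported keys are rejected outright
def get_options(strategy_options, options, unsupported=None):
    accepted = list(options.keys()) + ["max_fevals", "time_limit"]
    for key in strategy_options:
        if unsupported and key in unsupported:
            raise ValueError(f"Option {key} is not supported by this strategy")
        if key not in accepted:
            raise ValueError(f"Unrecognized option {key}, must be one of {accepted}")
    # return the value of each documented option, falling back to its default
    return [strategy_options.get(name, default) for name, (_, default) in options.items()]
```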
@@ -145,7 +136,13 @@ def tune(searchspace: Searchspace, runner, tuning_options): |
     # initialize and optimize
     try:
         bo = BayesianOptimization(
-            parameter_space, searchspace, removed_tune_params, tuning_options, normalize_dict, denormalize_dict, cost_func
+            parameter_space,
+            searchspace,
+            removed_tune_params,
+            tuning_options,
+            normalize_dict,
+            denormalize_dict,
+            cost_func,
         )
     except StopCriterionReached:
         warnings.warn(
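For context on the `except StopCriterionReached` branch: the cost function raises this exception once a budget such as `max_fevals` or `time_limit` is exhausted, which can already happen while the constructor is drawing its initial samples. A self-contained toy version of that control flow (the `state` dict, the loop, and the dummy objective are invented for this sketch; kernel_tuner's own exception lives in `kernel_tuner.util`):

```python
import warnings

class StopCriterionReached(Exception):
    """Stand-in for kernel_tuner.util.StopCriterionReached."""

def cost_func(config, state):
    # stand-in for the real cost function: stop once the budget is spent
    state["fevals"] += 1
    if state["fevals"] > state["max_fevals"]:
        raise StopCriterionReached("max_fevals reached")
    return sum(config)  # dummy objective value

state = {"fevals": 0, "max_fevals": 3}
try:
    # the optimizer keeps requesting evaluations until the exception fires
    for config in [(1, 2), (3, 4), (5, 6), (7, 8)]:
        cost_func(config, state)
except StopCriterionReached as e:
    warnings.warn(str(e))  # tuning ends; results gathered so far are returned
```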
@@ -851,7 +848,10 @@ def __optimize_multi_ultrafast(self, max_fevals, predict_eval_ratio=5): |
         while self.fevals < max_fevals:
             aqfs = self.multi_afs
             # if we take the prediction only once, we want to go from most exploiting to most exploring, because the more exploiting an AF is, the more it relies on non-stale information from the model
-            fit_observations = last_prediction_time * predict_eval_ratio <= last_eval_time or last_prediction_counter >= predict_eval_ratio
+            fit_observations = (
+                last_prediction_time * predict_eval_ratio <= last_eval_time
+                or last_prediction_counter >= predict_eval_ratio
+            )
             if fit_observations:
                 last_prediction_counter = 0
                 pred_start = time.perf_counter()
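The `fit_observations` condition implements a refit throttle: the surrogate model is refit either when a model update costs at most `1/predict_eval_ratio` of the last evaluation time, or at the latest after `predict_eval_ratio` iterations without one. A standalone sketch of that decision rule (the function name and example timings are invented):

```python
def should_refit(last_prediction_time, last_eval_time, counter, predict_eval_ratio=5):
    # refit when predicting is cheap relative to evaluating, or when the
    # counter says we have gone too long on a stale model
    return (
        last_prediction_time * predict_eval_ratio <= last_eval_time
        or counter >= predict_eval_ratio
    )

# refitting takes 0.02 s against a 0.5 s kernel evaluation: refit every step
assert should_refit(0.02, 0.5, counter=0)
# refitting takes 0.2 s against the same evaluation: wait for the counter
assert not should_refit(0.2, 0.5, counter=3)
assert should_refit(0.2, 0.5, counter=5)
```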