|
1 | | -"""The strategy that uses a minimizer method for searching through the parameter space.""" |
| 1 | +"""The strategy that uses the optimizer from skopt for searching through the parameter space.""" |
2 | 2 |
|
| 3 | +import numpy as np |
3 | 4 | from kernel_tuner.util import StopCriterionReached |
4 | 5 | from kernel_tuner.searchspace import Searchspace |
5 | 6 | from kernel_tuner.strategies.common import ( |
6 | 7 | CostFunc, |
7 | 8 | get_options, |
8 | | - snap_to_nearest_config, |
9 | 9 | get_strategy_docstring, |
10 | 10 | ) |
11 | 11 |
|
12 | | -supported_methods = ["forest", "gbrt", "gp", "dummy"] |
| 12 | +supported_learners = ["RF", "ET", "GBRT", "DUMMY", "GP"] |
| 13 | +supported_acq = ["LCB", "EI", "PI", "gp_hedge"]
| 14 | +supported_liars = ["cl_min", "cl_mean", "cl_max"] |
13 | 15 |
|
14 | 16 | _options = dict( |
15 | | - method=(f"Local optimization algorithm to use, choose any from {supported_methods}", "gp"), |
16 | | - options=("Options passed to the skopt method as kwargs.", dict()), |
17 | | - popsize=("Number of initial samples. If `None`, let skopt choose the initial population", None), |
18 | | - maxiter=("Maximum number of times to repeat the method until the budget is exhausted.", 1), |
| 17 | + learner=(f"The leaner to use (supported: {supported_learners})", "RF"), |
| 18 | + acq_func=(f"The acquisition function to use (supported: {supported_acq})", "gp_hedge"), |
| 19 | + lie_strategy=(f"The lie strategy to use when using batches (supported: {supported_liars})", "cl_max"), |
| 20 | + kappa=("The value of kappa", 1.96), |
| 21 | + num_initial=("Number of initial samples. If `None`, let skopt choose the initial population", None), |
| 22 | + batch_size=("The number of points to ask per batch", 1), |
| 23 | + skopt_kwargs=("Additional options passed to the skopt `Optimizer` as kwargs.", dict()), |
19 | 24 | ) |
20 | 25 |
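Note: for context, a hedged sketch of how these options reach this strategy through kernel_tuner's public API. The strategy name "skopt" and the kernel below are illustrative assumptions, not taken from this change:

    import numpy as np
    import kernel_tuner

    kernel_string = """
    __global__ void vector_add(float *c, float *a, float *b, int n) {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n) {
            c[i] = a[i] + b[i];
        }
    }
    """

    size = 1_000_000
    a = np.random.randn(size).astype(np.float32)
    b = np.random.randn(size).astype(np.float32)
    c = np.zeros_like(a)
    n = np.int32(size)
    tune_params = {"block_size_x": [32, 64, 128, 256, 512]}

    # "skopt" as strategy name is an assumption about how this module is registered.
    results, env = kernel_tuner.tune_kernel(
        "vector_add", kernel_string, size, [c, a, b, n], tune_params,
        strategy="skopt",
        strategy_options=dict(learner="GBRT", acq_func="EI", batch_size=4),
    )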
|
21 | 26 |
|
22 | 27 | def tune(searchspace: Searchspace, runner, tuning_options): |
23 | | - import skopt |
24 | | - |
25 | | - method, skopt_options, popsize, maxiter = get_options(tuning_options.strategy_options, _options) |
| 28 | + learner, acq_func, lie_strategy, kappa, num_initial, batch_size, skopt_kwargs = (
| 29 | + get_options(tuning_options.strategy_options, _options))
26 | 30 |
|
27 | 31 | # Get maximum number of evaluations |
28 | | - max_fevals = searchspace.size |
29 | | - if "max_fevals" in tuning_options: |
30 | | - max_fevals = min(tuning_options["max_fevals"], max_fevals) |
31 | | - |
32 | | - # Set the maximum number of calls to 100 times the maximum number of evaluations. |
33 | | - # Not all calls by skopt will result in an evaluation since different calls might |
34 | | - # map to the same configuration. |
35 | | - if "n_calls" not in skopt_options: |
36 | | - skopt_options["n_calls"] = 100 * max_fevals |
37 | | - |
38 | | - # If the initial population size is specified, we select `popsize` samples |
39 | | - # from the search space. This is more efficient than letting skopt select |
40 | | - # the samples as it is not aware of restrictions. |
41 | | - if popsize: |
42 | | - x0 = searchspace.get_random_sample(min(popsize, max_fevals)) |
43 | | - skopt_options["x0"] = [searchspace.get_param_indices(x) for x in x0] |
44 | | - |
45 | | - opt_result = None |
46 | | - tune_params_values = list(searchspace.tune_params.values()) |
47 | | - bounds = [(0, len(p) - 1) if len(p) > 1 else [0] for p in tune_params_values] |
| 32 | + max_fevals = min(tuning_options.get("max_fevals", np.inf), searchspace.size) |
48 | 33 |
|
| 34 | + # Cost function
49 | 35 | cost_func = CostFunc(searchspace, tuning_options, runner) |
50 | | - objective = lambda x: cost_func(searchspace.get_param_config_from_param_indices(x)) |
51 | | - space_constraint = lambda x: searchspace.is_param_config_valid(searchspace.get_param_config_from_param_indices(x)) |
| 36 | + opt_config, opt_result = None, None |
52 | 37 |
|
53 | | - skopt_options["space_constraint"] = space_constraint |
54 | | - skopt_options["verbose"] = tuning_options.verbose |
| 38 | + # The skopt dimensions: each parameter is encoded by its value index.
| | + # Single-value parameters become Categorical, since skopt's Integer requires low < high
| 39 | + from skopt.space.space import Categorical, Integer |
| 40 | + tune_params_values = list(searchspace.tune_params.values()) |
| 41 | + bounds = [Integer(0, len(p) - 1) if len(p) > 1 else Categorical([0]) for p in tune_params_values] |
| 42 | + |
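Note: a small illustrative example of this index encoding (the parameter values are made up):

    from skopt.space.space import Categorical, Integer

    # Made-up tunable parameters, just to show the mapping:
    tune_params = {"block_size_x": [32, 64, 128], "use_shared": [1]}
    dims = [Integer(0, len(v) - 1) if len(v) > 1 else Categorical([0])
            for v in tune_params.values()]
    # dims == [Integer(0, 2), Categorical([0])]; a proposed point [1, 0]
    # decodes back to the configuration (64, 1).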
| 43 | + # Space constraint |
| 44 | + space_constraint = lambda x: searchspace.is_param_config_valid( |
| 45 | + searchspace.get_param_config_from_param_indices(x)) |
| 46 | + |
| 47 | + # Create the skopt optimizer. Copy the kwargs so the caller's dict is not
| | + # mutated; the `kappa` strategy option overrides any value in `acq_func_kwargs`
| 48 | + skopt_kwargs = dict(skopt_kwargs)
| 49 | + skopt_kwargs["acq_func_kwargs"] = {**skopt_kwargs.get("acq_func_kwargs", {}), "kappa": kappa}
| | +
| | + # Only forward `n_initial_points` when given: passing `None` breaks skopt's
| | + # internal bookkeeping, while omitting it lets skopt use its own default
| | + if num_initial is not None:
| | + skopt_kwargs["n_initial_points"] = num_initial
| 50 | +
| 51 | + from skopt import Optimizer as SkOptimizer
| 52 | + optimizer = SkOptimizer(
| 53 | + dimensions=bounds,
| 54 | + base_estimator=learner,
| 56 | + acq_func=acq_func,
| 57 | + space_constraint=space_constraint,
| 58 | + **skopt_kwargs
| 59 | + )
| 60 | + |
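Note: rough intuition for the lie strategies, as a simplified sketch rather than skopt's actual implementation. When asked for several points at once, the optimizer pretends each pending point already has an objective value (the "lie") so the remaining points in the batch are pushed away from it. `surrogate_ask` and `surrogate_tell` are hypothetical stand-ins for the optimizer internals:

    from statistics import mean

    def ask_batch(surrogate_ask, surrogate_tell, batch_size, observed_ys, lie_strategy="cl_max"):
        # Fabricated objective value ("lie") assigned to pending points.
        lie = {"cl_min": min, "cl_mean": mean, "cl_max": max}[lie_strategy](observed_ys)
        batch = []
        for _ in range(batch_size):
            x = surrogate_ask()      # hypothetical: next candidate from the surrogate
            surrogate_tell(x, lie)   # hypothetical: pretend x was evaluated with `lie`
            batch.append(x)
        return batch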
| 61 | + # Ask for the initial batch of configs. `_n_initial_points` is skopt's
| | + # internal (private) counter of random points still to be sampled
| 62 | + num_initial = optimizer._n_initial_points
| 63 | + batch = optimizer.ask(num_initial, lie_strategy)
| 64 | + Xs, Ys = [], [] |
| 65 | + eval_count = 0 |
| 66 | + |
| 67 | + if tuning_options.verbose: |
| 68 | + print(f"Asked optimizer for {num_initial} points: {batch}") |
55 | 69 |
|
56 | 70 | try: |
57 | | - for _ in range(maxiter): |
58 | | - if method == "dummy": |
59 | | - opt_result = skopt.dummy_minimize(objective, bounds, **skopt_options) |
60 | | - elif method == "forest": |
61 | | - opt_result = skopt.forest_minimize(objective, bounds, **skopt_options) |
62 | | - elif method == "gp": |
63 | | - opt_result = skopt.gp_minimize(objective, bounds, **skopt_options) |
64 | | - elif method == "gbrt": |
65 | | - opt_result = skopt.gbrt_minimize(objective, bounds, **skopt_options) |
66 | | - else: |
67 | | - raise ValueError(f"invalid skopt method: {method}") |
| 71 | + while eval_count < max_fevals: |
| 72 | + if not batch: |
| 73 | + optimizer.tell(Xs, Ys) |
| 74 | + batch = optimizer.ask(batch_size, lie_strategy) |
| 75 | + Xs, Ys = [], [] |
| 76 | + |
| 77 | + if tuning_options.verbose: |
| 78 | + print(f"Asked optimizer for {batch_size} points: {batch}") |
| 79 | + |
| 80 | + x = batch.pop(0) |
| 81 | + y = cost_func(searchspace.get_param_config_from_param_indices(x)) |
| 82 | + eval_count += 1 |
| 83 | + |
| 84 | + Xs.append(x) |
| 85 | + Ys.append(y) |
| 86 | + |
| 87 | + if opt_result is None or y < opt_result: |
| 88 | + opt_config, opt_result = x, y |
| 89 | + |
68 | 90 | except StopCriterionReached as e: |
69 | 91 | if tuning_options.verbose: |
70 | 92 | print(e) |
71 | 93 |
|
72 | | - if opt_result and tuning_options.verbose: |
73 | | - print(opt_result) |
| 94 | + if opt_result is not None and tuning_options.verbose: |
| 95 | + print(f"Best configuration: {opt_result}") |
74 | 96 |
|
75 | 97 | return cost_func.results |
76 | 98 |
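Note: a hedged sketch of consuming the return value. `cost_func.results` follows kernel_tuner's usual convention of one dict per evaluated configuration; the "time" key below is that convention, assumed rather than shown in this diff:

    results = tune(searchspace, runner, tuning_options)
    # Failed configurations may store a non-numeric value under "time",
    # hence the filter before taking the minimum.
    ok = [r for r in results if isinstance(r.get("time"), (int, float))]
    best = min(ok, key=lambda r: r["time"])
    print(best)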
|
|