
Commit 962e5f9
merged master into custom_diff_evo
2 parents: baf628d + a48abc1

14 files changed, +85 -40 lines

doc/source/optimization.rst

Lines changed: 6 additions & 0 deletions
@@ -46,6 +46,12 @@ cache files, serving a value from the cache for the first time in the run also c
 Only unique function evaluations are counted, so the second time a parameter configuration is selected by the strategy it is served from the
 cache, but not counted as a unique function evaluation.
 
+All optimization algorithms, except for brute_force, random_sample, and bayes_opt, allow the user to specify an initial guess or
+starting point for the optimization, called ``x0``. This can be passed to the strategy using the ``strategy_options=`` dictionary with ``"x0"`` as the key and
+a list containing a value for each parameter in tune_params that specifies the starting point. For example, for a kernel that has parameters ``block_size_x`` (64, 128, 256)
+and ``tile_size_x`` (1, 2, 3), one could pass ``strategy_options=dict(x0=[128, 2])`` to ``tune_kernel()`` to make sure the strategy starts from
+the configuration with ``block_size_x=128, tile_size_x=2``. The order of the values in ``x0`` should match the order of the tunable parameters dictionary.
+
 Below all the strategies are listed with their strategy-specific options that can be passed in a dictionary to the ``strategy_options=`` argument
 of ``tune_kernel()``.
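
To make the new ``x0`` option concrete, the sketch below shows a complete ``tune_kernel()`` call that uses it. This example is not part of the commit: the vector-add kernel, the data, and the choice of the genetic_algorithm strategy are illustrative assumptions; any strategy that supports ``x0`` is called the same way, and running it requires a CUDA-capable backend.

import numpy as np
from kernel_tuner import tune_kernel

# hypothetical tunable vector-add kernel, used only to illustrate passing x0
kernel_string = """
__global__ void vector_add(float *c, float *a, float *b, int n) {
    int i = blockIdx.x * block_size_x * tile_size_x + threadIdx.x;
    for (int t = 0; t < tile_size_x; t++) {
        int idx = i + t * block_size_x;
        if (idx < n) {
            c[idx] = a[idx] + b[idx];
        }
    }
}
"""

n = np.int32(1024 * 1024)
a = np.random.randn(n).astype(np.float32)
b = np.random.randn(n).astype(np.float32)
c = np.zeros_like(a)

tune_params = {"block_size_x": [64, 128, 256], "tile_size_x": [1, 2, 3]}

# x0 follows the order of tune_params: block_size_x=128, tile_size_x=2
results, env = tune_kernel("vector_add", kernel_string, n, [c, a, b, n],
                           tune_params, grid_div_x=["block_size_x", "tile_size_x"],
                           strategy="genetic_algorithm",
                           strategy_options=dict(x0=[128, 2]))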

kernel_tuner/strategies/bayes_opt.py

Lines changed: 22 additions & 20 deletions
@@ -13,7 +13,7 @@
 
 # BO imports
 from kernel_tuner.searchspace import Searchspace
-from kernel_tuner.strategies.common import CostFunc
+from kernel_tuner.strategies.common import CostFunc, get_options
 from kernel_tuner.util import StopCriterionReached
 
 try:
@@ -26,6 +26,24 @@
 
 supported_methods = ["poi", "ei", "lcb", "lcb-srinivas", "multi", "multi-advanced", "multi-fast", "multi-ultrafast"]
 
+# _options dict is used for generating documentation and for checking for unsupported strategy_options in bayes_opt
+_options = dict(
+    covariancekernel=(
+        'The Covariance kernel to use, choose any from "constantrbf", "rbf", "matern32", "matern52"',
+        "matern32",
+    ),
+    covariancelengthscale=("The covariance length scale", 1.5),
+    method=(
+        "The Bayesian Optimization method to use, choose any from " + ", ".join(supported_methods),
+        "multi-ultrafast",
+    ),
+    samplingmethod=(
+        "Method used for initial sampling the parameter space, either random or Latin Hypercube Sampling (LHS)",
+        "lhs",
+    ),
+    popsize=("Number of initial samples", 20),
+)
+
 
 def generate_normalized_param_dicts(tune_params: dict, eps: float) -> Tuple[dict, dict]:
     """Generates normalization and denormalization dictionaries."""
@@ -92,6 +110,9 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     :rtype: list(dict()), dict()
 
     """
+    # the option values are not used directly by Bayesian Optimization, but get_options checks for unsupported options
+    get_options(tuning_options.strategy_options, _options, unsupported=["x0"])
+
     max_fevals = tuning_options.strategy_options.get("max_fevals", 100)
     prune_parameterspace = tuning_options.strategy_options.get("pruneparameterspace", True)
     if not bayes_opt_present:
@@ -143,25 +164,6 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     return cost_func.results
 
 
-# _options dict is used for generating documentation, but is not used to check for unsupported strategy_options in bayes_opt
-_options = dict(
-    covariancekernel=(
-        'The Covariance kernel to use, choose any from "constantrbf", "rbf", "matern32", "matern52"',
-        "matern32",
-    ),
-    covariancelengthscale=("The covariance length scale", 1.5),
-    method=(
-        "The Bayesian Optimization method to use, choose any from " + ", ".join(supported_methods),
-        "multi-ultrafast",
-    ),
-    samplingmethod=(
-        "Method used for initial sampling the parameter space, either random or Latin Hypercube Sampling (LHS)",
-        "lhs",
-    ),
-    popsize=("Number of initial samples", 20),
-)
-
-
 class BayesianOptimization:
     def __init__(
         self,
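
Because ``x0`` is passed to ``get_options`` as unsupported here, handing it to the bayes_opt strategy should now fail fast with a ``ValueError`` instead of being silently ignored. A minimal sketch, reusing the hypothetical setup from the documentation example above:

try:
    tune_kernel("vector_add", kernel_string, n, [c, a, b, n], tune_params,
                strategy="bayes_opt", strategy_options=dict(x0=[128, 2]))
except ValueError as err:
    print(err)  # Unrecognized option x0 in strategy_options (allowed: [...])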

kernel_tuner/strategies/brute_force.py

Lines changed: 3 additions & 0 deletions
@@ -6,6 +6,9 @@
 
 def tune(searchspace: Searchspace, runner, tuning_options):
 
+    # Force error on unsupported options
+    common.get_options(tuning_options.strategy_options or [], _options, unsupported=["max_fevals", "time_limit", "x0", "searchspace_construction_options"])
+
     # call the runner
     return runner.run(searchspace.sorted_list(), tuning_options)

kernel_tuner/strategies/common.py

Lines changed: 13 additions & 9 deletions
@@ -43,9 +43,12 @@ def make_strategy_options_doc(strategy_options):
     return doc
 
 
-def get_options(strategy_options, options):
+def get_options(strategy_options, options, unsupported=None):
     """Get the strategy-specific options or their defaults from user-supplied strategy_options."""
-    accepted = list(options.keys()) + ["max_fevals", "time_limit", "searchspace_construction_options"]
+    accepted = list(options.keys()) + ["max_fevals", "time_limit", "x0", "searchspace_construction_options"]
+    if unsupported:
+        for key in unsupported:
+            accepted.remove(key)
     for key in strategy_options:
         if key not in accepted:
             raise ValueError(f"Unrecognized option {key} in strategy_options (allowed: {accepted})")
@@ -124,6 +127,11 @@ def __call__(self, x, check_restrictions=True):
 
         return return_value
 
+    def get_start_pos(self):
+        """Get starting position for optimization."""
+        _, x0, _ = self.get_bounds_x0_eps()
+        return x0
+
     def get_bounds_x0_eps(self):
         """Compute bounds, x0 (the initial guess), and eps."""
         values = list(self.searchspace.tune_params.values())
@@ -140,20 +148,16 @@ def get_bounds_x0_eps(self):
             bounds = [(0, eps * len(v)) for v in values]
             if x0:
                 # x0 has been supplied by the user, map x0 into [0, eps*len(v)]
-                x0 = scale_from_params(x0, self.tuning_options, eps)
+                x0 = scale_from_params(x0, self.searchspace.tune_params, eps)
             else:
                 # get a valid x0
                 pos = list(self.searchspace.get_random_sample(1)[0])
                 x0 = scale_from_params(pos, self.searchspace.tune_params, eps)
         else:
             bounds = self.get_bounds()
             if not x0:
-                x0 = [(min_v + max_v) / 2.0 for (min_v, max_v) in bounds]
-            eps = 1e9
-            for v_list in values:
-                if len(v_list) > 1:
-                    vals = np.sort(v_list)
-                    eps = min(eps, np.amin(np.gradient(vals)))
+                x0 = list(self.searchspace.get_random_sample(1)[0])
+            eps = 1
 
         self.tuning_options["eps"] = eps
         logging.debug('get_bounds_x0_eps called')
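
The validation in ``get_options`` is small enough to demonstrate standalone. The sketch below copies the ``accepted``/``unsupported`` logic from the diff; the returned tuple of per-option values is an assumption inferred from how the strategies unpack the result (``popsize, maxiter, ... = common.get_options(options, _options)``), and the toy ``_options`` dict is hypothetical.

def get_options(strategy_options, options, unsupported=None):
    """Standalone sketch of the validation logic shown above."""
    accepted = list(options.keys()) + ["max_fevals", "time_limit", "x0", "searchspace_construction_options"]
    if unsupported:
        for key in unsupported:
            accepted.remove(key)
    for key in strategy_options:
        if key not in accepted:
            raise ValueError(f"Unrecognized option {key} in strategy_options (allowed: {accepted})")
    # assumed return convention: one value per documented option, in order
    return tuple(strategy_options.get(key, default) for key, (_, default) in options.items())

_options = dict(popsize=("number of initial samples", 20), maxiter=("maximum iterations", 100))

print(get_options({"popsize": 10, "x0": [128, 2]}, _options))  # (10, 100): x0 is accepted by default
try:
    get_options({"x0": [128, 2]}, _options, unsupported=["x0"])
except ValueError as err:
    print(err)  # x0 was removed from the accepted list, so it is rejected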

kernel_tuner/strategies/diff_evo.py

Lines changed: 1 addition & 1 deletion
@@ -35,7 +35,7 @@
 
 def tune(searchspace: Searchspace, runner, tuning_options):
     cost_func = CostFunc(searchspace, tuning_options, runner)
-    bounds = cost_func.get_bounds()
+    bounds, x0, _ = cost_func.get_bounds_x0_eps()
 
     options = tuning_options.strategy_options
     popsize, maxiter, F, CR, method, constraint_aware = common.get_options(options, _options)

kernel_tuner/strategies/firefly_algorithm.py

Lines changed: 4 additions & 1 deletion
@@ -21,7 +21,7 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     cost_func = CostFunc(searchspace, tuning_options, runner, scaling=True)
 
     # using this instead of get_bounds because scaling is used
-    bounds, _, eps = cost_func.get_bounds_x0_eps()
+    bounds, x0, eps = cost_func.get_bounds_x0_eps()
 
     num_particles, maxiter, B0, gamma, alpha = common.get_options(tuning_options.strategy_options, _options)
 
@@ -38,6 +38,9 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     for i, particle in enumerate(swarm):
         particle.position = scale_from_params(population[i], searchspace.tune_params, eps)
 
+    # include user provided starting point
+    swarm[0].position = x0
+
     # compute initial intensities
     for j in range(num_particles):
         try:
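
firefly_algorithm here, and genetic_algorithm.py and pso.py below, all seed the initial population the same way: draw a random population, then overwrite member 0 with the user's starting point, so ``x0`` is guaranteed to be evaluated in the first generation. A minimal self-contained sketch of that pattern, with a hypothetical ``random_sample()`` standing in for ``searchspace.get_random_sample()``:

import random

tune_params = {"block_size_x": [64, 128, 256], "tile_size_x": [1, 2, 3]}

def random_sample():
    # hypothetical stand-in for searchspace.get_random_sample(1)[0]
    return [random.choice(values) for values in tune_params.values()]

def seeded_population(pop_size, x0=None):
    # draw a random population, then overwrite member 0 with the starting point
    population = [random_sample() for _ in range(pop_size)]
    if x0:
        population[0] = list(x0)
    return population

print(seeded_population(5, x0=[128, 2]))  # member 0 is always [128, 2]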

kernel_tuner/strategies/genetic_algorithm.py

Lines changed: 2 additions & 0 deletions
@@ -27,6 +27,8 @@ def tune(searchspace: Searchspace, runner, tuning_options):
 
     population = list(list(p) for p in searchspace.get_random_sample(pop_size))
 
+    population[0] = cost_func.get_start_pos()
+
     for generation in range(generations):
 
         # determine fitness of population members

kernel_tuner/strategies/greedy_ils.py

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     cost_func = CostFunc(searchspace, tuning_options, runner)
 
     #while searching
-    candidate = searchspace.get_random_sample(1)[0]
+    candidate = cost_func.get_start_pos()
     best_score = cost_func(candidate, check_restrictions=False)
 
     last_improvement = 0

kernel_tuner/strategies/greedy_mls.py

Lines changed: 4 additions & 2 deletions
@@ -24,17 +24,19 @@ def tune(searchspace: Searchspace, runner, tuning_options):
 
     fevals = 0
 
+    candidate = cost_func.get_start_pos()
+
     #while searching
     while fevals < max_fevals:
-        candidate = searchspace.get_random_sample(1)[0]
-
         try:
             base_hillclimb(candidate, neighbor, max_fevals, searchspace, tuning_options, cost_func, restart=restart, randomize=randomize, order=order)
         except util.StopCriterionReached as e:
             if tuning_options.verbose:
                 print(e)
             return cost_func.results
 
+        candidate = searchspace.get_random_sample(1)[0]
+
         fevals = len(tuning_options.unique_results)
 
     return cost_func.results

kernel_tuner/strategies/pso.py

Lines changed: 4 additions & 2 deletions
@@ -21,8 +21,7 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     cost_func = CostFunc(searchspace, tuning_options, runner, scaling=True)
 
     #using this instead of get_bounds because scaling is used
-    bounds, _, eps = cost_func.get_bounds_x0_eps()
-
+    bounds, x0, eps = cost_func.get_bounds_x0_eps()
 
     num_particles, maxiter, w, c1, c2 = common.get_options(tuning_options.strategy_options, _options)
 
@@ -39,6 +38,9 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     for i, particle in enumerate(swarm):
         particle.position = scale_from_params(population[i], searchspace.tune_params, eps)
 
+    # include user provided starting point
+    swarm[0].position = x0
+
     # start optimization
     for i in range(maxiter):
         if tuning_options.verbose:
