
Commit 78c8a9d

re-add support for user-specified starting point
1 parent ea29129 commit 78c8a9d

File tree

10 files changed: +48 −11 lines


doc/source/optimization.rst

Lines changed: 6 additions & 0 deletions

@@ -46,6 +46,12 @@ cache files, serving a value from the cache for the first time in the run also c
 Only unique function evaluations are counted, so the second time a parameter configuration is selected by the strategy it is served from the
 cache, but not counted as a unique function evaluation.
 
+All optimization algorithms, except for brute_force, random_sample, and bayes_opt, allow the user to specify an initial guess or
+starting point for the optimization, called ``x0``. This can be passed to the strategy using the ``strategy_options=`` dictionary with ``"x0"`` as key and
+a list of values for each parameter in tune_params to note the starting point. For example, for a kernel that has parameters ``block_size_x`` (64, 128, 256)
+and ``tile_size_x`` (1,2,3), one could pass ``strategy_options=dict(x0=[128,2])`` to ``tune_kernel()`` to make sure the strategy starts from
+the configuration with ``block_size_x=128, tile_size_x=2``. The order in the ``x0`` list should match the order in the tunable parameters dictionary.
+
 Below all the strategies are listed with their strategy-specific options that can be passed in a dictionary to the ``strategy_options=`` argument
 of ``tune_kernel()``.
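A minimal sketch of a ``tune_kernel()`` call using the option documented above; the kernel source, problem size, and choice of strategy are illustrative assumptions, only the ``strategy_options=dict(x0=[128, 2])`` part mirrors the documentation.

import numpy as np
import kernel_tuner

# Illustrative kernel: block_size_x and tile_size_x are inserted by Kernel Tuner
# as compile-time defines for each configuration it evaluates.
kernel_string = """
__global__ void vector_add(float *c, float *a, float *b, int n) {
    int i = (blockIdx.x * blockDim.x + threadIdx.x) * tile_size_x;
    for (int t = 0; t < tile_size_x; t++) {
        if (i + t < n) {
            c[i + t] = a[i + t] + b[i + t];
        }
    }
}
"""

size = 1000000
a = np.random.randn(size).astype(np.float32)
b = np.random.randn(size).astype(np.float32)
c = np.zeros_like(a)
n = np.int32(size)

tune_params = {"block_size_x": [64, 128, 256], "tile_size_x": [1, 2, 3]}

# start the search from the configuration block_size_x=128, tile_size_x=2
results, env = kernel_tuner.tune_kernel(
    "vector_add", kernel_string, size, [c, a, b, n], tune_params,
    grid_div_x=["block_size_x", "tile_size_x"],
    strategy="genetic_algorithm", strategy_options=dict(x0=[128, 2]))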

kernel_tuner/strategies/common.py

Lines changed: 7 additions & 2 deletions

@@ -44,7 +44,7 @@ def make_strategy_options_doc(strategy_options):
 
 def get_options(strategy_options, options):
     """Get the strategy-specific options or their defaults from user-supplied strategy_options."""
-    accepted = list(options.keys()) + ["max_fevals", "time_limit"]
+    accepted = list(options.keys()) + ["max_fevals", "time_limit", "x0"]
     for key in strategy_options:
         if key not in accepted:
             raise ValueError(f"Unrecognized option {key} in strategy_options")
@@ -114,6 +114,11 @@ def __call__(self, x, check_restrictions=True):
 
         return return_value
 
+    def get_start_pos(self):
+        """Get starting position for optimization."""
+        _, x0, _ = self.get_bounds_x0_eps()
+        return x0
+
     def get_bounds_x0_eps(self):
         """Compute bounds, x0 (the initial guess), and eps."""
         values = list(self.searchspace.tune_params.values())
@@ -130,7 +135,7 @@ def get_bounds_x0_eps(self):
         bounds = [(0, eps * len(v)) for v in values]
         if x0:
            # x0 has been supplied by the user, map x0 into [0, eps*len(v)]
-            x0 = scale_from_params(x0, self.tuning_options, eps)
+            x0 = scale_from_params(x0, self.searchspace.tune_params, eps)
         else:
            # get a valid x0
            pos = list(self.searchspace.get_random_sample(1)[0])
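For intuition, a rough standalone sketch of the mapping that the corrected ``scale_from_params(x0, self.searchspace.tune_params, eps)`` call performs; this only mimics the behavior implied by the surrounding code (per-parameter bounds of ``(0, eps * len(v))``) and is not the library's actual implementation.

def scale_from_params_sketch(x0, tune_params, eps):
    """Map each chosen parameter value into its [0, eps * len(values)) interval."""
    scaled = []
    for value, values in zip(x0, tune_params.values()):
        index = list(values).index(value)       # position of the chosen value
        scaled.append(eps * index + 0.5 * eps)  # a point inside that value's sub-interval
    return scaled

# e.g. with tune_params = {"block_size_x": [64, 128, 256], "tile_size_x": [1, 2, 3]}
# and eps = 1.0, x0 = [128, 2] maps to [1.5, 1.5]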

kernel_tuner/strategies/diff_evo.py

Lines changed: 2 additions & 3 deletions

@@ -15,12 +15,11 @@
 
 def tune(searchspace: Searchspace, runner, tuning_options):
 
-
     method, popsize, maxiter = common.get_options(tuning_options.strategy_options, _options)
 
     # build a bounds array as needed for the optimizer
     cost_func = CostFunc(searchspace, tuning_options, runner)
-    bounds = cost_func.get_bounds()
+    bounds, x0, _ = cost_func.get_bounds_x0_eps()
 
     # ensure particles start from legal points
     population = list(list(p) for p in searchspace.get_random_sample(popsize))
@@ -29,7 +28,7 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     opt_result = None
     try:
         opt_result = differential_evolution(cost_func, bounds, maxiter=maxiter, popsize=popsize, init=population,
-                                            polish=False, strategy=method, disp=tuning_options.verbose)
+                                            polish=False, strategy=method, disp=tuning_options.verbose, x0=x0)
     except util.StopCriterionReached as e:
         if tuning_options.verbose:
             print(e)
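Outside of Kernel Tuner, the forwarded ``x0`` is simply SciPy's initial-guess keyword. A self-contained sketch, assuming a SciPy version in which ``differential_evolution`` accepts ``x0``; the objective function here is a stand-in for the cost function.

import numpy as np
from scipy.optimize import differential_evolution

def sphere(x):
    # simple stand-in objective; Kernel Tuner would pass its CostFunc here
    return float(np.sum(np.asarray(x) ** 2))

bounds = [(-5.0, 5.0), (-5.0, 5.0)]
result = differential_evolution(sphere, bounds, maxiter=50, popsize=10,
                                polish=False, disp=False, x0=[1.0, -1.0])
print(result.x, result.fun)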

kernel_tuner/strategies/firefly_algorithm.py

Lines changed: 4 additions & 1 deletion

@@ -21,7 +21,7 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     cost_func = CostFunc(searchspace, tuning_options, runner, scaling=True)
 
     # using this instead of get_bounds because scaling is used
-    bounds, _, eps = cost_func.get_bounds_x0_eps()
+    bounds, x0, eps = cost_func.get_bounds_x0_eps()
 
     num_particles, maxiter, B0, gamma, alpha = common.get_options(tuning_options.strategy_options, _options)
 
@@ -38,6 +38,9 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     for i, particle in enumerate(swarm):
         particle.position = scale_from_params(population[i], searchspace.tune_params, eps)
 
+    # include user provided starting point
+    swarm[0].position = x0
+
     # compute initial intensities
     for j in range(num_particles):
         try:

kernel_tuner/strategies/genetic_algorithm.py

Lines changed: 2 additions & 0 deletions

@@ -27,6 +27,8 @@ def tune(searchspace: Searchspace, runner, tuning_options):
 
     population = list(list(p) for p in searchspace.get_random_sample(pop_size))
 
+    population[0] = cost_func.get_start_pos()
+
     for generation in range(generations):
 
         # determine fitness of population members

kernel_tuner/strategies/greedy_ils.py

Lines changed: 1 addition & 1 deletion

@@ -31,7 +31,7 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     cost_func = CostFunc(searchspace, tuning_options, runner)
 
     #while searching
-    candidate = searchspace.get_random_sample(1)[0]
+    candidate = cost_func.get_start_pos()
     best_score = cost_func(candidate, check_restrictions=False)
 
     last_improvement = 0

kernel_tuner/strategies/greedy_mls.py

Lines changed: 7 additions & 1 deletion

@@ -24,9 +24,15 @@ def tune(searchspace: Searchspace, runner, tuning_options):
 
     fevals = 0
 
+    first_candidate = cost_func.get_start_pos()
+
     #while searching
     while fevals < max_fevals:
-        candidate = searchspace.get_random_sample(1)[0]
+        if first_candidate:
+            candidate = first_candidate
+            first_candidate = None
+        else:
+            candidate = searchspace.get_random_sample(1)[0]
 
         try:
             base_hillclimb(candidate, neighbor, max_fevals, searchspace, tuning_options, cost_func, restart=restart, randomize=randomize, order=order)

kernel_tuner/strategies/pso.py

Lines changed: 4 additions & 2 deletions

@@ -21,8 +21,7 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     cost_func = CostFunc(searchspace, tuning_options, runner, scaling=True)
 
     #using this instead of get_bounds because scaling is used
-    bounds, _, eps = cost_func.get_bounds_x0_eps()
-
+    bounds, x0, eps = cost_func.get_bounds_x0_eps()
 
     num_particles, maxiter, w, c1, c2 = common.get_options(tuning_options.strategy_options, _options)
 
@@ -39,6 +38,9 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     for i, particle in enumerate(swarm):
         particle.position = scale_from_params(population[i], searchspace.tune_params, eps)
 
+    # include user provided starting point
+    swarm[0].position = x0
+
     # start optimization
     for i in range(maxiter):
         if tuning_options.verbose:

kernel_tuner/strategies/simulated_annealing.py

Lines changed: 1 addition & 1 deletion

@@ -30,7 +30,7 @@ def tune(searchspace: Searchspace, runner, tuning_options):
     max_feval = tuning_options.strategy_options.get("max_fevals", max_iter)
 
     # get random starting point and evaluate cost
-    pos = list(searchspace.get_random_sample(1)[0])
+    pos = cost_func.get_start_pos()
     old_cost = cost_func(pos, check_restrictions=False)
 
     # main optimization loop

test/strategies/test_strategies.py

Lines changed: 14 additions & 0 deletions

@@ -78,3 +78,17 @@ def test_strategies(vector_add, strategy):
     for expected_key, expected_type in expected_items.items():
         assert expected_key in res
         assert isinstance(res[expected_key], expected_type)
+
+    # check if strategy respects user-specified starting point (x0)
+    if not strategy in ["brute_force", "random_sample", "bayes_opt"]:
+        x0 = [256]
+        filter_options["x0"] = x0
+
+        results, _ = kernel_tuner.tune_kernel(*vector_add, strategy=strategy, strategy_options=filter_options,
+                                              verbose=False, cache=cache_filename, simulation_mode=True)
+
+        assert results[0]["block_size_x"] == x0[0]
+
+
+
+
