Commit a4a69ae
Adjusted CostFunc and tests to use budget_spent_fraction
1 parent 22a6e0e commit a4a69ae

File tree

    kernel_tuner/strategies/wrapper.py
    test/test_custom_optimizer.py

2 files changed: +20 −36 lines

kernel_tuner/strategies/wrapper.py

Lines changed: 3 additions & 4 deletions
@@ -14,13 +14,12 @@ def __init__(self):
         self.costfunc_kwargs = {"scaling": True, "snap": True}
 
     @abstractmethod
-    def __call__(self, func: CostFunc, searchspace: Searchspace, budget_spent_fraction: float) -> tuple[tuple, float]:
-        """_summary_
+    def __call__(self, func: CostFunc, searchspace: Searchspace) -> tuple[tuple, float]:
+        """Optimize the black box function `func` within the given `searchspace`.
 
         Args:
-            func (CostFunc): Cost function to be optimized.
+            func (CostFunc): Cost function to be optimized. Has a property `budget_spent_fraction` that indicates how much of the budget has been spent.
             searchspace (Searchspace): Search space containing the parameters to be optimized.
-            budget_spent_fraction (float): Fraction of the budget that has already been spent.
 
         Returns:
             tuple[tuple, float]: tuple of the best parameters and the corresponding cost value
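
To illustrate the updated interface, here is a minimal sketch of a custom optimizer written against it. This is not part of the commit: `RandomSearch` is a hypothetical example class. It assumes the same scaled [-5, 5]^dim parameter space the test optimizer below operates in, and it relies only on names that appear in this diff: the `func.budget_spent_fraction` property and the `searchspace.num_params` and `searchspace.size` attributes.

    import numpy as np

    from kernel_tuner.strategies.wrapper import OptAlg


    class RandomSearch(OptAlg):
        """Hypothetical example: uniform random search via the new OptAlg interface."""

        def __call__(self, func, searchspace):
            dim = searchspace.num_params
            best_params, best_value = None, np.inf
            evaluations = 0
            # Stop once the evaluation budget is exhausted or the whole space is covered.
            while func.budget_spent_fraction < 1.0 and evaluations < searchspace.size:
                candidate = np.random.uniform(-5.0, 5.0, dim)
                value = func(candidate)
                evaluations += 1
                if value < best_value:
                    best_params, best_value = candidate.copy(), value
            return best_params, best_value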

test/test_custom_optimizer.py

Lines changed: 17 additions & 32 deletions
@@ -3,7 +3,9 @@
 
 import numpy as np
 
-class HybridDELocalRefinement:
+from kernel_tuner.strategies.wrapper import OptAlg
+
+class HybridDELocalRefinement(OptAlg):
     """
     A two-phase differential evolution with local refinement, intended for BBOB-type
     black box optimization problems in [-5,5]^dim.
@@ -12,21 +14,14 @@ class HybridDELocalRefinement:
     exploration and local exploitation under a strict function evaluation budget.
     """
 
-    def __init__(self, budget, dim):
-        """
-        Initialize the optimizer with:
-        - budget: total number of function evaluations allowed.
-        - dim: dimensionality of the search space.
-        """
-        self.budget = budget
-        self.dim = dim
+    def __init__(self):
+        super().__init__()
         # You can adjust these hyperparameters based on experimentation/tuning:
-        self.population_size = min(50, 10 * dim)  # Caps for extremely large dim
         self.F = 0.8  # Differential weight
         self.CR = 0.9  # Crossover probability
         self.local_search_freq = 10  # Local refinement frequency in generations
 
-    def __call__(self, func):
+    def __call__(self, func, searchspace):
         """
         Optimize the black box function `func` in [-5,5]^dim, using
         at most self.budget function evaluations.
@@ -35,9 +30,8 @@ def __call__(self, func):
         best_params: np.ndarray representing the best parameters found
         best_value: float representing the best objective value found
         """
-        # Check if we have a non-positive budget
-        if self.budget <= 0:
-            raise ValueError("Budget must be a positive integer.")
+        self.dim = searchspace.num_params
+        self.population_size = round(min(min(50, 10 * self.dim), np.ceil(searchspace.size / 3)))  # Caps for extremely large dim
 
         # 1. Initialize population
         lower_bound, upper_bound = -5.0, 5.0
@@ -49,8 +43,6 @@
         for i in range(self.population_size):
             fitness[i] = func(pop[i])
             evaluations += 1
-            if evaluations >= self.budget:
-                break
 
         # Track best solution
         best_idx = np.argmin(fitness)
@@ -59,7 +51,7 @@
 
         # 2. Main evolutionary loop
         gen = 0
-        while evaluations < self.budget:
+        while func.budget_spent_fraction < 1.0 and evaluations < searchspace.size:
             gen += 1
             for i in range(self.population_size):
                 # DE mutation: pick three distinct indices
@@ -78,7 +70,7 @@
                 # Evaluate trial
                 trial_fitness = func(trial)
                 evaluations += 1
-                if evaluations >= self.budget:
+                if func.budget_spent_fraction > 1.0:
                     # If out of budget, wrap up
                     if trial_fitness < fitness[i]:
                         pop[i] = trial
@@ -99,14 +91,11 @@
                     best_params = trial.copy()
 
             # Periodically refine best solution with a small local neighborhood search
-            if gen % self.local_search_freq == 0 and evaluations < self.budget:
+            if gen % self.local_search_freq == 0 and func.budget_spent_fraction < 1.0:
                 best_params, best_value, evaluations = self._local_refinement(
                     func, best_params, best_value, evaluations, lower_bound, upper_bound
                 )
 
-            if evaluations >= self.budget:
-                break
-
         return best_params, best_value
 
     def _local_refinement(self, func, best_params, best_value, evaluations, lb, ub):
@@ -115,11 +104,10 @@ def _local_refinement(self, func, best_params, best_value, evaluations, lb, ub):
         Uses a quick 'perturb-and-accept' approach in a shrinking neighborhood.
         """
         # Neighborhood size shrinks as the budget is consumed
-        frac_budget_used = evaluations / self.budget
-        step_size = 0.2 * (1.0 - frac_budget_used)
+        step_size = 0.2 * (1.0 - func.budget_spent_fraction)
 
         for _ in range(5):  # 5 refinements each time
-            if evaluations >= self.budget:
+            if func.budget_spent_fraction >= 1.0:
                 break
             candidate = best_params + np.random.uniform(-step_size, step_size, self.dim)
             candidate = np.clip(candidate, lb, ub)
@@ -138,26 +126,23 @@ def _local_refinement(self, func, best_params, best_value, evaluations, lb, ub):
 import os
 from kernel_tuner import tune_kernel
 from kernel_tuner.strategies.wrapper import OptAlgWrapper
-cache_filename = os.path.dirname(
-
-    os.path.realpath(__file__)) + "/test_cache_file.json"
 
 from .test_runners import env
 
+cache_filename = os.path.dirname(os.path.realpath(__file__)) + "/test_cache_file.json"
 
 
 def test_OptAlgWrapper(env):
     kernel_name, kernel_string, size, args, tune_params = env
 
     # Instantiate LLaMAE optimization algorithm
-    budget = int(15)
-    dim = len(tune_params)
-    optimizer = HybridDELocalRefinement(budget, dim)
+    optimizer = HybridDELocalRefinement()
 
     # Wrap the algorithm class in the OptAlgWrapper
    # for use in Kernel Tuner
     strategy = OptAlgWrapper(optimizer)
+    strategy_options = { 'max_fevals': 15 }
 
     # Call the tuner
     tune_kernel(kernel_name, kernel_string, size, args, tune_params,
-                strategy=strategy, cache=cache_filename,
+                strategy=strategy, strategy_options=strategy_options, cache=cache_filename,
                 simulation_mode=True, verbose=True)
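
Note the design shift this test reflects: the evaluation budget is no longer passed to the optimizer's constructor but is owned by the cost function, with the limit configured through `strategy_options={'max_fevals': 15}` and exposed to the algorithm as `func.budget_spent_fraction`. The hypothetical `RandomSearch` sketch above would be wired up the same way:

    optimizer = RandomSearch()
    strategy = OptAlgWrapper(optimizer)
    tune_kernel(kernel_name, kernel_string, size, args, tune_params,
                strategy=strategy, strategy_options={'max_fevals': 15},
                cache=cache_filename, simulation_mode=True)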

0 commit comments