From c39b87eba2b85e4f3f951162017595b17328ccf4 Mon Sep 17 00:00:00 2001
From: Ben van Werkhoven
Date: Fri, 4 Jul 2025 01:52:02 +0200
Subject: [PATCH 01/11] replace differential evolution strategy

---
 kernel_tuner/strategies/diff_evo.py | 321 ++++++++++++++++++++++++++--
 test/strategies/__init__.py         |   0
 test/strategies/test_diff_evo.py    | 152 +++++++++++++
 test/test_runners.py                |  10 -
 4 files changed, 453 insertions(+), 30 deletions(-)
 create mode 100644 test/strategies/__init__.py
 create mode 100644 test/strategies/test_diff_evo.py

diff --git a/kernel_tuner/strategies/diff_evo.py b/kernel_tuner/strategies/diff_evo.py
index 5ad2b9474..b268c55cd 100644
--- a/kernel_tuner/strategies/diff_evo.py
+++ b/kernel_tuner/strategies/diff_evo.py
@@ -1,43 +1,324 @@
-"""The differential evolution strategy that optimizes the search through the parameter space."""
-from scipy.optimize import differential_evolution
+"""A simple Differential Evolution for parameter search."""
+import re
+import numpy as np
 
 from kernel_tuner import util
 from kernel_tuner.searchspace import Searchspace
 from kernel_tuner.strategies import common
 from kernel_tuner.strategies.common import CostFunc
 
-supported_methods = ["best1bin", "best1exp", "rand1exp", "randtobest1exp", "best2exp", "rand2exp", "randtobest1bin", "best2bin", "rand2bin", "rand1bin"]
+_options = dict(
+    popsize=("population size", 50),
+    maxiter=("maximum number of generations", 200),
+    F=("mutation factor (differential weight)", 0.8),
+    CR=("crossover rate", 0.9),
+    method=("method", "best1bin")
+)
 
-_options = dict(method=(f"Creation method for new population, any of {supported_methods}", "best1bin"),
-                popsize=("Population size", 20),
-                maxiter=("Number of generations", 100))
+supported_methods = ["best1bin", "rand1bin", "best2bin", "rand2bin", "best1exp", "rand1exp", "best2exp", "rand2exp", "currenttobest1bin", "currenttobest1exp", "randtobest1bin", "randtobest1exp"]
 
 
 def tune(searchspace: Searchspace, runner, tuning_options):
-
-
-    method, popsize, maxiter = common.get_options(tuning_options.strategy_options, _options)
-
-    # build a bounds array as needed for the optimizer
     cost_func = CostFunc(searchspace, tuning_options, runner)
     bounds = cost_func.get_bounds()
 
-    # ensure particles start from legal points
-    population = list(list(p) for p in searchspace.get_random_sample(popsize))
+    options = tuning_options.strategy_options
+    popsize, maxiter, F, CR, method = common.get_options(options, _options)
+
+    if method not in supported_methods:
+        raise ValueError(f"Error {method} not supported, {supported_methods=}")
 
-    # call the differential evolution optimizer
-    opt_result = None
     try:
-        opt_result = differential_evolution(cost_func, bounds, maxiter=maxiter, popsize=popsize, init=population,
-                                            polish=False, strategy=method, disp=tuning_options.verbose)
+        differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F, CR, method, tuning_options.verbose)
     except util.StopCriterionReached as e:
         if tuning_options.verbose:
             print(e)
 
-    if opt_result and tuning_options.verbose:
-        print(opt_result.message)
-
     return cost_func.results
 
 
 tune.__doc__ = common.get_strategy_docstring("Differential Evolution", _options)
+
+
+def values_to_indices(individual_values, tune_params):
+    """Converts an individual's values to its corresponding index vector."""
+    idx = np.zeros(len(individual_values))
+    for i, v in enumerate(tune_params.values()):
+        idx[i] = v.index(individual_values[i])
+    return idx
+
+
+def indices_to_values(individual_indices, tune_params):
+    """Converts an individual's index vector back to its values."""
+    tune_params_list = list(tune_params.values())
+    print(f"{tune_params_list=} {individual_indices=}")
+    values = []
+    for dim, idx in enumerate(individual_indices):
+        values.append(tune_params_list[dim][idx])
+    return np.array(values)
+
+
+def parse_method(method):
+    """ Helper func to parse the preferred method into its components. """
+    pattern = r"^(best|rand|currenttobest|randtobest)(1|2)(bin|exp)$"
+    match = re.fullmatch(pattern, method)
+
+    if match:
+        return match.group(1) == "best", int(match.group(2)), mutation[match.group(2)], crossover[match.group(3)]
+    else:
+        raise ValueError("Error parsing differential evolution method")
+
+
+def random_draw(idxs, mutation, best):
+    """
+    Draw requested number of random individuals.
+
+    Draw without replacement unless there is not enough to draw from.
+    """
+    draw = 2 * mutation + 1 - int(best)
+    return np.random.choice(idxs, draw, replace=draw>=len(idxs))
+
+
+def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F, CR, method, verbose):
+    """
+    A basic implementation of the Differential Evolution algorithm.
+
+    This function finds the minimum of a given cost function within specified bounds.
+
+    Args:
+        cost_func (callable): The objective function to be minimized. It should take a
+                              single argument (a numpy array of parameters) and return a
+                              single scalar value (the cost).
+        bounds (list of tuples): A list where each tuple contains the (min, max) bounds
+                                 for each parameter. e.g., [(-5, 5), (-5, 5)]
+        popsize (int): The size of the population.
+        maxiter (int): The maximum number of generations to run.
+        F (float): The mutation factor, also known as the differential weight.
+                   Should be in the range [0, 2].
+        CR (float): The crossover probability. Should be in the range [0, 1].
+        verbose (bool): If True, prints the progress of the algorithm at each generation.
+
+    Returns:
+        dict: A dictionary containing the best solution found ('solution') and its
+              corresponding cost ('cost').
+    """
+    tune_params = cost_func.tuning_options.tune_params
+    min_idx = np.zeros(len(tune_params))
+    max_idx = [len(v)-1 for v in tune_params.values()]
+
+    best, mutation, mutation_method, crossover_method = parse_method(method)
+
+    # --- 1. Initialization ---
+
+    # Get the number of dimensions from the bounds list
+    dimensions = len(bounds)
+
+    # Convert bounds to a numpy array for easier manipulation
+    bounds = np.array(bounds)
+
+    # Initialize the population with random individuals within the bounds
+    population = np.array(list(list(p) for p in searchspace.get_random_sample(popsize)))
+
+    # Calculate the initial cost for each individual in the population
+    population_cost = np.array([cost_func(ind) for ind in population])
+
+    # Keep track of the best solution found so far
+    best_idx = np.argmin(population_cost)
+    best_solution = population[best_idx]
+    best_solution_idx = values_to_indices(best_solution, tune_params)
+    best_cost = population_cost[best_idx]
+
+    # --- 2. Main Loop ---
+
+    # Iterate through the specified number of generations
+    for generation in range(maxiter):
+
+        trial_population = []
+
+        # Iterate over each individual in the population
+        for i in range(popsize):
+
+            # --- a. Mutation ---
+
+            # Select three distinct random individuals (a, b, c) from the population,
+            # ensuring they are different from the current individual 'i'.
+            idxs = [idx for idx in range(popsize) if idx != i]
+            randos = random_draw(idxs, mutation, best)
+
+            if mutation_method == mutate_currenttobest1:
+                randos[0] = i
+
+            randos_idx = [values_to_indices(population[rando], tune_params) for rando in randos]
+
+            # Apply mutation strategy
+            donor_vector_idx = mutation_method(best_solution_idx, randos_idx, F, min_idx, max_idx, best)
+            donor_vector = indices_to_values(donor_vector_idx, tune_params)
+
+            # --- b. Crossover ---
+            trial_vector = crossover_method(donor_vector, population[i], CR)
+
+            # Store for selection
+            trial_population.append(trial_vector)
+
+        # --- c. Selection ---
+
+        # Calculate the cost of the new trial vectors
+        trial_population_cost = np.array([cost_func(ind) for ind in trial_population])
+
+        # Iterate over each individual in the trial population
+        for i in range(popsize):
+
+            trial_vector = trial_population[i]
+            trial_cost = trial_population_cost[i]
+
+            # If the trial vector has a lower or equal cost, it replaces the
+            # target vector in the population for the next generation.
+            if trial_cost <= population_cost[i]:
+                population[i] = trial_vector
+                population_cost[i] = trial_cost
+
+                # Update the overall best solution if the new one is better
+                if trial_cost < best_cost:
+                    best_cost = trial_cost
+                    best_solution = trial_vector
+                    best_solution_idx = values_to_indices(best_solution, tune_params)
+
+        # Print the progress at the end of the generation
+        if verbose:
+            print(f"Generation {generation + 1}, Best Cost: {best_cost:.6f}")
+
+    return {'solution': best_solution, 'cost': best_cost}
+
+
+def round_and_clip(mutant_idx_float, min_idx, max_idx):
+    """ Helper func to round floating index to nearest integer and clip within bounds. """
+    # Round to the nearest integer
+    rounded_idx = np.round(mutant_idx_float)
+
+    # Clip the indices to ensure they are within valid index bounds
+    clipped_idx = np.clip(rounded_idx, min_idx, max_idx)
+
+    # Convert final mutant vector to integer type
+    return clipped_idx.astype(int)
+
+
+def mutate_currenttobest1(best_idx, randos_idx, F, min_idx, max_idx, best):
+    """
+    Performs the DE/current-to-best/1 mutation strategy.
+
+    This function operates on the indices of the parameters, not their actual values.
+    The formula v = cur + F * (best - cur + a - b) is applied to the indices, and the result is
+    then rounded and clipped to ensure it remains a valid index.
+    """
+    cur_idx, b_idx, c_idx = randos_idx
+
+    # Apply the DE/currenttobest/1 formula to the indices
+    mutant_idx_float = cur_idx + F * (best_idx - cur_idx + b_idx - c_idx)
+
+    return round_and_clip(mutant_idx_float, min_idx, max_idx)
+
+
+def mutate_randtobest1(best_idx, randos_idx, F, min_idx, max_idx, best):
+    """
+    Performs the DE/rand-to-best/1 mutation strategy.
+
+    This function operates on the indices of the parameters, not their actual values.
+    The formula v = a + F * (best - a + b - c) is applied to the indices, and the result is
+    then rounded and clipped to ensure it remains a valid index.
+    """
+    a_idx, b_idx, c_idx = randos_idx
+
+    # Apply the DE/randtobest/1 formula to the indices
+    mutant_idx_float = a_idx + F * (best_idx - a_idx + b_idx - c_idx)
+
+    return round_and_clip(mutant_idx_float, min_idx, max_idx)
+
+
+def mutate_de_1(best_idx, randos_idx, F, min_idx, max_idx, best):
+    """
+    Performs the DE/1 mutation strategy.
+
+    This function operates on the indices of the parameters, not their actual values.
+    The formula v = a + F * (b - c) is applied to the indices, and the result is
+    then rounded and clipped to ensure it remains a valid index.
+
+    """
+    if best:
+        a_idx = best_idx
+        b_idx, c_idx = randos_idx
+    else:
+        a_idx, b_idx, c_idx = randos_idx
+
+    # Apply the DE/rand/1 formula to the indices
+    mutant_idx_float = a_idx + F * (b_idx - c_idx)
+
+    return round_and_clip(mutant_idx_float, min_idx, max_idx)
+
+
+def mutate_de_2(best_idx, randos_idx, F, min_idx, max_idx, best):
+    """
+    Performs the DE/2 mutation strategy for a discrete search space.
+
+    This function operates on the indices of the parameters, not their actual values.
+    The formula v = a + F * (b + c - d - e) is applied to the indices,
+    and the result is then rounded and clipped to ensure it remains a valid index.
+
+    """
+    if best:
+        a_idx = best_idx
+        b_idx, c_idx, d_idx, e_idx = randos_idx
+    else:
+        a_idx, b_idx, c_idx, d_idx, e_idx = randos_idx
+
+    # Apply the DE/2 formula to the indices
+    mutant_idx_float = a_idx + F * (b_idx + c_idx - d_idx - e_idx)
+
+    return round_and_clip(mutant_idx_float, min_idx, max_idx)
+
+
+def binomial_crossover(donor_vector, target, CR):
+    """ Performs binomial crossover of donor_vector with target given crossover rate CR. """
+    # Create the trial vector by mixing parameters from the target and donor vectors
+    trial_vector = np.copy(target)
+    dimensions = len(donor_vector)
+
+    # Generate a random array of floats for comparison with the crossover rate CR
+    crossover_points = np.random.rand(dimensions) < CR
+
+    # Ensure at least one parameter is taken from the donor vector
+    # to prevent the trial vector from being identical to the target vector.
+    if not np.any(crossover_points):
+        crossover_points[np.random.randint(0, dimensions)] = True
+
+    # Apply crossover
+    trial_vector[crossover_points] = donor_vector[crossover_points]
+
+    return trial_vector
+
+
+def exponential_crossover(donor_vector, target, CR):
+    """
+    Performs exponential crossover for a discrete search space.
+
+    This creates a trial vector by taking a contiguous block of parameters
+    from the donor vector and the rest from the target vector.
+    """
+    dimensions = len(target)
+    trial_idx = np.copy(target)
+
+    # 1. Select a random starting point for the crossover block.
+    start_point = np.random.randint(0, dimensions)
+
+    # 2. Determine the length of the block to be copied from the mutant.
+    #    The loop continues as long as random numbers are less than CR.
+    #    This ensures at least one parameter is always taken from the mutant.
+    l = 0
+    while np.random.rand() < CR and l < dimensions:
+        crossover_point = (start_point + l) % dimensions
+        trial_idx[crossover_point] = donor_vector[crossover_point]
+        l += 1
+
+    return trial_idx
+
+
+mutation = {"1": mutate_de_1, "2": mutate_de_2, "currenttobest1": mutate_currenttobest1, "randtobest1": mutate_randtobest1}
+crossover = {"bin": binomial_crossover, "exp": exponential_crossover}
diff --git a/test/strategies/__init__.py b/test/strategies/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/test/strategies/test_diff_evo.py b/test/strategies/test_diff_evo.py
new file mode 100644
index 000000000..5b8697e5f
--- /dev/null
+++ b/test/strategies/test_diff_evo.py
@@ -0,0 +1,152 @@
+import numpy as np
+import pytest
+from kernel_tuner.strategies.diff_evo import values_to_indices, indices_to_values, mutate_de_1, mutate_de_2, binomial_crossover, exponential_crossover
+from kernel_tuner.strategies.diff_evo import supported_methods
+from kernel_tuner import tune_kernel
+
+from .test_strategies import vector_add, cache_filename
+
+def test_values_to_indices():
+
+    tune_params = {}
+    tune_params["block_size_x"] = [16, 32, 128, 1024]
+
+    result = values_to_indices([1024], tune_params)
+    expected = [3]
+    assert result[0] == expected[0]
+    assert len(result) == len(expected)
+
+    tune_params["block_size_y"] = [16, 32, 128, 1024]
+
+    result = values_to_indices([32, 128], tune_params)
+    expected = [1, 2]
+    assert result[0] == expected[0]
+    assert result[1] == expected[1]
+    assert len(result) == len(expected)
+
+
+def test_indices_to_values():
+
+    tune_params = {}
+    tune_params["block_size_x"] = [16, 32, 128, 1024]
+
+    expected = [1024]
+    result = indices_to_values([3], tune_params)
+    assert result[0] == expected[0]
+    assert len(result) == len(expected)
+
+    tune_params["block_size_y"] = [16, 32, 128, 1024]
+    expected = [1024, 32]
+    result = indices_to_values([3,1], tune_params)
+    assert result[0] == expected[0]
+    assert result[1] == expected[1]
+    assert len(result) == len(expected)
+
+
+def test_mutate_de_1():
+
+    tune_params = {}
+    tune_params["block_size_x"] = [16, 32, 128, 256, 512, 1024]
+    tune_params["block_size_y"] = [1, 2, 8]
+    tune_params["block_size_z"] = [1, 2, 4, 8]
+
+    a_idx = np.array([0, 1, 2])
+    b_idx = np.array([4, 1, 0])
+    c_idx = np.array([5, 0, 1])
+    randos_idx = [a_idx, b_idx, c_idx]
+
+    F = 0.8
+    params_list = list(tune_params)
+    min_idx = np.zeros(len(tune_params))
+    max_idx = [len(v)-1 for v in tune_params.values()]
+
+    mutant = mutate_de_1(a_idx, randos_idx, F, min_idx, max_idx, False)
+
+    assert len(mutant) == len(a_idx)
+
+    for dim, idx in enumerate(mutant):
+        assert isinstance(idx, np.integer)
+        assert min_idx[dim] <= idx <= max_idx[dim]
+
+    mutant = mutate_de_1(a_idx, randos_idx[:-1], F, min_idx, max_idx, True)
+
+    assert len(mutant) == len(a_idx)
+
+    for dim, idx in enumerate(mutant):
+        assert isinstance(idx, np.integer)
+        assert min_idx[dim] <= idx <= max_idx[dim]
+
+
+def test_mutate_de_2():
+
+    tune_params = {}
+    tune_params["block_size_x"] = [16, 32, 128, 256, 512, 1024]
+    tune_params["block_size_y"] = [1, 2, 8]
+    tune_params["block_size_z"] = [1, 2, 4, 8]
+
+    a_idx = np.array([0, 1, 2])
+    b_idx = np.array([4, 1, 0])
+    c_idx = np.array([5, 0, 1])
+    d_idx = np.array([3, 2, 3])
+    e_idx = np.array([1, 0, 3])
+    randos_idx = [a_idx, b_idx, c_idx, d_idx, e_idx]
+
+    F = 0.8
+    params_list = list(tune_params)
+    min_idx = np.zeros(len(tune_params))
+    max_idx = [len(v)-1 for v in tune_params.values()]
+
+    mutant = mutate_de_2(a_idx, randos_idx, F, min_idx, max_idx, False)
+
+    assert len(mutant) == len(a_idx)
+
+    for dim, idx in enumerate(mutant):
+        assert isinstance(idx, np.integer)
+        assert min_idx[dim] <= idx <= max_idx[dim]
+
+    mutant = mutate_de_2(a_idx, randos_idx[:-1], F, min_idx, max_idx, True)
+
+    assert len(mutant) == len(a_idx)
+
+    for dim, idx in enumerate(mutant):
+        assert isinstance(idx, np.integer)
+        assert min_idx[dim] <= idx <= max_idx[dim]
+
+
+def test_binomial_crossover():
+
+    donor_vector = np.array([1, 2, 3, 4, 5])
+    target = np.array([6, 7, 8, 9, 10])
+    CR = 0.8
+
+    result = binomial_crossover(donor_vector, target, CR)
+    assert len(result) == len(donor_vector)
+
+    for dim, val in enumerate(result):
+        assert (val == donor_vector[dim]) or (val == target[dim])
+
+
+def test_exponential_crossover():
+
+    donor_vector = np.array([1, 2, 3, 4, 5])
+    target = np.array([6, 7, 8, 9, 10])
+    CR = 0.8
+
+    result = exponential_crossover(donor_vector, target, CR)
+    assert len(result) == len(donor_vector)
+
+    for dim, val in enumerate(result):
+        assert (val == donor_vector[dim]) or (val == target[dim])
+
+
+@pytest.mark.parametrize('method', supported_methods)
+def test_diff_evo(vector_add, method):
+    result, _ = tune_kernel(*vector_add,
+                            strategy="diff_evo",
+                            strategy_options=dict(popsize=5, method=method),
+                            verbose=True,
+                            cache=cache_filename,
+                            simulation_mode=True)
+    assert len(result) > 0
+
+
diff --git a/test/test_runners.py b/test/test_runners.py
index acbb641e6..22c11f7cd 100644
--- a/test/test_runners.py
+++ b/test/test_runners.py
@@ -130,16 +130,6 @@ def test_simulation_runner(env):
     assert max_time - recorded_time_including_simulation < 10
 
 
-def test_diff_evo(env):
-    result, _ = tune_kernel(*env,
-                            strategy="diff_evo",
-                            strategy_options=dict(popsize=5),
-                            verbose=True,
-                            cache=cache_filename,
-                            simulation_mode=True)
-    assert len(result) > 0
-
-
 def test_restrictions(env):
     restrictions = [lambda p: p["block_size_x"] <= 512, "block_size_x > 128"]
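Before the formatting pass in the next commit, a compact sketch of the index arithmetic this new strategy is built on, with hypothetical tuning parameters (illustration only, not part of the patch):

    import numpy as np

    tune_params = {"block_size_x": [16, 32, 128, 1024], "block_size_y": [1, 2, 8]}

    # values_to_indices / indices_to_values round-trip a configuration:
    #   [128, 8]  ->  array([2., 2.])  ->  [128, 8]

    # DE/rand/1 mutation then operates purely on index vectors:
    a_idx = np.array([0, 1])
    b_idx = np.array([3, 1])
    c_idx = np.array([1, 0])
    F = 0.8
    mutant = a_idx + F * (b_idx - c_idx)   # -> array([1.6, 1.8])
    # round_and_clip(mutant, [0, 0], [3, 2]) -> array([2, 2]), a valid index vector

Binomial crossover then swaps a random subset of dimensions between this donor and the target individual, while the exponential variant copies one contiguous (wrapping) block.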
""" + """Helper func to parse the preferred method into its components.""" pattern = r"^(best|rand|currenttobest|randtobest)(1|2)(bin|exp)$" match = re.fullmatch(pattern, method) @@ -76,7 +90,7 @@ def random_draw(idxs, mutation, best): Draw without replacement unless there is not enough to draw from. """ draw = 2 * mutation + 1 - int(best) - return np.random.choice(idxs, draw, replace=draw>=len(idxs)) + return np.random.choice(idxs, draw, replace=draw >= len(idxs)) def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F, CR, method, verbose): @@ -104,15 +118,12 @@ def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F, """ tune_params = cost_func.tuning_options.tune_params min_idx = np.zeros(len(tune_params)) - max_idx = [len(v)-1 for v in tune_params.values()] + max_idx = [len(v) - 1 for v in tune_params.values()] best, mutation, mutation_method, crossover_method = parse_method(method) # --- 1. Initialization --- - # Get the number of dimensions from the bounds list - dimensions = len(bounds) - # Convert bounds to a numpy array for easier manipulation bounds = np.array(bounds) @@ -187,11 +198,11 @@ def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F, if verbose: print(f"Generation {generation + 1}, Best Cost: {best_cost:.6f}") - return {'solution': best_solution, 'cost': best_cost} + return {"solution": best_solution, "cost": best_cost} def round_and_clip(mutant_idx_float, min_idx, max_idx): - """ Helper func to round floating index to nearest integer and clip within bounds. """ + """Helper func to round floating index to nearest integer and clip within bounds.""" # Round to the nearest integer rounded_idx = np.round(mutant_idx_float) @@ -277,7 +288,7 @@ def mutate_de_2(best_idx, randos_idx, F, min_idx, max_idx, best): def binomial_crossover(donor_vector, target, CR): - """ Performs binomial crossover of donor_vector with target given crossover rate CR. 
""" + """Performs binomial crossover of donor_vector with target given crossover rate CR.""" # Create the trial vector by mixing parameters from the target and donor vectors trial_vector = np.copy(target) dimensions = len(donor_vector) @@ -320,5 +331,11 @@ def exponential_crossover(donor_vector, target, CR): return trial_idx -mutation = {"1": mutate_de_1, "2": mutate_de_2, "currenttobest1": mutate_currenttobest1, "randtobest1": mutate_randtobest1} + +mutation = { + "1": mutate_de_1, + "2": mutate_de_2, + "currenttobest1": mutate_currenttobest1, + "randtobest1": mutate_randtobest1, +} crossover = {"bin": binomial_crossover, "exp": exponential_crossover} diff --git a/test/strategies/test_diff_evo.py b/test/strategies/test_diff_evo.py index 5b8697e5f..f89fe8507 100644 --- a/test/strategies/test_diff_evo.py +++ b/test/strategies/test_diff_evo.py @@ -1,11 +1,19 @@ import numpy as np import pytest -from kernel_tuner.strategies.diff_evo import values_to_indices, indices_to_values, mutate_de_1, mutate_de_2, binomial_crossover, exponential_crossover +from kernel_tuner.strategies.diff_evo import ( + values_to_indices, + indices_to_values, + mutate_de_1, + mutate_de_2, + binomial_crossover, + exponential_crossover, +) from kernel_tuner.strategies.diff_evo import supported_methods from kernel_tuner import tune_kernel from .test_strategies import vector_add, cache_filename + def test_values_to_indices(): tune_params = {} @@ -37,7 +45,7 @@ def test_indices_to_values(): tune_params["block_size_y"] = [16, 32, 128, 1024] expected = [1024, 32] - result = indices_to_values([3,1], tune_params) + result = indices_to_values([3, 1], tune_params) assert result[0] == expected[0] assert result[1] == expected[1] assert len(result) == len(expected) @@ -58,7 +66,7 @@ def test_mutate_de_1(): F = 0.8 params_list = list(tune_params) min_idx = np.zeros(len(tune_params)) - max_idx = [len(v)-1 for v in tune_params.values()] + max_idx = [len(v) - 1 for v in tune_params.values()] mutant = mutate_de_1(a_idx, randos_idx, F, min_idx, max_idx, False) @@ -94,7 +102,7 @@ def test_mutate_de_2(): F = 0.8 params_list = list(tune_params) min_idx = np.zeros(len(tune_params)) - max_idx = [len(v)-1 for v in tune_params.values()] + max_idx = [len(v) - 1 for v in tune_params.values()] mutant = mutate_de_2(a_idx, randos_idx, F, min_idx, max_idx, False) @@ -139,14 +147,14 @@ def test_exponential_crossover(): assert (val == donor_vector[dim]) or (val == target[dim]) -@pytest.mark.parametrize('method', supported_methods) +@pytest.mark.parametrize("method", supported_methods) def test_diff_evo(vector_add, method): - result, _ = tune_kernel(*vector_add, - strategy="diff_evo", - strategy_options=dict(popsize=5, method=method), - verbose=True, - cache=cache_filename, - simulation_mode=True) + result, _ = tune_kernel( + *vector_add, + strategy="diff_evo", + strategy_options=dict(popsize=5, method=method), + verbose=True, + cache=cache_filename, + simulation_mode=True, + ) assert len(result) > 0 - - From 655fcc02cfe55fa3d7fc64847ecbfeb4d636b685 Mon Sep 17 00:00:00 2001 From: Ben van Werkhoven Date: Fri, 4 Jul 2025 08:47:04 +0200 Subject: [PATCH 03/11] fix parsing diff_evo method argument --- kernel_tuner/strategies/diff_evo.py | 32 ++++++++--------------------- 1 file changed, 9 insertions(+), 23 deletions(-) diff --git a/kernel_tuner/strategies/diff_evo.py b/kernel_tuner/strategies/diff_evo.py index 92ad21bb0..ec3d1c14c 100644 --- a/kernel_tuner/strategies/diff_evo.py +++ b/kernel_tuner/strategies/diff_evo.py @@ -65,7 +65,6 @@ def 
From 655fcc02cfe55fa3d7fc64847ecbfeb4d636b685 Mon Sep 17 00:00:00 2001
From: Ben van Werkhoven
Date: Fri, 4 Jul 2025 08:47:04 +0200
Subject: [PATCH 03/11] fix parsing diff_evo method argument

---
 kernel_tuner/strategies/diff_evo.py | 32 ++++++++---------------------
 1 file changed, 9 insertions(+), 23 deletions(-)

diff --git a/kernel_tuner/strategies/diff_evo.py b/kernel_tuner/strategies/diff_evo.py
index 92ad21bb0..ec3d1c14c 100644
--- a/kernel_tuner/strategies/diff_evo.py
+++ b/kernel_tuner/strategies/diff_evo.py
@@ -65,7 +65,6 @@ def values_to_indices(individual_values, tune_params):
 def indices_to_values(individual_indices, tune_params):
     """Converts an individual's index vector back to its values."""
     tune_params_list = list(tune_params.values())
-    print(f"{tune_params_list=} {individual_indices=}")
     values = []
     for dim, idx in enumerate(individual_indices):
         values.append(tune_params_list[dim][idx])
@@ -78,18 +77,22 @@ def parse_method(method):
     match = re.fullmatch(pattern, method)
 
     if match:
-        return match.group(1) == "best", int(match.group(2)), mutation[match.group(2)], crossover[match.group(3)]
+        if match.group(1) in ["currenttobest", "randtobest"]:
+            mutation_method = mutation[match.group(1)]
+        else:
+            mutation_method = mutation[match.group(2)]
+        return match.group(1) == "best", int(match.group(2)), mutation_method, crossover[match.group(3)]
     else:
         raise ValueError("Error parsing differential evolution method")
 
 
-def random_draw(idxs, mutation, best):
+def random_draw(idxs, mutate, best):
     """
     Draw requested number of random individuals.
 
     Draw without replacement unless there is not enough to draw from.
     """
-    draw = 2 * mutation + 1 - int(best)
+    draw = 2 * mutate + 1 - int(best)
     return np.random.choice(idxs, draw, replace=draw >= len(idxs))
 
 
@@ -98,23 +101,6 @@ def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F,
     A basic implementation of the Differential Evolution algorithm.
 
     This function finds the minimum of a given cost function within specified bounds.
-
-    Args:
-        cost_func (callable): The objective function to be minimized. It should take a
-                              single argument (a numpy array of parameters) and return a
-                              single scalar value (the cost).
-        bounds (list of tuples): A list where each tuple contains the (min, max) bounds
-                                 for each parameter. e.g., [(-5, 5), (-5, 5)]
-        popsize (int): The size of the population.
-        maxiter (int): The maximum number of generations to run.
-        F (float): The mutation factor, also known as the differential weight.
-                   Should be in the range [0, 2].
-        CR (float): The crossover probability. Should be in the range [0, 1].
-        verbose (bool): If True, prints the progress of the algorithm at each generation.
-
-    Returns:
-        dict: A dictionary containing the best solution found ('solution') and its
-              corresponding cost ('cost').
     """
     tune_params = cost_func.tuning_options.tune_params
     min_idx = np.zeros(len(tune_params))
@@ -335,7 +321,7 @@ def exponential_crossover(donor_vector, target, CR):
 mutation = {
     "1": mutate_de_1,
     "2": mutate_de_2,
-    "currenttobest1": mutate_currenttobest1,
-    "randtobest1": mutate_randtobest1,
+    "currenttobest": mutate_currenttobest1,
+    "randtobest": mutate_randtobest1,
 }
 crossover = {"bin": binomial_crossover, "exp": exponential_crossover}
From 2fdee8077afa11099704c02e0126bbad6dfb6d3b Mon Sep 17 00:00:00 2001
From: Ben van Werkhoven
Date: Fri, 4 Jul 2025 16:03:38 +0200
Subject: [PATCH 04/11] add test for parse method

---
 test/strategies/test_diff_evo.py | 23 +++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/test/strategies/test_diff_evo.py b/test/strategies/test_diff_evo.py
index f89fe8507..d7ff1dbb7 100644
--- a/test/strategies/test_diff_evo.py
+++ b/test/strategies/test_diff_evo.py
@@ -7,6 +7,9 @@
     mutate_de_2,
     binomial_crossover,
     exponential_crossover,
+    parse_method,
+    mutation,
+    crossover,
 )
 from kernel_tuner.strategies.diff_evo import supported_methods
 from kernel_tuner import tune_kernel
@@ -147,6 +150,26 @@ def test_exponential_crossover():
         assert (val == donor_vector[dim]) or (val == target[dim])
 
 
+def test_parse_method():
+
+    # check that unsupported methods raise ValueError
+    for method in ["randtobest4bin", "bogus3log"]:
+        print(f"{method=}")
+        with pytest.raises(ValueError):
+            parse_method(method)
+
+    # check that supported methods parse correctly
+    def check_result(result, expected):
+        assert len(result) == len(expected)
+        for i, res in enumerate(result):
+            assert res == expected[i]
+
+    check_result(parse_method("rand1bin"), [False, 1, mutation["1"], crossover["bin"]])
+    check_result(parse_method("best1exp"), [True, 1, mutation["1"], crossover["exp"]])
+    check_result(parse_method("randtobest1exp"), [False, 1, mutation["randtobest"], crossover["exp"]])
+    check_result(parse_method("currenttobest1bin"), [False, 1, mutation["currenttobest"], crossover["bin"]])
+
+
 @pytest.mark.parametrize("method", supported_methods)
 def test_diff_evo(vector_add, method):
     result, _ = tune_kernel(
From 539735cbf6aea2fe94be625b78fe1567ce29b230 Mon Sep 17 00:00:00 2001
From: Ben van Werkhoven
Date: Fri, 4 Jul 2025 16:03:56 +0200
Subject: [PATCH 05/11] add support for x0 starting point

---
 kernel_tuner/strategies/diff_evo.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/kernel_tuner/strategies/diff_evo.py b/kernel_tuner/strategies/diff_evo.py
index ec3d1c14c..57cac1ac9 100644
--- a/kernel_tuner/strategies/diff_evo.py
+++ b/kernel_tuner/strategies/diff_evo.py
@@ -115,6 +115,7 @@ def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F,
 
     # Initialize the population with random individuals within the bounds
     population = np.array(list(list(p) for p in searchspace.get_random_sample(popsize)))
+    population[0] = cost_func.get_start_pos()
 
     # Calculate the initial cost for each individual in the population
     population_cost = np.array([cost_func(ind) for ind in population])
"""A simple Different Evolution for parameter search.""" - +import random import re import numpy as np @@ -14,6 +14,7 @@ F=("mutation factor (differential weight)", 0.8), CR=("crossover rate", 0.9), method=("method", "best1bin"), + constraint_aware=("constraint-aware optimization (True/False)", True), ) supported_methods = [ @@ -37,13 +38,13 @@ def tune(searchspace: Searchspace, runner, tuning_options): bounds = cost_func.get_bounds() options = tuning_options.strategy_options - popsize, maxiter, F, CR, method = common.get_options(options, _options) + popsize, maxiter, F, CR, method, constraint_aware = common.get_options(options, _options) if method not in supported_methods: raise ValueError(f"Error {method} not supported, {supported_methods=}") try: - differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F, CR, method, tuning_options.verbose) + differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F, CR, method, constraint_aware, tuning_options.verbose) except util.StopCriterionReached as e: if tuning_options.verbose: print(e) @@ -96,7 +97,7 @@ def random_draw(idxs, mutate, best): return np.random.choice(idxs, draw, replace=draw >= len(idxs)) -def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F, CR, method, verbose): +def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F, CR, method, constraint_aware, verbose): """ A basic implementation of the Differential Evolution algorithm. @@ -114,7 +115,18 @@ def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F, bounds = np.array(bounds) # Initialize the population with random individuals within the bounds - population = np.array(list(list(p) for p in searchspace.get_random_sample(popsize))) + if constraint_aware: + population = np.array(list(list(p) for p in searchspace.get_random_sample(popsize))) + else: + population = [] + dna_size = len(self.tune_params) + for _ in range(self.pop_size): + dna = [] + for key in self.tune_params: + dna.append(random.choice(self.tune_params[key])) + population.append(dna) + population = np.array(population) + population[0] = cost_func.get_start_pos() # Calculate the initial cost for each individual in the population @@ -155,6 +167,10 @@ def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F, # --- b. 
Crossover --- trial_vector = crossover_method(donor_vector, population[i], CR) + # Repair if constraint_aware + if constraint_aware: + trial_vector = repair(trial_vector, searchspace) + # Store for selection trial_population.append(trial_vector) @@ -319,6 +335,25 @@ def exponential_crossover(donor_vector, target, CR): return trial_idx +def repair(trial_vector, searchspace): + """ + Attempts to repair trial_vector if trial_vector is invalid + """ + if not searchspace.is_param_config_valid(tuple(trial_vector)): + # search for valid configurations neighboring trial_vector + # start from strictly-adjacent to increasingly allowing more neighbors + for neighbor_method in ["strictly-adjacent", "adjacent", "Hamming"]: + neighbors = searchspace.get_neighbors_no_cache(tuple(trial_vector), neighbor_method=neighbor_method) + + # if we have found valid neighboring configurations, select one at random + if len(neighbors) > 0: + new_trial_vector = np.array(list(random.choice(neighbors))) + print(f"Differential evolution resulted in invalid config {trial_vector=}, repaired dna to {new_trial_vector=}") + return new_trial_vector + + return trial_vector + + mutation = { "1": mutate_de_1, "2": mutate_de_2, From ea7a69d7c620ecffb920558766f8bd5d7f5fe79a Mon Sep 17 00:00:00 2001 From: Ben van Werkhoven Date: Fri, 4 Jul 2025 23:28:41 +0200 Subject: [PATCH 07/11] LHS sampling, enforce trial population diversity, avoid getting stuck --- kernel_tuner/strategies/diff_evo.py | 83 ++++++++++++++++++++++------- 1 file changed, 64 insertions(+), 19 deletions(-) diff --git a/kernel_tuner/strategies/diff_evo.py b/kernel_tuner/strategies/diff_evo.py index 81ecac631..bc2099982 100644 --- a/kernel_tuner/strategies/diff_evo.py +++ b/kernel_tuner/strategies/diff_evo.py @@ -3,6 +3,8 @@ import re import numpy as np +from scipy.stats.qmc import LatinHypercube + from kernel_tuner import util from kernel_tuner.searchspace import Searchspace from kernel_tuner.strategies import common @@ -11,7 +13,7 @@ _options = dict( popsize=("population size", 50), maxiter=("maximum number of generations", 200), - F=("mutation factor (differential weight)", 0.8), + F=("mutation factor (differential weight)", 1.3), CR=("crossover rate", 0.9), method=("method", "best1bin"), constraint_aware=("constraint-aware optimization (True/False)", True), @@ -35,7 +37,7 @@ def tune(searchspace: Searchspace, runner, tuning_options): cost_func = CostFunc(searchspace, tuning_options, runner) - bounds, x0, _ = cost_func.get_bounds_x0_eps() + bounds = cost_func.get_bounds() options = tuning_options.strategy_options popsize, maxiter, F, CR, method, constraint_aware = common.get_options(options, _options) @@ -97,6 +99,22 @@ def random_draw(idxs, mutate, best): return np.random.choice(idxs, draw, replace=draw >= len(idxs)) +def generate_population(tune_params, min_idx, max_idx, popsize, searchspace, constraint_aware): + if constraint_aware: + samples = LatinHypercube(len(tune_params)).integers(l_bounds=0, u_bounds=max_idx, n=popsize, endpoint=True) + population = [indices_to_values(sample, tune_params) for sample in samples] + population = np.array([repair(individual, searchspace) for individual in population]) + else: + population = [] + for _ in range(popsize): + ind = [] + for key in tune_params: + ind.append(random.choice(tune_params[key])) + population.append(ind) + population = np.array(population) + return population + + def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F, CR, method, constraint_aware, verbose): """ A basic 
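How the repair step behaves on an invalid trial vector, sketched against a small hypothetical search space (assumes kernel_tuner's Searchspace(tune_params, restrictions, max_threads) constructor; values are illustrative):

    import numpy as np
    from kernel_tuner.searchspace import Searchspace

    tune_params = {"block_size_x": [16, 32, 128, 1024], "block_size_y": [1, 2, 8]}
    restrictions = ["block_size_x * block_size_y <= 1024"]
    space = Searchspace(tune_params, restrictions, max_threads=1024)

    # (1024, 8) violates the restriction, so repair() walks increasingly
    # permissive neighborhoods (strictly-adjacent -> adjacent -> Hamming)
    # and returns a randomly chosen valid neighbor, e.g. array([1024, 1]).
    repaired = repair(np.array([1024, 8]), space)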
From ea7a69c620e2d41660aef3651b8da4a3898c2807 Mon Sep 17 00:00:00 2001
From: Ben van Werkhoven
Date: Fri, 4 Jul 2025 23:28:41 +0200
Subject: [PATCH 07/11] LHS sampling, enforce trial population diversity,
 avoid getting stuck

---
 kernel_tuner/strategies/diff_evo.py | 83 ++++++++++++++++++++++-------
 1 file changed, 64 insertions(+), 19 deletions(-)

diff --git a/kernel_tuner/strategies/diff_evo.py b/kernel_tuner/strategies/diff_evo.py
index 81ecac631..bc2099982 100644
--- a/kernel_tuner/strategies/diff_evo.py
+++ b/kernel_tuner/strategies/diff_evo.py
@@ -3,6 +3,8 @@
 import re
 
 import numpy as np
+
+from scipy.stats.qmc import LatinHypercube
+
 from kernel_tuner import util
 from kernel_tuner.searchspace import Searchspace
 from kernel_tuner.strategies import common
@@ -11,7 +13,7 @@
 _options = dict(
     popsize=("population size", 50),
     maxiter=("maximum number of generations", 200),
-    F=("mutation factor (differential weight)", 0.8),
+    F=("mutation factor (differential weight)", 1.3),
     CR=("crossover rate", 0.9),
     method=("method", "best1bin"),
     constraint_aware=("constraint-aware optimization (True/False)", True),
@@ -35,7 +37,7 @@ def tune(searchspace: Searchspace, runner, tuning_options):
 
     cost_func = CostFunc(searchspace, tuning_options, runner)
 
-    bounds, x0, _ = cost_func.get_bounds_x0_eps()
+    bounds = cost_func.get_bounds()
 
     options = tuning_options.strategy_options
     popsize, maxiter, F, CR, method, constraint_aware = common.get_options(options, _options)
@@ -97,6 +99,22 @@ def random_draw(idxs, mutate, best):
     return np.random.choice(idxs, draw, replace=draw >= len(idxs))
 
 
+def generate_population(tune_params, min_idx, max_idx, popsize, searchspace, constraint_aware):
+    if constraint_aware:
+        samples = LatinHypercube(len(tune_params)).integers(l_bounds=0, u_bounds=max_idx, n=popsize, endpoint=True)
+        population = [indices_to_values(sample, tune_params) for sample in samples]
+        population = np.array([repair(individual, searchspace) for individual in population])
+    else:
+        population = []
+        for _ in range(popsize):
+            ind = []
+            for key in tune_params:
+                ind.append(random.choice(tune_params[key]))
+            population.append(ind)
+        population = np.array(population)
+    return population
+
+
 def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F, CR, method, constraint_aware, verbose):
     """
     A basic implementation of the Differential Evolution algorithm.
@@ -115,18 +133,9 @@ def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F,
     bounds = np.array(bounds)
 
     # Initialize the population with random individuals within the bounds
-    if constraint_aware:
-        population = np.array(list(list(p) for p in searchspace.get_random_sample(popsize)))
-    else:
-        population = []
-        dna_size = len(self.tune_params)
-        for _ in range(self.pop_size):
-            dna = []
-            for key in self.tune_params:
-                dna.append(random.choice(self.tune_params[key]))
-            population.append(dna)
-        population = np.array(population)
+    population = generate_population(tune_params, min_idx, max_idx, popsize, searchspace, constraint_aware)
 
+    # Override with user-specified starting position
     population[0] = cost_func.get_start_pos()
 
     # Calculate the initial cost for each individual in the population
@@ -140,16 +149,25 @@ def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F,
 
     # --- 2. Main Loop ---
 
+    stabilized = 0
+
     # Iterate through the specified number of generations
    for generation in range(maxiter):
 
+        # Trial population and vectors are stored as lists
+        # not Numpy arrays, to make it easy to check for duplicates
         trial_population = []
 
+        # If there has been no change for more than two generations, generate a new population
+        if stabilized > 2:
+            trial_population = list(generate_population(tune_params, min_idx, max_idx, popsize, searchspace, constraint_aware))
+
         # Iterate over each individual in the population
-        for i in range(popsize):
+        i = 0
+        stuck = 0
+        while len(trial_population) < popsize:
 
             # --- a. Mutation ---
-
             # Select three distinct random individuals (a, b, c) from the population,
             # ensuring they are different from the current individual 'i'.
             idxs = [idx for idx in range(popsize) if idx != i]
@@ -172,13 +190,28 @@ def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F,
             if constraint_aware:
                 trial_vector = repair(trial_vector, searchspace)
 
             # Store for selection
-            trial_population.append(trial_vector)
+            if list(trial_vector) not in trial_population:
+                trial_population.append(list(trial_vector))
+                i += 1
+                stuck = 0
+            else:
+                stuck += 1
+                if stuck >= 20:
+                    if verbose:
+                        print(f"Differential Evolution got stuck generating new individuals, inserting random sample")
+                    trial_population.append(list(searchspace.get_random_sample(1)[0]))
+                    i += 1
+                    stuck = 0
+
         # --- c. Selection ---
 
         # Calculate the cost of the new trial vectors
         trial_population_cost = np.array([cost_func(ind) for ind in trial_population])
 
+        # Keep track of whether population changes over time
+        no_change = True
+
         # Iterate over each individual in the trial population
         for i in range(popsize):
@@ -188,8 +221,13 @@ def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F,
             # If the trial vector has a lower or equal cost, it replaces the
             # target vector in the population for the next generation.
             if trial_cost <= population_cost[i]:
-                population[i] = trial_vector
-                population_cost[i] = trial_cost
+
+                # check if trial_vector is not already in population
+                idxs = [idx for idx in range(popsize) if idx != i]
+                if trial_vector not in population[idxs]:
+                    population[i] = np.array(trial_vector)
+                    population_cost[i] = trial_cost
+                    no_change = False
 
                 # Update the overall best solution if the new one is better
                 if trial_cost < best_cost:
@@ -197,10 +235,17 @@ def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F,
                     best_solution = trial_vector
                     best_solution_idx = values_to_indices(best_solution, tune_params)
 
+        # Note if population is stabilizing
+        if no_change:
+            stabilized += 1
+
         # Print the progress at the end of the generation
         if verbose:
             print(f"Generation {generation + 1}, Best Cost: {best_cost:.6f}")
 
+    if verbose:
+        print(f"Differential Evolution completed fevals={len(cost_func.tuning_options.unique_results)}")
+
     return {"solution": best_solution, "cost": best_cost}
@@ -348,7 +393,7 @@ def repair(trial_vector, searchspace):
 
             # if we have found valid neighboring configurations, select one at random
             if len(neighbors) > 0:
                 new_trial_vector = np.array(list(random.choice(neighbors)))
-                print(f"Differential evolution resulted in invalid config {trial_vector=}, repaired dna to {new_trial_vector=}")
+                print(f"Differential evolution resulted in invalid config {trial_vector=}, repaired to {new_trial_vector=}")
                 return new_trial_vector
 
     return trial_vector
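The Latin Hypercube initialization stratifies the index space instead of sampling it uniformly; condensed, the constraint-aware branch of generate_population amounts to the following (hypothetical parameters, illustration only):

    from scipy.stats.qmc import LatinHypercube

    tune_params = {"block_size_x": [16, 32, 128, 1024], "block_size_y": [1, 2, 8]}
    max_idx = [len(v) - 1 for v in tune_params.values()]   # [3, 2]

    sampler = LatinHypercube(len(tune_params))
    samples = sampler.integers(l_bounds=0, u_bounds=max_idx, n=4, endpoint=True)
    # e.g. array([[2, 0], [0, 2], [3, 1], [1, 1]]): one index vector per
    # individual, each then mapped through indices_to_values and repair()

The duplicate checks and the stuck counter guard the other new behavior: a fresh population is generated wholesale once the current one has stopped changing, and a random sample is inserted whenever 20 successive trial vectors turned out to be duplicates.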
From 26c8127e9b2d41660aef3651b8da4a3898c28077 Mon Sep 17 00:00:00 2001
From: Ben van Werkhoven
Date: Fri, 4 Jul 2025 23:39:41 +0200
Subject: [PATCH 08/11] code quality improvements

---
 kernel_tuner/strategies/diff_evo.py | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/kernel_tuner/strategies/diff_evo.py b/kernel_tuner/strategies/diff_evo.py
index bc2099982..6268ddee6 100644
--- a/kernel_tuner/strategies/diff_evo.py
+++ b/kernel_tuner/strategies/diff_evo.py
@@ -99,7 +99,8 @@ def random_draw(idxs, mutate, best):
     return np.random.choice(idxs, draw, replace=draw >= len(idxs))
 
 
-def generate_population(tune_params, min_idx, max_idx, popsize, searchspace, constraint_aware):
+def generate_population(tune_params, max_idx, popsize, searchspace, constraint_aware):
+    """ Generate new population, returns Numpy array """
     if constraint_aware:
@@ -133,7 +134,7 @@ def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F,
     bounds = np.array(bounds)
 
     # Initialize the population with random individuals within the bounds
-    population = generate_population(tune_params, min_idx, max_idx, popsize, searchspace, constraint_aware)
+    population = generate_population(tune_params, max_idx, popsize, searchspace, constraint_aware)
 
     # Override with user-specified starting position
     population[0] = cost_func.get_start_pos()
@@ -160,7 +161,7 @@ def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F,
 
         # If there has been no change for more than two generations, generate a new population
         if stabilized > 2:
-            trial_population = list(generate_population(tune_params, min_idx, max_idx, popsize, searchspace, constraint_aware))
+            trial_population = list(generate_population(tune_params, max_idx, popsize, searchspace, constraint_aware))
 
         # Iterate over each individual in the population
         i = 0
@@ -189,19 +190,20 @@ def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F,
             if constraint_aware:
                 trial_vector = repair(trial_vector, searchspace)
 
-            # Store for selection
+            # Store for selection, if not in trial_population already
             if list(trial_vector) not in trial_population:
                 trial_population.append(list(trial_vector))
                 i += 1
                 stuck = 0
             else:
                 stuck += 1
-                if stuck >= 20:
-                    if verbose:
-                        print(f"Differential Evolution got stuck generating new individuals, inserting random sample")
-                    trial_population.append(list(searchspace.get_random_sample(1)[0]))
-                    i += 1
-                    stuck = 0
+
+            if stuck >= 20:
+                if verbose:
+                    print("Differential Evolution got stuck generating new individuals, inserting random sample")
+                trial_population.append(list(searchspace.get_random_sample(1)[0]))
+                i += 1
+                stuck = 0
 
         # --- c. Selection ---
From 2d41660aef3651b8da4a3898c280774bdb4f1c21 Mon Sep 17 00:00:00 2001
From: Ben van Werkhoven
Date: Thu, 10 Jul 2025 16:57:19 +0200
Subject: [PATCH 09/11] string values compatible

---
 kernel_tuner/strategies/diff_evo.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel_tuner/strategies/diff_evo.py b/kernel_tuner/strategies/diff_evo.py
index 6268ddee6..c441f446e 100644
--- a/kernel_tuner/strategies/diff_evo.py
+++ b/kernel_tuner/strategies/diff_evo.py
@@ -71,7 +71,7 @@ def indices_to_values(individual_indices, tune_params):
     values = []
     for dim, idx in enumerate(individual_indices):
         values.append(tune_params_list[dim][idx])
-    return np.array(values)
+    return values
 
 
 def parse_method(method):
@@ -104,7 +104,7 @@ def generate_population(tune_params, max_idx, popsize, searchspace, constraint_a
     if constraint_aware:
         samples = LatinHypercube(len(tune_params)).integers(l_bounds=0, u_bounds=max_idx, n=popsize, endpoint=True)
         population = [indices_to_values(sample, tune_params) for sample in samples]
-        population = np.array([repair(individual, searchspace) for individual in population])
+        population = [repair(individual, searchspace) for individual in population]
     else:
         population = []
         for _ in range(popsize):
@@ -112,7 +112,7 @@ def generate_population(tune_params, max_idx, popsize, searchspace, constraint_a
             for key in tune_params:
                 ind.append(random.choice(tune_params[key]))
             population.append(ind)
-    population = np.array(population)
+    population = population
     return population
@@ -354,7 +354,7 @@ def binomial_crossover(donor_vector, target, CR):
 
     # Apply crossover
     trial_vector[crossover_points] = donor_vector[crossover_points]
-    return trial_vector
+    return list(trial_vector)
 
 
 def exponential_crossover(donor_vector, target, CR):
@@ -379,7 +379,7 @@ def exponential_crossover(donor_vector, target, CR):
         trial_idx[crossover_point] = donor_vector[crossover_point]
         l += 1
 
-    return trial_idx
+    return list(trial_idx)
 
 
 def repair(trial_vector, searchspace):
@@ -394,7 +394,7 @@ def repair(trial_vector, searchspace):
 
             # if we have found valid neighboring configurations, select one at random
             if len(neighbors) > 0:
-                new_trial_vector = np.array(list(random.choice(neighbors)))
+                new_trial_vector = list(random.choice(neighbors))
                 print(f"Differential evolution resulted in invalid config {trial_vector=}, repaired to {new_trial_vector=}")
                 return new_trial_vector
From b170eef8397eb48bf067f2f90c7c90bc8876fc9a Mon Sep 17 00:00:00 2001
From: Ben van Werkhoven
Date: Thu, 10 Jul 2025 17:12:35 +0200
Subject: [PATCH 10/11] string values compatible, for real this time

---
 kernel_tuner/strategies/diff_evo.py | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/kernel_tuner/strategies/diff_evo.py b/kernel_tuner/strategies/diff_evo.py
index c441f446e..c8330d4dc 100644
--- a/kernel_tuner/strategies/diff_evo.py
+++ b/kernel_tuner/strategies/diff_evo.py
@@ -112,7 +112,6 @@ def generate_population(tune_params, max_idx, popsize, searchspace, constraint_a
             for key in tune_params:
                 ind.append(random.choice(tune_params[key]))
             population.append(ind)
-    population = population
     return population
@@ -225,9 +224,8 @@ def differential_evolution(searchspace, cost_func, bounds, popsize, maxiter, F,
             if trial_cost <= population_cost[i]:
 
                 # check if trial_vector is not already in population
-                idxs = [idx for idx in range(popsize) if idx != i]
-                if trial_vector not in population[idxs]:
-                    population[i] = np.array(trial_vector)
+                if population.count(trial_vector) == 0:
+                    population[i] = trial_vector
                     population_cost[i] = trial_cost
                     no_change = False
@@ -352,7 +350,7 @@ def binomial_crossover(donor_vector, target, CR):
         crossover_points[np.random.randint(0, dimensions)] = True
 
     # Apply crossover
-    trial_vector[crossover_points] = donor_vector[crossover_points]
+    trial_vector[crossover_points] = np.array(donor_vector)[crossover_points]
 
     return list(trial_vector)
@@ -374,7 +374,7 @@ def exponential_crossover(donor_vector, target, CR):
     l = 0
     while np.random.rand() < CR and l < dimensions:
         crossover_point = (start_point + l) % dimensions
-        trial_idx[crossover_point] = donor_vector[crossover_point]
+        trial_idx[crossover_point] = np.array(donor_vector)[crossover_point]
         l += 1
 
     return list(trial_idx)
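The motivation behind these two commits: a configuration that mixes numbers and strings cannot be stored in a homogeneous numpy array without every element silently becoming a string, so configurations are represented as plain lists instead. A minimal illustration (hypothetical values):

    import numpy as np

    config = [128, "float4"]
    as_array = np.array(config)    # -> array(['128', 'float4']) with a string dtype
    assert as_array[0] != 128      # numeric identity is lost
    assert config[0] == 128        # the plain list keeps it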
From 19d51275c59126e5a790165de1408269d5c567a6 Mon Sep 17 00:00:00 2001
From: Ben van Werkhoven
Date: Fri, 11 Jul 2025 09:24:20 +0200
Subject: [PATCH 11/11] further reducing use of numpy arrays for representing
 configs

---
 kernel_tuner/strategies/diff_evo.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/kernel_tuner/strategies/diff_evo.py b/kernel_tuner/strategies/diff_evo.py
index c8330d4dc..c415f6f92 100644
--- a/kernel_tuner/strategies/diff_evo.py
+++ b/kernel_tuner/strategies/diff_evo.py
@@ -338,7 +338,7 @@ def mutate_de_2(best_idx, randos_idx, F, min_idx, max_idx, best):
 def binomial_crossover(donor_vector, target, CR):
     """Performs binomial crossover of donor_vector with target given crossover rate CR."""
     # Create the trial vector by mixing parameters from the target and donor vectors
-    trial_vector = np.copy(target)
+    trial_vector = target.copy()
     dimensions = len(donor_vector)
@@ -350,9 +350,11 @@ def binomial_crossover(donor_vector, target, CR):
         crossover_points[np.random.randint(0, dimensions)] = True
 
     # Apply crossover
-    trial_vector[crossover_points] = np.array(donor_vector)[crossover_points]
+    for i, d in enumerate(donor_vector):
+        if crossover_points[i]:
+            trial_vector[i] = donor_vector[i]
 
-    return list(trial_vector)
+    return trial_vector
 
 
 def exponential_crossover(donor_vector, target, CR):
@@ -363,7 +365,7 @@ def exponential_crossover(donor_vector, target, CR):
     from the donor vector and the rest from the target vector.
     """
     dimensions = len(target)
-    trial_idx = np.copy(target)
+    trial_vector = target.copy()
 
     # 1. Select a random starting point for the crossover block.
     start_point = np.random.randint(0, dimensions)
@@ -374,10 +376,10 @@ def exponential_crossover(donor_vector, target, CR):
     l = 0
     while np.random.rand() < CR and l < dimensions:
         crossover_point = (start_point + l) % dimensions
-        trial_idx[crossover_point] = np.array(donor_vector)[crossover_point]
+        trial_vector[crossover_point] = donor_vector[crossover_point]
         l += 1
 
-    return list(trial_idx)
+    return trial_vector
 
 
 def repair(trial_vector, searchspace):
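End to end, the finished strategy is driven entirely through strategy_options; a usage sketch mirroring the test suite, with the defaults from _options spelled out:

    from kernel_tuner import tune_kernel

    result, env = tune_kernel(
        *vector_add,                       # kernel spec from the test fixtures
        strategy="diff_evo",
        strategy_options=dict(popsize=50, maxiter=200, F=1.3, CR=0.9,
                              method="best1bin", constraint_aware=True),
        cache=cache_filename,
        simulation_mode=True,
    )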