Skip to content

Commit e322824

Browse files
committed
Merge branch 'master' into parallel_runner
2 parents dd4a4ed + 8ce5847 commit e322824

File tree

5 files changed

+45
-21
lines changed

5 files changed

+45
-21
lines changed

kernel_tuner/integration.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,9 @@
1111
objective_default_map = {
1212
"time": False,
1313
"energy": False,
14+
"fitness": True,
15+
"cost": False,
16+
"loss": False,
1417
"GFLOP/s": True,
1518
"TFLOP/s": True,
1619
"GB/s": True,

kernel_tuner/strategies/bayes_opt.py

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,7 @@ def tune(searchspace: Searchspace, runner, tuning_options):
110110
:rtype: list(dict()), dict()
111111
112112
"""
113-
# we don't actually use this for Bayesian Optimization, but it is used to check for unsupported options
113+
# we don't actually use this for Bayesian Optimization, but it is used to check for unsupported options
114114
get_options(tuning_options.strategy_options, _options, unsupported=["x0"])
115115

116116
max_fevals = tuning_options.strategy_options.get("max_fevals", 100)
@@ -145,7 +145,13 @@ def tune(searchspace: Searchspace, runner, tuning_options):
145145
# initialize and optimize
146146
try:
147147
bo = BayesianOptimization(
148-
parameter_space, searchspace, removed_tune_params, tuning_options, normalize_dict, denormalize_dict, cost_func
148+
parameter_space,
149+
searchspace,
150+
removed_tune_params,
151+
tuning_options,
152+
normalize_dict,
153+
denormalize_dict,
154+
cost_func,
149155
)
150156
except StopCriterionReached:
151157
warnings.warn(
@@ -851,7 +857,10 @@ def __optimize_multi_ultrafast(self, max_fevals, predict_eval_ratio=5):
851857
while self.fevals < max_fevals:
852858
aqfs = self.multi_afs
853859
# if we take the prediction only once, we want to go from most exploiting to most exploring, because the more exploiting an AF is, the more it relies on non-stale information from the model
854-
fit_observations = last_prediction_time * predict_eval_ratio <= last_eval_time or last_prediction_counter >= predict_eval_ratio
860+
fit_observations = (
861+
last_prediction_time * predict_eval_ratio <= last_eval_time
862+
or last_prediction_counter >= predict_eval_ratio
863+
)
855864
if fit_observations:
856865
last_prediction_counter = 0
857866
pred_start = time.perf_counter()

kernel_tuner/strategies/common.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -94,9 +94,9 @@ def __call__(self, x, check_restrictions=True):
9494

9595
# else check if this is a legal (non-restricted) configuration
9696
if check_restrictions and self.searchspace.restrictions:
97-
params_dict = dict(zip(self.searchspace.tune_params.keys(), params))
98-
legal = util.check_restrictions(self.searchspace.restrictions, params_dict, self.tuning_options.verbose)
97+
legal = self.searchspace.is_param_config_valid(tuple(params))
9998
if not legal:
99+
params_dict = dict(zip(self.searchspace.tune_params.keys(), params))
100100
result = params_dict
101101
result[self.tuning_options.objective] = util.InvalidConfig()
102102

kernel_tuner/util.py

Lines changed: 7 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -528,7 +528,8 @@ def get_kernel_string(kernel_source, params=None):
528528
kernel_string = read_file(kernel_source)
529529
elif isinstance(kernel_source, str):
530530
if looks_like_a_filename(kernel_source):
531-
kernel_string = read_file(kernel_source) or kernel_source
531+
with open(kernel_source, "r") as f:
532+
kernel_string = f.read()
532533
else:
533534
kernel_string = kernel_source
534535
else:
@@ -1123,6 +1124,10 @@ def compile_restrictions(
11231124
noncompiled_restrictions.append((r, [], r))
11241125
return noncompiled_restrictions + compiled_restrictions
11251126

1127+
def check_matching_problem_size(cached_problem_size, problem_size):
1128+
    """Check if the requested problem size matches the problem size in the cache."""
1129+
if not (np.array(cached_problem_size) == np.array(problem_size)).all():
1130+
raise ValueError(f"Cannot load cache which contains results for different problem_size, cache: {cached_problem_size}, requested: {problem_size}")
11261131

11271132
def process_cache(cache, kernel_options, tuning_options, runner):
11281133
"""Cache file for storing tuned configurations.
@@ -1193,18 +1198,7 @@ def process_cache(cache, kernel_options, tuning_options, runner):
11931198
f"Cannot load cache which contains results for different kernel (cache: {cached_data['kernel_name']}, actual: {kernel_options.kernel_name})"
11941199
)
11951200
if "problem_size" in cached_data and not callable(kernel_options.problem_size):
1196-
# if it's a single value, convert to an array
1197-
if isinstance(cached_data["problem_size"], int):
1198-
cached_data["problem_size"] = [cached_data["problem_size"]]
1199-
# if problem_size is not iterable, compare directly
1200-
if not hasattr(kernel_options.problem_size, "__iter__"):
1201-
if cached_data["problem_size"] != kernel_options.problem_size:
1202-
raise ValueError("Cannot load cache which contains results for different problem_size")
1203-
# else (problem_size is iterable)
1204-
# cache returns list, problem_size is likely a tuple. Therefore, the next check
1205-
# checks the equality of all items in the list/tuples individually
1206-
elif not all([i == j for i, j in zip(cached_data["problem_size"], kernel_options.problem_size)]):
1207-
raise ValueError("Cannot load cache which contains results for different problem_size")
1201+
check_matching_problem_size(cached_data["problem_size"], kernel_options.problem_size)
12081202
if cached_data["tune_params_keys"] != list(tuning_options.tune_params.keys()):
12091203
if all(key in tuning_options.tune_params for key in cached_data["tune_params_keys"]):
12101204
raise ValueError(

test/test_util_functions.py

Lines changed: 21 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -541,10 +541,10 @@ def gen_kernel(params):
541541

542542
def test_get_kernel_string_filename_not_found():
543543
# when the string looks like a filename, but the file does not exist
544-
# assume the string is not a filename after all
544+
# check if throws an exception
545545
bogus_filename = "filename_3456789.cu"
546-
answer = get_kernel_string(bogus_filename)
547-
assert answer == bogus_filename
546+
with pytest.raises(FileNotFoundError):
547+
get_kernel_string(bogus_filename)
548548

549549

550550
def test_looks_like_a_filename1():
@@ -742,6 +742,24 @@ def test_parse_restrictions():
742742
assert all(param in tune_params for param in params)
743743

744744

745+
def test_check_matching_problem_size():
746+
# these should error
747+
with pytest.raises(ValueError):
748+
check_matching_problem_size(42, 1000)
749+
with pytest.raises(ValueError):
750+
check_matching_problem_size([42,1], 42)
751+
with pytest.raises(ValueError):
752+
check_matching_problem_size([42,0], 42)
753+
with pytest.raises(ValueError):
754+
check_matching_problem_size(None, 42)
755+
# these should not error
756+
check_matching_problem_size(1000, (1000,))
757+
check_matching_problem_size([1000], 1000)
758+
check_matching_problem_size(1000, 1000)
759+
check_matching_problem_size(1000, [1000])
760+
check_matching_problem_size([1000,], 1000)
761+
762+
745763
def test_convert_constraint_lambdas():
746764

747765
restrictions = [lambda p: 32 <= p["block_size_x"]*p["block_size_y"] <= 1024,

0 commit comments

Comments
 (0)