
Commit 6550916

Improved code quality based on sonarcloud issues
1 parent 11b378f commit 6550916

File tree

kernel_tuner/python.py
kernel_tuner/searchspace.py

2 files changed: +57 -56 lines changed


kernel_tuner/python.py

Lines changed: 56 additions & 54 deletions
@@ -31,7 +31,7 @@
 
 
 class PythonFunctions(object):
-    """Class that groups the code for running and compiling C functions"""
+    """Class that groups the code for running Python"""
 
     def __init__(self, iterations=7, observers=None, parallel_mode=False, hyperparam_mode=False, show_progressbar=False):
         """instantiate PythonFunctions object used for interacting with Python code
@@ -217,59 +217,61 @@ def benchmark_hyperparams(self, func, args, threads, grid):
         # print(f"In {round(benchmark_time, 3)} seconds, mean: {round(np.mean(self.benchmark_times), 3)}")
         return result
 
-        start_time = perf_counter()
-        if self.parallel_mode:
-            num_procs = max(cpu_count() - 1, 1)
-            logging.debug(f"Running benchmark in parallel on {num_procs} processors")
-            manager = Manager()
-            MRE_values = manager.list()
-            runtimes = manager.list()
-            with get_context('spawn').Pool(num_procs) as pool:  # spawn alternative is forkserver, creates a reusable server
-                args = func, args, self.params
-                MRE_values, runtimes = zip(*pool.starmap(run_kernel_and_observers, zip(iterator, repeat(args))))
-            MRE_values, runtimes = list(MRE_values), list(runtimes)
-            print(MRE_values)
-            result["times"] = values
-            result["strategy_time"] = np.mean(runtimes)
-            np_results = np.array(values)
-        else:
-            # sequential implementation
-            np_results = np.array([])
-            for iter in iterator:
-                for obs in self.observers:
-                    obs.before_start()
-                value = self.run_kernel(func, args)
-                for obs in self.observers:
-                    obs.after_finish()
-
-                if value < 0.0:
-                    raise ValueError("Invalid benchmark result")
-
-                result["times"].append(value)
-                np_results = np.append(np_results, value)
-                if value >= invalid_value and iter >= min_valid_iterations and len(np_results[np_results < invalid_value]) < min_valid_iterations:
-                    break
-
-        # fill up the remaining iters with invalid in case of a break
-        result["times"] += [invalid_value] * (self.iterations - len(result["times"]))
-
-        # finish by instrumenting the results with the observers
-        for obs in self.observers:
-            result.update(obs.get_results())
-
-        benchmark_time = perf_counter() - start_time
-        self.benchmark_times.append(benchmark_time)
-        print(f"Time taken: {round(benchmark_time, 3)} seconds, mean: {round(np.mean(self.benchmark_times), 3)}")
-
-        # calculate the mean of the means of the Mean Relative Error over the valid results
-        valid_results = np_results[np_results < invalid_value]
-        mean_mean_MRE = np.mean(valid_results) if len(valid_results) > 0 else np.nan
-
-        # write the 'time' to the results and return
-        if np.isnan(mean_mean_MRE) or len(valid_results) < min_valid_iterations:
-            mean_mean_MRE = invalid_value
-        result["time"] = mean_mean_MRE
-        return result
+        # old implementation
+
+        # start_time = perf_counter()
+        # if self.parallel_mode:
+        #     num_procs = max(cpu_count() - 1, 1)
+        #     logging.debug(f"Running benchmark in parallel on {num_procs} processors")
+        #     manager = Manager()
+        #     MRE_values = manager.list()
+        #     runtimes = manager.list()
+        #     with get_context('spawn').Pool(num_procs) as pool:  # spawn alternative is forkserver, creates a reusable server
+        #         args = func, args, self.params
+        #         MRE_values, runtimes = zip(*pool.starmap(run_kernel_and_observers, zip(iterator, repeat(args))))
+        #     MRE_values, runtimes = list(MRE_values), list(runtimes)
+        #     print(MRE_values)
+        #     result["times"] = values
+        #     result["strategy_time"] = np.mean(runtimes)
+        #     np_results = np.array(values)
+
+        # # sequential implementation
+        # np_results = np.array([])
+        # for iter in iterator:
+        #     for obs in self.observers:
+        #         obs.before_start()
+        #     value = self.run_kernel(func, args)
+        #     for obs in self.observers:
+        #         obs.after_finish()
+
+        #     if value < 0.0:
+        #         raise ValueError("Invalid benchmark result")
+
+        #     result["times"].append(value)
+        #     np_results = np.append(np_results, value)
+        #     if value >= invalid_value and iter >= min_valid_iterations and len(np_results[np_results < invalid_value]) < min_valid_iterations:
+        #         break
+
+        # # fill up the remaining iters with invalid in case of a break
+        # result["times"] += [invalid_value] * (self.iterations - len(result["times"]))
+
+        # # finish by instrumenting the results with the observers
+        # for obs in self.observers:
+        #     result.update(obs.get_results())
+
+        # benchmark_time = perf_counter() - start_time
+        # self.benchmark_times.append(benchmark_time)
+        # print(f"Time taken: {round(benchmark_time, 3)} seconds, mean: {round(np.mean(self.benchmark_times), 3)}")
+
+        # # calculate the mean of the means of the Mean Relative Error over the valid results
+        # valid_results = np_results[np_results < invalid_value]
+        # mean_mean_MRE = np.mean(valid_results) if len(valid_results) > 0 else np.nan
+
+        # # write the 'time' to the results and return
+        # if np.isnan(mean_mean_MRE) or len(valid_results) < min_valid_iterations:
+        #     mean_mean_MRE = invalid_value
+        # result["time"] = mean_mean_MRE
+        # return result
 
     def run_kernel(self, func, args, threads, grid):
         """runs the kernel once, returns whatever the kernel returns

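The tail of the commented-out block reduces the per-iteration results to the single "time" entry. Below is a self-contained sketch of that aggregation, with invalid_value and min_valid_iterations promoted to explicit parameters (in the original they come from the enclosing method's scope):

import numpy as np

def aggregate_mre(times, invalid_value, min_valid_iterations):
    # keep only the results below the invalid marker and average them
    np_results = np.array(times)
    valid_results = np_results[np_results < invalid_value]
    mean_mean_MRE = np.mean(valid_results) if len(valid_results) > 0 else np.nan
    # no valid results, or too few of them: report the invalid marker instead
    if np.isnan(mean_mean_MRE) or len(valid_results) < min_valid_iterations:
        mean_mean_MRE = invalid_value
    return mean_mean_MRE

print(aggregate_mre([0.10, 0.20, 0.30, 1e20], invalid_value=1e20, min_valid_iterations=3))  # about 0.2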
kernel_tuner/searchspace.py

Lines changed: 1 addition & 2 deletions
@@ -1017,5 +1017,4 @@ def to_ax_searchspace(self):
         raise NotImplementedError(
             "Conversion to Ax SearchSpace has not been fully implemented as Ax Searchspaces can't capture full complexity."
         )
-
-        return ax_searchspace
+        # return ax_searchspace
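The searchspace.py change fixes the same kind of finding: return ax_searchspace followed an unconditional raise NotImplementedError, so it could never run. A minimal illustration of the pattern, with identifiers shortened from the original:

class Searchspace:
    def to_ax_searchspace(self):
        # every call raises, so any statement after the raise is unreachable
        raise NotImplementedError("Conversion to Ax SearchSpace has not been fully implemented.")
        # return ax_searchspace  # SonarCloud flags this; commented out here as in the commit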
