|
| 1 | +"""This module contains a 'device' for hyperparameter tuning using the autotuning methodology.""" |
| 2 | + |
| 3 | +import platform |
| 4 | +from pathlib import Path |
| 5 | + |
| 6 | +from numpy import mean |
| 7 | + |
| 8 | +from kernel_tuner.backends.backend import Backend |
| 9 | +from kernel_tuner.observers.observer import BenchmarkObserver |
| 10 | + |
| 11 | +try: |
| 12 | + methodology_available = True |
| 13 | + from autotuning_methodology.experiments import generate_experiment_file |
| 14 | + from autotuning_methodology.report_experiments import get_strategy_scores |
| 15 | +except ImportError: |
| 16 | + methodology_available = False |
| 17 | + |
| 18 | + |
class ScoreObserver(BenchmarkObserver):
    """Observer that accumulates the hypertuner device's fitness scores per benchmark run."""

    def __init__(self, dev):
        self.dev = dev
        self.scores = []

    def after_finish(self):
        # the device records its most recent score in `last_score`; capture it here
        self.scores.append(self.dev.last_score)

    def get_results(self):
        # hand back the mean and the raw scores, then reset for the next round
        collected = list(self.scores)
        self.scores = []
        return {'score': mean(collected), 'scores': collected}
| 31 | + |
class HypertunerFunctions(Backend):
    """Class for executing hyperparameter tuning via the autotuning methodology."""

    # units of reported quantities; this backend reports none
    units = {}

    def __init__(self, iterations):
        """Instantiate the hypertuning 'device'.

        :param iterations: number of samples the methodology takes per configuration.
        :raises ImportError: when the `autotuning_methodology` package is not installed.
        """
        self.iterations = iterations
        self.observers = [ScoreObserver(self)]
        self.name = platform.processor()
        self.max_threads = 1024
        # fitness score of the most recently evaluated configuration, set by run_kernel
        self.last_score = None

        # set the environment options
        env = dict()
        env["iterations"] = self.iterations
        self.env = env

        # check for the methodology package
        if not methodology_available:
            raise ImportError("Unable to import the autotuning methodology, run `pip install autotuning_methodology`.")

    def ready_argument_list(self, arguments):
        """Return the prepared argument list, substituting an empty list for None."""
        arglist = super().ready_argument_list(arguments)
        return [] if arglist is None else arglist

    def compile(self, kernel_instance):
        """Generate an experiments file describing this hyperparameter configuration.

        :param kernel_instance: instance whose first argument is the strategy name and
            whose `params` holds the hyperparameter values to evaluate.
        :returns: the path to the generated experiments file, as a string.
        """
        super().compile(kernel_instance)
        path = Path(__file__).parent.parent.parent / "hyperparamtuning"
        path.mkdir(exist_ok=True)

        # TODO get applications & GPUs args from benchmark
        gpus = ["RTX_3090", "RTX_2080_Ti"]
        applications = None
        # applications = [
        #     {
        #         "name": "convolution",
        #         "folder": "./cached_data_used/kernels",
        #         "input_file": "convolution.json"
        #     },
        #     {
        #         "name": "pnpoly",
        #         "folder": "./cached_data_used/kernels",
        #         "input_file": "pnpoly.json"
        #     }
        # ]

        # strategy settings: one searchspace strategy per hyperparameter configuration
        strategy: str = kernel_instance.arguments[0]
        hyperparams = [{'name': k, 'value': v} for k, v in kernel_instance.params.items()]
        hyperparams_string = "_".join(f"{k}={str(v)}" for k, v in kernel_instance.params.items())
        searchspace_strategies = [{
            "autotuner": "KernelTuner",
            "name": f"{strategy.lower()}_{hyperparams_string}",
            "display_name": strategy.replace('_', ' ').capitalize(),
            "search_method": strategy.lower(),
            'search_method_hyperparameters': hyperparams
        }]

        # any additional settings
        override = {
            "experimental_groups_defaults": {
                "samples": self.iterations
            }
        }

        # fall back to the kernel source's name when the instance name is empty
        name = kernel_instance.name if len(kernel_instance.name) > 0 else kernel_instance.kernel_source.kernel_name
        experiments_filepath = generate_experiment_file(name, path, searchspace_strategies, applications, gpus,
                                                        override=override, overwrite_existing_file=True)
        return str(experiments_filepath)

    def start_event(self):
        return super().start_event()

    def stop_event(self):
        return super().stop_event()

    def kernel_finished(self):
        super().kernel_finished()
        return True

    def synchronize(self):
        return super().synchronize()

    def run_kernel(self, func, gpu_args=None, threads=None, grid=None, stream=None):
        """Run the methodology on the experiments file and record the fitness score.

        :param func: path to the experiments file produced by :meth:`compile`.
        The remaining parameters are accepted for interface compatibility and unused.
        """
        experiments_filepath = Path(func)

        # run the methodology to get a fitness score for this configuration;
        # take the score of the first (and only) strategy in the results
        scores = get_strategy_scores(str(experiments_filepath))
        self.last_score = next(iter(scores.values()))['score']

    def memset(self, allocation, value, size):
        return super().memset(allocation, value, size)

    def memcpy_dtoh(self, dest, src):
        return super().memcpy_dtoh(dest, src)

    def memcpy_htod(self, dest, src):
        return super().memcpy_htod(dest, src)
0 commit comments