|
5 | 5 | from random import randint |
6 | 6 | from argparse import ArgumentParser |
7 | 7 |
|
| 8 | +import numpy as np |
| 9 | + |
8 | 10 | import kernel_tuner |
9 | 11 |
|
10 | 12 |
|
@@ -94,11 +96,26 @@ def put_if_not_present(target_dict, key, value): |
94 | 96 | return list(result_unique.values()), env |
95 | 97 |
|
96 | 98 | if __name__ == "__main__": |
| 99 | + """Main function to run the hyperparameter tuning. Run with `python hyper.py strategy_to_tune=`.""" |
| 100 | + |
97 | 101 | parser = ArgumentParser() |
98 | | - parser.add_argument("strategy_to_tune") |
| 102 | + parser.add_argument("strategy_to_tune", type=str, help="The strategy to tune hyperparameters for.") |
| 103 | + parser.add_argument("--meta_strategy", nargs='?', default="genetic_algorithm", type=str, help="The meta-strategy to use for hyperparameter tuning.") |
| 104 | + parser.add_argument("--max_time", nargs='?', default=60*60*24, type=int, help="The maximum time in seconds for the hyperparameter tuning.") |
99 | 105 | args = parser.parse_args() |
100 | 106 | strategy_to_tune = args.strategy_to_tune |
101 | 107 |
|
| 108 | + kwargs = dict( |
| 109 | + verbose=True, |
| 110 | + quiet=False, |
| 111 | + simulation_mode=False, |
| 112 | + strategy=args.meta_strategy, |
| 113 | + cache=f"hyperparamtuning_t={strategy_to_tune}_m={args.meta_strategy}.json", |
| 114 | + strategy_options=dict( |
| 115 | + time_limit=args.max_time, |
| 116 | + ) |
| 117 | + ) |
| 118 | + |
102 | 119 | # select the hyperparameter parameters for the selected optimization algorithm |
103 | 120 | restrictions = [] |
104 | 121 | if strategy_to_tune.lower() == "pso": |
@@ -131,9 +148,10 @@ def put_if_not_present(target_dict, key, value): |
131 | 148 | elif strategy_to_tune.lower() == "diff_evo": |
132 | 149 | hyperparams = { |
133 | 150 | 'method': ["best1bin", "rand1bin", "best2bin", "rand2bin", "best1exp", "rand1exp", "best2exp", "rand2exp", "currenttobest1bin", "currenttobest1exp", "randtobest1bin", "randtobest1exp"], # best1bin |
134 | | - 'popsize': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100], # 50 |
135 | | - 'F': [0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0], # 1.3 |
136 | | - 'CR': [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1.0] # 0.9 |
| 151 | + 'popsize': list(range(1, 100+1)), # 50
| 152 | + 'popsize_times_dimensions': [True, False], # False |
| 153 | + 'F': np.round(np.arange(0.05, 2.0+0.05, 0.05), 2).tolist(), # rounded to avoid float accumulation noise; default 1.3
| 154 | + 'CR': np.round(np.arange(0.05, 1.0+0.05, 0.05), 2).tolist() # default 0.9
137 | 155 | } |
138 | 156 | elif strategy_to_tune.lower() == "basinhopping": |
139 | 157 | hyperparams = { |
@@ -172,6 +190,6 @@ def put_if_not_present(target_dict, key, value): |
172 | 190 | raise ValueError(f"Invalid argument {strategy_to_tune=}") |
173 | 191 |
|
174 | 192 | # run the hyperparameter tuning |
175 | | - result, env = tune_hyper_params(strategy_to_tune.lower(), hyperparams, restrictions=restrictions) |
| 193 | + result, env = tune_hyper_params(strategy_to_tune.lower(), hyperparams, restrictions=restrictions, **kwargs) |
176 | 194 | print(result) |
177 | 195 | print(env['best_config']) |