Skip to content

Commit cde7823

Browse files
committed
Extended the hyperparameter search spaces for the optimization algorithms evaluated in the paper
1 parent c044ef8 commit cde7823

File tree

1 file changed

+34
-14
lines changed

1 file changed

+34
-14
lines changed

kernel_tuner/hyper.py

Lines changed: 34 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,7 @@ def put_if_not_present(target_dict, key, value):
100100

101101
parser = ArgumentParser()
102102
parser.add_argument("strategy_to_tune", type=str, help="The strategy to tune hyperparameters for.")
103-
parser.add_argument("--meta_strategy", nargs='?', default="genetic_algorithm", type=str, help="The meta-strategy to use for hyperparameter tuning.")
103+
parser.add_argument("--meta_strategy", nargs='?', default="dual_annealing", type=str, help="The meta-strategy to use for hyperparameter tuning.")
104104
parser.add_argument("--max_time", nargs='?', default=60*60*24, type=int, help="The maximum time in seconds for the hyperparameter tuning.")
105105
args = parser.parse_args()
106106
strategy_to_tune = args.strategy_to_tune
@@ -119,12 +119,20 @@ def put_if_not_present(target_dict, key, value):
119119
# select the hyperparameter parameters for the selected optimization algorithm
120120
restrictions = []
121121
if strategy_to_tune.lower() == "pso":
122+
# exhaustive search for PSO hyperparameters
123+
# hyperparams = {
124+
# 'popsize': [10, 20, 30],
125+
# 'maxiter': [50, 100, 150],
126+
# # 'w': [0.25, 0.5, 0.75], # disabled due to low influence according to KW-test (H=0.0215) and mutual information
127+
# 'c1': [1.0, 2.0, 3.0],
128+
# 'c2': [0.5, 1.0, 1.5]
129+
# }
122130
hyperparams = {
123-
'popsize': [10, 20, 30],
124-
'maxiter': [50, 100, 150],
131+
'popsize': list(range(2, 50+1, 2)),
132+
'maxiter': list(range(10, 200, 10)),
125133
# 'w': [0.25, 0.5, 0.75], # disabled due to low influence according to KW-test (H=0.0215) and mutual information
126-
'c1': [1.0, 2.0, 3.0],
127-
'c2': [0.5, 1.0, 1.5]
134+
'c1': [round(n, 2) for n in np.arange(1.0, 3.5+0.25, 0.25).tolist()],
135+
'c2': [round(n, 2) for n in np.arange(0.5, 2.0+0.25, 0.25).tolist()]
128136
}
129137
elif strategy_to_tune.lower() == "firefly_algorithm":
130138
hyperparams = {
@@ -148,22 +156,28 @@ def put_if_not_present(target_dict, key, value):
148156
elif strategy_to_tune.lower() == "diff_evo":
149157
hyperparams = {
150158
'method': ["best1bin", "rand1bin", "best2bin", "rand2bin", "best1exp", "rand1exp", "best2exp", "rand2exp", "currenttobest1bin", "currenttobest1exp", "randtobest1bin", "randtobest1exp"], # best1bin
151-
'popsize': list(range(1, 100+1, 1)), # 50
159+
'popsize': list(range(2, 50+1, 2)), # 50
152160
'popsize_times_dimensions': [True, False], # False
153-
'F': list(np.arange(0.05, 2.0+0.05, 0.05)), # 1.3
154-
'CR': list(np.arange(0.05, 1.0+0.05, 0.05)) # 0.9
161+
'F': [round(n, 2) for n in np.arange(0.1, 2.0+0.1, 0.1).tolist()], # 1.3
162+
'CR': [round(n, 2) for n in np.arange(0.05, 1.0+0.05, 0.05).tolist()] # 0.9
155163
}
156164
elif strategy_to_tune.lower() == "basinhopping":
157165
hyperparams = {
158166
'method': ["Nelder-Mead", "Powell", "CG", "BFGS", "L-BFGS-B", "TNC", "COBYLA", "SLSQP"],
159167
'T': [0.1, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5],
160168
}
161169
elif strategy_to_tune.lower() == "genetic_algorithm":
170+
# hyperparams = {
171+
# 'method': ["single_point", "two_point", "uniform", "disruptive_uniform"],
172+
# 'popsize': [10, 20, 30],
173+
# 'maxiter': [50, 100, 150],
174+
# 'mutation_chance': [5, 10, 20]
175+
# }
162176
hyperparams = {
163177
'method': ["single_point", "two_point", "uniform", "disruptive_uniform"],
164-
'popsize': [10, 20, 30],
165-
'maxiter': [50, 100, 150],
166-
'mutation_chance': [5, 10, 20]
178+
'popsize': list(range(2, 50+1, 2)),
179+
'maxiter': list(range(10, 200, 10)),
180+
'mutation_chance': list(range(5, 100, 5))
167181
}
168182
elif strategy_to_tune.lower() == "greedy_mls":
169183
hyperparams = {
@@ -172,11 +186,17 @@ def put_if_not_present(target_dict, key, value):
172186
'randomize': [True, False]
173187
}
174188
elif strategy_to_tune.lower() == "simulated_annealing":
189+
# hyperparams = {
190+
# 'T': [0.5, 1.0, 1.5],
191+
# 'T_min': [0.0001, 0.001, 0.01],
192+
# 'alpha': [0.9925, 0.995, 0.9975],
193+
# 'maxiter': [1, 2, 3]
194+
# }
175195
hyperparams = {
176-
'T': [0.5, 1.0, 1.5],
177-
'T_min': [0.0001, 0.001, 0.01],
196+
'T': [round(n, 2) for n in np.arange(0.1, 2.0+0.1, 0.1).tolist()],
197+
'T_min': [round(n, 4) for n in np.arange(0.0001, 0.1, 0.001).tolist()],
178198
'alpha': [0.9925, 0.995, 0.9975],
179-
'maxiter': [1, 2, 3]
199+
'maxiter': list(range(1, 10, 1))
180200
}
181201
elif strategy_to_tune.lower() == "bayes_opt":
182202
hyperparams = {

0 commit comments

Comments (0)