@@ -54,7 +54,7 @@ def tune(searchspace: Searchspace, runner, tuning_options):
                 print(e)
             return cost_func.results

-        ap = acceptance_prob(old_cost, new_cost, T, tuning_options)
+        ap = acceptance_prob(old_cost, new_cost, T)
         r = random.random()

         if ap > r:
@@ -85,9 +85,9 @@ def tune(searchspace: Searchspace, runner, tuning_options):

 tune.__doc__ = common.get_strategy_docstring("Simulated Annealing", _options)

-def acceptance_prob(old_cost, new_cost, T, tuning_options):
+def acceptance_prob(old_cost, new_cost, T):
     """Annealing equation, with modifications to work towards a lower value."""
-    error_val = sys.float_info.max if not tuning_options.objective_higher_is_better else -sys.float_info.max
+    error_val = sys.float_info.max
     # if start pos is not valid, always move
     if old_cost == error_val:
         return 1.0
@@ -98,8 +98,6 @@ def acceptance_prob(old_cost, new_cost, T, tuning_options):
     if new_cost < old_cost:
         return 1.0
     # maybe move if old cost is better than new cost depending on T and random value
-    if tuning_options.objective_higher_is_better:
-        return np.exp(((new_cost - old_cost)/new_cost)/T)
     return np.exp(((old_cost - new_cost)/old_cost)/T)


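For context, a minimal standalone sketch of the simplified acceptance test after this change. It is illustrative only: the kernel_tuner plumbing around it is elided, the example costs are made up, and the real function in the file may contain additional validity checks not shown in this diff.

    import sys
    import numpy as np

    def acceptance_prob(old_cost, new_cost, T):
        """Annealing acceptance for a minimized objective (as in the + lines above)."""
        error_val = sys.float_info.max
        # if the starting position is not valid, always move
        if old_cost == error_val:
            return 1.0
        # always move if the new cost is better
        if new_cost < old_cost:
            return 1.0
        # otherwise accept a worse candidate with a probability that shrinks as T drops
        return np.exp(((old_cost - new_cost) / old_cost) / T)

    # example (hypothetical costs): a 20% worse candidate is accepted
    # less often as the temperature decreases
    print(acceptance_prob(1.0, 1.2, T=1.0))  # exp(-0.2) ~ 0.82
    print(acceptance_prob(1.0, 1.2, T=0.1))  # exp(-2.0) ~ 0.14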