Skip to content
Merged
Show file tree
Hide file tree
Changes from 8 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 33 additions & 25 deletions bayes_opt/acquisition.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@

import numpy as np
from numpy.random import RandomState
from scipy.optimize import minimize
from scipy.optimize._differentialevolution import DifferentialEvolutionSolver, minimize
from scipy.special import softmax
from scipy.stats import norm
from sklearn.gaussian_process import GaussianProcessRegressor
Expand Down Expand Up @@ -219,7 +219,7 @@ def _acq_min(
acq, space, n_random=max(n_random, n_l_bfgs_b), n_x_seeds=n_l_bfgs_b
)
if n_l_bfgs_b:
x_min_l, min_acq_l = self._l_bfgs_b_minimize(acq, space, x_seeds=x_seeds)
x_min_l, min_acq_l = self._smart_minimize(acq, space, x_seeds=x_seeds)
# Either n_random or n_l_bfgs_b is not 0 => at least one of x_min_r and x_min_l is not None
if min_acq_r > min_acq_l:
return x_min_l
Expand Down Expand Up @@ -268,7 +268,7 @@ def _random_sample_minimize(
x_seeds = []
return x_min, min_acq, x_seeds

def _l_bfgs_b_minimize(
def _smart_minimize(
self,
acq: Callable[[NDArray[Float]], NDArray[Float]],
space: TargetSpace,
Expand Down Expand Up @@ -298,33 +298,41 @@ def _l_bfgs_b_minimize(
continuous_dimensions = space.continuous_dimensions
continuous_bounds = space.bounds[continuous_dimensions]

if not continuous_dimensions.any():
min_acq = np.inf
x_min = np.array([np.nan] * space.bounds.shape[0])
return x_min, min_acq

min_acq: float | None = None
x_try: NDArray[Float]
x_min: NDArray[Float]
for x_try in x_seeds:

def continuous_acq(x: NDArray[Float], x_try=x_try) -> NDArray[Float]:
x_try[continuous_dimensions] = x
return acq(x_try)
# Case of continuous optimization
if all(continuous_dimensions):
for x_try in x_seeds:
res: OptimizeResult = minimize(acq, x_try, bounds=continuous_bounds, method="L-BFGS-B")
if not res.success:
continue

# Find the minimum of minus the acquisition function
res: OptimizeResult = minimize(
continuous_acq, x_try[continuous_dimensions], bounds=continuous_bounds, method="L-BFGS-B"
)
# See if success
if not res.success:
continue

# Store it if better than previous minimum(maximum).
if min_acq is None or np.squeeze(res.fun) >= min_acq:
x_try[continuous_dimensions] = res.x
x_min = x_try
min_acq = np.squeeze(res.fun)
# Store it if better than previous minimum(maximum).
if min_acq is None or np.squeeze(res.fun) >= min_acq:
x_try = res.x
x_min = x_try
min_acq = np.squeeze(res.fun)

# Case of mixed-integer optimization
else:
ntrials = max(1, len(x_seeds) // 100)
for _ in range(ntrials):
xinit = space.random_sample(15 * len(space.bounds), random_state=self.random_state)
de = DifferentialEvolutionSolver(acq, bounds=space.bounds, init=xinit, rng=self.random_state)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we need to use polish=False to turn off gradient-based polishing?

res: OptimizeResult = de.solve()

# See if success
if not res.success:
continue

# Store it if better than previous minimum(maximum).
if min_acq is None or np.squeeze(res.fun) >= min_acq:
x_try_sc = de._unscale_parameters(res.x)
x_try = space.kernel_transform(x_try_sc).flatten()
x_min = x_try
min_acq = np.squeeze(res.fun)

if min_acq is None:
min_acq = np.inf
Expand Down
4 changes: 2 additions & 2 deletions tests/test_acquisition.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ def test_upper_confidence_bound(gp, target_space, random_state):
assert acq.kappa == 0.5


def test_l_bfgs_fails(target_space, random_state):
def test_smart_minimize_fails(target_space, random_state):
acq = acquisition.UpperConfidenceBound(random_state=random_state)

def fun(x):
Expand All @@ -114,7 +114,7 @@ def fun(x):
except IndexError:
return np.nan

_, min_acq_l = acq._l_bfgs_b_minimize(fun, space=target_space, x_seeds=np.array([[2.5, 0.5]]))
_, min_acq_l = acq._smart_minimize(fun, space=target_space, x_seeds=np.array([[2.5, 0.5]]))
assert min_acq_l == np.inf


Expand Down
Loading