diff --git a/bayes_opt/bayesian_optimization.py b/bayes_opt/bayesian_optimization.py
index 850afde3d..4a9305885 100644
--- a/bayes_opt/bayesian_optimization.py
+++ b/bayes_opt/bayesian_optimization.py
@@ -64,13 +64,13 @@ def dispatch(self, event):


 class BayesianOptimization(Observable):
-    def __init__(self, f, pbounds, random_state=None, verbose=2):
+    def __init__(self, f, pbounds, ptypes=None, random_state=None, verbose=2):
         """"""
         self._random_state = ensure_rng(random_state)

         # Data structure containing the function to be optimized, the bounds of
         # its domain, and a record of the evaluations we have done so far
-        self._space = TargetSpace(f, pbounds, random_state)
+        self._space = TargetSpace(f, pbounds, ptypes, random_state)

         # queue
         self._queue = Queue()
@@ -129,6 +129,7 @@ def suggest(self, utility_function):
             gp=self._gp,
             y_max=self._space.target.max(),
             bounds=self._space.bounds,
+            btypes=self._space.btypes,
             random_state=self._random_state
         )
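For readers skimming the patch, here is a minimal sketch of how the extended constructor would be called once this change lands. The objective function and its bounds are invented for illustration; the `ptypes` keyword is the only thing this hunk actually adds.

```python
from bayes_opt import BayesianOptimization

# Hypothetical objective: maximum at x = 0.3, n = 7.
def black_box(x, n):
    return -(x - 0.3) ** 2 - (n - 7) ** 2

optimizer = BayesianOptimization(
    f=black_box,
    pbounds={"x": (-2, 2), "n": (1, 20)},
    ptypes={"x": float, "n": int},  # new in this PR: n is sampled as an integer
    random_state=1,
)
optimizer.maximize(init_points=3, n_iter=5)
```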
diff --git a/bayes_opt/target_space.py b/bayes_opt/target_space.py
index 4b6febce1..9e00696d6 100644
--- a/bayes_opt/target_space.py
+++ b/bayes_opt/target_space.py
@@ -17,12 +17,13 @@ class TargetSpace(object):
     >>> def target_func(p1, p2):
     >>>     return p1 + p2
     >>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
-    >>> space = TargetSpace(target_func, pbounds, random_state=0)
+    >>> ptypes = {'p1': float, 'p2': int}
+    >>> space = TargetSpace(target_func, pbounds, ptypes, random_state=0)
     >>> x = space.random_points(1)[0]
     >>> y = space.register_point(x)
     >>> assert self.max_point()['max_val'] == y
    """
-    def __init__(self, target_func, pbounds, random_state=None):
+    def __init__(self, target_func, pbounds, ptypes=None, random_state=None):
        """
        Parameters
        ----------
@@ -30,9 +31,12 @@ def __init__(self, target_func, pbounds, random_state=None):
            Function to be maximized.

        pbounds : dict
-            Dictionary with parameters names as keys and a tuple with minimum
+            Dictionary with parameter names as keys and a tuple with minimum
            and maximum values.

+        ptypes : dict
+            Dictionary with parameter names as keys and their types as values.
+
        random_state : int, RandomState, or None
            optionally specify a seed for a random number generator
        """
@@ -44,10 +48,19 @@ def __init__(self, target_func, pbounds, random_state=None):
        # Get the name of the parameters
        self._keys = sorted(pbounds)
        # Create an array with parameters bounds
-        self._bounds = np.array(
-            [item[1] for item in sorted(pbounds.items(), key=lambda x: x[0])],
-            dtype=np.float
-        )
+        self._bounds = np.array([list(pbounds[item]) for item in self._keys], dtype=float)
+        # Create an array with the parameter types, if declared
+        if ptypes is None:
+            self._btypes = None
+        else:
+            if set(ptypes) != set(pbounds):
+                raise ValueError(
+                    "ptypes and pbounds must list exactly the same parameters"
+                )
+            self._btypes = np.array([ptypes[item] for item in self._keys], dtype=object)

        # preallocated memory for X and Y points
        self._params = np.empty(shape=(0, self.dim))
@@ -87,6 +100,10 @@ def keys(self):
    def bounds(self):
        return self._bounds

+    @property
+    def btypes(self):
+        return self._btypes
+
    def params_to_array(self, params):
        try:
            assert set(params) == set(self.keys)
@@ -142,11 +159,12 @@ def register(self, params, target):
        Notes
        -----
-        runs in ammortized constant time
+        runs in amortized constant time

        Example
        -------
        >>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
-        >>> space = TargetSpace(lambda p1, p2: p1 + p2, pbounds)
+        >>> ptypes = {'p1': float, 'p2': int}
+        >>> space = TargetSpace(lambda p1, p2: p1 + p2, pbounds, ptypes)
        >>> len(space)
        0
@@ -208,18 +226,26 @@ def random_sample(self):
        -------
        >>> target_func = lambda p1, p2: p1 + p2
        >>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
-        >>> space = TargetSpace(target_func, pbounds, random_state=0)
+        >>> ptypes = {'p1': float, 'p2': int}
+        >>> space = TargetSpace(target_func, pbounds, ptypes, random_state=0)
        >>> space.random_points(1)
-        array([[ 55.33253689,   0.54488318]])
+        array([[  0.54488318,  55.        ]])
        """
-        # TODO: support integer, category, and basic scipy.optimize constraints
+        # TODO: support category and basic scipy.optimize constraints
        data = np.empty((1, self.dim))
        for col, (lower, upper) in enumerate(self._bounds):
-            data.T[col] = self.random_state.uniform(lower, upper, size=1)
+            if self.btypes is not None and self.btypes[col] == int:
+                # note: randint's upper bound is exclusive
+                data.T[col] = self.random_state.randint(int(lower), int(upper), size=1)
+            else:
+                data.T[col] = self.random_state.uniform(lower, upper, size=1)
        return data.ravel()

    def max(self):
-        """Get maximum target value found and corresponding parametes."""
+        """Get maximum target value found and corresponding parameters."""
        try:
            res = {
                'target': self.target.max(),
@@ -248,7 +274,19 @@ def set_bounds(self, new_bounds):
        ----------
        new_bounds : dict
            A dictionary with the parameter name and its new bounds
+
+        Notes
+        -----
+        If the type of the modified parameter is int, the new bounds are
+        rounded to the nearest integers: e.g. if "p1" is an int,
+        new_bounds = {"p1": (1.2, 8.7)} becomes (1, 9).
        """
        for row, key in enumerate(self.keys):
            if key in new_bounds:
-                self._bounds[row] = new_bounds[key]
+                if self._btypes is not None and self._btypes[row] == int:
+                    lbound = int(np.round(new_bounds[key][0]))
+                    ubound = int(np.round(new_bounds[key][1]))
+                    self._bounds[row] = [lbound, ubound]
+                else:
+                    self._bounds[row] = list(new_bounds[key])
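To make the new behaviour concrete, here is a small sketch (not part of the patch) of how a typed `TargetSpace` would behave, assuming the code above; the parameter names and values are invented:

```python
from bayes_opt.target_space import TargetSpace

space = TargetSpace(
    target_func=lambda p1, p2: p1 + p2,
    pbounds={"p1": (0, 1), "p2": (1, 100)},
    ptypes={"p1": float, "p2": int},
)
print(space.btypes)               # [<class 'float'> <class 'int'>]

sample = space.random_sample()    # p2 is drawn with randint, p1 with uniform
assert float(sample[1]).is_integer()

space.set_bounds({"p2": (3.4, 49.6)})
print(space.bounds[1])            # [ 3. 50.] -- int bounds get rounded
```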
diff --git a/bayes_opt/util.py b/bayes_opt/util.py
index 67fba9d9b..8ff48295f 100644
--- a/bayes_opt/util.py
+++ b/bayes_opt/util.py
@@ -3,8 +3,39 @@
 from scipy.stats import norm
 from scipy.optimize import minimize

+def generate_trials(n_events, bounds, btypes, random_state):
+    """A function to generate a set of events under several constraints.

-def acq_max(ac, gp, y_max, bounds, random_state, n_warmup=100000, n_iter=250):
+    Parameters
+    ----------
+    :param n_events:
+        The number of events to generate.
+
+    :param bounds:
+        The variables' bounds to limit the search of the acq max.
+
+    :param btypes:
+        The types of the variables, or None if all are floats.
+
+    :param random_state:
+        Instance of np.RandomState random number generator.
+    """
+    if btypes is None:
+        return random_state.uniform(bounds[:, 0], bounds[:, 1],
+                                    size=(n_events, bounds.shape[0]))
+    x_trials = np.empty((n_events, bounds.shape[0]))
+    for col, (lower, upper) in enumerate(bounds):
+        if btypes[col] == int:
+            # note: randint's upper bound is exclusive
+            x_trials[:, col] = random_state.randint(int(lower), int(upper), size=n_events)
+        else:
+            x_trials[:, col] = random_state.uniform(lower, upper, size=n_events)
+    return x_trials
+
+
+def acq_max(ac, gp, y_max, bounds, random_state, btypes=None, n_warmup=100000, n_iter=250):
     """
     A function to find the maximum of the acquisition function
@@ -26,6 +57,9 @@ def acq_max(ac, gp, y_max, bounds, random_state, n_warmup=100000, n_iter=250):
     :param bounds:
         The variables bounds to limit the search of the acq max.

+    :param btypes:
+        The types of the variables.
+
     :param random_state:
         instance of np.RandomState random number generator
@@ -39,20 +73,18 @@ def acq_max(ac, gp, y_max, bounds, random_state, n_warmup=100000, n_iter=250):
     -------
     :return: x_max, The arg max of the acquisition function.
     """
     # Warm up with random points
-    x_tries = random_state.uniform(bounds[:, 0], bounds[:, 1],
-                                   size=(n_warmup, bounds.shape[0]))
+    x_tries = generate_trials(n_warmup, bounds, btypes, random_state)
     ys = ac(x_tries, gp=gp, y_max=y_max)
     x_max = x_tries[ys.argmax()]
     max_acq = ys.max()

     # Explore the parameter space more throughly
-    x_seeds = random_state.uniform(bounds[:, 0], bounds[:, 1],
-                                   size=(n_iter, bounds.shape[0]))
+    x_seeds = generate_trials(n_iter, bounds, btypes, random_state)
     for x_try in x_seeds:
         # Find the minimum of minus the acquisition function
-        res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
+        ac_op = lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max)
+        res = minimize(ac_op,
                        x_try.reshape(1, -1),
                        bounds=bounds,
                        method="L-BFGS-B")
@@ -61,10 +93,30 @@ def acq_max(ac, gp, y_max, bounds, random_state, n_warmup=100000, n_iter=250):
         # See if success
         if not res.success:
             continue

-        # Store it if better than previous minimum(maximum).
-        if max_acq is None or -res.fun[0] >= max_acq:
-            x_max = res.x
-            max_acq = -res.fun[0]
+        # If integer parameters are present, evaluate the acquisition at the
+        # two integer neighbours of the continuous optimum and keep the best.
+        if btypes is not None and int in btypes:
+            x_inf = res.x.copy()
+            x_sup = res.x.copy()
+            for i, t in enumerate(btypes):
+                if t == int:
+                    # bracket the value between its nearest integers
+                    x_inf[i] = np.floor(res.x[i])
+                    x_sup[i] = x_inf[i] + 1
+            ys = np.array([ac_op(x_inf)[0], ac_op(x_sup)[0]])
+            # Store it if better than the previous maximum.
+            if max_acq is None or -ys.min() >= max_acq:
+                max_acq = -ys.min()
+                x_max = [x_inf, x_sup][int(ys.argmin())]
+        # Otherwise store the continuous optimum if better than the
+        # previous maximum.
+        elif max_acq is None or -res.fun[0] >= max_acq:
+            x_max = res.x
+            max_acq = -res.fun[0]

     # Clip output to make sure it lies within the bounds. Due to floating
     # point technicalities this is not always the case.
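As a quick illustration of what the new helper returns (again, not part of the patch; the bounds and types are invented):

```python
import numpy as np
from bayes_opt.util import generate_trials

rng = np.random.RandomState(0)
bounds = np.array([[0.0, 1.0], [1.0, 100.0]])

trials = generate_trials(4, bounds, btypes=[float, int], random_state=rng)
# column 0: uniform floats in [0, 1); column 1: integers from randint,
# whose upper bound is exclusive (so 100 itself is never drawn)
print(trials)
```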
diff --git a/examples/basic-tour.ipynb b/examples/basic-tour.ipynb
index a21037284..0ac3b5673 100644
--- a/examples/basic-tour.ipynb
+++ b/examples/basic-tour.ipynb
@@ -1,20 +1,5 @@
 {
  "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Basic tour of the Bayesian Optimization package\n",
-    "\n",
-    "This is a constrained global optimization package built upon bayesian inference and gaussian process, that attempts to find the maximum value of an unknown function in as few iterations as possible. This technique is particularly suited for optimization of high cost functions, situations where the balance between exploration and exploitation is important.\n",
-    "\n",
-    "Bayesian optimization works by constructing a posterior distribution of functions (gaussian process) that best describes the function you want to optimize. As the number of observations grows, the posterior distribution improves, and the algorithm becomes more certain of which regions in parameter space are worth exploring and which are not, as seen in the picture below.\n",
-    "\n",
-    "As you iterate over and over, the algorithm balances its needs of exploration and exploitation taking into account what it knows about the target function. At each step a Gaussian Process is fitted to the known samples (points previously explored), and the posterior distribution, combined with a exploration strategy (such as UCB (Upper Confidence Bound), or EI (Expected Improvement)), are used to determine the next point that should be explored (see the gif below).\n",
-    "\n",
-    "This process is designed to minimize the number of steps required to find a combination of parameters that are close to the optimal combination. To do so, this method uses a proxy optimization problem (finding the maximum of the acquisition function) that, albeit still a hard problem, is cheaper (in the computational sense) and common tools can be employed. Therefore Bayesian Optimization is most adequate for situations where sampling the function to be optimized is a very expensive endeavor. See the references for a proper discussion of this method."
-   ]
-  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -42,6 +27,21 @@
     "    return -x ** 2 - (y - 1) ** 2 + 1"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Basic tour of the Bayesian Optimization package\n",
+    "\n",
+    "This is a constrained global optimization package built upon bayesian inference and gaussian process, that attempts to find the maximum value of an unknown function in as few iterations as possible. This technique is particularly suited for optimization of high cost functions, situations where the balance between exploration and exploitation is important.\n",
+    "\n",
+    "Bayesian optimization works by constructing a posterior distribution of functions (gaussian process) that best describes the function you want to optimize. As the number of observations grows, the posterior distribution improves, and the algorithm becomes more certain of which regions in parameter space are worth exploring and which are not, as seen in the picture below.\n",
+    "\n",
+    "As you iterate over and over, the algorithm balances its needs of exploration and exploitation taking into account what it knows about the target function. At each step a Gaussian Process is fitted to the known samples (points previously explored), and the posterior distribution, combined with an exploration strategy (such as UCB (Upper Confidence Bound), or EI (Expected Improvement)), is used to determine the next point that should be explored (see the gif below).\n",
+    "\n",
+    "This process is designed to minimize the number of steps required to find a combination of parameters that are close to the optimal combination. To do so, this method uses a proxy optimization problem (finding the maximum of the acquisition function) that, albeit still a hard problem, is cheaper (in the computational sense) and common tools can be employed. Therefore Bayesian Optimization is most adequate for situations where sampling the function to be optimized is a very expensive endeavor. See the references for a proper discussion of this method."
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {},
diff --git a/examples/bo_parameterTyping_example.py b/examples/bo_parameterTyping_example.py
new file mode 100644
index 000000000..cd94669ea
--- /dev/null
+++ b/examples/bo_parameterTyping_example.py
@@ -0,0 +1,20 @@
+from bayes_opt import BayesianOptimization
+
+# function to be maximized -- the optimum is at (x=0.5, y=10)
+target_function = lambda x, y: -(x - 0.5) ** 2 - (y - 10) ** 2 + 1
+
+# define the parameter bounds and types
+pbounds = {'y': (5, 15), 'x': (-3, 3)}
+ptypes = {'y': int, 'x': float}
+bo = BayesianOptimization(target_function, pbounds, ptypes=ptypes)
+
+bo.probe({"x": 1.4, "y": 6})
+bo.probe({"x": 2.4, "y": 12})
+bo.probe({"x": -2.4, "y": 13})
+
+bo.maximize(init_points=5, n_iter=5, kappa=2)
+
+# print results
+print(f'Estimated position of the maximum: {bo.max}')
+print(f'List of tested positions:\n{bo.res}')
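If one wanted to check that the typing actually took effect, a small assertion could be appended to the example; this is a sketch, not part of the patch:

```python
# every probed or suggested y should be integer-valued, since ptypes
# declares it as int (values are still stored as floats internally)
assert all(float(res["params"]["y"]).is_integer() for res in bo.res)
```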
""" return rfc_cv( - n_estimators=int(n_estimators), - min_samples_split=int(min_samples_split), + n_estimators=n_estimators, + min_samples_split=min_samples_split, max_features=max(min(max_features, 0.999), 1e-3), data=data, targets=targets, @@ -102,7 +103,11 @@ def rfc_crossval(n_estimators, min_samples_split, max_features): pbounds={ "n_estimators": (10, 250), "min_samples_split": (2, 25), - "max_features": (0.1, 0.999), + "max_features": (0.1, 0.999) + }, + ptypes={"n_estimators": int, + "min_samples_split": int, + "max_features": float }, random_state=1234, verbose=2 diff --git a/tests/test_bayesian_optimization.py b/tests/test_bayesian_optimization.py index a2c31e753..1775ebd8b 100644 --- a/tests/test_bayesian_optimization.py +++ b/tests/test_bayesian_optimization.py @@ -10,10 +10,10 @@ def target_func(**kwargs): PBOUNDS = {'p1': (0, 10), 'p2': (0, 10)} - +PTYPES = {'p1': float, 'p2': int} def test_register(): - optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1) + optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1) assert len(optimizer.space) == 0 optimizer.register(params={"p1": 1, "p2": 2}, target=3) @@ -31,7 +31,7 @@ def test_register(): def test_probe_lazy(): - optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1) + optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1) optimizer.probe(params={"p1": 1, "p2": 2}, lazy=True) assert len(optimizer.space) == 0 @@ -47,7 +47,7 @@ def test_probe_lazy(): def test_probe_eager(): - optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1) + optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1) optimizer.probe(params={"p1": 1, "p2": 2}, lazy=False) assert len(optimizer.space) == 1 @@ -70,7 +70,7 @@ def test_probe_eager(): def test_suggest_at_random(): util = UtilityFunction(kind="ucb", kappa=5, xi=0) - optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1) + optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1) for _ in range(50): sample = optimizer.space.params_to_array(optimizer.suggest(util)) @@ -81,7 +81,7 @@ def test_suggest_at_random(): def test_suggest_with_one_observation(): util = UtilityFunction(kind="ucb", kappa=5, xi=0) - optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1) + optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1) optimizer.register(params={"p1": 1, "p2": 2}, target=3) @@ -98,7 +98,7 @@ def test_suggest_with_one_observation(): def test_prime_queue_all_empty(): - optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1) + optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1) assert len(optimizer._queue) == 0 assert len(optimizer.space) == 0 @@ -108,7 +108,7 @@ def test_prime_queue_all_empty(): def test_prime_queue_empty_with_init(): - optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1) + optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1) assert len(optimizer._queue) == 0 assert len(optimizer.space) == 0 @@ -118,7 +118,7 @@ def test_prime_queue_empty_with_init(): def test_prime_queue_with_register(): - optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1) + optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1) assert len(optimizer._queue) == 0 assert len(optimizer.space) == 0 @@ -129,7 +129,7 @@ def test_prime_queue_with_register(): def test_prime_queue_with_register_and_init(): - 
diff --git a/tests/test_bayesian_optimization.py b/tests/test_bayesian_optimization.py
index a2c31e753..1775ebd8b 100644
--- a/tests/test_bayesian_optimization.py
+++ b/tests/test_bayesian_optimization.py
@@ -10,10 +10,10 @@ def target_func(**kwargs):


 PBOUNDS = {'p1': (0, 10), 'p2': (0, 10)}
-
+PTYPES = {'p1': float, 'p2': int}
+

 def test_register():
-    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
+    optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1)
     assert len(optimizer.space) == 0

     optimizer.register(params={"p1": 1, "p2": 2}, target=3)
@@ -31,7 +31,7 @@ def test_register():


 def test_probe_lazy():
-    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
+    optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1)

     optimizer.probe(params={"p1": 1, "p2": 2}, lazy=True)
     assert len(optimizer.space) == 0
@@ -47,7 +47,7 @@ def test_probe_lazy():


 def test_probe_eager():
-    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
+    optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1)

     optimizer.probe(params={"p1": 1, "p2": 2}, lazy=False)
     assert len(optimizer.space) == 1
@@ -70,7 +70,7 @@ def test_probe_eager():

 def test_suggest_at_random():
     util = UtilityFunction(kind="ucb", kappa=5, xi=0)
-    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
+    optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1)

     for _ in range(50):
         sample = optimizer.space.params_to_array(optimizer.suggest(util))
@@ -81,7 +81,7 @@ def test_suggest_at_random():

 def test_suggest_with_one_observation():
     util = UtilityFunction(kind="ucb", kappa=5, xi=0)
-    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
+    optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1)

     optimizer.register(params={"p1": 1, "p2": 2}, target=3)
@@ -98,7 +98,7 @@ def test_suggest_with_one_observation():

 def test_prime_queue_all_empty():
-    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
+    optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1)

     assert len(optimizer._queue) == 0
     assert len(optimizer.space) == 0
@@ -108,7 +108,7 @@ def test_prime_queue_all_empty():

 def test_prime_queue_empty_with_init():
-    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
+    optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1)

     assert len(optimizer._queue) == 0
     assert len(optimizer.space) == 0
@@ -118,7 +118,7 @@ def test_prime_queue_empty_with_init():

 def test_prime_queue_with_register():
-    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
+    optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1)

     assert len(optimizer._queue) == 0
     assert len(optimizer.space) == 0
@@ -129,7 +129,7 @@ def test_prime_queue_with_register():

 def test_prime_queue_with_register_and_init():
-    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
+    optimizer = BayesianOptimization(target_func, PBOUNDS, PTYPES, random_state=1)

     assert len(optimizer._queue) == 0
     assert len(optimizer.space) == 0
diff --git a/tests/test_target_space.py b/tests/test_target_space.py
index 7ca8b827c..8d5e99e9c 100644
--- a/tests/test_target_space.py
+++ b/tests/test_target_space.py
@@ -8,7 +8,8 @@ def target_func(**kwargs):
     return sum(kwargs.values())


-PBOUNDS = {'p1': (0, 1), 'p2': (1, 100)}
+PBOUNDS = {'p1': (0, 10), 'p2': (1, 100)}
+PTYPES = {'p1': float, 'p2': int}


 def test_keys_and_bounds_in_same_order():
@@ -16,19 +17,21 @@ def test_keys_and_bounds_in_same_order():
     pbounds = {
         'p1': (0, 1),
         'p3': (0, 3),
         'p2': (0, 2),
         'p4': (0, 4),
     }
-    space = TargetSpace(target_func, pbounds)
+    ptypes = {'p1': int, 'p2': float, 'p3': int, 'p4': float}
+    space = TargetSpace(target_func, pbounds, ptypes)

     assert space.dim == len(pbounds)
     assert space.empty
     assert space.keys == ["p1", "p2", "p3", "p4"]
+    assert list(space.btypes) == [int, float, int, float]
     assert all(space.bounds[:, 0] == np.array([0, 0, 0, 0]))
     assert all(space.bounds[:, 1] == np.array([1, 2, 3, 4]))


 def test_params_to_array():
-    space = TargetSpace(target_func, PBOUNDS)
+    space = TargetSpace(target_func, PBOUNDS, PTYPES)

     assert all(space.params_to_array({"p1": 2, "p2": 3}) == np.array([2, 3]))
     assert all(space.params_to_array({"p2": 2, "p1": 9}) == np.array([9, 2]))
@@ -41,7 +44,7 @@ def test_params_to_array():


 def test_array_to_params():
-    space = TargetSpace(target_func, PBOUNDS)
+    space = TargetSpace(target_func, PBOUNDS, PTYPES)

     assert space.array_to_params(np.array([2, 3])) == {"p1": 2, "p2": 3}

     with pytest.raises(ValueError):
@@ -51,7 +54,7 @@ def test_array_to_params():


 def test_as_array():
-    space = TargetSpace(target_func, PBOUNDS)
+    space = TargetSpace(target_func, PBOUNDS, PTYPES)

     x = space._as_array([0, 1])
     assert x.shape == (2,)
@@ -72,7 +75,7 @@ def test_as_array():


 def test_register():
-    space = TargetSpace(target_func, PBOUNDS)
+    space = TargetSpace(target_func, PBOUNDS, PTYPES)
     assert len(space) == 0

     # registering with dict
@@ -94,7 +97,7 @@ def test_register():


 def test_probe():
-    space = TargetSpace(target_func, PBOUNDS)
+    space = TargetSpace(target_func, PBOUNDS, PTYPES)
     assert len(space) == 0

     # probing with dict
@@ -127,9 +130,10 @@ def test_random_sample():
     pbounds = {
         'p1': (0, 1),
         'p3': (0, 3),
         'p2': (0, 2),
         'p4': (0, 4),
     }
-    space = TargetSpace(target_func, pbounds, random_state=8)
+    ptypes = {'p1': int, 'p2': float, 'p3': int, 'p4': float}
+    space = TargetSpace(target_func, pbounds, ptypes, random_state=8)

     for _ in range(50):
         random_sample = space.random_sample()
@@ -139,7 +143,7 @@ def test_random_sample():


 def test_max():
-    space = TargetSpace(target_func, PBOUNDS)
+    space = TargetSpace(target_func, PBOUNDS, PTYPES)
     assert space.max() == {}

     space.probe(params={"p1": 1, "p2": 2})
@@ -150,7 +155,7 @@ def test_max():


 def test_res():
-    space = TargetSpace(target_func, PBOUNDS)
+    space = TargetSpace(target_func, PBOUNDS, PTYPES)
     assert space.res() == []

     space.probe(params={"p1": 1, "p2": 2})
@@ -173,9 +178,10 @@ def test_set_bounds():
     pbounds = {
         'p1': (0, 1),
         'p3': (0, 3),
         'p2': (0, 2),
         'p4': (0, 4),
     }
-    space = TargetSpace(target_func, pbounds)
+    ptypes = {'p1': int, 'p2': float, 'p3': int, 'p4': float}
+    space = TargetSpace(target_func, pbounds, ptypes)

     # Ignore unknown keys
     space.set_bounds({"other": (7, 8)})
     assert all(space.bounds[:, 0] == np.array([0, 0, 0, 0]))
     assert all(space.bounds[:, 1] == np.array([1, 2, 3, 4]))

     # Update bounds accordingly
-    space.set_bounds({"p2": (1, 8)})
-    assert all(space.bounds[:, 0] == np.array([0, 1, 0, 0]))
-    assert all(space.bounds[:, 1] == np.array([1, 8, 3, 4]))
+    space.set_bounds({"p3": (1.1, 8.7)})
+    assert all(space.bounds[:, 0] == np.array([0, 0, 1, 0]))
+    assert all(space.bounds[:, 1] == np.array([1, 2, 9, 4]))
+
+    # Without ptypes the float bounds are kept as given
+    space = TargetSpace(target_func, pbounds, ptypes=None)
+    space.set_bounds({"p3": (1.1, 8.7)})
+    assert all(space.bounds[:, 0] == np.array([0, 0, 1.1, 0]))
+    assert all(space.bounds[:, 1] == np.array([1, 2, 8.7, 4]))


 if __name__ == '__main__':
diff --git a/tests/test_util.py b/tests/test_util.py
index 04ecf5deb..e1e8b7098 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -61,6 +61,7 @@ def test_acq_with_ucb():
         GP,
         y_max,
         bounds=np.array([[0, 1], [0, 1]]),
+        btypes=[float, float],
         random_state=ensure_rng(0),
         n_iter=20
     )
@@ -79,6 +80,7 @@ def test_acq_with_ei():
         GP,
         y_max,
         bounds=np.array([[0, 1], [0, 1]]),
+        btypes=[float, float],
         random_state=ensure_rng(0),
         n_iter=200,
     )
@@ -97,6 +99,7 @@ def test_acq_with_poi():
         GP,
         y_max,
         bounds=np.array([[0, 1], [0, 1]]),
+        btypes=[float, float],
         random_state=ensure_rng(0),
         n_iter=200,
     )
@@ -112,7 +115,8 @@ def f(x, y):

     optimizer = BayesianOptimization(
         f=f,
-        pbounds={"x": (-2, 2), "y": (-2, 2)}
+        pbounds={"x": (-2, 2), "y": (-2, 2)},
+        ptypes={"x": float, "y": float}
     )

     assert len(optimizer.space) == 0
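A unit test for the new helper itself is missing from the patch; something along these lines could cover it (a sketch, assuming `generate_trials` and `ensure_rng` are importable from `bayes_opt.util` as above):

```python
import numpy as np
from bayes_opt.util import generate_trials, ensure_rng


def test_generate_trials_respects_btypes():
    bounds = np.array([[0, 1], [1, 100]], dtype=float)
    trials = generate_trials(100, bounds, [float, int], ensure_rng(0))

    assert trials.shape == (100, 2)
    # the int column holds whole numbers only
    assert np.all(trials[:, 1] == np.floor(trials[:, 1]))
    # all samples respect the bounds
    assert np.all(trials >= bounds[:, 0])
    assert np.all(trials <= bounds[:, 1])
```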