diff --git a/bayes_opt/logger.py b/bayes_opt/logger.py
index da60874ad..ed7af16e7 100644
--- a/bayes_opt/logger.py
+++ b/bayes_opt/logger.py
@@ -8,7 +8,7 @@
 from colorama import Fore, just_fix_windows_console
 
 if TYPE_CHECKING:
-    from bayes_opt.parameter import ParameterConfig
+    from bayes_opt.parameter import ParamsType
 
 just_fix_windows_console()
 
@@ -28,9 +28,9 @@ class ScreenLogger:
     _default_cell_size = 9
     _default_precision = 4
 
-    _colour_new_max = Fore.MAGENTA
-    _colour_regular_message = Fore.RESET
-    _colour_reset = Fore.RESET
+    _color_new_max = Fore.MAGENTA
+    _color_regular_message = Fore.RESET
+    _color_reset = Fore.RESET
 
     def __init__(self, verbose: int = 2, is_constrained: bool = False) -> None:
         self._verbose = verbose
@@ -141,8 +141,8 @@ def _print_step(
         self,
         keys: list[str],
         result: dict[str, Any],
-        params_config: Mapping[str, ParameterConfig],
-        colour: str = _colour_regular_message,
+        params_config: Mapping[str, ParamsType],
+        color: str = _color_regular_message,
     ) -> str:
         """Print a step.
 
@@ -154,12 +154,12 @@
         keys : list[str]
            The parameter keys.
 
-        params_config : Mapping[str, ParameterConfig]
+        params_config : Mapping[str, ParamsType]
            The configuration to map the key to the parameter for correct formatting.
 
-        colour : str, optional
+        color : str, optional
            Color to use for the output.
-            (Default value = _colour_regular_message, equivalent to Fore.RESET)
+            (Default value = _color_regular_message, equivalent to Fore.RESET)
 
         Returns
         -------
@@ -178,7 +178,7 @@
             else params_config[key].to_string(val, self._default_cell_size)
             for key, val in params.items()
         ]
-        return "| " + " | ".join(colour + x + self._colour_reset for x in cells if x is not None) + " |"
+        return "| " + " | ".join(color + x + self._color_reset for x in cells if x is not None) + " |"
 
     def _print_header(self, keys: list[str]) -> str:
         """Print the header of the log.
@@ -258,7 +258,7 @@
         self,
         keys: list[str],
         result: dict[str, Any],
-        params_config: Mapping[str, ParameterConfig],
+        params_config: Mapping[str, ParamsType],
         current_max: dict[str, Any] | None,
     ) -> None:
         """Log an optimization step.
@@ -271,7 +271,7 @@
         result : dict[str, Any]
            The result dictionary for the most recent step.
 
-        params_config : Mapping[str, ParameterConfig]
+        params_config : Mapping[str, ParamsType]
            The configuration to map the key to the parameter for correct formatting.
 
         current_max : dict[str, Any] | None
@@ -283,8 +283,8 @@ def log_optimization_step(
             return
 
         if self._verbose == 2 or is_new_max:
-            colour = self._colour_new_max if is_new_max else self._colour_regular_message
-            line = self._print_step(keys, result, params_config, colour=colour) + "\n"
+            color = self._color_new_max if is_new_max else self._color_regular_message
+            line = self._print_step(keys, result, params_config, color=color) + "\n"
             if self._verbose:
                 print(line, end="")
 
diff --git a/tests/test_acquisition.py b/tests/test_acquisition.py
index a57b4a0f2..30150bcfd 100644
--- a/tests/test_acquisition.py
+++ b/tests/test_acquisition.py
@@ -406,130 +406,30 @@ def verify_optimizers_match(optimizer1, optimizer2):
     assert suggestion1 == suggestion2, f"\nSuggestion 1: {suggestion1}\nSuggestion 2: {suggestion2}"
 
 
-def test_integration_upper_confidence_bound(target_func_x_and_y, pbounds, tmp_path):
-    """Test save/load integration with UpperConfidenceBound acquisition."""
-    acquisition_function = UpperConfidenceBound(kappa=2.576)
-
-    # Create and run first optimizer
-    optimizer = BayesianOptimization(
-        f=target_func_x_and_y,
-        pbounds=pbounds,
-        acquisition_function=acquisition_function,
-        random_state=1,
-        verbose=0,
-    )
-    optimizer.maximize(init_points=2, n_iter=3)
-
-    # Save state
-    state_path = tmp_path / "ucb_state.json"
-    optimizer.save_state(state_path)
-
-    # Create new optimizer and load state
-    new_optimizer = BayesianOptimization(
-        f=target_func_x_and_y,
-        pbounds=pbounds,
-        acquisition_function=UpperConfidenceBound(kappa=2.576),
-        random_state=1,
-        verbose=0,
-    )
-    new_optimizer.load_state(state_path)
-
-    verify_optimizers_match(optimizer, new_optimizer)
-
-
-def test_integration_probability_improvement(target_func_x_and_y, pbounds, tmp_path):
-    """Test save/load integration with ProbabilityOfImprovement acquisition."""
-    acquisition_function = ProbabilityOfImprovement(xi=0.01)
-
-    optimizer = BayesianOptimization(
-        f=target_func_x_and_y,
-        pbounds=pbounds,
-        acquisition_function=acquisition_function,
-        random_state=1,
-        verbose=0,
-    )
-    optimizer.maximize(init_points=2, n_iter=3)
-
-    state_path = tmp_path / "pi_state.json"
-    optimizer.save_state(state_path)
-
-    new_optimizer = BayesianOptimization(
-        f=target_func_x_and_y,
-        pbounds=pbounds,
-        acquisition_function=ProbabilityOfImprovement(xi=0.01),
-        random_state=1,
-        verbose=0,
-    )
-    new_optimizer.load_state(state_path)
-
-    verify_optimizers_match(optimizer, new_optimizer)
-
-
-def test_integration_expected_improvement(target_func_x_and_y, pbounds, tmp_path):
-    """Test save/load integration with ExpectedImprovement acquisition."""
-    acquisition_function = ExpectedImprovement(xi=0.01)
-
-    optimizer = BayesianOptimization(
-        f=target_func_x_and_y,
-        pbounds=pbounds,
-        acquisition_function=acquisition_function,
-        random_state=1,
-        verbose=0,
-    )
-    optimizer.maximize(init_points=2, n_iter=3)
-
-    state_path = tmp_path / "ei_state.json"
-    optimizer.save_state(state_path)
-
-    new_optimizer = BayesianOptimization(
-        f=target_func_x_and_y,
-        pbounds=pbounds,
-        acquisition_function=ExpectedImprovement(xi=0.01),
-        random_state=1,
-        verbose=0,
-    )
-    new_optimizer.load_state(state_path)
-
-    verify_optimizers_match(optimizer, new_optimizer)
-
-
-def test_integration_constant_liar(target_func_x_and_y, pbounds, tmp_path):
-    """Test save/load integration with ConstantLiar acquisition."""
-    base_acq = UpperConfidenceBound(kappa=2.576)
-    acquisition_function = ConstantLiar(base_acquisition=base_acq)
-
-    optimizer = BayesianOptimization(
-        f=target_func_x_and_y,
-        pbounds=pbounds,
-        acquisition_function=acquisition_function,
-        random_state=1,
-        verbose=0,
-    )
-    optimizer.maximize(init_points=2, n_iter=3)
-
-    state_path = tmp_path / "cl_state.json"
-    optimizer.save_state(state_path)
-
-    new_optimizer = BayesianOptimization(
-        f=target_func_x_and_y,
-        pbounds=pbounds,
-        acquisition_function=ConstantLiar(base_acquisition=UpperConfidenceBound(kappa=2.576)),
-        random_state=1,
-        verbose=0,
-    )
-    new_optimizer.load_state(state_path)
-
-    verify_optimizers_match(optimizer, new_optimizer)
-
-
-def test_integration_gp_hedge(target_func_x_and_y, pbounds, tmp_path):
-    """Test save/load integration with GPHedge acquisition."""
-    base_acquisitions = [
-        UpperConfidenceBound(kappa=2.576),
-        ProbabilityOfImprovement(xi=0.01),
-        ExpectedImprovement(xi=0.01),
-    ]
-    acquisition_function = GPHedge(base_acquisitions=base_acquisitions)
+@pytest.mark.parametrize(
+    ("acquisition_fn_factory", "state_filename"),
+    [
+        (lambda: UpperConfidenceBound(kappa=2.576), "ucb_state.json"),
+        (lambda: ProbabilityOfImprovement(xi=0.01), "pi_state.json"),
+        (lambda: ExpectedImprovement(xi=0.01), "ei_state.json"),
+        (lambda: ConstantLiar(base_acquisition=UpperConfidenceBound(kappa=2.576)), "cl_state.json"),
+        (
+            lambda: GPHedge(
+                base_acquisitions=[
+                    UpperConfidenceBound(kappa=2.576),
+                    ProbabilityOfImprovement(xi=0.01),
+                    ExpectedImprovement(xi=0.01),
+                ]
+            ),
+            "gphedge_state.json",
+        ),
+    ],
+)
+def test_integration_acquisition_functions(
+    acquisition_fn_factory, state_filename, target_func_x_and_y, pbounds, tmp_path
+):
+    """Parametrized integration test for acquisition functions."""
+    acquisition_function = acquisition_fn_factory()
 
     optimizer = BayesianOptimization(
         f=target_func_x_and_y,
@@ -540,18 +440,13 @@ def test_integration_gp_hedge(target_func_x_and_y, pbounds, tmp_path):
     )
     optimizer.maximize(init_points=2, n_iter=3)
 
-    state_path = tmp_path / "gphedge_state.json"
+    state_path = tmp_path / state_filename
     optimizer.save_state(state_path)
 
-    new_base_acquisitions = [
-        UpperConfidenceBound(kappa=2.576),
-        ProbabilityOfImprovement(xi=0.01),
-        ExpectedImprovement(xi=0.01),
-    ]
     new_optimizer = BayesianOptimization(
         f=target_func_x_and_y,
         pbounds=pbounds,
-        acquisition_function=GPHedge(base_acquisitions=new_base_acquisitions),
+        acquisition_function=acquisition_fn_factory(),
         random_state=1,
         verbose=0,
     )
diff --git a/tests/test_logger.py b/tests/test_logger.py
index 903858bc5..26e642ac4 100644
--- a/tests/test_logger.py
+++ b/tests/test_logger.py
@@ -157,7 +157,7 @@ def test_step():
 
     # Test with custom color
     custom_color = Fore.RED
     step_str_colored = logger._print_step(
-        optimizer._space.keys, optimizer._space.res()[-1], optimizer._space.params_config, colour=custom_color
+        optimizer._space.keys, optimizer._space.res()[-1], optimizer._space.params_config, color=custom_color
     )
     assert custom_color in step_str_colored
diff --git a/tests/test_target_space.py b/tests/test_target_space.py
index c269569da..dad89986b 100644
--- a/tests/test_target_space.py
+++ b/tests/test_target_space.py
@@ -2,8 +2,8 @@
 
 import numpy as np
 import pytest
+from scipy.optimize import NonlinearConstraint
 
-from bayes_opt.constraint import ConstraintModel
 from bayes_opt.exception import NotUniqueError
 from bayes_opt.target_space import TargetSpace
 
@@ -99,7 +99,7 @@ def test_register():
 
 
 def test_register_with_constraint():
-    constraint = ConstraintModel(lambda x: x, -2, 2, transform=lambda x: x)
+    constraint = NonlinearConstraint(lambda x: x, -2, 2)
     space = TargetSpace(target_func, PBOUNDS, constraint=constraint)
 
     assert len(space) == 0
@@ -194,7 +194,7 @@ def test_y_max():
 
 def test_y_max_with_constraint():
     PBOUNDS = {"p1": (0, 10), "p2": (1, 100)}
-    constraint = ConstraintModel(lambda p1, p2: p1 - p2, -2, 2)
+    constraint = NonlinearConstraint(lambda p1, p2: p1 - p2, -2, 2)
     space = TargetSpace(target_func, PBOUNDS, constraint)
     assert space._target_max() is None
     space.probe(params={"p1": 1, "p2": 2})  # Feasible
@@ -228,7 +228,7 @@ def test_max():
 
 def test_max_with_constraint():
     PBOUNDS = {"p1": (0, 10), "p2": (1, 100)}
-    constraint = ConstraintModel(lambda p1, p2: p1 - p2, -2, 2)
+    constraint = NonlinearConstraint(lambda p1, p2: p1 - p2, -2, 2)
     space = TargetSpace(target_func, PBOUNDS, constraint=constraint)
 
     assert space.max() is None
@@ -241,7 +241,7 @@
 
 def test_max_with_constraint_identical_target_value():
     PBOUNDS = {"p1": (0, 10), "p2": (1, 100)}
-    constraint = ConstraintModel(lambda p1, p2: p1 - p2, -2, 2)
+    constraint = NonlinearConstraint(lambda p1, p2: p1 - p2, -2, 2)
     space = TargetSpace(target_func, PBOUNDS, constraint=constraint)
 
     assert space.max() is None
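Reviewer note: the test suite now builds constraints with `scipy.optimize.NonlinearConstraint` directly instead of `bayes_opt.constraint.ConstraintModel`. Below is a minimal sketch of the new usage, pieced together from the hunks above. `target_func` is a hypothetical stand-in for the module's fixture, and the sketch assumes `TargetSpace` wraps the raw scipy constraint internally, as these tests imply:

```python
from scipy.optimize import NonlinearConstraint

from bayes_opt.target_space import TargetSpace


def target_func(p1, p2):
    # Hypothetical stand-in for the target function used by the tests.
    return p1 + p2


PBOUNDS = {"p1": (0, 10), "p2": (1, 100)}

# Constrain p1 - p2 to the interval [-2, 2], as in test_max_with_constraint.
constraint = NonlinearConstraint(lambda p1, p2: p1 - p2, -2, 2)

space = TargetSpace(target_func, PBOUNDS, constraint=constraint)
space.probe(params={"p1": 1, "p2": 2})  # feasible: p1 - p2 = -1 lies in [-2, 2]
print(space.max())
```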