 from bayes_opt.logger import ScreenLogger
 from bayes_opt.target_space import TargetSpace
 from scipy.optimize import NonlinearConstraint
+from bayes_opt.domain_reduction import SequentialDomainReductionTransformer
+from bayes_opt.parameter import BayesParameter
+from bayes_opt.util import ensure_rng


 def target_func(**kwargs):
@@ -415,20 +418,16 @@ def str_target_func(param1: str, param2: str) -> float:
         assert optimizer.res[i]["params"] == new_optimizer.res[i]["params"]


-def test_probe_point_returns_same_point(tmp_path):
-    """Check that probe returns same point after save/load."""
+def test_suggest_point_returns_same_point(tmp_path):
+    """Check that suggest returns the same point after save/load."""
     optimizer = BayesianOptimization(
         f=target_func,
         pbounds=PBOUNDS,
         random_state=1,
         verbose=0
     )
-
-    optimizer.register(
-        params={"p1": 5.0, "p2": 5.0},
-        target=10.0
-    )
-
+    optimizer.maximize(init_points=2, n_iter=3)
+
     state_path = tmp_path / "optimizer_state.json"
     optimizer.save_state(state_path)

@@ -439,27 +438,34 @@ def test_probe_point_returns_same_point(tmp_path):
         verbose=0
     )
     new_optimizer.load_state(state_path)
-
-    # Both optimizers should probe the same point
-    point = {"p1": 1.5, "p2": 0.5}
-    probe1 = optimizer.probe(point)
-    probe2 = new_optimizer.probe(point)
-    assert probe1 == probe2
+
+    # Both optimizers should suggest the same point
+    suggestion1 = optimizer.suggest()
+    suggestion2 = new_optimizer.suggest()
+    assert suggestion1 == suggestion2


-def test_suggest_point_returns_same_point(tmp_path):
-    """Check that suggest returns same point after save/load."""
+def test_save_load_random_state(tmp_path):
+    """Test that the random state is properly preserved."""
+    # Initialize optimizer
     optimizer = BayesianOptimization(
         f=target_func,
         pbounds=PBOUNDS,
         random_state=1,
         verbose=0
     )
-    optimizer.maximize(init_points=2, n_iter=3)

+    # Register a point before saving
+    optimizer.probe(
+        params={"p1": 1, "p2": 2},
+        lazy=False
+    )
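+    # (lazy=False evaluates target_func immediately and registers the sample,
+    #  rather than queueing the point for a later maximize() call)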
+
+    # Save state
     state_path = tmp_path / "optimizer_state.json"
     optimizer.save_state(state_path)

+    # Create new optimizer with the same configuration
     new_optimizer = BayesianOptimization(
         f=target_func,
         pbounds=PBOUNDS,
@@ -474,35 +480,43 @@ def test_suggest_point_returns_same_point(tmp_path):
     assert suggestion1 == suggestion2


-def test_save_load_random_state(tmp_path):
-    """Test that random state is properly preserved."""
-    # Initialize optimizer
+def test_save_load_unused_optimizer(tmp_path):
+    """Test saving and loading optimizer state with an unused optimizer."""
     optimizer = BayesianOptimization(
         f=target_func,
         pbounds=PBOUNDS,
         random_state=1,
         verbose=0
     )

-    # Save state
-    state_path = tmp_path / "optimizer_state.json"
-    optimizer.save_state(state_path)
+    # Test that saving without samples raises an error
+    with pytest.raises(ValueError, match="Cannot save optimizer state before collecting any samples"):
+        optimizer.save_state(tmp_path / "optimizer_state.json")
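+    # (pytest.raises treats `match` as a regular expression searched
+    #  against the string form of the raised exception)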
+
+    # Add a sample point
+    optimizer.probe(
+        params={"p1": 1, "p2": 2},
+        lazy=False
+    )

-    # Load state and get next suggestion
+    # Now saving should work
+    optimizer.save_state(tmp_path / "optimizer_state.json")
+
     new_optimizer = BayesianOptimization(
         f=target_func,
         pbounds=PBOUNDS,
         random_state=1,
         verbose=0
     )
+    new_optimizer.load_state(tmp_path / "optimizer_state.json")

-    # Both optimizers should suggest the same point
-    suggestion1 = optimizer.suggest()
-    suggestion2 = new_optimizer.suggest()
-    assert suggestion1 == suggestion2
+    assert len(optimizer.space) == len(new_optimizer.space)
+    assert optimizer.max["target"] == new_optimizer.max["target"]
+    assert optimizer.max["params"] == new_optimizer.max["params"]
+    np.testing.assert_array_equal(optimizer.space.params, new_optimizer.space.params)
+    np.testing.assert_array_equal(optimizer.space.target, new_optimizer.space.target)


 def test_save_load_w_constraint(tmp_path):
     """Test saving and loading optimizer state with constraints."""
     def constraint_func(x: float, y: float) -> float:
         return x + y  # Simple constraint: sum of parameters should be within bounds
@@ -574,3 +588,146 @@ def constraint_func(x: float, y: float) -> float:
     assert 0.0 <= constraint_value <= 3.0, "Suggested point violates constraint"


+def test_save_load_w_domain_reduction(tmp_path):
+    """Test saving and loading optimizer state with a domain reduction transformer."""
+    # Initialize optimizer with bounds transformer
+    bounds_transformer = SequentialDomainReductionTransformer()
+    optimizer = BayesianOptimization(
+        f=target_func,
+        pbounds=PBOUNDS,
+        random_state=1,
+        verbose=0,
+        bounds_transformer=bounds_transformer
+    )
+
+    # Run some iterations to trigger domain reduction
+    optimizer.maximize(init_points=2, n_iter=3)
+
+    # Save state
+    state_path = tmp_path / "optimizer_state.json"
+    optimizer.save_state(state_path)
+
+    # Create new optimizer with the same configuration
+    new_bounds_transformer = SequentialDomainReductionTransformer()
+    new_optimizer = BayesianOptimization(
+        f=target_func,
+        pbounds=PBOUNDS,
+        random_state=1,
+        verbose=0,
+        bounds_transformer=new_bounds_transformer
+    )
+    new_optimizer.load_state(state_path)
+
+    # Both optimizers should probe the same point
+    point = {"p1": 1.5, "p2": 0.5}
+    probe1 = optimizer.probe(point)
+    probe2 = new_optimizer.probe(point)
+    assert probe1 == probe2
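+    # (probe() with the default lazy=True only queues the point, so this
+    #  equality is a smoke test; the substantive checks follow below)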
+
+    # Both optimizers should suggest the same point
+    suggestion1 = optimizer.suggest()
+    suggestion2 = new_optimizer.suggest()
+    assert suggestion1 == suggestion2
+
+    # Verify that the transformed bounds match
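+    # (the transformer adjusts _space.bounds during maximize(), so matching
+    #  bounds imply the transformer's effect survived the save/load round trip)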
+    assert optimizer._space.bounds.tolist() == new_optimizer._space.bounds.tolist()


+def test_save_load_w_custom_parameter(tmp_path):
+    """Test saving and loading optimizer state with custom parameter types."""
+
+    class FixedPerimeterTriangleParameter(BayesParameter):
+        def __init__(self, name: str, bounds, perimeter) -> None:
+            super().__init__(name, bounds)
+            self.perimeter = perimeter
+
+        @property
+        def is_continuous(self):
+            return True
+
+        def random_sample(self, n_samples: int, random_state):
+            random_state = ensure_rng(random_state)
+            samples = []
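+            # Dirichlet draws lie on the unit simplex, so scaling by the
+            # perimeter gives side lengths that sum to the perimeter;
+            # rows outside the per-side bounds are rejected and redrawn.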
+            while len(samples) < n_samples:
+                samples_ = random_state.dirichlet(np.ones(3), n_samples)
+                samples_ = samples_ * self.perimeter  # scale samples by perimeter
+
+                samples_ = samples_[np.all((self.bounds[:, 0] <= samples_) & (samples_ <= self.bounds[:, 1]), axis=-1)]
+                samples.extend(np.atleast_2d(samples_))
+            samples = np.array(samples[:n_samples])
+            return samples
+
+        def to_float(self, value):
+            return value
+
+        def to_param(self, value):
+            return value * self.perimeter / sum(value)
+
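+        # kernel_transform renormalizes each row so the GP kernel always
+        # sees side lengths scaled to the fixed perimeter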
+        def kernel_transform(self, value):
+            return value * self.perimeter / np.sum(value, axis=-1, keepdims=True)
+
+        def to_string(self, value, str_len: int) -> str:
+            len_each = (str_len - 2) // 3
+            str_ = '|'.join([f"{float(np.round(value[i], 4))}"[:len_each] for i in range(3)])
+            return str_.ljust(str_len)
+
+        @property
+        def dim(self):
+            return 3  # three float values, each representing the length of one side
+
+    def area_of_triangle(sides):
+        a, b, c = sides
+        s = np.sum(sides, axis=-1)  # perimeter
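+        # (note: this is the full perimeter, not Heron's semi-perimeter;
+        #  with s = a + b + c every factor stays positive, and the maximum
+        #  is still attained by the equilateral triangle)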
+        A = np.sqrt(s * (s - a) * (s - b) * (s - c))
+        return A
+
+    # Create parameter and bounds
+    param = FixedPerimeterTriangleParameter(
+        name='sides',
+        bounds=np.array([[0., 1.], [0., 1.], [0., 1.]]),
+        perimeter=1.
+    )
+    pbounds = {'sides': param}
+
+    # Initialize first optimizer
+    optimizer = BayesianOptimization(
+        f=area_of_triangle,
+        pbounds=pbounds,
+        random_state=1,
+        verbose=0
+    )
+
+    # Run iterations and immediately save state
+    optimizer.maximize(init_points=2, n_iter=5)
+
+    # Force GP update before saving
+    optimizer._gp.fit(optimizer.space.params, optimizer.space.target)
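+    # (suggest() refits the GP internally on each call; fitting here ensures
+    #  the state written by save_state reflects every registered sample)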
+
+    # Save state
+    state_path = tmp_path / "optimizer_state.json"
+    optimizer.save_state(state_path)
+
+    # Create new optimizer and load state
+    new_optimizer = BayesianOptimization(
+        f=area_of_triangle,
+        pbounds=pbounds,
+        random_state=1,
+        verbose=0
+    )
+    new_optimizer.load_state(state_path)
+
+    suggestion1 = optimizer.suggest()
+    suggestion2 = new_optimizer.suggest()
+
+    # Compare suggestions to 10 decimal places
+    np.testing.assert_array_almost_equal(
+        suggestion1['sides'],
+        suggestion2['sides'],
+        decimal=10
+    )