import numpy.typing as npt
import torch
from botorch.exceptions.errors import CandidateGenerationError, UnsupportedError
- from scipy.optimize import Bounds
+ from botorch.optim.utils import columnwise_clamp
+ from scipy.optimize import Bounds, minimize
from torch import Tensor


ScipyConstraintDict = dict[
    str, Union[str, Callable[[np.ndarray], float], Callable[[np.ndarray], np.ndarray]]
]
- CONST_TOL = 1e-6
+
+
+ def get_constraint_tolerance(dtype: torch.dtype) -> float:
+     r"""Get the constraint tolerance for a given dtype.
+
+     Args:
+         dtype: The dtype to use.
+
+     Returns:
+         The constraint tolerance for the given dtype.
+     """
+     if dtype == torch.double:
+         return 1e-8
+     elif dtype == torch.float:
+         return 1e-6
+     elif dtype == torch.half:
+         return 1e-4
+     raise ValueError(f"Unsupported dtype {dtype}.")


def make_scipy_bounds(
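For illustration, a minimal usage sketch of the new helper (assuming it is importable from botorch.optim.parameter_constraints, the module that also defines make_scipy_bounds): the default constraint tolerance now tracks the precision of the input dtype instead of the fixed CONST_TOL = 1e-6.

    import torch

    # Assumed import path for this sketch; the helper is added in this module.
    from botorch.optim.parameter_constraints import get_constraint_tolerance

    # Tighter tolerance for double precision, looser for lower-precision dtypes:
    # double -> 1e-8, float -> 1e-6 (the old CONST_TOL), half -> 1e-4.
    for dtype in (torch.double, torch.float, torch.half):
        print(dtype, get_constraint_tolerance(dtype=dtype))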
@@ -513,7 +531,7 @@ def nonlinear_constraint_is_feasible(
    nonlinear_inequality_constraint: Callable,
    is_intrapoint: bool,
    x: Tensor,
-     tolerance: float = CONST_TOL,
+     tolerance: float | None = None,
) -> Tensor:
    """Checks if a nonlinear inequality constraint is fulfilled (within tolerance).
@@ -533,6 +551,8 @@ def nonlinear_constraint_is_feasible(
        A boolean tensor of shape (batch) indicating if the constraint is
        satisfied by the corresponding batch of `x`.
    """
+     if tolerance is None:
+         tolerance = get_constraint_tolerance(dtype=x.dtype)

    def check_x(x: Tensor) -> bool:
        return _arrayify(nonlinear_inequality_constraint(x)).item() >= -tolerance
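As a hedged sketch of the convention checked above (c is a hypothetical constraint callable, not part of the diff): a nonlinear inequality constraint is considered feasible when it evaluates to at least zero, and small violations no larger than the dtype-dependent tolerance are still accepted.

    import torch

    # Assumed import path for this sketch.
    from botorch.optim.parameter_constraints import get_constraint_tolerance

    # Hypothetical constraint: feasible when the coordinates sum to at most one.
    def c(x: torch.Tensor) -> torch.Tensor:
        return 1.0 - x.sum()

    x = torch.tensor([0.5, 0.5 + 1e-7], dtype=torch.float)  # violates by ~1e-7
    tolerance = get_constraint_tolerance(dtype=x.dtype)  # 1e-6 for float
    print(c(x).item() >= -tolerance)  # True: the violation is within tolerance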
@@ -615,7 +635,7 @@ def evaluate_feasibility(
    inequality_constraints: list[tuple[Tensor, Tensor, float]] | None = None,
    equality_constraints: list[tuple[Tensor, Tensor, float]] | None = None,
    nonlinear_inequality_constraints: list[tuple[Callable, bool]] | None = None,
-     tolerance: float = CONST_TOL,
+     tolerance: float | None = None,
) -> Tensor:
    r"""Evaluate feasibility of candidate points (within a tolerance).
@@ -657,6 +677,9 @@ def evaluate_feasibility(
        A boolean tensor of shape `batch` indicating if the corresponding candidate of
        shape `q x d` is feasible.
    """
+     if tolerance is None:
+         tolerance = get_constraint_tolerance(dtype=X.dtype)
+
    is_feasible = torch.ones(X.shape[:-2], device=X.device, dtype=torch.bool)
    if inequality_constraints is not None:
        for idx, coef, rhs in inequality_constraints:
@@ -691,3 +714,78 @@ def evaluate_feasibility(
                tolerance=tolerance,
            )
    return is_feasible
+
+
+ def project_to_feasible_space_via_slsqp(
+     X: Tensor,
+     bounds: Tensor,
+     inequality_constraints: list[tuple[Tensor, Tensor, float]] | None = None,
+     equality_constraints: list[tuple[Tensor, Tensor, float]] | None = None,
+ ) -> Tensor:
+     """Project X onto the feasible space by solving a quadratic program.
+
+     This uses SLSQP with gradients to solve the quadratic program.
+     NOTE: A proper specialized QP solver would be a better choice here,
+     but we'd like to avoid adding a dependency on additional packages.
+     SLSQP should be able to solve this reliably and quickly, since the
+     dimension is typically low and the number of constraints is limited.
+
+     Args:
+         X: A `(batch_shape x) n x d`-dim tensor of inputs.
+         bounds: A `2 x d`-dim tensor of lower and upper bounds.
+         inequality_constraints: A list of tuples (indices, coefficients, rhs),
+             with each tuple encoding an inequality constraint of the form
+             `sum_i (X[indices[i]] * coefficients[i]) >= rhs`. `indices` and
+             `coefficients` should be torch tensors. See the docstring of
+             `make_scipy_linear_constraints` for an example.
+         equality_constraints: A list of tuples (indices, coefficients, rhs),
+             encoding equality constraints of the same form with `=` in place
+             of `>=`.
+
+     Returns:
+         A `(batch_shape x) n x d`-dim tensor of projected values.
+     """
+     if inequality_constraints is None and equality_constraints is None:
+         return X
+     bounds_scipy = make_scipy_bounds(
+         X=X, lower_bounds=bounds[0], upper_bounds=bounds[1]
+     )
+     constraints = make_scipy_linear_constraints(
+         shapeX=X.shape,
+         inequality_constraints=inequality_constraints,
+         equality_constraints=equality_constraints,
+     )
+     # Define the squared distance objective (and its gradient) w.r.t. the original X.
+     X_np = X.flatten().detach().cpu().numpy()
+
+     def objective(x: np.ndarray):
+         return 0.5 * np.sum((x - X_np) ** 2)
+
+     def grad_objective(x: np.ndarray):
+         return x - X_np
+
+     x0 = (
+         columnwise_clamp(X=X, lower=bounds[0], upper=bounds[1], raise_on_violation=True)
+         .detach()
+         .cpu()
+         .numpy()
+         .flatten()
+     )
+     # NOTE: A proper specialized QP solver would be a better choice here,
+     # but we'd like to avoid adding a dependency on additional packages.
+     # SLSQP should be able to solve this reliably and quickly, since the
+     # dimension is typically low and the number of constraints is limited.
+     result = minimize(
+         fun=objective,
+         x0=x0,
+         method="SLSQP",
+         jac=grad_objective,
+         bounds=bounds_scipy,
+         constraints=constraints,
+         tol=get_constraint_tolerance(dtype=X.dtype),
+     )
+
+     if not result.success:
+         raise RuntimeError(f"Optimization failed: {result.message}")
+
+     return torch.from_numpy(result.x).to(X).view(X.shape)
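To close, a hedged usage sketch of the new projection helper (the import path and the example values are assumptions, not part of the diff): a candidate that violates a linear inequality constraint is moved the minimum distance, in the least-squares sense, needed to satisfy it while respecting the box bounds.

    import torch

    # Assumed import path for this sketch.
    from botorch.optim.parameter_constraints import project_to_feasible_space_via_slsqp

    bounds = torch.tensor([[0.0, 0.0], [1.0, 1.0]], dtype=torch.double)
    # One intra-point constraint: X[..., 0] + X[..., 1] >= 1.0.
    inequality_constraints = [
        (torch.tensor([0, 1]), torch.tensor([1.0, 1.0], dtype=torch.double), 1.0)
    ]
    X = torch.tensor([[[0.2, 0.3]]], dtype=torch.double)  # b x q x d, infeasible
    X_proj = project_to_feasible_space_via_slsqp(
        X=X,
        bounds=bounds,
        inequality_constraints=inequality_constraints,
    )
    # Expected (up to solver tolerance): the nearest feasible point, about [0.45, 0.55].
    print(X_proj)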