Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
64 changes: 41 additions & 23 deletions ax/adapter/adapter_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,12 +41,16 @@
from ax.core.types import TBounds, TCandidateMetadata, TNumeric
from ax.exceptions.core import DataRequiredError, UserInputError
from ax.generators.torch.botorch_moo_utils import (
get_weighted_mc_objective_and_objective_thresholds,
get_weighted_mc_objective,
pareto_frontier_evaluator,
)
from ax.utils.common.constants import Keys
from ax.utils.common.hash_utils import get_current_lilo_hash
from ax.utils.common.logger import get_logger
from ax.utils.common.sympy import (
extract_metric_weights_from_objective_expr,
parse_objective_expression,
)
from ax.utils.common.typeutils import (
assert_is_instance_of_tuple,
assert_is_instance_optional,
Expand Down Expand Up @@ -208,15 +212,21 @@ def extract_objective_thresholds(
outcomes: list[str],
metric_name_to_signature: Mapping[str, str],
) -> npt.NDArray | None:
"""Extracts objective thresholds' values, in the order of `outcomes`.
"""Extracts objective thresholds' values, in the order of objectives.
Will return None if no objective thresholds or if the objective is single-
objective. Otherwise the extracted array will have length ``n_objectives``
(matching the rows of the objective weight matrix).
Will return None if no objective thresholds, otherwise the extracted tensor
will be the same length as `outcomes`.
Objectives that do not have a corresponding objective threshold will be
given a threshold of NaN. We will later infer appropriate threshold values
for those objectives.
Outcomes that are not part of an objective and the objectives that do not have
a corresponding objective threshold will be given a threshold of NaN. We will
later infer appropriate threshold values for the objectives that are given a
threshold of NaN.
The returned thresholds are maximization-aligned: for minimize objectives,
the threshold is negated. E.g., an outcome we want to maximize with a
threshold of at least 5 returns 5. An outcome we want to minimize with a
threshold of no more than 5 returns -5, since we maximize the negative of
the outcome internally.
Args:
objective_thresholds: Objective thresholds to extract values from.
Expand All @@ -225,7 +235,7 @@ def extract_objective_thresholds(
metric_name_to_signature: Mapping from metric names to signatures.
Returns:
(n,) array of thresholds
``(n_objectives,)`` array of maximization-aligned thresholds, or None.
"""
if len(objective_thresholds) == 0:
return None
Expand All @@ -250,11 +260,23 @@ def extract_objective_thresholds(
f"Got {objective_thresholds=} and {objective=}."
)

# Initialize these to be NaN to make sure that objective thresholds for
# non-objective metrics are never used.
obj_t = np.full(len(outcomes), float("nan"))
for metric, threshold in objective_threshold_dict.items():
obj_t[outcomes.index(metric)] = threshold
if not objective.is_multi_objective:
# Single objective — thresholds not applicable.
return None

parsed = parse_objective_expression(objective.expression)
sub_exprs = parsed if isinstance(parsed, tuple) else (parsed,)
n_objectives = len(sub_exprs)
obj_t = np.full(n_objectives, float("nan"))
for i, sub_expr in enumerate(sub_exprs):
sub_mw = extract_metric_weights_from_objective_expr(sub_expr)
if len(sub_mw) > 1:
continue # Scalarized sub-objective — NaN, will be inferred later.
name, weight = sub_mw[0]
sig = metric_name_to_signature[name]
if sig in objective_threshold_dict:
sign = 1.0 if weight > 0 else -1.0
obj_t[i] = sign * objective_threshold_dict[sig]
return obj_t


Expand Down Expand Up @@ -769,10 +791,8 @@ def pareto_frontier(
if obj_t is None:
return frontier_observations

# Apply appropriate weights and thresholds
obj, obj_t = get_weighted_mc_objective_and_objective_thresholds(
objective_weights=obj_w, objective_thresholds=obj_t
)
# Apply appropriate weights
obj = get_weighted_mc_objective(objective_weights=obj_w)
f_t = obj(f)

# Compute individual hypervolumes by taking the difference between the observation
Expand Down Expand Up @@ -937,15 +957,13 @@ def hypervolume(
dtype=torch.bool,
device=f.device,
)
# Apply appropriate weights and thresholds
obj, obj_t = get_weighted_mc_objective_and_objective_thresholds(
objective_weights=obj_w, objective_thresholds=none_throws(obj_t)
)
# Apply appropriate weights
obj = get_weighted_mc_objective(objective_weights=obj_w)
f_t = obj(f)
obj_mask = (obj_w != 0).any(dim=0).nonzero().view(-1)
selected_metrics_mask = selected_metrics_mask[obj_mask]
f_t = f_t[:, selected_metrics_mask]
obj_t = obj_t[selected_metrics_mask]
obj_t = none_throws(obj_t)[selected_metrics_mask]
bd = DominatedPartitioning(ref_point=obj_t, Y=f_t)
return bd.compute_hypervolume().item()

Expand Down
6 changes: 3 additions & 3 deletions ax/adapter/tests/test_torch_moo_adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -199,10 +199,10 @@ def helper_test_pareto_frontier(
)
)
self.assertTrue(obj_t is not None)
# Thresholds are now (n_objectives,) and maximization-aligned.
# LEQ thresholds with bound=5.0 become -5.0 after sign flip.
self.assertTrue(
torch.equal(
none_throws(obj_t)[:2], torch.full((2,), 5.0, dtype=torch.double)
)
torch.equal(none_throws(obj_t), torch.full((2,), -5.0, dtype=torch.double))
)
observed_frontier2 = pareto_frontier(
adapter=adapter,
Expand Down
59 changes: 46 additions & 13 deletions ax/adapter/tests/test_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ def test_extract_objective_thresholds(self) -> None:
for i, name in enumerate(outcomes[:3])
]

# None of no thresholds
# None if no thresholds
self.assertIsNone(
extract_objective_thresholds(
objective_thresholds=[],
Expand All @@ -150,17 +150,17 @@ def test_extract_objective_thresholds(self) -> None:
)
)

# Working case
# Working case: 3 objectives (all maximize), shape is (3,)
obj_t = extract_objective_thresholds(
objective_thresholds=objective_thresholds,
objective=objective,
outcomes=outcomes,
metric_name_to_signature=metric_name_to_signature,
)
expected_obj_t_not_nan = np.array([2.0, 3.0, 4.0])
self.assertTrue(np.array_equal(obj_t[:3], expected_obj_t_not_nan[:3]))
self.assertTrue(np.isnan(obj_t[-1]))
self.assertEqual(obj_t.shape[0], 4)
# All maximize, so thresholds are unchanged (sign = +1).
expected_obj_t = np.array([2.0, 3.0, 4.0])
self.assertTrue(np.array_equal(obj_t, expected_obj_t))
self.assertEqual(obj_t.shape[0], 3)

# Returns NaN for objectives without a threshold.
obj_t = extract_objective_thresholds(
Expand All @@ -169,8 +169,9 @@ def test_extract_objective_thresholds(self) -> None:
outcomes=outcomes,
metric_name_to_signature=metric_name_to_signature,
)
self.assertTrue(np.array_equal(obj_t[:2], expected_obj_t_not_nan[:2]))
self.assertTrue(np.isnan(obj_t[-2:]).all())
self.assertTrue(np.array_equal(obj_t[:2], expected_obj_t[:2]))
self.assertTrue(np.isnan(obj_t[2]))
self.assertEqual(obj_t.shape[0], 3)

# Fails if a threshold does not have a corresponding metric.
objective2 = Objective(expression="m1")
Expand All @@ -182,16 +183,48 @@ def test_extract_objective_thresholds(self) -> None:
metric_name_to_signature=metric_name_to_signature,
)

# Works with a single objective, single threshold
# Single objective returns None.
self.assertIsNone(
extract_objective_thresholds(
objective_thresholds=objective_thresholds[:1],
objective=objective2,
outcomes=outcomes,
metric_name_to_signature=metric_name_to_signature,
)
)

# Maximize-alignment: minimize objectives get negated thresholds.
objective_with_min = MultiObjective(
objectives=[
Objective(metric=Metric("m1"), minimize=False),
Objective(metric=Metric("m2"), minimize=True),
]
)
obj_thresholds_for_min = [
ObjectiveThreshold(
metric=Metric("m1"),
op=ComparisonOp.LEQ,
bound=2.0,
relative=False,
),
ObjectiveThreshold(
metric=Metric("m2"),
op=ComparisonOp.LEQ,
bound=3.0,
relative=False,
),
]
obj_t = extract_objective_thresholds(
objective_thresholds=objective_thresholds[:1],
objective=objective2,
objective_thresholds=obj_thresholds_for_min,
objective=objective_with_min,
outcomes=outcomes,
metric_name_to_signature=metric_name_to_signature,
)
# m1 maximize: sign=+1, threshold=2.0 → 2.0
# m2 minimize: sign=-1, threshold=3.0 → -3.0
self.assertEqual(obj_t.shape[0], 2)
self.assertEqual(obj_t[0], 2.0)
self.assertTrue(np.all(np.isnan(obj_t[1:])))
self.assertEqual(obj_t.shape[0], 4)
self.assertEqual(obj_t[1], -3.0)

# Fails if relative
objective_thresholds[2] = ObjectiveThreshold(
Expand Down
6 changes: 4 additions & 2 deletions ax/adapter/torch.py
Original file line number Diff line number Diff line change
Expand Up @@ -1153,12 +1153,14 @@ def _untransform_objective_thresholds(
"""
obj_indices, obj_weights = extract_objectives(objective_weights)
thresholds = []
for idx, w in zip(obj_indices, obj_weights):
for i, (idx, w) in enumerate(zip(obj_indices, obj_weights)):
sign = torch.sign(w)
# Thresholds are maximization-aligned; undo sign flip to get raw bound.
raw_bound = float(sign * objective_thresholds[i].item())
thresholds.append(
ObjectiveThreshold(
metric=opt_config_metrics[self.outcomes[idx]],
bound=float(objective_thresholds[idx].item()),
bound=raw_bound,
relative=False,
op=ComparisonOp.LEQ if sign < 0 else ComparisonOp.GEQ,
)
Expand Down
50 changes: 21 additions & 29 deletions ax/generators/tests/test_botorch_moo_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,12 +11,11 @@
from unittest import mock
from warnings import catch_warnings, simplefilter

import numpy as np
import torch
from ax.core.search_space import SearchSpaceDigest
from ax.generators.torch.botorch_modular.generator import BoTorchGenerator
from ax.generators.torch.botorch_moo_utils import (
get_weighted_mc_objective_and_objective_thresholds,
get_weighted_mc_objective,
infer_objective_thresholds,
pareto_frontier_evaluator,
)
Expand Down Expand Up @@ -68,7 +67,8 @@ def setUp(self) -> None:
]
)
self.Yvar = torch.zeros(5, 3)
self.objective_thresholds = torch.tensor([0.5, 1.5, float("nan")])
# Thresholds are (n_objectives,) in maximization-aligned space.
self.objective_thresholds = torch.tensor([0.5, 1.5])
self.objective_weights = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])

def test_pareto_frontier_raise_error_when_missing_data(self) -> None:
Expand Down Expand Up @@ -110,11 +110,12 @@ def test_pareto_frontier_evaluator_raw(self) -> None:
self.assertAllClose(expected_cov, cov)
self.assertTrue(torch.equal(torch.arange(2, 5), indx))

# Change objective_weights so goal is to minimize b
# Change objective_weights so goal is to minimize b.
# Thresholds in maximization-aligned space: [0.5, -1.5].
Y, cov, indx = pareto_frontier_evaluator(
model=model,
objective_weights=torch.tensor([[1.0, 0.0, 0.0], [0.0, -1.0, 0.0]]),
objective_thresholds=self.objective_thresholds,
objective_thresholds=torch.tensor([0.5, -1.5]),
Y=self.Y,
Yvar=Yvar,
)
Expand Down Expand Up @@ -213,19 +214,13 @@ def test_pareto_frontier_evaluator_with_nan(self) -> None:


class BotorchMOOUtilsTest(TestCase):
def test_get_weighted_mc_objective_and_objective_thresholds(self) -> None:
def test_get_weighted_mc_objective(self) -> None:
objective_weights = torch.tensor([[0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
objective_thresholds = torch.arange(4, dtype=torch.float)
(
weighted_obj,
new_obj_thresholds,
) = get_weighted_mc_objective_and_objective_thresholds(
weighted_obj = get_weighted_mc_objective(
objective_weights=objective_weights,
objective_thresholds=objective_thresholds,
)
self.assertTrue(torch.equal(weighted_obj.weights, torch.tensor([1.0, 1.0])))
self.assertEqual(weighted_obj.outcomes.tolist(), [1, 3])
self.assertTrue(torch.equal(new_obj_thresholds, objective_thresholds[[1, 3]]))

# test infer objective thresholds alone
@mock.patch( # pyre-ignore
Expand Down Expand Up @@ -255,6 +250,10 @@ def test_infer_objective_thresholds(self, _, cuda: bool = False) -> None:
objective_weights = torch.tensor(
[[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], **tkwargs
)
# Expected: infer_reference_point returns (n_objectives,) in
# maximization-aligned space. With pareto_Y=[[-9, -3]] and
# scale=0.1, the result is [-9.9, -3.3].
expected_thresholds = torch.tensor([-9.9, -3.3], **tkwargs)
with ExitStack() as es:
_mock_infer_reference_point = es.enter_context(
mock.patch(
Expand Down Expand Up @@ -282,10 +281,9 @@ def test_infer_objective_thresholds(self, _, cuda: bool = False) -> None:
torch.tensor([[-9.0, -3.0]], **tkwargs),
)
)
self.assertTrue(
torch.equal(obj_thresholds[:2], torch.tensor([9.9, 3.3], **tkwargs))
)
self.assertTrue(np.isnan(obj_thresholds[2].item()))
# Result is (n_objectives,) maximization-aligned.
self.assertEqual(obj_thresholds.shape[0], 2)
self.assertTrue(torch.equal(obj_thresholds, expected_thresholds))

# test subset_model without subset_idcs
with mock.patch.object(model, "posterior", return_value=posterior):
Expand All @@ -295,10 +293,8 @@ def test_infer_objective_thresholds(self, _, cuda: bool = False) -> None:
outcome_constraints=outcome_constraints,
X_observed=Xs[0],
)
self.assertTrue(
torch.equal(obj_thresholds[:2], torch.tensor([9.9, 3.3], **tkwargs))
)
self.assertTrue(np.isnan(obj_thresholds[2].item()))
self.assertEqual(obj_thresholds.shape[0], 2)
self.assertTrue(torch.equal(obj_thresholds, expected_thresholds))
# test passing subset_idcs
subset_idcs = torch.tensor(
[0, 1], dtype=torch.long, device=tkwargs["device"]
Expand All @@ -312,10 +308,8 @@ def test_infer_objective_thresholds(self, _, cuda: bool = False) -> None:
X_observed=Xs[0],
subset_idcs=subset_idcs,
)
self.assertTrue(
torch.equal(obj_thresholds[:2], torch.tensor([9.9, 3.3], **tkwargs))
)
self.assertTrue(np.isnan(obj_thresholds[2].item()))
self.assertEqual(obj_thresholds.shape[0], 2)
self.assertTrue(torch.equal(obj_thresholds, expected_thresholds))
# test without subsetting (e.g. if there are
# 3 metrics for 2 objectives + 1 outcome constraint)
outcome_constraints = (
Expand Down Expand Up @@ -350,10 +344,8 @@ def test_infer_objective_thresholds(self, _, cuda: bool = False) -> None:
X_observed=Xs[0],
outcome_constraints=outcome_constraints,
)
self.assertTrue(
torch.equal(obj_thresholds[:2], torch.tensor([9.9, 3.3], **tkwargs))
)
self.assertTrue(np.isnan(obj_thresholds[2].item()))
self.assertEqual(obj_thresholds.shape[0], 2)
self.assertTrue(torch.equal(obj_thresholds, expected_thresholds))

def test_infer_objective_thresholds_cuda(self) -> None:
if torch.cuda.is_available():
Expand Down
Loading