diff --git a/ax/analysis/plotly/objective_p_feasible_frontier.py b/ax/analysis/plotly/objective_p_feasible_frontier.py
index bace1a802ac..d0cccc647e5 100644
--- a/ax/analysis/plotly/objective_p_feasible_frontier.py
+++ b/ax/analysis/plotly/objective_p_feasible_frontier.py
@@ -26,8 +26,8 @@
 from ax.core.arm import Arm
 from ax.core.experiment import Experiment
 from ax.core.optimization_config import MultiObjectiveOptimizationConfig
-from ax.core.outcome_constraint import ScalarizedOutcomeConstraint
 from ax.core.trial_status import TrialStatus
+from ax.exceptions.core import UnsupportedError
 from ax.generation_strategy.generation_strategy import GenerationStrategy
 from ax.generators.torch.botorch_modular.generator import BoTorchGenerator
 from ax.generators.torch.botorch_modular.multi_acquisition import MultiAcquisition
@@ -133,6 +133,9 @@ def validate_applicable_state(
         if isinstance(experiment.optimization_config, MultiObjectiveOptimizationConfig):
             return "Multi-objective optimization is not supported."
 
+        if experiment.optimization_config.objective.is_scalarized_objective:
+            return "Scalarized objectives are not supported."
+
         if len(experiment.optimization_config.outcome_constraints) == 0:
             return (
                 "Plotting the objective-p(feasible) frontier requires at least one "
@@ -140,7 +143,7 @@ def validate_applicable_state(
             )
 
         if any(
-            isinstance(oc, ScalarizedOutcomeConstraint)
+            len(oc.metric_names) > 1
             for oc in experiment.optimization_config.outcome_constraints
         ):
             return "Scalarized outcome constraints are not supported yet."
@@ -220,7 +223,14 @@ def compute(
             trial_statuses=self.trial_statuses,
         )
 
-        objective_name = optimization_config.objective.metric_names[0]
+        objective = optimization_config.objective
+        if objective.is_scalarized_objective:
+            raise UnsupportedError(
+                "ObjectivePFeasibleFrontierPlot is not supported for "
+                "scalarized objectives. The objective is a combination of "
+                "metrics, not a single metric."
+            )
+        objective_name = objective.metric_names[0]
 
         fig = _prepare_figure_scatter(
             df=df,
@@ -230,7 +240,7 @@ def compute(
             y_metric_label="% Chance of Satisfying the Constraints",
             is_relative=self.relativize,
             show_pareto_frontier=False,
-            x_lower_is_better=optimization_config.objective.minimize,
+            x_lower_is_better=objective.minimize,
             y_lower_is_better=False,
         )
 
diff --git a/ax/analysis/plotly/tests/test_objective_p_feasible_frontier.py b/ax/analysis/plotly/tests/test_objective_p_feasible_frontier.py
index 37fac1dbbd6..0ad22c09dd1 100644
--- a/ax/analysis/plotly/tests/test_objective_p_feasible_frontier.py
+++ b/ax/analysis/plotly/tests/test_objective_p_feasible_frontier.py
@@ -18,14 +18,10 @@
     MultiObjectiveOptimizationConfig,
     OptimizationConfig,
 )
-from ax.core.outcome_constraint import OutcomeConstraint, ScalarizedOutcomeConstraint
+from ax.core.outcome_constraint import OutcomeConstraint
 from ax.core.trial_status import DEFAULT_ANALYSIS_STATUSES, TrialStatus
-from ax.core.types import ComparisonOp
 from ax.utils.common.testutils import TestCase
-from ax.utils.testing.core_stubs import (
-    get_branin_experiment_with_multi_objective,
-    get_branin_metric,
-)
+from ax.utils.testing.core_stubs import get_branin_experiment_with_multi_objective
 from ax.utils.testing.mock import mock_botorch_optimize
 from botorch.utils.testing import skip_if_import_error
 from pyre_extensions import none_throws
@@ -200,21 +196,8 @@ def test_validate_applicable_state(self) -> None:
                 )
             ),
         )
-        self.experiment.add_tracking_metric(get_branin_metric("branin2"))
-        # Get only tracking metrics, excluding the objective metric to avoid
-        # "Cannot constrain on objective metric" error
-        constraint_metrics = [
-            self.experiment.metrics["branin_b"],
-            self.experiment.metrics["branin_c"],
-        ]
         opt_config.outcome_constraints = [
-            ScalarizedOutcomeConstraint(
-                metrics=constraint_metrics,
-                weights=[1.0, 1.0],
-                relative=False,
-                bound=10.0,
-                op=ComparisonOp.LEQ,
-            )
+            OutcomeConstraint(expression="1.0*branin_b + 1.0*branin_c <= 10.0"),
         ]
         self.assertIn(
             "Scalarized outcome constraints are not supported yet.",
@@ -224,3 +207,20 @@
                 )
             ),
         )
+
+    def test_scalarized_objective_raises(self) -> None:
+        """Scalarized objectives should be rejected in validate_applicable_state."""
+        self.experiment.optimization_config = OptimizationConfig(
+            objective=Objective(expression="2*branin_a + -1*branin_b"),
+            outcome_constraints=none_throws(
+                self.experiment.optimization_config
+            ).outcome_constraints,
+        )
+        self.assertIn(
+            "Scalarized objectives are not supported.",
+            none_throws(
+                ObjectivePFeasibleFrontierPlot().validate_applicable_state(
+                    experiment=self.experiment
+                )
+            ),
+        )
diff --git a/ax/plot/pareto_frontier.py b/ax/plot/pareto_frontier.py
index e24c1b4596b..fec10dd7fac 100644
--- a/ax/plot/pareto_frontier.py
+++ b/ax/plot/pareto_frontier.py
@@ -724,12 +724,15 @@ def _maybe_get_default_minimize_single_metric(
         optimization_config is not None
         and metric_name in optimization_config.objective.metric_names
     ):
-        if optimization_config.is_moo_problem:
-            for obj_metric_name, weight in optimization_config.objective.metric_weights:
+        objective = optimization_config.objective
+        if not objective.is_single_objective:
+            # Covers both multi-objective and scalarized objectives.
+            # For both, metric_weights encodes direction via sign.
+            for obj_metric_name, weight in objective.metric_weights:
                 if obj_metric_name == metric_name:
                     return weight < 0
         else:
-            return optimization_config.objective.minimize
+            return objective.minimize
 
     # Next try to get minimize from objective_thresholds
     if objective_thresholds is not None: