
Commit 2d433d2

esantorella authored and facebook-github-bot committed
Re-enable slow sensitivity plot tests (facebook#3893)
Summary:
Pull Request resolved: facebook#3893

These tests were removed in D74846187 because they were slow; D75569748 and D75712208 sped them up.

Reviewed By: saitcakmak

Differential Revision: D75712258

fbshipit-source-id: 24dd33697d9db4ec6bcc9ebf4a727181a8873906
1 parent a8e35ac commit 2d433d2
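
The speedup pattern is visible in the diff below: the expensive AxClient fixture moves out of TestSensitivityAnalysisPlot.setUp, which ran before every test in the class, into a module-level get_test_client() factory that only the one test needing it (test_compute_adhoc) calls. A condensed, standalone sketch of that helper, reusing the names and API calls from the diff (the test class itself is elided):

from ax.service.ax_client import AxClient, ObjectiveProperties
from ax.utils.testing.mock import mock_botorch_optimize


@mock_botorch_optimize
def get_test_client() -> AxClient:
    # Build a one-parameter experiment and run ten trials of bar = x**2.
    # This is the slow setup that previously ran before *every* test.
    client = AxClient()
    client.create_experiment(
        is_test=True,
        name="foo",
        parameters=[{"name": "x", "type": "range", "bounds": [-1.0, 1.0]}],
        objectives={"bar": ObjectiveProperties(minimize=True)},
    )
    for _ in range(10):
        parameterization, trial_index = client.get_next_trial()
        client.complete_trial(
            trial_index=trial_index, raw_data={"bar": parameterization["x"] ** 2}
        )
    return client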

File tree: 1 file changed (+54, −63 lines)


ax/analysis/plotly/tests/test_sensitivity.py

Lines changed: 54 additions & 63 deletions
@@ -5,6 +5,8 @@
 
 # pyre-strict
 
+from itertools import product
+
 from ax.adapter.registry import Generators
 from ax.analysis.analysis import (
     AnalysisBlobAnnotation,
@@ -20,39 +22,37 @@
2022
from ax.exceptions.core import UserInputError
2123
from ax.service.ax_client import AxClient, ObjectiveProperties
2224
from ax.utils.common.testutils import TestCase
23-
from ax.utils.testing.core_stubs import (
24-
get_offline_experiments_subset,
25-
get_online_experiments_subset,
26-
)
25+
from ax.utils.testing.core_stubs import get_offline_experiments, get_online_experiments
2726
from ax.utils.testing.mock import mock_botorch_optimize
2827
from ax.utils.testing.modeling_stubs import get_default_generation_strategy_at_MBM_node
2928
from pyre_extensions import assert_is_instance, none_throws
3029

3130

32-
class TestSensitivityAnalysisPlot(TestCase):
33-
@mock_botorch_optimize
34-
def setUp(self) -> None:
35-
super().setUp()
36-
self.client = AxClient()
37-
self.client.create_experiment(
38-
is_test=True,
39-
name="foo",
40-
parameters=[
41-
{
42-
"name": "x",
43-
"type": "range",
44-
"bounds": [-1.0, 1.0],
45-
}
46-
],
47-
objectives={"bar": ObjectiveProperties(minimize=True)},
31+
@mock_botorch_optimize
32+
def get_test_client() -> AxClient:
33+
client = AxClient()
34+
client.create_experiment(
35+
is_test=True,
36+
name="foo",
37+
parameters=[
38+
{
39+
"name": "x",
40+
"type": "range",
41+
"bounds": [-1.0, 1.0],
42+
}
43+
],
44+
objectives={"bar": ObjectiveProperties(minimize=True)},
45+
)
46+
47+
for _ in range(10):
48+
parameterization, trial_index = client.get_next_trial()
49+
client.complete_trial(
50+
trial_index=trial_index, raw_data={"bar": parameterization["x"] ** 2}
4851
)
52+
return client
4953

50-
for _ in range(10):
51-
parameterization, trial_index = self.client.get_next_trial()
52-
self.client.complete_trial(
53-
trial_index=trial_index, raw_data={"bar": parameterization["x"] ** 2}
54-
)
5554

55+
class TestSensitivityAnalysisPlot(TestCase):
5656
@mock_botorch_optimize
5757
def test_compute(self) -> None:
5858
client = Client()
@@ -120,10 +120,9 @@ def test_compute(self) -> None:
     @mock_botorch_optimize
     def test_compute_adhoc(self) -> None:
         metric_mapping = {"bar": "spunky"}
-        data = self.client.experiment.lookup_data()
-        adapter = Generators.BOTORCH_MODULAR(
-            experiment=self.client.experiment, data=data
-        )
+        client = get_test_client()
+        data = client.experiment.lookup_data()
+        adapter = Generators.BOTORCH_MODULAR(experiment=client.experiment, data=data)
         cards = compute_sensitivity_adhoc(adapter=adapter, labels=metric_mapping)
         self.assertEqual(len(cards), 1)
         card = cards[0]
@@ -133,24 +132,19 @@ def test_compute_adhoc(self) -> None:
     @mock_botorch_optimize
     @TestCase.ax_long_test(reason="Expensive to compute Sobol indicies")
     def test_online(self) -> None:
-        # Test SensitivityAnalysisPlot can be computed for a variety of experiments
-        # which resemble those we see in an online setting, in analogous tests we
-        # run all experiments with modifications to settings, however, this test
-        # is slow and so we limit the number of permutations we validate.
-        order = "total"  # most common
-
-        for experiment in get_online_experiments_subset():
-            for top_k in [None, 1]:
-                generation_strategy = get_default_generation_strategy_at_MBM_node(
-                    experiment=experiment
-                )
+        for experiment in get_online_experiments():
+            generation_strategy = get_default_generation_strategy_at_MBM_node(
+                experiment=experiment
+            )
+            # Select an arbitrary metric from the optimization config
+            metric_names = [
+                none_throws(experiment.optimization_config).objective.metric_names[0]
+            ]
+            for order, top_k in product(["first", "second", "total"], [None, 1]):
                 analysis = SensitivityAnalysisPlot(
-                    # Select and arbitrary metric from the optimization config
-                    metric_names=[
-                        none_throws(
-                            experiment.optimization_config
-                        ).objective.metric_names[0]
-                    ],
+                    metric_names=metric_names,
+                    # pyre-fixme: Incompatible parameter type [6]: It isn't sure
+                    # if "order" has one of the values specified by the Literal
                     order=order,
                     top_k=top_k,
                 )
@@ -162,28 +156,25 @@ def test_online(self) -> None:
     @mock_botorch_optimize
     @TestCase.ax_long_test(reason="Expensive to compute Sobol indicies")
     def test_offline(self) -> None:
-        # Test SensitivityAnalysisPlot can be computed for a variety of experiments
-        # which resemble those we see in an offline setting, in analogous tests we
-        # run all experiments with modifications to settings, however, this test
-        # is slow and so we limit the number of permutations we validate.
-        order = "total"  # most common
-
-        for experiment in get_offline_experiments_subset():
-            for top_k in [None, 1]:
-                generation_strategy = get_default_generation_strategy_at_MBM_node(
-                    experiment=experiment
-                )
+        for experiment in get_offline_experiments():
+            generation_strategy = get_default_generation_strategy_at_MBM_node(
+                experiment=experiment
+            )
+            # Select an arbitrary metric from the optimization config
+            metric_names = [
+                none_throws(experiment.optimization_config).objective.metric_names[0]
+            ]
+            for order, top_k in product(["first", "second", "total"], [None, 1]):
                 analysis = SensitivityAnalysisPlot(
-                    # Select and arbitrary metric from the optimization config
-                    metric_names=[
-                        none_throws(
-                            experiment.optimization_config
-                        ).objective.metric_names[0]
-                    ],
+                    metric_names=metric_names,
+                    # pyre-fixme: Incompatible parameter type [6]: It isn't sure
+                    # if "order" has one of the values specified by the Literal
                     order=order,
                     top_k=top_k,
                 )
 
+            # This prints a lot of warnings about y being constant
+            # because the first MOO experiment produces constant data
             _ = analysis.compute(
                 experiment=experiment, generation_strategy=generation_strategy
            )
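
For reference, the itertools.product loop introduced above widens the test matrix: the old subset-based tests pinned order = "total" and varied only top_k, two combinations per experiment, while the new loop exercises all six (order, top_k) pairs. A minimal illustration of that expansion (standalone, no Ax dependency):

from itertools import product

# Enumerates ("first", None), ("first", 1), ("second", None),
# ("second", 1), ("total", None), ("total", 1) -- six combinations.
for order, top_k in product(["first", "second", "total"], [None, 1]):
    print(order, top_k)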
