@@ -5,6 +5,8 @@
 
 # pyre-strict
 
+from itertools import product
+
 from ax.adapter.registry import Generators
 from ax.analysis.analysis import (
     AnalysisBlobAnnotation,
@@ -20,39 +22,37 @@
 from ax.exceptions.core import UserInputError
 from ax.service.ax_client import AxClient, ObjectiveProperties
 from ax.utils.common.testutils import TestCase
-from ax.utils.testing.core_stubs import (
-    get_offline_experiments_subset,
-    get_online_experiments_subset,
-)
+from ax.utils.testing.core_stubs import get_offline_experiments, get_online_experiments
 from ax.utils.testing.mock import mock_botorch_optimize
 from ax.utils.testing.modeling_stubs import get_default_generation_strategy_at_MBM_node
 from pyre_extensions import assert_is_instance, none_throws
 
 
-class TestSensitivityAnalysisPlot(TestCase):
-    @mock_botorch_optimize
-    def setUp(self) -> None:
-        super().setUp()
-        self.client = AxClient()
-        self.client.create_experiment(
-            is_test=True,
-            name="foo",
-            parameters=[
-                {
-                    "name": "x",
-                    "type": "range",
-                    "bounds": [-1.0, 1.0],
-                }
-            ],
-            objectives={"bar": ObjectiveProperties(minimize=True)},
+@mock_botorch_optimize
+def get_test_client() -> AxClient:
+    client = AxClient()
+    client.create_experiment(
+        is_test=True,
+        name="foo",
+        parameters=[
+            {
+                "name": "x",
+                "type": "range",
+                "bounds": [-1.0, 1.0],
+            }
+        ],
+        objectives={"bar": ObjectiveProperties(minimize=True)},
+    )
+
+    for _ in range(10):
+        parameterization, trial_index = client.get_next_trial()
+        client.complete_trial(
+            trial_index=trial_index, raw_data={"bar": parameterization["x"] ** 2}
         )
+    return client
 
-        for _ in range(10):
-            parameterization, trial_index = self.client.get_next_trial()
-            self.client.complete_trial(
-                trial_index=trial_index, raw_data={"bar": parameterization["x"] ** 2}
-            )
 
+class TestSensitivityAnalysisPlot(TestCase):
     @mock_botorch_optimize
     def test_compute(self) -> None:
         client = Client()
@@ -120,10 +120,9 @@ def test_compute(self) -> None:
     @mock_botorch_optimize
     def test_compute_adhoc(self) -> None:
         metric_mapping = {"bar": "spunky"}
-        data = self.client.experiment.lookup_data()
-        adapter = Generators.BOTORCH_MODULAR(
-            experiment=self.client.experiment, data=data
-        )
+        client = get_test_client()
+        data = client.experiment.lookup_data()
+        adapter = Generators.BOTORCH_MODULAR(experiment=client.experiment, data=data)
         cards = compute_sensitivity_adhoc(adapter=adapter, labels=metric_mapping)
         self.assertEqual(len(cards), 1)
         card = cards[0]
@@ -133,24 +132,19 @@ def test_compute_adhoc(self) -> None:
     @mock_botorch_optimize
     @TestCase.ax_long_test(reason="Expensive to compute Sobol indices")
     def test_online(self) -> None:
-        # Test that SensitivityAnalysisPlot can be computed for a variety of
-        # experiments which resemble those we see in an online setting. In
-        # analogous tests we run all experiments with modifications to settings;
-        # however, this test is slow, so we limit the permutations we validate.
-        order = "total"  # most common
-
-        for experiment in get_online_experiments_subset():
-            for top_k in [None, 1]:
-                generation_strategy = get_default_generation_strategy_at_MBM_node(
-                    experiment=experiment
-                )
+        for experiment in get_online_experiments():
+            generation_strategy = get_default_generation_strategy_at_MBM_node(
+                experiment=experiment
+            )
+            # Select an arbitrary metric from the optimization config
+            metric_names = [
+                none_throws(experiment.optimization_config).objective.metric_names[0]
+            ]
+            for order, top_k in product(["first", "second", "total"], [None, 1]):
                 analysis = SensitivityAnalysisPlot(
-                    # Select an arbitrary metric from the optimization config
-                    metric_names=[
-                        none_throws(
-                            experiment.optimization_config
-                        ).objective.metric_names[0]
-                    ],
+                    metric_names=metric_names,
+                    # pyre-fixme: Incompatible parameter type [6]: It isn't sure
+                    # if "order" has one of the values specified by the Literal
                     order=order,
                     top_k=top_k,
                 )
@@ -162,28 +156,25 @@ def test_online(self) -> None:
     @mock_botorch_optimize
     @TestCase.ax_long_test(reason="Expensive to compute Sobol indices")
    def test_offline(self) -> None:
-        # Test that SensitivityAnalysisPlot can be computed for a variety of
-        # experiments which resemble those we see in an offline setting. In
-        # analogous tests we run all experiments with modifications to settings;
-        # however, this test is slow, so we limit the permutations we validate.
-        order = "total"  # most common
-
-        for experiment in get_offline_experiments_subset():
-            for top_k in [None, 1]:
-                generation_strategy = get_default_generation_strategy_at_MBM_node(
-                    experiment=experiment
-                )
+        for experiment in get_offline_experiments():
+            generation_strategy = get_default_generation_strategy_at_MBM_node(
+                experiment=experiment
+            )
+            # Select an arbitrary metric from the optimization config
+            metric_names = [
+                none_throws(experiment.optimization_config).objective.metric_names[0]
+            ]
+            for order, top_k in product(["first", "second", "total"], [None, 1]):
                 analysis = SensitivityAnalysisPlot(
-                    # Select an arbitrary metric from the optimization config
-                    metric_names=[
-                        none_throws(
-                            experiment.optimization_config
-                        ).objective.metric_names[0]
-                    ],
+                    metric_names=metric_names,
+                    # pyre-fixme: Incompatible parameter type [6]: It isn't sure
+                    # if "order" has one of the values specified by the Literal
                     order=order,
                     top_k=top_k,
                 )
 
+                # This prints a lot of warnings about y being constant
+                # because the first MOO experiment produces constant data
                 _ = analysis.compute(
                     experiment=experiment, generation_strategy=generation_strategy
                 )
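
A standalone sketch (not part of the diff) of why the added `# pyre-fixme` comments are needed: `itertools.product` yields plain `str` and `int | None` values, so a static checker cannot narrow `order` to the `Literal` type that `SensitivityAnalysisPlot` appears to expect. The `SobolOrder` alias and `make_plot` stand-in below are hypothetical, for illustration only:

```python
from itertools import product
from typing import Literal, Optional

SobolOrder = Literal["first", "second", "total"]  # hypothetical alias

def make_plot(order: SobolOrder, top_k: Optional[int]) -> str:
    # Stand-in for the SensitivityAnalysisPlot constructor.
    return f"order={order}, top_k={top_k}"

# Six (order, top_k) permutations, replacing the old single-order nested loop:
for order, top_k in product(["first", "second", "total"], [None, 1]):
    # `order` is typed `str` here, not `SobolOrder`, so type checkers flag
    # this call even though every runtime value is a valid Literal member.
    print(make_plot(order, top_k))
```

A `typing.cast(SobolOrder, order)` would silence the error without a suppression comment, at the cost of hiding genuine mismatches if the list of orders ever drifts from the Literal.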