@@ -8,7 +8,8 @@
 
 import torch
 from botorch import fit_gpytorch_model
-from botorch.models import SingleTaskGP
+from botorch.exceptions.warnings import OptimizationWarning
+from botorch.models import FixedNoiseGP, HeteroskedasticSingleTaskGP, SingleTaskGP
 from botorch.optim.fit import (
     OptimizationIteration,
     fit_gpytorch_scipy,
@@ -36,6 +37,27 @@ def _getModel(self, double=False, cuda=False):
         mll = ExactMarginalLogLikelihood(model.likelihood, model)
         return mll.to(device=device, dtype=dtype)
 
+    def _getBatchedModel(self, kind="SingleTaskGP", double=False, cuda=False):
+        device = torch.device("cuda") if cuda else torch.device("cpu")
+        dtype = torch.double if double else torch.float
+        train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)
+        noise = torch.tensor(NOISE, device=device, dtype=dtype)
+        train_y1 = torch.sin(train_x.view(-1) * (2 * math.pi)) + noise
+        train_y2 = torch.sin(train_x.view(-1) * (2 * math.pi)) + noise
+        train_y = torch.stack([train_y1, train_y2], dim=-1)
+        if kind == "SingleTaskGP":
+            model = SingleTaskGP(train_x, train_y)
+        elif kind == "FixedNoiseGP":
+            model = FixedNoiseGP(train_x, train_y, 0.1 * torch.ones_like(train_y))
+        elif kind == "HeteroskedasticSingleTaskGP":
+            model = HeteroskedasticSingleTaskGP(
+                train_x, train_y, 0.1 * torch.ones_like(train_y)
+            )
+        else:
+            raise NotImplementedError
+        mll = ExactMarginalLogLikelihood(model.likelihood, model)
+        return mll.to(device=device, dtype=dtype)
+
     def test_fit_gpytorch_model(self, cuda=False, optimizer=fit_gpytorch_scipy):
         options = {"disp": False, "maxiter": 5}
         for double in (False, True):
@@ -46,7 +68,7 @@ def test_fit_gpytorch_model(self, cuda=False, optimizer=fit_gpytorch_scipy):
                 )
             if optimizer == fit_gpytorch_scipy:
                 self.assertEqual(len(ws), 1)
-                self.assertTrue(MAX_RETRY_MSG in str(ws[-1].message))
+                self.assertTrue(MAX_RETRY_MSG in str(ws[0].message))
             model = mll.model
             # Make sure all of the parameters changed
             self.assertGreater(model.likelihood.raw_noise.abs().item(), 1e-3)
@@ -68,7 +90,7 @@ def test_fit_gpytorch_model(self, cuda=False, optimizer=fit_gpytorch_scipy):
                 )
             if optimizer == fit_gpytorch_scipy:
                 self.assertEqual(len(ws), 1)
-                self.assertTrue(MAX_RETRY_MSG in str(ws[-1].message))
+                self.assertTrue(MAX_RETRY_MSG in str(ws[0].message))
 
             model = mll.model
             self.assertGreaterEqual(model.likelihood.raw_noise.abs().item(), 1e-1)
@@ -86,7 +108,7 @@ def test_fit_gpytorch_model(self, cuda=False, optimizer=fit_gpytorch_scipy):
                 mll, iterations = optimizer(mll, options=options, track_iterations=True)
             if optimizer == fit_gpytorch_scipy:
                 self.assertEqual(len(ws), 1)
-                self.assertTrue(MAX_ITER_MSG in str(ws[-1].message))
+                self.assertTrue(MAX_ITER_MSG in str(ws[0].message))
             self.assertEqual(len(iterations), options["maxiter"])
             self.assertIsInstance(iterations[0], OptimizationIteration)
 
@@ -109,15 +131,15 @@ def test_fit_gpytorch_model(self, cuda=False, optimizer=fit_gpytorch_scipy):
                 )
             if optimizer == fit_gpytorch_scipy:
                 self.assertEqual(len(ws), 1)
-                self.assertTrue(MAX_RETRY_MSG in str(ws[-1].message))
+                self.assertTrue(MAX_RETRY_MSG in str(ws[0].message))
             self.assertTrue(mll.dummy_param.grad is None)
 
     def test_fit_gpytorch_model_cuda(self):
         if torch.cuda.is_available():
             self.test_fit_gpytorch_model(cuda=True)
 
     def test_fit_gpytorch_model_singular(self, cuda=False):
-        options = {"disp": False, "maxiter": 2}
+        options = {"disp": False, "maxiter": 5}
         device = torch.device("cuda") if cuda else torch.device("cpu")
         for dtype in (torch.float, torch.double):
             X_train = torch.rand(2, 2, device=device, dtype=dtype)
@@ -130,7 +152,7 @@ def test_fit_gpytorch_model_singular(self, cuda=False):
             mll.to(device=device, dtype=dtype)
             with warnings.catch_warnings(record=True) as ws:
                 # this will do multiple retries
-                fit_gpytorch_model(mll, options=options)
+                fit_gpytorch_model(mll, options=options, max_retries=1)
             self.assertEqual(len(ws), 1)
             self.assertTrue(MAX_RETRY_MSG in str(ws[0].message))
 
@@ -144,3 +166,24 @@ def test_fit_gpytorch_model_torch(self, cuda=False):
     def test_fit_gpytorch_model_torch_cuda(self):
         if torch.cuda.is_available():
             self.test_fit_gpytorch_model_torch(cuda=True)
+
+    def test_fit_gpytorch_model_sequential(self, cuda=False):
+        options = {"disp": False, "maxiter": 1}
+        for double in (False, True):
+            for kind in ("SingleTaskGP", "FixedNoiseGP", "HeteroskedasticSingleTaskGP"):
+                with warnings.catch_warnings():
+                    warnings.filterwarnings("ignore", category=OptimizationWarning)
+                    mll = self._getBatchedModel(kind=kind, double=double, cuda=cuda)
+                    mll = fit_gpytorch_model(mll, options=options, max_retries=1)
+                    mll = self._getBatchedModel(kind=kind, double=double, cuda=cuda)
+                    mll = fit_gpytorch_model(
+                        mll, options=options, sequential=True, max_retries=1
+                    )
+                    mll = self._getBatchedModel(kind=kind, double=double, cuda=cuda)
+                    mll = fit_gpytorch_model(
+                        mll, options=options, sequential=False, max_retries=1
+                    )
+
+    def test_fit_gpytorch_model_sequential_cuda(self):
+        if torch.cuda.is_available():
+            self.test_fit_gpytorch_model_sequential(cuda=True)
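
Note (not part of the diff): the following is a minimal, hypothetical sketch of how the sequential fitting path exercised by the new test might be invoked directly. It assumes only the keyword arguments already shown above (options, sequential, max_retries) and mirrors the two-output data construction in _getBatchedModel; it is an illustration under those assumptions, not canonical usage.

# Sketch only: mirrors _getBatchedModel / test_fit_gpytorch_model_sequential above.
import math

import torch
from botorch import fit_gpytorch_model
from botorch.models import SingleTaskGP
from gpytorch.mlls import ExactMarginalLogLikelihood

# Two-output training data on a 1d grid, as in the test helper.
train_x = torch.linspace(0, 1, 10).unsqueeze(-1)
noise = 0.1 * torch.randn(10)  # stand-in for the test's NOISE constant
train_y1 = torch.sin(train_x.view(-1) * (2 * math.pi)) + noise
train_y2 = torch.sin(train_x.view(-1) * (2 * math.pi)) + noise
train_y = torch.stack([train_y1, train_y2], dim=-1)  # shape (10, 2)

model = SingleTaskGP(train_x, train_y)
mll = ExactMarginalLogLikelihood(model.likelihood, model)

# sequential=True exercises the new code path covered by the test (presumably
# fitting per-output sub-models one at a time); sequential=False keeps the
# joint fit.
mll = fit_gpytorch_model(
    mll, options={"disp": False, "maxiter": 50}, sequential=True, max_retries=1
)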