|
6 | 6 | import unittest |
7 | 7 | import unittest.mock |
8 | 8 |
|
| 9 | +from autosklearn.metrics import roc_auc, accuracy |
| 10 | +from autosklearn.ensembles.ensemble_selection import EnsembleSelection |
| 11 | +from autosklearn.ensemble_builder import EnsembleBuilder, Y_VALID, Y_TEST |
9 | 12 | import numpy as np |
10 | 13 |
|
11 | 14 | this_directory = os.path.dirname(__file__) |
12 | 15 | sys.path.append(this_directory) |
13 | 16 |
|
14 | | -from autosklearn.ensemble_builder import EnsembleBuilder, Y_ENSEMBLE, Y_VALID, Y_TEST |
15 | | -from autosklearn.metrics import roc_auc |
16 | | - |
17 | 17 |
|
18 | 18 | class BackendMock(object): |
19 | 19 |
|
@@ -260,3 +260,68 @@ def testLimit(self): |
260 | 260 |
|
261 | 261 | # it should try to reduce ensemble_nbest until it also failed at 2 |
262 | 262 | self.assertEqual(ensbuilder.ensemble_nbest,1) |
| 263 | + |
| 264 | + |
class EnsembleSelectionTest(unittest.TestCase):
    def testPredict(self):
        # EnsembleSelection.predict combines per-model predictions with the
        # fitted weights.  Two input layouts are supported:
        #   1) predictions.shape[0] == len(self.weights_): one prediction
        #      stack per weight, zero-weighted models included, so each
        #      weight is applied to the matching stack directly.
        #   2) predictions.shape[0] < len(self.weights_): stacks for
        #      zero-weighted models were dropped, so only the non-zero
        #      weights are applied, in order.
        # Any other shape/weight combination is invalid and predict()
        # raises a ValueError.
        ensemble = EnsembleSelection(ensemble_size=3,
                                     task_type=1,
                                     metric=accuracy,
                                     )

        def fresh_predictions():
            # Build a new (n_models=3, n_points=2, n_classes=2) array for
            # every case so the cases stay independent even if predict()
            # were ever to mutate its input.
            return np.array([
                [[0.9, 0.1],
                 [0.4, 0.6]],
                [[0.8, 0.2],
                 [0.3, 0.7]],
                [[1.0, 0.0],
                 [0.1, 0.9]],
            ])

        # Hand-computed weighted average with weights (0.7, 0.2, 0.1):
        # e.g. 0.7*0.9 + 0.2*0.8 + 0.1*1.0 = 0.89 for the first entry.
        expected = np.array([[0.89, 0.11],
                             [0.35, 0.65]])

        # Case 1: weights and prediction stacks line up one-to-one.
        ensemble.weights_ = [0.7, 0.2, 0.1]
        self.assertTrue(
            np.allclose(ensemble.predict(fresh_predictions()), expected))

        # Case 2: the zero-weighted (third) model's stack is absent, so
        # only the three non-zero weights are applied.
        ensemble.weights_ = [0.7, 0.2, 0.0, 0.1]
        self.assertTrue(
            np.allclose(ensemble.predict(fresh_predictions()), expected))

        # Error case: two non-zero weights but three prediction stacks --
        # neither supported layout matches, so predict() must raise.
        ensemble.weights_ = [0.6, 0.0, 0.0, 0.4]
        with self.assertRaises(ValueError):
            ensemble.predict(fresh_predictions())
0 commit comments