Commit 10da40d

renamed function
1 parent 3ed5c7a commit 10da40d

File tree

1 file changed: +3 -3 lines changed


tests/test_runs/test_run_functions.py

Lines changed: 3 additions & 3 deletions
@@ -412,15 +412,15 @@ def test_initialize_cv_from_run(self):
 
         self.assertEquals(modelS.cv.random_state, 62501)
         self.assertEqual(modelR.cv.random_state, 62501)
-
+
     def _test_local_evaluations(self, run):
 
         # compare with the scores in user defined measures
         accuracy_scores_provided = []
         for rep in run.fold_evaluations['predictive_accuracy'].keys():
             for fold in run.fold_evaluations['predictive_accuracy'][rep].keys():
                 accuracy_scores_provided.append(run.fold_evaluations['predictive_accuracy'][rep][fold])
-        accuracy_scores = run.get_metric_score(sklearn.metrics.accuracy_score)
+        accuracy_scores = run.get_metric_fn(sklearn.metrics.accuracy_score)
         np.testing.assert_array_almost_equal(accuracy_scores_provided, accuracy_scores)
 
         # also check if we can obtain some other scores: # TODO: how to do AUC?
@@ -431,7 +431,7 @@ def _test_local_evaluations(self, run):
                  (sklearn.metrics.precision_score, {'average': 'macro'}),
                  (sklearn.metrics.brier_score_loss, {})]
         for test_idx, test in enumerate(tests):
-            alt_scores = run.get_metric_score(test[0], test[1])
+            alt_scores = run.get_metric_fn(test[0], test[1])
             self.assertEquals(len(alt_scores), 10)
             for idx in range(len(alt_scores)):
                 self.assertGreaterEqual(alt_scores[idx], 0)
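For context on the rename: as the hunks above show, the renamed helper takes a sklearn metric callable and, optionally, a dict of keyword arguments for that metric, and returns one score per fold. A minimal sketch of that calling pattern follows; the check_local_scores wrapper and its run argument are hypothetical stand-ins, and only get_metric_fn and its arguments come from the diff itself.

import sklearn.metrics

def check_local_scores(run):
    # `run` is assumed to be a finished OpenML run object that exposes the
    # renamed get_metric_fn helper; this wrapper exists only for illustration.

    # Single-argument form: the metric is applied to each fold's predictions.
    accuracy_scores = run.get_metric_fn(sklearn.metrics.accuracy_score)

    # Metric keyword arguments go in a dict, mirroring the (metric, kwargs)
    # pairs the test iterates over.
    macro_precision = run.get_metric_fn(sklearn.metrics.precision_score,
                                        {'average': 'macro'})

    # Each call yields one score per fold (10 in the test's setup).
    return accuracy_scores, macro_precision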
