
Commit 6824259

added final unit tests
1 parent: 00943ce

File tree

2 files changed: +37, -2 lines

openml/evaluations/functions.py

Lines changed: 1 addition & 1 deletion
@@ -71,7 +71,7 @@ def _list_evaluations(api_call):
     if isinstance(evals_dict['oml:evaluations']['oml:evaluation'], list):
         evals_list = evals_dict['oml:evaluations']['oml:evaluation']
     elif isinstance(evals_dict['oml:evaluations']['oml:evaluation'], dict):
-        evals_list = [evals_dict['oml:runs']['oml:run']]
+        evals_list = [evals_dict['oml:evaluations']['oml:evaluation']]
     else:
         raise TypeError()
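
The one-line fix above matters because the XML-to-dict parser yields a single dict when the response contains exactly one <oml:evaluation> element and a list when it contains several; the old branch also looked the element up under the wrong keys ('oml:runs'/'oml:run', apparently copied from the run-listing code), which would raise a KeyError. A minimal sketch of the normalization pattern, assuming xmltodict-style parsing as the openml client uses (the helper name here is hypothetical):

import xmltodict

def _normalize_evaluations(xml_string):
    # xmltodict returns a dict for a single <oml:evaluation> element
    # and a list for several, so normalize both shapes to a list.
    evals = xmltodict.parse(xml_string)['oml:evaluations']['oml:evaluation']
    if isinstance(evals, list):
        return evals
    elif isinstance(evals, dict):
        return [evals]
    else:
        raise TypeError('unexpected node type: %s' % type(evals))

single = ('<oml:evaluations><oml:evaluation><oml:run_id>1</oml:run_id>'
          '</oml:evaluation></oml:evaluations>')
print(_normalize_evaluations(single))  # one dict, wrapped into a list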

tests/test_evaluations/test_evaluation_functions.py

Lines changed: 36 additions & 1 deletion
@@ -1,4 +1,3 @@
-import unittest
 import openml
 import openml.evaluations
 from openml.testing import TestBase
@@ -29,6 +28,42 @@ def test_evaluation_list_filter_uploader(self):
         # self.assertEquals(evaluations[run_id].uploader, uploader_id)


+    def test_evaluation_list_filter_setup(self):
+        openml.config.server = self.production_server
+
+        setup_id = 10
+
+        evaluations = openml.evaluations.list_evaluations("predictive_accuracy", setup=[setup_id])
+
+        self.assertGreater(len(evaluations), 100)
+        for run_id in evaluations.keys():
+            self.assertEquals(evaluations[run_id].setup_id, setup_id)
+
+
+    def test_evaluation_list_filter_flow(self):
+        openml.config.server = self.production_server
+
+        flow_id = 100
+
+        evaluations = openml.evaluations.list_evaluations("predictive_accuracy", flow=[flow_id])
+
+        self.assertGreater(len(evaluations), 2)
+        for run_id in evaluations.keys():
+            self.assertEquals(evaluations[run_id].flow_id, flow_id)
+
+
+    def test_evaluation_list_filter_run(self):
+        openml.config.server = self.production_server
+
+        run_id = 1
+
+        evaluations = openml.evaluations.list_evaluations("predictive_accuracy", id=[run_id])
+
+        self.assertEquals(len(evaluations), 1)
+        for run_id in evaluations.keys():
+            self.assertEquals(evaluations[run_id].run_id, run_id)
+
+
     def test_evaluation_list_limit(self):
         openml.config.server = self.production_server
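
For reference, the three new tests cover the setup, flow, and run-id filters of list_evaluations against the production server. A short usage sketch under the same assumptions the tests make (the server URL is assumed to be the standard OpenML production endpoint; the IDs are the illustrative values from the tests):

import openml

# Point the client at the production server, as the tests do.
openml.config.server = 'https://www.openml.org/api/v1/xml'

# Each call returns a dict mapping run_id -> evaluation record.
by_setup = openml.evaluations.list_evaluations('predictive_accuracy', setup=[10])
by_flow = openml.evaluations.list_evaluations('predictive_accuracy', flow=[100])
by_run = openml.evaluations.list_evaluations('predictive_accuracy', id=[1])

for run_id, evaluation in by_run.items():
    print(run_id, evaluation.setup_id, evaluation.flow_id)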
