
Commit dea3b45

Merge pull request #273 from openml/evaluationlist
Evaluationlist
2 parents 963f8c7 + 6824259 commit dea3b45

File tree: 2 files changed (+116, −6 lines changed)


openml/evaluations/functions.py

Lines changed: 57 additions & 3 deletions
@@ -3,10 +3,64 @@
 from .._api_calls import _perform_api_call
 from ..evaluations import OpenMLEvaluation

-def list_evaluations(function, task_id):
+def list_evaluations(function, offset=None, size=None, id=None, task=None, setup=None,
+                     flow=None, uploader=None, tag=None):
+    """List all run-evaluation pairs matching all of the given filters.
+
+    Perform API call `/evaluation/list/function/{function}/{filters}`.
+
+    Parameters
+    ----------
+    function : str
+        the evaluation function, e.g., predictive_accuracy
+    offset : int, optional
+        the number of runs to skip, starting from the first
+    size : int, optional
+        the maximum number of runs to show
+    id : list, optional
+    task : list, optional
+    setup : list, optional
+    flow : list, optional
+    uploader : list, optional
+    tag : str, optional
+
+    Returns
+    -------
+    list
+        List of found evaluations.
+    """
+    api_call = "evaluation/list/function/%s" % function
+    if offset is not None:
+        api_call += "/offset/%d" % int(offset)
+    if size is not None:
+        api_call += "/limit/%d" % int(size)
+    if id is not None:
+        api_call += "/run/%s" % ','.join([str(int(i)) for i in id])
+    if task is not None:
+        api_call += "/task/%s" % ','.join([str(int(i)) for i in task])
+    if setup is not None:
+        api_call += "/setup/%s" % ','.join([str(int(i)) for i in setup])
+    if flow is not None:
+        api_call += "/flow/%s" % ','.join([str(int(i)) for i in flow])
+    if uploader is not None:
+        api_call += "/uploader/%s" % ','.join([str(int(i)) for i in uploader])
+    if tag is not None:
+        api_call += "/tag/%s" % tag
+
+    return _list_evaluations(api_call)
+
+
 def _list_evaluations(api_call):
     """Helper function to parse API calls which are lists of runs"""

-    xml_string = _perform_api_call("evaluation/list/function/%s/task/%d" % (function, task_id))
+    xml_string = _perform_api_call(api_call)

     evals_dict = xmltodict.parse(xml_string)
     # Minimalistic check if the XML is useful
@@ -17,7 +71,7 @@ def list_evaluations(function, task_id):
     if isinstance(evals_dict['oml:evaluations']['oml:evaluation'], list):
         evals_list = evals_dict['oml:evaluations']['oml:evaluation']
     elif isinstance(evals_dict['oml:evaluations']['oml:evaluation'], dict):
-        evals_list = [evals_dict['oml:runs']['oml:run']]
+        evals_list = [evals_dict['oml:evaluations']['oml:evaluation']]
     else:
         raise TypeError()
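For illustration, a minimal usage sketch of the new interface (not part of the commit): each keyword filter appends one path segment to the REST call built above, and the result is assumed to be a dict keyed by run id, since that is how the tests below index it.

    import openml

    # With size and task set, list_evaluations builds the API path
    # evaluation/list/function/predictive_accuracy/limit/100/task/7312
    evaluations = openml.evaluations.list_evaluations(
        "predictive_accuracy", size=100, task=[7312])

    for run_id, evaluation in evaluations.items():
        # task_id is one of the attributes the tests below check
        print(run_id, evaluation.task_id)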

tests/test_evaluations/test_evaluation_functions.py

Lines changed: 59 additions & 3 deletions
@@ -4,12 +4,68 @@

 class TestEvaluationFunctions(TestBase):

-    def test_evaluation_list(self):
+    def test_evaluation_list_filter_task(self):
         openml.config.server = self.production_server

         task_id = 7312

-        res = openml.evaluations.list_evaluations("predictive_accuracy", task_id)
+        evaluations = openml.evaluations.list_evaluations("predictive_accuracy", task=[task_id])

-        self.assertGreater(len(res), 100)
+        self.assertGreater(len(evaluations), 100)
+        for run_id in evaluations.keys():
+            self.assertEquals(evaluations[run_id].task_id, task_id)

+
+    def test_evaluation_list_filter_uploader(self):
+        openml.config.server = self.production_server
+
+        uploader_id = 16
+
+        evaluations = openml.evaluations.list_evaluations("predictive_accuracy", uploader=[uploader_id])
+
+        self.assertGreater(len(evaluations), 100)
+        # for run_id in evaluations.keys():
+        #     self.assertEquals(evaluations[run_id].uploader, uploader_id)
+
+
+    def test_evaluation_list_filter_setup(self):
+        openml.config.server = self.production_server
+
+        setup_id = 10
+
+        evaluations = openml.evaluations.list_evaluations("predictive_accuracy", setup=[setup_id])
+
+        self.assertGreater(len(evaluations), 100)
+        for run_id in evaluations.keys():
+            self.assertEquals(evaluations[run_id].setup_id, setup_id)
+
+
+    def test_evaluation_list_filter_flow(self):
+        openml.config.server = self.production_server
+
+        flow_id = 100
+
+        evaluations = openml.evaluations.list_evaluations("predictive_accuracy", flow=[flow_id])
+
+        self.assertGreater(len(evaluations), 2)
+        for run_id in evaluations.keys():
+            self.assertEquals(evaluations[run_id].flow_id, flow_id)
+
+
+    def test_evaluation_list_filter_run(self):
+        openml.config.server = self.production_server
+
+        run_id = 1
+
+        evaluations = openml.evaluations.list_evaluations("predictive_accuracy", id=[run_id])
+
+        self.assertEquals(len(evaluations), 1)
+        for run_id in evaluations.keys():
+            self.assertEquals(evaluations[run_id].run_id, run_id)
+
+
+    def test_evaluation_list_limit(self):
+        openml.config.server = self.production_server
+
+        evaluations = openml.evaluations.list_evaluations("predictive_accuracy", size=100, offset=100)
+        self.assertEquals(len(evaluations), 100)
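As a closing illustration (not part of the commit), a hedged end-to-end sketch mirroring test_evaluation_list_filter_flow above; it assumes the client is already pointed at a server that has evaluations for flow 100, as the tests arrange via self.production_server.

    import openml

    # List predictive_accuracy evaluations produced by runs of flow 100.
    evaluations = openml.evaluations.list_evaluations(
        "predictive_accuracy", flow=[100])

    # Every returned evaluation should reference that flow.
    assert all(e.flow_id == 100 for e in evaluations.values())
    print("%d evaluations found" % len(evaluations))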
