Skip to content

Commit e2222aa

Browse files
authored
Merge pull request #715 from sahithyaravi1493/fix962
add sort to list_evaluations
2 parents b660d7d + b026810 commit e2222aa

File tree

4 files changed

+37
-2
lines changed

4 files changed

+37
-2
lines changed

doc/progress.rst

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ Changelog
88

99
0.10.0
1010
~~~~~~
11+
* ADD #715: `list_evaluations` now has an option to sort evaluations by score (value).
1112
* FIX #589: Fixing a bug that did not successfully upload the columns to ignore when creating and publishing a dataset.
1213
* DOC #639: More descriptive documentation for function to convert array format.
1314
* ADD #687: Adds a function to retrieve the list of evaluation measures available.

openml/evaluations/functions.py

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
import xmltodict
33
import pandas as pd
44
from typing import Union, List, Optional, Dict
5+
import collections
56

67
import openml.utils
78
import openml._api_calls
@@ -19,6 +20,7 @@ def list_evaluations(
1920
uploader: Optional[List] = None,
2021
tag: Optional[str] = None,
2122
per_fold: Optional[bool] = None,
23+
sort_order: Optional[str] = None,
2224
output_format: str = 'object'
2325
) -> Union[Dict, pd.DataFrame]:
2426
"""
@@ -48,6 +50,9 @@ def list_evaluations(
4850
4951
per_fold : bool, optional
5052
53+
sort_order : str, optional
54+
order of sorting evaluations, ascending ("asc") or descending ("desc")
55+
5156
output_format: str, optional (default='object')
5257
The parameter decides the format of the output.
5358
- If 'object' the output is a dict of OpenMLEvaluation objects
@@ -77,6 +82,7 @@ def list_evaluations(
7782
flow=flow,
7883
uploader=uploader,
7984
tag=tag,
85+
sort_order=sort_order,
8086
per_fold=per_fold_str)
8187

8288

@@ -87,6 +93,7 @@ def _list_evaluations(
8793
setup: Optional[List] = None,
8894
flow: Optional[List] = None,
8995
uploader: Optional[List] = None,
96+
sort_order: Optional[str] = None,
9097
output_format: str = 'object',
9198
**kwargs
9299
) -> Union[Dict, pd.DataFrame]:
@@ -114,6 +121,9 @@ def _list_evaluations(
114121
kwargs: dict, optional
115122
Legal filter operators: tag, limit, offset.
116123
124+
sort_order : str, optional
125+
order of sorting evaluations, ascending ("asc") or descending ("desc")
126+
117127
output_format: str, optional (default='dict')
118128
The parameter decides the format of the output.
119129
- If 'dict' the output is a dict of dict
@@ -141,6 +151,8 @@ def _list_evaluations(
141151
api_call += "/flow/%s" % ','.join([str(int(i)) for i in flow])
142152
if uploader is not None:
143153
api_call += "/uploader/%s" % ','.join([str(int(i)) for i in uploader])
154+
if sort_order is not None:
155+
api_call += "/sort_order/%s" % sort_order
144156

145157
return __list_evaluations(api_call, output_format=output_format)
146158

@@ -157,7 +169,7 @@ def __list_evaluations(api_call, output_format='object'):
157169
assert type(evals_dict['oml:evaluations']['oml:evaluation']) == list, \
158170
type(evals_dict['oml:evaluations'])
159171

160-
evals = dict()
172+
evals = collections.OrderedDict()
161173
for eval_ in evals_dict['oml:evaluations']['oml:evaluation']:
162174
run_id = int(eval_['oml:run_id'])
163175
value = None

openml/utils.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
import warnings
66
import pandas as pd
77
from functools import wraps
8+
import collections
89

910
import openml._api_calls
1011
import openml.exceptions
@@ -182,7 +183,7 @@ def _list_all(listing_call, output_format='dict', *args, **filters):
182183
active_filters = {key: value for key, value in filters.items()
183184
if value is not None}
184185
page = 0
185-
result = {}
186+
result = collections.OrderedDict()
186187
if output_format == 'dataframe':
187188
result = pd.DataFrame()
188189

tests/test_evaluations/test_evaluation_functions.py

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -117,6 +117,27 @@ def test_evaluation_list_per_fold(self):
117117
self.assertIsNotNone(evaluations[run_id].value)
118118
self.assertIsNone(evaluations[run_id].values)
119119

120+
def test_evaluation_list_sort(self):
121+
size = 10
122+
task_id = 115
123+
# Get all evaluations of the task
124+
unsorted_eval = openml.evaluations.list_evaluations(
125+
"predictive_accuracy", offset=0, task=[task_id])
126+
# Get top 10 evaluations of the same task
127+
sorted_eval = openml.evaluations.list_evaluations(
128+
"predictive_accuracy", size=size, offset=0, task=[task_id], sort_order="desc")
129+
self.assertEqual(len(sorted_eval), size)
130+
self.assertGreater(len(unsorted_eval), 0)
131+
sorted_output = [evaluation.value for evaluation in sorted_eval.values()]
132+
unsorted_output = [evaluation.value for evaluation in unsorted_eval.values()]
133+
134+
# Check if output from sort is sorted in the right order
135+
self.assertTrue(sorted(sorted_output, reverse=True) == sorted_output)
136+
137+
# Compare manual sorting against sorted output
138+
test_output = sorted(unsorted_output, reverse=True)
139+
self.assertTrue(test_output[:size] == sorted_output)
140+
120141
def test_list_evaluation_measures(self):
121142
measures = openml.evaluations.list_evaluation_measures()
122143
self.assertEqual(isinstance(measures, list), True)

0 commit comments

Comments (0)