Skip to content

Commit e5dcaf0

Browse files
authored
Rename arguments of list_evaluations (#933)
* list evals name change * list evals - update
1 parent 4256834 commit e5dcaf0

File tree

8 files changed

+100
-96
lines changed

8 files changed

+100
-96
lines changed

examples/30_extended/fetch_evaluations_tutorial.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@
6363

6464
metric = "predictive_accuracy"
6565
evals = openml.evaluations.list_evaluations(
66-
function=metric, task=[task_id], output_format="dataframe"
66+
function=metric, tasks=[task_id], output_format="dataframe"
6767
)
6868
# Displaying the first 10 rows
6969
print(evals.head(n=10))
@@ -162,7 +162,7 @@ def plot_flow_compare(evaluations, top_n=10, metric="predictive_accuracy"):
162162
# List evaluations in descending order based on predictive_accuracy with
163163
# hyperparameters
164164
evals_setups = openml.evaluations.list_evaluations_setups(
165-
function="predictive_accuracy", task=[31], size=100, sort_order="desc"
165+
function="predictive_accuracy", tasks=[31], size=100, sort_order="desc"
166166
)
167167

168168
""
@@ -173,7 +173,7 @@ def plot_flow_compare(evaluations, top_n=10, metric="predictive_accuracy"):
173173
# with hyperparameters. parameters_in_separate_columns returns parameters in
174174
# separate columns
175175
evals_setups = openml.evaluations.list_evaluations_setups(
176-
function="predictive_accuracy", flow=[6767], size=100, parameters_in_separate_columns=True
176+
function="predictive_accuracy", flows=[6767], size=100, parameters_in_separate_columns=True
177177
)
178178

179179
""

examples/30_extended/plot_svm_hyperparameters_tutorial.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,8 @@
2020
# uploaded runs (called *setup*).
2121
df = openml.evaluations.list_evaluations_setups(
2222
function="predictive_accuracy",
23-
flow=[8353],
24-
task=[6],
23+
flows=[8353],
24+
tasks=[6],
2525
output_format="dataframe",
2626
# Using this flag incorporates the hyperparameters into the returned dataframe. Otherwise,
2727
# the dataframe would contain a field ``parameters`` containing an unparsed dictionary.

examples/40_paper/2018_ida_strang_example.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@
4747

4848
# Downloads all evaluation records related to this study
4949
evaluations = openml.evaluations.list_evaluations(
50-
measure, flow=flow_ids, study=study_id, output_format="dataframe"
50+
measure, flows=flow_ids, study=study_id, output_format="dataframe"
5151
)
5252
# gives us a table with columns data_id, flow1_value, flow2_value
5353
evaluations = evaluations.pivot(index="data_id", columns="flow_id", values="value").dropna()

examples/40_paper/2018_kdd_rijn_example.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -88,8 +88,8 @@
8888
# note that we explicitly only include tasks from the benchmark suite that was specified (as per the for-loop)
8989
evals = openml.evaluations.list_evaluations_setups(
9090
evaluation_measure,
91-
flow=[flow_id],
92-
task=[task_id],
91+
flows=[flow_id],
92+
tasks=[task_id],
9393
size=limit_per_task,
9494
output_format="dataframe",
9595
)

examples/40_paper/2018_neurips_perrone_example.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -91,9 +91,9 @@ def fetch_evaluations(run_full=False, flow_type="svm", metric="area_under_roc_cu
9191
# Fetching evaluations
9292
eval_df = openml.evaluations.list_evaluations_setups(
9393
function=metric,
94-
task=task_ids,
95-
flow=[flow_id],
96-
uploader=[2702],
94+
tasks=task_ids,
95+
flows=[flow_id],
96+
uploaders=[2702],
9797
output_format="dataframe",
9898
parameters_in_separate_columns=True,
9999
)

openml/evaluations/functions.py

Lines changed: 71 additions & 67 deletions
Original file line numberDiff line numberDiff line change
@@ -17,11 +17,11 @@ def list_evaluations(
1717
function: str,
1818
offset: Optional[int] = None,
1919
size: Optional[int] = None,
20-
task: Optional[List] = None,
21-
setup: Optional[List] = None,
22-
flow: Optional[List] = None,
23-
run: Optional[List] = None,
24-
uploader: Optional[List] = None,
20+
tasks: Optional[List[Union[str, int]]] = None,
21+
setups: Optional[List[Union[str, int]]] = None,
22+
flows: Optional[List[Union[str, int]]] = None,
23+
runs: Optional[List[Union[str, int]]] = None,
24+
uploaders: Optional[List[Union[str, int]]] = None,
2525
tag: Optional[str] = None,
2626
study: Optional[int] = None,
2727
per_fold: Optional[bool] = None,
@@ -41,17 +41,18 @@ def list_evaluations(
4141
size : int, optional
4242
the maximum number of runs to show
4343
44-
task : list, optional
45-
46-
setup: list, optional
47-
48-
flow : list, optional
49-
50-
run : list, optional
51-
52-
uploader : list, optional
53-
44+
tasks : list[int,str], optional
45+
the list of task IDs
46+
setups: list[int,str], optional
47+
the list of setup IDs
48+
flows : list[int,str], optional
49+
the list of flow IDs
50+
runs : list[int,str], optional
51+
the list of run IDs
52+
uploaders : list[int,str], optional
53+
the list of uploader IDs
5454
tag : str, optional
55+
filter evaluation based on given tag
5556
5657
study : int, optional
5758
@@ -85,11 +86,11 @@ def list_evaluations(
8586
function=function,
8687
offset=offset,
8788
size=size,
88-
task=task,
89-
setup=setup,
90-
flow=flow,
91-
run=run,
92-
uploader=uploader,
89+
tasks=tasks,
90+
setups=setups,
91+
flows=flows,
92+
runs=runs,
93+
uploaders=uploaders,
9394
tag=tag,
9495
study=study,
9596
sort_order=sort_order,
@@ -99,11 +100,11 @@ def list_evaluations(
99100

100101
def _list_evaluations(
101102
function: str,
102-
task: Optional[List] = None,
103-
setup: Optional[List] = None,
104-
flow: Optional[List] = None,
105-
run: Optional[List] = None,
106-
uploader: Optional[List] = None,
103+
tasks: Optional[List] = None,
104+
setups: Optional[List] = None,
105+
flows: Optional[List] = None,
106+
runs: Optional[List] = None,
107+
uploaders: Optional[List] = None,
107108
study: Optional[int] = None,
108109
sort_order: Optional[str] = None,
109110
output_format: str = "object",
@@ -120,15 +121,16 @@ def _list_evaluations(
120121
function : str
121122
the evaluation function. e.g., predictive_accuracy
122123
123-
task : list, optional
124-
125-
setup: list, optional
126-
127-
flow : list, optional
128-
129-
run : list, optional
130-
131-
uploader : list, optional
124+
tasks : list[int,str], optional
125+
the list of task IDs
126+
setups: list[int,str], optional
127+
the list of setup IDs
128+
flows : list[int,str], optional
129+
the list of flow IDs
130+
runs : list[int,str], optional
131+
the list of run IDs
132+
uploaders : list[int,str], optional
133+
the list of uploader IDs
132134
133135
study : int, optional
134136
@@ -155,16 +157,16 @@ def _list_evaluations(
155157
if kwargs is not None:
156158
for operator, value in kwargs.items():
157159
api_call += "/%s/%s" % (operator, value)
158-
if task is not None:
159-
api_call += "/task/%s" % ",".join([str(int(i)) for i in task])
160-
if setup is not None:
161-
api_call += "/setup/%s" % ",".join([str(int(i)) for i in setup])
162-
if flow is not None:
163-
api_call += "/flow/%s" % ",".join([str(int(i)) for i in flow])
164-
if run is not None:
165-
api_call += "/run/%s" % ",".join([str(int(i)) for i in run])
166-
if uploader is not None:
167-
api_call += "/uploader/%s" % ",".join([str(int(i)) for i in uploader])
160+
if tasks is not None:
161+
api_call += "/task/%s" % ",".join([str(int(i)) for i in tasks])
162+
if setups is not None:
163+
api_call += "/setup/%s" % ",".join([str(int(i)) for i in setups])
164+
if flows is not None:
165+
api_call += "/flow/%s" % ",".join([str(int(i)) for i in flows])
166+
if runs is not None:
167+
api_call += "/run/%s" % ",".join([str(int(i)) for i in runs])
168+
if uploaders is not None:
169+
api_call += "/uploader/%s" % ",".join([str(int(i)) for i in uploaders])
168170
if study is not None:
169171
api_call += "/study/%d" % study
170172
if sort_order is not None:
@@ -276,11 +278,11 @@ def list_evaluations_setups(
276278
function: str,
277279
offset: Optional[int] = None,
278280
size: Optional[int] = None,
279-
task: Optional[List] = None,
280-
setup: Optional[List] = None,
281-
flow: Optional[List] = None,
282-
run: Optional[List] = None,
283-
uploader: Optional[List] = None,
281+
tasks: Optional[List] = None,
282+
setups: Optional[List] = None,
283+
flows: Optional[List] = None,
284+
runs: Optional[List] = None,
285+
uploaders: Optional[List] = None,
284286
tag: Optional[str] = None,
285287
per_fold: Optional[bool] = None,
286288
sort_order: Optional[str] = None,
@@ -299,15 +301,15 @@ def list_evaluations_setups(
299301
the number of runs to skip, starting from the first
300302
size : int, optional
301303
the maximum number of runs to show
302-
task : list[int], optional
304+
tasks : list[int], optional
303305
the list of task IDs
304-
setup: list[int], optional
306+
setups: list[int], optional
305307
the list of setup IDs
306-
flow : list[int], optional
308+
flows : list[int], optional
307309
the list of flow IDs
308-
run : list[int], optional
310+
runs : list[int], optional
309311
the list of run IDs
310-
uploader : list[int], optional
312+
uploaders : list[int], optional
311313
the list of uploader IDs
312314
tag : str, optional
313315
filter evaluation based on given tag
@@ -327,7 +329,7 @@ def list_evaluations_setups(
327329
-------
328330
dict or dataframe with hyperparameter settings as a list of tuples.
329331
"""
330-
if parameters_in_separate_columns and (flow is None or len(flow) != 1):
332+
if parameters_in_separate_columns and (flows is None or len(flows) != 1):
331333
raise ValueError(
332334
"Can set parameters_in_separate_columns to true " "only for single flow_id"
333335
)
@@ -337,11 +339,11 @@ def list_evaluations_setups(
337339
function=function,
338340
offset=offset,
339341
size=size,
340-
run=run,
341-
task=task,
342-
setup=setup,
343-
flow=flow,
344-
uploader=uploader,
342+
runs=runs,
343+
tasks=tasks,
344+
setups=setups,
345+
flows=flows,
346+
uploaders=uploaders,
345347
tag=tag,
346348
per_fold=per_fold,
347349
sort_order=sort_order,
@@ -359,24 +361,26 @@ def list_evaluations_setups(
359361
setup_chunks = np.array_split(
360362
ary=evals["setup_id"].unique(), indices_or_sections=((length - 1) // N) + 1
361363
)
362-
setups = pd.DataFrame()
363-
for setup in setup_chunks:
364-
result = pd.DataFrame(openml.setups.list_setups(setup=setup, output_format="dataframe"))
364+
setup_data = pd.DataFrame()
365+
for setups in setup_chunks:
366+
result = pd.DataFrame(
367+
openml.setups.list_setups(setup=setups, output_format="dataframe")
368+
)
365369
result.drop("flow_id", axis=1, inplace=True)
366370
# concat resulting setup chunks into a single dataframe
367-
setups = pd.concat([setups, result], ignore_index=True)
371+
setup_data = pd.concat([setup_data, result], ignore_index=True)
368372
parameters = []
369373
# Convert parameters of setup into list of tuples of (hyperparameter, value)
370-
for parameter_dict in setups["parameters"]:
374+
for parameter_dict in setup_data["parameters"]:
371375
if parameter_dict is not None:
372376
parameters.append(
373377
{param["full_name"]: param["value"] for param in parameter_dict.values()}
374378
)
375379
else:
376380
parameters.append({})
377-
setups["parameters"] = parameters
381+
setup_data["parameters"] = parameters
378382
# Merge setups with evaluations
379-
df = pd.merge(evals, setups, on="setup_id", how="left")
383+
df = pd.merge(evals, setup_data, on="setup_id", how="left")
380384

381385
if parameters_in_separate_columns:
382386
df = pd.concat([df.drop("parameters", axis=1), df["parameters"].apply(pd.Series)], axis=1)

0 commit comments

Comments
 (0)