@@ -41,7 +41,9 @@ def test_evaluation_list_filter_task(self):
 
         task_id = 7312
 
-        evaluations = openml.evaluations.list_evaluations("predictive_accuracy", tasks=[task_id])
+        evaluations = openml.evaluations.list_evaluations(
+            "predictive_accuracy", size=110, tasks=[task_id]
+        )
 
         self.assertGreater(len(evaluations), 100)
         for run_id in evaluations.keys():
@@ -56,7 +58,7 @@ def test_evaluation_list_filter_uploader_ID_16(self):
 
         uploader_id = 16
         evaluations = openml.evaluations.list_evaluations(
-            "predictive_accuracy", uploaders=[uploader_id], output_format="dataframe"
+            "predictive_accuracy", size=60, uploaders=[uploader_id], output_format="dataframe"
         )
         self.assertEqual(evaluations["uploader"].unique(), [uploader_id])
 
@@ -66,7 +68,9 @@ def test_evaluation_list_filter_uploader_ID_10(self):
         openml.config.server = self.production_server
 
         setup_id = 10
-        evaluations = openml.evaluations.list_evaluations("predictive_accuracy", setups=[setup_id])
+        evaluations = openml.evaluations.list_evaluations(
+            "predictive_accuracy", size=60, setups=[setup_id]
+        )
 
         self.assertGreater(len(evaluations), 50)
         for run_id in evaluations.keys():
@@ -81,7 +85,9 @@ def test_evaluation_list_filter_flow(self):
 
         flow_id = 100
 
-        evaluations = openml.evaluations.list_evaluations("predictive_accuracy", flows=[flow_id])
+        evaluations = openml.evaluations.list_evaluations(
+            "predictive_accuracy", size=10, flows=[flow_id]
+        )
 
         self.assertGreater(len(evaluations), 2)
         for run_id in evaluations.keys():
@@ -96,7 +102,9 @@ def test_evaluation_list_filter_run(self):
 
         run_id = 12
 
-        evaluations = openml.evaluations.list_evaluations("predictive_accuracy", runs=[run_id])
+        evaluations = openml.evaluations.list_evaluations(
+            "predictive_accuracy", size=2, runs=[run_id]
+        )
 
         self.assertEqual(len(evaluations), 1)
         for run_id in evaluations.keys():
@@ -164,7 +172,7 @@ def test_evaluation_list_sort(self):
         task_id = 6
         # Get all evaluations of the task
         unsorted_eval = openml.evaluations.list_evaluations(
-            "predictive_accuracy", offset=0, tasks=[task_id]
+            "predictive_accuracy", size=None, offset=0, tasks=[task_id]
         )
         # Get top 10 evaluations of the same task
         sorted_eval = openml.evaluations.list_evaluations(
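For context, a minimal sketch of the call pattern these hunks pin down, assuming a reachable OpenML server; the task id and size values are illustrative, taken from the hunks above. The size argument caps how many evaluation records the listing call fetches, while size=None (as in the sort test) lifts the cap and returns everything for the filter.

# Sketch only: filter evaluations by task and cap the result count,
# mirroring the size=... arguments added in this diff.
import openml

evals = openml.evaluations.list_evaluations(
    "predictive_accuracy",          # evaluation measure to list
    size=10,                        # fetch at most 10 records
    tasks=[7312],                   # restrict to one task (id from the diff)
    output_format="dataframe",      # pandas DataFrame instead of a dict
)
print(evals.shape)                  # at most 10 rows, one per run on task 7312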