Commit 882b06b

Add debug output (#860)
* Add debug output
* try to please test server
* redirect one more test to the live server
* add commit as requested by Jan
* Removing hard coded retrievals from task example
* Improved use of pandas retrieval
1 parent 4a13100 commit 882b06b

4 files changed: +16 -10 lines changed


examples/30_extended/tasks_tutorial.py

Lines changed: 5 additions & 5 deletions
@@ -196,11 +196,11 @@
     # Error code for 'task already exists'
     if e.code == 614:
         # Lookup task
-        tasks = openml.tasks.list_tasks(data_id=128, output_format='dataframe').to_numpy()
-        tasks = tasks[tasks[:, 4] == "Supervised Classification"]
-        tasks = tasks[tasks[:, 6] == "10-fold Crossvalidation"]
-        tasks = tasks[tasks[:, 19] == "predictive_accuracy"]
-        task_id = tasks[0][0]
+        tasks = openml.tasks.list_tasks(data_id=128, output_format='dataframe')
+        tasks = tasks.query('task_type == "Supervised Classification" '
+                            'and estimation_procedure == "10-fold Crossvalidation" '
+                            'and evaluation_measures == "predictive_accuracy"')
+        task_id = tasks.loc[:, "tid"].values[0]
         print("Task already exists. Task ID is", task_id)
 
 # reverting to prod server
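
The change above replaces positional numpy indexing with column-name filtering via pandas. A minimal, standalone sketch of the same retrieval pattern, using only the calls that appear in the diff (running it requires an installed and configured openml client):

import openml

# List all tasks defined on dataset 128 as a pandas DataFrame.
tasks = openml.tasks.list_tasks(data_id=128, output_format='dataframe')

# Filter on named columns rather than hard-coded column positions, so the
# lookup keeps working even if the column order of the listing changes.
tasks = tasks.query('task_type == "Supervised Classification" '
                    'and estimation_procedure == "10-fold Crossvalidation" '
                    'and evaluation_measures == "predictive_accuracy"')

# "tid" is the task-id column of the listing.
task_id = tasks.loc[:, "tid"].values[0]
print("Matching task ID is", task_id)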

tests/test_evaluations/test_evaluation_functions.py

Lines changed: 2 additions & 1 deletion
@@ -149,8 +149,9 @@ def test_evaluation_list_per_fold(self):
         self.assertIsNone(evaluations[run_id].values)
 
     def test_evaluation_list_sort(self):
+        openml.config.server = self.production_server
         size = 10
-        task_id = 115
+        task_id = 6
         # Get all evaluations of the task
         unsorted_eval = openml.evaluations.list_evaluations(
             "predictive_accuracy", offset=0, task=[task_id])

tests/test_runs/test_run_functions.py

Lines changed: 7 additions & 2 deletions
@@ -79,7 +79,7 @@ def _wait_for_processed_run(self, run_id, max_waiting_time_seconds):
             if len(run.evaluations) > 0:
                 return
             else:
-                time.sleep(10)
+                time.sleep(3)
         raise RuntimeError('Could not find any evaluations! Please check whether run {} was '
                            'evaluated correctly on the server'.format(run_id))
 
@@ -1120,8 +1120,13 @@ def test_get_run(self):
         )
 
     def _check_run(self, run):
+        # This tests that the API returns seven entries for each run
+        # Check out https://openml.org/api/v1/xml/run/list/flow/1154
+        # They are run_id, task_id, task_type_id, setup_id, flow_id, uploader, upload_time
+        # error_message and run_details exist, too, but are not used so far. We need to update
+        # this check once they are used!
         self.assertIsInstance(run, dict)
-        self.assertEqual(len(run), 7)
+        assert len(run) == 7, str(run)
 
     def test_get_runs_list(self):
         # TODO: comes from live, no such lists on test
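
The comment added in _check_run documents that the run-listing API returns seven entries per run. A small sketch that inspects a listing for the flow mentioned in that comment (flow 1154 comes from the diff; the size argument mirrors its use in test_utils below; actual results depend on the server's contents):

import openml

# List a few runs for flow 1154 and look at the fields of each entry.
runs = openml.runs.list_runs(flow=[1154], size=5)

for run_id, run in runs.items():
    # Per the added comment, each entry should carry seven fields:
    # run_id, task_id, task_type_id, setup_id, flow_id, uploader, upload_time.
    print(run_id, len(run), sorted(run))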

tests/test_utils/test_utils.py

Lines changed: 2 additions & 2 deletions
@@ -75,14 +75,14 @@ def test_list_all_for_setups(self):
         self.assertEqual(len(setups), required_size)
 
     def test_list_all_for_runs(self):
-        required_size = 48
+        required_size = 21
         runs = openml.runs.list_runs(batch_size=self._batch_size, size=required_size)
 
         # might not be on test server after reset, please rerun test at least once if fails
         self.assertEqual(len(runs), required_size)
 
     def test_list_all_for_evaluations(self):
-        required_size = 57
+        required_size = 22
         # TODO apparently list_evaluations function does not support kwargs
         evaluations = openml.evaluations.list_evaluations(function='predictive_accuracy',
                                                           size=required_size)
