Skip to content

Commit 3481880

Browse files
authored
Merge pull request #20899 from jmchilton/tool_request_types_1
Small cleanup of tool execution code.
2 parents: a522d64 and ceead59 · commit 3481880

File tree

1 file changed

+23
-8
lines changed

1 file changed

+23
-8
lines changed

lib/galaxy/tool_util/verify/interactor.py

Lines changed: 23 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -674,11 +674,14 @@ def run_tool(
674674
break
675675
submit_response_object = ensure_tool_run_response_okay(submit_response, "execute tool", inputs_tree)
676676
try:
677+
outputs = self.__dictify_outputs(submit_response_object)
678+
output_collections = self.__dictify_output_collections(submit_response_object)
679+
jobs = submit_response_object["jobs"]
677680
return RunToolResponse(
678681
inputs=inputs_tree,
679-
outputs=self.__dictify_outputs(submit_response_object),
680-
output_collections=self.__dictify_output_collections(submit_response_object),
681-
jobs=submit_response_object["jobs"],
682+
outputs=outputs,
683+
output_collections=output_collections,
684+
jobs=jobs,
682685
)
683686
except KeyError:
684687
message = (
@@ -766,7 +769,11 @@ def _summarize_history(self, history_id: str):
766769
dataset = history_content
767770

768771
print(ERROR_MESSAGE_DATASET_SEP)
769-
dataset_id = dataset.get("id", None)
772+
dataset_id: Optional[str] = dataset.get("id")
773+
if dataset_id is None:
774+
print("| *TEST FRAMEWORK ERROR - NO DATASET ID*")
775+
continue
776+
770777
print(f"| {dataset['hid']} - {dataset['name']} (HID - NAME) ")
771778
if history_content["history_content_type"] == "dataset_collection":
772779
history_contents_json = self._get(
@@ -819,15 +826,15 @@ def format_for_summary(self, blob, empty_message, prefix="| "):
819826
contents = "\n".join(f"{prefix}{line.strip()}" for line in io.StringIO(blob).readlines() if line.rstrip("\n\r"))
820827
return contents or f"{prefix}*{empty_message}*"
821828

822-
def _dataset_provenance(self, history_id, id):
829+
def _dataset_provenance(self, history_id: str, id: str) -> Dict[str, Any]:
823830
provenance = self._get(f"histories/{history_id}/contents/{id}/provenance").json()
824831
return provenance
825832

826-
def _dataset_info(self, history_id, id):
833+
def _dataset_info(self, history_id: str, id: str) -> Dict[str, Any]:
827834
dataset_json = self._get(f"histories/{history_id}/contents/{id}").json()
828835
return dataset_json
829836

830-
def __contents(self, history_id):
837+
def __contents(self, history_id: str) -> List[Dict[str, Any]]:
831838
history_contents_response = self._get(f"histories/{history_id}/contents")
832839
history_contents_response.raise_for_status()
833840
return history_contents_response.json()
@@ -843,7 +850,15 @@ def _state_ready(self, job_id: str, error_msg: str):
843850
)
844851
return None
845852

846-
def __submit_tool(self, history_id, tool_id, tool_input, extra_data=None, files=None, tool_version=None):
853+
def __submit_tool(
854+
self,
855+
history_id: str,
856+
tool_id: str,
857+
tool_input: Optional[dict],
858+
extra_data: Optional[dict] = None,
859+
files: Optional[dict] = None,
860+
tool_version: Optional[str] = None,
861+
):
847862
extra_data = extra_data or {}
848863
data = dict(
849864
history_id=history_id, tool_id=tool_id, inputs=dumps(tool_input), tool_version=tool_version, **extra_data

0 commit comments

Comments (0)