Skip to content

Commit 2b1b90b

Browse files
authored
Include tracebacks in errors returned from failing requests (#343)
* Include tracebacks in errors returned from failing requests
* Address a linter error
* Supply the full schema when processing requests and responses
1 parent b150e23 commit 2b1b90b

File tree

2 files changed

+37
-19
lines changed

2 files changed

+37
-19
lines changed

merlin/systems/triton/models/executor_model.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,10 +95,17 @@ def execute(self, request):
9595
be the same as `requests`
9696
"""
9797
inputs = triton_request_to_tensor_table(request, self.ensemble.input_schema)
98+
9899
try:
99100
outputs = self.ensemble.transform(inputs, runtime=TritonExecutorRuntime())
100101
except Exception as exc:
101-
raise pb_utils.TritonModelException(str(exc)) from exc
102+
import traceback
103+
104+
raise pb_utils.TritonModelException(
105+
f"Error: {type(exc)} - {str(exc)}, "
106+
f"Traceback: {traceback.format_tb(exc.__traceback__)}"
107+
) from exc
108+
102109
return tensor_table_to_triton_response(outputs, self.ensemble.output_schema)
103110

104111

merlin/systems/triton/models/workflow_model.py

Lines changed: 29 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -97,24 +97,35 @@ def execute(self, request):
9797
"""Transforms the input batches by running through a NVTabular workflow.transform
9898
function.
9999
"""
100-
# transform the triton tensors to a dict of name:numpy tensor
101-
input_tensors = {
102-
name: _convert_tensor(pb_utils.get_input_tensor_by_name(request, name))
103-
for name in self.input_dtypes
104-
}
105-
106-
# multihots are represented as a tuple of (values, offsets)
107-
for name, dtype in self.input_multihots.items():
108-
values = _convert_tensor(pb_utils.get_input_tensor_by_name(request, name + "__values"))
109-
offsets = _convert_tensor(
110-
pb_utils.get_input_tensor_by_name(request, name + "__offsets")
111-
)
112-
input_tensors[name] = (values, offsets)
113-
114-
transformed = self.runner.run_workflow(input_tensors)
115-
result = [pb_utils.Tensor(name, data) for name, data in transformed.items()]
116-
117-
return pb_utils.InferenceResponse(result)
100+
try:
101+
# transform the triton tensors to a dict of name:numpy tensor
102+
input_tensors = {
103+
name: _convert_tensor(pb_utils.get_input_tensor_by_name(request, name))
104+
for name in self.input_dtypes
105+
}
106+
107+
# multihots are represented as a tuple of (values, offsets)
108+
for name, dtype in self.input_multihots.items():
109+
values = _convert_tensor(
110+
pb_utils.get_input_tensor_by_name(request, name + "__values")
111+
)
112+
offsets = _convert_tensor(
113+
pb_utils.get_input_tensor_by_name(request, name + "__offsets")
114+
)
115+
input_tensors[name] = (values, offsets)
116+
117+
transformed = self.runner.run_workflow(input_tensors)
118+
result = [pb_utils.Tensor(name, data) for name, data in transformed.items()]
119+
120+
return pb_utils.InferenceResponse(result)
121+
122+
except Exception as exc:
123+
import traceback
124+
125+
raise pb_utils.TritonModelException(
126+
f"Error: {type(exc)} - {str(exc)}, "
127+
f"Traceback: {traceback.format_tb(exc.__traceback__)}"
128+
) from exc
118129

119130
def _is_list_dtype(self, column: str) -> bool:
120131
"""Check if a column of a Workflow contains list elements"""

0 commit comments

Comments (0)