@@ -247,11 +247,7 @@ def _append_fragment(f: LlmPredictionFragment, round_index: int) -> None:
247247
def divide(numerator: float, denominator: float) -> float:
    """Divide the given numerator by the given denominator. Return the result."""
    # NOTE: the docstring above is runtime behavior — it is the tool description
    # the SDK sends to the model — so it must not be reworded casually.
    # Return annotation narrowed from `float | str`: the old except-branch that
    # returned an error *string* was removed, so this now always returns float
    # or raises. A ZeroDivisionError is deliberately allowed to propagate: the
    # surrounding test asserts the SDK surfaces the failure to the user's
    # invalid-request callback as an LMStudioPredictionError whose __cause__
    # is the original ZeroDivisionError.
    return numerator / denominator
255251
256252
257253@pytest .mark .lmstudio
@@ -268,13 +264,13 @@ def test_tool_using_agent_error_handling(caplog: LogCap) -> None:
268264 )
269265 tools = [divide ]
270266 predictions : list [PredictionRoundResult ] = []
271- invalid_requests : list [tuple [ LMStudioPredictionError , ToolCallRequest ] ] = []
267+ request_failures : list [LMStudioPredictionError ] = []
272268
273269 def _handle_invalid_request (
274270 exc : LMStudioPredictionError , request : ToolCallRequest | None
275271 ) -> None :
276272 if request is not None :
277- invalid_requests .append (( exc , request ) )
273+ request_failures .append (exc )
278274
279275 act_result = llm .act (
280276 chat ,
@@ -284,8 +280,11 @@ def _handle_invalid_request(
284280 )
285281 assert len (predictions ) > 1
286282 assert act_result .rounds == len (predictions )
287- # Test case is currently suppressing exceptions inside the tool call
288- assert invalid_requests == []
283+ # Ensure the tool call failure was reported to the user callback
284+ assert len (request_failures ) == 1
285+ tool_failure_exc = request_failures [0 ]
286+ assert isinstance (tool_failure_exc , LMStudioPredictionError )
287+ assert isinstance (tool_failure_exc .__cause__ , ZeroDivisionError )
289288 # If the content checks prove flaky in practice, they can be dropped
290289 assert "divide" in predictions [- 1 ].content
291290 assert "zero" in predictions [- 1 ].content
0 commit comments