
Commit 4e87914

Fix a bug where debug logging fails with Iterable objects
1 parent f37f70b commit 4e87914
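For context, a minimal sketch of the failure mode being fixed (the payload below is hypothetical, not taken from the SDK): json.dumps cannot serialize arbitrary Iterable objects such as generators, so the debug-logging calls in the model classes raised a TypeError whenever a converted message or tool payload contained one.

import json

# Hypothetical payload: a message whose "content" field is a generator
# (an Iterable) rather than a plain list.
payload = [{"role": "assistant", "content": (part for part in ["Hey, what's up?"])}]

try:
    json.dumps(payload, indent=2, ensure_ascii=False)
except TypeError as exc:
    # e.g. "Object of type generator is not JSON serializable"
    print(exc)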


5 files changed: +93 additions, -10 deletions

src/agents/extensions/models/litellm_model.py

Lines changed: 16 additions & 5 deletions

@@ -48,6 +48,7 @@
 from ...tracing.span_data import GenerationSpanData
 from ...tracing.spans import Span
 from ...usage import Usage
+from ...util._json import _to_dump_compatible


 class InternalChatCompletionMessage(ChatCompletionMessage):
@@ -286,10 +287,20 @@ async def _fetch_response(
         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Calling LLM")
         else:
+            messages_json = json.dumps(
+                _to_dump_compatible(converted_messages),
+                indent=2,
+                ensure_ascii=False,
+            )
+            tools_json = json.dumps(
+                _to_dump_compatible(converted_tools),
+                indent=2,
+                ensure_ascii=False,
+            )
             logger.debug(
                 f"Calling Litellm model: {self.model}\n"
-                f"{json.dumps(converted_messages, indent=2, ensure_ascii=False)}\n"
-                f"Tools:\n{json.dumps(converted_tools, indent=2, ensure_ascii=False)}\n"
+                f"{messages_json}\n"
+                f"Tools:\n{tools_json}\n"
                 f"Stream: {stream}\n"
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"
@@ -369,9 +380,9 @@ def convert_message_to_openai(
         if message.role != "assistant":
             raise ModelBehaviorError(f"Unsupported role: {message.role}")

-        tool_calls: list[
-            ChatCompletionMessageFunctionToolCall | ChatCompletionMessageCustomToolCall
-        ] | None = (
+        tool_calls: (
+            list[ChatCompletionMessageFunctionToolCall | ChatCompletionMessageCustomToolCall] | None
+        ) = (
             [LitellmConverter.convert_tool_call_to_openai(tool) for tool in message.tool_calls]
             if message.tool_calls
             else None

src/agents/models/openai_chatcompletions.py

Lines changed: 13 additions & 2 deletions

@@ -23,6 +23,7 @@
 from ..tracing.span_data import GenerationSpanData
 from ..tracing.spans import Span
 from ..usage import Usage
+from ..util._json import _to_dump_compatible
 from .chatcmpl_converter import Converter
 from .chatcmpl_helpers import HEADERS, ChatCmplHelpers
 from .chatcmpl_stream_handler import ChatCmplStreamHandler
@@ -258,9 +259,19 @@ async def _fetch_response(
         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Calling LLM")
         else:
+            messages_json = json.dumps(
+                _to_dump_compatible(converted_messages),
+                indent=2,
+                ensure_ascii=False,
+            )
+            tools_json = json.dumps(
+                _to_dump_compatible(converted_tools),
+                indent=2,
+                ensure_ascii=False,
+            )
             logger.debug(
-                f"{json.dumps(converted_messages, indent=2, ensure_ascii=False)}\n"
-                f"Tools:\n{json.dumps(converted_tools, indent=2, ensure_ascii=False)}\n"
+                f"{messages_json}\n"
+                f"Tools:\n{tools_json}\n"
                 f"Stream: {stream}\n"
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"

src/agents/models/openai_responses.py

Lines changed: 13 additions & 2 deletions

@@ -38,6 +38,7 @@
 )
 from ..tracing import SpanError, response_span
 from ..usage import Usage
+from ..util._json import _to_dump_compatible
 from ..version import __version__
 from .interface import Model, ModelTracing

@@ -263,10 +264,20 @@ async def _fetch_response(
         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Calling LLM")
         else:
+            input_json = json.dumps(
+                _to_dump_compatible(list_input),
+                indent=2,
+                ensure_ascii=False,
+            )
+            tools_json = json.dumps(
+                _to_dump_compatible(converted_tools.tools),
+                indent=2,
+                ensure_ascii=False,
+            )
             logger.debug(
                 f"Calling LLM {self.model} with input:\n"
-                f"{json.dumps(list_input, indent=2, ensure_ascii=False)}\n"
-                f"Tools:\n{json.dumps(converted_tools.tools, indent=2, ensure_ascii=False)}\n"
+                f"{input_json}\n"
+                f"Tools:\n{tools_json}\n"
                 f"Stream: {stream}\n"
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"

src/agents/util/_json.py

Lines changed: 19 additions & 1 deletion

@@ -1,6 +1,7 @@
 from __future__ import annotations

-from typing import Literal
+from collections.abc import Iterable
+from typing import Any, Literal

 from pydantic import TypeAdapter, ValidationError
 from typing_extensions import TypeVar
@@ -29,3 +30,20 @@ def validate_json(json_str: str, type_adapter: TypeAdapter[T], partial: bool) ->
         raise ModelBehaviorError(
             f"Invalid JSON when parsing {json_str} for {type_adapter}; {e}"
         ) from e
+
+
+def _to_dump_compatible(obj: Any) -> Any:
+    return _to_dump_compatible_internal(obj)
+
+
+def _to_dump_compatible_internal(obj: Any) -> Any:
+    if isinstance(obj, dict):
+        return {k: _to_dump_compatible_internal(v) for k, v in obj.items()}
+
+    if isinstance(obj, (list, tuple)):
+        return [_to_dump_compatible_internal(x) for x in obj]
+
+    if isinstance(obj, Iterable) and not isinstance(obj, (str, bytes, bytearray)):
+        return [_to_dump_compatible_internal(x) for x in obj]
+
+    return obj
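
As a minimal usage sketch (the data below is made up, not from the SDK): _to_dump_compatible recursively copies dicts, lists, and tuples, and materializes any other non-string/bytes Iterable into a list, so the result can be handed directly to json.dumps.

import json

from agents.util._json import _to_dump_compatible

# Hypothetical payload mixing a dict, a list, and a generator (an Iterable).
payload = {
    "role": "assistant",
    "content": (chunk for chunk in ["Hey, ", "what's up?"]),
    "annotations": [],
}

dumpable = _to_dump_compatible(payload)
# The generator under "content" has been materialized into a plain list,
# so json.dumps no longer raises a TypeError.
print(json.dumps(dumpable, indent=2, ensure_ascii=False))

Strings, bytes, and bytearrays are deliberately excluded from the Iterable branch, so they are returned as-is rather than being exploded into lists of characters.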

tests/utils/test_json.py

Lines changed: 32 additions & 0 deletions

@@ -0,0 +1,32 @@
+import json
+
+from openai.types.responses.response_output_message_param import ResponseOutputMessageParam
+from openai.types.responses.response_output_text_param import ResponseOutputTextParam
+
+from agents.util._json import _to_dump_compatible
+
+
+def test_to_dump_compatible():
+    # Given a list of message dictionaries, ensure the returned list is a deep copy.
+    input_iter = [
+        ResponseOutputMessageParam(
+            id="a75654dc-7492-4d1c-bce0-89e8312fbdd7",
+            content=[
+                ResponseOutputTextParam(
+                    type="output_text",
+                    text="Hey, what's up?",
+                    annotations=[],
+                )
+            ].__iter__(),
+            role="assistant",
+            status="completed",
+            type="message",
+        )
+    ].__iter__()
+    # this fails if any of the properties are Iterable objects.
+    # result = json.dumps(input_iter)
+    result = json.dumps(_to_dump_compatible(input_iter))
+    assert (
+        result
+        == """[{"id": "a75654dc-7492-4d1c-bce0-89e8312fbdd7", "content": [{"type": "output_text", "text": "Hey, what's up?", "annotations": []}], "role": "assistant", "status": "completed", "type": "message"}]"""  # noqa: E501
+    )
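
The .__iter__() calls are what make this a regression test for the bug: they turn both the top-level message list and the nested content list into plain iterators, so the direct json.dumps call kept above as a comment would raise a TypeError, while the _to_dump_compatible result serializes cleanly.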
