Commit 3b85e34

KatHaruto authored and rm-openai committed
Fix Gemini API content filter handling (openai#746)
- avoid AttributeError when Gemini API returns `None` for chat message
- return empty output if message is filtered
- add regression test
- `make format`
- `make lint`
- `make mypy`
- `make tests`

Towards openai#744
1 parent 6593575 commit 3b85e34
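
For context, a minimal sketch of the failure mode the commit message describes. The shapes are assumptions: the `Choice` is faked the same way the regression test below fakes it, standing in for what Gemini's OpenAI-compatible endpoint returns when it filters a response.

```python
# Minimal repro sketch of the pre-fix crash (assumption: a content-filtered
# response surfaces as a choice whose message is None, as the test below fakes).
from openai.types.chat import ChatCompletionMessage
from openai.types.chat.chat_completion import Choice

choice = Choice(
    index=0,
    finish_reason="content_filter",
    message=ChatCompletionMessage(role="assistant", content="ignored"),
)
choice.message = None  # type: ignore[assignment]  # mimic the filtered payload

# The pre-fix code accessed the message unconditionally:
try:
    choice.message.model_dump()  # type: ignore[union-attr]
except AttributeError as e:
    print(e)  # 'NoneType' object has no attribute 'model_dump'
```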

File tree

2 files changed, +52 -5 lines changed

src/agents/models/openai_chatcompletions.py

Lines changed: 18 additions & 5 deletions
```diff
@@ -71,12 +71,23 @@ async def get_response(
             stream=False,
         )
 
+        first_choice = response.choices[0]
+        message = first_choice.message
+
         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Received model response")
         else:
-            logger.debug(
-                f"LLM resp:\n{json.dumps(response.choices[0].message.model_dump(), indent=2)}\n"
-            )
+            if message is not None:
+                logger.debug(
+                    "LLM resp:\n%s\n",
+                    json.dumps(message.model_dump(), indent=2, ensure_ascii=False),
+                )
+            else:
+                logger.debug(
+                    "LLM resp had no message. finish_reason: %s",
+                    first_choice.finish_reason,
+                    ensure_ascii=False,
+                )
 
         usage = (
             Usage(
@@ -101,13 +112,15 @@ async def get_response(
             else Usage()
         )
         if tracing.include_data():
-            span_generation.span_data.output = [response.choices[0].message.model_dump()]
+            span_generation.span_data.output = (
+                [message.model_dump()] if message is not None else []
+            )
         span_generation.span_data.usage = {
            "input_tokens": usage.input_tokens,
            "output_tokens": usage.output_tokens,
         }
 
-        items = Converter.message_to_output_items(response.choices[0].message)
+        items = Converter.message_to_output_items(message) if message is not None else []
 
         return ModelResponse(
             output=items,
```
tests/test_openai_chatcompletions.py

Lines changed: 34 additions & 0 deletions
```diff
@@ -191,6 +191,40 @@ async def patched_fetch_response(self, *args, **kwargs):
     assert fn_call_item.arguments == "{'x':1}"
 
 
+@pytest.mark.allow_call_model_methods
+@pytest.mark.asyncio
+async def test_get_response_with_no_message(monkeypatch) -> None:
+    """If the model returns no message, get_response should return an empty output."""
+    msg = ChatCompletionMessage(role="assistant", content="ignored")
+    choice = Choice(index=0, finish_reason="content_filter", message=msg)
+    choice.message = None  # type: ignore[assignment]
+    chat = ChatCompletion(
+        id="resp-id",
+        created=0,
+        model="fake",
+        object="chat.completion",
+        choices=[choice],
+        usage=None,
+    )
+
+    async def patched_fetch_response(self, *args, **kwargs):
+        return chat
+
+    monkeypatch.setattr(OpenAIChatCompletionsModel, "_fetch_response", patched_fetch_response)
+    model = OpenAIProvider(use_responses=False).get_model("gpt-4")
+    resp: ModelResponse = await model.get_response(
+        system_instructions=None,
+        input="",
+        model_settings=ModelSettings(),
+        tools=[],
+        output_schema=None,
+        handoffs=[],
+        tracing=ModelTracing.DISABLED,
+        previous_response_id=None,
+    )
+    assert resp.output == []
+
+
 @pytest.mark.asyncio
 async def test_fetch_response_non_stream(monkeypatch) -> None:
     """
```
