Skip to content

Commit 4cc2f6b

Browse files
authored
openai[patch]: guard against None text completions in BaseOpenAI (#31514)
Some chat completions APIs will return a null `text` value in the completion output (even though the field is typed as a string).
1 parent abc8bf9 commit 4cc2f6b

File tree

2 files changed

+38
-1
lines changed

2 files changed

+38
-1
lines changed

libs/partners/openai/langchain_openai/llms/base.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ def _stream_response_to_generation_chunk(
4040
if not stream_response["choices"]:
4141
return GenerationChunk(text="")
4242
return GenerationChunk(
43-
text=stream_response["choices"][0]["text"],
43+
text=stream_response["choices"][0]["text"] or "",
4444
generation_info=dict(
4545
finish_reason=stream_response["choices"][0].get("finish_reason", None),
4646
logprobs=stream_response["choices"][0].get("logprobs", None),

libs/partners/openai/tests/unit_tests/llms/test_base.py

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,10 @@
11
import os
22

33
import pytest
4+
from langchain_core.outputs import GenerationChunk
45

56
from langchain_openai import OpenAI
7+
from langchain_openai.llms.base import _stream_response_to_generation_chunk
68

79
os.environ["OPENAI_API_KEY"] = "foo"
810

@@ -69,3 +71,38 @@ def token_encoder(text: str) -> list[int]:
6971

7072
llm = OpenAI(custom_get_token_ids=token_encoder)
7173
assert llm.get_token_ids("foo") == [1, 2, 3]
74+
75+
76+
def test_stream_response_to_generation_chunk() -> None:
77+
completion = {
78+
"id": "cmpl-abc123",
79+
"choices": [
80+
{"finish_reason": None, "index": 0, "logprobs": None, "text": "foo"}
81+
],
82+
"created": 1749214401,
83+
"model": "my-model",
84+
"object": "text_completion",
85+
"system_fingerprint": None,
86+
"usage": None,
87+
}
88+
chunk = _stream_response_to_generation_chunk(completion)
89+
assert chunk == GenerationChunk(
90+
text="foo", generation_info={"finish_reason": None, "logprobs": None}
91+
)
92+
93+
# Pathological completion with None text (e.g., from other providers)
94+
completion = {
95+
"id": "cmpl-abc123",
96+
"choices": [
97+
{"finish_reason": None, "index": 0, "logprobs": None, "text": None}
98+
],
99+
"created": 1749214401,
100+
"model": "my-model",
101+
"object": "text_completion",
102+
"system_fingerprint": None,
103+
"usage": None,
104+
}
105+
chunk = _stream_response_to_generation_chunk(completion)
106+
assert chunk == GenerationChunk(
107+
text="", generation_info={"finish_reason": None, "logprobs": None}
108+
)

0 commit comments

Comments (0)