8 changes: 8 additions & 0 deletions libs/partners/openai/langchain_openai/chat_models/base.py
@@ -163,6 +163,9 @@ def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
     # Also OpenAI returns None for tool invocations
     content = _dict.get("content", "") or ""
     additional_kwargs: dict = {}
+    # add reasoning blocks if available
+    if reasoning_content := _dict.get("reasoning_content"):
+        additional_kwargs["reasoning_content"] = reasoning_content
     if function_call := _dict.get("function_call"):
         additional_kwargs["function_call"] = dict(function_call)
     tool_calls = []
@@ -377,6 +380,8 @@ def _convert_delta_to_message_chunk(
     if role == "user" or default_class == HumanMessageChunk:
         return HumanMessageChunk(content=content, id=id_)
     if role == "assistant" or default_class == AIMessageChunk:
+        if reasoning_content := _dict.get("reasoning_content"):
+            additional_kwargs["reasoning_content"] = reasoning_content
         return AIMessageChunk(
             content=content,
             additional_kwargs=additional_kwargs,
@@ -1017,6 +1022,9 @@ def _convert_chunk_to_generation_chunk(
     ) -> ChatGenerationChunk | None:
         if chunk.get("type") == "content.delta":  # From beta.chat.completions.stream
             return None
+        # add reasoning blocks if available
+        if reasoning_content := chunk.get("reasoning_content"):
+            chunk["additional_kwargs"]["reasoning_content"] = reasoning_content
         token_usage = chunk.get("usage")
         choices = (
             chunk.get("choices", [])
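
For context, a minimal sketch of what the non-streaming change enables (the response dict and reasoning text are illustrative; `_convert_dict_to_message` is the private helper patched above, and this mirrors the unit test later in the diff):

    from langchain_openai.chat_models.base import _convert_dict_to_message

    # Response dict as a reasoning-capable backend (e.g. a LiteLLM proxy
    # fronting a thinking model) might return it; `reasoning_content` is a
    # non-standard field that stock OpenAI responses do not carry.
    raw = {
        "role": "assistant",
        "content": "The answer is 42.",
        "reasoning_content": "First, restate the question...",
    }

    msg = _convert_dict_to_message(raw)
    # The reasoning text now survives conversion alongside the visible content.
    assert msg.additional_kwargs["reasoning_content"] == raw["reasoning_content"]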
1 change: 1 addition & 0 deletions libs/partners/openai/pyproject.toml
@@ -44,6 +44,7 @@ test = [
"langchain",
"langchain-core",
"langchain-tests",
"httpx>=0.28.1",
]
lint = ["ruff>=0.13.1,<0.14.0"]
dev = ["langchain-core"]
Expand Down
libs/partners/openai/tests/integration_tests/chat_models/test_base.py
@@ -27,7 +27,7 @@
     _validate_tool_call_message,
     magic_function,
 )
-from pydantic import BaseModel, Field, field_validator
+from pydantic import BaseModel, Field, SecretStr, field_validator
 from typing_extensions import TypedDict

 from langchain_openai import ChatOpenAI
@@ -1320,3 +1320,30 @@ async def test_schema_parsing_failures_responses_api_async() -> None:
         assert e.response is not None  # type: ignore[attr-defined]
     else:
         raise AssertionError
+
+
+# Test thinking models with LiteLLM proxy
+@pytest.mark.scheduled
+@pytest.mark.skipif(
+    os.environ.get("REASONING_BASE_URL") is None
+    or os.environ.get("REASONING_API_KEY") is None,
+    reason="REASONING_BASE_URL or REASONING_API_KEY is not set",
+)
+def test_with_reasoning_proxy() -> None:
+    """Test reasoning models with proxy."""
+    chat = ChatOpenAI(
+        model="claude-sonnet-4-5-20250929",
+        reasoning_effort="medium",
+        base_url=os.environ["REASONING_BASE_URL"],
+        api_key=SecretStr(os.environ["REASONING_API_KEY"]),
+        max_retries=3,
+        # Disable SSL verification for self-signed certificates
+        http_client=httpx.Client(verify=False),  # noqa: S501
+    )
+    # Use a prompt that will trigger reasoning
+    message = HumanMessage(content="Reason and think about the meaning of life")
+    response = chat.invoke([message])
+    assert isinstance(response, AIMessage)
+    assert isinstance(response.content, str)
+    # Assert that reasoning_content is in additional_kwargs
+    assert "reasoning_content" in response.additional_kwargs
10 changes: 10 additions & 0 deletions libs/partners/openai/tests/unit_tests/chat_models/test_base.py
@@ -281,6 +281,16 @@ def test__convert_dict_to_message_tool_call() -> None:
     assert reverted_message_dict == message


+def test__convert_dict_to_message_reasoning_blocks() -> None:
+    message = {"role": "assistant", "content": "foo", "reasoning_content": "bar"}
+    result = _convert_dict_to_message(message)
+    expected_output = AIMessage(
+        content="foo",
+        additional_kwargs={"reasoning_content": "bar"},
+    )
+    assert result == expected_output
+
+
 class MockAsyncContextManager:
     def __init__(self, chunk_list: list) -> None:
         self.current_chunk = 0
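
An analogous unit-style check is possible for the streaming converter — a sketch, assuming `_convert_delta_to_message_chunk` keeps its existing (delta dict, default chunk class) signature:

    from langchain_core.messages import AIMessageChunk

    from langchain_openai.chat_models.base import _convert_delta_to_message_chunk

    delta = {"role": "assistant", "content": "", "reasoning_content": "bar"}
    chunk = _convert_delta_to_message_chunk(delta, AIMessageChunk)
    assert chunk.additional_kwargs["reasoning_content"] == "bar"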
2 changes: 2 additions & 0 deletions libs/partners/openai/uv.lock

Some generated files are not rendered by default.