Skip to content

Commit b268ab6

Browse files
authored
openai[patch]: fix client caching when request_timeout is specified via httpx.Timeout (#31698)
Resolves #31697
1 parent 4ee6112 commit b268ab6

File tree

2 files changed

+71
-4
lines changed

2 files changed

+71
-4
lines changed

libs/partners/openai/langchain_openai/chat_models/_client_utils.py

Lines changed: 46 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -41,8 +41,7 @@ def __del__(self) -> None:
4141
pass
4242

4343

44-
@lru_cache
45-
def _get_default_httpx_client(
44+
def _build_sync_httpx_client(
4645
base_url: Optional[str], timeout: Any
4746
) -> _SyncHttpxClientWrapper:
4847
return _SyncHttpxClientWrapper(
@@ -53,8 +52,7 @@ def _get_default_httpx_client(
5352
)
5453

5554

56-
@lru_cache
57-
def _get_default_async_httpx_client(
55+
def _build_async_httpx_client(
5856
base_url: Optional[str], timeout: Any
5957
) -> _AsyncHttpxClientWrapper:
6058
return _AsyncHttpxClientWrapper(
@@ -63,3 +61,47 @@ def _get_default_async_httpx_client(
6361
or "https://api.openai.com/v1",
6462
timeout=timeout,
6563
)
64+
65+
66+
@lru_cache
def _cached_sync_httpx_client(
    base_url: Optional[str], timeout: Any
) -> _SyncHttpxClientWrapper:
    """Memoized variant of ``_build_sync_httpx_client``.

    Only safe to call with hashable arguments — ``lru_cache`` keys on them.
    """
    client = _build_sync_httpx_client(base_url, timeout)
    return client
71+
72+
73+
@lru_cache
def _cached_async_httpx_client(
    base_url: Optional[str], timeout: Any
) -> _AsyncHttpxClientWrapper:
    """Memoized variant of ``_build_async_httpx_client``.

    Only safe to call with hashable arguments — ``lru_cache`` keys on them.
    """
    client = _build_async_httpx_client(base_url, timeout)
    return client
78+
79+
80+
def _get_default_httpx_client(
    base_url: Optional[str], timeout: Any
) -> _SyncHttpxClientWrapper:
    """Get default httpx client.

    Serves the client from an ``lru_cache`` when ``timeout`` is hashable;
    an unhashable timeout (e.g. ``httpx.Timeout``) gets a fresh, uncached
    client instead, since it cannot be used as a cache key.
    """
    cacheable = True
    try:
        hash(timeout)
    except TypeError:
        cacheable = False
    if cacheable:
        return _cached_sync_httpx_client(base_url, timeout)
    return _build_sync_httpx_client(base_url, timeout)
93+
94+
95+
def _get_default_async_httpx_client(
    base_url: Optional[str], timeout: Any
) -> _AsyncHttpxClientWrapper:
    """Get default async httpx client.

    Serves the client from an ``lru_cache`` when ``timeout`` is hashable;
    an unhashable timeout (e.g. ``httpx.Timeout``) gets a fresh, uncached
    client instead, since it cannot be used as a cache key.
    """
    cacheable = True
    try:
        hash(timeout)
    except TypeError:
        cacheable = False
    if cacheable:
        return _cached_async_httpx_client(base_url, timeout)
    return _build_async_httpx_client(base_url, timeout)

libs/partners/openai/tests/unit_tests/chat_models/test_base.py

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
from typing import Any, Literal, Optional, Union, cast
77
from unittest.mock import AsyncMock, MagicMock, patch
88

9+
import httpx
910
import pytest
1011
from langchain_core.load import dumps, loads
1112
from langchain_core.messages import (
@@ -74,6 +75,30 @@ def test_openai_model_param() -> None:
7475
assert llm.max_tokens == 10
7576

7677

78+
def test_openai_client_caching() -> None:
    """Test that the OpenAI client is cached."""
    base = ChatOpenAI(model="gpt-4.1-mini")

    # Identical construction parameters -> the same cached httpx client.
    duplicate = ChatOpenAI(model="gpt-4.1-mini")
    assert base.root_client._client is duplicate.root_client._client

    # A different base_url is a different cache key -> distinct client.
    custom_url = ChatOpenAI(model="gpt-4.1-mini", base_url="foo")
    assert base.root_client._client is not custom_url.root_client._client

    # timeout=None matches the default, so the cached client is reused.
    none_timeout = ChatOpenAI(model="gpt-4.1-mini", timeout=None)
    assert base.root_client._client is none_timeout.root_client._client

    # A non-default hashable timeout -> different cache entry, distinct client.
    int_timeout = ChatOpenAI(model="gpt-4.1-mini", timeout=3)
    assert base.root_client._client is not int_timeout.root_client._client

    # An unhashable httpx.Timeout bypasses the cache -> distinct client.
    obj_timeout = ChatOpenAI(
        model="gpt-4.1-mini", timeout=httpx.Timeout(timeout=60.0, connect=5.0)
    )
    assert base.root_client._client is not obj_timeout.root_client._client

    # A (connect, read) tuple timeout is also a non-default key -> distinct.
    tuple_timeout = ChatOpenAI(model="gpt-4.1-mini", timeout=(5, 1))
    assert base.root_client._client is not tuple_timeout.root_client._client
100+
101+
77102
def test_openai_o1_temperature() -> None:
78103
llm = ChatOpenAI(model="o1-preview")
79104
assert llm.temperature == 1

0 commit comments

Comments (0)