Skip to content

Commit afd349c

Browse files
openai: cache httpx client (#31260)
![Screenshot 2025-05-16 at 3 49 54 PM](https://github.com/user-attachments/assets/4b377384-a769-4487-b801-bd1aa0ed66c1) Co-authored-by: Sydney Runkle <[email protected]>
1 parent e6633a7 commit afd349c

File tree

2 files changed

+80
-2
lines changed

2 files changed

+80
-2
lines changed
libs/partners/openai/langchain_openai/chat_models/_client_utils.py

Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
1+
"""Helpers for creating OpenAI API clients.
2+
3+
This module allows for the caching of httpx clients to avoid creating new instances
4+
for each instance of ChatOpenAI.
5+
6+
Logic is largely replicated from openai._base_client.
7+
"""
8+
9+
import asyncio
10+
import os
11+
from functools import lru_cache
12+
from typing import Any, Optional
13+
14+
import openai
15+
16+
17+
class _SyncHttpxClientWrapper(openai.DefaultHttpxClient):
    """Sync httpx client that closes itself on garbage collection.

    Borrowed from openai._base_client.
    """

    def __del__(self) -> None:
        # Best-effort cleanup: only act if the client is still open.
        if not self.is_closed:
            try:
                self.close()
            except Exception:
                # __del__ must never raise, so swallow any shutdown error.
                pass
28+
29+
30+
class _AsyncHttpxClientWrapper(openai.DefaultAsyncHttpxClient):
    """Async httpx client that schedules its own close on garbage collection.

    Borrowed from openai._base_client.
    """

    def __del__(self) -> None:
        # Best-effort cleanup: only act if the client is still open.
        if not self.is_closed:
            try:
                # TODO(someday): support non asyncio runtimes here
                asyncio.get_running_loop().create_task(self.aclose())
            except Exception:
                # __del__ must never raise (e.g. no running event loop).
                pass
42+
43+
44+
@lru_cache
def _get_default_httpx_client(
    base_url: Optional[str], timeout: Any
) -> _SyncHttpxClientWrapper:
    """Return a cached sync httpx client for the given base URL and timeout.

    Falls back to the ``OPENAI_BASE_URL`` environment variable and then the
    public OpenAI endpoint when ``base_url`` is falsy. Identical
    ``(base_url, timeout)`` pairs share one client instance via ``lru_cache``.
    """
    resolved_base_url = (
        base_url
        or os.environ.get("OPENAI_BASE_URL")
        or "https://api.openai.com/v1"
    )
    return _SyncHttpxClientWrapper(base_url=resolved_base_url, timeout=timeout)
54+
55+
56+
@lru_cache
def _get_default_async_httpx_client(
    base_url: Optional[str], timeout: Any
) -> _AsyncHttpxClientWrapper:
    """Return a cached async httpx client for the given base URL and timeout.

    Falls back to the ``OPENAI_BASE_URL`` environment variable and then the
    public OpenAI endpoint when ``base_url`` is falsy. Identical
    ``(base_url, timeout)`` pairs share one client instance via ``lru_cache``.
    """
    resolved_base_url = (
        base_url
        or os.environ.get("OPENAI_BASE_URL")
        or "https://api.openai.com/v1"
    )
    return _AsyncHttpxClientWrapper(base_url=resolved_base_url, timeout=timeout)

libs/partners/openai/langchain_openai/chat_models/base.py

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -102,6 +102,11 @@
102102
from pydantic.v1 import BaseModel as BaseModelV1
103103
from typing_extensions import Self
104104

105+
from langchain_openai.chat_models._client_utils import (
106+
_get_default_async_httpx_client,
107+
_get_default_httpx_client,
108+
)
109+
105110
if TYPE_CHECKING:
106111
from openai.types.responses import Response
107112

@@ -621,7 +626,10 @@ def validate_environment(self) -> Self:
621626
self.http_client = httpx.Client(
622627
proxy=self.openai_proxy, verify=global_ssl_context
623628
)
624-
sync_specific = {"http_client": self.http_client}
629+
sync_specific = {
630+
"http_client": self.http_client
631+
or _get_default_httpx_client(self.openai_api_base, self.request_timeout)
632+
}
625633
self.root_client = openai.OpenAI(**client_params, **sync_specific) # type: ignore[arg-type]
626634
self.client = self.root_client.chat.completions
627635
if not self.async_client:
@@ -636,7 +644,12 @@ def validate_environment(self) -> Self:
636644
self.http_async_client = httpx.AsyncClient(
637645
proxy=self.openai_proxy, verify=global_ssl_context
638646
)
639-
async_specific = {"http_client": self.http_async_client}
647+
async_specific = {
648+
"http_client": self.http_async_client
649+
or _get_default_async_httpx_client(
650+
self.openai_api_base, self.request_timeout
651+
)
652+
}
640653
self.root_async_client = openai.AsyncOpenAI(
641654
**client_params,
642655
**async_specific, # type: ignore[arg-type]

0 commit comments

Comments
 (0)