
Commit 30f6c9f

Authored by cbornet and Erick Friis
community: Use Blockbuster to detect blocking calls in asyncio during tests (#29609)
Same as #29043 for langchain-community.

**Dependencies:**
- blockbuster (test)

**Twitter handle:** cbornet_

Co-authored-by: Erick Friis <[email protected]>
1 parent 3a57a28 commit 30f6c9f
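For context on what the new test dependency does: Blockbuster patches common blocking primitives (such as time.sleep, os.stat, and file/socket I/O) and raises an error when one of them is called while an asyncio event loop is running in the same thread. A minimal sketch, not taken from this commit, assuming blockbuster_ctx() with no arguments enables detection globally and that BlockingError is the exception it raises:

import asyncio
import time

from blockbuster import BlockingError, blockbuster_ctx


async def leaky() -> None:
    # time.sleep blocks the event-loop thread; Blockbuster flags it.
    time.sleep(0.1)


def main() -> None:
    with blockbuster_ctx():
        try:
            asyncio.run(leaky())
        except BlockingError as err:
            print(f"blocking call detected: {err}")


if __name__ == "__main__":
    main()

The autouse fixture added in conftest.py below applies the same detection to every unit test in langchain_community.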

File tree

8 files changed: +104 -26 lines changed


libs/community/langchain_community/utils/openai.py

Lines changed: 2 additions & 0 deletions
@@ -1,10 +1,12 @@
 from __future__ import annotations
 
+import functools
 from importlib.metadata import version
 
 from packaging.version import parse
 
 
+@functools.cache
 def is_openai_v1() -> bool:
     """Return whether OpenAI API is v1 or more."""
     _version = parse(version("openai"))
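The new functools.cache decorator matters under Blockbuster: importlib.metadata.version() reads package metadata from disk, which is why conftest.py below allow-lists os.stat inside is_openai_v1; memoizing the result keeps that blocking lookup to at most one call per process. A small sketch of the cached check; the final return line is an assumption, since the hunk above cuts off before the function ends:

import functools
from importlib.metadata import version

from packaging.version import parse


@functools.cache
def is_openai_v1() -> bool:
    """Return whether OpenAI API is v1 or more."""
    _version = parse(version("openai"))
    return _version.major >= 1  # assumed comparison; not shown in the hunk

After the first call, repeated checks return the memoized boolean without touching the filesystem again.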

libs/community/pyproject.toml

Lines changed: 4 additions & 8 deletions
@@ -4,7 +4,7 @@ build-backend = "pdm.backend"
 
 [project]
 authors = []
-license = {text = "MIT"}
+license = { text = "MIT" }
 requires-python = "<4.0,>=3.9"
 dependencies = [
     "langchain-core<1.0.0,>=0.3.34",

@@ -48,20 +48,16 @@ test = [
     "syrupy<5.0.0,>=4.0.2",
     "requests-mock<2.0.0,>=1.11.0",
     "pytest-xdist<4.0.0,>=3.6.1",
+    "blockbuster<1.6,>=1.5.13",
     "cffi<1.17.1; python_version < \"3.10\"",
     "cffi; python_version >= \"3.10\"",
     "langchain-core @ file:///${PROJECT_ROOT}/../core",
     "langchain @ file:///${PROJECT_ROOT}/../langchain",
     "langchain-tests @ file:///${PROJECT_ROOT}/../standard-tests",
     "toml>=0.10.2",
 ]
-codespell = [
-    "codespell<3.0.0,>=2.2.0",
-]
-test_integration = [
-    "pytest-vcr<2.0.0,>=1.0.2",
-    "vcrpy<7,>=6",
-]
+codespell = ["codespell<3.0.0,>=2.2.0"]
+test_integration = ["pytest-vcr<2.0.0,>=1.0.2", "vcrpy<7,>=6"]
 lint = [
     "ruff<0.6,>=0.5",
     "cffi<1.17.1; python_version < \"3.10\"",

libs/community/tests/unit_tests/conftest.py

Lines changed: 16 additions & 0 deletions
@@ -1,12 +1,28 @@
 """Configuration for unit tests."""
 
+from collections.abc import Iterator
 from importlib import util
 from typing import Dict, Sequence
 
 import pytest
+from blockbuster import blockbuster_ctx
 from pytest import Config, Function, Parser
 
 
+@pytest.fixture(autouse=True)
+def blockbuster() -> Iterator[None]:
+    with blockbuster_ctx("langchain_community") as bb:
+        (
+            bb.functions["os.stat"]
+            .can_block_in("langchain_community/utils/openai.py", "is_openai_v1")
+            .can_block_in("httpx/_client.py", "_init_transport")
+        )
+        bb.functions["os.path.abspath"].can_block_in(
+            "sqlalchemy/dialects/sqlite/pysqlite.py", "create_connect_args"
+        )
+        yield
+
+
 def pytest_addoption(parser: Parser) -> None:
     """Add custom command line options to pytest."""
     parser.addoption(
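To illustrate how the can_block_in() exemptions in the fixture behave, here is a hypothetical, self-contained sketch (function names are invented; it assumes, as the fixture's usage suggests, that can_block_in() matches on the calling frame's file path and function name, and that BlockingError is raised for non-exempted calls):

import asyncio
import os

from blockbuster import BlockingError, blockbuster_ctx


async def allowed_stat() -> None:
    os.stat(".")  # exempted below, so it is not flagged


async def forbidden_stat() -> None:
    os.stat(".")  # not exempted, so Blockbuster flags it


def main() -> None:
    with blockbuster_ctx() as bb:
        # Scope the exemption to a specific file/function pair, mirroring the
        # fixture's allow-list for is_openai_v1 and httpx's _init_transport.
        bb.functions["os.stat"].can_block_in(__file__, "allowed_stat")
        asyncio.run(allowed_stat())
        try:
            asyncio.run(forbidden_stat())
        except BlockingError:
            print("os.stat outside the allow-list was detected")


if __name__ == "__main__":
    main()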

libs/community/tests/unit_tests/test_cache.py

Lines changed: 55 additions & 12 deletions
@@ -62,20 +62,31 @@ def set_cache_and_teardown(request: FixtureRequest) -> Generator[None, None, Non
         raise ValueError("Cache not set. This should never happen.")
 
 
-async def test_llm_caching() -> None:
+def test_llm_caching() -> None:
     prompt = "How are you?"
     response = "Test response"
     cached_response = "Cached test response"
     llm = FakeListLLM(responses=[response])
     if llm_cache := get_llm_cache():
-        # sync test
         llm_cache.update(
             prompt=prompt,
             llm_string=create_llm_string(llm),
             return_val=[Generation(text=cached_response)],
         )
         assert llm.invoke(prompt) == cached_response
-        # async test
+    else:
+        raise ValueError(
+            "The cache not set. This should never happen, as the pytest fixture "
+            "`set_cache_and_teardown` always sets the cache."
+        )
+
+
+async def test_llm_caching_async() -> None:
+    prompt = "How are you?"
+    response = "Test response"
+    cached_response = "Cached test response"
+    llm = FakeListLLM(responses=[response])
+    if llm_cache := get_llm_cache():
         await llm_cache.aupdate(
             prompt=prompt,
             llm_string=create_llm_string(llm),

@@ -110,14 +121,13 @@ def test_old_sqlite_llm_caching() -> None:
         assert llm.invoke(prompt) == cached_response
 
 
-async def test_chat_model_caching() -> None:
+def test_chat_model_caching() -> None:
     prompt: List[BaseMessage] = [HumanMessage(content="How are you?")]
     response = "Test response"
     cached_response = "Cached test response"
     cached_message = AIMessage(content=cached_response)
     llm = FakeListChatModel(responses=[response])
     if llm_cache := get_llm_cache():
-        # sync test
         llm_cache.update(
             prompt=dumps(prompt),
             llm_string=llm._get_llm_string(),

@@ -126,8 +136,20 @@ async def test_chat_model_caching() -> None:
         result = llm.invoke(prompt)
         assert isinstance(result, AIMessage)
         assert result.content == cached_response
+    else:
+        raise ValueError(
+            "The cache not set. This should never happen, as the pytest fixture "
+            "`set_cache_and_teardown` always sets the cache."
+        )
+
 
-        # async test
+async def test_chat_model_caching_async() -> None:
+    prompt: List[BaseMessage] = [HumanMessage(content="How are you?")]
+    response = "Test response"
+    cached_response = "Cached test response"
+    cached_message = AIMessage(content=cached_response)
+    llm = FakeListChatModel(responses=[response])
+    if llm_cache := get_llm_cache():
         await llm_cache.aupdate(
             prompt=dumps(prompt),
             llm_string=llm._get_llm_string(),

@@ -143,14 +165,13 @@ async def test_chat_model_caching() -> None:
         )
 
 
-async def test_chat_model_caching_params() -> None:
+def test_chat_model_caching_params() -> None:
     prompt: List[BaseMessage] = [HumanMessage(content="How are you?")]
     response = "Test response"
     cached_response = "Cached test response"
     cached_message = AIMessage(content=cached_response)
     llm = FakeListChatModel(responses=[response])
     if llm_cache := get_llm_cache():
-        # sync test
         llm_cache.update(
             prompt=dumps(prompt),
             llm_string=llm._get_llm_string(functions=[]),

@@ -162,8 +183,20 @@ async def test_chat_model_caching_params() -> None:
         assert result.content == cached_response
         assert isinstance(result_no_params, AIMessage)
         assert result_no_params.content == response
+    else:
+        raise ValueError(
+            "The cache not set. This should never happen, as the pytest fixture "
+            "`set_cache_and_teardown` always sets the cache."
+        )
+
 
-        # async test
+async def test_chat_model_caching_params_async() -> None:
+    prompt: List[BaseMessage] = [HumanMessage(content="How are you?")]
+    response = "Test response"
+    cached_response = "Cached test response"
+    cached_message = AIMessage(content=cached_response)
+    llm = FakeListChatModel(responses=[response])
+    if llm_cache := get_llm_cache():
         await llm_cache.aupdate(
             prompt=dumps(prompt),
             llm_string=llm._get_llm_string(functions=[]),

@@ -182,13 +215,12 @@ async def test_chat_model_caching_params() -> None:
         )
 
 
-async def test_llm_cache_clear() -> None:
+def test_llm_cache_clear() -> None:
     prompt = "How are you?"
     expected_response = "Test response"
     cached_response = "Cached test response"
     llm = FakeListLLM(responses=[expected_response])
     if llm_cache := get_llm_cache():
-        # sync test
         llm_cache.update(
             prompt=prompt,
             llm_string=create_llm_string(llm),

@@ -197,8 +229,19 @@ async def test_llm_cache_clear() -> None:
         llm_cache.clear()
         response = llm.invoke(prompt)
         assert response == expected_response
+    else:
+        raise ValueError(
+            "The cache not set. This should never happen, as the pytest fixture "
+            "`set_cache_and_teardown` always sets the cache."
+        )
+
 
-        # async test
+async def test_llm_cache_clear_async() -> None:
+    prompt = "How are you?"
+    expected_response = "Test response"
+    cached_response = "Cached test response"
+    llm = FakeListLLM(responses=[expected_response])
+    if llm_cache := get_llm_cache():
         await llm_cache.aupdate(
             prompt=prompt,
             llm_string=create_llm_string(llm),
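The pattern repeated throughout this file: each former combined test is split into a plain sync test and an *_async twin, and the `if llm_cache := get_llm_cache():` guard now raises instead of silently skipping when the fixture failed to set a cache. A toy, self-contained sketch of the split (TinyCache is hypothetical and not the langchain_community cache API):

from typing import Dict, Optional


class TinyCache:
    """Toy stand-in for an LLM cache with sync and async update paths."""

    def __init__(self) -> None:
        self._store: Dict[str, str] = {}

    def update(self, key: str, value: str) -> None:
        self._store[key] = value

    async def aupdate(self, key: str, value: str) -> None:
        # A real cache would await I/O here; the toy just delegates.
        self.update(key, value)

    def lookup(self, key: str) -> Optional[str]:
        return self._store.get(key)


def test_cache_sync() -> None:
    cache = TinyCache()
    cache.update("prompt", "cached response")
    assert cache.lookup("prompt") == "cached response"


async def test_cache_async() -> None:
    cache = TinyCache()
    await cache.aupdate("prompt", "cached response")
    assert cache.lookup("prompt") == "cached response"

Keeping the two variants separate means Blockbuster only audits the coroutine test, where a stray blocking call would actually stall the event loop.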

libs/community/tests/unit_tests/test_dependencies.py

Lines changed: 1 addition & 0 deletions
@@ -76,6 +76,7 @@ def test_test_group_dependencies(uv_conf: Mapping[str, Any]) -> None:
         "pytest-socket",
         "pytest-watcher",
         "pytest-xdist",
+        "blockbuster",
         "responses",
         "syrupy",
         "toml",

libs/community/tests/unit_tests/vectorstores/test_faiss.py

Lines changed: 2 additions & 2 deletions
@@ -1589,10 +1589,10 @@ def test_faiss_local_save_load() -> None:
 
 
 @pytest.mark.requires("faiss")
-async def test_faiss_async_local_save_load() -> None:
+def test_faiss_async_local_save_load() -> None:
     """Test end to end serialization."""
     texts = ["foo", "bar", "baz"]
-    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings())
+    docsearch = FAISS.from_texts(texts, FakeEmbeddings())
     temp_timestamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
     with tempfile.TemporaryDirectory(suffix="_" + temp_timestamp + "/") as temp_folder:
         docsearch.save_local(temp_folder)

libs/community/tests/unit_tests/vectorstores/test_inmemory.py

Lines changed: 4 additions & 4 deletions
@@ -72,17 +72,17 @@ async def test_inmemory_mmr() -> None:
     assert output[1] == _AnyDocument(page_content="foy")
 
 
-async def test_inmemory_dump_load(tmp_path: Path) -> None:
+def test_inmemory_dump_load(tmp_path: Path) -> None:
     """Test end to end construction and search."""
     embedding = ConsistentFakeEmbeddings()
-    store = await InMemoryVectorStore.afrom_texts(["foo", "bar", "baz"], embedding)
-    output = await store.asimilarity_search("foo", k=1)
+    store = InMemoryVectorStore.from_texts(["foo", "bar", "baz"], embedding)
+    output = store.similarity_search("foo", k=1)
 
     test_file = str(tmp_path / "test.json")
     store.dump(test_file)
 
     loaded_store = InMemoryVectorStore.load(test_file, embedding)
-    loaded_output = await loaded_store.asimilarity_search("foo", k=1)
+    loaded_output = loaded_store.similarity_search("foo", k=1)
 
     assert output == loaded_output
 
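These two vector-store tests (and the FAISS one above) only drove synchronous, file-backed code through async wrappers, so the commit converts them to the plain sync API. As a general aside that is not part of this change: when a coroutine genuinely needs blocking file I/O, the usual remedy is to push the call onto a worker thread, which a loop-thread detector like Blockbuster should not flag because the event-loop thread never blocks.

import asyncio
import tempfile
from pathlib import Path


def write_blocking(path: Path, data: str) -> None:
    path.write_text(data)  # ordinary blocking file I/O


async def save_without_blocking_the_loop(path: Path, data: str) -> None:
    # asyncio.to_thread runs the blocking function in a worker thread,
    # keeping the event-loop thread responsive.
    await asyncio.to_thread(write_blocking, path, data)


if __name__ == "__main__":
    target = Path(tempfile.mkdtemp()) / "dump.txt"
    asyncio.run(save_without_blocking_the_loop(target, "hello"))
    print(target.read_text())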

libs/community/uv.lock

Lines changed: 20 additions & 0 deletions
Some generated files are not rendered by default.
