Skip to content

Commit 4968b4e

Browse files
tests(huggingface): fix docstring formatting to satisfy D205
1 parent 86943a9 commit 4968b4e

File tree

1 file changed

+60
-93
lines changed

1 file changed

+60
-93
lines changed

libs/langchain/tests/unit_tests/chat_models/test_init_chat_model_hf.py

Lines changed: 60 additions & 93 deletions
Original file line numberDiff line numberDiff line change
@@ -8,21 +8,20 @@
88

99
from langchain.chat_models import init_chat_model
1010

11-
git add libs/langchain/tests/unit_tests/chat_models/test_init_chat_model_hf.py
11+
1212
@pytest.fixture
1313
def hf_fakes(monkeypatch: pytest.MonkeyPatch) -> SimpleNamespace:
14-
"""
15-
Install fake modules for `langchain_huggingface` and `transformers` and
16-
capture their call arguments for assertions.
14+
"""Install fakes for Hugging Face and transformers.
1715
16+
Capture call arguments and simulate module presence to test initialization
17+
behavior, including current failure modes.
1818
"""
1919
pipeline_calls: list[tuple[str, dict[str, Any]]] = []
2020
init_calls: list[dict[str, Any]] = []
2121

2222
# Fake transformers.pipeline
2323
def fake_pipeline(task: str, **kwargs: Any) -> SimpleNamespace:
2424
pipeline_calls.append((task, dict(kwargs)))
25-
# A simple stand-in object for the HF pipeline
2625
return SimpleNamespace(_kind="dummy_hf_pipeline")
2726

2827
transformers_mod = types.ModuleType("transformers")
@@ -31,25 +30,25 @@ def fake_pipeline(task: str, **kwargs: Any) -> SimpleNamespace:
3130

3231
# Fake langchain_huggingface.ChatHuggingFace that REQUIRES `llm`
3332
class FakeChatHuggingFace:
34-
def __init__(self, *, llm: Any, **kwargs: Any) -> None:
33+
def __init__(self, *, llm: object, **kwargs: Any) -> None:
3534
init_calls.append({"llm": llm, "kwargs": dict(kwargs)})
36-
# minimal instance; tests only assert on ctor args
3735
self._llm = llm
3836
self._kwargs = kwargs
3937

40-
# Build full package path: langchain_huggingface.chat_models.huggingface
38+
# Build full package path:
39+
# langchain_huggingface.chat_models.huggingface
4140
hf_pkg = types.ModuleType("langchain_huggingface")
4241
hf_pkg.__path__ = [] # mark as package
4342

4443
hf_chat_models_pkg = types.ModuleType("langchain_huggingface.chat_models")
4544
hf_chat_models_pkg.__path__ = [] # mark as package
4645

47-
hf_chat_huggingface_mod = types.ModuleType(
48-
"langchain_huggingface.chat_models.huggingface"
46+
hf_chat_hf_mod = types.ModuleType(
47+
"langchain_huggingface.chat_models.huggingface",
4948
)
50-
hf_chat_huggingface_mod.ChatHuggingFace = FakeChatHuggingFace
49+
hf_chat_hf_mod.ChatHuggingFace = FakeChatHuggingFace
5150

52-
# Optional: expose at package root for compatibility with top-level imports
51+
# Also expose at package root for top-level imports
5352
hf_pkg.ChatHuggingFace = FakeChatHuggingFace
5453

5554
monkeypatch.setitem(sys.modules, "langchain_huggingface", hf_pkg)
@@ -61,7 +60,7 @@ def __init__(self, *, llm: Any, **kwargs: Any) -> None:
6160
monkeypatch.setitem(
6261
sys.modules,
6362
"langchain_huggingface.chat_models.huggingface",
64-
hf_chat_huggingface_mod,
63+
hf_chat_hf_mod,
6564
)
6665

6766
# Ensure _check_pkg sees both packages as installed
@@ -79,93 +78,61 @@ def fake_find_spec(name: str) -> Optional[object]:
7978

8079
monkeypatch.setattr("importlib.util.find_spec", fake_find_spec)
8180

82-
return SimpleNamespace(pipeline_calls=pipeline_calls, init_calls=init_calls)
83-
84-
85-
def _last_pipeline_kwargs(hf_fakes: SimpleNamespace) -> dict[str, Any]:
86-
assert hf_fakes.pipeline_calls, "transformers.pipeline was not called"
87-
_, kwargs = hf_fakes.pipeline_calls[-1]
88-
return kwargs
89-
90-
91-
def _last_chat_kwargs(hf_fakes: SimpleNamespace) -> dict[str, Any]:
92-
assert hf_fakes.init_calls, "ChatHuggingFace was not constructed"
93-
return hf_fakes.init_calls[-1]["kwargs"]
94-
95-
96-
@pytest.mark.xfail(
97-
reason=(
98-
"Pending fix for huggingface init (#28226 / #33167) — currently passes "
99-
"model_id to ChatHuggingFace"
100-
),
101-
raises=TypeError,
102-
)
103-
def test_hf_basic_wraps_pipeline(hf_fakes: SimpleNamespace) -> None:
104-
# provider specified inline
105-
llm = init_chat_model(
106-
"huggingface:microsoft/Phi-3-mini-4k-instruct",
107-
task="text-generation",
108-
temperature=0,
81+
return SimpleNamespace(
82+
pipeline_calls=pipeline_calls,
83+
init_calls=init_calls,
10984
)
110-
# Wrapped object should be constructed (we don't require a specific type here)
111-
assert llm is not None
11285

113-
# Make failure modes explicit
114-
assert hf_fakes.pipeline_calls, "Expected transformers.pipeline to be called"
115-
assert hf_fakes.init_calls, "Expected ChatHuggingFace to be constructed"
11686

117-
# pipeline called with correct model (don't assert task value)
118-
kwargs = _last_pipeline_kwargs(hf_fakes)
119-
assert kwargs["model"] == "microsoft/Phi-3-mini-4k-instruct"
87+
def test_hf_current_bug_basic_raises_typeerror(
88+
hf_fakes: SimpleNamespace,
89+
) -> None:
90+
"""Current behavior raises TypeError when using Hugging Face provider.
12091
121-
# ChatHuggingFace must be constructed with llm
122-
assert "llm" in hf_fakes.init_calls[-1]
123-
assert hf_fakes.init_calls[-1]["llm"]._kind == "dummy_hf_pipeline"
92+
init_chat_model constructs ChatHuggingFace without ``llm`` and never builds
93+
a pipeline. Verify that explicitly.
94+
"""
95+
with pytest.raises(TypeError):
96+
_ = init_chat_model(
97+
"huggingface:microsoft/Phi-3-mini-4k-instruct",
98+
task="text-generation",
99+
temperature=0,
100+
)
101+
# Buggy path should not touch transformers.pipeline
102+
assert not hf_fakes.pipeline_calls, "pipeline should NOT be called"
124103

125104

126-
@pytest.mark.xfail(
127-
reason="Pending fix for huggingface init (#28226 / #33167)",
128-
raises=TypeError,
129-
)
130-
def test_hf_max_tokens_translated_to_max_new_tokens(
105+
def test_hf_current_bug_max_tokens_case_raises_typeerror(
131106
hf_fakes: SimpleNamespace,
132107
) -> None:
133-
init_chat_model(
134-
model="mistralai/Mistral-7B-Instruct-v0.2",
135-
model_provider="huggingface",
136-
task="text-generation",
137-
max_tokens=42,
138-
)
139-
assert hf_fakes.pipeline_calls, "Expected transformers.pipeline to be called"
140-
assert hf_fakes.init_calls, "Expected ChatHuggingFace to be constructed"
141-
kwargs = _last_pipeline_kwargs(hf_fakes)
142-
assert kwargs.get("max_new_tokens") == 42
143-
# Ensure we don't leak the old name into pipeline kwargs
144-
assert "max_tokens" not in kwargs
145-
146-
147-
@pytest.mark.xfail(
148-
reason="Pending fix for huggingface init (#28226 / #33167)",
149-
raises=TypeError,
150-
)
151-
def test_hf_timeout_and_max_retries_pass_through_to_chat_wrapper(
108+
"""Same failure when passing ``max_tokens``.
109+
110+
Should raise and avoid constructing a pipeline.
111+
"""
112+
with pytest.raises(TypeError):
113+
_ = init_chat_model(
114+
model="mistralai/Mistral-7B-Instruct-v0.2",
115+
model_provider="huggingface",
116+
task="text-generation",
117+
max_tokens=42,
118+
)
119+
assert not hf_fakes.pipeline_calls, "pipeline should NOT be called"
120+
121+
122+
def test_hf_current_bug_timeout_retries_case_raises_typeerror(
152123
hf_fakes: SimpleNamespace,
153124
) -> None:
154-
init_chat_model(
155-
model="microsoft/Phi-3-mini-4k-instruct",
156-
model_provider="huggingface",
157-
task="text-generation",
158-
temperature=0.1,
159-
timeout=7,
160-
max_retries=3,
161-
)
162-
assert hf_fakes.pipeline_calls, "Expected transformers.pipeline to be called"
163-
assert hf_fakes.init_calls, "Expected ChatHuggingFace to be constructed"
164-
chat_kwargs = _last_chat_kwargs(hf_fakes)
165-
# Assert these control args are passed to the wrapper (not the pipeline)
166-
assert chat_kwargs.get("timeout") == 7
167-
assert chat_kwargs.get("max_retries") == 3
168-
# And that they are NOT passed to transformers.pipeline
169-
pipeline_kwargs = _last_pipeline_kwargs(hf_fakes)
170-
assert "timeout" not in pipeline_kwargs
171-
assert "max_retries" not in pipeline_kwargs
125+
"""Same failure when passing ``timeout``/``max_retries``.
126+
127+
Should raise and avoid constructing a pipeline.
128+
"""
129+
with pytest.raises(TypeError):
130+
_ = init_chat_model(
131+
model="microsoft/Phi-3-mini-4k-instruct",
132+
model_provider="huggingface",
133+
task="text-generation",
134+
temperature=0.1,
135+
timeout=7,
136+
max_retries=3,
137+
)
138+
assert not hf_fakes.pipeline_calls, "pipeline should NOT be called"

0 commit comments

Comments (0)