8
8
9
9
from langchain .chat_models import init_chat_model
10
10
11
- git add libs / langchain / tests / unit_tests / chat_models / test_init_chat_model_hf . py
11
+
12
12
@pytest .fixture
13
13
def hf_fakes (monkeypatch : pytest .MonkeyPatch ) -> SimpleNamespace :
14
- """
15
- Install fake modules for `langchain_huggingface` and `transformers` and
16
- capture their call arguments for assertions.
14
+ """Install fakes for Hugging Face and transformers.
17
15
16
+ Capture call arguments and simulate module presence to test initialization
17
+ behavior, including current failure modes.
18
18
"""
19
19
pipeline_calls : list [tuple [str , dict [str , Any ]]] = []
20
20
init_calls : list [dict [str , Any ]] = []
21
21
22
22
# Fake transformers.pipeline
23
23
def fake_pipeline (task : str , ** kwargs : Any ) -> SimpleNamespace :
24
24
pipeline_calls .append ((task , dict (kwargs )))
25
- # A simple stand-in object for the HF pipeline
26
25
return SimpleNamespace (_kind = "dummy_hf_pipeline" )
27
26
28
27
transformers_mod = types .ModuleType ("transformers" )
@@ -31,25 +30,25 @@ def fake_pipeline(task: str, **kwargs: Any) -> SimpleNamespace:
31
30
32
31
# Fake langchain_huggingface.ChatHuggingFace that REQUIRES `llm`
33
32
class FakeChatHuggingFace :
34
- def __init__ (self , * , llm : Any , ** kwargs : Any ) -> None :
33
+ def __init__ (self , * , llm : object , ** kwargs : Any ) -> None :
35
34
init_calls .append ({"llm" : llm , "kwargs" : dict (kwargs )})
36
- # minimal instance; tests only assert on ctor args
37
35
self ._llm = llm
38
36
self ._kwargs = kwargs
39
37
40
- # Build full package path: langchain_huggingface.chat_models.huggingface
38
+ # Build full package path:
39
+ # langchain_huggingface.chat_models.huggingface
41
40
hf_pkg = types .ModuleType ("langchain_huggingface" )
42
41
hf_pkg .__path__ = [] # mark as package
43
42
44
43
hf_chat_models_pkg = types .ModuleType ("langchain_huggingface.chat_models" )
45
44
hf_chat_models_pkg .__path__ = [] # mark as package
46
45
47
- hf_chat_huggingface_mod = types .ModuleType (
48
- "langchain_huggingface.chat_models.huggingface"
46
+ hf_chat_hf_mod = types .ModuleType (
47
+ "langchain_huggingface.chat_models.huggingface" ,
49
48
)
50
- hf_chat_huggingface_mod .ChatHuggingFace = FakeChatHuggingFace
49
+ hf_chat_hf_mod .ChatHuggingFace = FakeChatHuggingFace
51
50
52
- # Optional: expose at package root for compatibility with top-level imports
51
+ # Also expose at package root for top-level imports
53
52
hf_pkg .ChatHuggingFace = FakeChatHuggingFace
54
53
55
54
monkeypatch .setitem (sys .modules , "langchain_huggingface" , hf_pkg )
@@ -61,7 +60,7 @@ def __init__(self, *, llm: Any, **kwargs: Any) -> None:
61
60
monkeypatch .setitem (
62
61
sys .modules ,
63
62
"langchain_huggingface.chat_models.huggingface" ,
64
- hf_chat_huggingface_mod ,
63
+ hf_chat_hf_mod ,
65
64
)
66
65
67
66
# Ensure _check_pkg sees both packages as installed
@@ -79,93 +78,61 @@ def fake_find_spec(name: str) -> Optional[object]:
79
78
80
79
monkeypatch .setattr ("importlib.util.find_spec" , fake_find_spec )
81
80
82
- return SimpleNamespace (pipeline_calls = pipeline_calls , init_calls = init_calls )
83
-
84
-
85
- def _last_pipeline_kwargs (hf_fakes : SimpleNamespace ) -> dict [str , Any ]:
86
- assert hf_fakes .pipeline_calls , "transformers.pipeline was not called"
87
- _ , kwargs = hf_fakes .pipeline_calls [- 1 ]
88
- return kwargs
89
-
90
-
91
- def _last_chat_kwargs (hf_fakes : SimpleNamespace ) -> dict [str , Any ]:
92
- assert hf_fakes .init_calls , "ChatHuggingFace was not constructed"
93
- return hf_fakes .init_calls [- 1 ]["kwargs" ]
94
-
95
-
96
- @pytest .mark .xfail (
97
- reason = (
98
- "Pending fix for huggingface init (#28226 / #33167) — currently passes "
99
- "model_id to ChatHuggingFace"
100
- ),
101
- raises = TypeError ,
102
- )
103
- def test_hf_basic_wraps_pipeline (hf_fakes : SimpleNamespace ) -> None :
104
- # provider specified inline
105
- llm = init_chat_model (
106
- "huggingface:microsoft/Phi-3-mini-4k-instruct" ,
107
- task = "text-generation" ,
108
- temperature = 0 ,
81
+ return SimpleNamespace (
82
+ pipeline_calls = pipeline_calls ,
83
+ init_calls = init_calls ,
109
84
)
110
- # Wrapped object should be constructed (we don't require a specific type here)
111
- assert llm is not None
112
85
113
- # Make failure modes explicit
114
- assert hf_fakes .pipeline_calls , "Expected transformers.pipeline to be called"
115
- assert hf_fakes .init_calls , "Expected ChatHuggingFace to be constructed"
116
86
117
- # pipeline called with correct model (don't assert task value)
118
- kwargs = _last_pipeline_kwargs (hf_fakes )
119
- assert kwargs ["model" ] == "microsoft/Phi-3-mini-4k-instruct"
87
def test_hf_current_bug_basic_raises_typeerror(
    hf_fakes: SimpleNamespace,
) -> None:
    """Inline Hugging Face provider string currently fails with TypeError.

    ``init_chat_model`` constructs ``ChatHuggingFace`` without the required
    ``llm`` argument, so the fake constructor rejects the call before any
    transformers pipeline can be built. Pin both facts down explicitly.
    """
    with pytest.raises(TypeError):
        init_chat_model(
            "huggingface:microsoft/Phi-3-mini-4k-instruct",
            task="text-generation",
            temperature=0,
        )
    # The constructor blows up first, so no pipeline may have been created.
    assert not hf_fakes.pipeline_calls, "pipeline should NOT be called"
124
103
125
104
126
- @pytest .mark .xfail (
127
- reason = "Pending fix for huggingface init (#28226 / #33167)" ,
128
- raises = TypeError ,
129
- )
130
- def test_hf_max_tokens_translated_to_max_new_tokens (
105
def test_hf_current_bug_max_tokens_case_raises_typeerror(
    hf_fakes: SimpleNamespace,
) -> None:
    """The ``max_tokens`` variant hits the same TypeError.

    Initialization must raise, and the failing path must never reach
    transformers pipeline construction.
    """
    with pytest.raises(TypeError):
        init_chat_model(
            model="mistralai/Mistral-7B-Instruct-v0.2",
            model_provider="huggingface",
            task="text-generation",
            max_tokens=42,
        )
    # Failure precedes pipeline construction entirely.
    assert not hf_fakes.pipeline_calls, "pipeline should NOT be called"
120
+
121
+
122
def test_hf_current_bug_timeout_retries_case_raises_typeerror(
    hf_fakes: SimpleNamespace,
) -> None:
    """Passing ``timeout``/``max_retries`` triggers the same TypeError.

    Initialization must raise, and the failing path must never reach
    transformers pipeline construction.
    """
    with pytest.raises(TypeError):
        init_chat_model(
            model="microsoft/Phi-3-mini-4k-instruct",
            model_provider="huggingface",
            task="text-generation",
            temperature=0.1,
            timeout=7,
            max_retries=3,
        )
    # Failure precedes pipeline construction entirely.
    assert not hf_fakes.pipeline_calls, "pipeline should NOT be called"
0 commit comments