
Commit 010ed5d

fix(anthropic,openai): fix tests (#33257)
Follow-up to #33192.

Parent commit: 7f5be6b
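These tests pass a CallbackManager to the model constructors via the `callbacks` parameter instead of the older `callback_manager` keyword. For context, a minimal sketch of the updated pattern (the handler class and model name below are illustrative placeholders, not taken from the test suite):

    from langchain_core.callbacks import BaseCallbackHandler, CallbackManager
    from langchain_anthropic import ChatAnthropic

    class TokenPrinter(BaseCallbackHandler):
        """Illustrative handler: prints each streamed token as it arrives."""

        def on_llm_new_token(self, token: str, **kwargs) -> None:
            print(token, end="", flush=True)

    callback_manager = CallbackManager([TokenPrinter()])

    # Before: ChatAnthropic(..., callback_manager=callback_manager)
    # After:  `callbacks` accepts either a list of handlers or a
    #         BaseCallbackManager, so the manager can be passed directly.
    chat = ChatAnthropic(
        model="claude-3-5-haiku-latest",  # placeholder model name
        streaming=True,
        callbacks=callback_manager,
    )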

File tree: 6 files changed (+13, -13 lines)


libs/partners/anthropic/tests/integration_tests/test_chat_models.py

Lines changed: 4 additions & 4 deletions
@@ -319,7 +319,7 @@ def test_anthropic_streaming_callback() -> None:
     callback_manager = CallbackManager([callback_handler])
     chat = ChatAnthropic(
         model=MODEL_NAME,  # type: ignore[call-arg]
-        callback_manager=callback_manager,
+        callbacks=callback_manager,
         verbose=True,
     )
     message = HumanMessage(content="Write me a sentence with 10 words.")
@@ -335,7 +335,7 @@ async def test_anthropic_async_streaming_callback() -> None:
     callback_manager = CallbackManager([callback_handler])
     chat = ChatAnthropic(
         model=MODEL_NAME,  # type: ignore[call-arg]
-        callback_manager=callback_manager,
+        callbacks=callback_manager,
         verbose=True,
     )
     chat_messages: list[BaseMessage] = [
@@ -379,7 +379,7 @@ def test_streaming() -> None:
     llm = ChatAnthropic(  # type: ignore[call-arg, call-arg]
         model_name=MODEL_NAME,
         streaming=True,
-        callback_manager=callback_manager,
+        callbacks=callback_manager,
     )
 
     response = llm.generate([[HumanMessage(content="I'm Pickle Rick")]])
@@ -395,7 +395,7 @@ async def test_astreaming() -> None:
     llm = ChatAnthropic(  # type: ignore[call-arg, call-arg]
         model_name=MODEL_NAME,
         streaming=True,
-        callback_manager=callback_manager,
+        callbacks=callback_manager,
     )
 
     response = await llm.agenerate([[HumanMessage(content="I'm Pickle Rick")]])

libs/partners/anthropic/tests/integration_tests/test_llms.py

Lines changed: 2 additions & 2 deletions
@@ -49,7 +49,7 @@ def test_anthropic_streaming_callback() -> None:
     llm = AnthropicLLM(
         model=MODEL,  # type: ignore[call-arg]
         streaming=True,
-        callback_manager=callback_manager,
+        callbacks=callback_manager,
         verbose=True,
     )
     llm.invoke("Write me a sentence with 100 words.")
@@ -70,7 +70,7 @@ async def test_anthropic_async_streaming_callback() -> None:
     llm = AnthropicLLM(
         model=MODEL,  # type: ignore[call-arg]
         streaming=True,
-        callback_manager=callback_manager,
+        callbacks=callback_manager,
         verbose=True,
     )
     result = await llm.agenerate(["How many toes do dogs have?"])
Binary file changed (4.5 KB), not shown.

libs/partners/openai/tests/integration_tests/chat_models/test_azure.py

Lines changed: 3 additions & 3 deletions
@@ -91,7 +91,7 @@ def test_chat_openai_streaming() -> None:
         max_tokens=10,
         streaming=True,
         temperature=0,
-        callback_manager=callback_manager,
+        callbacks=callback_manager,
         verbose=True,
     )
     message = HumanMessage(content="Hello")
@@ -113,7 +113,7 @@ def on_llm_end(self, *args: Any, **kwargs: Any) -> Any:
 
     callback = _FakeCallback()
     callback_manager = CallbackManager([callback])
-    chat = _get_llm(max_tokens=2, temperature=0, callback_manager=callback_manager)
+    chat = _get_llm(max_tokens=2, temperature=0, callbacks=callback_manager)
     list(chat.stream("hi"))
     generation = callback.saved_things["generation"]
     # `Hello!` is two tokens, assert that that is what is returned
@@ -145,7 +145,7 @@ async def test_async_chat_openai_streaming() -> None:
         max_tokens=10,
         streaming=True,
         temperature=0,
-        callback_manager=callback_manager,
+        callbacks=callback_manager,
         verbose=True,
     )
     message = HumanMessage(content="Hello")

libs/partners/openai/tests/integration_tests/chat_models/test_base.py

Lines changed: 2 additions & 2 deletions
@@ -115,7 +115,7 @@ def test_chat_openai_streaming(use_responses_api: bool) -> None:
         max_tokens=MAX_TOKEN_COUNT,  # type: ignore[call-arg]
         streaming=True,
         temperature=0,
-        callback_manager=callback_manager,
+        callbacks=callback_manager,
         verbose=True,
         use_responses_api=use_responses_api,
     )
@@ -138,7 +138,7 @@ def on_llm_end(self, *args: Any, **kwargs: Any) -> Any:
 
     callback = _FakeCallback()
     callback_manager = CallbackManager([callback])
-    chat = ChatOpenAI(max_tokens=2, temperature=0, callback_manager=callback_manager)  # type: ignore[call-arg]
+    chat = ChatOpenAI(max_tokens=2, temperature=0, callbacks=callback_manager)  # type: ignore[call-arg]
     list(chat.stream("hi"))
     generation = callback.saved_things["generation"]
     # `Hello!` is two tokens, assert that that is what is returned

libs/partners/openai/tests/integration_tests/llms/test_azure.py

Lines changed: 2 additions & 2 deletions
@@ -145,7 +145,7 @@ def test_openai_streaming_callback() -> None:
         max_tokens=10,
         streaming=True,
         temperature=0,
-        callback_manager=callback_manager,
+        callbacks=callback_manager,
         verbose=True,
     )
     llm.invoke("Write me a sentence with 100 words.")
@@ -168,7 +168,7 @@ async def test_openai_async_streaming_callback() -> None:
         max_tokens=10,
         streaming=True,
         temperature=0,
-        callback_manager=callback_manager,
+        callbacks=callback_manager,
         verbose=True,
     )
     result = await llm.agenerate(["Write me a sentence with 100 words."])
