Skip to content

Commit cd0dc55

Browse files
committed
simplify code
1 parent cbd5c72 commit cd0dc55

File tree

1 file changed

+8
-22
lines changed

1 file changed

+8
-22
lines changed

tests/integrations/langchain/test_langchain.py

Lines changed: 8 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -1745,32 +1745,18 @@ def test_langchain_response_model_extraction(
17451745
invocation_params={"model": "gpt-3.5-turbo"},
17461746
)
17471747

1748-
response_metadata = {}
1749-
if response_metadata_model is not None:
1750-
response_metadata["model_name"] = response_metadata_model
1751-
1752-
generation_info = {}
1753-
if generation_info_model is not None:
1754-
generation_info["model_name"] = generation_info_model
1755-
1756-
llm_output = {}
1757-
if llm_output_model is not None:
1758-
llm_output["model_name"] = llm_output_model
1748+
response_metadata = {"model_name": response_metadata_model}
1749+
generation_info = {"model_name": generation_info_model}
1750+
llm_output = {"model_name": llm_output_model}
17591751

17601752
message = AIMessageChunk(
1761-
content="Test response",
1762-
response_metadata=response_metadata,
1753+
content="Test response", response_metadata=response_metadata
17631754
)
17641755

1765-
generation = Mock()
1766-
generation.text = "Test response"
1767-
generation.message = message
1768-
generation.generation_info = generation_info
1769-
1770-
response = Mock()
1771-
response.generations = [[generation]]
1772-
response.llm_output = llm_output
1773-
1756+
generation = Mock(
1757+
text="Test response", message=message, generation_info=generation_info
1758+
)
1759+
response = Mock(generations=[[generation]], llm_output=llm_output)
17741760
callback.on_llm_end(response=response, run_id=run_id)
17751761

17761762
assert len(events) > 0

0 commit comments

Comments (0)