Skip to content

Commit d533268

Browse files
committed
simplify
1 parent cd0dc55 commit d533268

File tree

2 files changed

+6
-45
lines changed

2 files changed: +6 −45 lines changed

sentry_sdk/integrations/langchain.py

Lines changed: 1 addition & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -443,20 +443,9 @@ def on_llm_end(self, response, *, run_id, **kwargs):
443443

444444
if generation is not None:
445445
try:
446-
response_model_metadata = generation.message.response_metadata.get(
446+
response_model = generation.message.response_metadata.get(
447447
"model_name"
448448
)
449-
response_model_generation_info = generation.generation_info.get(
450-
"model_name"
451-
)
452-
response_model_llm_output = response.llm_output.get("model_name")
453-
454-
response_model = (
455-
response_model_metadata
456-
or response_model_generation_info
457-
or response_model_llm_output
458-
or None
459-
)
460449
if response_model is not None:
461450
span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response_model)
462451
except AttributeError:

tests/integrations/langchain/test_langchain.py

Lines changed: 5 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -1689,39 +1689,16 @@ def test_langchain_embeddings_with_list_and_string_inputs(sentry_init, capture_e
16891689

16901690

16911691
@pytest.mark.parametrize(
1692-
"response_metadata_model,generation_info_model,llm_output_model,expected_model",
1692+
"response_metadata_model,expected_model",
16931693
[
1694-
("model-from-metadata", None, None, "model-from-metadata"),
1695-
(None, "model-from-generation-info", None, "model-from-generation-info"),
1696-
(None, None, "model-from-llm-output", "model-from-llm-output"),
1697-
(
1698-
"model-from-metadata",
1699-
"model-from-generation-info",
1700-
None,
1701-
"model-from-metadata",
1702-
),
1703-
("model-from-metadata", None, "model-from-llm-output", "model-from-metadata"),
1704-
(
1705-
None,
1706-
"model-from-generation-info",
1707-
"model-from-llm-output",
1708-
"model-from-generation-info",
1709-
),
1710-
(
1711-
"model-from-metadata",
1712-
"model-from-generation-info",
1713-
"model-from-llm-output",
1714-
"model-from-metadata",
1715-
),
1716-
(None, None, None, None),
1694+
("gpt-3.5-turbo", "gpt-3.5-turbo"),
1695+
(None, None),
17171696
],
17181697
)
17191698
def test_langchain_response_model_extraction(
17201699
sentry_init,
17211700
capture_events,
17221701
response_metadata_model,
1723-
generation_info_model,
1724-
llm_output_model,
17251702
expected_model,
17261703
):
17271704
sentry_init(
@@ -1746,17 +1723,12 @@ def test_langchain_response_model_extraction(
17461723
)
17471724

17481725
response_metadata = {"model_name": response_metadata_model}
1749-
generation_info = {"model_name": generation_info_model}
1750-
llm_output = {"model_name": llm_output_model}
1751-
17521726
message = AIMessageChunk(
17531727
content="Test response", response_metadata=response_metadata
17541728
)
17551729

1756-
generation = Mock(
1757-
text="Test response", message=message, generation_info=generation_info
1758-
)
1759-
response = Mock(generations=[[generation]], llm_output=llm_output)
1730+
generation = Mock(text="Test response", message=message)
1731+
response = Mock(generations=[[generation]])
17601732
callback.on_llm_end(response=response, run_id=run_id)
17611733

17621734
assert len(events) > 0

0 commit comments

Comments (0)