Skip to content

Commit 176c7d4

Browse files
authored
fix(langchain): add gen_ai.response.model to chat spans (#5159)
1 parent 5fc28a1 commit 176c7d4

File tree

2 files changed

+64
-1
lines changed

2 files changed

+64
-1
lines changed

sentry_sdk/integrations/langchain.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -443,7 +443,9 @@ def on_llm_end(self, response, *, run_id, **kwargs):
443443

444444
if generation is not None:
445445
try:
446-
response_model = generation.generation_info.get("model_name")
446+
response_model = generation.message.response_metadata.get(
447+
"model_name"
448+
)
447449
if response_model is not None:
448450
span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response_model)
449451
except AttributeError:

tests/integrations/langchain/test_langchain.py

Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1686,3 +1686,64 @@ def test_langchain_embeddings_with_list_and_string_inputs(sentry_init, capture_e
16861686
assert "List item" in input_data or "Single string query" in input_data, (
16871687
f"Expected input text in serialized data: {input_data}"
16881688
)
1689+
1690+
1691+
@pytest.mark.parametrize(
1692+
"response_metadata_model,expected_model",
1693+
[
1694+
("gpt-3.5-turbo", "gpt-3.5-turbo"),
1695+
(None, None),
1696+
],
1697+
)
1698+
def test_langchain_response_model_extraction(
1699+
sentry_init,
1700+
capture_events,
1701+
response_metadata_model,
1702+
expected_model,
1703+
):
1704+
sentry_init(
1705+
integrations=[LangchainIntegration(include_prompts=True)],
1706+
traces_sample_rate=1.0,
1707+
send_default_pii=True,
1708+
)
1709+
events = capture_events()
1710+
1711+
callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True)
1712+
1713+
run_id = "test-response-model-uuid"
1714+
serialized = {"_type": "openai-chat", "model_name": "gpt-3.5-turbo"}
1715+
prompts = ["Test prompt"]
1716+
1717+
with start_transaction():
1718+
callback.on_llm_start(
1719+
serialized=serialized,
1720+
prompts=prompts,
1721+
run_id=run_id,
1722+
invocation_params={"model": "gpt-3.5-turbo"},
1723+
)
1724+
1725+
response_metadata = {"model_name": response_metadata_model}
1726+
message = AIMessageChunk(
1727+
content="Test response", response_metadata=response_metadata
1728+
)
1729+
1730+
generation = Mock(text="Test response", message=message)
1731+
response = Mock(generations=[[generation]])
1732+
callback.on_llm_end(response=response, run_id=run_id)
1733+
1734+
assert len(events) > 0
1735+
tx = events[0]
1736+
assert tx["type"] == "transaction"
1737+
1738+
llm_spans = [
1739+
span for span in tx.get("spans", []) if span.get("op") == "gen_ai.pipeline"
1740+
]
1741+
assert len(llm_spans) > 0
1742+
1743+
llm_span = llm_spans[0]
1744+
1745+
if expected_model is not None:
1746+
assert SPANDATA.GEN_AI_RESPONSE_MODEL in llm_span["data"]
1747+
assert llm_span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == expected_model
1748+
else:
1749+
assert SPANDATA.GEN_AI_RESPONSE_MODEL not in llm_span.get("data", {})

0 commit comments

Comments (0)