Skip to content

Commit b2fd97e

Browse files
ericapisani and claude authored
feat(langchain): Update LLM span operation to gen_ai.generate_text (#5796)
Update LangChain integration to use the new `gen_ai.generate_text` operation for LLM call spans, aligning with OpenTelemetry semantic conventions. **Changes:** - Changed LLM span operation from `gen_ai.pipeline` to `gen_ai.generate_text` - Updated span naming to include the model identifier: `generate_text {model}` instead of generic "Langchain LLM call" - Removed unnecessary docstring from callback method - Updated test assertions to validate the new operation and naming convention This change improves observability by using more specific operation types that accurately reflect the semantic nature of LLM generation calls, and includes the model identifier for better span context. Related to SDK-669. Replaces some of the changes introduced in #5705 (this is being broken down into 2 parts due to upcoming changes in the langchain test suite) Co-authored-by: Claude Haiku 4.5 <noreply@anthropic.com>
1 parent 4cd6752 commit b2fd97e

File tree

2 files changed

+8
-7
lines changed

2 files changed

+8
-7
lines changed

sentry_sdk/integrations/langchain.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -351,7 +351,6 @@ def on_llm_start(
351351
metadata: "Optional[Dict[str, Any]]" = None,
352352
**kwargs: "Any",
353353
) -> "Any":
354-
"""Run when LLM starts running."""
355354
with capture_internal_exceptions():
356355
if not run_id:
357356
return
@@ -369,8 +368,8 @@ def on_llm_start(
369368
watched_span = self._create_span(
370369
run_id,
371370
parent_run_id,
372-
op=OP.GEN_AI_PIPELINE,
373-
name=kwargs.get("name") or "Langchain LLM call",
371+
op=OP.GEN_AI_GENERATE_TEXT,
372+
name=f"generate_text {model}".strip(),
374373
origin=LangchainIntegration.origin,
375374
)
376375
span = watched_span.span

tests/integrations/langchain/test_langchain.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -851,12 +851,14 @@ def test_langchain_integration_with_langchain_core_only(sentry_init, capture_eve
851851
assert tx["type"] == "transaction"
852852

853853
llm_spans = [
854-
span for span in tx.get("spans", []) if span.get("op") == "gen_ai.pipeline"
854+
span
855+
for span in tx.get("spans", [])
856+
if span.get("op") == "gen_ai.generate_text"
855857
]
856858
assert len(llm_spans) > 0
857859

858860
llm_span = llm_spans[0]
859-
assert llm_span["description"] == "Langchain LLM call"
861+
assert llm_span["description"] == "generate_text gpt-3.5-turbo"
860862
assert llm_span["data"]["gen_ai.request.model"] == "gpt-3.5-turbo"
861863
assert (
862864
llm_span["data"]["gen_ai.response.text"]
@@ -1062,7 +1064,7 @@ def test_langchain_message_truncation(sentry_init, capture_events):
10621064
assert tx["type"] == "transaction"
10631065

10641066
llm_spans = [
1065-
span for span in tx.get("spans", []) if span.get("op") == "gen_ai.pipeline"
1067+
span for span in tx.get("spans", []) if span.get("op") == "gen_ai.generate_text"
10661068
]
10671069
assert len(llm_spans) > 0
10681070

@@ -1776,7 +1778,7 @@ def test_langchain_response_model_extraction(
17761778
assert tx["type"] == "transaction"
17771779

17781780
llm_spans = [
1779-
span for span in tx.get("spans", []) if span.get("op") == "gen_ai.pipeline"
1781+
span for span in tx.get("spans", []) if span.get("op") == "gen_ai.generate_text"
17801782
]
17811783
assert len(llm_spans) > 0
17821784

0 commit comments

Comments (0)