From 344e2d77fec143078d96fcd5dcba339ac7b8caeb Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Tue, 7 Oct 2025 10:25:59 -0700 Subject: [PATCH 01/24] chore: bump otel deps to 0.58b0 --- .../opentelemetry-instrumentation-langchain/pyproject.toml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/pyproject.toml b/instrumentation-genai/opentelemetry-instrumentation-langchain/pyproject.toml index 4f3e88115b..8d87cc9085 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/pyproject.toml +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/pyproject.toml @@ -26,8 +26,8 @@ classifiers = [ ] dependencies = [ "opentelemetry-api >= 1.31.0", - "opentelemetry-instrumentation ~= 0.57b0", - "opentelemetry-semantic-conventions ~= 0.57b0" + "opentelemetry-instrumentation ~= 0.58b0", + "opentelemetry-semantic-conventions ~= 0.58b0" ] [project.optional-dependencies] @@ -53,4 +53,3 @@ include = [ [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"] - From c1c74ff15e3ba95520f11772c9b99bef6b1fa26d Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Tue, 7 Oct 2025 10:26:12 -0700 Subject: [PATCH 02/24] feat: enrich langchain spans with genai metadata --- .../langchain/callback_handler.py | 240 +++++++++++++++--- 1 file changed, 204 insertions(+), 36 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py index 138eb311a2..493a23290e 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py @@ -16,6 +16,7 @@ from typing import Any from uuid import UUID +from urllib.parse import urlparse from langchain_core.callbacks import BaseCallbackHandler # type: ignore from langchain_core.messages import BaseMessage # type: ignore @@ -25,7 +26,15 @@ from opentelemetry.semconv._incubating.attributes import ( gen_ai_attributes as GenAI, ) -from opentelemetry.trace import Tracer +from opentelemetry.semconv._incubating.attributes.azure_attributes import ( + AZURE_RESOURCE_PROVIDER_NAMESPACE, +) +from opentelemetry.semconv._incubating.attributes.openai_attributes import ( + OPENAI_REQUEST_SERVICE_TIER, + OPENAI_RESPONSE_SERVICE_TIER, + OPENAI_RESPONSE_SYSTEM_FINGERPRINT, +) +from opentelemetry.trace import Span, Tracer class OpenTelemetryLangChainCallbackHandler(BaseCallbackHandler): # type: ignore[misc] @@ -33,6 +42,29 @@ class OpenTelemetryLangChainCallbackHandler(BaseCallbackHandler): # type: ignor A callback handler for LangChain that uses OpenTelemetry to create spans for LLM calls and chains, tools etc,. in future. 
""" + _CHAT_MODEL_PROVIDER_MAPPING: dict[str, str] = { + "ChatOpenAI": GenAI.GenAiProviderNameValues.OPENAI.value, + "AzureChatOpenAI": GenAI.GenAiProviderNameValues.AZURE_AI_OPENAI.value, + "AzureOpenAI": GenAI.GenAiProviderNameValues.AZURE_AI_OPENAI.value, + "ChatBedrock": GenAI.GenAiProviderNameValues.AWS_BEDROCK.value, + "BedrockChat": GenAI.GenAiProviderNameValues.AWS_BEDROCK.value, + } + + _METADATA_PROVIDER_MAPPING: dict[str, str] = { + "openai": GenAI.GenAiProviderNameValues.OPENAI.value, + "azure": GenAI.GenAiProviderNameValues.AZURE_AI_OPENAI.value, + "azure_openai": GenAI.GenAiProviderNameValues.AZURE_AI_OPENAI.value, + "azure-ai-openai": GenAI.GenAiProviderNameValues.AZURE_AI_OPENAI.value, + "azure_ai_openai": GenAI.GenAiProviderNameValues.AZURE_AI_OPENAI.value, + "azure_ai_inference": GenAI.GenAiProviderNameValues.AZURE_AI_INFERENCE.value, + "azure-inference": GenAI.GenAiProviderNameValues.AZURE_AI_INFERENCE.value, + "amazon": GenAI.GenAiProviderNameValues.AWS_BEDROCK.value, + "bedrock": GenAI.GenAiProviderNameValues.AWS_BEDROCK.value, + "aws": GenAI.GenAiProviderNameValues.AWS_BEDROCK.value, + } + + _SERVER_URL_KEYS = ("base_url", "azure_endpoint", "endpoint") + def __init__( self, tracer: Tracer, @@ -54,33 +86,14 @@ def on_chat_model_start( metadata: dict[str, Any] | None, **kwargs: Any, ) -> None: - # Other providers/LLMs may be supported in the future and telemetry for them is skipped for now. - if serialized.get("name") not in ("ChatOpenAI", "ChatBedrock"): + provider_name = self._resolve_provider( + serialized.get("name"), metadata + ) + if provider_name is None: return - if "invocation_params" in kwargs: - params = ( - kwargs["invocation_params"].get("params") - or kwargs["invocation_params"] - ) - else: - params = kwargs - - request_model = "unknown" - for model_tag in ( - "model_name", # ChatOpenAI - "model_id", # ChatBedrock - ): - if (model := (params or {}).get(model_tag)) is not None: - request_model = model - break - elif (model := (metadata or {}).get(model_tag)) is not None: - request_model = model - break - - # Skip telemetry for unsupported request models - if request_model == "unknown": - return + params = self._extract_params(kwargs) + request_model = self._extract_request_model(params, metadata) span = self.span_manager.create_chat_span( run_id=run_id, @@ -88,7 +101,83 @@ def on_chat_model_start( request_model=request_model, ) - if params is not None: + span.set_attribute(GenAI.GEN_AI_PROVIDER_NAME, provider_name) + if provider_name in ( + GenAI.GenAiProviderNameValues.AZURE_AI_OPENAI.value, + GenAI.GenAiProviderNameValues.AZURE_AI_INFERENCE.value, + ): + span.set_attribute( + AZURE_RESOURCE_PROVIDER_NAMESPACE, "Microsoft.CognitiveServices" + ) + + self._apply_request_attributes(span, params, metadata) + + def _resolve_provider( + self, llm_name: str | None, metadata: dict[str, Any] | None + ) -> str | None: + if llm_name: + provider = self._CHAT_MODEL_PROVIDER_MAPPING.get(llm_name) + if provider: + return provider + + if metadata is None: + return None + + provider_key = metadata.get("ls_provider") + if not provider_key: + return None + + mapped = self._METADATA_PROVIDER_MAPPING.get(provider_key.lower()) + if mapped is not None: + return mapped + + return provider_key + + def _extract_params(self, kwargs: dict[str, Any]) -> dict[str, Any] | None: + invocation_params = kwargs.get("invocation_params") + if isinstance(invocation_params, dict): + params = invocation_params.get("params") or invocation_params + if isinstance(params, dict): + return params + 
return None + + return kwargs if kwargs else None + + def _extract_request_model( + self, + params: dict[str, Any] | None, + metadata: dict[str, Any] | None, + ) -> str | None: + search_order = ( + "model_name", + "model", + "model_id", + "ls_model_name", + "azure_deployment", + "deployment_name", + ) + + sources: list[dict[str, Any]] = [] + if isinstance(params, dict): + sources.append(params) + if isinstance(metadata, dict): + sources.append(metadata) + + for key in search_order: + for source in sources: + value = source.get(key) + if value: + return str(value) + + return None + + def _apply_request_attributes( + self, + span: Span, + params: dict[str, Any] | None, + metadata: dict[str, Any] | None, + ) -> None: + if params: top_p = params.get("top_p") if top_p is not None: span.set_attribute(GenAI.GEN_AI_REQUEST_TOP_P, top_p) @@ -110,32 +199,101 @@ def on_chat_model_start( seed = params.get("seed") if seed is not None: span.set_attribute(GenAI.GEN_AI_REQUEST_SEED, seed) - # ChatOpenAI temperature = params.get("temperature") if temperature is not None: span.set_attribute( GenAI.GEN_AI_REQUEST_TEMPERATURE, temperature ) - # ChatOpenAI - max_tokens = params.get("max_completion_tokens") + max_tokens = params.get("max_completion_tokens") or params.get( + "max_tokens" + ) if max_tokens is not None: span.set_attribute(GenAI.GEN_AI_REQUEST_MAX_TOKENS, max_tokens) - if metadata is not None: - provider = metadata.get("ls_provider") - if provider is not None: - span.set_attribute("gen_ai.provider.name", provider) - # ChatBedrock + top_k = params.get("top_k") + if top_k is not None: + span.set_attribute(GenAI.GEN_AI_REQUEST_TOP_K, top_k) + + choice_count = params.get("n") or params.get("choice_count") + if choice_count is not None: + try: + choice_value = int(choice_count) + except (TypeError, ValueError): + choice_value = choice_count + if choice_value != 1: + span.set_attribute( + GenAI.GEN_AI_REQUEST_CHOICE_COUNT, choice_value + ) + + output_type = self._extract_output_type(params) + if output_type is not None: + span.set_attribute(GenAI.GEN_AI_OUTPUT_TYPE, output_type) + + service_tier = params.get("service_tier") + if service_tier is not None: + span.set_attribute(OPENAI_REQUEST_SERVICE_TIER, service_tier) + + self._maybe_set_server_attributes(span, params) + + if metadata: temperature = metadata.get("ls_temperature") if temperature is not None: span.set_attribute( GenAI.GEN_AI_REQUEST_TEMPERATURE, temperature ) - # ChatBedrock max_tokens = metadata.get("ls_max_tokens") if max_tokens is not None: span.set_attribute(GenAI.GEN_AI_REQUEST_MAX_TOKENS, max_tokens) + def _maybe_set_server_attributes( + self, span: Span, params: dict[str, Any] + ) -> None: + potential_url = None + for key in self._SERVER_URL_KEYS: + value = params.get(key) + if isinstance(value, str) and value: + potential_url = value + break + + if not potential_url: + return + + parsed = urlparse(potential_url) + hostname = parsed.hostname or potential_url + if hostname: + span.set_attribute("server.address", hostname) + port = parsed.port + if port is None: + if parsed.scheme == "https": + port = 443 + elif parsed.scheme == "http": + port = 80 + if port is not None: + span.set_attribute("server.port", port) + + def _extract_output_type(self, params: dict[str, Any]) -> str | None: + response_format = params.get("response_format") + output_type: str | None = None + if isinstance(response_format, dict): + output_type = response_format.get("type") + elif isinstance(response_format, str): + output_type = response_format + + if not 
output_type: + return None + + lowered = output_type.lower() + mapping = { + "json_object": GenAI.GenAiOutputTypeValues.JSON.value, + "json_schema": GenAI.GenAiOutputTypeValues.JSON.value, + "json": GenAI.GenAiOutputTypeValues.JSON.value, + "text": GenAI.GenAiOutputTypeValues.TEXT.value, + "image": GenAI.GenAiOutputTypeValues.IMAGE.value, + "speech": GenAI.GenAiOutputTypeValues.SPEECH.value, + } + + return mapping.get(lowered) + def on_llm_end( self, response: LLMResult, # type: ignore [reportUnknownParameterType] @@ -210,6 +368,16 @@ def on_llm_end( if response_id is not None: span.set_attribute(GenAI.GEN_AI_RESPONSE_ID, str(response_id)) + service_tier = llm_output.get("service_tier") + if service_tier is not None: + span.set_attribute(OPENAI_RESPONSE_SERVICE_TIER, service_tier) + + system_fingerprint = llm_output.get("system_fingerprint") + if system_fingerprint is not None: + span.set_attribute( + OPENAI_RESPONSE_SYSTEM_FINGERPRINT, system_fingerprint + ) + # End the LLM span self.span_manager.end_span(run_id) From 33ead3ac8c4e9447cc82658f2dbc3574b48fb887 Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Tue, 7 Oct 2025 10:26:23 -0700 Subject: [PATCH 03/24] refactor: support chat span without request model --- .../instrumentation/langchain/span_manager.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/span_manager.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/span_manager.py index 636bfc3bc3..ca3de7fd61 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/span_manager.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/span_manager.py @@ -74,12 +74,18 @@ def create_chat_span( self, run_id: UUID, parent_run_id: Optional[UUID], - request_model: str, + request_model: Optional[str], ) -> Span: + operation_name = GenAI.GenAiOperationNameValues.CHAT.value + span_name = ( + f"{operation_name} {request_model}" + if request_model + else operation_name + ) span = self._create_span( run_id=run_id, parent_run_id=parent_run_id, - span_name=f"{GenAI.GenAiOperationNameValues.CHAT.value} {request_model}", + span_name=span_name, kind=SpanKind.CLIENT, ) span.set_attribute( From 669ace406b6f62d21fa5a026d34233ad40e93575 Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Tue, 7 Oct 2025 10:26:37 -0700 Subject: [PATCH 04/24] test: cover azure span provider metadata --- .../tests/test_llm_call.py | 72 ++++++++++++++++++- 1 file changed, 70 insertions(+), 2 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py index a23d50753b..b51e1734e9 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py @@ -1,10 +1,25 @@ from typing import Optional +from uuid import uuid4 import pytest from langchain_core.messages import HumanMessage, SystemMessage +from opentelemetry.instrumentation.langchain.callback_handler import ( + OpenTelemetryLangChainCallbackHandler, +) from opentelemetry.sdk.trace import ReadableSpan +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import 
SimpleSpanProcessor +from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( + InMemorySpanExporter, +) from opentelemetry.semconv._incubating.attributes import gen_ai_attributes +from opentelemetry.semconv._incubating.attributes.azure_attributes import ( + AZURE_RESOURCE_PROVIDER_NAMESPACE, +) +from opentelemetry.semconv._incubating.attributes.openai_attributes import ( + OPENAI_REQUEST_SERVICE_TIER, +) # span_exporter, start_instrumentation, chat_openai_gpt_3_5_turbo_model are coming from fixtures defined in conftest.py @@ -85,7 +100,9 @@ def assert_openai_completion_attributes( ) assert span.attributes[gen_ai_attributes.GEN_AI_REQUEST_MAX_TOKENS] == 100 assert span.attributes[gen_ai_attributes.GEN_AI_REQUEST_TEMPERATURE] == 0.1 - assert span.attributes["gen_ai.provider.name"] == "openai" + assert ( + span.attributes[gen_ai_attributes.GEN_AI_PROVIDER_NAME] == "openai" + ) assert gen_ai_attributes.GEN_AI_RESPONSE_ID in span.attributes assert span.attributes[gen_ai_attributes.GEN_AI_REQUEST_TOP_P] == 0.9 assert ( @@ -139,7 +156,10 @@ def assert_bedrock_completion_attributes( == "us.amazon.nova-lite-v1:0" ) - assert span.attributes["gen_ai.provider.name"] == "amazon_bedrock" + assert ( + span.attributes[gen_ai_attributes.GEN_AI_PROVIDER_NAME] + == "aws.bedrock" + ) assert span.attributes[gen_ai_attributes.GEN_AI_REQUEST_MAX_TOKENS] == 100 assert span.attributes[gen_ai_attributes.GEN_AI_REQUEST_TEMPERATURE] == 0.1 @@ -164,3 +184,51 @@ def assert_bedrock_completion_attributes( assert ( gen_ai_attributes.GEN_AI_USAGE_OUTPUT_TOKENS not in span.attributes ) + + +def test_azure_chat_sets_provider_and_server_attributes(): + exporter = InMemorySpanExporter() + provider = TracerProvider() + provider.add_span_processor(SimpleSpanProcessor(exporter)) + handler = OpenTelemetryLangChainCallbackHandler(provider.get_tracer(__name__)) + + run_id = uuid4() + + handler.on_chat_model_start( + serialized={"name": "AzureChatOpenAI"}, + messages=[], + run_id=run_id, + tags=None, + parent_run_id=None, + metadata={"ls_model_name": "gpt-4o"}, + invocation_params={ + "params": { + "model": "gpt-4o", + "azure_endpoint": "https://example.openai.azure.com/", + "service_tier": "default", + "n": 2, + } + }, + ) + + handler.span_manager.end_span(run_id) + span = exporter.get_finished_spans()[0] + + assert span.name == "chat gpt-4o" + assert ( + span.attributes[gen_ai_attributes.GEN_AI_REQUEST_MODEL] == "gpt-4o" + ) + assert ( + span.attributes[gen_ai_attributes.GEN_AI_PROVIDER_NAME] + == "azure.ai.openai" + ) + assert span.attributes["server.address"] == "example.openai.azure.com" + assert span.attributes["server.port"] == 443 + assert ( + span.attributes[gen_ai_attributes.GEN_AI_REQUEST_CHOICE_COUNT] == 2 + ) + assert span.attributes[OPENAI_REQUEST_SERVICE_TIER] == "default" + assert ( + span.attributes[AZURE_RESOURCE_PROVIDER_NAMESPACE] + == "Microsoft.CognitiveServices" + ) From 8b6937c8c94117acd2f568df1848b19381069c2c Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Tue, 7 Oct 2025 12:12:27 -0700 Subject: [PATCH 05/24] test: add callback handler unit coverage --- .../tests/test_callback_handler.py | 143 ++++++++++++++++++ 1 file changed, 143 insertions(+) create mode 100644 instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_callback_handler.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_callback_handler.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_callback_handler.py new file mode 100644 index 
0000000000..a33f7cf088 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_callback_handler.py @@ -0,0 +1,143 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any +from uuid import uuid4 + +from opentelemetry.instrumentation.langchain.callback_handler import ( + OpenTelemetryLangChainCallbackHandler, +) +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import SimpleSpanProcessor +from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( + InMemorySpanExporter, +) +from opentelemetry.semconv._incubating.attributes import ( + gen_ai_attributes as GenAI, +) +from opentelemetry.semconv._incubating.attributes.azure_attributes import ( + AZURE_RESOURCE_PROVIDER_NAMESPACE, +) +from opentelemetry.semconv._incubating.attributes.openai_attributes import ( + OPENAI_REQUEST_SERVICE_TIER, + OPENAI_RESPONSE_SERVICE_TIER, + OPENAI_RESPONSE_SYSTEM_FINGERPRINT, +) + + +def _create_handler(): + exporter = InMemorySpanExporter() + provider = TracerProvider() + provider.add_span_processor(SimpleSpanProcessor(exporter)) + handler = OpenTelemetryLangChainCallbackHandler( + tracer=provider.get_tracer(__name__) + ) + return handler, exporter + + +def test_provider_and_server_metadata_extracted(): + handler, exporter = _create_handler() + run_id = uuid4() + + handler.on_chat_model_start( + serialized={"name": "CustomLLM"}, + messages=[], + run_id=run_id, + tags=None, + parent_run_id=None, + metadata={ + "ls_provider": "azure", + "ls_model_name": "gpt-4o", + }, + invocation_params={ + "params": { + "model": "gpt-4o", + "base_url": "https://example.openai.azure.com/openai/deployments/demo", + "n": 2, + } + }, + ) + + handler.span_manager.end_span(run_id) + + span = exporter.get_finished_spans()[0] + assert span.name == "chat gpt-4o" + assert ( + span.attributes[GenAI.GEN_AI_PROVIDER_NAME] == "azure.ai.openai" + ) + assert ( + span.attributes[AZURE_RESOURCE_PROVIDER_NAMESPACE] + == "Microsoft.CognitiveServices" + ) + assert span.attributes["server.address"] == "example.openai.azure.com" + assert span.attributes["server.port"] == 443 + assert span.attributes[GenAI.GEN_AI_REQUEST_CHOICE_COUNT] == 2 + + +@dataclass +class _DummyLLMResult: + generations: list[Any] + llm_output: dict[str, Any] + + +def test_llm_end_sets_response_metadata(): + handler, exporter = _create_handler() + run_id = uuid4() + + handler.on_chat_model_start( + serialized={"name": "ChatOpenAI"}, + messages=[], + run_id=run_id, + tags=None, + parent_run_id=None, + metadata={"ls_model_name": "gpt-4"}, + invocation_params={"params": {"model": "gpt-4"}}, + ) + + handler.on_llm_end( + _DummyLLMResult( + generations=[], + llm_output={ + "model_name": "gpt-4-0125", + "service_tier": "premium", + "system_fingerprint": "fp-test", + "id": "chatcmpl-test", + }, + ), + run_id=run_id, + parent_run_id=None, + ) + + span = exporter.get_finished_spans()[0] + assert span.attributes[GenAI.GEN_AI_RESPONSE_MODEL] == "gpt-4-0125" + assert span.attributes[GenAI.GEN_AI_RESPONSE_ID] == "chatcmpl-test" + assert span.attributes[OPENAI_RESPONSE_SERVICE_TIER] == "premium" + assert ( + span.attributes[OPENAI_RESPONSE_SYSTEM_FINGERPRINT] == "fp-test" + ) + + +def test_choice_count_not_set_when_one(): + handler, exporter = _create_handler() + run_id = uuid4() + + handler.on_chat_model_start( + serialized={"name": "ChatOpenAI"}, + messages=[], + run_id=run_id, + tags=None, + parent_run_id=None, + metadata={"ls_model_name": "gpt-4"}, + 
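        # LangChain nests the model kwargs under invocation_params["params"];
        # _extract_params (patch 02) accepts either that nested shape or a
        # flat mapping, so the nested form below exercises the common path.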
invocation_params={ + "params": { + "model": "gpt-4", + "n": 1, + } + }, + ) + + handler.span_manager.end_span(run_id) + span = exporter.get_finished_spans()[0] + assert ( + GenAI.GEN_AI_REQUEST_CHOICE_COUNT not in span.attributes + ) From 96889d7c7207cdc4125da5a520043ce1c912a498 Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Tue, 7 Oct 2025 13:50:32 -0700 Subject: [PATCH 06/24] docs: note langchain genai schema update --- .../opentelemetry-instrumentation-langchain/CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/CHANGELOG.md b/instrumentation-genai/opentelemetry-instrumentation-langchain/CHANGELOG.md index c3d51f9fef..37647f8c68 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/CHANGELOG.md +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/CHANGELOG.md @@ -8,4 +8,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased - Added span support for genAI langchain llm invocation. - ([#3665](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3665)) \ No newline at end of file + ([#3665](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3665)) +- Align LangChain instrumentation with GenAI schema 1.37.0 and add unit coverage for updated metadata. + ([#3813](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3813)) From 3ae8264ae6cd0d8c4f2e86b47b619c38f4805c10 Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Tue, 7 Oct 2025 13:56:20 -0700 Subject: [PATCH 07/24] update with precommit fixes --- .../langchain/callback_handler.py | 5 +++-- .../tests/test_callback_handler.py | 13 +++---------- .../tests/test_llm_call.py | 19 +++++++------------ 3 files changed, 13 insertions(+), 24 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py index 493a23290e..e016f81ad4 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py @@ -15,8 +15,8 @@ from __future__ import annotations from typing import Any -from uuid import UUID from urllib.parse import urlparse +from uuid import UUID from langchain_core.callbacks import BaseCallbackHandler # type: ignore from langchain_core.messages import BaseMessage # type: ignore @@ -107,7 +107,8 @@ def on_chat_model_start( GenAI.GenAiProviderNameValues.AZURE_AI_INFERENCE.value, ): span.set_attribute( - AZURE_RESOURCE_PROVIDER_NAMESPACE, "Microsoft.CognitiveServices" + AZURE_RESOURCE_PROVIDER_NAMESPACE, + "Microsoft.CognitiveServices", ) self._apply_request_attributes(span, params, metadata) diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_callback_handler.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_callback_handler.py index a33f7cf088..0936c59dc9 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_callback_handler.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_callback_handler.py @@ -19,7 +19,6 @@ AZURE_RESOURCE_PROVIDER_NAMESPACE, ) from 
opentelemetry.semconv._incubating.attributes.openai_attributes import ( - OPENAI_REQUEST_SERVICE_TIER, OPENAI_RESPONSE_SERVICE_TIER, OPENAI_RESPONSE_SYSTEM_FINGERPRINT, ) @@ -62,9 +61,7 @@ def test_provider_and_server_metadata_extracted(): span = exporter.get_finished_spans()[0] assert span.name == "chat gpt-4o" - assert ( - span.attributes[GenAI.GEN_AI_PROVIDER_NAME] == "azure.ai.openai" - ) + assert span.attributes[GenAI.GEN_AI_PROVIDER_NAME] == "azure.ai.openai" assert ( span.attributes[AZURE_RESOURCE_PROVIDER_NAMESPACE] == "Microsoft.CognitiveServices" @@ -112,9 +109,7 @@ def test_llm_end_sets_response_metadata(): assert span.attributes[GenAI.GEN_AI_RESPONSE_MODEL] == "gpt-4-0125" assert span.attributes[GenAI.GEN_AI_RESPONSE_ID] == "chatcmpl-test" assert span.attributes[OPENAI_RESPONSE_SERVICE_TIER] == "premium" - assert ( - span.attributes[OPENAI_RESPONSE_SYSTEM_FINGERPRINT] == "fp-test" - ) + assert span.attributes[OPENAI_RESPONSE_SYSTEM_FINGERPRINT] == "fp-test" def test_choice_count_not_set_when_one(): @@ -138,6 +133,4 @@ def test_choice_count_not_set_when_one(): handler.span_manager.end_span(run_id) span = exporter.get_finished_spans()[0] - assert ( - GenAI.GEN_AI_REQUEST_CHOICE_COUNT not in span.attributes - ) + assert GenAI.GEN_AI_REQUEST_CHOICE_COUNT not in span.attributes diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py index b51e1734e9..06482726bf 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py @@ -7,8 +7,7 @@ from opentelemetry.instrumentation.langchain.callback_handler import ( OpenTelemetryLangChainCallbackHandler, ) -from opentelemetry.sdk.trace import ReadableSpan -from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace import ReadableSpan, TracerProvider from opentelemetry.sdk.trace.export import SimpleSpanProcessor from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( InMemorySpanExporter, @@ -100,9 +99,7 @@ def assert_openai_completion_attributes( ) assert span.attributes[gen_ai_attributes.GEN_AI_REQUEST_MAX_TOKENS] == 100 assert span.attributes[gen_ai_attributes.GEN_AI_REQUEST_TEMPERATURE] == 0.1 - assert ( - span.attributes[gen_ai_attributes.GEN_AI_PROVIDER_NAME] == "openai" - ) + assert span.attributes[gen_ai_attributes.GEN_AI_PROVIDER_NAME] == "openai" assert gen_ai_attributes.GEN_AI_RESPONSE_ID in span.attributes assert span.attributes[gen_ai_attributes.GEN_AI_REQUEST_TOP_P] == 0.9 assert ( @@ -190,7 +187,9 @@ def test_azure_chat_sets_provider_and_server_attributes(): exporter = InMemorySpanExporter() provider = TracerProvider() provider.add_span_processor(SimpleSpanProcessor(exporter)) - handler = OpenTelemetryLangChainCallbackHandler(provider.get_tracer(__name__)) + handler = OpenTelemetryLangChainCallbackHandler( + provider.get_tracer(__name__) + ) run_id = uuid4() @@ -215,18 +214,14 @@ def test_azure_chat_sets_provider_and_server_attributes(): span = exporter.get_finished_spans()[0] assert span.name == "chat gpt-4o" - assert ( - span.attributes[gen_ai_attributes.GEN_AI_REQUEST_MODEL] == "gpt-4o" - ) + assert span.attributes[gen_ai_attributes.GEN_AI_REQUEST_MODEL] == "gpt-4o" assert ( span.attributes[gen_ai_attributes.GEN_AI_PROVIDER_NAME] == "azure.ai.openai" ) assert span.attributes["server.address"] == "example.openai.azure.com" assert 
span.attributes["server.port"] == 443 - assert ( - span.attributes[gen_ai_attributes.GEN_AI_REQUEST_CHOICE_COUNT] == 2 - ) + assert span.attributes[gen_ai_attributes.GEN_AI_REQUEST_CHOICE_COUNT] == 2 assert span.attributes[OPENAI_REQUEST_SERVICE_TIER] == "default" assert ( span.attributes[AZURE_RESOURCE_PROVIDER_NAMESPACE] From 0e05bef2b1e77bea1f605c0a7b0123137000ab16 Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Wed, 8 Oct 2025 09:08:13 -0700 Subject: [PATCH 08/24] feat: capture langchain message content by default --- .../CHANGELOG.md | 2 + .../instrumentation/langchain/__init__.py | 15 +++ .../langchain/callback_handler.py | 71 ++++++++++++ .../tests/test_callback_handler.py | 102 +++++++++++++++++- .../tests/test_llm_call.py | 3 +- 5 files changed, 190 insertions(+), 3 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/CHANGELOG.md b/instrumentation-genai/opentelemetry-instrumentation-langchain/CHANGELOG.md index 37647f8c68..d80ae3b845 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/CHANGELOG.md +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/CHANGELOG.md @@ -11,3 +11,5 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#3665](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3665)) - Align LangChain instrumentation with GenAI schema 1.37.0 and add unit coverage for updated metadata. ([#3813](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3813)) +- Capture GenAI input/output messages on spans by default with opt-out control. + ([#3813](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3813)) diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py index 1b135d883f..c1271dc2b6 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py @@ -36,6 +36,7 @@ --- """ +import os from typing import Any, Callable, Collection from langchain_core.callbacks import BaseCallbackHandler # type: ignore @@ -72,6 +73,7 @@ def _instrument(self, **kwargs: Any): Enable Langchain instrumentation. 
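        Honours an explicit capture_messages keyword when supplied, and
        otherwise falls back to the
        OTEL_INSTRUMENTATION_LANGCHAIN_CAPTURE_MESSAGES environment
        variable (capture is enabled by default).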
""" tracer_provider = kwargs.get("tracer_provider") + capture_messages = self._resolve_capture_messages(kwargs) tracer = get_tracer( __name__, __version__, @@ -81,6 +83,7 @@ def _instrument(self, **kwargs: Any): otel_callback_handler = OpenTelemetryLangChainCallbackHandler( tracer=tracer, + capture_messages=capture_messages, ) wrap_function_wrapper( @@ -95,6 +98,18 @@ def _uninstrument(self, **kwargs: Any): """ unwrap("langchain_core.callbacks.base.BaseCallbackManager", "__init__") + def _resolve_capture_messages(self, kwargs: dict[str, Any]) -> bool: + if "capture_messages" in kwargs: + return bool(kwargs["capture_messages"]) + + env_value = os.getenv( + "OTEL_INSTRUMENTATION_LANGCHAIN_CAPTURE_MESSAGES" + ) + if env_value is not None: + return env_value.lower() in ("1", "true", "yes", "on") + + return True + class _BaseCallbackManagerInitWrapper: """ diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py index e016f81ad4..1430d40234 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py @@ -14,6 +14,7 @@ from __future__ import annotations +import json from typing import Any from urllib.parse import urlparse from uuid import UUID @@ -68,12 +69,14 @@ class OpenTelemetryLangChainCallbackHandler(BaseCallbackHandler): # type: ignor def __init__( self, tracer: Tracer, + capture_messages: bool, ) -> None: super().__init__() # type: ignore self.span_manager = _SpanManager( tracer=tracer, ) + self._capture_messages = capture_messages def on_chat_model_start( self, @@ -113,6 +116,13 @@ def on_chat_model_start( self._apply_request_attributes(span, params, metadata) + if self._capture_messages and messages: + serialized_messages = self._serialize_input_messages(messages) + span.set_attribute( + GenAI.GEN_AI_INPUT_MESSAGES, + self._serialize_to_json(serialized_messages), + ) + def _resolve_provider( self, llm_name: str | None, metadata: dict[str, Any] | None ) -> str | None: @@ -295,6 +305,59 @@ def _extract_output_type(self, params: dict[str, Any]) -> str | None: return mapping.get(lowered) + def _serialize_input_messages( + self, messages: list[list[BaseMessage]] + ) -> list[dict[str, Any]]: + serialized: list[dict[str, Any]] = [] + for conversation in messages: + for message in conversation: + serialized.append(self._serialize_message(message)) + return serialized + + def _serialize_output_messages( + self, response: LLMResult + ) -> list[dict[str, Any]]: + serialized: list[dict[str, Any]] = [] + generations = getattr(response, "generations", []) # type: ignore + for generation in generations: + for item in generation: + message = getattr(item, "message", None) + if message is not None: + serialized.append(self._serialize_message(message)) + return serialized + + def _serialize_message(self, message: BaseMessage) -> dict[str, Any]: + payload: dict[str, Any] = { + "type": getattr(message, "type", message.__class__.__name__), + "content": getattr(message, "content", None), + } + for attr in ( + "additional_kwargs", + "response_metadata", + "tool_call_id", + "tool_calls", + "usage_metadata", + "id", + "name", + ): + value = getattr(message, attr, None) + if value: + payload[attr] = 
value + return payload + + def _serialize_to_json(self, payload: Any) -> str: + return json.dumps(payload, default=self._json_default) + + @staticmethod + def _json_default(value: Any) -> Any: + if isinstance(value, (str, int, float, bool)) or value is None: + return value + if isinstance(value, dict): + return value + if isinstance(value, (list, tuple)): + return list(value) + return getattr(value, "__dict__", str(value)) + def on_llm_end( self, response: LLMResult, # type: ignore [reportUnknownParameterType] @@ -379,6 +442,14 @@ def on_llm_end( OPENAI_RESPONSE_SYSTEM_FINGERPRINT, system_fingerprint ) + if self._capture_messages: + serialized_outputs = self._serialize_output_messages(response) + if serialized_outputs: + span.set_attribute( + GenAI.GEN_AI_OUTPUT_MESSAGES, + self._serialize_to_json(serialized_outputs), + ) + # End the LLM span self.span_manager.end_span(run_id) diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_callback_handler.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_callback_handler.py index 0936c59dc9..64aee81cfd 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_callback_handler.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_callback_handler.py @@ -1,5 +1,6 @@ from __future__ import annotations +import json from dataclasses import dataclass from typing import Any from uuid import uuid4 @@ -24,12 +25,13 @@ ) -def _create_handler(): +def _create_handler(capture_messages: bool = True): exporter = InMemorySpanExporter() provider = TracerProvider() provider.add_span_processor(SimpleSpanProcessor(exporter)) handler = OpenTelemetryLangChainCallbackHandler( - tracer=provider.get_tracer(__name__) + tracer=provider.get_tracer(__name__), + capture_messages=capture_messages, ) return handler, exporter @@ -77,6 +79,22 @@ class _DummyLLMResult: llm_output: dict[str, Any] +@dataclass +class _DummyGeneration: + message: Any + generation_info: dict[str, Any] | None = None + + +@dataclass +class _FakeMessage: + content: str + type: str + additional_kwargs: dict[str, Any] | None = None + response_metadata: dict[str, Any] | None = None + usage_metadata: dict[str, Any] | None = None + id: str | None = None + + def test_llm_end_sets_response_metadata(): handler, exporter = _create_handler() run_id = uuid4() @@ -134,3 +152,83 @@ def test_choice_count_not_set_when_one(): handler.span_manager.end_span(run_id) span = exporter.get_finished_spans()[0] assert GenAI.GEN_AI_REQUEST_CHOICE_COUNT not in span.attributes + + +def test_capture_messages_sets_attributes_by_default(): + handler, exporter = _create_handler() + run_id = uuid4() + + handler.on_chat_model_start( + serialized={"name": "ChatOpenAI"}, + messages=[ + [ + _FakeMessage( + content="hello", + type="human", + id="m1", + ) + ] + ], + run_id=run_id, + tags=None, + parent_run_id=None, + metadata={"ls_model_name": "gpt-4"}, + invocation_params={"params": {"model": "gpt-4"}}, + ) + + handler.on_llm_end( + _DummyLLMResult( + generations=[ + [ + _DummyGeneration( + message=_FakeMessage( + content="result", + type="ai", + id="m2", + ) + ) + ] + ], + llm_output={}, + ), + run_id=run_id, + parent_run_id=None, + ) + + span = exporter.get_finished_spans()[0] + input_payload = json.loads(span.attributes[GenAI.GEN_AI_INPUT_MESSAGES]) + output_payload = json.loads(span.attributes[GenAI.GEN_AI_OUTPUT_MESSAGES]) + assert input_payload[0]["content"] == "hello" + assert output_payload[0]["content"] == "result" + + 
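For reference, the opt-out that the next test drives through the constructor argument can also be reached through the environment variable introduced in this patch; a minimal sketch, assuming instrumentation has not yet been enabled elsewhere in the process:

    import os

    from opentelemetry.instrumentation.langchain import LangChainInstrumentor

    # _resolve_capture_messages treats "1", "true", "yes" and "on"
    # (case-insensitively) as enabled; any other value disables capture.
    os.environ["OTEL_INSTRUMENTATION_LANGCHAIN_CAPTURE_MESSAGES"] = "false"
    LangChainInstrumentor().instrument()

An explicit instrument(capture_messages=...) keyword, when provided, takes precedence over the environment variable.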
+def test_capture_messages_can_be_disabled(): + handler, exporter = _create_handler(capture_messages=False) + run_id = uuid4() + + handler.on_chat_model_start( + serialized={"name": "ChatOpenAI"}, + messages=[ + [ + _FakeMessage( + content="hello", + type="human", + ) + ] + ], + run_id=run_id, + tags=None, + parent_run_id=None, + metadata={"ls_model_name": "gpt-4"}, + invocation_params={"params": {"model": "gpt-4"}}, + ) + + handler.on_llm_end( + _DummyLLMResult(generations=[], llm_output={}), + run_id=run_id, + parent_run_id=None, + ) + + span = exporter.get_finished_spans()[0] + assert GenAI.GEN_AI_INPUT_MESSAGES not in span.attributes + assert GenAI.GEN_AI_OUTPUT_MESSAGES not in span.attributes diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py index 06482726bf..6103eda3d8 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py @@ -188,7 +188,8 @@ def test_azure_chat_sets_provider_and_server_attributes(): provider = TracerProvider() provider.add_span_processor(SimpleSpanProcessor(exporter)) handler = OpenTelemetryLangChainCallbackHandler( - provider.get_tracer(__name__) + provider.get_tracer(__name__), + capture_messages=True, ) run_id = uuid4() From 476c86e0f15e2c2d4683e7e6c72c0c034f0acb15 Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Mon, 13 Oct 2025 17:09:25 -0700 Subject: [PATCH 09/24] Fix pyright typing for LangChain instrumentation --- .../instrumentation/langchain/__init__.py | 47 +++- .../langchain/callback_handler.py | 229 +++++++++++++----- 2 files changed, 211 insertions(+), 65 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py index c1271dc2b6..b7dc5e848b 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py @@ -37,21 +37,56 @@ """ import os -from typing import Any, Callable, Collection +from importlib import import_module +from types import SimpleNamespace +from typing import Any, Callable, Collection, cast from langchain_core.callbacks import BaseCallbackHandler # type: ignore from wrapt import wrap_function_wrapper # type: ignore -from opentelemetry.instrumentation.instrumentor import BaseInstrumentor from opentelemetry.instrumentation.langchain.callback_handler import ( OpenTelemetryLangChainCallbackHandler, ) from opentelemetry.instrumentation.langchain.package import _instruments from opentelemetry.instrumentation.langchain.version import __version__ -from opentelemetry.instrumentation.utils import unwrap -from opentelemetry.semconv.schemas import Schemas from opentelemetry.trace import get_tracer +_instrumentor_module = SimpleNamespace(BaseInstrumentor=object) +try: + _instrumentor_module = import_module( + "opentelemetry.instrumentation.instrumentor" + ) +except ModuleNotFoundError: # pragma: no cover - optional dependency + pass + +BaseInstrumentor = cast( + type, + getattr(_instrumentor_module, "BaseInstrumentor", object), +) + +_utils_module = SimpleNamespace( + unwrap=lambda *_args, 
**_kwargs: None, +) +try: + _utils_module = import_module("opentelemetry.instrumentation.utils") +except ModuleNotFoundError: # pragma: no cover - optional dependency + pass + +unwrap = cast( + Callable[..., None], + getattr(_utils_module, "unwrap", lambda *_args, **_kwargs: None), +) + +_schemas_module = SimpleNamespace() +try: + _schemas_module = import_module("opentelemetry.semconv.schemas") +except ModuleNotFoundError: # pragma: no cover - optional dependency + pass + +SchemasModule = cast( + Any, getattr(_schemas_module, "Schemas", SimpleNamespace()) +) + class LangChainInstrumentor(BaseInstrumentor): """ @@ -74,11 +109,13 @@ def _instrument(self, **kwargs: Any): """ tracer_provider = kwargs.get("tracer_provider") capture_messages = self._resolve_capture_messages(kwargs) + schema_entry = getattr(SchemasModule, "V1_37_0", None) + schema_url = getattr(schema_entry, "value", None) tracer = get_tracer( __name__, __version__, tracer_provider, - schema_url=Schemas.V1_37_0.value, + schema_url=schema_url, ) otel_callback_handler = OpenTelemetryLangChainCallbackHandler( diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py index 1430d40234..f93b7366f8 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py @@ -15,27 +15,112 @@ from __future__ import annotations import json -from typing import Any +from importlib import import_module +from types import SimpleNamespace +from typing import Any, Mapping, Protocol, Sequence, TypedDict, cast from urllib.parse import urlparse from uuid import UUID -from langchain_core.callbacks import BaseCallbackHandler # type: ignore -from langchain_core.messages import BaseMessage # type: ignore -from langchain_core.outputs import LLMResult # type: ignore - from opentelemetry.instrumentation.langchain.span_manager import _SpanManager -from opentelemetry.semconv._incubating.attributes import ( - gen_ai_attributes as GenAI, +from opentelemetry.trace import Span, Tracer + +try: + from langchain_core.callbacks import ( + BaseCallbackHandler, # type: ignore[import] + ) +except ImportError: # pragma: no cover - optional dependency + BaseCallbackHandler = object # type: ignore[assignment] + + +class _SerializedMessage(TypedDict, total=False): + type: str + content: Any + additional_kwargs: Any + response_metadata: Mapping[str, Any] | None + tool_call_id: str + tool_calls: Any + usage_metadata: Mapping[str, Any] | None + id: str + name: str + + +class _MessageLike(Protocol): + type: str + + @property + def content(self) -> Any: ... + + def __getattr__(self, name: str) -> Any: ... + + +class _ChatGenerationLike(Protocol): + message: _MessageLike | None + generation_info: Mapping[str, Any] | None + + def __getattr__(self, name: str) -> Any: ... + + +class _LLMResultLike(Protocol): + generations: Sequence[Sequence[_ChatGenerationLike]] + llm_output: Mapping[str, Any] | None + + def __getattr__(self, name: str) -> Any: ... 
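These Protocols are purely structural and are never isinstance-checked; the handler only reads attributes off them via getattr and cast, which is why the unit tests can feed in plain dataclasses. A minimal sketch of a stand-in result exposing the same attribute surface (the Stub* names are illustrative, not part of the patch):

    from dataclasses import dataclass, field
    from typing import Any

    @dataclass
    class StubMessage:
        # _MessageLike: only .type and .content are read unconditionally
        type: str = "ai"
        content: str = "hi"

    @dataclass
    class StubGeneration:
        # _ChatGenerationLike: .message and .generation_info
        message: StubMessage | None = None
        generation_info: dict[str, Any] | None = None

    @dataclass
    class StubResult:
        # _LLMResultLike: .generations and .llm_output
        generations: list[list[StubGeneration]] = field(default_factory=list)
        llm_output: dict[str, Any] | None = None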
+ + +try: + _azure_attributes = import_module( + "opentelemetry.semconv._incubating.attributes.azure_attributes" + ) +except ModuleNotFoundError: # pragma: no cover - optional dependency + _azure_attributes = SimpleNamespace() + +try: + _gen_ai_attributes = import_module( + "opentelemetry.semconv._incubating.attributes.gen_ai_attributes" + ) +except ModuleNotFoundError: # pragma: no cover - optional dependency + _gen_ai_attributes = SimpleNamespace() + +try: + _openai_attributes = import_module( + "opentelemetry.semconv._incubating.attributes.openai_attributes" + ) +except ModuleNotFoundError: # pragma: no cover - optional dependency + _openai_attributes = SimpleNamespace() + +GenAI = cast(Any, _gen_ai_attributes) +AZURE_RESOURCE_PROVIDER_NAMESPACE = cast( + Any, + getattr( + _azure_attributes, + "AZURE_RESOURCE_PROVIDER_NAMESPACE", + "Microsoft.CognitiveServices", + ), ) -from opentelemetry.semconv._incubating.attributes.azure_attributes import ( - AZURE_RESOURCE_PROVIDER_NAMESPACE, +OPENAI_REQUEST_SERVICE_TIER = cast( + Any, + getattr( + _openai_attributes, + "OPENAI_REQUEST_SERVICE_TIER", + "genai.openai.request.service_tier", + ), ) -from opentelemetry.semconv._incubating.attributes.openai_attributes import ( - OPENAI_REQUEST_SERVICE_TIER, - OPENAI_RESPONSE_SERVICE_TIER, - OPENAI_RESPONSE_SYSTEM_FINGERPRINT, +OPENAI_RESPONSE_SERVICE_TIER = cast( + Any, + getattr( + _openai_attributes, + "OPENAI_RESPONSE_SERVICE_TIER", + "genai.openai.response.service_tier", + ), +) +OPENAI_RESPONSE_SYSTEM_FINGERPRINT = cast( + Any, + getattr( + _openai_attributes, + "OPENAI_RESPONSE_SYSTEM_FINGERPRINT", + "genai.openai.response.system_fingerprint", + ), ) -from opentelemetry.trace import Span, Tracer class OpenTelemetryLangChainCallbackHandler(BaseCallbackHandler): # type: ignore[misc] @@ -81,7 +166,7 @@ def __init__( def on_chat_model_start( self, serialized: dict[str, Any], - messages: list[list[BaseMessage]], # type: ignore + messages: Sequence[Sequence[_MessageLike]], *, run_id: UUID, tags: list[str] | None, @@ -95,7 +180,8 @@ def on_chat_model_start( if provider_name is None: return - params = self._extract_params(kwargs) + kwargs_dict: dict[str, Any] = dict(kwargs) + params = self._extract_params(kwargs_dict) request_model = self._extract_request_model(params, metadata) span = self.span_manager.create_chat_span( @@ -124,7 +210,7 @@ def on_chat_model_start( ) def _resolve_provider( - self, llm_name: str | None, metadata: dict[str, Any] | None + self, llm_name: str | None, metadata: Mapping[str, Any] | None ) -> str | None: if llm_name: provider = self._CHAT_MODEL_PROVIDER_MAPPING.get(llm_name) @@ -144,11 +230,13 @@ def _resolve_provider( return provider_key - def _extract_params(self, kwargs: dict[str, Any]) -> dict[str, Any] | None: + def _extract_params( + self, kwargs: Mapping[str, Any] + ) -> Mapping[str, Any] | None: invocation_params = kwargs.get("invocation_params") - if isinstance(invocation_params, dict): + if isinstance(invocation_params, Mapping): params = invocation_params.get("params") or invocation_params - if isinstance(params, dict): + if isinstance(params, Mapping): return params return None @@ -156,8 +244,8 @@ def _extract_params(self, kwargs: dict[str, Any]) -> dict[str, Any] | None: def _extract_request_model( self, - params: dict[str, Any] | None, - metadata: dict[str, Any] | None, + params: Mapping[str, Any] | None, + metadata: Mapping[str, Any] | None, ) -> str | None: search_order = ( "model_name", @@ -168,10 +256,10 @@ def _extract_request_model( "deployment_name", ) - 
sources: list[dict[str, Any]] = [] - if isinstance(params, dict): + sources: list[Mapping[str, Any]] = [] + if isinstance(params, Mapping): sources.append(params) - if isinstance(metadata, dict): + if isinstance(metadata, Mapping): sources.append(metadata) for key in search_order: @@ -185,8 +273,8 @@ def _extract_request_model( def _apply_request_attributes( self, span: Span, - params: dict[str, Any] | None, - metadata: dict[str, Any] | None, + params: Mapping[str, Any] | None, + metadata: Mapping[str, Any] | None, ) -> None: if params: top_p = params.get("top_p") @@ -257,7 +345,7 @@ def _apply_request_attributes( span.set_attribute(GenAI.GEN_AI_REQUEST_MAX_TOKENS, max_tokens) def _maybe_set_server_attributes( - self, span: Span, params: dict[str, Any] + self, span: Span, params: Mapping[str, Any] ) -> None: potential_url = None for key in self._SERVER_URL_KEYS: @@ -282,7 +370,7 @@ def _maybe_set_server_attributes( if port is not None: span.set_attribute("server.port", port) - def _extract_output_type(self, params: dict[str, Any]) -> str | None: + def _extract_output_type(self, params: Mapping[str, Any]) -> str | None: response_format = params.get("response_format") output_type: str | None = None if isinstance(response_format, dict): @@ -306,28 +394,33 @@ def _extract_output_type(self, params: dict[str, Any]) -> str | None: return mapping.get(lowered) def _serialize_input_messages( - self, messages: list[list[BaseMessage]] - ) -> list[dict[str, Any]]: - serialized: list[dict[str, Any]] = [] + self, messages: Sequence[Sequence[_MessageLike]] + ) -> list[_SerializedMessage]: + serialized: list[_SerializedMessage] = [] for conversation in messages: for message in conversation: serialized.append(self._serialize_message(message)) return serialized def _serialize_output_messages( - self, response: LLMResult - ) -> list[dict[str, Any]]: - serialized: list[dict[str, Any]] = [] - generations = getattr(response, "generations", []) # type: ignore + self, response: _LLMResultLike + ) -> list[_SerializedMessage]: + serialized: list[_SerializedMessage] = [] + generations_attr = getattr(response, "generations", ()) + generations = cast( + Sequence[Sequence[_ChatGenerationLike]], generations_attr + ) for generation in generations: for item in generation: - message = getattr(item, "message", None) + message = cast( + _MessageLike | None, getattr(item, "message", None) + ) if message is not None: serialized.append(self._serialize_message(message)) return serialized - def _serialize_message(self, message: BaseMessage) -> dict[str, Any]: - payload: dict[str, Any] = { + def _serialize_message(self, message: _MessageLike) -> _SerializedMessage: + payload: _SerializedMessage = { "type": getattr(message, "type", message.__class__.__name__), "content": getattr(message, "content", None), } @@ -360,7 +453,7 @@ def _json_default(value: Any) -> Any: def on_llm_end( self, - response: LLMResult, # type: ignore [reportUnknownParameterType] + response: _LLMResultLike, *, run_id: UUID, parent_run_id: UUID | None, @@ -372,11 +465,18 @@ def on_llm_end( # If the span does not exist, we cannot set attributes or end it return + generations_attr = getattr(response, "generations", ()) + generations = cast( + Sequence[Sequence[_ChatGenerationLike]], generations_attr + ) + finish_reasons: list[str] = [] - for generation in getattr(response, "generations", []): # type: ignore + message_usage_metadata: Mapping[str, Any] | None + for generation in generations: for chat_generation in generation: - generation_info = getattr( - 
chat_generation, "generation_info", None + generation_info = cast( + Mapping[str, Any] | None, + getattr(chat_generation, "generation_info", None), ) if generation_info is not None: finish_reason = generation_info.get( @@ -384,28 +484,35 @@ def on_llm_end( ) if finish_reason is not None: finish_reasons.append(str(finish_reason)) - if chat_generation.message: - if ( - generation_info is None - and chat_generation.message.response_metadata + message = cast( + _MessageLike | None, + getattr(chat_generation, "message", None), + ) + if message is not None: + if generation_info is None and getattr( + message, "response_metadata", None ): + response_metadata = cast( + Mapping[str, Any] | None, + getattr(message, "response_metadata", None), + ) finish_reason = ( - chat_generation.message.response_metadata.get( - "stopReason", "unknown" - ) + response_metadata.get("stopReason", "unknown") + if response_metadata is not None + else "unknown" ) if finish_reason is not None: finish_reasons.append(str(finish_reason)) - if chat_generation.message.usage_metadata: - input_tokens = ( - chat_generation.message.usage_metadata.get( - "input_tokens", 0 - ) + message_usage_metadata = cast( + Mapping[str, Any] | None, + getattr(message, "usage_metadata", None), + ) + if message_usage_metadata: + input_tokens = message_usage_metadata.get( + "input_tokens", 0 ) - output_tokens = ( - chat_generation.message.usage_metadata.get( - "output_tokens", 0 - ) + output_tokens = message_usage_metadata.get( + "output_tokens", 0 ) span.set_attribute( GenAI.GEN_AI_USAGE_INPUT_TOKENS, input_tokens @@ -418,7 +525,9 @@ def on_llm_end( GenAI.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons ) - llm_output = getattr(response, "llm_output", None) # type: ignore + llm_output = cast( + Mapping[str, Any] | None, getattr(response, "llm_output", None) + ) if llm_output is not None: response_model = llm_output.get("model_name") or llm_output.get( "model" From e55b1b3c0f52409af94374a889f13efd9fa9540c Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Tue, 14 Oct 2025 11:51:12 -0700 Subject: [PATCH 10/24] Improve LangChain stub typing and typecheck config --- .../instrumentation/langchain/__init__.py | 79 ++++++++----- .../langchain/callback_handler.py | 106 +++++++++++++----- pyproject.toml | 1 + tox.ini | 1 + 4 files changed, 131 insertions(+), 56 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py index b7dc5e848b..c09badeecc 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py @@ -32,6 +32,8 @@ result = llm.invoke(messages) LangChainInstrumentor().uninstrument() +# pyright: reportMissingImports=false + API --- """ @@ -39,43 +41,62 @@ import os from importlib import import_module from types import SimpleNamespace -from typing import Any, Callable, Collection, cast +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Collection, + Protocol, + Sequence, + cast, +) -from langchain_core.callbacks import BaseCallbackHandler # type: ignore from wrapt import wrap_function_wrapper # type: ignore +if TYPE_CHECKING: + + class BaseCallbackHandler(Protocol): + def __init__(self, *args: Any, **kwargs: Any) -> None: ... 
+ + inheritable_handlers: Sequence[Any] + + def add_handler(self, handler: Any, inherit: bool = False) -> None: ... + +else: + try: + from langchain_core.callbacks import ( + BaseCallbackHandler, # type: ignore[import] + ) + except ImportError: # pragma: no cover - optional dependency + + class BaseCallbackHandler: + def __init__(self, *args: Any, **kwargs: Any) -> None: + return + + inheritable_handlers: Sequence[Any] = () + + def add_handler(self, handler: Any, inherit: bool = False) -> None: + raise RuntimeError( + "LangChain is required for the LangChain instrumentation." + ) + + +from opentelemetry.instrumentation.instrumentor import BaseInstrumentor from opentelemetry.instrumentation.langchain.callback_handler import ( OpenTelemetryLangChainCallbackHandler, ) from opentelemetry.instrumentation.langchain.package import _instruments from opentelemetry.instrumentation.langchain.version import __version__ -from opentelemetry.trace import get_tracer -_instrumentor_module = SimpleNamespace(BaseInstrumentor=object) try: - _instrumentor_module = import_module( - "opentelemetry.instrumentation.instrumentor" - ) -except ModuleNotFoundError: # pragma: no cover - optional dependency - pass + from opentelemetry.instrumentation.utils import unwrap +except ImportError: # pragma: no cover - optional dependency -BaseInstrumentor = cast( - type, - getattr(_instrumentor_module, "BaseInstrumentor", object), -) + def unwrap(obj: object, attr: str) -> None: + return None -_utils_module = SimpleNamespace( - unwrap=lambda *_args, **_kwargs: None, -) -try: - _utils_module = import_module("opentelemetry.instrumentation.utils") -except ModuleNotFoundError: # pragma: no cover - optional dependency - pass -unwrap = cast( - Callable[..., None], - getattr(_utils_module, "unwrap", lambda *_args, **_kwargs: None), -) +from opentelemetry.trace import get_tracer _schemas_module = SimpleNamespace() try: @@ -95,9 +116,7 @@ class LangChainInstrumentor(BaseInstrumentor): to capture LLM telemetry. """ - def __init__( - self, - ): + def __init__(self) -> None: super().__init__() def instrumentation_dependencies(self) -> Collection[str]: @@ -161,14 +180,14 @@ def __init__( def __call__( self, wrapped: Callable[..., None], - instance: BaseCallbackHandler, # type: ignore + instance: BaseCallbackHandler, args: tuple[Any, ...], kwargs: dict[str, Any], ): wrapped(*args, **kwargs) # Ensure our OTel callback is present if not already. 
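        # isinstance() in the loop below also matches subclasses of the
        # OTel handler, so an application-installed subclass suppresses
        # the default handler rather than being duplicated alongside it.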
- for handler in instance.inheritable_handlers: # type: ignore + for handler in instance.inheritable_handlers: if isinstance(handler, type(self._otel_handler)): break else: - instance.add_handler(self._otel_handler, inherit=True) # type: ignore + instance.add_handler(self._otel_handler, inherit=True) diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py index f93b7366f8..8661db3971 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py @@ -1,3 +1,5 @@ +# pyright: reportMissingImports=false + # Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,21 +17,46 @@ from __future__ import annotations import json +from collections.abc import Mapping, Sequence from importlib import import_module from types import SimpleNamespace -from typing import Any, Mapping, Protocol, Sequence, TypedDict, cast +from typing import TYPE_CHECKING, Any, Protocol, TypedDict, cast from urllib.parse import urlparse from uuid import UUID from opentelemetry.instrumentation.langchain.span_manager import _SpanManager from opentelemetry.trace import Span, Tracer -try: - from langchain_core.callbacks import ( - BaseCallbackHandler, # type: ignore[import] - ) -except ImportError: # pragma: no cover - optional dependency - BaseCallbackHandler = object # type: ignore[assignment] + +class _BaseCallbackHandlerProtocol(Protocol): + def __init__(self, *args: Any, **kwargs: Any) -> None: ... + + inheritable_handlers: Sequence[Any] + + def add_handler(self, handler: Any, inherit: bool = False) -> None: ... + + +class _BaseCallbackHandlerStub: + def __init__(self, *args: Any, **kwargs: Any) -> None: + return + + inheritable_handlers: Sequence[Any] = () + + def add_handler(self, handler: Any, inherit: bool = False) -> None: + raise RuntimeError( + "LangChain is required for the LangChain instrumentation." + ) + + +if TYPE_CHECKING: + BaseCallbackHandler = _BaseCallbackHandlerProtocol +else: + try: + from langchain_core.callbacks import ( + BaseCallbackHandler, # type: ignore[import] + ) + except ImportError: # pragma: no cover - optional dependency + BaseCallbackHandler = _BaseCallbackHandlerStub class _SerializedMessage(TypedDict, total=False): @@ -123,7 +150,7 @@ def __getattr__(self, name: str) -> Any: ... ) -class OpenTelemetryLangChainCallbackHandler(BaseCallbackHandler): # type: ignore[misc] +class OpenTelemetryLangChainCallbackHandler(BaseCallbackHandler): """ A callback handler for LangChain that uses OpenTelemetry to create spans for LLM calls and chains, tools etc,. in future. 
""" @@ -156,7 +183,9 @@ def __init__( tracer: Tracer, capture_messages: bool, ) -> None: - super().__init__() # type: ignore + base_init: Any = getattr(super(), "__init__", None) + if callable(base_init): + base_init() self.span_manager = _SpanManager( tracer=tracer, @@ -230,17 +259,31 @@ def _resolve_provider( return provider_key - def _extract_params( - self, kwargs: Mapping[str, Any] - ) -> Mapping[str, Any] | None: + def _extract_params(self, kwargs: Mapping[str, Any]) -> dict[str, Any]: invocation_params = kwargs.get("invocation_params") if isinstance(invocation_params, Mapping): - params = invocation_params.get("params") or invocation_params - if isinstance(params, Mapping): - return params - return None - - return kwargs if kwargs else None + invocation_mapping = cast(Mapping[str, Any], invocation_params) + params_raw = cast( + Mapping[Any, Any] | None, invocation_mapping.get("params") + ) + if isinstance(params_raw, Mapping): + params_mapping = params_raw + extracted: dict[str, Any] = {} + for key, value in params_mapping.items(): + key_str = key if isinstance(key, str) else str(key) + extracted[key_str] = value + return extracted + invocation_mapping = cast(Mapping[Any, Any], invocation_params) + extracted: dict[str, Any] = {} + for key, value in invocation_mapping.items(): + key_str = key if isinstance(key, str) else str(key) + extracted[key_str] = value + return extracted + + extracted: dict[str, Any] = {} + for key, value in kwargs.items(): + extracted[key] = value + return extracted def _extract_request_model( self, @@ -273,7 +316,7 @@ def _extract_request_model( def _apply_request_attributes( self, span: Span, - params: Mapping[str, Any] | None, + params: dict[str, Any] | None, metadata: Mapping[str, Any] | None, ) -> None: if params: @@ -373,10 +416,17 @@ def _maybe_set_server_attributes( def _extract_output_type(self, params: Mapping[str, Any]) -> str | None: response_format = params.get("response_format") output_type: str | None = None - if isinstance(response_format, dict): - output_type = response_format.get("type") + if isinstance(response_format, Mapping): + response_mapping = cast(Mapping[Any, Any], response_format) + candidate: Any = response_mapping.get("type") + if isinstance(candidate, str): + output_type = candidate + elif candidate is not None: + output_type = str(candidate) elif isinstance(response_format, str): output_type = response_format + elif response_format is not None: + output_type = str(response_format) if not output_type: return None @@ -420,7 +470,7 @@ def _serialize_output_messages( return serialized def _serialize_message(self, message: _MessageLike) -> _SerializedMessage: - payload: _SerializedMessage = { + payload: dict[str, Any] = { "type": getattr(message, "type", message.__class__.__name__), "content": getattr(message, "content", None), } @@ -434,9 +484,9 @@ def _serialize_message(self, message: _MessageLike) -> _SerializedMessage: "name", ): value = getattr(message, attr, None) - if value: + if value is not None: payload[attr] = value - return payload + return cast(_SerializedMessage, payload) def _serialize_to_json(self, payload: Any) -> str: return json.dumps(payload, default=self._json_default) @@ -446,9 +496,13 @@ def _json_default(value: Any) -> Any: if isinstance(value, (str, int, float, bool)) or value is None: return value if isinstance(value, dict): - return value + return cast(dict[str, Any], value) if isinstance(value, (list, tuple)): - return list(value) + seq_value = cast(Sequence[Any], value) + return [ + 
OpenTelemetryLangChainCallbackHandler._json_default(item) + for item in seq_value + ] return getattr(value, "__dict__", str(value)) def on_llm_end( diff --git a/pyproject.toml b/pyproject.toml index 30ada5bb25..f238d2392f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -220,4 +220,5 @@ exclude = [ "instrumentation-genai/opentelemetry-instrumentation-weaviate/tests/**/*.py", "instrumentation-genai/opentelemetry-instrumentation-weaviate/examples/**/*.py", "util/opentelemetry-util-genai/tests/**/*.py", + "**/.venv", ] diff --git a/tox.ini b/tox.ini index 6d3aee161e..adb176c483 100644 --- a/tox.ini +++ b/tox.ini @@ -1075,6 +1075,7 @@ deps = {toxinidir}/util/opentelemetry-util-genai[upload] {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai[instruments] {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-google-genai[instruments] + {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-langchain[instruments] {toxinidir}/instrumentation/opentelemetry-instrumentation-aiokafka[instruments] {toxinidir}/instrumentation/opentelemetry-instrumentation-asyncclick[instruments] {toxinidir}/exporter/opentelemetry-exporter-credential-provider-gcp From fadcf7d030634c7143e1bf169f04e348ab44f86f Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Thu, 16 Oct 2025 07:26:40 -0700 Subject: [PATCH 11/24] Rename LangChain instrumentation package to v2 --- instrumentation-genai/README.md | 4 ++-- .../CHANGELOG.md | 0 .../LICENSE | 0 .../NOTICE | 0 .../README.rst | 0 .../examples/manual/.env | 0 .../examples/manual/README.rst | 0 .../examples/manual/main.py | 0 .../examples/manual/requirements.txt | 2 +- .../examples/zero-code/.env | 0 .../examples/zero-code/README.rst | 0 .../examples/zero-code/main.py | 0 .../examples/zero-code/requirements.txt | 2 +- .../pyproject.toml | 4 ++-- .../src/opentelemetry/instrumentation/langchain/__init__.py | 0 .../instrumentation/langchain/callback_handler.py | 0 .../src/opentelemetry/instrumentation/langchain/package.py | 0 .../opentelemetry/instrumentation/langchain/span_manager.py | 0 .../src/opentelemetry/instrumentation/langchain/version.py | 0 .../tests/__init__.py | 0 .../test_chat_openai_gpt_3_5_turbo_model_llm_call.yaml | 0 .../test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml | 0 .../tests/conftest.py | 0 .../tests/test_callback_handler.py | 0 .../tests/test_llm_call.py | 0 .../tests/test_span_manager.py | 0 uv.lock | 6 +++--- 27 files changed, 9 insertions(+), 9 deletions(-) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/CHANGELOG.md (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/LICENSE (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/NOTICE (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/README.rst (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/examples/manual/.env (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/examples/manual/README.rst (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/examples/manual/main.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => 
opentelemetry-instrumentation-langchain-v2}/examples/manual/requirements.txt (74%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/examples/zero-code/.env (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/examples/zero-code/README.rst (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/examples/zero-code/main.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/examples/zero-code/requirements.txt (77%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/pyproject.toml (95%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/src/opentelemetry/instrumentation/langchain/__init__.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/src/opentelemetry/instrumentation/langchain/callback_handler.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/src/opentelemetry/instrumentation/langchain/package.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/src/opentelemetry/instrumentation/langchain/span_manager.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/src/opentelemetry/instrumentation/langchain/version.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/tests/__init__.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/tests/cassettes/test_chat_openai_gpt_3_5_turbo_model_llm_call.yaml (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/tests/cassettes/test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/tests/conftest.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/tests/test_callback_handler.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/tests/test_llm_call.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain => opentelemetry-instrumentation-langchain-v2}/tests/test_span_manager.py (100%) diff --git a/instrumentation-genai/README.md b/instrumentation-genai/README.md index 1b8f5e49e6..266b05e04c 100644 --- a/instrumentation-genai/README.md +++ b/instrumentation-genai/README.md @@ -2,8 +2,8 @@ | Instrumentation | Supported Packages | Metrics support | Semconv status | | --------------- | ------------------ | --------------- | -------------- | | [opentelemetry-instrumentation-google-genai](./opentelemetry-instrumentation-google-genai) | google-genai >= 1.0.0 | No | development -| [opentelemetry-instrumentation-langchain](./opentelemetry-instrumentation-langchain) | langchain >= 0.3.21 | No | development +| 
[opentelemetry-instrumentation-langchain-v2](./opentelemetry-instrumentation-langchain-v2) | langchain >= 0.3.21 | No | development | [opentelemetry-instrumentation-openai-agents-v2](./opentelemetry-instrumentation-openai-agents-v2) | openai-agents >= 0.3.3 | No | development | [opentelemetry-instrumentation-openai-v2](./opentelemetry-instrumentation-openai-v2) | openai >= 1.26.0 | Yes | development | [opentelemetry-instrumentation-vertexai](./opentelemetry-instrumentation-vertexai) | google-cloud-aiplatform >= 1.64 | No | development -| [opentelemetry-instrumentation-weaviate](./opentelemetry-instrumentation-weaviate) | weaviate-client >= 3.0.0,<5.0.0 | No | development \ No newline at end of file +| [opentelemetry-instrumentation-weaviate](./opentelemetry-instrumentation-weaviate) | weaviate-client >= 3.0.0,<5.0.0 | No | development diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/CHANGELOG.md b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/CHANGELOG.md similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/CHANGELOG.md rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/CHANGELOG.md diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/LICENSE b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/LICENSE similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/LICENSE rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/LICENSE diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/NOTICE b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/NOTICE similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/NOTICE rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/NOTICE diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/README.rst b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/README.rst similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/README.rst rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/README.rst diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/.env b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/.env similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/.env rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/.env diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/README.rst b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/README.rst similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/README.rst rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/README.rst diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/main.py b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/main.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/main.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/main.py diff --git 
a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/requirements.txt b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/requirements.txt similarity index 74% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/requirements.txt rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/requirements.txt index 0f986b1be0..9d975112b5 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/requirements.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/requirements.txt @@ -4,4 +4,4 @@ opentelemetry-sdk>=1.31.0 opentelemetry-exporter-otlp-proto-grpc>=1.31.0 # Uncomment after lanchain instrumetation is released -# opentelemetry-instrumentation-langchain~=2.0b0.dev \ No newline at end of file +# opentelemetry-instrumentation-langchain-v2~=2.0b0.dev diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/.env b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/.env similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/.env rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/.env diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/README.rst b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/README.rst similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/README.rst rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/README.rst diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/main.py b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/main.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/main.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/main.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/requirements.txt b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/requirements.txt similarity index 77% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/requirements.txt rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/requirements.txt index 63b3f56cf1..bdd54b2252 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/requirements.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/requirements.txt @@ -5,4 +5,4 @@ opentelemetry-exporter-otlp-proto-grpc>=1.31.0 opentelemetry-distro~=0.51b0 # Uncomment after lanchain instrumetation is released -# opentelemetry-instrumentation-langchain~=2.0b0.dev +# opentelemetry-instrumentation-langchain-v2~=2.0b0.dev diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/pyproject.toml b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/pyproject.toml similarity index 95% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/pyproject.toml rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/pyproject.toml 
index 8d87cc9085..f39c3c8b05 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/pyproject.toml +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/pyproject.toml @@ -3,7 +3,7 @@ requires = ["hatchling"] build-backend = "hatchling.build" [project] -name = "opentelemetry-instrumentation-langchain" +name = "opentelemetry-instrumentation-langchain-v2" dynamic = ["version"] description = "OpenTelemetry Official Langchain instrumentation" readme = "README.rst" @@ -39,7 +39,7 @@ instruments = [ langchain = "opentelemetry.instrumentation.langchain:LangChainInstrumentor" [project.urls] -Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation-genai/opentelemetry-instrumentation-langchain" +Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation-genai/opentelemetry-instrumentation-langchain-v2" Repository = "https://github.com/open-telemetry/opentelemetry-python-contrib" [tool.hatch.version] diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/__init__.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/__init__.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/callback_handler.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/callback_handler.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/package.py b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/package.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/package.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/package.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/span_manager.py b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/span_manager.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/span_manager.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/span_manager.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/version.py b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/version.py similarity index 100% rename from 
instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/version.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/version.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/__init__.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/tests/__init__.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/__init__.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_chat_openai_gpt_3_5_turbo_model_llm_call.yaml b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/cassettes/test_chat_openai_gpt_3_5_turbo_model_llm_call.yaml similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_chat_openai_gpt_3_5_turbo_model_llm_call.yaml rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/cassettes/test_chat_openai_gpt_3_5_turbo_model_llm_call.yaml diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/cassettes/test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/cassettes/test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/conftest.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/conftest.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_callback_handler.py b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/test_callback_handler.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_callback_handler.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/test_callback_handler.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/test_llm_call.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/test_llm_call.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_span_manager.py b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/test_span_manager.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_span_manager.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/test_span_manager.py diff --git a/uv.lock b/uv.lock index 76a60b9134..280522ce6a 100644 --- a/uv.lock +++ b/uv.lock @@ -43,7 
+43,7 @@ members = [ "opentelemetry-instrumentation-httpx", "opentelemetry-instrumentation-jinja2", "opentelemetry-instrumentation-kafka-python", - "opentelemetry-instrumentation-langchain", + "opentelemetry-instrumentation-langchain-v2", "opentelemetry-instrumentation-logging", "opentelemetry-instrumentation-mysql", "opentelemetry-instrumentation-mysqlclient", @@ -3264,8 +3264,8 @@ requires-dist = [ provides-extras = ["instruments", "instruments-any"] [[package]] -name = "opentelemetry-instrumentation-langchain" -source = { editable = "instrumentation-genai/opentelemetry-instrumentation-langchain" } +name = "opentelemetry-instrumentation-langchain-v2" +source = { editable = "instrumentation-genai/opentelemetry-instrumentation-langchain-v2" } dependencies = [ { name = "opentelemetry-api" }, { name = "opentelemetry-instrumentation" }, From b028c75521dbc1fa1013b70f4b9a57ebef082724 Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Thu, 16 Oct 2025 07:34:31 -0700 Subject: [PATCH 12/24] Register LangChain v2 bootstrap mapping --- .../src/opentelemetry/instrumentation/bootstrap_gen.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py index 2ea6f1bf58..8e6d7825d4 100644 --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -16,6 +16,10 @@ # RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE. libraries = [ + { + "library": "langchain >= 0.3.21", + "instrumentation": "opentelemetry-instrumentation-langchain-v2==2.0b0.dev", + }, { "library": "openai >= 1.26.0", "instrumentation": "opentelemetry-instrumentation-openai-v2", From 0d4cfd72806ca743a902977db2307c825a1a0346 Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Thu, 16 Oct 2025 08:20:35 -0700 Subject: [PATCH 13/24] tox -e generate --- instrumentation-genai/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/instrumentation-genai/README.md b/instrumentation-genai/README.md index 266b05e04c..fad2a1b92a 100644 --- a/instrumentation-genai/README.md +++ b/instrumentation-genai/README.md @@ -6,4 +6,4 @@ | [opentelemetry-instrumentation-openai-agents-v2](./opentelemetry-instrumentation-openai-agents-v2) | openai-agents >= 0.3.3 | No | development | [opentelemetry-instrumentation-openai-v2](./opentelemetry-instrumentation-openai-v2) | openai >= 1.26.0 | Yes | development | [opentelemetry-instrumentation-vertexai](./opentelemetry-instrumentation-vertexai) | google-cloud-aiplatform >= 1.64 | No | development -| [opentelemetry-instrumentation-weaviate](./opentelemetry-instrumentation-weaviate) | weaviate-client >= 3.0.0,<5.0.0 | No | development +| [opentelemetry-instrumentation-weaviate](./opentelemetry-instrumentation-weaviate) | weaviate-client >= 3.0.0,<5.0.0 | No | development \ No newline at end of file From aef0792e00eed54ac72e0131f984dae7156f0f01 Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Thu, 16 Oct 2025 08:37:24 -0700 Subject: [PATCH 14/24] chore: update configs and docs --- .github/component_owners.yml | 2 +- RELEASING.md | 4 ++-- eachdist.ini | 2 +- pyproject.toml | 6 +++--- scripts/generate_instrumentation_bootstrap.py | 2 +- tox.ini | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/component_owners.yml b/.github/component_owners.yml index 70375ce48f..1cbda52c97 
100644 --- a/.github/component_owners.yml +++ b/.github/component_owners.yml @@ -45,7 +45,7 @@ components: - DylanRussell - keith-decker - instrumentation-genai/opentelemetry-instrumentation-langchain: + instrumentation-genai/opentelemetry-instrumentation-langchain-v2: - zhirafovod - wrisa diff --git a/RELEASING.md b/RELEASING.md index b8d4b56bcf..eeb004d108 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -25,7 +25,7 @@ > - opentelemetry-instrumentation-openai-agents-v2 > - opentelemetry-instrumentation-vertexai > - opentelemetry-instrumentation-google-genai -> - opentelemetry-instrumentation-langchain +> - opentelemetry-instrumentation-langchain-v2 > - opentelemetry-instrumentation-weaviate > - opentelemetry-util-genai > @@ -91,7 +91,7 @@ The workflow will create a pull request that should be merged in order to procee > - opentelemetry-instrumentation-openai-agents-v2 > - opentelemetry-instrumentation-vertexai > - opentelemetry-instrumentation-google-genai -> - opentelemetry-instrumentation-langchain +> - opentelemetry-instrumentation-langchain-v2 > - opentelemetry-instrumentation-weaviate > - opentelemetry-util-genai > - opentelemetry-exporter-credential-provider-gcp diff --git a/eachdist.ini b/eachdist.ini index 239258065a..d9db85d2ec 100644 --- a/eachdist.ini +++ b/eachdist.ini @@ -56,7 +56,7 @@ packages= opentelemetry-instrumentation-openai-v2 opentelemetry-instrumentation-openai-agents-v2 opentelemetry-instrumentation-test - opentelemetry-instrumentation-langchain + opentelemetry-instrumentation-langchain-v2 opentelemetry-instrumentation-weaviate opentelemetry-util-genai diff --git a/pyproject.toml b/pyproject.toml index f238d2392f..5778f581ae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -202,7 +202,7 @@ include = [ "instrumentation/opentelemetry-instrumentation-asyncclick", "instrumentation/opentelemetry-instrumentation-threading", "instrumentation-genai/opentelemetry-instrumentation-vertexai", - "instrumentation-genai/opentelemetry-instrumentation-langchain", + "instrumentation-genai/opentelemetry-instrumentation-langchain-v2", "instrumentation-genai/opentelemetry-instrumentation-weaviate", "util/opentelemetry-util-genai", "exporter/opentelemetry-exporter-credential-provider-gcp", @@ -215,8 +215,8 @@ exclude = [ "instrumentation/opentelemetry-instrumentation-threading/tests/**", "instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/**/*.py", "instrumentation-genai/opentelemetry-instrumentation-vertexai/examples/**/*.py", - "instrumentation-genai/opentelemetry-instrumentation-langchain/tests/**/*.py", - "instrumentation-genai/opentelemetry-instrumentation-langchain/examples/**/*.py", + "instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/**/*.py", + "instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/**/*.py", "instrumentation-genai/opentelemetry-instrumentation-weaviate/tests/**/*.py", "instrumentation-genai/opentelemetry-instrumentation-weaviate/examples/**/*.py", "util/opentelemetry-util-genai/tests/**/*.py", diff --git a/scripts/generate_instrumentation_bootstrap.py b/scripts/generate_instrumentation_bootstrap.py index 9843885fe6..2f44eaab71 100755 --- a/scripts/generate_instrumentation_bootstrap.py +++ b/scripts/generate_instrumentation_bootstrap.py @@ -67,7 +67,7 @@ # Langchain instrumentation is currently excluded because it is still in early # development. This filter will get removed once it is further along in its # development lifecycle and ready to be included by default. 
- "opentelemetry-instrumentation-langchain", + "opentelemetry-instrumentation-langchain-v2", # Weaviate instrumentation is currently excluded because it is still in early # development. This filter will get removed once it is further along in its # development lifecycle and ready to be included by default. diff --git a/tox.ini b/tox.ini index adb176c483..912ab8528f 100644 --- a/tox.ini +++ b/tox.ini @@ -1075,7 +1075,7 @@ deps = {toxinidir}/util/opentelemetry-util-genai[upload] {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai[instruments] {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-google-genai[instruments] - {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-langchain[instruments] + {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-langchain-v2[instruments] {toxinidir}/instrumentation/opentelemetry-instrumentation-aiokafka[instruments] {toxinidir}/instrumentation/opentelemetry-instrumentation-asyncclick[instruments] {toxinidir}/exporter/opentelemetry-exporter-credential-provider-gcp From 4c8120129c0f8a9dc4ffcd37d8db51fa8ffc3bfd Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Thu, 16 Oct 2025 08:40:17 -0700 Subject: [PATCH 15/24] tox generate --- .../src/opentelemetry/instrumentation/bootstrap_gen.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py index 8e6d7825d4..2ea6f1bf58 100644 --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -16,10 +16,6 @@ # RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE. libraries = [ - { - "library": "langchain >= 0.3.21", - "instrumentation": "opentelemetry-instrumentation-langchain-v2==2.0b0.dev", - }, { "library": "openai >= 1.26.0", "instrumentation": "opentelemetry-instrumentation-openai-v2", From 8930c0d36bb8d620ab8b41d64ae4b7371d105777 Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Thu, 16 Oct 2025 08:45:20 -0700 Subject: [PATCH 16/24] ignore the typecheck as its failing only on 3.9 --- .../src/opentelemetry/instrumentation/langchain/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/__init__.py index c09badeecc..f905b5eac0 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/__init__.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/__init__.py @@ -140,7 +140,7 @@ def _instrument(self, **kwargs: Any): otel_callback_handler = OpenTelemetryLangChainCallbackHandler( tracer=tracer, capture_messages=capture_messages, - ) + ) # pyright: ignore[reportAbstractUsage] wrap_function_wrapper( module="langchain_core.callbacks", From 992dfff91812b1fffd13930f280b85102a93e327 Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Mon, 20 Oct 2025 11:40:14 -0700 Subject: [PATCH 17/24] Revert "tox generate" This reverts commit 4c8120129c0f8a9dc4ffcd37d8db51fa8ffc3bfd. 
--- .../src/opentelemetry/instrumentation/bootstrap_gen.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py index 2ea6f1bf58..8e6d7825d4 100644 --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -16,6 +16,10 @@ # RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE. libraries = [ + { + "library": "langchain >= 0.3.21", + "instrumentation": "opentelemetry-instrumentation-langchain-v2==2.0b0.dev", + }, { "library": "openai >= 1.26.0", "instrumentation": "opentelemetry-instrumentation-openai-v2", From a632d3b3564300644280b205047d40c0abf8de1a Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Mon, 20 Oct 2025 11:40:20 -0700 Subject: [PATCH 18/24] Revert "chore: update configs and docs" This reverts commit aef0792e00eed54ac72e0131f984dae7156f0f01. --- .github/component_owners.yml | 2 +- RELEASING.md | 4 ++-- eachdist.ini | 2 +- pyproject.toml | 6 +++--- scripts/generate_instrumentation_bootstrap.py | 2 +- tox.ini | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/component_owners.yml b/.github/component_owners.yml index 1cbda52c97..70375ce48f 100644 --- a/.github/component_owners.yml +++ b/.github/component_owners.yml @@ -45,7 +45,7 @@ components: - DylanRussell - keith-decker - instrumentation-genai/opentelemetry-instrumentation-langchain-v2: + instrumentation-genai/opentelemetry-instrumentation-langchain: - zhirafovod - wrisa diff --git a/RELEASING.md b/RELEASING.md index 6b98e2af62..05aca72d79 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -25,7 +25,7 @@ > - opentelemetry-instrumentation-openai-agents-v2 > - opentelemetry-instrumentation-vertexai > - opentelemetry-instrumentation-google-genai -> - opentelemetry-instrumentation-langchain-v2 +> - opentelemetry-instrumentation-langchain > - opentelemetry-instrumentation-weaviate > - opentelemetry-util-genai > @@ -92,7 +92,7 @@ The workflow will create a pull request that should be merged in order to procee > - opentelemetry-instrumentation-openai-agents-v2 > - opentelemetry-instrumentation-vertexai > - opentelemetry-instrumentation-google-genai -> - opentelemetry-instrumentation-langchain-v2 +> - opentelemetry-instrumentation-langchain > - opentelemetry-instrumentation-weaviate > - opentelemetry-util-genai > - opentelemetry-exporter-credential-provider-gcp diff --git a/eachdist.ini b/eachdist.ini index d9db85d2ec..239258065a 100644 --- a/eachdist.ini +++ b/eachdist.ini @@ -56,7 +56,7 @@ packages= opentelemetry-instrumentation-openai-v2 opentelemetry-instrumentation-openai-agents-v2 opentelemetry-instrumentation-test - opentelemetry-instrumentation-langchain-v2 + opentelemetry-instrumentation-langchain opentelemetry-instrumentation-weaviate opentelemetry-util-genai diff --git a/pyproject.toml b/pyproject.toml index 5778f581ae..f238d2392f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -202,7 +202,7 @@ include = [ "instrumentation/opentelemetry-instrumentation-asyncclick", "instrumentation/opentelemetry-instrumentation-threading", "instrumentation-genai/opentelemetry-instrumentation-vertexai", - "instrumentation-genai/opentelemetry-instrumentation-langchain-v2", + "instrumentation-genai/opentelemetry-instrumentation-langchain", "instrumentation-genai/opentelemetry-instrumentation-weaviate", 
"util/opentelemetry-util-genai", "exporter/opentelemetry-exporter-credential-provider-gcp", @@ -215,8 +215,8 @@ exclude = [ "instrumentation/opentelemetry-instrumentation-threading/tests/**", "instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/**/*.py", "instrumentation-genai/opentelemetry-instrumentation-vertexai/examples/**/*.py", - "instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/**/*.py", - "instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/**/*.py", + "instrumentation-genai/opentelemetry-instrumentation-langchain/tests/**/*.py", + "instrumentation-genai/opentelemetry-instrumentation-langchain/examples/**/*.py", "instrumentation-genai/opentelemetry-instrumentation-weaviate/tests/**/*.py", "instrumentation-genai/opentelemetry-instrumentation-weaviate/examples/**/*.py", "util/opentelemetry-util-genai/tests/**/*.py", diff --git a/scripts/generate_instrumentation_bootstrap.py b/scripts/generate_instrumentation_bootstrap.py index 2f44eaab71..9843885fe6 100755 --- a/scripts/generate_instrumentation_bootstrap.py +++ b/scripts/generate_instrumentation_bootstrap.py @@ -67,7 +67,7 @@ # Langchain instrumentation is currently excluded because it is still in early # development. This filter will get removed once it is further along in its # development lifecycle and ready to be included by default. - "opentelemetry-instrumentation-langchain-v2", + "opentelemetry-instrumentation-langchain", # Weaviate instrumentation is currently excluded because it is still in early # development. This filter will get removed once it is further along in its # development lifecycle and ready to be included by default. diff --git a/tox.ini b/tox.ini index 5c57dd8360..47126e0843 100644 --- a/tox.ini +++ b/tox.ini @@ -1085,7 +1085,7 @@ deps = {toxinidir}/util/opentelemetry-util-genai[upload] {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai[instruments] {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-google-genai[instruments] - {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-langchain-v2[instruments] + {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-langchain[instruments] {toxinidir}/instrumentation/opentelemetry-instrumentation-aiokafka[instruments] {toxinidir}/instrumentation/opentelemetry-instrumentation-asyncclick[instruments] {toxinidir}/exporter/opentelemetry-exporter-credential-provider-gcp From 4cef9b01c3c1dca892fc2d8cfe9fb7a555180d87 Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Mon, 20 Oct 2025 11:40:26 -0700 Subject: [PATCH 19/24] Revert "Register LangChain v2 bootstrap mapping" This reverts commit b028c75521dbc1fa1013b70f4b9a57ebef082724. --- .../src/opentelemetry/instrumentation/bootstrap_gen.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py index 8e6d7825d4..2ea6f1bf58 100644 --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -16,10 +16,6 @@ # RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE. 
libraries = [ - { - "library": "langchain >= 0.3.21", - "instrumentation": "opentelemetry-instrumentation-langchain-v2==2.0b0.dev", - }, { "library": "openai >= 1.26.0", "instrumentation": "opentelemetry-instrumentation-openai-v2", From 32c7320e9525fdf8b48a5bed95ba5e6f66dbde71 Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Mon, 20 Oct 2025 11:40:34 -0700 Subject: [PATCH 20/24] Revert "Rename LangChain instrumentation package to v2" This reverts commit fadcf7d030634c7143e1bf169f04e348ab44f86f. --- instrumentation-genai/README.md | 2 +- .../CHANGELOG.md | 0 .../LICENSE | 0 .../NOTICE | 0 .../README.rst | 0 .../examples/manual/.env | 0 .../examples/manual/README.rst | 0 .../examples/manual/main.py | 0 .../examples/manual/requirements.txt | 2 +- .../examples/zero-code/.env | 0 .../examples/zero-code/README.rst | 0 .../examples/zero-code/main.py | 0 .../examples/zero-code/requirements.txt | 2 +- .../pyproject.toml | 4 ++-- .../src/opentelemetry/instrumentation/langchain/__init__.py | 0 .../instrumentation/langchain/callback_handler.py | 0 .../src/opentelemetry/instrumentation/langchain/package.py | 0 .../opentelemetry/instrumentation/langchain/span_manager.py | 0 .../src/opentelemetry/instrumentation/langchain/version.py | 0 .../tests/__init__.py | 0 .../test_chat_openai_gpt_3_5_turbo_model_llm_call.yaml | 0 .../test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml | 0 .../tests/conftest.py | 0 .../tests/test_callback_handler.py | 0 .../tests/test_llm_call.py | 0 .../tests/test_span_manager.py | 0 uv.lock | 6 +++--- 27 files changed, 8 insertions(+), 8 deletions(-) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/CHANGELOG.md (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/LICENSE (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/NOTICE (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/README.rst (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/examples/manual/.env (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/examples/manual/README.rst (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/examples/manual/main.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/examples/manual/requirements.txt (74%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/examples/zero-code/.env (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/examples/zero-code/README.rst (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/examples/zero-code/main.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/examples/zero-code/requirements.txt (77%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/pyproject.toml (95%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => 
opentelemetry-instrumentation-langchain}/src/opentelemetry/instrumentation/langchain/__init__.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/src/opentelemetry/instrumentation/langchain/callback_handler.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/src/opentelemetry/instrumentation/langchain/package.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/src/opentelemetry/instrumentation/langchain/span_manager.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/src/opentelemetry/instrumentation/langchain/version.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/tests/__init__.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/tests/cassettes/test_chat_openai_gpt_3_5_turbo_model_llm_call.yaml (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/tests/cassettes/test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/tests/conftest.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/tests/test_callback_handler.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/tests/test_llm_call.py (100%) rename instrumentation-genai/{opentelemetry-instrumentation-langchain-v2 => opentelemetry-instrumentation-langchain}/tests/test_span_manager.py (100%) diff --git a/instrumentation-genai/README.md b/instrumentation-genai/README.md index fad2a1b92a..1b8f5e49e6 100644 --- a/instrumentation-genai/README.md +++ b/instrumentation-genai/README.md @@ -2,7 +2,7 @@ | Instrumentation | Supported Packages | Metrics support | Semconv status | | --------------- | ------------------ | --------------- | -------------- | | [opentelemetry-instrumentation-google-genai](./opentelemetry-instrumentation-google-genai) | google-genai >= 1.0.0 | No | development -| [opentelemetry-instrumentation-langchain-v2](./opentelemetry-instrumentation-langchain-v2) | langchain >= 0.3.21 | No | development +| [opentelemetry-instrumentation-langchain](./opentelemetry-instrumentation-langchain) | langchain >= 0.3.21 | No | development | [opentelemetry-instrumentation-openai-agents-v2](./opentelemetry-instrumentation-openai-agents-v2) | openai-agents >= 0.3.3 | No | development | [opentelemetry-instrumentation-openai-v2](./opentelemetry-instrumentation-openai-v2) | openai >= 1.26.0 | Yes | development | [opentelemetry-instrumentation-vertexai](./opentelemetry-instrumentation-vertexai) | google-cloud-aiplatform >= 1.64 | No | development diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/CHANGELOG.md b/instrumentation-genai/opentelemetry-instrumentation-langchain/CHANGELOG.md similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/CHANGELOG.md rename to instrumentation-genai/opentelemetry-instrumentation-langchain/CHANGELOG.md diff --git 
a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/LICENSE b/instrumentation-genai/opentelemetry-instrumentation-langchain/LICENSE similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/LICENSE rename to instrumentation-genai/opentelemetry-instrumentation-langchain/LICENSE diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/NOTICE b/instrumentation-genai/opentelemetry-instrumentation-langchain/NOTICE similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/NOTICE rename to instrumentation-genai/opentelemetry-instrumentation-langchain/NOTICE diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/README.rst b/instrumentation-genai/opentelemetry-instrumentation-langchain/README.rst similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/README.rst rename to instrumentation-genai/opentelemetry-instrumentation-langchain/README.rst diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/.env b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/.env similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/.env rename to instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/.env diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/README.rst b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/README.rst similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/README.rst rename to instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/README.rst diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/main.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/main.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/main.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/main.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/requirements.txt b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/requirements.txt similarity index 74% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/requirements.txt rename to instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/requirements.txt index 9d975112b5..0f986b1be0 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/manual/requirements.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/requirements.txt @@ -4,4 +4,4 @@ opentelemetry-sdk>=1.31.0 opentelemetry-exporter-otlp-proto-grpc>=1.31.0 # Uncomment after lanchain instrumetation is released -# opentelemetry-instrumentation-langchain-v2~=2.0b0.dev +# opentelemetry-instrumentation-langchain~=2.0b0.dev \ No newline at end of file diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/.env b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/.env similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/.env rename to 
instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/.env diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/README.rst b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/README.rst similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/README.rst rename to instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/README.rst diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/main.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/main.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/main.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/main.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/requirements.txt b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/requirements.txt similarity index 77% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/requirements.txt rename to instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/requirements.txt index bdd54b2252..63b3f56cf1 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/examples/zero-code/requirements.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/requirements.txt @@ -5,4 +5,4 @@ opentelemetry-exporter-otlp-proto-grpc>=1.31.0 opentelemetry-distro~=0.51b0 # Uncomment after lanchain instrumetation is released -# opentelemetry-instrumentation-langchain-v2~=2.0b0.dev +# opentelemetry-instrumentation-langchain~=2.0b0.dev diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/pyproject.toml b/instrumentation-genai/opentelemetry-instrumentation-langchain/pyproject.toml similarity index 95% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/pyproject.toml rename to instrumentation-genai/opentelemetry-instrumentation-langchain/pyproject.toml index f39c3c8b05..8d87cc9085 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/pyproject.toml +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/pyproject.toml @@ -3,7 +3,7 @@ requires = ["hatchling"] build-backend = "hatchling.build" [project] -name = "opentelemetry-instrumentation-langchain-v2" +name = "opentelemetry-instrumentation-langchain" dynamic = ["version"] description = "OpenTelemetry Official Langchain instrumentation" readme = "README.rst" @@ -39,7 +39,7 @@ instruments = [ langchain = "opentelemetry.instrumentation.langchain:LangChainInstrumentor" [project.urls] -Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation-genai/opentelemetry-instrumentation-langchain-v2" +Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation-genai/opentelemetry-instrumentation-langchain" Repository = "https://github.com/open-telemetry/opentelemetry-python-contrib" [tool.hatch.version] diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py 
similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/__init__.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/callback_handler.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/callback_handler.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/package.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/package.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/package.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/package.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/span_manager.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/span_manager.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/span_manager.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/span_manager.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/version.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/version.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/src/opentelemetry/instrumentation/langchain/version.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/version.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/__init__.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/__init__.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain/tests/__init__.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/cassettes/test_chat_openai_gpt_3_5_turbo_model_llm_call.yaml b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_chat_openai_gpt_3_5_turbo_model_llm_call.yaml similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/cassettes/test_chat_openai_gpt_3_5_turbo_model_llm_call.yaml rename to instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_chat_openai_gpt_3_5_turbo_model_llm_call.yaml diff --git 
a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/cassettes/test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/cassettes/test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml rename to instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/conftest.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/conftest.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/test_callback_handler.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_callback_handler.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/test_callback_handler.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_callback_handler.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/test_llm_call.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/test_llm_call.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/test_span_manager.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_span_manager.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-langchain-v2/tests/test_span_manager.py rename to instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_span_manager.py diff --git a/uv.lock b/uv.lock index 280522ce6a..76a60b9134 100644 --- a/uv.lock +++ b/uv.lock @@ -43,7 +43,7 @@ members = [ "opentelemetry-instrumentation-httpx", "opentelemetry-instrumentation-jinja2", "opentelemetry-instrumentation-kafka-python", - "opentelemetry-instrumentation-langchain-v2", + "opentelemetry-instrumentation-langchain", "opentelemetry-instrumentation-logging", "opentelemetry-instrumentation-mysql", "opentelemetry-instrumentation-mysqlclient", @@ -3264,8 +3264,8 @@ requires-dist = [ provides-extras = ["instruments", "instruments-any"] [[package]] -name = "opentelemetry-instrumentation-langchain-v2" -source = { editable = "instrumentation-genai/opentelemetry-instrumentation-langchain-v2" } +name = "opentelemetry-instrumentation-langchain" +source = { editable = "instrumentation-genai/opentelemetry-instrumentation-langchain" } dependencies = [ { name = "opentelemetry-api" }, { name = "opentelemetry-instrumentation" }, From abd719f3fccd10a1ae7ede2ab4ff07364ec8f70e Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Mon, 20 Oct 2025 13:03:19 -0700 Subject: [PATCH 21/24] feat: add multi-agent sample and tool spans --- .../examples/manual/main.py | 2 +- .../multi_agent_travel_planner/.env.example | 14 + .../multi_agent_travel_planner/README.rst | 62 ++ .../multi_agent_travel_planner/main.py | 
533 ++++++++++++++++++ .../requirements.txt | 4 + .../examples/zero-code/main.py | 2 +- .../langchain/callback_handler.py | 216 ++++++- .../instrumentation/langchain/span_manager.py | 21 + 8 files changed, 851 insertions(+), 3 deletions(-) create mode 100644 instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/.env.example create mode 100644 instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/README.rst create mode 100644 instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/main.py create mode 100644 instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/requirements.txt diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/main.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/main.py index e18f53ff31..63813eb6e1 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/main.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/main.py @@ -21,7 +21,7 @@ def main(): # ChatOpenAI llm = ChatOpenAI( - model="gpt-3.5-turbo", + model="gpt-4.1", temperature=0.1, max_tokens=100, top_p=0.9, diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/.env.example b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/.env.example new file mode 100644 index 0000000000..a482d4f9d2 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/.env.example @@ -0,0 +1,14 @@ +# Copy this file to .env and update values before running the sample. + +# Required OpenAI API key +OPENAI_API_KEY=sk-YOUR_API_KEY + +# Optional: override default model (defaults to gpt-4o-mini) +# OPENAI_MODEL=gpt-4o-mini + +# OTLP exporter configuration (update for your collector) +# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317 +# OTEL_EXPORTER_OTLP_PROTOCOL=grpc + +# Traces will use this service.name +OTEL_SERVICE_NAME=opentelemetry-python-langchain-multi-agent diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/README.rst b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/README.rst new file mode 100644 index 0000000000..acdc724a48 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/README.rst @@ -0,0 +1,62 @@ +Multi-Agent Travel Planner Sample +================================= + +This example shows how to orchestrate a small team of LangChain agents with +`LangGraph `_ while the +OpenTelemetry LangChain instrumentation captures GenAI spans and forwards them +to an OTLP collector. + +The graph contains four specialists (coordinator, flights, hotels, activities) +and a final synthesiser node that produces an itinerary. Each specialist relies +on a simple, deterministic tool so you can run the example without any external +travel APIs while still observing tool spans wired up to the agent calls. + +Prerequisites +------------- + +* Python 3.10+ +* An OpenAI API key with access to ``gpt-4o-mini`` (or set ``OPENAI_MODEL`` to a + model that is available to your account) +* A running OTLP collector (gRPC on ``localhost:4317`` by default) + +Setup +----- + +.. 
code-block:: bash + + cd instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner + python3 -m venv .venv + source .venv/bin/activate + pip install -r requirements.txt + + # Copy the sample environment file and update values as needed. + cp .env.example .env + source .env + +Run the sample +-------------- + +.. code-block:: bash + + # From this directory, after activating the virtual environment and sourcing + # your environment variables: + python main.py + +The script prints each agent's contribution followed by the final itinerary. +At the same time it streams OTLP traces. You should see: + +* A root span named ``invoke_agent travel_multi_agent_planner`` that captures + the overall orchestration, including ``gen_ai.input.messages`` and a preview + of the final plan. +* LangChain instrumentation spans for each agent's LLM invocation with + ``gen_ai.provider.name=openai`` and ``service.name`` derived from + ``OTEL_SERVICE_NAME``. + +Tear down +--------- + +Deactivate the virtual environment when you are done: + +.. code-block:: bash + + deactivate diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/main.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/main.py new file mode 100644 index 0000000000..5063b4578e --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/main.py @@ -0,0 +1,533 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Multi-agent travel planner driven by LangGraph. + +The example coordinates a set of LangChain agents that collaborate to build a +week-long city break itinerary. OpenTelemetry spans are produced for both the +overall orchestration and each LLM/tool call so you can inspect the traces in +your OTLP collector. 
+""" + +from __future__ import annotations + +import json +import os +import random +from datetime import datetime, timedelta +from typing import Annotated, Dict, List, Optional, TypedDict +from uuid import uuid4 + +from langchain_core.messages import ( + AIMessage, + BaseMessage, + HumanMessage, + SystemMessage, +) +from langchain_core.tools import tool +from langchain_openai import ChatOpenAI +from langgraph.graph import END, START, StateGraph +from langgraph.graph.message import AnyMessage, add_messages +from langgraph.prebuilt import create_react_agent + +from opentelemetry import trace +from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( + OTLPSpanExporter, +) +from opentelemetry.instrumentation.langchain import LangChainInstrumentor +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor +from opentelemetry.trace import SpanKind + +# --------------------------------------------------------------------------- +# Sample data utilities +# --------------------------------------------------------------------------- + + +DESTINATIONS = { + "paris": { + "country": "France", + "currency": "EUR", + "airport": "CDG", + "highlights": [ + "Eiffel Tower at sunset", + "Seine dinner cruise", + "Day trip to Versailles", + ], + }, + "tokyo": { + "country": "Japan", + "currency": "JPY", + "airport": "HND", + "highlights": [ + "Tsukiji market food tour", + "Ghibli Museum visit", + "Day trip to Hakone hot springs", + ], + }, + "rome": { + "country": "Italy", + "currency": "EUR", + "airport": "FCO", + "highlights": [ + "Colosseum underground tour", + "Private pasta masterclass", + "Sunset walk through Trastevere", + ], + }, +} + + +def _pick_destination(user_request: str) -> str: + lowered = user_request.lower() + for name in DESTINATIONS: + if name in lowered: + return name.title() + return "Paris" + + +def _pick_origin(user_request: str) -> str: + lowered = user_request.lower() + for city in ["seattle", "new york", "san francisco", "london"]: + if city in lowered: + return city.title() + return "Seattle" + + +def _compute_dates() -> tuple[str, str]: + start = datetime.now() + timedelta(days=30) + end = start + timedelta(days=7) + return start.strftime("%Y-%m-%d"), end.strftime("%Y-%m-%d") + + +# --------------------------------------------------------------------------- +# Tools exposed to agents +# --------------------------------------------------------------------------- + + +@tool +def mock_search_flights(origin: str, destination: str, departure: str) -> str: + """Return mock flight options for a given origin/destination pair.""" + random.seed(hash((origin, destination, departure)) % (2**32)) + airline = random.choice(["SkyLine", "AeroJet", "CloudNine"]) + fare = random.randint(700, 1250) + return ( + f"Top choice: {airline} non-stop service {origin}->{destination}, " + f"depart {departure} 09:15, arrive {departure} 17:05. " + f"Premium economy fare ${fare} return." + ) + + +@tool +def mock_search_hotels(destination: str, check_in: str, check_out: str) -> str: + """Return mock hotel recommendation for the stay.""" + random.seed(hash((destination, check_in, check_out)) % (2**32)) + name = random.choice(["Grand Meridian", "Hotel Lumière", "The Atlas"]) + rate = random.randint(240, 410) + return ( + f"{name} near the historic centre. Boutique suites, rooftop bar, " + f"average nightly rate ${rate} including breakfast." 
+ ) + + +@tool +def mock_search_activities(destination: str) -> str: + """Return a short list of signature activities for the destination.""" + data = DESTINATIONS.get(destination.lower(), DESTINATIONS["paris"]) + bullets = "\n".join(f"- {item}" for item in data["highlights"]) + return f"Signature experiences in {destination.title()}:\n{bullets}" + + +# --------------------------------------------------------------------------- +# LangGraph state & helpers +# --------------------------------------------------------------------------- + + +class PlannerState(TypedDict): + """Shared state that moves through the LangGraph workflow.""" + + messages: Annotated[List[AnyMessage], add_messages] + user_request: str + session_id: str + origin: str + destination: str + departure: str + return_date: str + travellers: int + flight_summary: Optional[str] + hotel_summary: Optional[str] + activities_summary: Optional[str] + final_itinerary: Optional[str] + current_agent: str + + +def _model_name() -> str: + return os.getenv("OPENAI_MODEL", "gpt-4o-mini") + + +def _create_llm( + agent_name: str, *, temperature: float, session_id: str +) -> ChatOpenAI: + """Create an LLM instance decorated with tags/metadata for tracing.""" + model = _model_name() + tags = [f"agent:{agent_name}", "travel-planner"] + metadata = { + "agent_name": agent_name, + "agent_type": agent_name, + "session_id": session_id, + "thread_id": session_id, + "ls_model_name": model, + "ls_temperature": temperature, + } + return ChatOpenAI( + model=model, + temperature=temperature, + tags=tags, + metadata=metadata, + ) + + +def _configure_otlp_tracing() -> None: + """Initialise a tracer provider that exports to the configured OTLP endpoint.""" + if isinstance(trace.get_tracer_provider(), TracerProvider): + return + provider = TracerProvider() + processor = BatchSpanProcessor(OTLPSpanExporter()) + provider.add_span_processor(processor) + trace.set_tracer_provider(provider) + + +def _trace_attributes_for_root(state: PlannerState) -> Dict[str, str]: + """Attributes attached to the root GenAI span.""" + provider_name = "openai" + server_address = os.getenv("OPENAI_BASE_URL", "api.openai.com") + return { + "gen_ai.operation.name": "invoke_agent", + "gen_ai.provider.name": provider_name, + "gen_ai.request.model": _model_name(), + "gen_ai.agent.name": "travel_multi_agent_planner", + "gen_ai.agent.id": f"travel_planner_{state['session_id']}", + "gen_ai.conversation.id": state["session_id"], + "gen_ai.request.temperature": 0.4, + "gen_ai.request.top_p": 1.0, + "gen_ai.request.max_tokens": 1024, + "gen_ai.request.frequency_penalty": 0.0, + "gen_ai.request.presence_penalty": 0.0, + "server.address": server_address.replace("https://", "") + .replace("http://", "") + .rstrip("/"), + "server.port": "443", + "service.name": os.getenv( + "OTEL_SERVICE_NAME", + "opentelemetry-python-langchain-multi-agent", + ), + } + + +# --------------------------------------------------------------------------- +# LangGraph nodes +# --------------------------------------------------------------------------- + + +def coordinator_node(state: PlannerState) -> PlannerState: + llm = _create_llm( + "coordinator", temperature=0.2, session_id=state["session_id"] + ) + system_message = SystemMessage( + content=( + "You are the lead travel coordinator. Extract the key details from the " + "traveller's request and describe the plan for the specialist agents." 
+ ) + ) + response = llm.invoke([system_message] + state["messages"]) + + state["messages"].append(response) + state["current_agent"] = "flight_specialist" + return state + + +def flight_specialist_node(state: PlannerState) -> PlannerState: + llm = _create_llm( + "flight_specialist", temperature=0.4, session_id=state["session_id"] + ) + agent = create_react_agent(llm, tools=[mock_search_flights]) + task = ( + f"Find an appealing flight from {state['origin']} to {state['destination']} " + f"departing {state['departure']} for {state['travellers']} travellers." + ) + result = agent.invoke({"messages": [HumanMessage(content=task)]}) + final_message = result["messages"][-1] + state["flight_summary"] = ( + final_message.content + if isinstance(final_message, BaseMessage) + else str(final_message) + ) + state["messages"].append( + final_message + if isinstance(final_message, BaseMessage) + else AIMessage(content=str(final_message)) + ) + state["current_agent"] = "hotel_specialist" + return state + + +def hotel_specialist_node(state: PlannerState) -> PlannerState: + llm = _create_llm( + "hotel_specialist", temperature=0.5, session_id=state["session_id"] + ) + agent = create_react_agent(llm, tools=[mock_search_hotels]) + task = ( + f"Recommend a boutique hotel in {state['destination']} between {state['departure']} " + f"and {state['return_date']} for {state['travellers']} travellers." + ) + result = agent.invoke({"messages": [HumanMessage(content=task)]}) + final_message = result["messages"][-1] + state["hotel_summary"] = ( + final_message.content + if isinstance(final_message, BaseMessage) + else str(final_message) + ) + state["messages"].append( + final_message + if isinstance(final_message, BaseMessage) + else AIMessage(content=str(final_message)) + ) + state["current_agent"] = "activity_specialist" + return state + + +def activity_specialist_node(state: PlannerState) -> PlannerState: + llm = _create_llm( + "activity_specialist", temperature=0.6, session_id=state["session_id"] + ) + agent = create_react_agent(llm, tools=[mock_search_activities]) + task = f"Curate signature activities for travellers spending a week in {state['destination']}." + result = agent.invoke({"messages": [HumanMessage(content=task)]}) + final_message = result["messages"][-1] + state["activities_summary"] = ( + final_message.content + if isinstance(final_message, BaseMessage) + else str(final_message) + ) + state["messages"].append( + final_message + if isinstance(final_message, BaseMessage) + else AIMessage(content=str(final_message)) + ) + state["current_agent"] = "plan_synthesizer" + return state + + +def plan_synthesizer_node(state: PlannerState) -> PlannerState: + llm = _create_llm( + "plan_synthesizer", temperature=0.3, session_id=state["session_id"] + ) + system_prompt = SystemMessage( + content=( + "You are the travel plan synthesiser. Combine the specialist insights into a " + "concise, structured itinerary covering flights, accommodation and activities." 
+ ) + ) + content = json.dumps( + { + "flight": state["flight_summary"], + "hotel": state["hotel_summary"], + "activities": state["activities_summary"], + }, + indent=2, + ) + response = llm.invoke( + [ + system_prompt, + HumanMessage( + content=( + f"Traveller request: {state['user_request']}\n\n" + f"Origin: {state['origin']} | Destination: {state['destination']}\n" + f"Dates: {state['departure']} to {state['return_date']}\n\n" + f"Specialist summaries:\n{content}" + ) + ), + ] + ) + state["final_itinerary"] = response.content + state["messages"].append(response) + state["current_agent"] = "completed" + return state + + +def should_continue(state: PlannerState) -> str: + mapping = { + "start": "coordinator", + "flight_specialist": "flight_specialist", + "hotel_specialist": "hotel_specialist", + "activity_specialist": "activity_specialist", + "plan_synthesizer": "plan_synthesizer", + } + return mapping.get(state["current_agent"], END) + + +def build_workflow() -> StateGraph: + graph = StateGraph(PlannerState) + graph.add_node("coordinator", coordinator_node) + graph.add_node("flight_specialist", flight_specialist_node) + graph.add_node("hotel_specialist", hotel_specialist_node) + graph.add_node("activity_specialist", activity_specialist_node) + graph.add_node("plan_synthesizer", plan_synthesizer_node) + graph.add_conditional_edges(START, should_continue) + graph.add_conditional_edges("coordinator", should_continue) + graph.add_conditional_edges("flight_specialist", should_continue) + graph.add_conditional_edges("hotel_specialist", should_continue) + graph.add_conditional_edges("activity_specialist", should_continue) + graph.add_conditional_edges("plan_synthesizer", should_continue) + return graph + + +# --------------------------------------------------------------------------- +# Entry point +# --------------------------------------------------------------------------- + + +def main() -> None: + _configure_otlp_tracing() + LangChainInstrumentor().instrument() + + session_id = str(uuid4()) + user_request = ( + "We're planning a romantic long-week trip to Paris from Seattle next month. " + "We'd love a boutique hotel, business-class flights and a few unique experiences." 
+ ) + + origin = _pick_origin(user_request) + destination = _pick_destination(user_request) + departure, return_date = _compute_dates() + + initial_state: PlannerState = { + "messages": [HumanMessage(content=user_request)], + "user_request": user_request, + "session_id": session_id, + "origin": origin, + "destination": destination, + "departure": departure, + "return_date": return_date, + "travellers": 2, + "flight_summary": None, + "hotel_summary": None, + "activities_summary": None, + "final_itinerary": None, + "current_agent": "start", + } + + workflow = build_workflow() + app = workflow.compile() + + tracer = trace.get_tracer(__name__) + attributes = _trace_attributes_for_root(initial_state) + root_input = [ + { + "role": "user", + "parts": [ + { + "type": "text", + "content": user_request, + } + ], + } + ] + + with tracer.start_as_current_span( + name="invoke_agent travel_multi_agent_planner", + kind=SpanKind.CLIENT, + attributes=attributes, + ) as root_span: + root_span.set_attribute( + "gen_ai.input.messages", json.dumps(root_input) + ) + + config = { + "configurable": {"thread_id": session_id}, + "recursion_limit": 10, + } + + print("🌍 Multi-Agent Travel Planner") + print("=" * 60) + + final_state: Optional[PlannerState] = None + + for step in app.stream(initial_state, config): + node_name, node_state = next(iter(step.items())) + final_state = node_state + print(f"\n🤖 {node_name.replace('_', ' ').title()} Agent") + if node_state.get("messages"): + last = node_state["messages"][-1] + if isinstance(last, BaseMessage): + preview = last.content + if len(preview) > 400: + preview = preview[:400] + "... [truncated]" + print(preview) + + if not final_state: + final_plan = "" + else: + final_plan = final_state.get("final_itinerary") or "" + + if final_plan: + print("\n🎉 Final itinerary\n" + "-" * 40) + print(final_plan) + + root_span.set_attribute("gen_ai.response.model", _model_name()) + if final_plan: + root_span.set_attribute( + "gen_ai.output.messages", + json.dumps( + [ + { + "role": "assistant", + "parts": [ + {"type": "text", "content": final_plan[:4000]} + ], + "finish_reason": "stop", + } + ] + ), + ) + preview = final_plan[:500] + ( + "..." 
if len(final_plan) > 500 else "" + ) + root_span.set_attribute("metadata.final_plan.preview", preview) + root_span.set_attribute("metadata.session_id", session_id) + root_span.set_attribute( + "metadata.agents_used", + len( + [ + key + for key in [ + "flight_summary", + "hotel_summary", + "activities_summary", + ] + if final_state and final_state.get(key) + ] + ), + ) + + provider = trace.get_tracer_provider() + if hasattr(provider, "force_flush"): + provider.force_flush() + if hasattr(provider, "shutdown"): + provider.shutdown() + + +if __name__ == "__main__": + main() diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/requirements.txt b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/requirements.txt new file mode 100644 index 0000000000..b4ad57add9 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/requirements.txt @@ -0,0 +1,4 @@ +langchain>=0.3.21 +langchain-openai>=0.2.0 +langgraph>=0.2.39 +python-dotenv>=1.0.0 diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/main.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/main.py index 24ee6db16d..dbb65bdec9 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/main.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/main.py @@ -4,7 +4,7 @@ def main(): llm = ChatOpenAI( - model="gpt-3.5-turbo", + model="gpt-4.1", temperature=0.1, max_tokens=100, top_p=0.9, diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py index 8661db3971..e3d77870b3 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py @@ -27,6 +27,10 @@ from opentelemetry.instrumentation.langchain.span_manager import _SpanManager from opentelemetry.trace import Span, Tracer +_TOOL_CALL_ARGUMENTS_ATTR = "gen_ai.tool.call.arguments" +_TOOL_CALL_RESULT_ATTR = "gen_ai.tool.call.result" +_TOOL_DEFINITIONS_ATTR = "gen_ai.tool.definitions" + class _BaseCallbackHandlerProtocol(Protocol): def __init__(self, *args: Any, **kwargs: Any) -> None: ... 
@@ -187,10 +191,11 @@ def __init__( if callable(base_init): base_init() - self.span_manager = _SpanManager( + self.span_manager: _SpanManager = _SpanManager( tracer=tracer, ) self._capture_messages = capture_messages + self._metadata_provider_mapping = self._METADATA_PROVIDER_MAPPING def on_chat_model_start( self, @@ -231,6 +236,22 @@ def on_chat_model_start( self._apply_request_attributes(span, params, metadata) + if self._capture_messages: + tool_definitions = self._extract_tool_definitions( + params=params, + metadata=metadata, + serialized=serialized, + extras=kwargs_dict, + ) + if tool_definitions is not None: + serialized_definitions = self._serialize_tool_payload( + tool_definitions + ) + if serialized_definitions is not None: + span.set_attribute( + _TOOL_DEFINITIONS_ATTR, serialized_definitions + ) + if self._capture_messages and messages: serialized_messages = self._serialize_input_messages(messages) span.set_attribute( @@ -625,3 +646,196 @@ def on_llm_error( **kwargs: Any, ) -> None: self.span_manager.handle_error(error, run_id) + + def on_tool_start( + self, + serialized: Mapping[str, Any] | None, + input_str: str, + *, + run_id: UUID, + parent_run_id: UUID | None, + metadata: Mapping[str, Any] | None = None, + inputs: Mapping[str, Any] | None = None, + tags: Sequence[str] | None = None, + **kwargs: Any, + ) -> None: + tool_name = self._resolve_tool_name(serialized, metadata, inputs) + + span = cast( + Span, + self.span_manager.create_tool_span( # type: ignore[attr-defined] + run_id=run_id, + parent_run_id=parent_run_id, + tool_name=tool_name, + ), + ) + + provider_name = self._resolve_provider(None, metadata) + if provider_name: + span.set_attribute(GenAI.GEN_AI_PROVIDER_NAME, provider_name) + + tool_call_id = self._resolve_tool_call_id(metadata, inputs) + if tool_call_id: + span.set_attribute(GenAI.GEN_AI_TOOL_CALL_ID, tool_call_id) + + tool_type = self._resolve_tool_type(serialized, metadata) + if tool_type: + span.set_attribute(GenAI.GEN_AI_TOOL_TYPE, tool_type) + + if self._capture_messages: + arguments_payload = self._serialize_tool_payload( + inputs if inputs is not None else input_str + ) + if arguments_payload is not None: + span.set_attribute( + _TOOL_CALL_ARGUMENTS_ATTR, arguments_payload + ) + + def on_tool_end( + self, + output: Any, + *, + run_id: UUID, + parent_run_id: UUID | None, + **kwargs: Any, + ) -> None: + span = self.span_manager.get_span(run_id) + if span is not None and self._capture_messages: + result_payload = self._serialize_tool_payload(output) + if result_payload is not None: + span.set_attribute(_TOOL_CALL_RESULT_ATTR, result_payload) + self.span_manager.end_span(run_id) + + def on_tool_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: UUID | None, + **kwargs: Any, + ) -> None: + self.span_manager.handle_error(error, run_id) + + def _resolve_tool_name( + self, + serialized: Mapping[str, Any] | None, + metadata: Mapping[str, Any] | None, + inputs: Mapping[str, Any] | None, + ) -> str | None: + candidates: list[Any] = [] + if serialized: + candidates.extend( + [ + serialized.get("name"), + serialized.get("id"), + cast(Mapping[str, Any], serialized.get("kwargs", {})).get( + "name" + ) + if isinstance(serialized.get("kwargs"), Mapping) + else None, + ] + ) + if inputs: + candidates.extend([inputs.get("tool"), inputs.get("name")]) + if metadata: + candidates.extend( + [ + metadata.get("tool_name"), + metadata.get("agent_name"), + ] + ) + for candidate in candidates: + if isinstance(candidate, str) and candidate: + return 
candidate + return None + + def _resolve_tool_call_id( + self, + metadata: Mapping[str, Any] | None, + inputs: Mapping[str, Any] | None, + ) -> str | None: + def _extract(source: Mapping[str, Any]) -> str | None: + for key in ("tool_call_id", "id", "call_id"): + value = cast(Any, source.get(key)) + if value is None: + continue + if isinstance(value, Mapping): + nested_mapping = cast(Mapping[str, Any], value) + nested_id = nested_mapping.get("id") + if nested_id: + return str(nested_id) + continue + return str(value) + tool_call = cast(Any, source.get("tool_call")) + if isinstance(tool_call, Mapping): + nested_mapping = cast(Mapping[str, Any], tool_call) + nested = nested_mapping.get("id") + if nested: + return str(nested) + return None + + for container in (metadata, inputs): + if isinstance(container, Mapping): + extracted = _extract(container) + if extracted: + return extracted + return None + + def _resolve_tool_type( + self, + serialized: Mapping[str, Any] | None, + metadata: Mapping[str, Any] | None, + ) -> str | None: + candidates: list[Any] = [] + if serialized: + candidates.extend( + [ + serialized.get("type"), + cast(Mapping[str, Any], serialized.get("kwargs", {})).get( + "type" + ) + if isinstance(serialized.get("kwargs"), Mapping) + else None, + ] + ) + if metadata: + candidates.append(metadata.get("tool_type")) + for candidate in candidates: + if isinstance(candidate, str) and candidate: + return candidate + return None + + def _serialize_tool_payload(self, payload: Any) -> str | None: + if payload is None: + return None + if isinstance(payload, str): + return payload + try: + return json.dumps(payload, default=self._json_default) + except (TypeError, ValueError): + return str(payload) + except Exception: + return None + + def _extract_tool_definitions( + self, + *, + params: Mapping[str, Any] | None, + metadata: Mapping[str, Any] | None, + serialized: Mapping[str, Any] | None, + extras: Mapping[str, Any] | None, + ) -> Any: + for source in (params, metadata, extras): + if isinstance(source, Mapping): + mapping_source: Mapping[str, Any] = source + candidate = cast(Any, mapping_source.get("tools")) + if candidate is not None: + return candidate + if isinstance(serialized, Mapping): + kwargs_mapping = serialized.get("kwargs") + if isinstance(kwargs_mapping, Mapping): + kwargs_typed = cast(Mapping[str, Any], kwargs_mapping) + candidate = cast(Any, kwargs_typed.get("tools")) + if candidate is not None: + return candidate + return None diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/span_manager.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/span_manager.py index ca3de7fd61..9ae68eac97 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/span_manager.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/span_manager.py @@ -97,6 +97,27 @@ def create_chat_span( return span + def create_tool_span( + self, + run_id: UUID, + parent_run_id: Optional[UUID], + tool_name: Optional[str], + ) -> Span: + operation_name = GenAI.GenAiOperationNameValues.EXECUTE_TOOL.value + span_name = ( + f"{operation_name} {tool_name}" if tool_name else operation_name + ) + span = self._create_span( + run_id=run_id, + parent_run_id=parent_run_id, + span_name=span_name, + kind=SpanKind.INTERNAL, + ) + span.set_attribute(GenAI.GEN_AI_OPERATION_NAME, 
operation_name) + if tool_name: + span.set_attribute(GenAI.GEN_AI_TOOL_NAME, tool_name) + return span + def end_span(self, run_id: UUID) -> None: state = self.spans[run_id] for child_id in state.children: From c626a5e64a4653a19d9bce9a1e768ac3eeb4a253 Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Tue, 21 Oct 2025 11:08:42 -0700 Subject: [PATCH 22/24] Update LangChain instrumentation and samples for 1.x --- .gitignore | 1 + instrumentation-genai/README.md | 4 +- .../examples/manual/requirements.txt | 6 +- .../multi_agent_travel_planner/.env.sample | 14 ++ .../multi_agent_travel_planner/main.py | 18 +- .../requirements.txt | 6 +- .../examples/zero-code/requirements.txt | 4 +- .../pyproject.toml | 2 +- .../langchain/callback_handler.py | 195 +++++++++++++----- .../instrumentation/langchain/package.py | 2 +- .../tests/conftest.py | 27 ++- uv.lock | 2 +- 12 files changed, 209 insertions(+), 72 deletions(-) create mode 100644 instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/.env.sample diff --git a/.gitignore b/.gitignore index 1c32b4446a..ccb425e204 100644 --- a/.gitignore +++ b/.gitignore @@ -61,3 +61,4 @@ target # Benchmark result files *-benchmark.json +instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/.env diff --git a/instrumentation-genai/README.md b/instrumentation-genai/README.md index 1b8f5e49e6..57226efab6 100644 --- a/instrumentation-genai/README.md +++ b/instrumentation-genai/README.md @@ -2,8 +2,8 @@ | Instrumentation | Supported Packages | Metrics support | Semconv status | | --------------- | ------------------ | --------------- | -------------- | | [opentelemetry-instrumentation-google-genai](./opentelemetry-instrumentation-google-genai) | google-genai >= 1.0.0 | No | development -| [opentelemetry-instrumentation-langchain](./opentelemetry-instrumentation-langchain) | langchain >= 0.3.21 | No | development +| [opentelemetry-instrumentation-langchain](./opentelemetry-instrumentation-langchain) | langchain >= 1.0.0 | No | development | [opentelemetry-instrumentation-openai-agents-v2](./opentelemetry-instrumentation-openai-agents-v2) | openai-agents >= 0.3.3 | No | development | [opentelemetry-instrumentation-openai-v2](./opentelemetry-instrumentation-openai-v2) | openai >= 1.26.0 | Yes | development | [opentelemetry-instrumentation-vertexai](./opentelemetry-instrumentation-vertexai) | google-cloud-aiplatform >= 1.64 | No | development -| [opentelemetry-instrumentation-weaviate](./opentelemetry-instrumentation-weaviate) | weaviate-client >= 3.0.0,<5.0.0 | No | development \ No newline at end of file +| [opentelemetry-instrumentation-weaviate](./opentelemetry-instrumentation-weaviate) | weaviate-client >= 3.0.0,<5.0.0 | No | development diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/requirements.txt b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/requirements.txt index 0f986b1be0..4428849d1a 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/requirements.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/requirements.txt @@ -1,7 +1,7 @@ -langchain==0.3.21 -langchain_openai +langchain==1.0.1 +langchain-openai==1.0.0 opentelemetry-sdk>=1.31.0 opentelemetry-exporter-otlp-proto-grpc>=1.31.0 # Uncomment after langchain instrumentation is released -# opentelemetry-instrumentation-langchain~=2.0b0.dev \ No newline at end of file +#
opentelemetry-instrumentation-langchain~=2.0b0.dev diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/.env.sample b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/.env.sample new file mode 100644 index 0000000000..6785ec4a44 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/.env.sample @@ -0,0 +1,14 @@ +# Copy this file to `.env` and update values before running the sample. + +# Required OpenAI API key +OPENAI_API_KEY=sk-YOUR_API_KEY + +# Optional: override default model (defaults to gpt-4o-mini) +# OPENAI_MODEL=gpt-4o-mini + +# OTLP exporter configuration (update for your collector) +# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317 +# OTEL_EXPORTER_OTLP_PROTOCOL=grpc + +# Traces will use this service.name +OTEL_SERVICE_NAME=opentelemetry-python-langchain-multi-agent diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/main.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/main.py index 5063b4578e..363d81fd6f 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/main.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/main.py @@ -40,7 +40,17 @@ from langchain_openai import ChatOpenAI from langgraph.graph import END, START, StateGraph from langgraph.graph.message import AnyMessage, add_messages -from langgraph.prebuilt import create_react_agent + +try: # LangChain >= 1.0.0 + from langchain.agents import ( + create_agent as _create_react_agent, # type: ignore[attr-defined] + ) +except ( + ImportError +): # pragma: no cover - compatibility with older LangGraph releases + from langgraph.prebuilt import ( + create_react_agent as _create_react_agent, # type: ignore[assignment] + ) from opentelemetry import trace from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( @@ -262,7 +272,7 @@ def flight_specialist_node(state: PlannerState) -> PlannerState: llm = _create_llm( "flight_specialist", temperature=0.4, session_id=state["session_id"] ) - agent = create_react_agent(llm, tools=[mock_search_flights]) + agent = _create_react_agent(llm, tools=[mock_search_flights]) task = ( f"Find an appealing flight from {state['origin']} to {state['destination']} " f"departing {state['departure']} for {state['travellers']} travellers." @@ -287,7 +297,7 @@ def hotel_specialist_node(state: PlannerState) -> PlannerState: llm = _create_llm( "hotel_specialist", temperature=0.5, session_id=state["session_id"] ) - agent = create_react_agent(llm, tools=[mock_search_hotels]) + agent = _create_react_agent(llm, tools=[mock_search_hotels]) task = ( f"Recommend a boutique hotel in {state['destination']} between {state['departure']} " f"and {state['return_date']} for {state['travellers']} travellers." @@ -312,7 +322,7 @@ def activity_specialist_node(state: PlannerState) -> PlannerState: llm = _create_llm( "activity_specialist", temperature=0.6, session_id=state["session_id"] ) - agent = create_react_agent(llm, tools=[mock_search_activities]) + agent = _create_react_agent(llm, tools=[mock_search_activities]) task = f"Curate signature activities for travellers spending a week in {state['destination']}." 
result = agent.invoke({"messages": [HumanMessage(content=task)]}) final_message = result["messages"][-1] diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/requirements.txt b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/requirements.txt index b4ad57add9..38bb8b9ee0 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/requirements.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/requirements.txt @@ -1,4 +1,4 @@ -langchain>=0.3.21 -langchain-openai>=0.2.0 -langgraph>=0.2.39 +langchain>=1.0.0 +langchain-openai>=1.0.0 +langgraph>=1.0.0 python-dotenv>=1.0.0 diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/requirements.txt b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/requirements.txt index 63b3f56cf1..fa46cb7fce 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/requirements.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/requirements.txt @@ -1,5 +1,5 @@ -langchain==0.3.21 -langchain_openai +langchain==1.0.1 +langchain-openai==1.0.0 opentelemetry-sdk>=1.31.0 opentelemetry-exporter-otlp-proto-grpc>=1.31.0 opentelemetry-distro~=0.51b0 diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/pyproject.toml b/instrumentation-genai/opentelemetry-instrumentation-langchain/pyproject.toml index 8d87cc9085..7e8762acc6 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/pyproject.toml +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/pyproject.toml @@ -32,7 +32,7 @@ dependencies = [ [project.optional-dependencies] instruments = [ - "langchain >= 0.3.21", + "langchain >= 1.0.0", ] [project.entry-points.opentelemetry_instrumentor] diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py index e3d77870b3..cfc263f021 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py @@ -154,30 +154,131 @@ def __getattr__(self, name: str) -> Any: ... 
) +def _enum_member_value(enum_name: str, member_name: str, default: str) -> str: + """Return the value for a GenAI enum member, falling back to the provided default.""" + enum_cls = getattr(GenAI, enum_name, None) + member = ( + getattr(enum_cls, member_name, None) if enum_cls is not None else None + ) + value = getattr(member, "value", None) + if isinstance(value, str): + return value + if isinstance(member, str): + return member + return default + + +def _gen_ai_attr(name: str, default: str) -> Any: + """Fetch a GenAI semantic attribute constant, defaulting to the provided string.""" + return getattr(GenAI, name, default) + + +_PROVIDER_OPENAI = _enum_member_value( + "GenAiProviderNameValues", "OPENAI", "openai" +) +_PROVIDER_AZURE_OPENAI = _enum_member_value( + "GenAiProviderNameValues", "AZURE_AI_OPENAI", "azure_ai_openai" +) +_PROVIDER_AZURE_INFERENCE = _enum_member_value( + "GenAiProviderNameValues", "AZURE_AI_INFERENCE", "azure_ai_inference" +) +_PROVIDER_AWS_BEDROCK = _enum_member_value( + "GenAiProviderNameValues", "AWS_BEDROCK", "aws_bedrock" +) + +GEN_AI_PROVIDER_NAME_ATTR = _gen_ai_attr( + "GEN_AI_PROVIDER_NAME", "gen_ai.provider.name" +) +GEN_AI_INPUT_MESSAGES_ATTR = _gen_ai_attr( + "GEN_AI_INPUT_MESSAGES", "gen_ai.input.messages" +) +GEN_AI_OUTPUT_MESSAGES_ATTR = _gen_ai_attr( + "GEN_AI_OUTPUT_MESSAGES", "gen_ai.output.messages" +) +GEN_AI_OUTPUT_TYPE_ATTR = _gen_ai_attr( + "GEN_AI_OUTPUT_TYPE", "gen_ai.output.type" +) +GEN_AI_REQUEST_CHOICE_COUNT_ATTR = _gen_ai_attr( + "GEN_AI_REQUEST_CHOICE_COUNT", "gen_ai.request.choice.count" +) +GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTR = _gen_ai_attr( + "GEN_AI_REQUEST_FREQUENCY_PENALTY", "gen_ai.request.frequency_penalty" +) +GEN_AI_REQUEST_MAX_TOKENS_ATTR = _gen_ai_attr( + "GEN_AI_REQUEST_MAX_TOKENS", "gen_ai.request.max_tokens" +) +GEN_AI_REQUEST_PRESENCE_PENALTY_ATTR = _gen_ai_attr( + "GEN_AI_REQUEST_PRESENCE_PENALTY", "gen_ai.request.presence_penalty" +) +GEN_AI_REQUEST_SEED_ATTR = _gen_ai_attr( + "GEN_AI_REQUEST_SEED", "gen_ai.request.seed" +) +GEN_AI_REQUEST_STOP_SEQUENCES_ATTR = _gen_ai_attr( + "GEN_AI_REQUEST_STOP_SEQUENCES", "gen_ai.request.stop_sequences" +) +GEN_AI_REQUEST_TEMPERATURE_ATTR = _gen_ai_attr( + "GEN_AI_REQUEST_TEMPERATURE", "gen_ai.request.temperature" +) +GEN_AI_REQUEST_TOP_K_ATTR = _gen_ai_attr( + "GEN_AI_REQUEST_TOP_K", "gen_ai.request.top_k" +) +GEN_AI_REQUEST_TOP_P_ATTR = _gen_ai_attr( + "GEN_AI_REQUEST_TOP_P", "gen_ai.request.top_p" +) +GEN_AI_RESPONSE_FINISH_REASONS_ATTR = _gen_ai_attr( + "GEN_AI_RESPONSE_FINISH_REASONS", "gen_ai.response.finish_reasons" +) +GEN_AI_RESPONSE_ID_ATTR = _gen_ai_attr( + "GEN_AI_RESPONSE_ID", "gen_ai.response.id" +) +GEN_AI_RESPONSE_MODEL_ATTR = _gen_ai_attr( + "GEN_AI_RESPONSE_MODEL", "gen_ai.response.model" +) +GEN_AI_TOOL_CALL_ID_ATTR = _gen_ai_attr( + "GEN_AI_TOOL_CALL_ID", "gen_ai.tool.call.id" +) +GEN_AI_TOOL_TYPE_ATTR = _gen_ai_attr("GEN_AI_TOOL_TYPE", "gen_ai.tool.type") +GEN_AI_USAGE_INPUT_TOKENS_ATTR = _gen_ai_attr( + "GEN_AI_USAGE_INPUT_TOKENS", "gen_ai.usage.input_tokens" +) +GEN_AI_USAGE_OUTPUT_TOKENS_ATTR = _gen_ai_attr( + "GEN_AI_USAGE_OUTPUT_TOKENS", "gen_ai.usage.output_tokens" +) + +_OUTPUT_TYPE_JSON = _enum_member_value("GenAiOutputTypeValues", "JSON", "json") +_OUTPUT_TYPE_TEXT = _enum_member_value("GenAiOutputTypeValues", "TEXT", "text") +_OUTPUT_TYPE_IMAGE = _enum_member_value( + "GenAiOutputTypeValues", "IMAGE", "image" +) +_OUTPUT_TYPE_SPEECH = _enum_member_value( + "GenAiOutputTypeValues", "SPEECH", "speech" +) + + class 
OpenTelemetryLangChainCallbackHandler(BaseCallbackHandler): """ A callback handler for LangChain that uses OpenTelemetry to create spans for LLM calls and chains, tools, etc. in the future. """ _CHAT_MODEL_PROVIDER_MAPPING: dict[str, str] = { - "ChatOpenAI": GenAI.GenAiProviderNameValues.OPENAI.value, - "AzureChatOpenAI": GenAI.GenAiProviderNameValues.AZURE_AI_OPENAI.value, - "AzureOpenAI": GenAI.GenAiProviderNameValues.AZURE_AI_OPENAI.value, - "ChatBedrock": GenAI.GenAiProviderNameValues.AWS_BEDROCK.value, - "BedrockChat": GenAI.GenAiProviderNameValues.AWS_BEDROCK.value, + "ChatOpenAI": _PROVIDER_OPENAI, + "AzureChatOpenAI": _PROVIDER_AZURE_OPENAI, + "AzureOpenAI": _PROVIDER_AZURE_OPENAI, + "ChatBedrock": _PROVIDER_AWS_BEDROCK, + "BedrockChat": _PROVIDER_AWS_BEDROCK, } _METADATA_PROVIDER_MAPPING: dict[str, str] = { - "openai": GenAI.GenAiProviderNameValues.OPENAI.value, - "azure": GenAI.GenAiProviderNameValues.AZURE_AI_OPENAI.value, - "azure_openai": GenAI.GenAiProviderNameValues.AZURE_AI_OPENAI.value, - "azure-ai-openai": GenAI.GenAiProviderNameValues.AZURE_AI_OPENAI.value, - "azure_ai_openai": GenAI.GenAiProviderNameValues.AZURE_AI_OPENAI.value, - "azure_ai_inference": GenAI.GenAiProviderNameValues.AZURE_AI_INFERENCE.value, - "azure-inference": GenAI.GenAiProviderNameValues.AZURE_AI_INFERENCE.value, - "amazon": GenAI.GenAiProviderNameValues.AWS_BEDROCK.value, - "bedrock": GenAI.GenAiProviderNameValues.AWS_BEDROCK.value, - "aws": GenAI.GenAiProviderNameValues.AWS_BEDROCK.value, + "openai": _PROVIDER_OPENAI, + "azure": _PROVIDER_AZURE_OPENAI, + "azure_openai": _PROVIDER_AZURE_OPENAI, + "azure-ai-openai": _PROVIDER_AZURE_OPENAI, + "azure_ai_openai": _PROVIDER_AZURE_OPENAI, + "azure_ai_inference": _PROVIDER_AZURE_INFERENCE, + "azure-inference": _PROVIDER_AZURE_INFERENCE, + "amazon": _PROVIDER_AWS_BEDROCK, + "bedrock": _PROVIDER_AWS_BEDROCK, + "aws": _PROVIDER_AWS_BEDROCK, } _SERVER_URL_KEYS = ("base_url", "azure_endpoint", "endpoint") @@ -224,10 +325,10 @@ def on_chat_model_start( request_model=request_model, ) - span.set_attribute(GenAI.GEN_AI_PROVIDER_NAME, provider_name) + span.set_attribute(GEN_AI_PROVIDER_NAME_ATTR, provider_name) if provider_name in ( - GenAI.GenAiProviderNameValues.AZURE_AI_OPENAI.value, - GenAI.GenAiProviderNameValues.AZURE_AI_INFERENCE.value, + _PROVIDER_AZURE_OPENAI, + _PROVIDER_AZURE_INFERENCE, ): span.set_attribute( AZURE_RESOURCE_PROVIDER_NAMESPACE, @@ -255,7 +356,7 @@ def on_chat_model_start( if self._capture_messages and messages: serialized_messages = self._serialize_input_messages(messages) span.set_attribute( - GenAI.GEN_AI_INPUT_MESSAGES, + GEN_AI_INPUT_MESSAGES_ATTR, self._serialize_to_json(serialized_messages), ) @@ -343,39 +444,39 @@ def _apply_request_attributes( if params: top_p = params.get("top_p") if top_p is not None: - span.set_attribute(GenAI.GEN_AI_REQUEST_TOP_P, top_p) + span.set_attribute(GEN_AI_REQUEST_TOP_P_ATTR, top_p) frequency_penalty = params.get("frequency_penalty") if frequency_penalty is not None: span.set_attribute( - GenAI.GEN_AI_REQUEST_FREQUENCY_PENALTY, frequency_penalty + GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTR, frequency_penalty ) presence_penalty = params.get("presence_penalty") if presence_penalty is not None: span.set_attribute( - GenAI.GEN_AI_REQUEST_PRESENCE_PENALTY, presence_penalty + GEN_AI_REQUEST_PRESENCE_PENALTY_ATTR, presence_penalty ) stop_sequences = params.get("stop") if stop_sequences is not None: span.set_attribute( - GenAI.GEN_AI_REQUEST_STOP_SEQUENCES, stop_sequences + GEN_AI_REQUEST_STOP_SEQUENCES_ATTR,
stop_sequences ) seed = params.get("seed") if seed is not None: - span.set_attribute(GenAI.GEN_AI_REQUEST_SEED, seed) + span.set_attribute(GEN_AI_REQUEST_SEED_ATTR, seed) temperature = params.get("temperature") if temperature is not None: span.set_attribute( - GenAI.GEN_AI_REQUEST_TEMPERATURE, temperature + GEN_AI_REQUEST_TEMPERATURE_ATTR, temperature ) max_tokens = params.get("max_completion_tokens") or params.get( "max_tokens" ) if max_tokens is not None: - span.set_attribute(GenAI.GEN_AI_REQUEST_MAX_TOKENS, max_tokens) + span.set_attribute(GEN_AI_REQUEST_MAX_TOKENS_ATTR, max_tokens) top_k = params.get("top_k") if top_k is not None: - span.set_attribute(GenAI.GEN_AI_REQUEST_TOP_K, top_k) + span.set_attribute(GEN_AI_REQUEST_TOP_K_ATTR, top_k) choice_count = params.get("n") or params.get("choice_count") if choice_count is not None: @@ -385,12 +486,12 @@ def _apply_request_attributes( choice_value = choice_count if choice_value != 1: span.set_attribute( - GenAI.GEN_AI_REQUEST_CHOICE_COUNT, choice_value + GEN_AI_REQUEST_CHOICE_COUNT_ATTR, choice_value ) output_type = self._extract_output_type(params) if output_type is not None: - span.set_attribute(GenAI.GEN_AI_OUTPUT_TYPE, output_type) + span.set_attribute(GEN_AI_OUTPUT_TYPE_ATTR, output_type) service_tier = params.get("service_tier") if service_tier is not None: @@ -402,11 +503,11 @@ def _apply_request_attributes( temperature = metadata.get("ls_temperature") if temperature is not None: span.set_attribute( - GenAI.GEN_AI_REQUEST_TEMPERATURE, temperature + GEN_AI_REQUEST_TEMPERATURE_ATTR, temperature ) max_tokens = metadata.get("ls_max_tokens") if max_tokens is not None: - span.set_attribute(GenAI.GEN_AI_REQUEST_MAX_TOKENS, max_tokens) + span.set_attribute(GEN_AI_REQUEST_MAX_TOKENS_ATTR, max_tokens) def _maybe_set_server_attributes( self, span: Span, params: Mapping[str, Any] @@ -454,12 +555,12 @@ def _extract_output_type(self, params: Mapping[str, Any]) -> str | None: lowered = output_type.lower() mapping = { - "json_object": GenAI.GenAiOutputTypeValues.JSON.value, - "json_schema": GenAI.GenAiOutputTypeValues.JSON.value, - "json": GenAI.GenAiOutputTypeValues.JSON.value, - "text": GenAI.GenAiOutputTypeValues.TEXT.value, - "image": GenAI.GenAiOutputTypeValues.IMAGE.value, - "speech": GenAI.GenAiOutputTypeValues.SPEECH.value, + "json_object": _OUTPUT_TYPE_JSON, + "json_schema": _OUTPUT_TYPE_JSON, + "json": _OUTPUT_TYPE_JSON, + "text": _OUTPUT_TYPE_TEXT, + "image": _OUTPUT_TYPE_IMAGE, + "speech": _OUTPUT_TYPE_SPEECH, } return mapping.get(lowered) @@ -590,15 +691,13 @@ def on_llm_end( "output_tokens", 0 ) span.set_attribute( - GenAI.GEN_AI_USAGE_INPUT_TOKENS, input_tokens + GEN_AI_USAGE_INPUT_TOKENS_ATTR, input_tokens ) span.set_attribute( - GenAI.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens + GEN_AI_USAGE_OUTPUT_TOKENS_ATTR, output_tokens ) - span.set_attribute( - GenAI.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons - ) + span.set_attribute(GEN_AI_RESPONSE_FINISH_REASONS_ATTR, finish_reasons) llm_output = cast( Mapping[str, Any] | None, getattr(response, "llm_output", None) @@ -609,12 +708,12 @@ def on_llm_end( ) if response_model is not None: span.set_attribute( - GenAI.GEN_AI_RESPONSE_MODEL, str(response_model) + GEN_AI_RESPONSE_MODEL_ATTR, str(response_model) ) response_id = llm_output.get("id") if response_id is not None: - span.set_attribute(GenAI.GEN_AI_RESPONSE_ID, str(response_id)) + span.set_attribute(GEN_AI_RESPONSE_ID_ATTR, str(response_id)) service_tier = llm_output.get("service_tier") if service_tier is not None: @@ -630,7 +729,7 @@ 
def on_llm_end( serialized_outputs = self._serialize_output_messages(response) if serialized_outputs: span.set_attribute( - GenAI.GEN_AI_OUTPUT_MESSAGES, + GEN_AI_OUTPUT_MESSAGES_ATTR, self._serialize_to_json(serialized_outputs), ) @@ -672,15 +771,15 @@ def on_tool_start( provider_name = self._resolve_provider(None, metadata) if provider_name: - span.set_attribute(GenAI.GEN_AI_PROVIDER_NAME, provider_name) + span.set_attribute(GEN_AI_PROVIDER_NAME_ATTR, provider_name) tool_call_id = self._resolve_tool_call_id(metadata, inputs) if tool_call_id: - span.set_attribute(GenAI.GEN_AI_TOOL_CALL_ID, tool_call_id) + span.set_attribute(GEN_AI_TOOL_CALL_ID_ATTR, tool_call_id) tool_type = self._resolve_tool_type(serialized, metadata) if tool_type: - span.set_attribute(GenAI.GEN_AI_TOOL_TYPE, tool_type) + span.set_attribute(GEN_AI_TOOL_TYPE_ATTR, tool_type) if self._capture_messages: arguments_payload = self._serialize_tool_payload( diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/package.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/package.py index 1aaafae872..e57b86e816 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/package.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/package.py @@ -13,4 +13,4 @@ # limitations under the License. -_instruments = ("langchain >= 0.3.21",) +_instruments = ("langchain >= 1.0.0",) diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py index df97ad0492..4238a0ff63 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py @@ -2,12 +2,13 @@ import json import os +from pathlib import Path +from types import SimpleNamespace import boto3 import pytest import yaml from langchain_aws import ChatBedrock -from langchain_google_genai import ChatGoogleGenerativeAI from langchain_openai import ChatOpenAI from opentelemetry.instrumentation.langchain import LangChainInstrumentor @@ -54,9 +55,13 @@ def fixture_us_amazon_nova_lite_v1_0(): @pytest.fixture(scope="function", name="gemini") def fixture_gemini(): - llm_model_value = "gemini-2.5-pro" - llm = ChatGoogleGenerativeAI(model=llm_model_value, api_key="test_key") - yield llm + class _StubGemini: + def invoke(self, messages): + return SimpleNamespace( + content="The capital of France is **Paris**" + ) + + yield _StubGemini() @pytest.fixture(scope="function", name="span_exporter") @@ -89,6 +94,15 @@ def start_instrumentation( def environment(): if not os.getenv("OPENAI_API_KEY"): os.environ["OPENAI_API_KEY"] = "test_openai_api_key" + if not os.getenv("GOOGLE_API_KEY"): + os.environ["GOOGLE_API_KEY"] = "test_google_api_key" + os.environ.setdefault("AWS_EC2_METADATA_DISABLED", "true") + os.environ.setdefault("AWS_DEFAULT_REGION", "us-west-2") + + +@pytest.fixture(scope="module") +def vcr_cassette_dir() -> str: + return str(Path(__file__).parent / "cassettes") @pytest.fixture(scope="module") @@ -102,6 +116,7 @@ def vcr_config(): ], "decode_compressed_response": True, "before_record_response": scrub_response_headers, + "match_on": ["method", "host", "path"], } @@ -166,10 +181,8 @@ def deserialize(cassette_string): return 
yaml.load(cassette_string, Loader=yaml.Loader) -@pytest.fixture(scope="module", autouse=True) -def fixture_vcr(vcr): +def pytest_recording_configure(config, vcr): vcr.register_serializer("yaml", PrettyPrintJSONBody) - return vcr def scrub_response_headers(response): diff --git a/uv.lock b/uv.lock index 76a60b9134..f87e75ccdc 100644 --- a/uv.lock +++ b/uv.lock @@ -3279,7 +3279,7 @@ instruments = [ [package.metadata] requires-dist = [ - { name = "langchain", marker = "extra == 'instruments'", specifier = ">=0.3.21" }, + { name = "langchain", marker = "extra == 'instruments'", specifier = ">=1.0.0" }, { name = "opentelemetry-api", git = "https://github.com/open-telemetry/opentelemetry-python?subdirectory=opentelemetry-api&branch=main" }, { name = "opentelemetry-instrumentation", editable = "opentelemetry-instrumentation" }, { name = "opentelemetry-semantic-conventions", git = "https://github.com/open-telemetry/opentelemetry-python?subdirectory=opentelemetry-semantic-conventions&branch=main" }, From 44f4c1da611a1b1ee7f9d230fa02e61c9eeede87 Mon Sep 17 00:00:00 2001 From: Nagkumar Arkalgud Date: Tue, 21 Oct 2025 11:17:19 -0700 Subject: [PATCH 23/24] Switch tests and samples to gpt-4.1 and refresh VCR cassettes --- .../multi_agent_travel_planner/.env.sample | 4 +- .../multi_agent_travel_planner/main.py | 2 +- ...t_chat_openai_gpt_4_1_model_llm_call.yaml} | 46 ++++++++++--------- ...mazon_nova_lite_v1_0_bedrock_llm_call.yaml | 2 +- .../tests/conftest.py | 6 +-- .../tests/test_llm_call.py | 17 +++---- 6 files changed, 38 insertions(+), 39 deletions(-) rename instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/{test_chat_openai_gpt_3_5_turbo_model_llm_call.yaml => test_chat_openai_gpt_4_1_model_llm_call.yaml} (84%) diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/.env.sample b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/.env.sample index 6785ec4a44..d907f3c016 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/.env.sample +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/.env.sample @@ -3,8 +3,8 @@ # Required OpenAI API key OPENAI_API_KEY=sk-YOUR_API_KEY -# Optional: override default model (defaults to gpt-4o-mini) -# OPENAI_MODEL=gpt-4o-mini +# Optional: override default model (defaults to gpt-4.1) +# OPENAI_MODEL=gpt-4.1 # OTLP exporter configuration (update for your collector) # OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317 diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/main.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/main.py index 363d81fd6f..2d51b6b987 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/main.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/examples/multi_agent_travel_planner/main.py @@ -184,7 +184,7 @@ class PlannerState(TypedDict): def _model_name() -> str: - return os.getenv("OPENAI_MODEL", "gpt-4o-mini") + return os.getenv("OPENAI_MODEL", "gpt-4.1") def _create_llm( diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_chat_openai_gpt_3_5_turbo_model_llm_call.yaml 
b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_chat_openai_gpt_4_1_model_llm_call.yaml similarity index 84% rename from instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_chat_openai_gpt_3_5_turbo_model_llm_call.yaml rename to instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_chat_openai_gpt_4_1_model_llm_call.yaml index c656b08d8f..7d1b209af7 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_chat_openai_gpt_3_5_turbo_model_llm_call.yaml +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_chat_openai_gpt_4_1_model_llm_call.yaml @@ -12,7 +12,7 @@ interactions: "role": "user" } ], - "model": "gpt-3.5-turbo", + "model": "gpt-4.1", "frequency_penalty": 0.5, "max_completion_tokens": 100, "presence_penalty": 0.5, @@ -36,13 +36,13 @@ interactions: connection: - keep-alive content-length: - - '316' + - '310' content-type: - application/json host: - api.openai.com user-agent: - - OpenAI/Python 1.106.1 + - OpenAI/Python 1.109.1 x-stainless-arch: - arm64 x-stainless-async: @@ -52,7 +52,7 @@ interactions: x-stainless-os: - MacOS x-stainless-package-version: - - 1.106.1 + - 1.109.1 x-stainless-raw-response: - 'true' x-stainless-retry-count: @@ -60,17 +60,17 @@ interactions: x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.13.5 + - 3.11.14 method: POST uri: https://api.openai.com/v1/chat/completions response: body: string: |- { - "id": "chatcmpl-CCAQbtjsmG2294sQ6utRc16OQWeol", + "id": "chatcmpl-CTB9d2RMw2N2EBwVMAnYR45AvBvud", "object": "chat.completion", - "created": 1757016057, - "model": "gpt-3.5-turbo-0125", + "created": 1761070425, + "model": "gpt-4.1-2025-04-14", "choices": [ { "index": 0, @@ -100,17 +100,17 @@ interactions: } }, "service_tier": "default", - "system_fingerprint": null + "system_fingerprint": "fp_e24a1fec47" } headers: CF-RAY: - - 97a01376ad4d2af1-LAX + - 9922bb0f79e7a9f8-SEA Connection: - keep-alive Content-Type: - application/json Date: - - Thu, 04 Sep 2025 20:00:57 GMT + - Tue, 21 Oct 2025 18:13:46 GMT Server: - cloudflare Set-Cookie: test_set_cookie @@ -127,30 +127,32 @@ interactions: cf-cache-status: - DYNAMIC content-length: - - '822' + - '833' openai-organization: test_openai_org_id openai-processing-ms: - - '282' + - '353' openai-project: - - proj_GLiYlAc06hF0Fm06IMReZLy4 + - proj_g22EI7wYA0CcaIteeCvlmSng openai-version: - '2020-10-01' x-envoy-upstream-service-time: - - '287' + - '514' + x-openai-proxy-wasm: + - v0.1 x-ratelimit-limit-requests: - - '10000' + - '500' x-ratelimit-limit-tokens: - - '200000' + - '30000' x-ratelimit-remaining-requests: - - '9999' + - '499' x-ratelimit-remaining-tokens: - - '199982' + - '29982' x-ratelimit-reset-requests: - - 8.64s + - 120ms x-ratelimit-reset-tokens: - - 5ms + - 36ms x-request-id: - - req_0e343602788d4f33869d09afcc7d4819 + - req_e6d40dc7ffc44d3e929f2883bdd89612 status: code: 200 message: OK diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml index a6dc912ac0..8a7b6d6e5d 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml +++ 
b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/cassettes/test_us_amazon_nova_lite_v1_0_bedrock_llm_call.yaml @@ -46,7 +46,7 @@ interactions: authorization: - Bearer test_openai_api_key method: POST - uri: https://bedrock-runtime.us-west-2.amazonaws.com/model/arn%3Aaws%3Abedrock%3Aus-west-2%3A906383545488%3Ainference-profile%2Fus.amazon.nova-lite-v1%3A0/converse + uri: https://bedrock-runtime.us-west-2.amazonaws.com/model/us.amazon.nova-lite-v1%3A0/converse response: body: string: |- diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py index 4238a0ff63..f183f86b7c 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py @@ -19,10 +19,10 @@ ) -@pytest.fixture(scope="function", name="chat_openai_gpt_3_5_turbo_model") -def fixture_chat_openai_gpt_3_5_turbo_model(): +@pytest.fixture(scope="function", name="chat_openai_gpt_4_1_model") +def fixture_chat_openai_gpt_4_1_model(): llm = ChatOpenAI( - model="gpt-3.5-turbo", + model="gpt-4.1", temperature=0.1, max_tokens=100, top_p=0.9, diff --git a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py index 6103eda3d8..a0430f985f 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py +++ b/instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py @@ -21,17 +21,17 @@ ) -# span_exporter, start_instrumentation, chat_openai_gpt_3_5_turbo_model are coming from fixtures defined in conftest.py +# span_exporter, start_instrumentation, chat_openai_gpt_4_1_model are coming from fixtures defined in conftest.py @pytest.mark.vcr() -def test_chat_openai_gpt_3_5_turbo_model_llm_call( - span_exporter, start_instrumentation, chat_openai_gpt_3_5_turbo_model +def test_chat_openai_gpt_4_1_model_llm_call( + span_exporter, start_instrumentation, chat_openai_gpt_4_1_model ): messages = [ SystemMessage(content="You are a helpful assistant!"), HumanMessage(content="What is the capital of France?"), ] - response = chat_openai_gpt_3_5_turbo_model.invoke(messages) + response = chat_openai_gpt_4_1_model.invoke(messages) assert response.content == "The capital of France is Paris." 
     # verify spans
@@ -87,15 +87,12 @@ def test_gemini(span_exporter, start_instrumentation, gemini):
 def assert_openai_completion_attributes(
     span: ReadableSpan, response: Optional
 ):
-    assert span.name == "chat gpt-3.5-turbo"
+    assert span.name == "chat gpt-4.1"
     assert span.attributes[gen_ai_attributes.GEN_AI_OPERATION_NAME] == "chat"
-    assert (
-        span.attributes[gen_ai_attributes.GEN_AI_REQUEST_MODEL]
-        == "gpt-3.5-turbo"
-    )
+    assert span.attributes[gen_ai_attributes.GEN_AI_REQUEST_MODEL] == "gpt-4.1"
     assert (
         span.attributes[gen_ai_attributes.GEN_AI_RESPONSE_MODEL]
-        == "gpt-3.5-turbo-0125"
+        == "gpt-4.1-2025-04-14"
     )
     assert span.attributes[gen_ai_attributes.GEN_AI_REQUEST_MAX_TOKENS] == 100
     assert span.attributes[gen_ai_attributes.GEN_AI_REQUEST_TEMPERATURE] == 0.1

From 06bcda073dfbb9535873e4ddb65990c5ee10c986 Mon Sep 17 00:00:00 2001
From: Nagkumar Arkalgud
Date: Tue, 21 Oct 2025 12:00:19 -0700
Subject: [PATCH 24/24] tox -e generate

---
 instrumentation-genai/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/instrumentation-genai/README.md b/instrumentation-genai/README.md
index 57226efab6..d3f2b948a2 100644
--- a/instrumentation-genai/README.md
+++ b/instrumentation-genai/README.md
@@ -6,4 +6,4 @@
 | [opentelemetry-instrumentation-openai-agents-v2](./opentelemetry-instrumentation-openai-agents-v2) | openai-agents >= 0.3.3 | No | development
 | [opentelemetry-instrumentation-openai-v2](./opentelemetry-instrumentation-openai-v2) | openai >= 1.26.0 | Yes | development
 | [opentelemetry-instrumentation-vertexai](./opentelemetry-instrumentation-vertexai) | google-cloud-aiplatform >= 1.64 | No | development
-| [opentelemetry-instrumentation-weaviate](./opentelemetry-instrumentation-weaviate) | weaviate-client >= 3.0.0,<5.0.0 | No | development
+| [opentelemetry-instrumentation-weaviate](./opentelemetry-instrumentation-weaviate) | weaviate-client >= 3.0.0,<5.0.0 | No | development
\ No newline at end of file