Skip to content

Commit 2c545ce

Browse files
committed
Skip telemetry for chat models other than ChatOpenAI and ChatBedrock; add a test covering this behavior.
1 parent 81a8e76 commit 2c545ce

File tree

4 files changed

+36
-1
lines changed

4 files changed

+36
-1
lines changed

instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414

1515
"""
1616
Langchain instrumentation supporting `ChatOpenAI` and `ChatBedrock`, it can be enabled by
17-
using ``LangChainInstrumentor``.
17+
using ``LangChainInstrumentor``. Other providers/LLMs may be supported in the future and telemetry for them is skipped for now.
1818
1919
Usage
2020
-----

instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,12 @@ def on_chat_model_start(
5454
metadata: dict[str, Any] | None,
5555
**kwargs: Any,
5656
) -> None:
57+
# Other providers/LLMs may be supported in the future and telemetry for them is skipped for now.
58+
if serialized and "name" in serialized:
59+
name = serialized.get("name")
60+
if name not in ("ChatOpenAI", "ChatBedrock"):
61+
return
62+
5763
if "invocation_params" in kwargs:
5864
params = (
5965
kwargs["invocation_params"].get("params")
@@ -74,6 +80,10 @@ def on_chat_model_start(
7480
request_model = model
7581
break
7682

83+
# Skip telemetry for unsupported request models
84+
if request_model == "unknown":
85+
return
86+
7787
span = self.span_manager.create_chat_span(
7888
run_id=run_id,
7989
parent_run_id=parent_run_id,

instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
import pytest
88
import yaml
99
from langchain_aws import ChatBedrock
10+
from langchain_google_genai import ChatGoogleGenerativeAI
1011
from langchain_openai import ChatOpenAI
1112

1213
from opentelemetry.instrumentation.langchain import LangChainInstrumentor
@@ -51,6 +52,13 @@ def fixture_us_amazon_nova_lite_v1_0():
5152
yield llm
5253

5354

55+
@pytest.fixture(scope="function", name="gemini")
56+
def fixture_gemini():
57+
llm_model_value = "gemini-2.5-pro"
58+
llm = ChatGoogleGenerativeAI(model=llm_model_value, api_key="test_key")
59+
yield llm
60+
61+
5462
@pytest.fixture(scope="function", name="span_exporter")
5563
def fixture_span_exporter():
5664
exporter = InMemorySpanExporter()

instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,23 @@ def test_us_amazon_nova_lite_v1_0_bedrock_llm_call(
5353
assert_bedrock_completion_attributes(spans[0], result)
5454

5555

56+
# span_exporter, start_instrumentation, and gemini come from fixtures defined in conftest.py
57+
@pytest.mark.vcr()
58+
def test_gemini(span_exporter, start_instrumentation, gemini):
59+
messages = [
60+
SystemMessage(content="You are a helpful assistant!"),
61+
HumanMessage(content="What is the capital of France?"),
62+
]
63+
64+
result = gemini.invoke(messages)
65+
66+
assert result.content.find("The capital of France is **Paris**") != -1
67+
68+
# verify spans
69+
spans = span_exporter.get_finished_spans()
70+
assert len(spans) == 0 # No spans should be created for gemini as of now
71+
72+
5673
def assert_openai_completion_attributes(
5774
span: ReadableSpan, response: Optional
5875
):

0 commit comments

Comments
 (0)