Skip to content

Commit ae0ab2f

Browse files
committed
fixed check runs and updated dependencies
1 parent 33afa20 commit ae0ab2f

File tree

13 files changed

+121
-86
lines changed

13 files changed

+121
-86
lines changed

instrumentation-genai/opentelemetry-instrumentation-langchain/CHANGELOG.md

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,4 +5,7 @@ All notable changes to this project will be documented in this file.
55
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
66
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
77

8-
## Unreleased
8+
## Unreleased
9+
10+
- Added span support for genAI langchain llm invocation.
11+
([#3665](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3665))

instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/main.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,11 @@
11
from langchain_core.messages import HumanMessage, SystemMessage
22
from langchain_openai import ChatOpenAI
33

4-
from opentelemetry.instrumentation.langchain import LangChainInstrumentor
54
from opentelemetry import trace
6-
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
5+
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
6+
OTLPSpanExporter,
7+
)
8+
from opentelemetry.instrumentation.langchain import LangChainInstrumentor
79
from opentelemetry.sdk.trace import TracerProvider
810
from opentelemetry.sdk.trace.export import BatchSpanProcessor
911

@@ -42,4 +44,4 @@ def main():
4244
LangChainInstrumentor().uninstrument()
4345

4446
if __name__ == "__main__":
45-
main()
47+
main()
Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
langchain==0.3.21
22
langchain_openai
3-
opentelemetry-sdk~=1.30.0
4-
opentelemetry-exporter-otlp-proto-grpc~=1.30.0
3+
opentelemetry-sdk~=1.36.0
4+
opentelemetry-exporter-otlp-proto-grpc~=1.36.0
55

66
# Uncomment after langchain instrumentation is released
77
# opentelemetry-instrumentation-langchain~=2.0b0.dev
Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,18 @@
11
from langchain_core.messages import HumanMessage, SystemMessage
22
from langchain_openai import ChatOpenAI
33

4-
def main():
54

6-
llm = ChatOpenAI(model="gpt-3.5-turbo")
5+
def main():
6+
llm = ChatOpenAI(
7+
model="gpt-3.5-turbo",
8+
temperature=0.1,
9+
max_tokens=100,
10+
top_p=0.9,
11+
frequency_penalty=0.5,
12+
presence_penalty=0.5,
13+
stop_sequences=["\n", "Human:", "AI:"],
14+
seed=100,
15+
)
716

817
messages = [
918
SystemMessage(content="You are a helpful assistant!"),
@@ -14,4 +23,5 @@ def main():
1423
print("LLM output:\n", result)
1524

1625
if __name__ == "__main__":
17-
main()
26+
main()
27+
Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,8 @@
11
langchain==0.3.21
22
langchain_openai
3-
opentelemetry-sdk~=1.30.0
4-
opentelemetry-exporter-otlp-proto-grpc~=1.30.0
3+
opentelemetry-sdk~=1.36.0
4+
opentelemetry-exporter-otlp-proto-grpc~=1.36.0
5+
opentelemetry-distro~=0.57b0
56

67
# Uncomment after langchain instrumentation is released
78
# opentelemetry-instrumentation-langchain~=2.0b0.dev

instrumentation-genai/opentelemetry-instrumentation-langchain/pyproject.toml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,9 +25,9 @@ classifiers = [
2525
"Programming Language :: Python :: 3.13",
2626
]
2727
dependencies = [
28-
"opentelemetry-api == 1.30",
29-
"opentelemetry-instrumentation == 0.51b0",
30-
"opentelemetry-semantic-conventions == 0.51b0"
28+
"opentelemetry-api == 1.36.0",
29+
"opentelemetry-instrumentation == 0.57b0",
30+
"opentelemetry-semantic-conventions == 0.57b0"
3131
]
3232

3333
[project.optional-dependencies]

instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -36,20 +36,20 @@
3636
---
3737
"""
3838

39-
from typing import Collection
39+
from typing import Any, Callable, Collection, Optional
4040

41-
from wrapt import wrap_function_wrapper
41+
from wrapt import wrap_function_wrapper # type: ignore
4242

43-
from opentelemetry.instrumentation.langchain.config import Config
44-
from opentelemetry.instrumentation.langchain.version import __version__
45-
from opentelemetry.instrumentation.langchain.package import _instruments
43+
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
4644
from opentelemetry.instrumentation.langchain.callback_handler import (
4745
OpenTelemetryLangChainCallbackHandler,
4846
)
49-
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
47+
from opentelemetry.instrumentation.langchain.config import Config
48+
from opentelemetry.instrumentation.langchain.package import _instruments
49+
from opentelemetry.instrumentation.langchain.version import __version__
5050
from opentelemetry.instrumentation.utils import unwrap
51-
from opentelemetry.trace import get_tracer
5251
from opentelemetry.semconv.schemas import Schemas
52+
from opentelemetry.trace import get_tracer
5353

5454

5555
class LangChainInstrumentor(BaseInstrumentor):
@@ -59,14 +59,14 @@ class LangChainInstrumentor(BaseInstrumentor):
5959
to capture LLM telemetry.
6060
"""
6161

62-
def __init__(self, exception_logger=None):
62+
def __init__(self, exception_logger: Optional[Callable[[Exception], Any]] = None):
6363
super().__init__()
6464
Config.exception_logger = exception_logger
6565

6666
def instrumentation_dependencies(self) -> Collection[str]:
6767
return _instruments
6868

69-
def _instrument(self, **kwargs):
69+
def _instrument(self, **kwargs: Any):
7070
"""
7171
Enable Langchain instrumentation.
7272
"""
@@ -88,7 +88,7 @@ def _instrument(self, **kwargs):
8888
wrapper=_BaseCallbackManagerInitWrapper(otel_callback_handler),
8989
)
9090

91-
def _uninstrument(self, **kwargs):
91+
def _uninstrument(self, **kwargs: Any):
9292
"""
9393
Cleanup instrumentation (unwrap).
9494
"""
@@ -100,14 +100,14 @@ class _BaseCallbackManagerInitWrapper:
100100
Wrap the BaseCallbackManager __init__ to insert custom callback handler in the manager's handlers list.
101101
"""
102102

103-
def __init__(self, callback_handler):
103+
def __init__(self, callback_handler: OpenTelemetryLangChainCallbackHandler):
104104
self._otel_handler = callback_handler
105105

106-
def __call__(self, wrapped, instance, args, kwargs):
106+
def __call__(self, wrapped: Callable[..., None], instance: Any, args: tuple[Any, ...], kwargs: dict[str, Any]):
107107
wrapped(*args, **kwargs)
108108
# Ensure our OTel callback is present if not already.
109109
for handler in instance.inheritable_handlers:
110110
if isinstance(handler, type(self._otel_handler)):
111111
break
112112
else:
113-
instance.add_handler(self._otel_handler, inherit=True)
113+
instance.add_handler(self._otel_handler, inherit=True)

instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py

Lines changed: 52 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -1,19 +1,23 @@
11
import time
22
from dataclasses import dataclass, field
3-
from typing import Any, Dict, List, Optional, Union
3+
from typing import Any, Dict, List, Optional
44
from uuid import UUID
55

6-
from langchain_core.callbacks import BaseCallbackHandler
7-
from langchain_core.messages import BaseMessage
8-
from langchain_core.outputs import LLMResult
6+
from langchain_core.callbacks import BaseCallbackHandler # type: ignore
7+
from langchain_core.messages import BaseMessage # type: ignore
8+
from langchain_core.outputs import LLMResult # type: ignore
99

10-
from opentelemetry.context import get_current, Context
11-
from opentelemetry.semconv._incubating.attributes import gen_ai_attributes as GenAI
12-
from opentelemetry.semconv.attributes import error_attributes as ErrorAttributes
13-
from opentelemetry.trace import Span, SpanKind, set_span_in_context, use_span
10+
from opentelemetry.context import Context, get_current
11+
from opentelemetry.instrumentation.langchain.utils import dont_throw
12+
from opentelemetry.semconv._incubating.attributes import (
13+
gen_ai_attributes as GenAI,
14+
)
15+
from opentelemetry.semconv.attributes import (
16+
error_attributes as ErrorAttributes,
17+
)
18+
from opentelemetry.trace import Span, SpanKind, Tracer, set_span_in_context
1419
from opentelemetry.trace.status import Status, StatusCode
1520

16-
from opentelemetry.instrumentation.langchain.utils import dont_throw
1721

1822
@dataclass
1923
class _SpanState:
@@ -23,16 +27,16 @@ class _SpanState:
2327
children: List[UUID] = field(default_factory=list)
2428

2529

26-
class OpenTelemetryLangChainCallbackHandler(BaseCallbackHandler):
30+
class OpenTelemetryLangChainCallbackHandler(BaseCallbackHandler): # type: ignore[misc]
2731
"""
2832
A callback handler for LangChain that uses OpenTelemetry to create spans for LLM calls and, in the future, chains, tools, etc.
2933
"""
3034

3135
def __init__(
3236
self,
33-
tracer,
37+
tracer: Tracer,
3438
) -> None:
35-
super().__init__()
39+
super().__init__() # type: ignore
3640
self._tracer = tracer
3741

3842
# Map from run_id -> _SpanState, to keep track of spans and parent/child relationships
@@ -80,30 +84,31 @@ def _create_llm_span(
8084

8185
return span
8286

83-
def _end_span(self, run_id: UUID):
87+
def _end_span(self, run_id: UUID) -> None:
8488
state = self.spans[run_id]
8589
for child_id in state.children:
8690
child_state = self.spans.get(child_id)
87-
if child_state and child_state.span.end_time is None:
91+
if child_state:
92+
# Always end child spans as OpenTelemetry spans don't expose end_time directly
8893
child_state.span.end()
89-
if state.span.end_time is None:
90-
state.span.end()
94+
# Always end the span as OpenTelemetry spans don't expose end_time directly
95+
state.span.end()
9196

9297
def _get_span(self, run_id: UUID) -> Span:
9398
return self.spans[run_id].span
9499

95100
@dont_throw
96101
def on_chat_model_start(
97102
self,
98-
serialized: dict,
99-
messages: List[List[BaseMessage]],
103+
serialized: Dict[str, Any],
104+
messages: List[List[BaseMessage]], # type: ignore
100105
*,
101106
run_id: UUID,
102-
tags: Optional[list[str]] = None,
107+
tags: Optional[List[str]] = None,
103108
parent_run_id: Optional[UUID] = None,
104-
metadata: Optional[dict[str, Any]] = None,
105-
**kwargs,
106-
):
109+
metadata: Optional[Dict[str, Any]] = None,
110+
**kwargs: Any,
111+
) -> None:
107112
name = serialized.get("name") or kwargs.get("name") or "ChatLLM"
108113
span = self._create_llm_span(
109114
run_id=run_id,
@@ -113,7 +118,7 @@ def on_chat_model_start(
113118

114119
invocation_params = kwargs.get("invocation_params")
115120
if invocation_params is not None:
116-
request_model = kwargs.get("invocation_params").get("model_name")
121+
request_model = invocation_params.get("model_name")
117122
if request_model is not None:
118123
span.set_attribute(GenAI.GEN_AI_REQUEST_MODEL, request_model)
119124
top_p = invocation_params.get("top_p")
@@ -129,7 +134,7 @@ def on_chat_model_start(
129134
if stop_sequences is not None:
130135
span.set_attribute(GenAI.GEN_AI_REQUEST_STOP_SEQUENCES, stop_sequences)
131136
seed = invocation_params.get("seed")
132-
if stop_sequences is not None:
137+
if seed is not None:
133138
span.set_attribute(GenAI.GEN_AI_REQUEST_SEED, seed)
134139

135140

@@ -148,41 +153,44 @@ def on_chat_model_start(
148153
@dont_throw
149154
def on_llm_end(
150155
self,
151-
response: LLMResult,
156+
response: LLMResult, # type: ignore
152157
*,
153158
run_id: UUID,
154-
parent_run_id: Union[UUID, None] = None,
155-
**kwargs,
156-
):
159+
parent_run_id: Optional[UUID] = None,
160+
**kwargs: Any,
161+
) -> None:
157162
span = self._get_span(run_id)
158163

159-
finish_reasons = []
160-
for generation in getattr(response, "generations", []):
161-
for index, chat_generation in enumerate(generation):
162-
generation_info = chat_generation.generation_info
164+
finish_reasons: List[str] = []
165+
for generation in getattr(response, "generations", []): # type: ignore
166+
for chat_generation in generation:
167+
generation_info = getattr(chat_generation, "generation_info", None)
163168
if generation_info is not None:
164169
finish_reason = generation_info.get("finish_reason")
165170
if finish_reason is not None:
166-
finish_reasons.append(finish_reason or "error")
171+
finish_reasons.append(str(finish_reason) or "error")
167172

168173
span.set_attribute(GenAI.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons)
169174

170-
if response.llm_output is not None:
171-
response_model = response.llm_output.get("model_name") or response.llm_output.get("model")
175+
llm_output = getattr(response, "llm_output", None) # type: ignore
176+
if llm_output is not None:
177+
response_model = llm_output.get("model_name") or llm_output.get("model")
172178
if response_model is not None:
173-
span.set_attribute(GenAI.GEN_AI_RESPONSE_MODEL, response_model)
179+
span.set_attribute(GenAI.GEN_AI_RESPONSE_MODEL, str(response_model))
174180

175-
response_id = response.llm_output.get("id")
181+
response_id = llm_output.get("id")
176182
if response_id is not None:
177-
span.set_attribute(GenAI.GEN_AI_RESPONSE_ID, response_id)
183+
span.set_attribute(GenAI.GEN_AI_RESPONSE_ID, str(response_id))
178184

179185
# usage
180-
usage = response.llm_output.get("usage") or response.llm_output.get("token_usage")
186+
usage = llm_output.get("usage") or llm_output.get("token_usage")
181187
if usage:
182188
prompt_tokens = usage.get("prompt_tokens", 0)
183189
completion_tokens = usage.get("completion_tokens", 0)
184-
span.set_attribute(GenAI.GEN_AI_USAGE_INPUT_TOKENS, prompt_tokens)
185-
span.set_attribute(GenAI.GEN_AI_USAGE_OUTPUT_TOKENS, completion_tokens)
190+
span.set_attribute(GenAI.GEN_AI_USAGE_INPUT_TOKENS,
191+
int(prompt_tokens) if prompt_tokens is not None else 0)
192+
span.set_attribute(GenAI.GEN_AI_USAGE_OUTPUT_TOKENS,
193+
int(completion_tokens) if completion_tokens is not None else 0)
186194

187195
# End the LLM span
188196
self._end_span(run_id)
@@ -194,12 +202,12 @@ def on_llm_error(
194202
*,
195203
run_id: UUID,
196204
parent_run_id: Optional[UUID] = None,
197-
**kwargs,
198-
):
205+
**kwargs: Any,
206+
) -> None:
199207
self._handle_error(error, run_id)
200208

201209
def _handle_error(self, error: BaseException, run_id: UUID):
202210
span = self._get_span(run_id)
203211
span.set_status(Status(StatusCode.ERROR, str(error)))
204212
span.set_attribute(ErrorAttributes.ERROR_TYPE, type(error).__qualname__)
205-
self._end_span(run_id)
213+
self._end_span(run_id)

instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/config.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,11 +11,13 @@
1111
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
14+
from typing import Any, Callable, Optional
15+
1416

1517
class Config:
1618
"""
1719
Shared static config for LangChain OTel instrumentation.
1820
"""
1921

2022
# Logger to handle exceptions during instrumentation
21-
exception_logger = None
23+
exception_logger: Optional[Callable[[Exception], Any]] = None

0 commit comments

Comments
 (0)