Skip to content

Commit 0fca01f

Browse files
committed
Added span support for llm invocation
1 parent bbcdf8b commit 0fca01f

File tree

16 files changed

+957
-3
lines changed

16 files changed

+957
-3
lines changed
Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
# Update this with your real OpenAI API key
2+
OPENAI_API_KEY=sk-YOUR_API_KEY
3+
4+
# Uncomment and change to your OTLP endpoint
5+
# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317
6+
# OTEL_EXPORTER_OTLP_PROTOCOL=grpc
7+
8+
OTEL_SERVICE_NAME=opentelemetry-python-langchain-manual
Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
OpenTelemetry Langchain Instrumentation Example
2+
============================================
3+
4+
This is an example of how to instrument Langchain when configuring OpenTelemetry SDK and instrumentations manually.
5+
6+
When :code:`main.py <main.py>`_ is run, it exports traces to an OTLP-compatible endpoint.
7+
Traces include details such as the span name and other attributes.
8+
9+
Note: :code:`.env <.env>`_ file configures additional environment variables:
10+
- :code:`OTEL_LOGS_EXPORTER=otlp` to specify exporter type.
11+
- :code:`OPENAI_API_KEY` OpenAI API key for accessing the OpenAI API.
12+
- :code:`OTEL_EXPORTER_OTLP_ENDPOINT` to specify the endpoint for exporting traces (default is http://localhost:4317).
13+
14+
Setup
15+
-----
16+
17+
Minimally, update the :code:`.env <.env>`_ file with your :code:`OPENAI_API_KEY`.
18+
An OTLP compatible endpoint should be listening for traces http://localhost:4317.
19+
If not, update :code:`OTEL_EXPORTER_OTLP_ENDPOINT` as well.
20+
21+
Next, set up a virtual environment like this:
22+
23+
::
24+
25+
python3 -m venv .venv
26+
source .venv/bin/activate
27+
pip install "python-dotenv[cli]"
28+
pip install -r requirements.txt
29+
30+
Run
31+
---
32+
33+
Run the example like this:
34+
35+
::
36+
37+
dotenv run -- python main.py
38+
39+
You should see the capital of France generated by Langchain ChatOpenAI while traces export to your configured observability tool.
Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,45 @@
1+
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.instrumentation.langchain import LangChainInstrumentor
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor

# Configure tracing: batch spans and export them over OTLP/gRPC to the
# endpoint configured via OTEL_EXPORTER_OTLP_ENDPOINT (default localhost:4317).
trace.set_tracer_provider(TracerProvider())
span_processor = BatchSpanProcessor(OTLPSpanExporter())
trace.get_tracer_provider().add_span_processor(span_processor)


def main():
    """Instrument LangChain, run one chat completion, then un-instrument.

    Exports the LLM invocation span to the configured OTLP endpoint and
    prints the model's answer to stdout.
    """
    LangChainInstrumentor().instrument()
    try:
        llm = ChatOpenAI(
            model="gpt-3.5-turbo",
            temperature=0.1,
            max_tokens=100,
            top_p=0.9,
            frequency_penalty=0.5,
            presence_penalty=0.5,
            stop_sequences=["\n", "Human:", "AI:"],
            seed=100,
        )
        messages = [
            SystemMessage(content="You are a helpful assistant!"),
            HumanMessage(content="What is the capital of France?"),
        ]
        result = llm.invoke(messages)
        print("LLM output:\n", result)
    finally:
        # Always un-instrument, even if the invocation raises, so the
        # callback-manager patch does not leak past this example.
        LangChainInstrumentor().uninstrument()


if __name__ == "__main__":
    main()
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
langchain==0.3.21
2+
langchain_openai
3+
opentelemetry-sdk~=1.30.0
4+
opentelemetry-exporter-otlp-proto-grpc~=1.30.0
5+
6+
# Uncomment after langchain instrumentation is released
7+
# opentelemetry-instrumentation-langchain~=2.0b0.dev
Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
# Update this with your real OpenAI API key
2+
OPENAI_API_KEY=sk-YOUR_API_KEY
3+
4+
# Uncomment and change to your OTLP endpoint
5+
# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317
6+
# OTEL_EXPORTER_OTLP_PROTOCOL=grpc
7+
8+
OTEL_SERVICE_NAME=opentelemetry-python-langchain-zero-code
Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
OpenTelemetry Langchain Zero-Code Instrumentation Example
2+
======================================================
3+
4+
This is an example of how to instrument Langchain with zero code changes,
5+
using `opentelemetry-instrument`.
6+
7+
When :code:`main.py <main.py>`_ is run, it exports traces to an OTLP-compatible endpoint.
8+
Traces include details such as the span name and other attributes.
9+
10+
Note: :code:`.env <.env>`_ file configures additional environment variables:
11+
- :code:`OTEL_LOGS_EXPORTER=otlp` to specify exporter type.
12+
- :code:`OPENAI_API_KEY` OpenAI API key for accessing the OpenAI API.
13+
- :code:`OTEL_EXPORTER_OTLP_ENDPOINT` to specify the endpoint for exporting traces (default is http://localhost:4317).
14+
15+
Setup
16+
-----
17+
18+
Minimally, update the :code:`.env <.env>`_ file with your :code:`OPENAI_API_KEY`.
19+
An OTLP compatible endpoint should be listening for traces http://localhost:4317.
20+
If not, update :code:`OTEL_EXPORTER_OTLP_ENDPOINT` as well.
21+
22+
Next, set up a virtual environment like this:
23+
24+
::
25+
26+
python3 -m venv .venv
27+
source .venv/bin/activate
28+
pip install "python-dotenv[cli]"
29+
pip install -r requirements.txt
30+
31+
Run
32+
---
33+
34+
Run the example like this:
35+
36+
::
37+
38+
dotenv run -- opentelemetry-instrument python main.py
39+
40+
You should see the capital of France generated by Langchain ChatOpenAI while traces export to your configured observability tool.
Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,58 @@
1+
from langchain_core.messages import HumanMessage, SystemMessage
2+
from langchain_openai import ChatOpenAI
3+
4+
# from opentelemetry import trace
5+
# from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
6+
# from opentelemetry.sdk.trace import TracerProvider
7+
# from opentelemetry.sdk.trace.export import BatchSpanProcessor
8+
# # Configure tracing
9+
# trace.set_tracer_provider(TracerProvider())
10+
# span_processor = BatchSpanProcessor(OTLPSpanExporter())
11+
# trace.get_tracer_provider().add_span_processor(span_processor)
12+
13+
from flask import Flask, request, jsonify
14+
15+
# Set up logging
16+
import logging
17+
logging.basicConfig(level=logging.INFO)
18+
logger = logging.getLogger(__name__)
19+
20+
app = Flask(__name__)
21+
22+
@app.post("/chat")
def chat():
    """Handle POST /chat: ask the LLM a fixed question, return its answer.

    NOTE(review): the request payload is parsed but currently unused — the
    prompt is hard-coded; confirm whether the payload should drive it.

    Returns a JSON body with the model's answer, or a JSON error with
    status 500 if anything fails.
    """
    try:
        payload = request.get_json(silent=True) or request.form
        llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.1, max_tokens=100)
        messages = [
            SystemMessage(content="You are a helpful assistant!"),
            HumanMessage(content="What is the capital of France?"),
        ]
        result = llm.invoke(messages).content
        logger.info("LLM output: %s", result)
        # The original returned None on success (Flask rejects that);
        # return a JSON-serializable response instead.
        return jsonify({"response": result})
    except Exception as e:  # broad catch is deliberate: request boundary
        logger.error(f"Error processing chat request: {e}")
        # The original built jsonify({str}) — a set, which is not JSON
        # serializable (and shadowed the builtin `str`). Use a dict.
        return jsonify({"error": f"Error processing chat request: {e}"}), 500
41+
# from opentelemetry import _logs
42+
# from opentelemetry.exporter.otlp.proto.grpc._log_exporter import (
43+
# OTLPLogExporter,
44+
# )
45+
# from opentelemetry.sdk._logs import LoggerProvider
46+
# from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
47+
# _logs.set_logger_provider(LoggerProvider())
48+
# _logs.get_logger_provider().add_log_record_processor(
49+
# BatchLogRecordProcessor(OTLPLogExporter())
50+
# )
51+
# import logging
52+
# logger = logging.getLogger(__name__)
53+
# logging.basicConfig(level=logging.DEBUG)
54+
# logger.debug("OpenTelemetry instrumentation for LangChain encountered an error in $$$$$$$$$$$$$$$$")
55+
56+
if __name__ == "__main__":
    # Started directly (python main.py): serve on all interfaces, port 5003.
    app.run(host="0.0.0.0", port=5003)
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
langchain==0.3.21
2+
langchain_openai
3+
opentelemetry-sdk~=1.30.0
4+
opentelemetry-exporter-otlp-proto-grpc~=1.30.0
5+
6+
# Uncomment after langchain instrumentation is released
7+
# opentelemetry-instrumentation-langchain~=2.0b0.dev

instrumentation-genai/opentelemetry-instrumentation-langchain/pyproject.toml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,9 +25,9 @@ classifiers = [
2525
"Programming Language :: Python :: 3.13",
2626
]
2727
dependencies = [
28-
"opentelemetry-api ~= 1.30",
29-
"opentelemetry-instrumentation ~= 0.51b0",
30-
"opentelemetry-semantic-conventions ~= 0.51b0"
28+
"opentelemetry-api == 1.30",
29+
"opentelemetry-instrumentation == 0.51b0",
30+
"opentelemetry-semantic-conventions == 0.51b0"
3131
]
3232

3333
[project.optional-dependencies]
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,113 @@
1+
# Copyright The OpenTelemetry Authors
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
"""
16+
Langchain instrumentation supporting `ChatOpenAI`, it can be enabled by
17+
using ``LangChainInstrumentor``.
18+
19+
Usage
20+
-----
21+
.. code:: python
22+
from opentelemetry.instrumentation.langchain import LangChainInstrumentor
23+
from langchain_core.messages import HumanMessage, SystemMessage
24+
from langchain_openai import ChatOpenAI
25+
26+
LangChainInstrumentor().instrument()
27+
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0, max_tokens=1000)
28+
messages = [
29+
SystemMessage(content="You are a helpful assistant!"),
30+
HumanMessage(content="What is the capital of France?"),
31+
]
32+
result = llm.invoke(messages)
33+
LangChainInstrumentor().uninstrument()
34+
35+
API
36+
---
37+
"""
38+
39+
from typing import Collection
40+
41+
from wrapt import wrap_function_wrapper
42+
43+
from opentelemetry.instrumentation.langchain.config import Config
44+
from opentelemetry.instrumentation.langchain.version import __version__
45+
from opentelemetry.instrumentation.langchain.package import _instruments
46+
from opentelemetry.instrumentation.langchain.callback_handler import (
47+
OpenTelemetryLangChainCallbackHandler,
48+
)
49+
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
50+
from opentelemetry.instrumentation.utils import unwrap
51+
from opentelemetry.trace import get_tracer
52+
from opentelemetry.semconv.schemas import Schemas
53+
54+
55+
class LangChainInstrumentor(BaseInstrumentor):
    """OpenTelemetry instrumentor for LangChain.

    Adds a custom callback handler to the LangChain callback manager
    to capture LLM telemetry as spans.
    """

    # Single source of truth for the wrap target so instrument/uninstrument
    # always address the same attribute. (The original wrapped via
    # "langchain_core.callbacks" but unwrapped via
    # "langchain_core.callbacks.base", which only worked because both
    # paths resolve to the same class object.)
    _WRAP_MODULE = "langchain_core.callbacks"
    _WRAP_NAME = "BaseCallbackManager.__init__"

    def __init__(self, exception_logger=None):
        super().__init__()
        # Optional hook for callers to observe instrumentation-internal errors.
        Config.exception_logger = exception_logger

    def instrumentation_dependencies(self) -> Collection[str]:
        """Return the package specs this instrumentation requires."""
        return _instruments

    def _instrument(self, **kwargs):
        """Enable LangChain instrumentation.

        Accepts an optional ``tracer_provider`` keyword; falls back to the
        global provider when not given.
        """
        tracer_provider = kwargs.get("tracer_provider")
        tracer = get_tracer(
            __name__,
            __version__,
            tracer_provider,
            schema_url=Schemas.V1_28_0.value,
        )

        otel_callback_handler = OpenTelemetryLangChainCallbackHandler(
            tracer=tracer,
        )

        wrap_function_wrapper(
            module=self._WRAP_MODULE,
            name=self._WRAP_NAME,
            wrapper=_BaseCallbackManagerInitWrapper(otel_callback_handler),
        )

    def _uninstrument(self, **kwargs):
        """Cleanup instrumentation (unwrap the callback-manager patch)."""
        unwrap(self._WRAP_MODULE, self._WRAP_NAME)
96+
97+
98+
class _BaseCallbackManagerInitWrapper:
    """Wrap ``BaseCallbackManager.__init__`` to register the OTel handler.

    After the original ``__init__`` runs, ensures exactly one handler of
    the OTel handler's type is present in the manager's inheritable
    handlers list.
    """

    def __init__(self, callback_handler):
        # Single handler instance shared by every callback manager we patch.
        self._otel_handler = callback_handler

    def __call__(self, wrapped, instance, args, kwargs):
        # Run the original constructor first; keep its return value so this
        # wrapper stays transparent (the original discarded it).
        result = wrapped(*args, **kwargs)
        # Add our handler only if one of its type is not already registered,
        # so repeated __init__ calls never duplicate it.
        already_present = any(
            isinstance(handler, type(self._otel_handler))
            for handler in instance.inheritable_handlers
        )
        if not already_present:
            instance.add_handler(self._otel_handler, inherit=True)
        return result

0 commit comments

Comments
 (0)