@@ -19,10 +19,16 @@ pip install elastic-opentelemetry-instrumentation-openai

## Usage

This instrumentation supports *0-code* / *auto* instrumentation:
This instrumentation supports *0-code* / *autoinstrumentation*:

```
opentelemetry-instrument python use_openai.py

# You can record more information about prompts as span events by enabling content capture.
OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true opentelemetry-instrument python use_openai.py

# You can record more information about prompts as log events by enabling content capture.
OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true ELASTIC_OTEL_GENAI_EVENTS=log opentelemetry-instrument python use_openai.py
```

Or manual instrumentation:
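
The full manual-instrumentation example is collapsed in this diff view. As a rough sketch of what it amounts to, assuming the package exposes an `OpenAIInstrumentor` entry point (that name does not appear in this diff) and using the `capture_message_content` keyword introduced by this change:

```python
import openai

# Assumed entry point; only its _instrument() internals appear in this diff.
from opentelemetry.instrumentation.openai import OpenAIInstrumentor

# Instrument the OpenAI client library; content capture is off by default.
OpenAIInstrumentor().instrument(capture_message_content=True)

client = openai.OpenAI()
messages = [{"role": "user", "content": "Hello!"}]
chat_completion = client.chat.completions.create(model="gpt-4o-mini", messages=messages)
```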
@@ -48,8 +54,8 @@ chat_completion = client.chat.completions.create(model="gpt-4o-mini", messages=m

### Instrumentation specific environment variable configuration

- `ELASTIC_OTEL_GENAI_CAPTURE_CONTENT` (default: `false`): when set to `true`, collects more
information about prompts and responses by enabling content capture.
- `ELASTIC_OTEL_GENAI_EVENTS` (default: `span`): when set to `log` exports GenAI events as
log events instead of span events.
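
Going by the parsing logic further down in this diff, the environment variable takes precedence over the value passed to `instrument()`, which only sets the fallback default. A minimal sketch of that interaction (the `OpenAIInstrumentor` entry-point name is an assumption, as above):

```python
import os

# Assumed entry point; the diff only shows the instrumentor's internals.
from opentelemetry.instrumentation.openai import OpenAIInstrumentor

# The kwarg provides the fallback; the environment variable, when set, wins.
os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = "false"
OpenAIInstrumentor().instrument(capture_message_content=True)
# Content capture stays disabled: the env var value "false" overrides the kwarg.
```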

### Elastic specific semantic conventions

@@ -26,8 +26,8 @@
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.instrumentation.utils import unwrap
from opentelemetry.instrumentation.openai.environment_variables import (
ELASTIC_OTEL_GENAI_CAPTURE_CONTENT,
ELASTIC_OTEL_GENAI_EVENTS,
OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT,
)
from opentelemetry.instrumentation.openai.helpers import (
_get_embeddings_span_attributes_from_wrapper,
@@ -78,10 +78,13 @@ def _instrument(self, **kwargs):
``tracer_provider``: a TracerProvider, defaults to global
``meter_provider``: a MeterProvider, defaults to global
``event_logger_provider``: a EventLoggerProvider, defaults to global
``capture_content``: to enable content capturing, defaults to False
``capture_message_content``: to enable content capturing, defaults to False
"""
capture_content = "true" if kwargs.get("capture_content") else "false"
self.capture_content = os.environ.get(ELASTIC_OTEL_GENAI_CAPTURE_CONTENT, capture_content).lower() == "true"
capture_message_content = "true" if kwargs.get("capture_message_content") else "false"
self.capture_message_content = (
os.environ.get(OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, capture_message_content).lower()
== "true"
)

# we support 3 values for deciding how to send events:
# - "latest" to match latest semconv, as 1.27.0 it's span
@@ -135,7 +138,7 @@ def _patch(self, _module):
)

def _uninstrument(self, **kwargs):
# unwrap only supports uninstrementing real module references so we
# unwrap only supports uninstrumenting real module references so we
# import here.
import openai

@@ -159,7 +162,7 @@ def _chat_completion_wrapper(self, wrapped, instance, args, kwargs):
end_on_exit=False,
) as span:
# TODO: more fine grained depending on the message.role?
if self.capture_content:
if self.capture_message_content:
messages = kwargs.get("messages", [])

if self.event_kind == "log":
@@ -184,7 +187,7 @@ def _chat_completion_wrapper(self, wrapped, instance, args, kwargs):
return StreamWrapper(
stream=result,
span=span,
capture_content=self.capture_content,
capture_message_content=self.capture_message_content,
event_kind=self.event_kind,
event_attributes=event_attributes,
event_logger=self.event_logger,
@@ -201,7 +204,7 @@ def _chat_completion_wrapper(self, wrapped, instance, args, kwargs):
_record_token_usage_metrics(self.token_usage_metric, span, result.usage)
_record_operation_duration_metric(self.operation_duration_metric, span, start_time)

if self.capture_content:
if self.capture_message_content:
if self.event_kind == "log":
_send_log_events_from_choices(
self.event_logger, choices=result.choices, attributes=event_attributes
@@ -234,7 +237,7 @@ async def _async_chat_completion_wrapper(self, wrapped, instance, args, kwargs):
# this is important to avoid having the span closed before ending the stream
end_on_exit=False,
) as span:
if self.capture_content:
if self.capture_message_content:
messages = kwargs.get("messages", [])

if self.event_kind == "log":
@@ -259,7 +262,7 @@ async def _async_chat_completion_wrapper(self, wrapped, instance, args, kwargs):
return StreamWrapper(
stream=result,
span=span,
capture_content=self.capture_content,
capture_message_content=self.capture_message_content,
event_kind=self.event_kind,
event_attributes=event_attributes,
event_logger=self.event_logger,
@@ -276,7 +279,7 @@ async def _async_chat_completion_wrapper(self, wrapped, instance, args, kwargs):
_record_token_usage_metrics(self.token_usage_metric, span, result.usage)
_record_operation_duration_metric(self.operation_duration_metric, span, start_time)

if self.capture_content:
if self.capture_message_content:
if self.event_kind == "log":
_send_log_events_from_choices(
self.event_logger, choices=result.choices, attributes=event_attributes
@@ -14,6 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

ELASTIC_OTEL_GENAI_CAPTURE_CONTENT = "ELASTIC_OTEL_GENAI_CAPTURE_CONTENT"
OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT = "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"

ELASTIC_OTEL_GENAI_EVENTS = "ELASTIC_OTEL_GENAI_EVENTS"
@@ -45,7 +45,7 @@ def __init__(
self,
stream,
span: Span,
capture_content: bool,
capture_message_content: bool,
event_kind: Literal["log", "span"],
event_attributes: Attributes,
event_logger: EventLogger,
@@ -55,7 +55,7 @@
):
self.stream = stream
self.span = span
self.capture_content = capture_content
self.capture_message_content = capture_message_content
self.event_kind = event_kind
self.event_attributes = event_attributes
self.event_logger = event_logger
@@ -84,7 +84,7 @@ def end(self, exc=None):
if self.usage:
_record_token_usage_metrics(self.token_usage_metric, self.span, self.usage)

if self.capture_content:
if self.capture_message_content:
if self.event_kind == "log":
_send_log_events_from_stream_choices(
self.event_logger, choices=self.choices, span=self.span, attributes=self.event_attributes