 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json
 import logging
 import os
 from timeit import default_timer
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from opentelemetry.instrumentation.utils import unwrap
 from opentelemetry.instrumentation.openai.environment_variables import (
-    ELASTIC_OTEL_GENAI_EVENTS,
     OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT,
 )
 from opentelemetry.instrumentation.openai.helpers import (
     _get_embeddings_span_attributes_from_wrapper,
     _get_event_attributes,
     _get_span_attributes_from_wrapper,
-    _message_from_choice,
     _record_token_usage_metrics,
     _record_operation_duration_metric,
     _send_log_events_from_messages,
 from opentelemetry.instrumentation.openai.version import __version__
 from opentelemetry.instrumentation.openai.wrappers import StreamWrapper
 from opentelemetry.metrics import get_meter
-from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
-    GEN_AI_COMPLETION,
-    GEN_AI_PROMPT,
-)
 from opentelemetry.semconv._incubating.metrics.gen_ai_metrics import (
     create_gen_ai_client_token_usage,
     create_gen_ai_client_operation_duration,
@@ -86,13 +79,6 @@ def _instrument(self, **kwargs):
8679 == "true"
8780 )
8881
89- # we support 3 values for deciding how to send events:
90- # - "latest" to match latest semconv, as 1.28.0 it's log
91- # - "log" to send log events (default)
92- # - "span" to send span events
93- genai_events = os .environ .get (ELASTIC_OTEL_GENAI_EVENTS , "latest" ).lower ()
94- self .event_kind = "span" if genai_events == "span" else "log"
95-
9682 tracer_provider = kwargs .get ("tracer_provider" )
9783 self .tracer = get_tracer (
9884 __name__ ,
@@ -165,13 +151,7 @@ def _chat_completion_wrapper(self, wrapped, instance, args, kwargs):
             if self.capture_message_content:
                 messages = kwargs.get("messages", [])
 
-                if self.event_kind == "log":
-                    _send_log_events_from_messages(self.event_logger, messages=messages, attributes=event_attributes)
-                elif span.is_recording():
-                    try:
-                        span.add_event(EVENT_GEN_AI_CONTENT_PROMPT, attributes={GEN_AI_PROMPT: json.dumps(messages)})
-                    except TypeError:
-                        logger.error(f"Failed to serialize {EVENT_GEN_AI_CONTENT_PROMPT}")
+                _send_log_events_from_messages(self.event_logger, messages=messages, attributes=event_attributes)
 
             start_time = default_timer()
             try:
@@ -188,7 +168,6 @@ def _chat_completion_wrapper(self, wrapped, instance, args, kwargs):
                     stream=result,
                     span=span,
                     capture_message_content=self.capture_message_content,
-                    event_kind=self.event_kind,
                     event_attributes=event_attributes,
                     event_logger=self.event_logger,
                     start_time=start_time,
@@ -205,19 +184,7 @@ def _chat_completion_wrapper(self, wrapped, instance, args, kwargs):
             _record_operation_duration_metric(self.operation_duration_metric, span, start_time)
 
             if self.capture_message_content:
-                if self.event_kind == "log":
-                    _send_log_events_from_choices(
-                        self.event_logger, choices=result.choices, attributes=event_attributes
-                    )
-                elif span.is_recording():
-                    # same format as the prompt
-                    completion = [_message_from_choice(choice) for choice in result.choices]
-                    try:
-                        span.add_event(
-                            EVENT_GEN_AI_CONTENT_COMPLETION, attributes={GEN_AI_COMPLETION: json.dumps(completion)}
-                        )
-                    except TypeError:
-                        logger.error(f"Failed to serialize {EVENT_GEN_AI_CONTENT_COMPLETION}")
+                _send_log_events_from_choices(self.event_logger, choices=result.choices, attributes=event_attributes)
 
             span.end()
 
@@ -239,14 +206,7 @@ async def _async_chat_completion_wrapper(self, wrapped, instance, args, kwargs):
         ) as span:
             if self.capture_message_content:
                 messages = kwargs.get("messages", [])
-
-                if self.event_kind == "log":
-                    _send_log_events_from_messages(self.event_logger, messages=messages, attributes=event_attributes)
-                elif span.is_recording():
-                    try:
-                        span.add_event(EVENT_GEN_AI_CONTENT_PROMPT, attributes={GEN_AI_PROMPT: json.dumps(messages)})
-                    except TypeError:
-                        logger.error(f"Failed to serialize {EVENT_GEN_AI_CONTENT_PROMPT}")
+                _send_log_events_from_messages(self.event_logger, messages=messages, attributes=event_attributes)
 
             start_time = default_timer()
             try:
@@ -263,7 +223,6 @@ async def _async_chat_completion_wrapper(self, wrapped, instance, args, kwargs):
                     stream=result,
                     span=span,
                     capture_message_content=self.capture_message_content,
-                    event_kind=self.event_kind,
                     event_attributes=event_attributes,
                     event_logger=self.event_logger,
                     start_time=start_time,
@@ -280,19 +239,7 @@ async def _async_chat_completion_wrapper(self, wrapped, instance, args, kwargs):
             _record_operation_duration_metric(self.operation_duration_metric, span, start_time)
 
             if self.capture_message_content:
-                if self.event_kind == "log":
-                    _send_log_events_from_choices(
-                        self.event_logger, choices=result.choices, attributes=event_attributes
-                    )
-                elif span.is_recording():
-                    # same format as the prompt
-                    completion = [_message_from_choice(choice) for choice in result.choices]
-                    try:
-                        span.add_event(
-                            EVENT_GEN_AI_CONTENT_COMPLETION, attributes={GEN_AI_COMPLETION: json.dumps(completion)}
-                        )
-                    except TypeError:
-                        logger.error(f"Failed to serialize {EVENT_GEN_AI_CONTENT_COMPLETION}")
+                _send_log_events_from_choices(self.event_logger, choices=result.choices, attributes=event_attributes)
 
             span.end()
 
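Net effect of this change: prompt and completion content is emitted only as gen_ai log events, gated solely by OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT; the ELASTIC_OTEL_GENAI_EVENTS switch and the span-event fallback are gone. A minimal usage sketch under that assumption (the OpenAIInstrumentor entry-point name is assumed here, since it does not appear in this diff):

    import os

    # Opt in to capturing message content; defaults to "false" per the check in _instrument above.
    os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = "true"

    # Assumed entry point for this package; adjust if the actual instrumentor class differs.
    from opentelemetry.instrumentation.openai import OpenAIInstrumentor

    # Patches the chat completion methods wrapped above; prompts and choices are then
    # sent as log events via _send_log_events_from_messages / _send_log_events_from_choices.
    OpenAIInstrumentor().instrument()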