Skip to content

Commit da9a98d

Browse files
committed
Resolve merge conflicts.
2 parents 1403bdf + c09a299 commit da9a98d

File tree

7 files changed

+58
-24
lines changed

7 files changed

+58
-24
lines changed

CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
1313

1414
### Added
1515

16+
- `opentelemetry-instrumentation-openai-v2` Update doc for OpenAI Instrumentation to support OpenAI Compatible Platforms
17+
([#3279](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3279))
1618
- `opentelemetry-instrumentation-system-metrics` Add `process` metrics and deprecated `process.runtime` prefixed ones
1719
([#3250](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3250))
1820
- `opentelemetry-instrumentation-botocore` Add support for GenAI user events and lazy initialize tracer

instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py

Lines changed: 25 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,9 @@
4848
# Constant used to make the absence of content more understandable.
4949
_CONTENT_ELIDED = "<elided>"
5050

51+
# Constant used for the value of "gen_ai.operation.name".
52+
_GENERATE_CONTENT_OP_NAME = "generate_content"
53+
5154

5255
class _MethodsSnapshot:
5356
def __init__(self):
@@ -189,12 +192,11 @@ def _get_top_p(config: Optional[GenerateContentConfigOrDict]):
189192
def _to_dict(value: object):
190193
if isinstance(value, dict):
191194
return value
192-
if hasattr(value, 'model_dump'):
195+
if hasattr(value, "model_dump"):
193196
return value.model_dump()
194197
return json.loads(json.dumps(value))
195198

196199

197-
198200
class _GenerateContentInstrumentationHelper:
199201
def __init__(
200202
self,
@@ -216,13 +218,13 @@ def __init__(
216218

217219
def start_span_as_current_span(self, model_name, function_name):
218220
return self._otel_wrapper.start_as_current_span(
219-
f"generate_content {model_name}",
221+
f"{_GENERATE_CONTENT_OP_NAME} {model_name}",
220222
start_time=self._start_time,
221223
attributes={
222224
code_attributes.CODE_FUNCTION_NAME: function_name,
223225
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
224226
gen_ai_attributes.GEN_AI_REQUEST_MODEL: self._genai_request_model,
225-
gen_ai_attributes.GEN_AI_OPERATION_NAME: "GenerateContent",
227+
gen_ai_attributes.GEN_AI_OPERATION_NAME: _GENERATE_CONTENT_OP_NAME,
226228
},
227229
)
228230

@@ -338,19 +340,19 @@ def _maybe_log_user_prompt(
338340
self, contents: Union[ContentListUnion, ContentListUnionDict]
339341
):
340342
if isinstance(contents, list):
341-
total=len(contents)
342-
index=0
343+
total = len(contents)
344+
index = 0
343345
for entry in contents:
344-
self._maybe_log_single_user_prompt(entry, index=index, total=total)
346+
self._maybe_log_single_user_prompt(
347+
entry, index=index, total=total
348+
)
345349
index += 1
346350
else:
347351
self._maybe_log_single_user_prompt(contents)
348352

349353
def _maybe_log_single_user_prompt(
350-
self,
351-
contents: Union[ContentUnion, ContentUnionDict],
352-
index=0,
353-
total=1):
354+
self, contents: Union[ContentUnion, ContentUnionDict], index=0, total=1
355+
):
354356
# TODO: figure out how to report the index in a manner that is
355357
# aligned with the OTel semantic conventions.
356358
attributes = {
@@ -394,7 +396,9 @@ def _maybe_log_response_stats(self, response: GenerateContentResponse):
394396
#
395397
pass
396398

397-
def _maybe_log_response_safety_ratings(self, response: GenerateContentResponse):
399+
def _maybe_log_response_safety_ratings(
400+
self, response: GenerateContentResponse
401+
):
398402
# TODO: Determine if there is a way that we can log
399403
# the "prompt_feedback". This would be especially useful
400404
# in the case where the response is blocked.
@@ -411,7 +415,8 @@ def _maybe_log_response(self, response: GenerateContentResponse):
411415
candidate,
412416
flat_candidate_index=self._candidate_index,
413417
candidate_in_response_index=candidate_in_response_index,
414-
response_index=self._response_index)
418+
response_index=self._response_index,
419+
)
415420
self._candidate_index += 1
416421
candidate_in_response_index += 1
417422

@@ -420,10 +425,11 @@ def _maybe_log_response_candidate(
420425
candidate: Candidate,
421426
flat_candidate_index: int,
422427
candidate_in_response_index: int,
423-
response_index: int):
428+
response_index: int,
429+
):
424430
# TODO: Determine if there might be a way to report the
425431
# response index and candidate response index.
426-
attributes={
432+
attributes = {
427433
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
428434
}
429435
# TODO: determine if "role" should be reported here or not and, if so,
@@ -438,7 +444,7 @@ def _maybe_log_response_candidate(
438444
# "citation_metadata", "grounding_metadata", "logprobs_result", etc.
439445
#
440446
# See also: "TODOS.md"
441-
body={
447+
body = {
442448
"index": flat_candidate_index,
443449
}
444450
if self._content_recording_enabled:
@@ -460,7 +466,7 @@ def _record_token_usage_metric(self):
460466
gen_ai_attributes.GEN_AI_TOKEN_TYPE: "input",
461467
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
462468
gen_ai_attributes.GEN_AI_REQUEST_MODEL: self._genai_request_model,
463-
gen_ai_attributes.GEN_AI_OPERATION_NAME: "GenerateContent",
469+
gen_ai_attributes.GEN_AI_OPERATION_NAME: _GENERATE_CONTENT_OP_NAME,
464470
},
465471
)
466472
self._otel_wrapper.token_usage_metric.record(
@@ -469,15 +475,15 @@ def _record_token_usage_metric(self):
469475
gen_ai_attributes.GEN_AI_TOKEN_TYPE: "output",
470476
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
471477
gen_ai_attributes.GEN_AI_REQUEST_MODEL: self._genai_request_model,
472-
gen_ai_attributes.GEN_AI_OPERATION_NAME: "GenerateContent",
478+
gen_ai_attributes.GEN_AI_OPERATION_NAME: _GENERATE_CONTENT_OP_NAME,
473479
},
474480
)
475481

476482
def _record_duration_metric(self):
477483
attributes = {
478484
gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
479485
gen_ai_attributes.GEN_AI_REQUEST_MODEL: self._genai_request_model,
480-
gen_ai_attributes.GEN_AI_OPERATION_NAME: "GenerateContent",
486+
gen_ai_attributes.GEN_AI_OPERATION_NAME: _GENERATE_CONTENT_OP_NAME,
481487
}
482488
if self._error_type is not None:
483489
attributes[error_attributes.ERROR_TYPE] = self._error_type

instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/requests_mocker.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@
3232
# Because the APIs that need to be mocked are simple enough and well documented
3333
# enough, it seems approachable to mock the requests library, instead.
3434

35+
from typing import Optional
3536
import copy
3637
import functools
3738
import http.client
@@ -81,7 +82,7 @@ def response(self):
8182

8283

8384
def _return_error_status(
84-
args: RequestsCallArgs, status_code: int, reason: str = None
85+
args: RequestsCallArgs, status_code: int, reason: Optional[str] = None
8586
):
8687
result = requests.Response()
8788
result.url = args.request.url
@@ -198,6 +199,7 @@ def _do_send_streaming(
198199
request: requests.PreparedRequest,
199200
**kwargs,
200201
):
202+
args = RequestsCallArgs(session, request, **kwargs)
201203
response_generators = []
202204
for matcher, response_generator in self._handlers:
203205
if matcher is None:
@@ -206,7 +208,6 @@ def _do_send_streaming(
206208
response_generators.append(response_generator)
207209
if not response_generators:
208210
response_generators.append(_return_404)
209-
args = RequestsCallArgs(session, request, **kwargs)
210211
response_generator = _to_stream_response_generator(response_generators)
211212
call = RequestsCall(args, response_generator)
212213
result = call.response

instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ def test_generated_span_has_minimal_genai_attributes(self):
7676
span = self.otel.get_span_named("generate_content gemini-2.0-flash")
7777
self.assertEqual(span.attributes["gen_ai.system"], "gemini")
7878
self.assertEqual(
79-
span.attributes["gen_ai.operation.name"], "GenerateContent"
79+
span.attributes["gen_ai.operation.name"], "generate_content"
8080
)
8181

8282
def test_generated_span_has_correct_function_name(self):
@@ -100,7 +100,7 @@ def test_generated_span_has_vertex_ai_system_when_configured(self):
100100
span = self.otel.get_span_named("generate_content gemini-2.0-flash")
101101
self.assertEqual(span.attributes["gen_ai.system"], "vertex_ai")
102102
self.assertEqual(
103-
span.attributes["gen_ai.operation.name"], "GenerateContent"
103+
span.attributes["gen_ai.operation.name"], "generate_content"
104104
)
105105

106106
def test_generated_span_counts_tokens(self):

instrumentation-genai/opentelemetry-instrumentation-openai-v2/README.rst

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,28 @@ This library allows tracing LLM requests and logging of messages made by the
1010
`OpenAI Python API library <https://pypi.org/project/openai/>`_. It also captures
1111
the duration of the operations and the number of tokens used as metrics.
1212

13+
Many LLM platforms support the OpenAI SDK. Systems such as the following are therefore observable with this instrumentation when accessed through that SDK:
14+
15+
.. list-table:: OpenAI Compatible Platforms
16+
:widths: 40 25
17+
:header-rows: 1
18+
19+
* - Name
20+
- gen_ai.system
21+
* - `Azure OpenAI <https://github.com/openai/openai-python?tab=readme-ov-file#microsoft-azure-openai>`_
22+
- ``az.ai.openai``
23+
* - `Gemini <https://developers.googleblog.com/en/gemini-is-now-accessible-from-the-openai-library/>`_
24+
- ``gemini``
25+
* - `Perplexity <https://docs.perplexity.ai/api-reference/chat-completions>`_
26+
- ``perplexity``
27+
* - `xAI <https://x.ai/api>`_ (Compatible with Anthropic)
28+
- ``xai``
29+
* - `DeepSeek <https://api-docs.deepseek.com/>`_
30+
- ``deepseek``
31+
* - `Groq <https://console.groq.com/docs/openai>`_
32+
- ``groq``
33+
* - `MistralAI <https://docs.mistral.ai/api/>`_
34+
- ``mistral_ai``
1335

1436
Installation
1537
------------

pyproject.toml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -144,6 +144,10 @@ members = [
144144
"propagator/*",
145145
"util/opentelemetry-util-http",
146146
]
147+
# TODO: remove after https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3300
148+
exclude = [
149+
"instrumentation-genai/opentelemetry-instrumentation-google-genai",
150+
]
147151

148152
[tool.ruff]
149153
# https://docs.astral.sh/ruff/configuration/

scripts/generate_instrumentation_bootstrap.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,6 @@
6060
# by manually adding it to their environment.
6161
# See https://github.com/open-telemetry/opentelemetry-python-contrib/issues/2787
6262
"opentelemetry-instrumentation-aws-lambda",
63-
6463
# Google GenAI instrumentation is currently excluded because it is still in early
6564
# development. This filter will get removed once it is further along in its
6665
# development lifecycle and ready to be included by default.

0 commit comments

Comments
 (0)