Commit e8dd71b

Update _map_finish_reason() and use it in span attribute as well
1 parent 193c446 commit e8dd71b

3 files changed: +42, −11 lines

instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py

Lines changed: 4 additions & 7 deletions
@@ -155,7 +155,8 @@ def get_genai_response_attributes(
     | prediction_service_v1beta1.GenerateContentResponse,
 ) -> dict[str, AttributeValue]:
     finish_reasons: list[str] = [
-        candidate.finish_reason.name for candidate in response.candidates
+        _map_finish_reason(candidate.finish_reason)
+        for candidate in response.candidates
     ]
     # TODO: add gen_ai.response.id once available in the python client
     # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3246

@@ -276,9 +277,5 @@ def _map_finish_reason(
     if finish_reason is EnumType.MAX_TOKENS:
         return "length"

-    # There are a lot of specific enum values from Vertex that would map to "content_filter".
-    # I'm worried trying to map the enum obfuscates the telemetry because 1) it over
-    # generalizes and 2) half of the values are from the OTel enum and others from the vertex
-    # enum. See for reference
-    # https://github.com/googleapis/python-aiplatform/blob/c5023698c7068e2f84523f91b824641c9ef2d694/google/cloud/aiplatform_v1/types/content.py#L786-L822
-    return finish_reason.name.lower()
+    # If there is no 1:1 mapping to an OTel preferred enum value, use the exact vertex reason
+    return finish_reason.name
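
For context, a sketch of what the full _map_finish_reason() helper plausibly looks like after this commit. Only the tail of the function appears in the hunk above; the earlier branches are inferred from the expectations in test_map_finish_reason (added below in test_utils.py), so this is an approximation, not the exact body of utils.py:

from __future__ import annotations

from google.cloud.aiplatform_v1.types import content
from google.cloud.aiplatform_v1beta1.types import content as content_v1beta1


def _map_finish_reason(
    finish_reason: content.Candidate.FinishReason
    | content_v1beta1.Candidate.FinishReason,
) -> str:
    # v1 and v1beta1 define separate FinishReason enums, so resolve members
    # against whichever enum type the caller passed in.
    EnumType = type(finish_reason)
    if (
        finish_reason is EnumType.FINISH_REASON_UNSPECIFIED
        or finish_reason is EnumType.OTHER
    ):
        return "error"
    if finish_reason is EnumType.STOP:
        return "stop"
    if finish_reason is EnumType.MAX_TOKENS:
        return "length"

    # If there is no 1:1 mapping to an OTel preferred enum value, use the exact vertex reason
    return finish_reason.name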

instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_chat_completions.py

Lines changed: 3 additions & 3 deletions
@@ -38,7 +38,7 @@ def test_generate_content(
     assert dict(spans[0].attributes) == {
         "gen_ai.operation.name": "chat",
         "gen_ai.request.model": "gemini-1.5-flash-002",
-        "gen_ai.response.finish_reasons": ("STOP",),
+        "gen_ai.response.finish_reasons": ("stop",),
         "gen_ai.response.model": "gemini-1.5-flash-002",
         "gen_ai.system": "vertex_ai",
         "gen_ai.usage.input_tokens": 5,

@@ -106,7 +106,7 @@ def test_generate_content_without_events(
     assert dict(spans[0].attributes) == {
         "gen_ai.operation.name": "chat",
         "gen_ai.request.model": "gemini-1.5-flash-002",
-        "gen_ai.response.finish_reasons": ("STOP",),
+        "gen_ai.response.finish_reasons": ("stop",),
         "gen_ai.response.model": "gemini-1.5-flash-002",
         "gen_ai.system": "vertex_ai",
         "gen_ai.usage.input_tokens": 5,

@@ -294,7 +294,7 @@ def test_generate_content_extra_params(span_exporter, instrument_no_content):
         "gen_ai.request.stop_sequences": ("\n\n\n",),
         "gen_ai.request.temperature": 0.20000000298023224,
         "gen_ai.request.top_p": 0.949999988079071,
-        "gen_ai.response.finish_reasons": ("MAX_TOKENS",),
+        "gen_ai.response.finish_reasons": ("length",),
         "gen_ai.response.model": "gemini-1.5-flash-002",
         "gen_ai.system": "vertex_ai",
         "gen_ai.usage.input_tokens": 5,

instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_utils.py

Lines changed: 35 additions & 1 deletion
@@ -13,7 +13,19 @@
 # limitations under the License.


-from opentelemetry.instrumentation.vertexai.utils import get_server_attributes
+from opentelemetry.instrumentation.vertexai.utils import (
+    get_server_attributes,
+    _map_finish_reason,
+)
+import pytest
+from google.cloud.aiplatform_v1.types import (
+    content,
+    prediction_service,
+    tool,
+)
+from google.cloud.aiplatform_v1beta1.types import (
+    content as content_v1beta1,
+)


 def test_get_server_attributes() -> None:

@@ -30,3 +42,25 @@ def test_get_server_attributes() -> None:
         "server.address": "us-central1-aiplatform.googleapis.com",
         "server.port": 5432,
     }
+
+
+def test_map_finish_reason():
+    for Enum in (
+        content.Candidate.FinishReason,
+        content_v1beta1.Candidate.FinishReason,
+    ):
+        for finish_reason, expect in [
+            # Handled mappings
+            (Enum.FINISH_REASON_UNSPECIFIED, "error"),
+            (Enum.OTHER, "error"),
+            (Enum.STOP, "stop"),
+            (Enum.MAX_TOKENS, "length"),
+            # Preserve vertex enum value
+            (Enum.BLOCKLIST, "BLOCKLIST"),
+            (Enum.MALFORMED_FUNCTION_CALL, "MALFORMED_FUNCTION_CALL"),
+            (Enum.PROHIBITED_CONTENT, "PROHIBITED_CONTENT"),
+            (Enum.RECITATION, "RECITATION"),
+            (Enum.SAFETY, "SAFETY"),
+            (Enum.SPII, "SPII"),
+        ]:
+            assert _map_finish_reason(finish_reason) == expect
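
The new test walks both enum flavors with nested loops. Since pytest is imported in the hunk above, an equivalent parametrized form would also work; the sketch below is only illustrative (the test name and the trimmed parameter list are hypothetical, not part of this commit):

import pytest
from google.cloud.aiplatform_v1.types import content
from google.cloud.aiplatform_v1beta1.types import content as content_v1beta1

from opentelemetry.instrumentation.vertexai.utils import _map_finish_reason


@pytest.mark.parametrize(
    "enum_cls",
    [content.Candidate.FinishReason, content_v1beta1.Candidate.FinishReason],
)
@pytest.mark.parametrize(
    ("reason_name", "expect"),
    [
        ("FINISH_REASON_UNSPECIFIED", "error"),
        ("OTHER", "error"),
        ("STOP", "stop"),
        ("MAX_TOKENS", "length"),
        ("BLOCKLIST", "BLOCKLIST"),
        ("SAFETY", "SAFETY"),
    ],
)
def test_map_finish_reason_parametrized(enum_cls, reason_name, expect):
    # Each (enum flavor, finish reason) pair becomes its own test case.
    assert _map_finish_reason(getattr(enum_cls, reason_name)) == expect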
