Skip to content

Commit eb99cba

Browse files
authored
Merge branch 'main' into fix-copyright-botocore
2 parents dd60bcf + 44754e2 commit eb99cba

File tree

10 files changed: +356 −12 lines changed

instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,3 +11,5 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
  ([#3192](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3192))
- Initial VertexAI instrumentation
  ([#3123](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3123))
- Add server attributes to Vertex AI spans
  ([#3208](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3208))

instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
from opentelemetry.instrumentation.vertexai.utils import (
2626
GenerateContentParams,
2727
get_genai_request_attributes,
28+
get_server_attributes,
2829
get_span_name,
2930
)
3031
from opentelemetry.trace import SpanKind, Tracer
@@ -100,7 +101,11 @@ def traced_method(
100101
kwargs: Any,
101102
):
102103
params = _extract_params(*args, **kwargs)
103-
span_attributes = get_genai_request_attributes(params)
104+
api_endpoint: str = instance.api_endpoint # type: ignore[reportUnknownMemberType]
105+
span_attributes = {
106+
**get_genai_request_attributes(params),
107+
**get_server_attributes(api_endpoint),
108+
}
104109

105110
span_name = get_span_name(span_attributes)
106111
with tracer.start_as_current_span(

instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,10 +22,12 @@
2222
Mapping,
2323
Sequence,
2424
)
25+
from urllib.parse import urlparse
2526

2627
from opentelemetry.semconv._incubating.attributes import (
2728
gen_ai_attributes as GenAIAttributes,
2829
)
30+
from opentelemetry.semconv.attributes import server_attributes
2931
from opentelemetry.util.types import AttributeValue
3032

3133
if TYPE_CHECKING:
@@ -58,6 +60,24 @@ class GenerateContentParams:
5860
) = None
5961

6062

63+
def get_server_attributes(
64+
endpoint: str,
65+
) -> dict[str, AttributeValue]:
66+
"""Get server.* attributes from the endpoint, which is a hostname with optional port e.g.
67+
- ``us-central1-aiplatform.googleapis.com``
68+
- ``us-central1-aiplatform.googleapis.com:5431``
69+
"""
70+
parsed = urlparse(f"scheme://{endpoint}")
71+
72+
if not parsed.hostname:
73+
return {}
74+
75+
return {
76+
server_attributes.SERVER_ADDRESS: parsed.hostname,
77+
server_attributes.SERVER_PORT: parsed.port or 443,
78+
}
79+
80+
6181
def get_genai_request_attributes(
6282
params: GenerateContentParams,
6383
operation_name: GenAIAttributes.GenAiOperationNameValues = GenAIAttributes.GenAiOperationNameValues.CHAT,

instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_chat_completions.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,8 @@ def test_generate_content(
3434
"gen_ai.operation.name": "chat",
3535
"gen_ai.request.model": "gemini-1.5-flash-002",
3636
"gen_ai.system": "vertex_ai",
37+
"server.address": "us-central1-aiplatform.googleapis.com",
38+
"server.port": 443,
3739
}
3840

3941

@@ -62,6 +64,8 @@ def test_generate_content_empty_model(
6264
"gen_ai.operation.name": "chat",
6365
"gen_ai.request.model": "",
6466
"gen_ai.system": "vertex_ai",
67+
"server.address": "us-central1-aiplatform.googleapis.com",
68+
"server.port": 443,
6569
}
6670
assert_span_error(spans[0])
6771

@@ -91,6 +95,8 @@ def test_generate_content_missing_model(
9195
"gen_ai.operation.name": "chat",
9296
"gen_ai.request.model": "gemini-does-not-exist",
9397
"gen_ai.system": "vertex_ai",
98+
"server.address": "us-central1-aiplatform.googleapis.com",
99+
"server.port": 443,
94100
}
95101
assert_span_error(spans[0])
96102

@@ -122,6 +128,8 @@ def test_generate_content_invalid_temperature(
122128
"gen_ai.request.model": "gemini-1.5-flash-002",
123129
"gen_ai.request.temperature": 1000.0,
124130
"gen_ai.system": "vertex_ai",
131+
"server.address": "us-central1-aiplatform.googleapis.com",
132+
"server.port": 443,
125133
}
126134
assert_span_error(spans[0])
127135

@@ -158,6 +166,8 @@ def test_generate_content_extra_params(span_exporter, instrument_no_content):
158166
"gen_ai.request.temperature": 0.20000000298023224,
159167
"gen_ai.request.top_p": 0.949999988079071,
160168
"gen_ai.system": "vertex_ai",
169+
"server.address": "us-central1-aiplatform.googleapis.com",
170+
"server.port": 443,
161171
}
162172

163173

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
# Copyright The OpenTelemetry Authors
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
16+
from opentelemetry.instrumentation.vertexai.utils import get_server_attributes
17+
18+
19+
def test_get_server_attributes() -> None:
20+
# without port
21+
assert get_server_attributes("us-central1-aiplatform.googleapis.com") == {
22+
"server.address": "us-central1-aiplatform.googleapis.com",
23+
"server.port": 443,
24+
}
25+
26+
# with port
27+
assert get_server_attributes(
28+
"us-central1-aiplatform.googleapis.com:5432"
29+
) == {
30+
"server.address": "us-central1-aiplatform.googleapis.com",
31+
"server.port": 5432,
32+
}

instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py

Lines changed: 17 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -266,6 +266,12 @@ def _invoke_model_on_success(
266266
if original_body is not None:
267267
original_body.close()
268268

269+
def _on_stream_error_callback(self, span: Span, exception):
270+
span.set_status(Status(StatusCode.ERROR, str(exception)))
271+
if span.is_recording():
272+
span.set_attribute(ERROR_TYPE, type(exception).__qualname__)
273+
span.end()
274+
269275
def on_success(self, span: Span, result: dict[str, Any]):
270276
if self._call_context.operation not in self._HANDLED_OPERATIONS:
271277
return
@@ -282,8 +288,11 @@ def stream_done_callback(response):
282288
self._converse_on_success(span, response)
283289
span.end()
284290

291+
def stream_error_callback(exception):
292+
self._on_stream_error_callback(span, exception)
293+
285294
result["stream"] = ConverseStreamWrapper(
286-
result["stream"], stream_done_callback
295+
result["stream"], stream_done_callback, stream_error_callback
287296
)
288297
return
289298

@@ -307,8 +316,14 @@ def invoke_model_stream_done_callback(response):
307316
self._converse_on_success(span, response)
308317
span.end()
309318

319+
def invoke_model_stream_error_callback(exception):
320+
self._on_stream_error_callback(span, exception)
321+
310322
result["body"] = InvokeModelWithResponseStreamWrapper(
311-
result["body"], invoke_model_stream_done_callback, model_id
323+
result["body"],
324+
invoke_model_stream_done_callback,
325+
invoke_model_stream_error_callback,
326+
model_id,
312327
)
313328
return
314329

instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py

Lines changed: 25 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -15,10 +15,14 @@
1515
from __future__ import annotations
1616

1717
import json
18+
from typing import Callable, Dict, Union
1819

19-
from botocore.eventstream import EventStream
20+
from botocore.eventstream import EventStream, EventStreamError
2021
from wrapt import ObjectProxy
2122

23+
_StreamDoneCallableT = Callable[[Dict[str, Union[int, str]]], None]
24+
_StreamErrorCallableT = Callable[[Exception], None]
25+
2226

2327
# pylint: disable=abstract-method
2428
class ConverseStreamWrapper(ObjectProxy):
@@ -27,19 +31,25 @@ class ConverseStreamWrapper(ObjectProxy):
2731
def __init__(
2832
self,
2933
stream: EventStream,
30-
stream_done_callback,
34+
stream_done_callback: _StreamDoneCallableT,
35+
stream_error_callback: _StreamErrorCallableT,
3136
):
3237
super().__init__(stream)
3338

3439
self._stream_done_callback = stream_done_callback
40+
self._stream_error_callback = stream_error_callback
3541
# accumulating things in the same shape of non-streaming version
3642
# {"usage": {"inputTokens": 0, "outputTokens": 0}, "stopReason": "finish"}
3743
self._response = {}
3844

3945
def __iter__(self):
40-
for event in self.__wrapped__:
41-
self._process_event(event)
42-
yield event
46+
try:
47+
for event in self.__wrapped__:
48+
self._process_event(event)
49+
yield event
50+
except EventStreamError as exc:
51+
self._stream_error_callback(exc)
52+
raise
4353

4454
def _process_event(self, event):
4555
if "messageStart" in event:
@@ -81,22 +91,28 @@ class InvokeModelWithResponseStreamWrapper(ObjectProxy):
8191
def __init__(
8292
self,
8393
stream: EventStream,
84-
stream_done_callback,
94+
stream_done_callback: _StreamDoneCallableT,
95+
stream_error_callback: _StreamErrorCallableT,
8596
model_id: str,
8697
):
8798
super().__init__(stream)
8899

89100
self._stream_done_callback = stream_done_callback
101+
self._stream_error_callback = stream_error_callback
90102
self._model_id = model_id
91103

92104
# accumulating things in the same shape of the Converse API
93105
# {"usage": {"inputTokens": 0, "outputTokens": 0}, "stopReason": "finish"}
94106
self._response = {}
95107

96108
def __iter__(self):
97-
for event in self.__wrapped__:
98-
self._process_event(event)
99-
yield event
109+
try:
110+
for event in self.__wrapped__:
111+
self._process_event(event)
112+
yield event
113+
except EventStreamError as exc:
114+
self._stream_error_callback(exc)
115+
raise
100116

101117
def _process_event(self, event):
102118
if "chunk" not in event:
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,71 @@
1+
interactions:
2+
- request:
3+
body: '{"messages": [{"role": "user", "content": [{"text": "Say this is a test"}]}],
4+
"inferenceConfig": {"maxTokens": 10, "temperature": 0.8, "topP": 1, "stopSequences":
5+
["|"]}}'
6+
headers:
7+
Content-Length:
8+
- '170'
9+
Content-Type:
10+
- !!binary |
11+
YXBwbGljYXRpb24vanNvbg==
12+
User-Agent:
13+
- !!binary |
14+
Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x
15+
MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0
16+
aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2
17+
X-Amz-Date:
18+
- !!binary |
19+
MjAyNTAxMjdUMTE0NjAyWg==
20+
X-Amz-Security-Token:
21+
- test_aws_security_token
22+
X-Amzn-Trace-Id:
23+
- !!binary |
24+
Um9vdD0xLWI5YzVlMjRlLWRmYzBjYTYyMmFiYjA2ZWEyMjAzZDZkYjtQYXJlbnQ9NDE0MWM4NWIx
25+
ODkzMmI3OTtTYW1wbGVkPTE=
26+
amz-sdk-invocation-id:
27+
- !!binary |
28+
YjA0ZTAzYWEtMDg2MS00NGIzLTk3NmMtMWZjOGE5MzY5YTFl
29+
amz-sdk-request:
30+
- !!binary |
31+
YXR0ZW1wdD0x
32+
authorization:
33+
- Bearer test_aws_authorization
34+
method: POST
35+
uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/amazon.titan-text-lite-v1/converse-stream
36+
response:
37+
body:
38+
string: !!binary |
39+
AAAAswAAAFK3IJ11CzpldmVudC10eXBlBwAMbWVzc2FnZVN0YXJ0DTpjb250ZW50LXR5cGUHABBh
40+
cHBsaWNhdGlvbi9qc29uDTptZXNzYWdlLXR5cGUHAAVldmVudHsicCI6ImFiY2RlZmdoaWprbG1u
41+
b3BxcnN0dXZ3eHl6QUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVowMSIsInJvbGUiOiJhc3Npc3Rh
42+
bnQifRl7p7oAAAC3AAAAVzLKzzoLOmV2ZW50LXR5cGUHABFjb250ZW50QmxvY2tEZWx0YQ06Y29u
43+
dGVudC10eXBlBwAQYXBwbGljYXRpb24vanNvbg06bWVzc2FnZS10eXBlBwAFZXZlbnR7ImNvbnRl
44+
bnRCbG9ja0luZGV4IjowLCJkZWx0YSI6eyJ0ZXh0IjoiSGkhIEknbSBhbiBBSSBsYW5ndWFnZSJ9
45+
LCJwIjoiYWJjZGVmZ2gifUn9+AsAAACUAAAAVsOsqngLOmV2ZW50LXR5cGUHABBjb250ZW50Qmxv
46+
Y2tTdG9wDTpjb250ZW50LXR5cGUHABBhcHBsaWNhdGlvbi9qc29uDTptZXNzYWdlLXR5cGUHAAVl
47+
dmVudHsiY29udGVudEJsb2NrSW5kZXgiOjAsInAiOiJhYmNkZWZnaGlqa2xtbm9wIn3KsHRKAAAA
48+
pgAAAFGGKdQ9CzpldmVudC10eXBlBwALbWVzc2FnZVN0b3ANOmNvbnRlbnQtdHlwZQcAEGFwcGxp
49+
Y2F0aW9uL2pzb24NOm1lc3NhZ2UtdHlwZQcABWV2ZW50eyJwIjoiYWJjZGVmZ2hpamtsbW5vcHFy
50+
c3R1dnd4eXpBQkNERUZHSEkiLCJzdG9wUmVhc29uIjoibWF4X3Rva2VucyJ9eRUDZQAAAPUAAABO
51+
dJJs0ws6ZXZlbnQtdHlwZQcACG1ldGFkYXRhDTpjb250ZW50LXR5cGUHABBhcHBsaWNhdGlvbi9q
52+
c29uDTptZXNzYWdlLXR5cGUHAAVldmVudHsibWV0cmljcyI6eyJsYXRlbmN5TXMiOjY2NH0sInAi
53+
OiJhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ekFCQ0RFRkdISUpLTE1OT1BRUlNUVVZXWFlaMDEi
54+
LCJ1c2FnZSI6eyJpbnB1dFRva2VucyI6OCwib3V0cHV0VG9rZW5zIjoxMCwidG90YWxUb2tlbnMi
55+
OjE4fX3B+Dpy
56+
headers:
57+
Connection:
58+
- keep-alive
59+
Content-Type:
60+
- application/vnd.amazon.eventstream
61+
Date:
62+
- Mon, 27 Jan 2025 11:46:02 GMT
63+
Set-Cookie: test_set_cookie
64+
Transfer-Encoding:
65+
- chunked
66+
x-amzn-RequestId:
67+
- 657e0bef-5ebb-4387-be65-d3ceafd53dea
68+
status:
69+
code: 200
70+
message: OK
71+
version: 1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
interactions:
2+
- request:
3+
body: '{"inputText": "Say this is a test", "textGenerationConfig": {"maxTokenCount":
4+
10, "temperature": 0.8, "topP": 1, "stopSequences": ["|"]}}'
5+
headers:
6+
Content-Length:
7+
- '137'
8+
User-Agent:
9+
- !!binary |
10+
Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x
11+
MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0
12+
aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2
13+
X-Amz-Date:
14+
- !!binary |
15+
MjAyNTAxMjdUMTIwMTU0Wg==
16+
X-Amz-Security-Token:
17+
- test_aws_security_token
18+
X-Amzn-Trace-Id:
19+
- !!binary |
20+
Um9vdD0xLWJhYTFjOTdhLTI3M2UxYTlhYjIyMTM1NGQwN2JjNGNhYztQYXJlbnQ9OTVhNmQzZGEx
21+
YTZkZjM4ZjtTYW1wbGVkPTE=
22+
amz-sdk-invocation-id:
23+
- !!binary |
24+
ZWQxZGViZmQtZTE5NS00N2RiLWIyMzItMTY1MzJhYjQzZTM0
25+
amz-sdk-request:
26+
- !!binary |
27+
YXR0ZW1wdD0x
28+
authorization:
29+
- Bearer test_aws_authorization
30+
method: POST
31+
uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/amazon.titan-text-lite-v1/invoke-with-response-stream
32+
response:
33+
body:
34+
string: !!binary |
35+
AAACBAAAAEs8ZEC6CzpldmVudC10eXBlBwAFY2h1bmsNOmNvbnRlbnQtdHlwZQcAEGFwcGxpY2F0
36+
aW9uL2pzb24NOm1lc3NhZ2UtdHlwZQcABWV2ZW50eyJieXRlcyI6ImV5SnZkWFJ3ZFhSVVpYaDBJ
37+
am9pSUdOdmJXMWxiblJjYmtobGJHeHZJU0JKSUdGdElHRWdZMjl0Y0hWMFpYSWdjSEp2WjNKaGJT
38+
QmtaWE5wWjI1bFpDSXNJbWx1WkdWNElqb3dMQ0owYjNSaGJFOTFkSEIxZEZSbGVIUlViMnRsYmtO
39+
dmRXNTBJam94TUN3aVkyOXRjR3hsZEdsdmJsSmxZWE52YmlJNklreEZUa2RVU0NJc0ltbHVjSFYw
40+
VkdWNGRGUnZhMlZ1UTI5MWJuUWlPalVzSW1GdFlYcHZiaTFpWldSeWIyTnJMV2x1ZG05allYUnBi
41+
MjVOWlhSeWFXTnpJanA3SW1sdWNIVjBWRzlyWlc1RGIzVnVkQ0k2TlN3aWIzVjBjSFYwVkc5clpX
42+
NURiM1Z1ZENJNk1UQXNJbWx1ZG05allYUnBiMjVNWVhSbGJtTjVJam8yTnpRc0ltWnBjbk4wUW5s
43+
MFpVeGhkR1Z1WTNraU9qWTNNMzE5IiwicCI6ImFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6In2J
44+
Hw51
45+
headers:
46+
Connection:
47+
- keep-alive
48+
Content-Type:
49+
- application/vnd.amazon.eventstream
50+
Date:
51+
- Mon, 27 Jan 2025 12:01:55 GMT
52+
Set-Cookie: test_set_cookie
53+
Transfer-Encoding:
54+
- chunked
55+
X-Amzn-Bedrock-Content-Type:
56+
- application/json
57+
x-amzn-RequestId:
58+
- 1eb1af77-fb2f-400f-9bf8-049e38b90f02
59+
status:
60+
code: 200
61+
message: OK
62+
version: 1

0 commit comments

Comments
 (0)