
Commit 2d85b8e

Merge branch 'main' into sqlalchemy-snippet
2 parents 70497c8 + fdcd80d commit 2d85b8e

30 files changed (+3572, -1233 lines)
New GitHub Actions workflow: OSSF Scorecard

Lines changed: 47 additions & 0 deletions
@@ -0,0 +1,47 @@
name: OSSF Scorecard

on:
  push:
    branches:
      - main
  schedule:
    - cron: "10 6 * * 1" # once a week
  workflow_dispatch:

permissions: read-all

jobs:
  analysis:
    runs-on: ubuntu-latest
    permissions:
      # Needed for Code scanning upload
      security-events: write
      # Needed for GitHub OIDC token if publish_results is true
      id-token: write
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          persist-credentials: false

      - uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1
        with:
          results_file: results.sarif
          results_format: sarif
          publish_results: true

      # Upload the results as artifacts (optional). Commenting out will disable
      # uploads of run results in SARIF format to the repository Actions tab.
      # https://docs.github.com/en/actions/advanced-guides/storing-workflow-data-as-artifacts
      - name: "Upload artifact"
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        with:
          name: SARIF file
          path: results.sarif
          retention-days: 5

      # Upload the results to GitHub's code scanning dashboard (optional).
      # Commenting out will disable upload of results to your repo's Code Scanning dashboard
      - name: "Upload to code-scanning"
        uses: github/codeql-action/upload-sarif@5f8171a638ada777af81d42b55959a643bb29017 # v3.28.12
        with:
          sarif_file: results.sarif

CHANGELOG.md

Lines changed: 6 additions & 0 deletions
@@ -11,8 +11,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

  ## Unreleased

+ ### Added
+
  - `opentelemetry-instrumentation-asyncclick`: new instrumentation to trace asyncclick commands
    ([#3319](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3319))
+ - `opentelemetry-instrumentation-botocore`: Add support for GenAI tool events using Amazon Nova models and `InvokeModel*` APIs
+   ([#3385](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3385))

  ### Fixed

@@ -21,6 +25,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
  - `opentelemetry-instrumentation-dbapi`, `opentelemetry-instrumentation-django`,
    `opentelemetry-instrumentation-sqlalchemy`: Fix sqlcomment for non string query and composable object.
    ([#3113](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3113))
+ - `opentelemetry-instrumentation-grpc`: Fix error when using grpc versions <= 1.50.0 with unix sockets.
+   ([#3393](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3393))

  ## Version 1.31.0/0.52b0 (2025-03-12)

instrumentation-genai/README.md

Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
| Instrumentation | Supported Packages | Metrics support | Semconv status |
| --------------- | ------------------ | --------------- | -------------- |
| [opentelemetry-instrumentation-google-genai](./opentelemetry-instrumentation-google-genai) | google-genai >= 1.0.0 | No | development |
| [opentelemetry-instrumentation-openai-v2](./opentelemetry-instrumentation-openai-v2) | openai >= 1.26.0 | Yes | development |
| [opentelemetry-instrumentation-vertexai](./opentelemetry-instrumentation-vertexai) | google-cloud-aiplatform >= 1.64 | No | development |

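For orientation (not part of this diff): each package in the table follows the contrib repo's `BaseInstrumentor` pattern, so enabling one from application code is roughly the sketch below, assuming the usual `<Name>Instrumentor` entry point and already-configured tracer/meter/logger providers.

```python
# Minimal usage sketch; provider setup is application-specific and omitted here.
from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor

OpenAIInstrumentor().instrument()    # wraps the supported openai client calls
# ... application code using openai ...
OpenAIInstrumentor().uninstrument()  # restores the original methods
```
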
instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py

Lines changed: 2 additions & 2 deletions
@@ -619,7 +619,7 @@ async def instrumented_generate_content(


  # Disabling type checking because this is not yet implemented and tested fully.
- def _create_instrumented_async_generate_content_stream(  # pyright: ignore
+ def _create_instrumented_async_generate_content_stream(  # type: ignore
      snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper
  ):
      wrapped_func = snapshot.async_generate_content_stream
@@ -632,7 +632,7 @@ async def instrumented_generate_content_stream(
          contents: Union[ContentListUnion, ContentListUnionDict],
          config: Optional[GenerateContentConfigOrDict] = None,
          **kwargs: Any,
-     ) -> Awaitable[AsyncIterator[GenerateContentResponse]]:  # pyright: ignore
+     ) -> Awaitable[AsyncIterator[GenerateContentResponse]]:  # type: ignore
          helper = _GenerateContentInstrumentationHelper(
              self, otel_wrapper, model
          )
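
The rationale for the swap is not stated in the diff, but the practical difference between the two suppression comments is roughly the following (a small illustrative sketch, not code from the repo):

```python
# "# type: ignore" is the PEP 484 form: mypy, pyright, and other checkers all honor it.
def stream_factory_a(snapshot):  # type: ignore
    ...


# "# pyright: ignore" is pyright-specific: other checkers still report errors on this line.
def stream_factory_b(snapshot):  # pyright: ignore
    ...
```
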
New file (15 additions & 0 deletions)
@@ -0,0 +1,15 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

_instruments = ("google-genai >= 1.0.0",)

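`_instruments` is the contrib-repo convention for declaring the supported package range; the instrumentor returns it from `instrumentation_dependencies()`, which `BaseInstrumentor` consults before wrapping anything (the same pattern appears in the VertexAI diff further down). A minimal sketch of that wiring, with an illustrative class name:

```python
from typing import Collection

from opentelemetry.instrumentation.instrumentor import BaseInstrumentor

_instruments = ("google-genai >= 1.0.0",)


class GoogleGenAiInstrumentor(BaseInstrumentor):  # name illustrative only
    def instrumentation_dependencies(self) -> Collection[str]:
        # BaseInstrumentor checks these requirement strings against the
        # installed packages before calling _instrument().
        return _instruments

    def _instrument(self, **kwargs):
        ...

    def _uninstrument(self, **kwargs):
        ...
```
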
instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/package.py

Lines changed: 2 additions & 0 deletions
@@ -14,3 +14,5 @@


  _instruments = ("openai >= 1.26.0",)
+
+ _supports_metrics = True

instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md

Lines changed: 2 additions & 0 deletions
@@ -9,6 +9,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

  - Implement uninstrument for `opentelemetry-instrumentation-vertexai`
    ([#3328](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3328))
+ - VertexAI support for async calling
+   ([#3386](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3386))

  ## Version 2.0b0 (2025-02-24)

instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/__init__.py

Lines changed: 42 additions & 14 deletions
@@ -39,6 +39,8 @@
  ---
  """

+ from __future__ import annotations
+
  from typing import Any, Collection

  from wrapt import (
@@ -49,32 +51,54 @@
  from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
  from opentelemetry.instrumentation.utils import unwrap
  from opentelemetry.instrumentation.vertexai.package import _instruments
- from opentelemetry.instrumentation.vertexai.patch import (
-     generate_content_create,
- )
+ from opentelemetry.instrumentation.vertexai.patch import MethodWrappers
  from opentelemetry.instrumentation.vertexai.utils import is_content_enabled
  from opentelemetry.semconv.schemas import Schemas
  from opentelemetry.trace import get_tracer


- def _client_classes():
+ def _methods_to_wrap(
+     method_wrappers: MethodWrappers,
+ ):
      # This import is very slow, do it lazily in case instrument() is not called
-
      # pylint: disable=import-outside-toplevel
      from google.cloud.aiplatform_v1.services.prediction_service import (
+         async_client,
          client,
      )
+     from google.cloud.aiplatform_v1beta1.services.prediction_service import (
+         async_client as async_client_v1beta1,
+     )
      from google.cloud.aiplatform_v1beta1.services.prediction_service import (
          client as client_v1beta1,
      )

-     return (
+     for client_class in (
          client.PredictionServiceClient,
          client_v1beta1.PredictionServiceClient,
-     )
+     ):
+         yield (
+             client_class,
+             client_class.generate_content.__name__,  # type: ignore[reportUnknownMemberType]
+             method_wrappers.generate_content,
+         )
+
+     for client_class in (
+         async_client.PredictionServiceAsyncClient,
+         async_client_v1beta1.PredictionServiceAsyncClient,
+     ):
+         yield (
+             client_class,
+             client_class.generate_content.__name__,  # type: ignore[reportUnknownMemberType]
+             method_wrappers.agenerate_content,
+         )


  class VertexAIInstrumentor(BaseInstrumentor):
+     def __init__(self) -> None:
+         super().__init__()
+         self._methods_to_unwrap: list[tuple[Any, str]] = []
+
      def instrumentation_dependencies(self) -> Collection[str]:
          return _instruments

@@ -95,15 +119,19 @@ def _instrument(self, **kwargs: Any):
              event_logger_provider=event_logger_provider,
          )

-         for client_class in _client_classes():
+         method_wrappers = MethodWrappers(
+             tracer, event_logger, is_content_enabled()
+         )
+         for client_class, method_name, wrapper in _methods_to_wrap(
+             method_wrappers
+         ):
              wrap_function_wrapper(
                  client_class,
-                 name="generate_content",
-                 wrapper=generate_content_create(
-                     tracer, event_logger, is_content_enabled()
-                 ),
+                 name=method_name,
+                 wrapper=wrapper,
              )
+             self._methods_to_unwrap.append((client_class, method_name))

      def _uninstrument(self, **kwargs: Any) -> None:
-         for client_class in _client_classes():
-             unwrap(client_class, "generate_content")
+         for client_class, method_name in self._methods_to_unwrap:
+             unwrap(client_class, method_name)

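The wrapping machinery here is `wrapt`: `wrap_function_wrapper(target, name, wrapper)` replaces the named attribute with a proxy that invokes `wrapper(wrapped, instance, args, kwargs)`, which is why the `MethodWrappers` methods in the next file take that four-argument shape. A self-contained sketch of the mechanism on a toy class (not the real prediction-service clients):

```python
from wrapt import wrap_function_wrapper


class Client:  # stand-in for PredictionServiceClient
    def generate_content(self, prompt):
        return f"response to {prompt!r}"


def wrapper(wrapped, instance, args, kwargs):
    # wrapped is the original bound method, instance is the Client object
    print("before call")
    result = wrapped(*args, **kwargs)
    print("after call")
    return result


wrap_function_wrapper(Client, "generate_content", wrapper)
print(Client().generate_content("hi"))
```

`unwrap(client_class, method_name)` from `opentelemetry.instrumentation.utils` restores the original attribute, which is why the instrumentor now records every `(class, method_name)` pair it wrapped.
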
instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py

Lines changed: 79 additions & 27 deletions
@@ -14,9 +14,11 @@

  from __future__ import annotations

+ from contextlib import contextmanager
  from typing import (
      TYPE_CHECKING,
      Any,
+     Awaitable,
      Callable,
      MutableSequence,
  )
@@ -87,17 +89,17 @@ def _extract_params(
  )


- def generate_content_create(
-     tracer: Tracer, event_logger: EventLogger, capture_content: bool
- ):
-     """Wrap the `generate_content` method of the `GenerativeModel` class to trace it."""
+ class MethodWrappers:
+     def __init__(
+         self, tracer: Tracer, event_logger: EventLogger, capture_content: bool
+     ) -> None:
+         self.tracer = tracer
+         self.event_logger = event_logger
+         self.capture_content = capture_content

-     def traced_method(
-         wrapped: Callable[
-             ...,
-             prediction_service.GenerateContentResponse
-             | prediction_service_v1beta1.GenerateContentResponse,
-         ],
+     @contextmanager
+     def _with_instrumentation(
+         self,
          instance: client.PredictionServiceClient
          | client_v1beta1.PredictionServiceClient,
          args: Any,
@@ -111,32 +113,82 @@ def traced_method(
          }

          span_name = get_span_name(span_attributes)
-         with tracer.start_as_current_span(
+
+         with self.tracer.start_as_current_span(
              name=span_name,
              kind=SpanKind.CLIENT,
              attributes=span_attributes,
          ) as span:
              for event in request_to_events(
-                 params=params, capture_content=capture_content
+                 params=params, capture_content=self.capture_content
              ):
-                 event_logger.emit(event)
+                 self.event_logger.emit(event)

              # TODO: set error.type attribute
              # https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/gen-ai-spans.md
-             response = wrapped(*args, **kwargs)
-             # TODO: handle streaming
-             # if is_streaming(kwargs):
-             #     return StreamWrapper(
-             #         result, span, event_logger, capture_content
-             #     )
-
-             if span.is_recording():
-                 span.set_attributes(get_genai_response_attributes(response))
-             for event in response_to_events(
-                 response=response, capture_content=capture_content
-             ):
-                 event_logger.emit(event)

+             def handle_response(
+                 response: prediction_service.GenerateContentResponse
+                 | prediction_service_v1beta1.GenerateContentResponse,
+             ) -> None:
+                 if span.is_recording():
+                     # When streaming, this is called multiple times so attributes would be
+                     # overwritten. In practice, it looks like the API only returns the interesting
+                     # attributes on the last streamed response. However, I couldn't find
+                     # documentation for this and setting attributes shouldn't be too expensive.
+                     span.set_attributes(
+                         get_genai_response_attributes(response)
+                     )
+
+                 for event in response_to_events(
+                     response=response, capture_content=self.capture_content
+                 ):
+                     self.event_logger.emit(event)
+
+             yield handle_response
+
+     def generate_content(
+         self,
+         wrapped: Callable[
+             ...,
+             prediction_service.GenerateContentResponse
+             | prediction_service_v1beta1.GenerateContentResponse,
+         ],
+         instance: client.PredictionServiceClient
+         | client_v1beta1.PredictionServiceClient,
+         args: Any,
+         kwargs: Any,
+     ) -> (
+         prediction_service.GenerateContentResponse
+         | prediction_service_v1beta1.GenerateContentResponse
+     ):
+         with self._with_instrumentation(
+             instance, args, kwargs
+         ) as handle_response:
+             response = wrapped(*args, **kwargs)
+             handle_response(response)
              return response

-     return traced_method
+     async def agenerate_content(
+         self,
+         wrapped: Callable[
+             ...,
+             Awaitable[
+                 prediction_service.GenerateContentResponse
+                 | prediction_service_v1beta1.GenerateContentResponse
+             ],
+         ],
+         instance: client.PredictionServiceClient
+         | client_v1beta1.PredictionServiceClient,
+         args: Any,
+         kwargs: Any,
+     ) -> (
+         prediction_service.GenerateContentResponse
+         | prediction_service_v1beta1.GenerateContentResponse
+     ):
+         with self._with_instrumentation(
+             instance, args, kwargs
+         ) as handle_response:
+             response = await wrapped(*args, **kwargs)
+             handle_response(response)
+             return response

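The key idea of this refactor is that the synchronous and asynchronous wrappers share a single `@contextmanager` which opens the span, emits the request events, and yields a `handle_response` callback; only the `await` differs between the two entry points. A stripped-down sketch of that shape, with print statements standing in for the real tracer and event-logger plumbing:

```python
from contextlib import contextmanager


class MethodWrappersSketch:
    @contextmanager
    def _with_instrumentation(self, args, kwargs):
        print("start span, emit request events")

        def handle_response(response):
            print(f"record response attributes and events for {response!r}")

        # The instrumentation stays active while the caller runs the wrapped call.
        yield handle_response
        print("end span")

    def call(self, wrapped, args, kwargs):
        with self._with_instrumentation(args, kwargs) as handle_response:
            response = wrapped(*args, **kwargs)
            handle_response(response)
            return response

    async def acall(self, wrapped, args, kwargs):
        with self._with_instrumentation(args, kwargs) as handle_response:
            response = await wrapped(*args, **kwargs)
            handle_response(response)
            return response
```
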
instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/requirements.latest.txt

Lines changed: 2 additions & 2 deletions
@@ -44,8 +44,8 @@ charset-normalizer==3.4.0
  Deprecated==1.2.15
  docstring_parser==0.16
  exceptiongroup==1.2.2
- google-api-core==2.23.0
- google-auth==2.36.0
+ google-api-core[grpc, async_rest]==2.23.0
+ google-auth[aiohttp]==2.36.0
  google-cloud-aiplatform==1.79.0
  google-cloud-bigquery==3.27.0
  google-cloud-core==2.4.1
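
The extras are presumably what the new async code paths need: `google-api-core[grpc, async_rest]` pulls in the gRPC and async REST transport dependencies, and `google-auth[aiohttp]` adds the aiohttp-based support used for asynchronous requests.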
