From aa9e148e7841a93f0de6f5b2d278730078afb737 Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Mon, 25 Nov 2024 21:50:02 +0000 Subject: [PATCH 01/16] Add VertexAI instrumentation boilerplate --- .../CHANGELOG.md | 10 + .../LICENSE | 201 ++++++++++++++++++ .../README.rst | 29 +++ .../example/.env | 10 + .../example/README.rst | 38 ++++ .../example/main.py | 16 ++ .../example/requirements.txt | 6 + .../pyproject.toml | 49 +++++ .../instrumentation/vertexai_v2/version.py | 15 ++ .../test-requirements-0.txt | 56 +++++ .../test-requirements-1.txt | 52 +++++ 11 files changed, 482 insertions(+) create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/CHANGELOG.md create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/LICENSE create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/README.rst create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/example/.env create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/example/README.rst create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/example/main.py create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/example/requirements.txt create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/pyproject.toml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/version.py create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-0.txt create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-1.txt diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/CHANGELOG.md b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/CHANGELOG.md new file mode 100644 index 0000000000..33e7cea173 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## Unreleased + +- Add boilerplate for `opentelemetry-instrumentation-vertexai-v2` diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/LICENSE b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/README.rst b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/README.rst
new file mode 100644
index 0000000000..6a46a55c85
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/README.rst
@@ -0,0 +1,29 @@
+OpenTelemetry VertexAI Instrumentation
+======================================
+
+|pypi|
+
+.. |pypi| image:: https://badge.fury.io/py/opentelemetry-instrumentation-vertexai-v2.svg
+   :target: https://pypi.org/project/opentelemetry-instrumentation-vertexai-v2/
+
+This library allows tracing LLM requests and logging of messages made by the
+`VertexAI Python API library `_.
+
+
+Installation
+------------
+
+If your application is already instrumented with OpenTelemetry, add this
+package to your requirements.
+::
+
+    pip install opentelemetry-instrumentation-vertexai-v2
+
+If you don't have a VertexAI application yet, try our `example `_.
+
+References
+----------
+* `OpenTelemetry VertexAI Instrumentation `_
+* `OpenTelemetry Project `_
+* `OpenTelemetry Python Examples `_
+
diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/example/.env b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/example/.env
new file mode 100644
index 0000000000..0a92e5539d
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/example/.env
@@ -0,0 +1,10 @@
+OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318
+OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf
+OTEL_SERVICE_NAME=opentelemetry-python-vertexai
+
+# Change to 'false' to disable logging
+OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED=true
+# Change to 'console' if your OTLP endpoint doesn't support logs
+OTEL_LOGS_EXPORTER=otlp_proto_http
+# Change to 'false' to hide prompt and completion content
+OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true
diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/example/README.rst b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/example/README.rst
new file mode 100644
index 0000000000..6fe161f82f
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/example/README.rst
@@ -0,0 +1,70 @@
+OpenTelemetry VertexAI Instrumentation Example
+==============================================
+
+This is an example of how to instrument VertexAI calls with zero code changes,
+using `opentelemetry-instrument`.
+
+When `main.py `_ is run, it exports traces and logs to an OTLP
+compatible endpoint. Traces include details such as the model used and the
+duration of the chat request. Logs capture the chat request and the generated
+response, providing a comprehensive view of the performance and behavior of
+your VertexAI requests.
+
+Setup
+-----
+
+An OTLP-compatible endpoint should be listening for traces and logs on
+http://localhost:4318. If yours listens elsewhere, update "OTEL_EXPORTER_OTLP_ENDPOINT" in .env accordingly.
+
+Next, set up a virtual environment:
+
+::
+
+    python3 -m venv .venv
+    source .venv/bin/activate
+    pip install "python-dotenv[cli]"
+    pip install -r requirements.txt
+
+Run
+---
+
+Run the example:
+
+::
+
+    dotenv run -- opentelemetry-instrument python main.py
+
+You should see a poem generated by VertexAI while traces and logs export to your
+configured observability tool.
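+
+Manual instrumentation (alternative)
+------------------------------------
+
+If you prefer explicit SDK wiring over `opentelemetry-instrument`, the sketch
+below is a rough trace-only equivalent (logs omitted). It assumes the
+`VertexAIInstrumentor` entry point declared in this package's `pyproject.toml`.
+The OTLP exporter reads its endpoint from the environment, so run the script
+under `dotenv run -- python ...` to pick up the `.env` values above.
+
+::
+
+    import vertexai
+    from vertexai.generative_models import GenerativeModel
+
+    from opentelemetry import trace
+    from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+    from opentelemetry.instrumentation.vertexai_v2 import VertexAIInstrumentor
+    from opentelemetry.sdk.trace import TracerProvider
+    from opentelemetry.sdk.trace.export import BatchSpanProcessor
+
+    # Export spans over OTLP/HTTP, honoring the OTEL_EXPORTER_OTLP_* variables.
+    provider = TracerProvider()
+    provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter()))
+    trace.set_tracer_provider(provider)
+
+    # Patch the VertexAI client library, then use it as usual.
+    VertexAIInstrumentor().instrument()
+
+    vertexai.init(location="us-central1")
+    model = GenerativeModel("gemini-1.5-flash-002")
+    print(model.generate_content("Write a short poem on OpenTelemetry.").text)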
diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/example/main.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/example/main.py
new file mode 100644
index 0000000000..2ddfdb73bb
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/example/main.py
@@ -0,0 +1,16 @@
+import vertexai
+from vertexai.generative_models import GenerativeModel
+
+vertexai.init(location="us-central1")
+
+model = GenerativeModel("gemini-1.5-flash-002")
+
+response = model.generate_content("Write a short poem on OpenTelemetry.")
+
+print(response.text)
+# Example response:
+# Through every service, every thread,
+# the traces show where requests led.
+# Each span a step, each log a clue,
+# observability in view.
+# ...
diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/example/requirements.txt b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/example/requirements.txt
new file mode 100644
index 0000000000..3aea8bcb24
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/example/requirements.txt
@@ -0,0 +1,6 @@
+google-cloud-aiplatform>=1.64
+
+opentelemetry-sdk~=1.28.2
+opentelemetry-exporter-otlp-proto-http~=1.28.2
+opentelemetry-distro~=0.49b2
+opentelemetry-instrumentation-vertexai-v2~=2.0b0
diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/pyproject.toml b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/pyproject.toml
new file mode 100644
index 0000000000..45faacdc06
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/pyproject.toml
@@ -0,0 +1,49 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "opentelemetry-instrumentation-vertexai-v2"
+dynamic = ["version"]
+description = "OpenTelemetry Official VertexAI instrumentation"
+readme = "README.rst"
+license = "Apache-2.0"
+requires-python = ">=3.8"
+authors = [
+  { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
+]
+classifiers = [
+  "Development Status :: 4 - Beta",
+  "Intended Audience :: Developers",
+  "License :: OSI Approved :: Apache Software License",
+  "Programming Language :: Python",
+  "Programming Language :: Python :: 3",
+  "Programming Language :: Python :: 3.8",
+  "Programming Language :: Python :: 3.9",
+  "Programming Language :: Python :: 3.10",
+  "Programming Language :: Python :: 3.11",
+  "Programming Language :: Python :: 3.12",
+]
+dependencies = [
+  "opentelemetry-api ~= 1.28",
+  "opentelemetry-instrumentation ~= 0.49b0",
+  "opentelemetry-semantic-conventions ~= 0.49b0",
+]
+
+[project.optional-dependencies]
+instruments = ["google-cloud-aiplatform >= 1.64"]
+
+[project.entry-points.opentelemetry_instrumentor]
+vertexai = "opentelemetry.instrumentation.vertexai_v2:VertexAIInstrumentor"
+
+[project.urls]
+Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2"
+
+[tool.hatch.version]
+path = "src/opentelemetry/instrumentation/vertexai_v2/version.py"
+
+[tool.hatch.build.targets.sdist]
+include = ["/src", "/tests"]
+
+[tool.hatch.build.targets.wheel]
+packages = ["src/opentelemetry"]
diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/version.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/version.py
new file mode 100644
index
0000000000..5b77207d9d --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/version.py @@ -0,0 +1,15 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__version__ = "2.1b0.dev" diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-0.txt b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-0.txt new file mode 100644 index 0000000000..f8290ac243 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-0.txt @@ -0,0 +1,56 @@ +annotated-types==0.7.0 +cachetools==5.5.0 +certifi==2024.8.30 +charset-normalizer==3.4.0 +Deprecated==1.2.15 +docstring_parser==0.16 +google-api-core==2.23.0 +google-auth==2.36.0 +google-cloud-aiplatform==1.73.0 +google-cloud-bigquery==3.27.0 +google-cloud-core==2.4.1 +google-cloud-resource-manager==1.13.1 +google-cloud-storage==2.18.2 +google-crc32c==1.6.0 +google-resumable-media==2.7.2 +googleapis-common-protos==1.66.0 +grpc-google-iam-v1==0.13.1 +grpcio==1.68.0 +grpcio-status==1.68.0 +idna==3.10 +importlib_metadata==8.5.0 +iniconfig==2.0.0 +multidict==6.1.0 +numpy==2.1.3 +packaging==24.2 +pluggy==1.5.0 +propcache==0.2.0 +proto-plus==1.25.0 +protobuf==5.28.3 +pyasn1==0.6.1 +pyasn1_modules==0.4.1 +pydantic==2.10.1 +pydantic_core==2.27.1 +pytest==7.4.4 +pytest-asyncio==0.21.0 +pytest-vcr==1.0.2 +python-dateutil==2.9.0.post0 +PyYAML==6.0.2 +requests==2.32.3 +rsa==4.9 +shapely==2.0.6 +six==1.16.0 +typing_extensions==4.12.2 +urllib3==2.2.3 +vcrpy==6.0.2 +wrapt==1.17.0 +yarl==1.18.0 +zipp==3.21.0 + +# when updating, also update in pyproject.toml +opentelemetry-api==1.28 +opentelemetry-sdk==1.28 +opentelemetry-semantic-conventions==0.49b0 +opentelemetry-instrumentation==0.49b0 + +-e instrumentation-genai/opentelemetry-instrumentation-vertexai-v2[instruments] diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-1.txt b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-1.txt new file mode 100644 index 0000000000..c1f50b6a2d --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-1.txt @@ -0,0 +1,52 @@ +annotated-types==0.7.0 +cachetools==5.5.0 +certifi==2024.8.30 +charset-normalizer==3.4.0 +Deprecated==1.2.15 +docstring_parser==0.16 +google-api-core==2.23.0 +google-auth==2.36.0 +google-cloud-aiplatform==1.73.0 +google-cloud-bigquery==3.27.0 +google-cloud-core==2.4.1 +google-cloud-resource-manager==1.13.1 +google-cloud-storage==2.18.2 +google-crc32c==1.6.0 +google-resumable-media==2.7.2 +googleapis-common-protos==1.66.0 +grpc-google-iam-v1==0.13.1 +grpcio==1.68.0 +grpcio-status==1.68.0 +idna==3.10 +importlib_metadata==8.5.0 +iniconfig==2.0.0 +multidict==6.1.0 +numpy==2.1.3 +packaging==24.2 +pluggy==1.5.0 +propcache==0.2.0 +proto-plus==1.25.0 +protobuf==5.28.3 +pyasn1==0.6.1 +pyasn1_modules==0.4.1 +pydantic==2.10.1 
+pydantic_core==2.27.1 +pytest==7.4.4 +pytest-asyncio==0.21.0 +pytest-vcr==1.0.2 +python-dateutil==2.9.0.post0 +PyYAML==6.0.2 +requests==2.32.3 +rsa==4.9 +shapely==2.0.6 +six==1.16.0 +typing_extensions==4.12.2 +urllib3==2.2.3 +vcrpy==6.0.2 +wrapt==1.17.0 +yarl==1.18.0 +zipp==3.21.0 +# test with the latest version of opentelemetry-api, sdk, and semantic conventions + +-e opentelemetry-instrumentation +-e instrumentation-genai/opentelemetry-instrumentation-vertexai-v2[instruments] From 6081d3a3b1b67b7af6ebb7bb11a0ae183bf7f104 Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Mon, 25 Nov 2024 21:54:21 +0000 Subject: [PATCH 02/16] Copy in files from openllmetry --- .../instrumentation/vertexai_v2/__init__.py | 368 ++++++++++++++++++ .../instrumentation/vertexai_v2/config.py | 2 + .../instrumentation/vertexai_v2/utils.py | 29 ++ .../tests/__init__.py | 1 + .../tests/conftest.py | 34 ++ .../tests/disabled_test_bison.py | 258 ++++++++++++ .../tests/disabled_test_gemini.py | 51 +++ .../tests/test_placeholder.py | 2 + 8 files changed, 745 insertions(+) create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/config.py create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/__init__.py create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_bison.py create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_gemini.py create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_placeholder.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py new file mode 100644 index 0000000000..da692b8494 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py @@ -0,0 +1,368 @@ +"""OpenTelemetry Vertex AI instrumentation""" + +import logging +import os +import types +from typing import Collection +from opentelemetry.instrumentation.vertexai.config import Config +from opentelemetry.instrumentation.vertexai.utils import dont_throw +from wrapt import wrap_function_wrapper + +from opentelemetry import context as context_api +from opentelemetry.trace import get_tracer, SpanKind +from opentelemetry.trace.status import Status, StatusCode + +from opentelemetry.instrumentation.instrumentor import BaseInstrumentor +from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY, unwrap + +from opentelemetry.semconv_ai import ( + SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, + SpanAttributes, + LLMRequestTypeValues, +) +from opentelemetry.instrumentation.vertexai.version import __version__ + +logger = logging.getLogger(__name__) + +_instruments = ("google-cloud-aiplatform >= 1.38.1",) + +WRAPPED_METHODS = [ + { + "package": "vertexai.generative_models", + "object": "GenerativeModel", + "method": "generate_content", + 
"span_name": "vertexai.generate_content", + "is_async": False, + }, + { + "package": "vertexai.generative_models", + "object": "GenerativeModel", + "method": "generate_content_async", + "span_name": "vertexai.generate_content_async", + "is_async": True, + }, + { + "package": "vertexai.preview.generative_models", + "object": "GenerativeModel", + "method": "generate_content", + "span_name": "vertexai.generate_content", + "is_async": False, + }, + { + "package": "vertexai.preview.generative_models", + "object": "GenerativeModel", + "method": "generate_content_async", + "span_name": "vertexai.generate_content_async", + "is_async": True, + }, + { + "package": "vertexai.language_models", + "object": "TextGenerationModel", + "method": "predict", + "span_name": "vertexai.predict", + "is_async": False, + }, + { + "package": "vertexai.language_models", + "object": "TextGenerationModel", + "method": "predict_async", + "span_name": "vertexai.predict_async", + "is_async": True, + }, + { + "package": "vertexai.language_models", + "object": "TextGenerationModel", + "method": "predict_streaming", + "span_name": "vertexai.predict_streaming", + "is_async": False, + }, + { + "package": "vertexai.language_models", + "object": "TextGenerationModel", + "method": "predict_streaming_async", + "span_name": "vertexai.predict_streaming_async", + "is_async": True, + }, + { + "package": "vertexai.language_models", + "object": "ChatSession", + "method": "send_message", + "span_name": "vertexai.send_message", + "is_async": False, + }, + { + "package": "vertexai.language_models", + "object": "ChatSession", + "method": "send_message_streaming", + "span_name": "vertexai.send_message_streaming", + "is_async": False, + }, +] + + +def should_send_prompts(): + return ( + os.getenv("TRACELOOP_TRACE_CONTENT") or "true" + ).lower() == "true" or context_api.get_value("override_enable_content_tracing") + + +def is_streaming_response(response): + return isinstance(response, types.GeneratorType) + + +def is_async_streaming_response(response): + return isinstance(response, types.AsyncGeneratorType) + + +def _set_span_attribute(span, name, value): + if value is not None: + if value != "": + span.set_attribute(name, value) + return + + +def _set_input_attributes(span, args, kwargs, llm_model): + if should_send_prompts() and args is not None and len(args) > 0: + prompt = "" + for arg in args: + if isinstance(arg, str): + prompt = f"{prompt}{arg}\n" + elif isinstance(arg, list): + for subarg in arg: + prompt = f"{prompt}{subarg}\n" + + _set_span_attribute( + span, + f"{SpanAttributes.LLM_PROMPTS}.0.user", + prompt, + ) + + _set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, llm_model) + _set_span_attribute( + span, f"{SpanAttributes.LLM_PROMPTS}.0.user", kwargs.get("prompt") + ) + _set_span_attribute( + span, SpanAttributes.LLM_REQUEST_TEMPERATURE, kwargs.get("temperature") + ) + _set_span_attribute( + span, SpanAttributes.LLM_REQUEST_MAX_TOKENS, kwargs.get("max_output_tokens") + ) + _set_span_attribute(span, SpanAttributes.LLM_REQUEST_TOP_P, kwargs.get("top_p")) + _set_span_attribute(span, SpanAttributes.LLM_TOP_K, kwargs.get("top_k")) + _set_span_attribute( + span, SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty") + ) + _set_span_attribute( + span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty") + ) + + return + + +@dont_throw +def _set_response_attributes(span, llm_model, generation_text, token_usage): + _set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, llm_model) + + if 
token_usage: + _set_span_attribute( + span, + SpanAttributes.LLM_USAGE_TOTAL_TOKENS, + token_usage.total_token_count, + ) + _set_span_attribute( + span, + SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, + token_usage.candidates_token_count, + ) + _set_span_attribute( + span, + SpanAttributes.LLM_USAGE_PROMPT_TOKENS, + token_usage.prompt_token_count, + ) + + _set_span_attribute(span, f"{SpanAttributes.LLM_COMPLETIONS}.0.role", "assistant") + _set_span_attribute( + span, + f"{SpanAttributes.LLM_COMPLETIONS}.0.content", + generation_text, + ) + + +def _build_from_streaming_response(span, response, llm_model): + complete_response = "" + token_usage = None + for item in response: + item_to_yield = item + complete_response += str(item.text) + if item.usage_metadata: + token_usage = item.usage_metadata + + yield item_to_yield + + _set_response_attributes(span, llm_model, complete_response, token_usage) + + span.set_status(Status(StatusCode.OK)) + span.end() + + +async def _abuild_from_streaming_response(span, response, llm_model): + complete_response = "" + token_usage = None + async for item in response: + item_to_yield = item + complete_response += str(item.text) + if item.usage_metadata: + token_usage = item.usage_metadata + + yield item_to_yield + + _set_response_attributes(span, llm_model, complete_response, token_usage) + + span.set_status(Status(StatusCode.OK)) + span.end() + + +@dont_throw +def _handle_request(span, args, kwargs, llm_model): + if span.is_recording(): + _set_input_attributes(span, args, kwargs, llm_model) + + +@dont_throw +def _handle_response(span, response, llm_model): + if span.is_recording(): + _set_response_attributes( + span, llm_model, response.candidates[0].text, response.usage_metadata + ) + + span.set_status(Status(StatusCode.OK)) + + +def _with_tracer_wrapper(func): + """Helper for providing tracer for wrapper functions.""" + + def _with_tracer(tracer, to_wrap): + def wrapper(wrapped, instance, args, kwargs): + return func(tracer, to_wrap, wrapped, instance, args, kwargs) + + return wrapper + + return _with_tracer + + +@_with_tracer_wrapper +async def _awrap(tracer, to_wrap, wrapped, instance, args, kwargs): + """Instruments and calls every function defined in TO_WRAP.""" + if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value( + SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY + ): + return await wrapped(*args, **kwargs) + + llm_model = "unknown" + if hasattr(instance, "_model_id"): + llm_model = instance._model_id + if hasattr(instance, "_model_name"): + llm_model = instance._model_name.replace("publishers/google/models/", "") + + name = to_wrap.get("span_name") + span = tracer.start_span( + name, + kind=SpanKind.CLIENT, + attributes={ + SpanAttributes.LLM_SYSTEM: "VertexAI", + SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value, + }, + ) + + _handle_request(span, args, kwargs, llm_model) + + response = await wrapped(*args, **kwargs) + + if response: + if is_streaming_response(response): + return _build_from_streaming_response(span, response, llm_model) + elif is_async_streaming_response(response): + return _abuild_from_streaming_response(span, response, llm_model) + else: + _handle_response(span, response, llm_model) + + span.end() + return response + + +@_with_tracer_wrapper +def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs): + """Instruments and calls every function defined in TO_WRAP.""" + if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value( + 
SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+    ):
+        return wrapped(*args, **kwargs)
+
+    llm_model = "unknown"
+    if hasattr(instance, "_model_id"):
+        llm_model = instance._model_id
+    if hasattr(instance, "_model_name"):
+        llm_model = instance._model_name.replace("publishers/google/models/", "")
+
+    name = to_wrap.get("span_name")
+    span = tracer.start_span(
+        name,
+        kind=SpanKind.CLIENT,
+        attributes={
+            SpanAttributes.LLM_SYSTEM: "VertexAI",
+            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
+        },
+    )
+
+    _handle_request(span, args, kwargs, llm_model)
+
+    response = wrapped(*args, **kwargs)
+
+    if response:
+        if is_streaming_response(response):
+            return _build_from_streaming_response(span, response, llm_model)
+        elif is_async_streaming_response(response):
+            return _abuild_from_streaming_response(span, response, llm_model)
+        else:
+            _handle_response(span, response, llm_model)
+
+    span.end()
+    return response
+
+
+class VertexAIInstrumentor(BaseInstrumentor):
+    """An instrumentor for VertexAI's client library."""
+
+    def __init__(self, exception_logger=None):
+        super().__init__()
+        Config.exception_logger = exception_logger
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs):
+        tracer_provider = kwargs.get("tracer_provider")
+        tracer = get_tracer(__name__, __version__, tracer_provider)
+        for wrapped_method in WRAPPED_METHODS:
+            wrap_package = wrapped_method.get("package")
+            wrap_object = wrapped_method.get("object")
+            wrap_method = wrapped_method.get("method")
+
+            wrap_function_wrapper(
+                wrap_package,
+                f"{wrap_object}.{wrap_method}",
+                (
+                    _awrap(tracer, wrapped_method)
+                    if wrapped_method.get("is_async")
+                    else _wrap(tracer, wrapped_method)
+                ),
+            )
+
+    def _uninstrument(self, **kwargs):
+        for wrapped_method in WRAPPED_METHODS:
+            wrap_package = wrapped_method.get("package")
+            wrap_object = wrapped_method.get("object")
+            unwrap(
+                f"{wrap_package}.{wrap_object}",
+                wrapped_method.get("method", ""),
+            )
diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/config.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/config.py
new file mode 100644
index 0000000000..4689e9292f
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/config.py
@@ -0,0 +1,2 @@
+class Config:
+    exception_logger = None
diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py
new file mode 100644
index 0000000000..7ce434fa10
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py
@@ -0,0 +1,29 @@
+import logging
+import traceback
+
+from opentelemetry.instrumentation.vertexai.config import Config
+
+
+def dont_throw(func):
+    """
+    A decorator that wraps the passed-in function and logs exceptions instead of throwing them.
+ + @param func: The function to wrap + @return: The wrapper function + """ + # Obtain a logger specific to the function's module + logger = logging.getLogger(func.__module__) + + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as e: + logger.debug( + "OpenLLMetry failed to trace in %s, error: %s", + func.__name__, + traceback.format_exc(), + ) + if Config.exception_logger: + Config.exception_logger(e) + + return wrapper diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/__init__.py new file mode 100644 index 0000000000..d8e96c603f --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/__init__.py @@ -0,0 +1 @@ +"""unit tests.""" diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py new file mode 100644 index 0000000000..f25ad9eeac --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py @@ -0,0 +1,34 @@ +"""Unit tests configuration module.""" + +import pytest +from opentelemetry import trace +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter +from opentelemetry.sdk.trace.export import SimpleSpanProcessor +from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor + +pytest_plugins = [] + + +@pytest.fixture(scope="session") +def exporter(): + exporter = InMemorySpanExporter() + processor = SimpleSpanProcessor(exporter) + + provider = TracerProvider() + provider.add_span_processor(processor) + trace.set_tracer_provider(provider) + + VertexAIInstrumentor().instrument() + + return exporter + + +@pytest.fixture(autouse=True) +def clear_exporter(exporter): + exporter.clear() + + +@pytest.fixture(scope="module") +def vcr_config(): + return {"filter_headers": ["authorization"]} diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_bison.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_bison.py new file mode 100644 index 0000000000..f526899450 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_bison.py @@ -0,0 +1,258 @@ +import asyncio + +import pytest +import vertexai +from opentelemetry.semconv_ai import SpanAttributes +from vertexai.language_models import TextGenerationModel, ChatModel, InputOutputTextPair + +vertexai.init() + + +@pytest.mark.vcr +def test_vertexai_predict(exporter): + parameters = { + "max_output_tokens": 256, + "top_p": 0.8, + "top_k": 40, + } + + model = TextGenerationModel.from_pretrained("text-bison@001") + response = model.predict( + "Give me ten interview questions for the role of program manager.", + **parameters, + ) + + response = response.text + + spans = exporter.get_finished_spans() + assert [span.name for span in spans] == [ + "vertexai.predict", + ] + + vertexai_span = spans[0] + assert ( + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "text-bison@001" + ) + assert ( + "Give me ten interview questions for the role of program manager." 
+ in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] + ) + assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.8 + assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 + assert ( + vertexai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"] + == response + ) + + +@pytest.mark.vcr +def test_vertexai_predict_async(exporter): + async def async_predict_text() -> str: + """Ideation example with a Large Language Model""" + + parameters = { + "max_output_tokens": 256, + "top_p": 0.8, + "top_k": 40, + } + + model = TextGenerationModel.from_pretrained("text-bison@001") + response = await model.predict_async( + "Give me ten interview questions for the role of program manager.", + **parameters, + ) + + return response.text + + response = asyncio.run(async_predict_text()) + + spans = exporter.get_finished_spans() + assert [span.name for span in spans] == [ + "vertexai.predict", + ] + + vertexai_span = spans[0] + assert ( + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "text-bison@001" + ) + assert ( + "Give me ten interview questions for the role of program manager." + in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] + ) + assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.8 + assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 + assert ( + vertexai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"] + == response + ) + + +@pytest.mark.vcr +def test_vertexai_stream(exporter): + text_generation_model = TextGenerationModel.from_pretrained("text-bison") + parameters = { + "max_output_tokens": 256, + "top_p": 0.8, + "top_k": 40, + } + responses = text_generation_model.predict_streaming( + prompt="Give me ten interview questions for the role of program manager.", + **parameters, + ) + + result = [response.text for response in responses] + response = result + + spans = exporter.get_finished_spans() + assert [span.name for span in spans] == [ + "vertexai.predict", + ] + + vertexai_span = spans[0] + assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "text-bison" + assert ( + "Give me ten interview questions for the role of program manager." 
+ in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] + ) + assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.8 + assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 + assert vertexai_span.attributes[ + f"{SpanAttributes.LLM_COMPLETIONS}.0.content" + ] == "".join(response) + + +@pytest.mark.vcr +def test_vertexai_stream_async(exporter): + async def async_streaming_prediction() -> list: + """Streaming Text Example with a Large Language Model""" + + text_generation_model = TextGenerationModel.from_pretrained("text-bison") + parameters = { + "max_output_tokens": 256, + "top_p": 0.8, + "top_k": 40, + } + + responses = text_generation_model.predict_streaming_async( + prompt="Give me ten interview questions for the role of program manager.", + **parameters, + ) + result = [response.text async for response in responses] + return result + + response = asyncio.run(async_streaming_prediction()) + + spans = exporter.get_finished_spans() + assert [span.name for span in spans] == [ + "vertexai.predict", + ] + + vertexai_span = spans[0] + assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "text-bison" + assert ( + "Give me ten interview questions for the role of program manager." + in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] + ) + assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.8 + assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 + assert vertexai_span.attributes[ + f"{SpanAttributes.LLM_COMPLETIONS}.0.content" + ] == "".join(response) + + +@pytest.mark.vcr +def test_vertexai_chat(exporter): + chat_model = ChatModel.from_pretrained("chat-bison@001") + + parameters = { + "max_output_tokens": 256, + "top_p": 0.95, + "top_k": 40, + } + + chat = chat_model.start_chat( + context="My name is Miles. You are an astronomer, knowledgeable about the solar system.", + examples=[ + InputOutputTextPair( + input_text="How many moons does Mars have?", + output_text="The planet Mars has two moons, Phobos and Deimos.", + ), + ], + ) + + response = chat.send_message( + "How many planets are there in the solar system?", **parameters + ) + + response = response.text + + spans = exporter.get_finished_spans() + assert [span.name for span in spans] == [ + "vertexai.send_message", + ] + + vertexai_span = spans[0] + assert ( + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "chat-bison@001" + ) + assert ( + "How many planets are there in the solar system?" + in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] + ) + assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.95 + assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 + assert ( + vertexai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"] + == response + ) + + +@pytest.mark.vcr +def test_vertexai_chat_stream(exporter): + chat_model = ChatModel.from_pretrained("chat-bison@001") + + parameters = { + "temperature": 0.8, + "max_output_tokens": 256, + "top_p": 0.95, + "top_k": 40, + } + + chat = chat_model.start_chat( + context="My name is Miles. 
You are an astronomer, knowledgeable about the solar system.", + examples=[ + InputOutputTextPair( + input_text="How many moons does Mars have?", + output_text="The planet Mars has two moons, Phobos and Deimos.", + ), + ], + ) + + responses = chat.send_message_streaming( + message="How many planets are there in the solar system?", **parameters + ) + + result = [response.text for response in responses] + response = result + + spans = exporter.get_finished_spans() + assert [span.name for span in spans] == [ + "vertexai.send_message", + ] + + vertexai_span = spans[0] + assert ( + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "chat-bison@001" + ) + assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.95 + assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] == 0.8 + assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 + assert vertexai_span.attributes[ + f"{SpanAttributes.LLM_COMPLETIONS}.0.content" + ] == "".join(response) diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_gemini.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_gemini.py new file mode 100644 index 0000000000..e72631c2c9 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_gemini.py @@ -0,0 +1,51 @@ +import pytest +import vertexai +from opentelemetry.semconv_ai import SpanAttributes +from vertexai.preview.generative_models import GenerativeModel, Part + +vertexai.init() + + +@pytest.mark.vcr +def test_vertexai_generate_content(exporter): + multimodal_model = GenerativeModel("gemini-pro-vision") + response = multimodal_model.generate_content( + [ + Part.from_uri( + "gs://generativeai-downloads/images/scones.jpg", + mime_type="image/jpeg", + ), + "what is shown in this image?", + ] + ) + + spans = exporter.get_finished_spans() + assert [span.name for span in spans] == [ + "vertexai.generate_content", + ] + + vertexai_span = spans[0] + assert ( + "what is shown in this image?" 
+ in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] + ) + assert ( + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] + == "gemini-pro-vision" + ) + assert ( + vertexai_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] + == response._raw_response.usage_metadata.total_token_count + ) + assert ( + vertexai_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] + == response._raw_response.usage_metadata.prompt_token_count + ) + assert ( + vertexai_span.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] + == response._raw_response.usage_metadata.candidates_token_count + ) + assert ( + vertexai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"] + == response.text + ) diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_placeholder.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_placeholder.py new file mode 100644 index 0000000000..201975fcc0 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_placeholder.py @@ -0,0 +1,2 @@ +def test_placeholder(): + pass From d36bfee47a5670e8ddef7655033f4b67a38e945a Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Mon, 25 Nov 2024 21:55:55 +0000 Subject: [PATCH 03/16] Add OTel copyright headers --- .../instrumentation/vertexai_v2/__init__.py | 68 ++++++++++++++----- .../instrumentation/vertexai_v2/config.py | 2 - .../instrumentation/vertexai_v2/utils.py | 14 ++++ 3 files changed, 66 insertions(+), 18 deletions(-) delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/config.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py index da692b8494..093a4abd8d 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py @@ -1,3 +1,17 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ """OpenTelemetry Vertex AI instrumentation""" import logging @@ -13,7 +27,10 @@ from opentelemetry.trace.status import Status, StatusCode from opentelemetry.instrumentation.instrumentor import BaseInstrumentor -from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY, unwrap +from opentelemetry.instrumentation.utils import ( + _SUPPRESS_INSTRUMENTATION_KEY, + unwrap, +) from opentelemetry.semconv_ai import ( SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, @@ -103,7 +120,9 @@ def should_send_prompts(): return ( os.getenv("TRACELOOP_TRACE_CONTENT") or "true" - ).lower() == "true" or context_api.get_value("override_enable_content_tracing") + ).lower() == "true" or context_api.get_value( + "override_enable_content_tracing" + ) def is_streaming_response(response): @@ -145,15 +164,23 @@ def _set_input_attributes(span, args, kwargs, llm_model): span, SpanAttributes.LLM_REQUEST_TEMPERATURE, kwargs.get("temperature") ) _set_span_attribute( - span, SpanAttributes.LLM_REQUEST_MAX_TOKENS, kwargs.get("max_output_tokens") + span, + SpanAttributes.LLM_REQUEST_MAX_TOKENS, + kwargs.get("max_output_tokens"), + ) + _set_span_attribute( + span, SpanAttributes.LLM_REQUEST_TOP_P, kwargs.get("top_p") ) - _set_span_attribute(span, SpanAttributes.LLM_REQUEST_TOP_P, kwargs.get("top_p")) _set_span_attribute(span, SpanAttributes.LLM_TOP_K, kwargs.get("top_k")) _set_span_attribute( - span, SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty") + span, + SpanAttributes.LLM_PRESENCE_PENALTY, + kwargs.get("presence_penalty"), ) _set_span_attribute( - span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty") + span, + SpanAttributes.LLM_FREQUENCY_PENALTY, + kwargs.get("frequency_penalty"), ) return @@ -180,7 +207,9 @@ def _set_response_attributes(span, llm_model, generation_text, token_usage): token_usage.prompt_token_count, ) - _set_span_attribute(span, f"{SpanAttributes.LLM_COMPLETIONS}.0.role", "assistant") + _set_span_attribute( + span, f"{SpanAttributes.LLM_COMPLETIONS}.0.role", "assistant" + ) _set_span_attribute( span, f"{SpanAttributes.LLM_COMPLETIONS}.0.content", @@ -232,7 +261,10 @@ def _handle_request(span, args, kwargs, llm_model): def _handle_response(span, response, llm_model): if span.is_recording(): _set_response_attributes( - span, llm_model, response.candidates[0].text, response.usage_metadata + span, + llm_model, + response.candidates[0].text, + response.usage_metadata, ) span.set_status(Status(StatusCode.OK)) @@ -253,16 +285,18 @@ def wrapper(wrapped, instance, args, kwargs): @_with_tracer_wrapper async def _awrap(tracer, to_wrap, wrapped, instance, args, kwargs): """Instruments and calls every function defined in TO_WRAP.""" - if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value( - SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY - ): + if context_api.get_value( + _SUPPRESS_INSTRUMENTATION_KEY + ) or context_api.get_value(SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY): return await wrapped(*args, **kwargs) llm_model = "unknown" if hasattr(instance, "_model_id"): llm_model = instance._model_id if hasattr(instance, "_model_name"): - llm_model = instance._model_name.replace("publishers/google/models/", "") + llm_model = instance._model_name.replace( + "publishers/google/models/", "" + ) name = to_wrap.get("span_name") span = tracer.start_span( @@ -293,16 +327,18 @@ async def _awrap(tracer, to_wrap, wrapped, instance, args, kwargs): @_with_tracer_wrapper def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs): """Instruments and 
calls every function defined in TO_WRAP.""" - if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value( - SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY - ): + if context_api.get_value( + _SUPPRESS_INSTRUMENTATION_KEY + ) or context_api.get_value(SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY): return wrapped(*args, **kwargs) llm_model = "unknown" if hasattr(instance, "_model_id"): llm_model = instance._model_id if hasattr(instance, "_model_name"): - llm_model = instance._model_name.replace("publishers/google/models/", "") + llm_model = instance._model_name.replace( + "publishers/google/models/", "" + ) name = to_wrap.get("span_name") span = tracer.start_span( diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/config.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/config.py deleted file mode 100644 index 4689e9292f..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/config.py +++ /dev/null @@ -1,2 +0,0 @@ -class Config: - exception_logger = None diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py index 7ce434fa10..a6c2db7be5 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py @@ -1,3 +1,17 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ import logging import traceback From 623038d1963851ade1555ed11c23ce5693458970 Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Mon, 25 Nov 2024 21:56:26 +0000 Subject: [PATCH 04/16] Run ruff --- .../instrumentation/vertexai_v2/__init__.py | 15 +++-- .../tests/conftest.py | 7 ++- .../tests/disabled_test_bison.py | 61 ++++++++++++++----- .../tests/disabled_test_gemini.py | 3 +- 4 files changed, 60 insertions(+), 26 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py index 093a4abd8d..2c9a9f6b59 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py @@ -18,26 +18,25 @@ import os import types from typing import Collection -from opentelemetry.instrumentation.vertexai.config import Config -from opentelemetry.instrumentation.vertexai.utils import dont_throw + from wrapt import wrap_function_wrapper from opentelemetry import context as context_api -from opentelemetry.trace import get_tracer, SpanKind -from opentelemetry.trace.status import Status, StatusCode - from opentelemetry.instrumentation.instrumentor import BaseInstrumentor from opentelemetry.instrumentation.utils import ( _SUPPRESS_INSTRUMENTATION_KEY, unwrap, ) - +from opentelemetry.instrumentation.vertexai.config import Config +from opentelemetry.instrumentation.vertexai.utils import dont_throw +from opentelemetry.instrumentation.vertexai.version import __version__ from opentelemetry.semconv_ai import ( SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, - SpanAttributes, LLMRequestTypeValues, + SpanAttributes, ) -from opentelemetry.instrumentation.vertexai.version import __version__ +from opentelemetry.trace import SpanKind, get_tracer +from opentelemetry.trace.status import Status, StatusCode logger = logging.getLogger(__name__) diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py index f25ad9eeac..9a412a2203 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py @@ -1,11 +1,14 @@ """Unit tests configuration module.""" import pytest + from opentelemetry import trace +from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor from opentelemetry.sdk.trace import TracerProvider -from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter from opentelemetry.sdk.trace.export import SimpleSpanProcessor -from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor +from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( + InMemorySpanExporter, +) pytest_plugins = [] diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_bison.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_bison.py index f526899450..d6fcd301d2 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_bison.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_bison.py @@ -2,8 +2,13 @@ 
import pytest import vertexai +from vertexai.language_models import ( + ChatModel, + InputOutputTextPair, + TextGenerationModel, +) + from opentelemetry.semconv_ai import SpanAttributes -from vertexai.language_models import TextGenerationModel, ChatModel, InputOutputTextPair vertexai.init() @@ -31,14 +36,17 @@ def test_vertexai_predict(exporter): vertexai_span = spans[0] assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "text-bison@001" + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] + == "text-bison@001" ) assert ( "Give me ten interview questions for the role of program manager." in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] ) assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.8 - assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + assert ( + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + ) assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 assert ( vertexai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"] @@ -74,14 +82,17 @@ async def async_predict_text() -> str: vertexai_span = spans[0] assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "text-bison@001" + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] + == "text-bison@001" ) assert ( "Give me ten interview questions for the role of program manager." in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] ) assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.8 - assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + assert ( + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + ) assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 assert ( vertexai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"] @@ -111,13 +122,18 @@ def test_vertexai_stream(exporter): ] vertexai_span = spans[0] - assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "text-bison" + assert ( + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] + == "text-bison" + ) assert ( "Give me ten interview questions for the role of program manager." in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] ) assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.8 - assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + assert ( + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + ) assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 assert vertexai_span.attributes[ f"{SpanAttributes.LLM_COMPLETIONS}.0.content" @@ -129,7 +145,9 @@ def test_vertexai_stream_async(exporter): async def async_streaming_prediction() -> list: """Streaming Text Example with a Large Language Model""" - text_generation_model = TextGenerationModel.from_pretrained("text-bison") + text_generation_model = TextGenerationModel.from_pretrained( + "text-bison" + ) parameters = { "max_output_tokens": 256, "top_p": 0.8, @@ -151,13 +169,18 @@ async def async_streaming_prediction() -> list: ] vertexai_span = spans[0] - assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "text-bison" + assert ( + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] + == "text-bison" + ) assert ( "Give me ten interview questions for the role of program manager." 
in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] ) assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.8 - assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + assert ( + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + ) assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 assert vertexai_span.attributes[ f"{SpanAttributes.LLM_COMPLETIONS}.0.content" @@ -197,14 +220,17 @@ def test_vertexai_chat(exporter): vertexai_span = spans[0] assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "chat-bison@001" + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] + == "chat-bison@001" ) assert ( "How many planets are there in the solar system?" in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] ) assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.95 - assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + assert ( + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + ) assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 assert ( vertexai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"] @@ -247,11 +273,16 @@ def test_vertexai_chat_stream(exporter): vertexai_span = spans[0] assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "chat-bison@001" + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] + == "chat-bison@001" ) assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.95 - assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] == 0.8 - assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + assert ( + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] == 0.8 + ) + assert ( + vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 + ) assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 assert vertexai_span.attributes[ f"{SpanAttributes.LLM_COMPLETIONS}.0.content" diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_gemini.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_gemini.py index e72631c2c9..9abbdb6faa 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_gemini.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_gemini.py @@ -1,8 +1,9 @@ import pytest import vertexai -from opentelemetry.semconv_ai import SpanAttributes from vertexai.preview.generative_models import GenerativeModel, Part +from opentelemetry.semconv_ai import SpanAttributes + vertexai.init() From 79214d51493956aaaea84ce91cd94e19623eb812 Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Tue, 26 Nov 2024 22:01:00 +0000 Subject: [PATCH 05/16] tox.ini boilerplate --- tox.ini | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tox.ini b/tox.ini index 5fa58e5139..17af332712 100644 --- a/tox.ini +++ b/tox.ini @@ -11,6 +11,11 @@ envlist = pypy3-test-instrumentation-openai-v2-{0,1} lint-instrumentation-openai-v2 + ; instrumentation-vertexai + py3{8,9,10,11,12}-test-instrumentation-vertexai-v2-{0,1} + pypy3-test-instrumentation-vertexai-v2-{0,1} + lint-instrumentation-vertexai-v2 + ; opentelemetry-resource-detector-container py3{8,9,10,11,12}-test-resource-detector-container pypy3-test-resource-detector-container @@ -427,6 +432,17 @@ commands_pre = openai-1: pip install -r 
{toxinidir}/instrumentation-genai/opentelemetry-instrumentation-openai-v2/test-requirements-1.txt lint-instrumentation-openai-v2: pip install -r {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-openai-v2/test-requirements-0.txt + # packages that are released individually should provide a test-requirements.txt with the lowest version of OTel API + # and SDK supported to test we are honoring it + vertexai-0: pip install -r {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-0.txt + # and the latest version of OTel API and SDK + vertexai-1: pip install opentelemetry-api@{env:CORE_REPO}\#egg=opentelemetry-api&subdirectory=opentelemetry-api + vertexai-1: pip install opentelemetry-semantic-conventions@{env:CORE_REPO}\#egg=opentelemetry-semantic-conventions&subdirectory=opentelemetry-semantic-conventions + vertexai-1: pip install opentelemetry-sdk@{env:CORE_REPO}\#egg=opentelemetry-sdk&subdirectory=opentelemetry-sdk + vertexai-1: pip install opentelemetry-test-utils@{env:CORE_REPO}\#egg=opentelemetry-test-utils&subdirectory=tests/opentelemetry-test-utils + vertexai-1: pip install -r {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-1.txt + lint-instrumentation-vertexai-v2: pip install -r {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-0.txt + distro: pip install opentelemetry-api@{env:CORE_REPO}\#egg=opentelemetry-api&subdirectory=opentelemetry-api distro: pip install opentelemetry-semantic-conventions@{env:CORE_REPO}\#egg=opentelemetry-semantic-conventions&subdirectory=opentelemetry-semantic-conventions distro: pip install opentelemetry-sdk@{env:CORE_REPO}\#egg=opentelemetry-sdk&subdirectory=opentelemetry-sdk @@ -944,6 +960,9 @@ commands = test-instrumentation-openai-v2: pytest {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests {posargs} lint-instrumentation-openai-v2: sh -c "cd instrumentation-genai && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-openai-v2" + test-instrumentation-vertexai-v2: pytest {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests {posargs} + lint-instrumentation-vertexai-v2: sh -c "cd instrumentation-genai && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-vertexai-v2" + test-instrumentation-sio-pika: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-pika/tests {posargs} lint-instrumentation-sio-pika: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-pika" From 15d14f9b1983eab12bce7d7ecf9da08f9d29aea3 Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Tue, 26 Nov 2024 22:05:08 +0000 Subject: [PATCH 06/16] Update span attribute semconv besides events --- .../instrumentation/vertexai_v2/__init__.py | 73 ++++++++++--------- .../instrumentation/vertexai_v2/utils.py | 6 +- .../tests/conftest.py | 2 +- 3 files changed, 40 insertions(+), 41 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py index 2c9a9f6b59..3666f94c61 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py @@ -24,15 
+24,13 @@ from opentelemetry import context as context_api from opentelemetry.instrumentation.instrumentor import BaseInstrumentor from opentelemetry.instrumentation.utils import ( - _SUPPRESS_INSTRUMENTATION_KEY, + is_instrumentation_enabled, unwrap, ) -from opentelemetry.instrumentation.vertexai.config import Config -from opentelemetry.instrumentation.vertexai.utils import dont_throw -from opentelemetry.instrumentation.vertexai.version import __version__ -from opentelemetry.semconv_ai import ( - SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, - LLMRequestTypeValues, +from opentelemetry.instrumentation.vertexai_v2.utils import dont_throw +from opentelemetry.instrumentation.vertexai_v2.version import __version__ +from opentelemetry.semconv._incubating.attributes import gen_ai_attributes +from opentelemetry.semconv.trace import ( SpanAttributes, ) from opentelemetry.trace import SpanKind, get_tracer @@ -42,6 +40,7 @@ _instruments = ("google-cloud-aiplatform >= 1.38.1",) +# TODO: span_name should no longer be needed as it comes from `{gen_ai.operation.name} {gen_ai.request.model}` WRAPPED_METHODS = [ { "package": "vertexai.generative_models", @@ -155,30 +154,36 @@ def _set_input_attributes(span, args, kwargs, llm_model): prompt, ) - _set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, llm_model) + _set_span_attribute( + span, gen_ai_attributes.GEN_AI_REQUEST_MODEL, llm_model + ) _set_span_attribute( span, f"{SpanAttributes.LLM_PROMPTS}.0.user", kwargs.get("prompt") ) _set_span_attribute( - span, SpanAttributes.LLM_REQUEST_TEMPERATURE, kwargs.get("temperature") + span, + gen_ai_attributes.GEN_AI_REQUEST_TEMPERATURE, + kwargs.get("temperature"), ) _set_span_attribute( span, - SpanAttributes.LLM_REQUEST_MAX_TOKENS, + gen_ai_attributes.GEN_AI_REQUEST_MAX_TOKENS, kwargs.get("max_output_tokens"), ) _set_span_attribute( - span, SpanAttributes.LLM_REQUEST_TOP_P, kwargs.get("top_p") + span, gen_ai_attributes.GEN_AI_REQUEST_TOP_P, kwargs.get("top_p") + ) + _set_span_attribute( + span, gen_ai_attributes.GEN_AI_REQUEST_TOP_K, kwargs.get("top_k") ) - _set_span_attribute(span, SpanAttributes.LLM_TOP_K, kwargs.get("top_k")) _set_span_attribute( span, - SpanAttributes.LLM_PRESENCE_PENALTY, + gen_ai_attributes.GEN_AI_REQUEST_PRESENCE_PENALTY, kwargs.get("presence_penalty"), ) _set_span_attribute( span, - SpanAttributes.LLM_FREQUENCY_PENALTY, + gen_ai_attributes.GEN_AI_REQUEST_FREQUENCY_PENALTY, kwargs.get("frequency_penalty"), ) @@ -187,22 +192,19 @@ def _set_input_attributes(span, args, kwargs, llm_model): @dont_throw def _set_response_attributes(span, llm_model, generation_text, token_usage): - _set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, llm_model) + _set_span_attribute( + span, gen_ai_attributes.GEN_AI_RESPONSE_MODEL, llm_model + ) if token_usage: _set_span_attribute( span, - SpanAttributes.LLM_USAGE_TOTAL_TOKENS, - token_usage.total_token_count, - ) - _set_span_attribute( - span, - SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, + gen_ai_attributes.GEN_AI_USAGE_OUTPUT_TOKENS, token_usage.candidates_token_count, ) _set_span_attribute( span, - SpanAttributes.LLM_USAGE_PROMPT_TOKENS, + gen_ai_attributes.GEN_AI_USAGE_INPUT_TOKENS, token_usage.prompt_token_count, ) @@ -284,9 +286,7 @@ def wrapper(wrapped, instance, args, kwargs): @_with_tracer_wrapper async def _awrap(tracer, to_wrap, wrapped, instance, args, kwargs): """Instruments and calls every function defined in TO_WRAP.""" - if context_api.get_value( - _SUPPRESS_INSTRUMENTATION_KEY - ) or 
context_api.get_value(SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY): + if not is_instrumentation_enabled(): return await wrapped(*args, **kwargs) llm_model = "unknown" @@ -297,13 +297,16 @@ async def _awrap(tracer, to_wrap, wrapped, instance, args, kwargs): "publishers/google/models/", "" ) - name = to_wrap.get("span_name") + operation_name = ( + gen_ai_attributes.GenAiOperationNameValues.TEXT_COMPLETION.value + ) + name = f"{operation_name} {llm_model}" span = tracer.start_span( name, kind=SpanKind.CLIENT, attributes={ - SpanAttributes.LLM_SYSTEM: "VertexAI", - SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value, + gen_ai_attributes.GEN_AI_SYSTEM: gen_ai_attributes.GenAiSystemValues.VERTEX_AI.value, + gen_ai_attributes.GEN_AI_OPERATION_NAME: operation_name, }, ) @@ -326,9 +329,7 @@ async def _awrap(tracer, to_wrap, wrapped, instance, args, kwargs): @_with_tracer_wrapper def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs): """Instruments and calls every function defined in TO_WRAP.""" - if context_api.get_value( - _SUPPRESS_INSTRUMENTATION_KEY - ) or context_api.get_value(SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY): + if not is_instrumentation_enabled(): return wrapped(*args, **kwargs) llm_model = "unknown" @@ -339,13 +340,16 @@ def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs): "publishers/google/models/", "" ) - name = to_wrap.get("span_name") + operation_name = ( + gen_ai_attributes.GenAiOperationNameValues.TEXT_COMPLETION.value + ) + name = f"{operation_name} {llm_model}" span = tracer.start_span( name, kind=SpanKind.CLIENT, attributes={ - SpanAttributes.LLM_SYSTEM: "VertexAI", - SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value, + gen_ai_attributes.GEN_AI_SYSTEM: gen_ai_attributes.GenAiSystemValues.VERTEX_AI.value, + gen_ai_attributes.GEN_AI_OPERATION_NAME: operation_name, }, ) @@ -370,7 +374,6 @@ class VertexAIInstrumentor(BaseInstrumentor): def __init__(self, exception_logger=None): super().__init__() - Config.exception_logger = exception_logger def instrumentation_dependencies(self) -> Collection[str]: return _instruments diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py index a6c2db7be5..03af299ca0 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py @@ -15,8 +15,6 @@ import logging import traceback -from opentelemetry.instrumentation.vertexai.config import Config - def dont_throw(func): """ @@ -31,13 +29,11 @@ def dont_throw(func): def wrapper(*args, **kwargs): try: return func(*args, **kwargs) - except Exception as e: + except Exception: logger.debug( "OpenLLMetry failed to trace in %s, error: %s", func.__name__, traceback.format_exc(), ) - if Config.exception_logger: - Config.exception_logger(e) return wrapper diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py index 9a412a2203..7df949ac98 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py @@ -3,7 +3,7 @@ import 
pytest from opentelemetry import trace -from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor +from opentelemetry.instrumentation.vertexai_v2 import VertexAIInstrumentor from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import SimpleSpanProcessor from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( From 034dc7df5b8955cbd618cf3c8942b016f9704d25 Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Wed, 27 Nov 2024 19:41:07 +0000 Subject: [PATCH 07/16] Working with events --- .../instrumentation/vertexai_v2/__init__.py | 172 ++++++++++++------ .../instrumentation/vertexai_v2/events.py | 87 +++++++++ .../instrumentation/vertexai_v2/utils.py | 2 +- 3 files changed, 206 insertions(+), 55 deletions(-) create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/events.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py index 3666f94c61..7853eeced1 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py @@ -14,26 +14,32 @@ """OpenTelemetry Vertex AI instrumentation""" +from functools import partial import logging -import os import types -from typing import Collection +from typing import Collection, Optional from wrapt import wrap_function_wrapper -from opentelemetry import context as context_api +from opentelemetry._events import ( + EventLogger, + EventLoggerProvider, + Event, + get_event_logger, +) from opentelemetry.instrumentation.instrumentor import BaseInstrumentor from opentelemetry.instrumentation.utils import ( is_instrumentation_enabled, unwrap, ) +from opentelemetry.instrumentation.vertexai_v2.events import ( + assistant_event, + user_event, +) from opentelemetry.instrumentation.vertexai_v2.utils import dont_throw from opentelemetry.instrumentation.vertexai_v2.version import __version__ from opentelemetry.semconv._incubating.attributes import gen_ai_attributes -from opentelemetry.semconv.trace import ( - SpanAttributes, -) -from opentelemetry.trace import SpanKind, get_tracer +from opentelemetry.trace import SpanKind, TracerProvider, get_tracer from opentelemetry.trace.status import Status, StatusCode logger = logging.getLogger(__name__) @@ -116,11 +122,14 @@ def should_send_prompts(): - return ( - os.getenv("TRACELOOP_TRACE_CONTENT") or "true" - ).lower() == "true" or context_api.get_value( - "override_enable_content_tracing" - ) + # Previously was opt-in by the following check for privacy reasons: + # + # return ( + # os.getenv("TRACELOOP_TRACE_CONTENT") or "true" + # ).lower() == "true" or context_api.get_value( + # "override_enable_content_tracing" + # ) + return True def is_streaming_response(response): @@ -138,7 +147,9 @@ def _set_span_attribute(span, name, value): return -def _set_input_attributes(span, args, kwargs, llm_model): +def _set_input_attributes( + span, event_logger: EventLogger, args, kwargs, llm_model +): if should_send_prompts() and args is not None and len(args) > 0: prompt = "" for arg in args: @@ -148,18 +159,36 @@ def _set_input_attributes(span, args, kwargs, llm_model): for subarg in arg: prompt = f"{prompt}{subarg}\n" 
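+        # The prompt text is no longer recorded as a span attribute; it is
+        # emitted below as a gen_ai.user.message log event correlated with
+        # this span via its span context (see events.py).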
- _set_span_attribute( - span, - f"{SpanAttributes.LLM_PROMPTS}.0.user", - prompt, - ) + # _set_span_attribute( + # span, + # f"{SpanAttributes.LLM_PROMPTS}.0.user", + # prompt, + # ) + if prompt: + event_logger.emit( + user_event( + gen_ai_system=gen_ai_attributes.GenAiSystemValues.VERTEX_AI.value, + content=prompt, + span_context=span.get_span_context(), + ) + ) + + # Copied from openllmetry logic + # https://github.com/traceloop/openllmetry/blob/v0.33.12/packages/opentelemetry-instrumentation-vertexai/opentelemetry/instrumentation/vertexai/__init__.py#L141-L143 + # I guess prompt may be in kwargs instead or in addition? + prompt = kwargs.get("prompt") + if prompt: + event_logger.emit( + user_event( + gen_ai_system=gen_ai_attributes.GenAiSystemValues.VERTEX_AI.value, + content=prompt, + span_context=span.get_span_context(), + ) + ) _set_span_attribute( span, gen_ai_attributes.GEN_AI_REQUEST_MODEL, llm_model ) - _set_span_attribute( - span, f"{SpanAttributes.LLM_PROMPTS}.0.user", kwargs.get("prompt") - ) _set_span_attribute( span, gen_ai_attributes.GEN_AI_REQUEST_TEMPERATURE, @@ -191,7 +220,9 @@ def _set_input_attributes(span, args, kwargs, llm_model): @dont_throw -def _set_response_attributes(span, llm_model, generation_text, token_usage): +def _set_response_attributes( + span, event_logger: EventLogger, llm_model, generation_text, token_usage +): _set_span_attribute( span, gen_ai_attributes.GEN_AI_RESPONSE_MODEL, llm_model ) @@ -208,17 +239,19 @@ def _set_response_attributes(span, llm_model, generation_text, token_usage): token_usage.prompt_token_count, ) - _set_span_attribute( - span, f"{SpanAttributes.LLM_COMPLETIONS}.0.role", "assistant" - ) - _set_span_attribute( - span, - f"{SpanAttributes.LLM_COMPLETIONS}.0.content", - generation_text, - ) + if generation_text: + event_logger.emit( + assistant_event( + gen_ai_system=gen_ai_attributes.GenAiSystemValues.VERTEX_AI.value, + content=generation_text, + span_context=span.get_span_context(), + ) + ) -def _build_from_streaming_response(span, response, llm_model): +def _build_from_streaming_response( + span, event_logger: EventLogger, response, llm_model +): complete_response = "" token_usage = None for item in response: @@ -229,13 +262,17 @@ def _build_from_streaming_response(span, response, llm_model): yield item_to_yield - _set_response_attributes(span, llm_model, complete_response, token_usage) + _set_response_attributes( + span, event_logger, llm_model, complete_response, token_usage + ) span.set_status(Status(StatusCode.OK)) span.end() -async def _abuild_from_streaming_response(span, response, llm_model): +async def _abuild_from_streaming_response( + span, event_logger: EventLogger, response, llm_model +): complete_response = "" token_usage = None async for item in response: @@ -246,23 +283,26 @@ async def _abuild_from_streaming_response(span, response, llm_model): yield item_to_yield - _set_response_attributes(span, llm_model, complete_response, token_usage) + _set_response_attributes( + span, event_logger, llm_model, complete_response, token_usage + ) span.set_status(Status(StatusCode.OK)) span.end() @dont_throw -def _handle_request(span, args, kwargs, llm_model): +def _handle_request(span, event_logger, args, kwargs, llm_model): if span.is_recording(): - _set_input_attributes(span, args, kwargs, llm_model) + _set_input_attributes(span, event_logger, args, kwargs, llm_model) @dont_throw -def _handle_response(span, response, llm_model): +def _handle_response(span, event_logger: EventLogger, response, llm_model): if 
span.is_recording(): _set_response_attributes( span, + event_logger, llm_model, response.candidates[0].text, response.usage_metadata, @@ -283,8 +323,10 @@ def wrapper(wrapped, instance, args, kwargs): return _with_tracer -@_with_tracer_wrapper -async def _awrap(tracer, to_wrap, wrapped, instance, args, kwargs): +# @_with_tracer_wrapper +async def _awrap( + tracer, event_logger: EventLogger, to_wrap, wrapped, instance, args, kwargs +): """Instruments and calls every function defined in TO_WRAP.""" if not is_instrumentation_enabled(): return await wrapped(*args, **kwargs) @@ -310,24 +352,30 @@ async def _awrap(tracer, to_wrap, wrapped, instance, args, kwargs): }, ) - _handle_request(span, args, kwargs, llm_model) + _handle_request(span, event_logger, args, kwargs, llm_model) response = await wrapped(*args, **kwargs) if response: if is_streaming_response(response): - return _build_from_streaming_response(span, response, llm_model) + return _build_from_streaming_response( + span, event_logger, response, llm_model + ) elif is_async_streaming_response(response): - return _abuild_from_streaming_response(span, response, llm_model) + return _abuild_from_streaming_response( + span, event_logger, response, llm_model + ) else: - _handle_response(span, response, llm_model) + _handle_response(span, event_logger, response, llm_model) span.end() return response -@_with_tracer_wrapper -def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs): +# @_with_tracer_wrapper +def _wrap( + tracer, event_logger: EventLogger, to_wrap, wrapped, instance, args, kwargs +): """Instruments and calls every function defined in TO_WRAP.""" if not is_instrumentation_enabled(): return wrapped(*args, **kwargs) @@ -353,17 +401,21 @@ def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs): }, ) - _handle_request(span, args, kwargs, llm_model) + _handle_request(span, event_logger, args, kwargs, llm_model) response = wrapped(*args, **kwargs) if response: if is_streaming_response(response): - return _build_from_streaming_response(span, response, llm_model) + return _build_from_streaming_response( + span, event_logger, response, llm_model + ) elif is_async_streaming_response(response): - return _abuild_from_streaming_response(span, response, llm_model) + return _abuild_from_streaming_response( + span, event_logger, response, llm_model + ) else: - _handle_response(span, response, llm_model) + _handle_response(span, event_logger, response, llm_model) span.end() return response @@ -378,9 +430,21 @@ def __init__(self, exception_logger=None): def instrumentation_dependencies(self) -> Collection[str]: return _instruments - def _instrument(self, **kwargs): - tracer_provider = kwargs.get("tracer_provider") - tracer = get_tracer(__name__, __version__, tracer_provider) + def _instrument( + self, + *, + tracer_provider: Optional[TracerProvider] = None, + event_logger_provider: Optional[EventLoggerProvider] = None, + **kwargs, + ): + tracer = get_tracer( + __name__, __version__, tracer_provider=tracer_provider + ) + event_logger = get_event_logger( + __name__, + version=__version__, + event_logger_provider=event_logger_provider, + ) for wrapped_method in WRAPPED_METHODS: wrap_package = wrapped_method.get("package") wrap_object = wrapped_method.get("object") @@ -390,9 +454,9 @@ def _instrument(self, **kwargs): wrap_package, f"{wrap_object}.{wrap_method}", ( - _awrap(tracer, wrapped_method) + partial(_awrap, tracer, event_logger, wrapped_method) if wrapped_method.get("is_async") - else _wrap(tracer, wrapped_method) + else partial(_wrap, 
tracer, event_logger, wrapped_method) ), ) diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/events.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/events.py new file mode 100644 index 0000000000..8807c7bfaa --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/events.py @@ -0,0 +1,87 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Factories for event types described in +https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/gen-ai-events.md#system-event. + +Hopefully this code can be autogenerated by Weaver once Gen AI semantic conventions are +schematized in YAML and the Weaver tool supports it. +""" + +from typing import Optional + +from opentelemetry._events import Event +from opentelemetry.semconv._incubating.attributes import gen_ai_attributes +from opentelemetry.trace import SpanContext +from opentelemetry.util.types import AnyValue + + +def _set_span_context(event: Event, span_context: Optional[SpanContext]): + if not span_context: + return + event.span_id = span_context.span_id + event.trace_id = span_context.trace_id + event.trace_flags = span_context.trace_flags + + +def user_event( + *, + gen_ai_system: str, + # TODO: should I just leave role out since it's not required if "user" + role: str = "user", + content: AnyValue, + span_context: Optional[SpanContext] = None, +) -> Event: + """Creates a User event + https://github.com/open-telemetry/semantic-conventions/blob/v1.28.0/docs/gen-ai/gen-ai-events.md#user-event + """ + event = Event( + name="gen_ai.user.message", + attributes={ + gen_ai_attributes.GEN_AI_SYSTEM: gen_ai_system, + }, + body={ + "role": role, + "content": content, + }, + ) + _set_span_context(event, span_context) + return event + + +# TODO: add tool_calls once instrumentation supports it +def assistant_event( + *, + gen_ai_system: str, + # TODO: should I just leave role out since it's not required if "assistant" + role: str = "assistant", + content: AnyValue, + span_context: Optional[SpanContext] = None, +) -> Event: + """Creates an Assistant event + https://github.com/open-telemetry/semantic-conventions/blob/v1.28.0/docs/gen-ai/gen-ai-events.md#assistant-event + """ + event = Event( + name="gen_ai.assistant.message", + attributes={ + gen_ai_attributes.GEN_AI_SYSTEM: gen_ai_system, + }, + body={ + "role": role, + "content": content, + }, + ) + _set_span_context(event, span_context) + return event diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py index 03af299ca0..07be88d3f2 100644 --- 
a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py @@ -31,7 +31,7 @@ def wrapper(*args, **kwargs): return func(*args, **kwargs) except Exception: logger.debug( - "OpenLLMetry failed to trace in %s, error: %s", + "failed to trace in %s, error: %s", func.__name__, traceback.format_exc(), ) From 4551c88abb88634589e029628acbd1835c820a8e Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Tue, 3 Dec 2024 19:34:48 +0000 Subject: [PATCH 08/16] Remove _with_tracer_wrapper in favor of partial --- .../instrumentation/vertexai_v2/__init__.py | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py index 7853eeced1..cb29480f4f 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py @@ -14,9 +14,9 @@ """OpenTelemetry Vertex AI instrumentation""" -from functools import partial import logging import types +from functools import partial from typing import Collection, Optional from wrapt import wrap_function_wrapper @@ -24,7 +24,6 @@ from opentelemetry._events import ( EventLogger, EventLoggerProvider, - Event, get_event_logger, ) from opentelemetry.instrumentation.instrumentor import BaseInstrumentor @@ -311,19 +310,6 @@ def _handle_response(span, event_logger: EventLogger, response, llm_model): span.set_status(Status(StatusCode.OK)) -def _with_tracer_wrapper(func): - """Helper for providing tracer for wrapper functions.""" - - def _with_tracer(tracer, to_wrap): - def wrapper(wrapped, instance, args, kwargs): - return func(tracer, to_wrap, wrapped, instance, args, kwargs) - - return wrapper - - return _with_tracer - - -# @_with_tracer_wrapper async def _awrap( tracer, event_logger: EventLogger, to_wrap, wrapped, instance, args, kwargs ): @@ -372,7 +358,6 @@ async def _awrap( return response -# @_with_tracer_wrapper def _wrap( tracer, event_logger: EventLogger, to_wrap, wrapped, instance, args, kwargs ): From 6ddf7e21d77c21d20cbab7d780b13c2b048671d6 Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Thu, 5 Dec 2024 16:22:54 +0000 Subject: [PATCH 09/16] Working sanitized VCR for tests, but missing some assertions --- .../test_vertexai_generate_content.yaml | 59 ++++++++++++++++++ .../tests/conftest.py | 61 ++++++++++++++++++- .../tests/disabled_test_gemini.py | 52 ---------------- .../tests/test_gemini.py | 37 +++++++++++ 4 files changed, 156 insertions(+), 53 deletions(-) create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/cassettes/test_vertexai_generate_content.yaml delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_gemini.py create mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/cassettes/test_vertexai_generate_content.yaml 
b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/cassettes/test_vertexai_generate_content.yaml new file mode 100644 index 0000000000..6999682200 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/cassettes/test_vertexai_generate_content.yaml @@ -0,0 +1,59 @@ +interactions: +- request: + body: "{\n \"contents\": [\n {\n \"role\": \"user\",\n \"parts\": + [\n {\n \"fileData\": {\n \"mimeType\": \"image/jpeg\",\n + \ \"fileUri\": \"gs://generativeai-downloads/images/scones.jpg\"\n + \ }\n },\n {\n \"text\": \"what is shown in this + image?\"\n }\n ]\n }\n ]\n}" + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '317' + Content-Type: + - application/json + User-Agent: + - python-requests/2.32.3 + method: POST + uri: https://us-central1-aiplatform.googleapis.com/v1beta1/projects/fake-project/locations/us-central1/publishers/google/models/gemini-pro-vision:generateContent?%24alt=json%3Benum-encoding%3Dint + response: + body: + string: "{\n \"candidates\": [\n {\n \"content\": {\n \"role\": + \"model\",\n \"parts\": [\n {\n \"text\": \" The + image shows a table with a cup of coffee, a bowl of blueberries, and several + blueberry scones. There are also pink flowers on the table.\"\n }\n + \ ]\n },\n \"finishReason\": 1,\n \"safetyRatings\": + [\n {\n \"category\": 1,\n \"probability\": 1,\n + \ \"probabilityScore\": 0.024780273,\n \"severity\": 1,\n + \ \"severityScore\": 0.072753906\n },\n {\n \"category\": + 2,\n \"probability\": 1,\n \"probabilityScore\": 0.025512695,\n + \ \"severity\": 1,\n \"severityScore\": 0.06738281\n },\n + \ {\n \"category\": 3,\n \"probability\": 1,\n \"probabilityScore\": + 0.040283203,\n \"severity\": 1,\n \"severityScore\": 0.03515625\n + \ },\n {\n \"category\": 4,\n \"probability\": + 1,\n \"probabilityScore\": 0.07910156,\n \"severity\": 1,\n + \ \"severityScore\": 0.083984375\n }\n ],\n \"avgLogprobs\": + -0.068832365671793613\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": + 265,\n \"candidatesTokenCount\": 30,\n \"totalTokenCount\": 295\n },\n + \ \"modelVersion\": \"gemini-pro-vision\"\n}\n" + headers: + Cache-Control: + - private + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=UTF-8 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py index 7df949ac98..605ae2c784 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py @@ -1,6 +1,11 @@ """Unit tests configuration module.""" +from os import replace +import re +from typing import Any, Mapping, MutableMapping + import pytest +from google.auth.credentials import AnonymousCredentials from opentelemetry import trace from opentelemetry.instrumentation.vertexai_v2 import VertexAIInstrumentor @@ -12,6 +17,11 @@ pytest_plugins = [] +import vertexai +from vcr import VCR +from vcr.record_mode import RecordMode +from vcr.request import Request + @pytest.fixture(scope="session") def exporter(): @@ -32,6 +42,55 @@ def clear_exporter(exporter): exporter.clear() +@pytest.fixture(autouse=True) +def vertexai_init(vcr: VCR) -> None: + # Unfortunately I couldn't find a nice way to globally reset 
the global_config for each + # test because different vertex submodules reference the global instance directly + # https://github.com/googleapis/python-aiplatform/blob/v1.74.0/google/cloud/aiplatform/initializer.py#L687 + # so this config will leak if we don't call init() for each test. + + # When not recording (in CI), don't do any auth. That prevents trying to read application + # default credentials from the filesystem or metadata server and oauth token exchange. This + # is not the interesting part of our instrumentation to test. + if vcr.record_mode is RecordMode.NONE: + vertexai.init(credentials=AnonymousCredentials()) + else: + vertexai.init() + + @pytest.fixture(scope="module") def vcr_config(): - return {"filter_headers": ["authorization"]} + filter_header_regexes = [ + r"X-.*", + "Server", + "Date", + "Expires", + "Authorization", + ] + + def filter_headers(headers: Mapping[str, str]) -> Mapping[str, str]: + return { + key: val + for key, val in headers.items() + if not any( + re.match(filter_re, key, re.IGNORECASE) + for filter_re in filter_header_regexes + ) + } + + def before_record_cb(request: Request): + request.headers = filter_headers(request.headers) + request.uri = re.sub( + r"/projects/[^/]+/", f"/projects/fake-project/", request.uri + ) + return request + + def before_response_cb(response: MutableMapping[str, Any]): + response["headers"] = filter_headers(response["headers"]) + return response + + return { + "before_record_request": before_record_cb, + "before_record_response": before_response_cb, + "ignore_hosts": ["oauth2.googleapis.com"], + } diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_gemini.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_gemini.py deleted file mode 100644 index 9abbdb6faa..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_gemini.py +++ /dev/null @@ -1,52 +0,0 @@ -import pytest -import vertexai -from vertexai.preview.generative_models import GenerativeModel, Part - -from opentelemetry.semconv_ai import SpanAttributes - -vertexai.init() - - -@pytest.mark.vcr -def test_vertexai_generate_content(exporter): - multimodal_model = GenerativeModel("gemini-pro-vision") - response = multimodal_model.generate_content( - [ - Part.from_uri( - "gs://generativeai-downloads/images/scones.jpg", - mime_type="image/jpeg", - ), - "what is shown in this image?", - ] - ) - - spans = exporter.get_finished_spans() - assert [span.name for span in spans] == [ - "vertexai.generate_content", - ] - - vertexai_span = spans[0] - assert ( - "what is shown in this image?" 
- in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] - ) - assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] - == "gemini-pro-vision" - ) - assert ( - vertexai_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] - == response._raw_response.usage_metadata.total_token_count - ) - assert ( - vertexai_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] - == response._raw_response.usage_metadata.prompt_token_count - ) - assert ( - vertexai_span.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] - == response._raw_response.usage_metadata.candidates_token_count - ) - assert ( - vertexai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"] - == response.text - ) diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py new file mode 100644 index 0000000000..18fd0dad7e --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py @@ -0,0 +1,37 @@ +import pytest +from vertexai.preview.generative_models import GenerativeModel, Part + +# from opentelemetry.semconv_ai import SpanAttributes + + +@pytest.mark.vcr +def test_vertexai_generate_content(exporter): + multimodal_model = GenerativeModel("gemini-pro-vision") + response = multimodal_model.generate_content( + [ + Part.from_uri( + "gs://generativeai-downloads/images/scones.jpg", + mime_type="image/jpeg", + ), + "what is shown in this image?", + ] + ) + + spans = exporter.get_finished_spans() + assert [span.name for span in spans] == [ + "text_completion gemini-pro-vision" + ] + + vertexai_span = spans[0] + assert len(spans) == 1 + + assert vertexai_span.attributes == { + "gen_ai.system": "vertex_ai", + "gen_ai.operation.name": "text_completion", + "gen_ai.request.model": "gemini-pro-vision", + "gen_ai.response.model": "gemini-pro-vision", + "gen_ai.usage.output_tokens": 30, + "gen_ai.usage.input_tokens": 265, + } + + # TODO: verify Events From 88fd4bc1951e8f3dace5760a7a3bc52931101581 Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Thu, 5 Dec 2024 18:07:07 +0000 Subject: [PATCH 10/16] tox -e generate-workflows --- .github/workflows/core_contrib_test_0.yml | 44 +++ .github/workflows/lint_0.yml | 18 + .github/workflows/test_0.yml | 432 +++++++++++----------- .github/workflows/test_1.yml | 432 +++++++++++----------- .github/workflows/test_2.yml | 216 +++++++++++ 5 files changed, 710 insertions(+), 432 deletions(-) diff --git a/.github/workflows/core_contrib_test_0.yml b/.github/workflows/core_contrib_test_0.yml index 67bda629ff..11c28aad89 100644 --- a/.github/workflows/core_contrib_test_0.yml +++ b/.github/workflows/core_contrib_test_0.yml @@ -63,6 +63,50 @@ jobs: - name: Run tests run: tox -e py38-test-instrumentation-openai-v2-1 -- -ra + py38-test-instrumentation-vertexai-v2-0: + name: instrumentation-vertexai-v2-0 + runs-on: ubuntu-latest + steps: + - name: Checkout contrib repo @ SHA - ${{ env.CONTRIB_REPO_SHA }} + uses: actions/checkout@v4 + with: + repository: open-telemetry/opentelemetry-python-contrib + ref: ${{ env.CONTRIB_REPO_SHA }} + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + architecture: "x64" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py38-test-instrumentation-vertexai-v2-0 -- -ra + + py38-test-instrumentation-vertexai-v2-1: + name: instrumentation-vertexai-v2-1 + runs-on: ubuntu-latest + steps: + - name: Checkout contrib 
repo @ SHA - ${{ env.CONTRIB_REPO_SHA }} + uses: actions/checkout@v4 + with: + repository: open-telemetry/opentelemetry-python-contrib + ref: ${{ env.CONTRIB_REPO_SHA }} + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + architecture: "x64" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py38-test-instrumentation-vertexai-v2-1 -- -ra + py38-test-resource-detector-container: name: resource-detector-container runs-on: ubuntu-latest diff --git a/.github/workflows/lint_0.yml b/.github/workflows/lint_0.yml index 9d77ef5e27..bd0f083dcc 100644 --- a/.github/workflows/lint_0.yml +++ b/.github/workflows/lint_0.yml @@ -34,6 +34,24 @@ jobs: - name: Run tests run: tox -e lint-instrumentation-openai-v2 + lint-instrumentation-vertexai-v2: + name: instrumentation-vertexai-v2 + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e lint-instrumentation-vertexai-v2 + lint-resource-detector-container: name: resource-detector-container runs-on: ubuntu-latest diff --git a/.github/workflows/test_0.yml b/.github/workflows/test_0.yml index 47c9a19cf3..aa9c533f78 100644 --- a/.github/workflows/test_0.yml +++ b/.github/workflows/test_0.yml @@ -232,6 +232,222 @@ jobs: - name: Run tests run: tox -e pypy3-test-instrumentation-openai-v2-1 -- -ra + py38-test-instrumentation-vertexai-v2-0_ubuntu-latest: + name: instrumentation-vertexai-v2-0 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py38-test-instrumentation-vertexai-v2-0 -- -ra + + py38-test-instrumentation-vertexai-v2-1_ubuntu-latest: + name: instrumentation-vertexai-v2-1 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py38-test-instrumentation-vertexai-v2-1 -- -ra + + py39-test-instrumentation-vertexai-v2-0_ubuntu-latest: + name: instrumentation-vertexai-v2-0 3.9 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py39-test-instrumentation-vertexai-v2-0 -- -ra + + py39-test-instrumentation-vertexai-v2-1_ubuntu-latest: + name: instrumentation-vertexai-v2-1 3.9 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py39-test-instrumentation-vertexai-v2-1 -- -ra + + py310-test-instrumentation-vertexai-v2-0_ubuntu-latest: + name: instrumentation-vertexai-v2-0 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - 
name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py310-test-instrumentation-vertexai-v2-0 -- -ra + + py310-test-instrumentation-vertexai-v2-1_ubuntu-latest: + name: instrumentation-vertexai-v2-1 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py310-test-instrumentation-vertexai-v2-1 -- -ra + + py311-test-instrumentation-vertexai-v2-0_ubuntu-latest: + name: instrumentation-vertexai-v2-0 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py311-test-instrumentation-vertexai-v2-0 -- -ra + + py311-test-instrumentation-vertexai-v2-1_ubuntu-latest: + name: instrumentation-vertexai-v2-1 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py311-test-instrumentation-vertexai-v2-1 -- -ra + + py312-test-instrumentation-vertexai-v2-0_ubuntu-latest: + name: instrumentation-vertexai-v2-0 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py312-test-instrumentation-vertexai-v2-0 -- -ra + + py312-test-instrumentation-vertexai-v2-1_ubuntu-latest: + name: instrumentation-vertexai-v2-1 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py312-test-instrumentation-vertexai-v2-1 -- -ra + + pypy3-test-instrumentation-vertexai-v2-0_ubuntu-latest: + name: instrumentation-vertexai-v2-0 pypy-3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python pypy-3.8 + uses: actions/setup-python@v5 + with: + python-version: "pypy-3.8" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e pypy3-test-instrumentation-vertexai-v2-0 -- -ra + + pypy3-test-instrumentation-vertexai-v2-1_ubuntu-latest: + name: instrumentation-vertexai-v2-1 pypy-3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python pypy-3.8 + uses: actions/setup-python@v5 + with: + python-version: "pypy-3.8" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e pypy3-test-instrumentation-vertexai-v2-1 -- -ra + py38-test-resource-detector-container_ubuntu-latest: name: resource-detector-container 3.8 Ubuntu runs-on: ubuntu-latest @@ -4299,219 +4515,3 @@ jobs: - name: Run 
tests run: tox -e pypy3-test-instrumentation-mysql-0 -- -ra - - pypy3-test-instrumentation-mysql-1_ubuntu-latest: - name: instrumentation-mysql-1 pypy-3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python pypy-3.8 - uses: actions/setup-python@v5 - with: - python-version: "pypy-3.8" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e pypy3-test-instrumentation-mysql-1 -- -ra - - py38-test-instrumentation-mysqlclient_ubuntu-latest: - name: instrumentation-mysqlclient 3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.8 - uses: actions/setup-python@v5 - with: - python-version: "3.8" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py38-test-instrumentation-mysqlclient -- -ra - - py39-test-instrumentation-mysqlclient_ubuntu-latest: - name: instrumentation-mysqlclient 3.9 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.9 - uses: actions/setup-python@v5 - with: - python-version: "3.9" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py39-test-instrumentation-mysqlclient -- -ra - - py310-test-instrumentation-mysqlclient_ubuntu-latest: - name: instrumentation-mysqlclient 3.10 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.10 - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py310-test-instrumentation-mysqlclient -- -ra - - py311-test-instrumentation-mysqlclient_ubuntu-latest: - name: instrumentation-mysqlclient 3.11 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.11 - uses: actions/setup-python@v5 - with: - python-version: "3.11" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py311-test-instrumentation-mysqlclient -- -ra - - py312-test-instrumentation-mysqlclient_ubuntu-latest: - name: instrumentation-mysqlclient 3.12 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.12 - uses: actions/setup-python@v5 - with: - python-version: "3.12" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py312-test-instrumentation-mysqlclient -- -ra - - pypy3-test-instrumentation-mysqlclient_ubuntu-latest: - name: instrumentation-mysqlclient pypy-3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python pypy-3.8 - uses: actions/setup-python@v5 - with: - python-version: "pypy-3.8" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e pypy3-test-instrumentation-mysqlclient -- -ra - - py38-test-instrumentation-psycopg2_ubuntu-latest: - name: instrumentation-psycopg2 3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.8 - uses: actions/setup-python@v5 - with: - python-version: "3.8" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e 
py38-test-instrumentation-psycopg2 -- -ra - - py39-test-instrumentation-psycopg2_ubuntu-latest: - name: instrumentation-psycopg2 3.9 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.9 - uses: actions/setup-python@v5 - with: - python-version: "3.9" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py39-test-instrumentation-psycopg2 -- -ra - - py310-test-instrumentation-psycopg2_ubuntu-latest: - name: instrumentation-psycopg2 3.10 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.10 - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py310-test-instrumentation-psycopg2 -- -ra - - py311-test-instrumentation-psycopg2_ubuntu-latest: - name: instrumentation-psycopg2 3.11 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.11 - uses: actions/setup-python@v5 - with: - python-version: "3.11" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py311-test-instrumentation-psycopg2 -- -ra - - py312-test-instrumentation-psycopg2_ubuntu-latest: - name: instrumentation-psycopg2 3.12 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.12 - uses: actions/setup-python@v5 - with: - python-version: "3.12" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py312-test-instrumentation-psycopg2 -- -ra diff --git a/.github/workflows/test_1.yml b/.github/workflows/test_1.yml index 9c5d48aea3..8a65b3fd6b 100644 --- a/.github/workflows/test_1.yml +++ b/.github/workflows/test_1.yml @@ -16,6 +16,222 @@ env: jobs: + pypy3-test-instrumentation-mysql-1_ubuntu-latest: + name: instrumentation-mysql-1 pypy-3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python pypy-3.8 + uses: actions/setup-python@v5 + with: + python-version: "pypy-3.8" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e pypy3-test-instrumentation-mysql-1 -- -ra + + py38-test-instrumentation-mysqlclient_ubuntu-latest: + name: instrumentation-mysqlclient 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py38-test-instrumentation-mysqlclient -- -ra + + py39-test-instrumentation-mysqlclient_ubuntu-latest: + name: instrumentation-mysqlclient 3.9 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py39-test-instrumentation-mysqlclient -- -ra + + py310-test-instrumentation-mysqlclient_ubuntu-latest: + name: instrumentation-mysqlclient 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + 
uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py310-test-instrumentation-mysqlclient -- -ra + + py311-test-instrumentation-mysqlclient_ubuntu-latest: + name: instrumentation-mysqlclient 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py311-test-instrumentation-mysqlclient -- -ra + + py312-test-instrumentation-mysqlclient_ubuntu-latest: + name: instrumentation-mysqlclient 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py312-test-instrumentation-mysqlclient -- -ra + + pypy3-test-instrumentation-mysqlclient_ubuntu-latest: + name: instrumentation-mysqlclient pypy-3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python pypy-3.8 + uses: actions/setup-python@v5 + with: + python-version: "pypy-3.8" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e pypy3-test-instrumentation-mysqlclient -- -ra + + py38-test-instrumentation-psycopg2_ubuntu-latest: + name: instrumentation-psycopg2 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py38-test-instrumentation-psycopg2 -- -ra + + py39-test-instrumentation-psycopg2_ubuntu-latest: + name: instrumentation-psycopg2 3.9 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py39-test-instrumentation-psycopg2 -- -ra + + py310-test-instrumentation-psycopg2_ubuntu-latest: + name: instrumentation-psycopg2 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py310-test-instrumentation-psycopg2 -- -ra + + py311-test-instrumentation-psycopg2_ubuntu-latest: + name: instrumentation-psycopg2 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py311-test-instrumentation-psycopg2 -- -ra + + py312-test-instrumentation-psycopg2_ubuntu-latest: + name: instrumentation-psycopg2 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + 
python-version: "3.12" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py312-test-instrumentation-psycopg2 -- -ra + py38-test-instrumentation-psycopg_ubuntu-latest: name: instrumentation-psycopg 3.8 Ubuntu runs-on: ubuntu-latest @@ -4299,219 +4515,3 @@ jobs: - name: Run tests run: tox -e py310-test-instrumentation-asyncio -- -ra - - py311-test-instrumentation-asyncio_ubuntu-latest: - name: instrumentation-asyncio 3.11 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.11 - uses: actions/setup-python@v5 - with: - python-version: "3.11" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py311-test-instrumentation-asyncio -- -ra - - py312-test-instrumentation-asyncio_ubuntu-latest: - name: instrumentation-asyncio 3.12 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.12 - uses: actions/setup-python@v5 - with: - python-version: "3.12" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py312-test-instrumentation-asyncio -- -ra - - py38-test-instrumentation-cassandra_ubuntu-latest: - name: instrumentation-cassandra 3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.8 - uses: actions/setup-python@v5 - with: - python-version: "3.8" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py38-test-instrumentation-cassandra -- -ra - - py39-test-instrumentation-cassandra_ubuntu-latest: - name: instrumentation-cassandra 3.9 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.9 - uses: actions/setup-python@v5 - with: - python-version: "3.9" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py39-test-instrumentation-cassandra -- -ra - - py310-test-instrumentation-cassandra_ubuntu-latest: - name: instrumentation-cassandra 3.10 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.10 - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py310-test-instrumentation-cassandra -- -ra - - py311-test-instrumentation-cassandra_ubuntu-latest: - name: instrumentation-cassandra 3.11 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.11 - uses: actions/setup-python@v5 - with: - python-version: "3.11" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py311-test-instrumentation-cassandra -- -ra - - py312-test-instrumentation-cassandra_ubuntu-latest: - name: instrumentation-cassandra 3.12 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.12 - uses: actions/setup-python@v5 - with: - python-version: "3.12" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py312-test-instrumentation-cassandra -- -ra - - pypy3-test-instrumentation-cassandra_ubuntu-latest: - name: instrumentation-cassandra pypy-3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: 
Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python pypy-3.8 - uses: actions/setup-python@v5 - with: - python-version: "pypy-3.8" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e pypy3-test-instrumentation-cassandra -- -ra - - py38-test-processor-baggage_ubuntu-latest: - name: processor-baggage 3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.8 - uses: actions/setup-python@v5 - with: - python-version: "3.8" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py38-test-processor-baggage -- -ra - - py39-test-processor-baggage_ubuntu-latest: - name: processor-baggage 3.9 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.9 - uses: actions/setup-python@v5 - with: - python-version: "3.9" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py39-test-processor-baggage -- -ra - - py310-test-processor-baggage_ubuntu-latest: - name: processor-baggage 3.10 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.10 - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py310-test-processor-baggage -- -ra - - py311-test-processor-baggage_ubuntu-latest: - name: processor-baggage 3.11 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.11 - uses: actions/setup-python@v5 - with: - python-version: "3.11" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py311-test-processor-baggage -- -ra diff --git a/.github/workflows/test_2.yml b/.github/workflows/test_2.yml index c23866ffa8..b805ee4509 100644 --- a/.github/workflows/test_2.yml +++ b/.github/workflows/test_2.yml @@ -16,6 +16,222 @@ env: jobs: + py311-test-instrumentation-asyncio_ubuntu-latest: + name: instrumentation-asyncio 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py311-test-instrumentation-asyncio -- -ra + + py312-test-instrumentation-asyncio_ubuntu-latest: + name: instrumentation-asyncio 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py312-test-instrumentation-asyncio -- -ra + + py38-test-instrumentation-cassandra_ubuntu-latest: + name: instrumentation-cassandra 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py38-test-instrumentation-cassandra -- -ra + + py39-test-instrumentation-cassandra_ubuntu-latest: + name: instrumentation-cassandra 3.9 Ubuntu + runs-on: 
ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py39-test-instrumentation-cassandra -- -ra + + py310-test-instrumentation-cassandra_ubuntu-latest: + name: instrumentation-cassandra 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py310-test-instrumentation-cassandra -- -ra + + py311-test-instrumentation-cassandra_ubuntu-latest: + name: instrumentation-cassandra 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py311-test-instrumentation-cassandra -- -ra + + py312-test-instrumentation-cassandra_ubuntu-latest: + name: instrumentation-cassandra 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py312-test-instrumentation-cassandra -- -ra + + pypy3-test-instrumentation-cassandra_ubuntu-latest: + name: instrumentation-cassandra pypy-3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python pypy-3.8 + uses: actions/setup-python@v5 + with: + python-version: "pypy-3.8" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e pypy3-test-instrumentation-cassandra -- -ra + + py38-test-processor-baggage_ubuntu-latest: + name: processor-baggage 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py38-test-processor-baggage -- -ra + + py39-test-processor-baggage_ubuntu-latest: + name: processor-baggage 3.9 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py39-test-processor-baggage -- -ra + + py310-test-processor-baggage_ubuntu-latest: + name: processor-baggage 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py310-test-processor-baggage -- -ra + + py311-test-processor-baggage_ubuntu-latest: + name: processor-baggage 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: 
actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py311-test-processor-baggage -- -ra + py312-test-processor-baggage_ubuntu-latest: name: processor-baggage 3.12 Ubuntu runs-on: ubuntu-latest From 069e808fdc24deb73ebe7c590d139ce040743e18 Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Thu, 5 Dec 2024 19:33:43 +0000 Subject: [PATCH 11/16] Fix requirements to work with python 3.8 --- .../test-requirements-0.txt | 36 ++++++++++--------- .../test-requirements-1.txt | 27 +++++++------- .../test_vertexai_generate_content.yaml | 26 +++++++------- .../tests/conftest.py | 1 + .../tests/test_gemini.py | 4 +-- tox.ini | 2 +- 6 files changed, 51 insertions(+), 45 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-0.txt b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-0.txt index f8290ac243..47b5ce576a 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-0.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-0.txt @@ -2,35 +2,36 @@ annotated-types==0.7.0 cachetools==5.5.0 certifi==2024.8.30 charset-normalizer==3.4.0 -Deprecated==1.2.15 +Deprecated==1.2.14 docstring_parser==0.16 +exceptiongroup==1.2.2 google-api-core==2.23.0 google-auth==2.36.0 -google-cloud-aiplatform==1.73.0 +google-cloud-aiplatform==1.74.0 google-cloud-bigquery==3.27.0 google-cloud-core==2.4.1 google-cloud-resource-manager==1.13.1 -google-cloud-storage==2.18.2 -google-crc32c==1.6.0 +google-cloud-storage==2.19.0 +google-crc32c==1.5.0 google-resumable-media==2.7.2 googleapis-common-protos==1.66.0 grpc-google-iam-v1==0.13.1 -grpcio==1.68.0 -grpcio-status==1.68.0 +grpcio==1.68.1 +grpcio-status==1.68.1 idna==3.10 -importlib_metadata==8.5.0 +importlib-metadata==6.11.0 iniconfig==2.0.0 multidict==6.1.0 -numpy==2.1.3 -packaging==24.2 +numpy==1.24.4 +packaging==24.0 pluggy==1.5.0 propcache==0.2.0 proto-plus==1.25.0 -protobuf==5.28.3 +protobuf==5.29.1 pyasn1==0.6.1 pyasn1_modules==0.4.1 -pydantic==2.10.1 -pydantic_core==2.27.1 +pydantic==2.8.2 +pydantic_core==2.20.1 pytest==7.4.4 pytest-asyncio==0.21.0 pytest-vcr==1.0.2 @@ -39,13 +40,14 @@ PyYAML==6.0.2 requests==2.32.3 rsa==4.9 shapely==2.0.6 -six==1.16.0 +six==1.17.0 +tomli==2.2.1 typing_extensions==4.12.2 -urllib3==2.2.3 +urllib3==1.26.20 vcrpy==6.0.2 -wrapt==1.17.0 -yarl==1.18.0 -zipp==3.21.0 +wrapt==1.16.0 +yarl==1.15.2 +zipp==3.20.2 # when updating, also update in pyproject.toml opentelemetry-api==1.28 diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-1.txt b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-1.txt index c1f50b6a2d..2d83c4145a 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-1.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-1.txt @@ -1,35 +1,37 @@ annotated-types==0.7.0 +asgiref==3.8.1 cachetools==5.5.0 certifi==2024.8.30 charset-normalizer==3.4.0 Deprecated==1.2.15 docstring_parser==0.16 +exceptiongroup==1.2.2 google-api-core==2.23.0 google-auth==2.36.0 -google-cloud-aiplatform==1.73.0 +google-cloud-aiplatform==1.74.0 google-cloud-bigquery==3.27.0 google-cloud-core==2.4.1 google-cloud-resource-manager==1.13.1 -google-cloud-storage==2.18.2 -google-crc32c==1.6.0 +google-cloud-storage==2.19.0 +google-crc32c==1.5.0 google-resumable-media==2.7.2 
googleapis-common-protos==1.66.0 grpc-google-iam-v1==0.13.1 -grpcio==1.68.0 -grpcio-status==1.68.0 +grpcio==1.68.1 +grpcio-status==1.68.1 idna==3.10 importlib_metadata==8.5.0 iniconfig==2.0.0 multidict==6.1.0 -numpy==2.1.3 +numpy==1.24.4 packaging==24.2 pluggy==1.5.0 propcache==0.2.0 proto-plus==1.25.0 -protobuf==5.28.3 +protobuf==5.29.1 pyasn1==0.6.1 pyasn1_modules==0.4.1 -pydantic==2.10.1 +pydantic==2.10.3 pydantic_core==2.27.1 pytest==7.4.4 pytest-asyncio==0.21.0 @@ -39,13 +41,14 @@ PyYAML==6.0.2 requests==2.32.3 rsa==4.9 shapely==2.0.6 -six==1.16.0 +six==1.17.0 +tomli==2.2.1 typing_extensions==4.12.2 -urllib3==2.2.3 +urllib3==1.26.20 vcrpy==6.0.2 wrapt==1.17.0 -yarl==1.18.0 -zipp==3.21.0 +yarl==1.15.2 +zipp==3.20.2 # test with the latest version of opentelemetry-api, sdk, and semantic conventions -e opentelemetry-instrumentation diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/cassettes/test_vertexai_generate_content.yaml b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/cassettes/test_vertexai_generate_content.yaml index 6999682200..4df29c7e3c 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/cassettes/test_vertexai_generate_content.yaml +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/cassettes/test_vertexai_generate_content.yaml @@ -24,27 +24,25 @@ interactions: body: string: "{\n \"candidates\": [\n {\n \"content\": {\n \"role\": \"model\",\n \"parts\": [\n {\n \"text\": \" The - image shows a table with a cup of coffee, a bowl of blueberries, and several - blueberry scones. There are also pink flowers on the table.\"\n }\n - \ ]\n },\n \"finishReason\": 1,\n \"safetyRatings\": + image shows a table with a cup of coffee, a bowl of blueberries, and a plate + of scones with blueberries on it. 
There are also pink flowers on the table.\"\n + \ }\n ]\n },\n \"finishReason\": 1,\n \"safetyRatings\": [\n {\n \"category\": 1,\n \"probability\": 1,\n - \ \"probabilityScore\": 0.024780273,\n \"severity\": 1,\n + \ \"probabilityScore\": 0.02758789,\n \"severity\": 1,\n \ \"severityScore\": 0.072753906\n },\n {\n \"category\": - 2,\n \"probability\": 1,\n \"probabilityScore\": 0.025512695,\n - \ \"severity\": 1,\n \"severityScore\": 0.06738281\n },\n + 2,\n \"probability\": 1,\n \"probabilityScore\": 0.026000977,\n + \ \"severity\": 1,\n \"severityScore\": 0.07080078\n },\n \ {\n \"category\": 3,\n \"probability\": 1,\n \"probabilityScore\": - 0.040283203,\n \"severity\": 1,\n \"severityScore\": 0.03515625\n + 0.04272461,\n \"severity\": 1,\n \"severityScore\": 0.036865234\n \ },\n {\n \"category\": 4,\n \"probability\": - 1,\n \"probabilityScore\": 0.07910156,\n \"severity\": 1,\n - \ \"severityScore\": 0.083984375\n }\n ],\n \"avgLogprobs\": - -0.068832365671793613\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": - 265,\n \"candidatesTokenCount\": 30,\n \"totalTokenCount\": 295\n },\n + 1,\n \"probabilityScore\": 0.08496094,\n \"severity\": 1,\n + \ \"severityScore\": 0.09033203\n }\n ],\n \"avgLogprobs\": + -0.09576562472752162\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": + 265,\n \"candidatesTokenCount\": 35,\n \"totalTokenCount\": 300\n },\n \ \"modelVersion\": \"gemini-pro-vision\"\n}\n" headers: Cache-Control: - private - Content-Encoding: - - gzip Content-Type: - application/json; charset=UTF-8 Transfer-Encoding: @@ -53,6 +51,8 @@ interactions: - Origin - X-Origin - Referer + content-length: + - '1296' status: code: 200 message: OK diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py index 605ae2c784..095cda2d75 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py @@ -90,6 +90,7 @@ def before_response_cb(response: MutableMapping[str, Any]): return response return { + "decode_compressed_response": True, "before_record_request": before_record_cb, "before_record_response": before_response_cb, "ignore_hosts": ["oauth2.googleapis.com"], diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py index 18fd0dad7e..e7b475d787 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py @@ -25,12 +25,12 @@ def test_vertexai_generate_content(exporter): vertexai_span = spans[0] assert len(spans) == 1 - assert vertexai_span.attributes == { + assert dict(vertexai_span.attributes) == { "gen_ai.system": "vertex_ai", "gen_ai.operation.name": "text_completion", "gen_ai.request.model": "gemini-pro-vision", "gen_ai.response.model": "gemini-pro-vision", - "gen_ai.usage.output_tokens": 30, + "gen_ai.usage.output_tokens": 35, "gen_ai.usage.input_tokens": 265, } diff --git a/tox.ini b/tox.ini index 17af332712..7f79134b82 100644 --- a/tox.ini +++ b/tox.ini @@ -960,7 +960,7 @@ commands = test-instrumentation-openai-v2: pytest {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests {posargs} lint-instrumentation-openai-v2: sh -c "cd instrumentation-genai && pylint 
--rcfile ../.pylintrc opentelemetry-instrumentation-openai-v2" - test-instrumentation-vertexai-v2: pytest {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests {posargs} + test-instrumentation-vertexai-v2: pytest {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests --vcr-record=none {posargs} lint-instrumentation-vertexai-v2: sh -c "cd instrumentation-genai && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-vertexai-v2" test-instrumentation-sio-pika: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-pika/tests {posargs} From b87ab017f675e810a0643e2bc693e08e1ce4c170 Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Thu, 5 Dec 2024 19:46:03 +0000 Subject: [PATCH 12/16] generate and lint --- .../tests/conftest.py | 12 +++++------- .../tests/test_gemini.py | 2 +- .../opentelemetry/instrumentation/bootstrap_gen.py | 4 ++++ 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py index 095cda2d75..938485442c 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py @@ -1,11 +1,14 @@ """Unit tests configuration module.""" -from os import replace import re from typing import Any, Mapping, MutableMapping import pytest +import vertexai from google.auth.credentials import AnonymousCredentials +from vcr import VCR +from vcr.record_mode import RecordMode +from vcr.request import Request from opentelemetry import trace from opentelemetry.instrumentation.vertexai_v2 import VertexAIInstrumentor @@ -17,11 +20,6 @@ pytest_plugins = [] -import vertexai -from vcr import VCR -from vcr.record_mode import RecordMode -from vcr.request import Request - @pytest.fixture(scope="session") def exporter(): @@ -81,7 +79,7 @@ def filter_headers(headers: Mapping[str, str]) -> Mapping[str, str]: def before_record_cb(request: Request): request.headers = filter_headers(request.headers) request.uri = re.sub( - r"/projects/[^/]+/", f"/projects/fake-project/", request.uri + r"/projects/[^/]+/", "/projects/fake-project/", request.uri ) return request diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py index e7b475d787..59ae91cccf 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py @@ -7,7 +7,7 @@ @pytest.mark.vcr def test_vertexai_generate_content(exporter): multimodal_model = GenerativeModel("gemini-pro-vision") - response = multimodal_model.generate_content( + multimodal_model.generate_content( [ Part.from_uri( "gs://generativeai-downloads/images/scones.jpg", diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py index a292299d70..0dffd10c24 100644 --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -20,6 +20,10 @@ "library": "openai >= 1.26.0", "instrumentation": "opentelemetry-instrumentation-openai-v2==2.1b0.dev", }, + { + "library": 
"google-cloud-aiplatform >= 1.64", + "instrumentation": "opentelemetry-instrumentation-vertexai-v2==2.1b0.dev", + }, { "library": "aio_pika >= 7.2.0, < 10.0.0", "instrumentation": "opentelemetry-instrumentation-aio-pika==0.50b0.dev", From 858c23ad50105a01268fc19fc3a218340f6f3bc1 Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Thu, 5 Dec 2024 20:54:07 +0000 Subject: [PATCH 13/16] Remove placeholder/disabled tests and fix lint --- .../instrumentation/vertexai_v2/__init__.py | 13 +- .../instrumentation/vertexai_v2/utils.py | 11 +- .../test_vertexai_generate_content.yaml | 31 +- .../tests/conftest.py | 22 +- .../tests/disabled_test_bison.py | 289 ------------------ .../tests/test_gemini.py | 2 +- .../tests/test_placeholder.py | 2 - tox.ini | 2 +- 8 files changed, 42 insertions(+), 330 deletions(-) delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_bison.py delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_placeholder.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py index cb29480f4f..928486d4c3 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py @@ -143,7 +143,6 @@ def _set_span_attribute(span, name, value): if value is not None: if value != "": span.set_attribute(name, value) - return def _set_input_attributes( @@ -215,8 +214,6 @@ def _set_input_attributes( kwargs.get("frequency_penalty"), ) - return - @dont_throw def _set_response_attributes( @@ -347,12 +344,11 @@ async def _awrap( return _build_from_streaming_response( span, event_logger, response, llm_model ) - elif is_async_streaming_response(response): + if is_async_streaming_response(response): return _abuild_from_streaming_response( span, event_logger, response, llm_model ) - else: - _handle_response(span, event_logger, response, llm_model) + _handle_response(span, event_logger, response, llm_model) span.end() return response @@ -395,12 +391,11 @@ def _wrap( return _build_from_streaming_response( span, event_logger, response, llm_model ) - elif is_async_streaming_response(response): + if is_async_streaming_response(response): return _abuild_from_streaming_response( span, event_logger, response, llm_model ) - else: - _handle_response(span, event_logger, response, llm_model) + _handle_response(span, event_logger, response, llm_model) span.end() return response diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py index 07be88d3f2..8c1cfdb3f2 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py @@ -14,9 +14,13 @@ import logging import traceback +from typing import Callable, Optional, ParamSpec, TypeVar +P = ParamSpec("P") +R = TypeVar("R") -def dont_throw(func): + +def dont_throw(func: Callable[P, R]) -> Callable[P, Optional[R]]: 
""" A decorator that wraps the passed in function and logs exceptions instead of throwing them. @@ -26,14 +30,15 @@ def dont_throw(func): # Obtain a logger specific to the function's module logger = logging.getLogger(func.__module__) - def wrapper(*args, **kwargs): + def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]: try: return func(*args, **kwargs) - except Exception: + except Exception: # pylint: disable=broad-except logger.debug( "failed to trace in %s, error: %s", func.__name__, traceback.format_exc(), ) + return None return wrapper diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/cassettes/test_vertexai_generate_content.yaml b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/cassettes/test_vertexai_generate_content.yaml index 4df29c7e3c..2759f5730b 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/cassettes/test_vertexai_generate_content.yaml +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/cassettes/test_vertexai_generate_content.yaml @@ -25,21 +25,20 @@ interactions: string: "{\n \"candidates\": [\n {\n \"content\": {\n \"role\": \"model\",\n \"parts\": [\n {\n \"text\": \" The image shows a table with a cup of coffee, a bowl of blueberries, and a plate - of scones with blueberries on it. There are also pink flowers on the table.\"\n - \ }\n ]\n },\n \"finishReason\": 1,\n \"safetyRatings\": - [\n {\n \"category\": 1,\n \"probability\": 1,\n - \ \"probabilityScore\": 0.02758789,\n \"severity\": 1,\n - \ \"severityScore\": 0.072753906\n },\n {\n \"category\": - 2,\n \"probability\": 1,\n \"probabilityScore\": 0.026000977,\n - \ \"severity\": 1,\n \"severityScore\": 0.07080078\n },\n - \ {\n \"category\": 3,\n \"probability\": 1,\n \"probabilityScore\": - 0.04272461,\n \"severity\": 1,\n \"severityScore\": 0.036865234\n - \ },\n {\n \"category\": 4,\n \"probability\": - 1,\n \"probabilityScore\": 0.08496094,\n \"severity\": 1,\n - \ \"severityScore\": 0.09033203\n }\n ],\n \"avgLogprobs\": - -0.09576562472752162\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": - 265,\n \"candidatesTokenCount\": 35,\n \"totalTokenCount\": 300\n },\n - \ \"modelVersion\": \"gemini-pro-vision\"\n}\n" + of scones. 
There are also some flowers on the table.\"\n }\n ]\n + \ },\n \"finishReason\": 1,\n \"safetyRatings\": [\n {\n + \ \"category\": 1,\n \"probability\": 1,\n \"probabilityScore\": + 0.02331543,\n \"severity\": 1,\n \"severityScore\": 0.05493164\n + \ },\n {\n \"category\": 2,\n \"probability\": + 1,\n \"probabilityScore\": 0.026367188,\n \"severity\": + 1,\n \"severityScore\": 0.05493164\n },\n {\n \"category\": + 3,\n \"probability\": 1,\n \"probabilityScore\": 0.046142578,\n + \ \"severity\": 1,\n \"severityScore\": 0.030639648\n },\n + \ {\n \"category\": 4,\n \"probability\": 1,\n \"probabilityScore\": + 0.080566406,\n \"severity\": 1,\n \"severityScore\": 0.095214844\n + \ }\n ],\n \"avgLogprobs\": -0.11595650642148909\n }\n + \ ],\n \"usageMetadata\": {\n \"promptTokenCount\": 265,\n \"candidatesTokenCount\": + 31,\n \"totalTokenCount\": 296\n },\n \"modelVersion\": \"gemini-pro-vision\"\n}\n" headers: Cache-Control: - private @@ -52,7 +51,7 @@ interactions: - X-Origin - Referer content-length: - - '1296' + - '1275' status: code: 200 message: OK diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py index 938485442c..183320cc68 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py @@ -20,11 +20,13 @@ pytest_plugins = [] +FAKE_PROJECT = "fake-project" + @pytest.fixture(scope="session") def exporter(): - exporter = InMemorySpanExporter() - processor = SimpleSpanProcessor(exporter) + span_exporter = InMemorySpanExporter() + processor = SimpleSpanProcessor(span_exporter) provider = TracerProvider() provider.add_span_processor(processor) @@ -32,11 +34,11 @@ def exporter(): VertexAIInstrumentor().instrument() - return exporter + return span_exporter @pytest.fixture(autouse=True) -def clear_exporter(exporter): +def clear_exporter(exporter): # pylint: disable=redefined-outer-name exporter.clear() @@ -50,10 +52,12 @@ def vertexai_init(vcr: VCR) -> None: # When not recording (in CI), don't do any auth. That prevents trying to read application # default credentials from the filesystem or metadata server and oauth token exchange. This # is not the interesting part of our instrumentation to test. 
- if vcr.record_mode is RecordMode.NONE: - vertexai.init(credentials=AnonymousCredentials()) - else: - vertexai.init() + print(f"VCR Mode is {vcr.record_mode=}, {RecordMode.NONE}") + vertex_init_kwargs = {"api_transport": "rest"} + if vcr.record_mode == RecordMode.NONE: + vertex_init_kwargs["credentials"] = AnonymousCredentials() + vertex_init_kwargs["project"] = FAKE_PROJECT + vertexai.init(**vertex_init_kwargs) @pytest.fixture(scope="module") @@ -79,7 +83,7 @@ def filter_headers(headers: Mapping[str, str]) -> Mapping[str, str]: def before_record_cb(request: Request): request.headers = filter_headers(request.headers) request.uri = re.sub( - r"/projects/[^/]+/", "/projects/fake-project/", request.uri + r"/projects/[^/]+/", f"/projects/{FAKE_PROJECT}/", request.uri ) return request diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_bison.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_bison.py deleted file mode 100644 index d6fcd301d2..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_bison.py +++ /dev/null @@ -1,289 +0,0 @@ -import asyncio - -import pytest -import vertexai -from vertexai.language_models import ( - ChatModel, - InputOutputTextPair, - TextGenerationModel, -) - -from opentelemetry.semconv_ai import SpanAttributes - -vertexai.init() - - -@pytest.mark.vcr -def test_vertexai_predict(exporter): - parameters = { - "max_output_tokens": 256, - "top_p": 0.8, - "top_k": 40, - } - - model = TextGenerationModel.from_pretrained("text-bison@001") - response = model.predict( - "Give me ten interview questions for the role of program manager.", - **parameters, - ) - - response = response.text - - spans = exporter.get_finished_spans() - assert [span.name for span in spans] == [ - "vertexai.predict", - ] - - vertexai_span = spans[0] - assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] - == "text-bison@001" - ) - assert ( - "Give me ten interview questions for the role of program manager." - in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] - ) - assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.8 - assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 - ) - assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 - assert ( - vertexai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"] - == response - ) - - -@pytest.mark.vcr -def test_vertexai_predict_async(exporter): - async def async_predict_text() -> str: - """Ideation example with a Large Language Model""" - - parameters = { - "max_output_tokens": 256, - "top_p": 0.8, - "top_k": 40, - } - - model = TextGenerationModel.from_pretrained("text-bison@001") - response = await model.predict_async( - "Give me ten interview questions for the role of program manager.", - **parameters, - ) - - return response.text - - response = asyncio.run(async_predict_text()) - - spans = exporter.get_finished_spans() - assert [span.name for span in spans] == [ - "vertexai.predict", - ] - - vertexai_span = spans[0] - assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] - == "text-bison@001" - ) - assert ( - "Give me ten interview questions for the role of program manager." 
- in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] - ) - assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.8 - assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 - ) - assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 - assert ( - vertexai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"] - == response - ) - - -@pytest.mark.vcr -def test_vertexai_stream(exporter): - text_generation_model = TextGenerationModel.from_pretrained("text-bison") - parameters = { - "max_output_tokens": 256, - "top_p": 0.8, - "top_k": 40, - } - responses = text_generation_model.predict_streaming( - prompt="Give me ten interview questions for the role of program manager.", - **parameters, - ) - - result = [response.text for response in responses] - response = result - - spans = exporter.get_finished_spans() - assert [span.name for span in spans] == [ - "vertexai.predict", - ] - - vertexai_span = spans[0] - assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] - == "text-bison" - ) - assert ( - "Give me ten interview questions for the role of program manager." - in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] - ) - assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.8 - assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 - ) - assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 - assert vertexai_span.attributes[ - f"{SpanAttributes.LLM_COMPLETIONS}.0.content" - ] == "".join(response) - - -@pytest.mark.vcr -def test_vertexai_stream_async(exporter): - async def async_streaming_prediction() -> list: - """Streaming Text Example with a Large Language Model""" - - text_generation_model = TextGenerationModel.from_pretrained( - "text-bison" - ) - parameters = { - "max_output_tokens": 256, - "top_p": 0.8, - "top_k": 40, - } - - responses = text_generation_model.predict_streaming_async( - prompt="Give me ten interview questions for the role of program manager.", - **parameters, - ) - result = [response.text async for response in responses] - return result - - response = asyncio.run(async_streaming_prediction()) - - spans = exporter.get_finished_spans() - assert [span.name for span in spans] == [ - "vertexai.predict", - ] - - vertexai_span = spans[0] - assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] - == "text-bison" - ) - assert ( - "Give me ten interview questions for the role of program manager." - in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] - ) - assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.8 - assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 - ) - assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 - assert vertexai_span.attributes[ - f"{SpanAttributes.LLM_COMPLETIONS}.0.content" - ] == "".join(response) - - -@pytest.mark.vcr -def test_vertexai_chat(exporter): - chat_model = ChatModel.from_pretrained("chat-bison@001") - - parameters = { - "max_output_tokens": 256, - "top_p": 0.95, - "top_k": 40, - } - - chat = chat_model.start_chat( - context="My name is Miles. 
You are an astronomer, knowledgeable about the solar system.", - examples=[ - InputOutputTextPair( - input_text="How many moons does Mars have?", - output_text="The planet Mars has two moons, Phobos and Deimos.", - ), - ], - ) - - response = chat.send_message( - "How many planets are there in the solar system?", **parameters - ) - - response = response.text - - spans = exporter.get_finished_spans() - assert [span.name for span in spans] == [ - "vertexai.send_message", - ] - - vertexai_span = spans[0] - assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] - == "chat-bison@001" - ) - assert ( - "How many planets are there in the solar system?" - in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] - ) - assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.95 - assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 - ) - assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 - assert ( - vertexai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"] - == response - ) - - -@pytest.mark.vcr -def test_vertexai_chat_stream(exporter): - chat_model = ChatModel.from_pretrained("chat-bison@001") - - parameters = { - "temperature": 0.8, - "max_output_tokens": 256, - "top_p": 0.95, - "top_k": 40, - } - - chat = chat_model.start_chat( - context="My name is Miles. You are an astronomer, knowledgeable about the solar system.", - examples=[ - InputOutputTextPair( - input_text="How many moons does Mars have?", - output_text="The planet Mars has two moons, Phobos and Deimos.", - ), - ], - ) - - responses = chat.send_message_streaming( - message="How many planets are there in the solar system?", **parameters - ) - - result = [response.text for response in responses] - response = result - - spans = exporter.get_finished_spans() - assert [span.name for span in spans] == [ - "vertexai.send_message", - ] - - vertexai_span = spans[0] - assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] - == "chat-bison@001" - ) - assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.95 - assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] == 0.8 - ) - assert ( - vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256 - ) - assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40 - assert vertexai_span.attributes[ - f"{SpanAttributes.LLM_COMPLETIONS}.0.content" - ] == "".join(response) diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py index 59ae91cccf..87a85e4961 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py @@ -30,7 +30,7 @@ def test_vertexai_generate_content(exporter): "gen_ai.operation.name": "text_completion", "gen_ai.request.model": "gemini-pro-vision", "gen_ai.response.model": "gemini-pro-vision", - "gen_ai.usage.output_tokens": 35, + "gen_ai.usage.output_tokens": 31, "gen_ai.usage.input_tokens": 265, } diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_placeholder.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_placeholder.py deleted file mode 100644 index 201975fcc0..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_placeholder.py +++ /dev/null @@ -1,2 +0,0 @@ -def 
test_placeholder(): - pass diff --git a/tox.ini b/tox.ini index 7f79134b82..cc242ff3c0 100644 --- a/tox.ini +++ b/tox.ini @@ -960,7 +960,7 @@ commands = test-instrumentation-openai-v2: pytest {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests {posargs} lint-instrumentation-openai-v2: sh -c "cd instrumentation-genai && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-openai-v2" - test-instrumentation-vertexai-v2: pytest {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests --vcr-record=none {posargs} + test-instrumentation-vertexai-v2: pytest {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests {posargs:--vcr-record=none} lint-instrumentation-vertexai-v2: sh -c "cd instrumentation-genai && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-vertexai-v2" test-instrumentation-sio-pika: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-pika/tests {posargs} From 6a8878d3f863bfd9f17b5ce3cbcc3679f7588942 Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Thu, 5 Dec 2024 21:47:56 +0000 Subject: [PATCH 14/16] try to get CI passing --- .../src/opentelemetry/instrumentation/vertexai_v2/utils.py | 7 +++---- .../test-requirements-0.txt | 1 - .../test-requirements-1.txt | 1 - tox.ini | 2 +- 4 files changed, 4 insertions(+), 7 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py index 8c1cfdb3f2..323f11f6f2 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/utils.py @@ -14,13 +14,12 @@ import logging import traceback -from typing import Callable, Optional, ParamSpec, TypeVar +from typing import Any, Callable, Optional, TypeVar -P = ParamSpec("P") R = TypeVar("R") -def dont_throw(func: Callable[P, R]) -> Callable[P, Optional[R]]: +def dont_throw(func: Callable[..., R]) -> Callable[..., Optional[R]]: """ A decorator that wraps the passed in function and logs exceptions instead of throwing them. 
@@ -30,7 +29,7 @@ def dont_throw(func: Callable[P, R]) -> Callable[P, Optional[R]]: # Obtain a logger specific to the function's module logger = logging.getLogger(func.__module__) - def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]: + def wrapper(*args: Any, **kwargs: Any) -> Optional[R]: try: return func(*args, **kwargs) except Exception: # pylint: disable=broad-except diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-0.txt b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-0.txt index 47b5ce576a..537073dee1 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-0.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-0.txt @@ -22,7 +22,6 @@ idna==3.10 importlib-metadata==6.11.0 iniconfig==2.0.0 multidict==6.1.0 -numpy==1.24.4 packaging==24.0 pluggy==1.5.0 propcache==0.2.0 diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-1.txt b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-1.txt index 2d83c4145a..6959df4655 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-1.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/test-requirements-1.txt @@ -23,7 +23,6 @@ idna==3.10 importlib_metadata==8.5.0 iniconfig==2.0.0 multidict==6.1.0 -numpy==1.24.4 packaging==24.2 pluggy==1.5.0 propcache==0.2.0 diff --git a/tox.ini b/tox.ini index cc242ff3c0..7f79134b82 100644 --- a/tox.ini +++ b/tox.ini @@ -960,7 +960,7 @@ commands = test-instrumentation-openai-v2: pytest {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests {posargs} lint-instrumentation-openai-v2: sh -c "cd instrumentation-genai && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-openai-v2" - test-instrumentation-vertexai-v2: pytest {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests {posargs:--vcr-record=none} + test-instrumentation-vertexai-v2: pytest {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests --vcr-record=none {posargs} lint-instrumentation-vertexai-v2: sh -c "cd instrumentation-genai && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-vertexai-v2" test-instrumentation-sio-pika: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-pika/tests {posargs} From b25a9e412aaad9532cf72d3d0951335fd5116dca Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Fri, 6 Dec 2024 03:58:31 +0000 Subject: [PATCH 15/16] Disable pypy since shapely fails to compile its native extension --- .github/workflows/test_0.yml | 72 ++++++++++++++++++------------------ .github/workflows/test_1.yml | 72 ++++++++++++++++++------------------ .github/workflows/test_2.yml | 36 ------------------ tox.ini | 4 +- 4 files changed, 75 insertions(+), 109 deletions(-) diff --git a/.github/workflows/test_0.yml b/.github/workflows/test_0.yml index aa9c533f78..bdf4bd5a7c 100644 --- a/.github/workflows/test_0.yml +++ b/.github/workflows/test_0.yml @@ -412,42 +412,6 @@ jobs: - name: Run tests run: tox -e py312-test-instrumentation-vertexai-v2-1 -- -ra - pypy3-test-instrumentation-vertexai-v2-0_ubuntu-latest: - name: instrumentation-vertexai-v2-0 pypy-3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python pypy-3.8 - uses: actions/setup-python@v5 - with: - 
python-version: "pypy-3.8" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e pypy3-test-instrumentation-vertexai-v2-0 -- -ra - - pypy3-test-instrumentation-vertexai-v2-1_ubuntu-latest: - name: instrumentation-vertexai-v2-1 pypy-3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python pypy-3.8 - uses: actions/setup-python@v5 - with: - python-version: "pypy-3.8" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e pypy3-test-instrumentation-vertexai-v2-1 -- -ra - py38-test-resource-detector-container_ubuntu-latest: name: resource-detector-container 3.8 Ubuntu runs-on: ubuntu-latest @@ -4515,3 +4479,39 @@ jobs: - name: Run tests run: tox -e pypy3-test-instrumentation-mysql-0 -- -ra + + pypy3-test-instrumentation-mysql-1_ubuntu-latest: + name: instrumentation-mysql-1 pypy-3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python pypy-3.8 + uses: actions/setup-python@v5 + with: + python-version: "pypy-3.8" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e pypy3-test-instrumentation-mysql-1 -- -ra + + py38-test-instrumentation-mysqlclient_ubuntu-latest: + name: instrumentation-mysqlclient 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py38-test-instrumentation-mysqlclient -- -ra diff --git a/.github/workflows/test_1.yml b/.github/workflows/test_1.yml index 8a65b3fd6b..e6aa293f9d 100644 --- a/.github/workflows/test_1.yml +++ b/.github/workflows/test_1.yml @@ -16,42 +16,6 @@ env: jobs: - pypy3-test-instrumentation-mysql-1_ubuntu-latest: - name: instrumentation-mysql-1 pypy-3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python pypy-3.8 - uses: actions/setup-python@v5 - with: - python-version: "pypy-3.8" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e pypy3-test-instrumentation-mysql-1 -- -ra - - py38-test-instrumentation-mysqlclient_ubuntu-latest: - name: instrumentation-mysqlclient 3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.8 - uses: actions/setup-python@v5 - with: - python-version: "3.8" - - - name: Install tox - run: pip install tox - - - name: Run tests - run: tox -e py38-test-instrumentation-mysqlclient -- -ra - py39-test-instrumentation-mysqlclient_ubuntu-latest: name: instrumentation-mysqlclient 3.9 Ubuntu runs-on: ubuntu-latest @@ -4515,3 +4479,39 @@ jobs: - name: Run tests run: tox -e py310-test-instrumentation-asyncio -- -ra + + py311-test-instrumentation-asyncio_ubuntu-latest: + name: instrumentation-asyncio 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install tox + run: pip install tox + + - name: Run tests + run: tox -e py311-test-instrumentation-asyncio -- -ra + + py312-test-instrumentation-asyncio_ubuntu-latest: + name: instrumentation-asyncio 
3.12 Ubuntu
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repo @ SHA - ${{ github.sha }}
+        uses: actions/checkout@v4
+
+      - name: Set up Python 3.12
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.12"
+
+      - name: Install tox
+        run: pip install tox
+
+      - name: Run tests
+        run: tox -e py312-test-instrumentation-asyncio -- -ra
diff --git a/.github/workflows/test_2.yml b/.github/workflows/test_2.yml
index b805ee4509..7614c8988f 100644
--- a/.github/workflows/test_2.yml
+++ b/.github/workflows/test_2.yml
@@ -16,42 +16,6 @@ env:
 
 jobs:
 
-  py311-test-instrumentation-asyncio_ubuntu-latest:
-    name: instrumentation-asyncio 3.11 Ubuntu
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout repo @ SHA - ${{ github.sha }}
-        uses: actions/checkout@v4
-
-      - name: Set up Python 3.11
-        uses: actions/setup-python@v5
-        with:
-          python-version: "3.11"
-
-      - name: Install tox
-        run: pip install tox
-
-      - name: Run tests
-        run: tox -e py311-test-instrumentation-asyncio -- -ra
-
-  py312-test-instrumentation-asyncio_ubuntu-latest:
-    name: instrumentation-asyncio 3.12 Ubuntu
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout repo @ SHA - ${{ github.sha }}
-        uses: actions/checkout@v4
-
-      - name: Set up Python 3.12
-        uses: actions/setup-python@v5
-        with:
-          python-version: "3.12"
-
-      - name: Install tox
-        run: pip install tox
-
-      - name: Run tests
-        run: tox -e py312-test-instrumentation-asyncio -- -ra
-
   py38-test-instrumentation-cassandra_ubuntu-latest:
     name: instrumentation-cassandra 3.8 Ubuntu
     runs-on: ubuntu-latest
diff --git a/tox.ini b/tox.ini
index 7f79134b82..d911a1ff25 100644
--- a/tox.ini
+++ b/tox.ini
@@ -13,7 +13,9 @@ envlist =
 
     ; instrumentation-vertexai
     py3{8,9,10,11,12}-test-instrumentation-vertexai-v2-{0,1}
-    pypy3-test-instrumentation-vertexai-v2-{0,1}
+    # Disable pypy, which fails in CI because shapely does not have wheels for PyPy and requires
+    # some C libraries
+    ## pypy3-test-instrumentation-vertexai-v2-{0,1}
    lint-instrumentation-vertexai-v2
 
     ; opentelemetry-resource-detector-container

From 5dfe192f9af588d33b67053d9a5d85f2f175f5ae Mon Sep 17 00:00:00 2001
From: Aaron Abbott
Date: Fri, 6 Dec 2024 04:52:35 +0000
Subject: [PATCH 16/16] Pretty-format YAML cassettes

---
 .../test_vertexai_generate_content.yaml       | 98 ++++++++++++++-----
 .../tests/conftest.py                         | 69 +++++++++++++
 .../tests/test_gemini.py                      |  2 +-
 3 files changed, 143 insertions(+), 26 deletions(-)

diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/cassettes/test_vertexai_generate_content.yaml b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/cassettes/test_vertexai_generate_content.yaml
index 2759f5730b..48cf3524e1 100644
--- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/cassettes/test_vertexai_generate_content.yaml
+++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/cassettes/test_vertexai_generate_content.yaml
@@ -1,10 +1,24 @@
 interactions:
 - request:
-    body: "{\n  \"contents\": [\n    {\n      \"role\": \"user\",\n      \"parts\":
-      [\n        {\n          \"fileData\": {\n            \"mimeType\": \"image/jpeg\",\n
-      \           \"fileUri\": \"gs://generativeai-downloads/images/scones.jpg\"\n
-      \         }\n        },\n        {\n          \"text\": \"what is shown in this
-      image?\"\n        }\n      ]\n    }\n  ]\n}"
+    body: |-
+      {
+        "contents": [
+          {
+            "role": "user",
+            "parts": [
+              {
+                "fileData": {
+                  "mimeType": "image/jpeg",
+                  "fileUri": "gs://generativeai-downloads/images/scones.jpg"
+                }
+              },
+              {
+                "text": "what is shown in this image?"
+ } + ] + } + ] + } headers: Accept: - '*/*' @@ -22,26 +36,60 @@ interactions: uri: https://us-central1-aiplatform.googleapis.com/v1beta1/projects/fake-project/locations/us-central1/publishers/google/models/gemini-pro-vision:generateContent?%24alt=json%3Benum-encoding%3Dint response: body: - string: "{\n \"candidates\": [\n {\n \"content\": {\n \"role\": - \"model\",\n \"parts\": [\n {\n \"text\": \" The - image shows a table with a cup of coffee, a bowl of blueberries, and a plate - of scones. There are also some flowers on the table.\"\n }\n ]\n - \ },\n \"finishReason\": 1,\n \"safetyRatings\": [\n {\n - \ \"category\": 1,\n \"probability\": 1,\n \"probabilityScore\": - 0.02331543,\n \"severity\": 1,\n \"severityScore\": 0.05493164\n - \ },\n {\n \"category\": 2,\n \"probability\": - 1,\n \"probabilityScore\": 0.026367188,\n \"severity\": - 1,\n \"severityScore\": 0.05493164\n },\n {\n \"category\": - 3,\n \"probability\": 1,\n \"probabilityScore\": 0.046142578,\n - \ \"severity\": 1,\n \"severityScore\": 0.030639648\n },\n - \ {\n \"category\": 4,\n \"probability\": 1,\n \"probabilityScore\": - 0.080566406,\n \"severity\": 1,\n \"severityScore\": 0.095214844\n - \ }\n ],\n \"avgLogprobs\": -0.11595650642148909\n }\n - \ ],\n \"usageMetadata\": {\n \"promptTokenCount\": 265,\n \"candidatesTokenCount\": - 31,\n \"totalTokenCount\": 296\n },\n \"modelVersion\": \"gemini-pro-vision\"\n}\n" + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": " The image shows a table with a cup of coffee, a bowl of blueberries, and a plate of scones with blueberries on top. There are also pink flowers on the table." + } + ] + }, + "finishReason": 1, + "safetyRatings": [ + { + "category": 1, + "probability": 1, + "probabilityScore": 0.025512695, + "severity": 1, + "severityScore": 0.06933594 + }, + { + "category": 2, + "probability": 1, + "probabilityScore": 0.026367188, + "severity": 1, + "severityScore": 0.07080078 + }, + { + "category": 3, + "probability": 1, + "probabilityScore": 0.041503906, + "severity": 1, + "severityScore": 0.03466797 + }, + { + "category": 4, + "probability": 1, + "probabilityScore": 0.091308594, + "severity": 1, + "severityScore": 0.09033203 + } + ], + "avgLogprobs": -0.09557106835501535 + } + ], + "usageMetadata": { + "promptTokenCount": 265, + "candidatesTokenCount": 35, + "totalTokenCount": 300 + }, + "modelVersion": "gemini-pro-vision" + } headers: - Cache-Control: - - private Content-Type: - application/json; charset=UTF-8 Transfer-Encoding: @@ -51,7 +99,7 @@ interactions: - X-Origin - Referer content-length: - - '1275' + - '1299' status: code: 200 message: OK diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py index 183320cc68..32fc19a333 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py @@ -1,10 +1,12 @@ """Unit tests configuration module.""" +import json import re from typing import Any, Mapping, MutableMapping import pytest import vertexai +import yaml from google.auth.credentials import AnonymousCredentials from vcr import VCR from vcr.record_mode import RecordMode @@ -97,3 +99,70 @@ def before_response_cb(response: MutableMapping[str, Any]): "before_record_response": before_response_cb, "ignore_hosts": ["oauth2.googleapis.com"], } + + +class LiteralBlockScalar(str): + 
"""Formats the string as a literal block scalar, preserving whitespace and + without interpreting escape characters""" + + +def literal_block_scalar_presenter(dumper, data): + """Represents a scalar string as a literal block, via '|' syntax""" + return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|") + + +yaml.add_representer(LiteralBlockScalar, literal_block_scalar_presenter) + + +def process_string_value(string_value): + """Pretty-prints JSON or returns long strings as a LiteralBlockScalar""" + try: + json_data = json.loads(string_value) + return LiteralBlockScalar(json.dumps(json_data, indent=2)) + except (ValueError, TypeError): + if len(string_value) > 80: + return LiteralBlockScalar(string_value) + return string_value + + +def convert_body_to_literal(data): + """Searches the data for body strings, attempting to pretty-print JSON""" + if isinstance(data, dict): + for key, value in data.items(): + # Handle response body case (e.g., response.body.string) + if key == "body" and isinstance(value, dict) and "string" in value: + value["string"] = process_string_value(value["string"]) + + # Handle request body case (e.g., request.body) + elif key == "body" and isinstance(value, str): + data[key] = process_string_value(value) + + else: + convert_body_to_literal(value) + + elif isinstance(data, list): + for idx, choice in enumerate(data): + data[idx] = convert_body_to_literal(choice) + + return data + + +class PrettyPrintJSONBody: + """This makes request and response body recordings more readable.""" + + @staticmethod + def serialize(cassette_dict): + cassette_dict = convert_body_to_literal(cassette_dict) + return yaml.dump( + cassette_dict, default_flow_style=False, allow_unicode=True + ) + + @staticmethod + def deserialize(cassette_string): + return yaml.load(cassette_string, Loader=yaml.Loader) + + +@pytest.fixture(scope="module", autouse=True) +def fixture_vcr(vcr): + vcr.register_serializer("yaml", PrettyPrintJSONBody) + return vcr diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py index 87a85e4961..59ae91cccf 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py +++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/test_gemini.py @@ -30,7 +30,7 @@ def test_vertexai_generate_content(exporter): "gen_ai.operation.name": "text_completion", "gen_ai.request.model": "gemini-pro-vision", "gen_ai.response.model": "gemini-pro-vision", - "gen_ai.usage.output_tokens": 31, + "gen_ai.usage.output_tokens": 35, "gen_ai.usage.input_tokens": 265, }