diff --git a/.github/workflows/lint_0.yml b/.github/workflows/lint_0.yml
index cc7cfd536f..b9e14f65dc 100644
--- a/.github/workflows/lint_0.yml
+++ b/.github/workflows/lint_0.yml
@@ -52,6 +52,24 @@ jobs:
- name: Run tests
run: tox -e lint-instrumentation-vertexai
+ lint-instrumentation-google-genai:
+ name: instrumentation-google-genai
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.13
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.13"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e lint-instrumentation-google-genai
+
lint-resource-detector-container:
name: resource-detector-container
runs-on: ubuntu-latest
diff --git a/.github/workflows/test_0.yml b/.github/workflows/test_0.yml
index 9ef80687bc..2639d50e1c 100644
--- a/.github/workflows/test_0.yml
+++ b/.github/workflows/test_0.yml
@@ -484,6 +484,186 @@ jobs:
- name: Run tests
run: tox -e py313-test-instrumentation-vertexai-latest -- -ra
+ py39-test-instrumentation-google-genai-oldest_ubuntu-latest:
+ name: instrumentation-google-genai-oldest 3.9 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.9
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.9"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py39-test-instrumentation-google-genai-oldest -- -ra
+
+ py39-test-instrumentation-google-genai-latest_ubuntu-latest:
+ name: instrumentation-google-genai-latest 3.9 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.9
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.9"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py39-test-instrumentation-google-genai-latest -- -ra
+
+ py310-test-instrumentation-google-genai-oldest_ubuntu-latest:
+ name: instrumentation-google-genai-oldest 3.10 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.10
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.10"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py310-test-instrumentation-google-genai-oldest -- -ra
+
+ py310-test-instrumentation-google-genai-latest_ubuntu-latest:
+ name: instrumentation-google-genai-latest 3.10 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.10
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.10"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py310-test-instrumentation-google-genai-latest -- -ra
+
+ py311-test-instrumentation-google-genai-oldest_ubuntu-latest:
+ name: instrumentation-google-genai-oldest 3.11 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.11
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.11"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py311-test-instrumentation-google-genai-oldest -- -ra
+
+ py311-test-instrumentation-google-genai-latest_ubuntu-latest:
+ name: instrumentation-google-genai-latest 3.11 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.11
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.11"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py311-test-instrumentation-google-genai-latest -- -ra
+
+ py312-test-instrumentation-google-genai-oldest_ubuntu-latest:
+ name: instrumentation-google-genai-oldest 3.12 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.12
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py312-test-instrumentation-google-genai-oldest -- -ra
+
+ py312-test-instrumentation-google-genai-latest_ubuntu-latest:
+ name: instrumentation-google-genai-latest 3.12 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.12
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py312-test-instrumentation-google-genai-latest -- -ra
+
+ py313-test-instrumentation-google-genai-oldest_ubuntu-latest:
+ name: instrumentation-google-genai-oldest 3.13 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.13
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.13"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py313-test-instrumentation-google-genai-oldest -- -ra
+
+ py313-test-instrumentation-google-genai-latest_ubuntu-latest:
+ name: instrumentation-google-genai-latest 3.13 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.13
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.13"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py313-test-instrumentation-google-genai-latest -- -ra
+
py38-test-resource-detector-container_ubuntu-latest:
name: resource-detector-container 3.8 Ubuntu
runs-on: ubuntu-latest
@@ -4335,183 +4515,3 @@ jobs:
- name: Run tests
run: tox -e py310-test-instrumentation-urllib3-0 -- -ra
-
- py310-test-instrumentation-urllib3-1_ubuntu-latest:
- name: instrumentation-urllib3-1 3.10 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python 3.10
- uses: actions/setup-python@v5
- with:
- python-version: "3.10"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e py310-test-instrumentation-urllib3-1 -- -ra
-
- py311-test-instrumentation-urllib3-0_ubuntu-latest:
- name: instrumentation-urllib3-0 3.11 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python 3.11
- uses: actions/setup-python@v5
- with:
- python-version: "3.11"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e py311-test-instrumentation-urllib3-0 -- -ra
-
- py311-test-instrumentation-urllib3-1_ubuntu-latest:
- name: instrumentation-urllib3-1 3.11 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python 3.11
- uses: actions/setup-python@v5
- with:
- python-version: "3.11"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e py311-test-instrumentation-urllib3-1 -- -ra
-
- py312-test-instrumentation-urllib3-0_ubuntu-latest:
- name: instrumentation-urllib3-0 3.12 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python 3.12
- uses: actions/setup-python@v5
- with:
- python-version: "3.12"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e py312-test-instrumentation-urllib3-0 -- -ra
-
- py312-test-instrumentation-urllib3-1_ubuntu-latest:
- name: instrumentation-urllib3-1 3.12 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python 3.12
- uses: actions/setup-python@v5
- with:
- python-version: "3.12"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e py312-test-instrumentation-urllib3-1 -- -ra
-
- py313-test-instrumentation-urllib3-0_ubuntu-latest:
- name: instrumentation-urllib3-0 3.13 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python 3.13
- uses: actions/setup-python@v5
- with:
- python-version: "3.13"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e py313-test-instrumentation-urllib3-0 -- -ra
-
- py313-test-instrumentation-urllib3-1_ubuntu-latest:
- name: instrumentation-urllib3-1 3.13 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python 3.13
- uses: actions/setup-python@v5
- with:
- python-version: "3.13"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e py313-test-instrumentation-urllib3-1 -- -ra
-
- pypy3-test-instrumentation-urllib3-0_ubuntu-latest:
- name: instrumentation-urllib3-0 pypy-3.8 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python pypy-3.8
- uses: actions/setup-python@v5
- with:
- python-version: "pypy-3.8"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e pypy3-test-instrumentation-urllib3-0 -- -ra
-
- pypy3-test-instrumentation-urllib3-1_ubuntu-latest:
- name: instrumentation-urllib3-1 pypy-3.8 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python pypy-3.8
- uses: actions/setup-python@v5
- with:
- python-version: "pypy-3.8"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e pypy3-test-instrumentation-urllib3-1 -- -ra
-
- py38-test-instrumentation-requests_ubuntu-latest:
- name: instrumentation-requests 3.8 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python 3.8
- uses: actions/setup-python@v5
- with:
- python-version: "3.8"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e py38-test-instrumentation-requests -- -ra
diff --git a/.github/workflows/test_1.yml b/.github/workflows/test_1.yml
index f2686cfe91..e3128205c1 100644
--- a/.github/workflows/test_1.yml
+++ b/.github/workflows/test_1.yml
@@ -16,6 +16,186 @@ env:
jobs:
+ py310-test-instrumentation-urllib3-1_ubuntu-latest:
+ name: instrumentation-urllib3-1 3.10 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.10
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.10"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py310-test-instrumentation-urllib3-1 -- -ra
+
+ py311-test-instrumentation-urllib3-0_ubuntu-latest:
+ name: instrumentation-urllib3-0 3.11 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.11
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.11"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py311-test-instrumentation-urllib3-0 -- -ra
+
+ py311-test-instrumentation-urllib3-1_ubuntu-latest:
+ name: instrumentation-urllib3-1 3.11 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.11
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.11"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py311-test-instrumentation-urllib3-1 -- -ra
+
+ py312-test-instrumentation-urllib3-0_ubuntu-latest:
+ name: instrumentation-urllib3-0 3.12 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.12
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py312-test-instrumentation-urllib3-0 -- -ra
+
+ py312-test-instrumentation-urllib3-1_ubuntu-latest:
+ name: instrumentation-urllib3-1 3.12 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.12
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py312-test-instrumentation-urllib3-1 -- -ra
+
+ py313-test-instrumentation-urllib3-0_ubuntu-latest:
+ name: instrumentation-urllib3-0 3.13 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.13
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.13"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py313-test-instrumentation-urllib3-0 -- -ra
+
+ py313-test-instrumentation-urllib3-1_ubuntu-latest:
+ name: instrumentation-urllib3-1 3.13 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.13
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.13"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py313-test-instrumentation-urllib3-1 -- -ra
+
+ pypy3-test-instrumentation-urllib3-0_ubuntu-latest:
+ name: instrumentation-urllib3-0 pypy-3.8 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python pypy-3.8
+ uses: actions/setup-python@v5
+ with:
+ python-version: "pypy-3.8"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e pypy3-test-instrumentation-urllib3-0 -- -ra
+
+ pypy3-test-instrumentation-urllib3-1_ubuntu-latest:
+ name: instrumentation-urllib3-1 pypy-3.8 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python pypy-3.8
+ uses: actions/setup-python@v5
+ with:
+ python-version: "pypy-3.8"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e pypy3-test-instrumentation-urllib3-1 -- -ra
+
+ py38-test-instrumentation-requests_ubuntu-latest:
+ name: instrumentation-requests 3.8 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.8
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.8"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py38-test-instrumentation-requests -- -ra
+
py39-test-instrumentation-requests_ubuntu-latest:
name: instrumentation-requests 3.9 Ubuntu
runs-on: ubuntu-latest
@@ -4335,183 +4515,3 @@ jobs:
- name: Run tests
run: tox -e py312-test-instrumentation-tortoiseorm -- -ra
-
- py313-test-instrumentation-tortoiseorm_ubuntu-latest:
- name: instrumentation-tortoiseorm 3.13 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python 3.13
- uses: actions/setup-python@v5
- with:
- python-version: "3.13"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e py313-test-instrumentation-tortoiseorm -- -ra
-
- pypy3-test-instrumentation-tortoiseorm_ubuntu-latest:
- name: instrumentation-tortoiseorm pypy-3.8 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python pypy-3.8
- uses: actions/setup-python@v5
- with:
- python-version: "pypy-3.8"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e pypy3-test-instrumentation-tortoiseorm -- -ra
-
- py38-test-instrumentation-httpx-0_ubuntu-latest:
- name: instrumentation-httpx-0 3.8 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python 3.8
- uses: actions/setup-python@v5
- with:
- python-version: "3.8"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e py38-test-instrumentation-httpx-0 -- -ra
-
- py38-test-instrumentation-httpx-1_ubuntu-latest:
- name: instrumentation-httpx-1 3.8 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python 3.8
- uses: actions/setup-python@v5
- with:
- python-version: "3.8"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e py38-test-instrumentation-httpx-1 -- -ra
-
- py39-test-instrumentation-httpx-0_ubuntu-latest:
- name: instrumentation-httpx-0 3.9 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python 3.9
- uses: actions/setup-python@v5
- with:
- python-version: "3.9"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e py39-test-instrumentation-httpx-0 -- -ra
-
- py39-test-instrumentation-httpx-1_ubuntu-latest:
- name: instrumentation-httpx-1 3.9 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python 3.9
- uses: actions/setup-python@v5
- with:
- python-version: "3.9"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e py39-test-instrumentation-httpx-1 -- -ra
-
- py310-test-instrumentation-httpx-0_ubuntu-latest:
- name: instrumentation-httpx-0 3.10 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python 3.10
- uses: actions/setup-python@v5
- with:
- python-version: "3.10"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e py310-test-instrumentation-httpx-0 -- -ra
-
- py310-test-instrumentation-httpx-1_ubuntu-latest:
- name: instrumentation-httpx-1 3.10 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python 3.10
- uses: actions/setup-python@v5
- with:
- python-version: "3.10"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e py310-test-instrumentation-httpx-1 -- -ra
-
- py311-test-instrumentation-httpx-0_ubuntu-latest:
- name: instrumentation-httpx-0 3.11 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python 3.11
- uses: actions/setup-python@v5
- with:
- python-version: "3.11"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e py311-test-instrumentation-httpx-0 -- -ra
-
- py311-test-instrumentation-httpx-1_ubuntu-latest:
- name: instrumentation-httpx-1 3.11 Ubuntu
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repo @ SHA - ${{ github.sha }}
- uses: actions/checkout@v4
-
- - name: Set up Python 3.11
- uses: actions/setup-python@v5
- with:
- python-version: "3.11"
-
- - name: Install tox
- run: pip install tox-uv
-
- - name: Run tests
- run: tox -e py311-test-instrumentation-httpx-1 -- -ra
diff --git a/.github/workflows/test_2.yml b/.github/workflows/test_2.yml
index 96ebfba82a..3b796e31d5 100644
--- a/.github/workflows/test_2.yml
+++ b/.github/workflows/test_2.yml
@@ -16,6 +16,186 @@ env:
jobs:
+ py313-test-instrumentation-tortoiseorm_ubuntu-latest:
+ name: instrumentation-tortoiseorm 3.13 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.13
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.13"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py313-test-instrumentation-tortoiseorm -- -ra
+
+ pypy3-test-instrumentation-tortoiseorm_ubuntu-latest:
+ name: instrumentation-tortoiseorm pypy-3.8 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python pypy-3.8
+ uses: actions/setup-python@v5
+ with:
+ python-version: "pypy-3.8"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e pypy3-test-instrumentation-tortoiseorm -- -ra
+
+ py38-test-instrumentation-httpx-0_ubuntu-latest:
+ name: instrumentation-httpx-0 3.8 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.8
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.8"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py38-test-instrumentation-httpx-0 -- -ra
+
+ py38-test-instrumentation-httpx-1_ubuntu-latest:
+ name: instrumentation-httpx-1 3.8 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.8
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.8"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py38-test-instrumentation-httpx-1 -- -ra
+
+ py39-test-instrumentation-httpx-0_ubuntu-latest:
+ name: instrumentation-httpx-0 3.9 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.9
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.9"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py39-test-instrumentation-httpx-0 -- -ra
+
+ py39-test-instrumentation-httpx-1_ubuntu-latest:
+ name: instrumentation-httpx-1 3.9 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.9
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.9"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py39-test-instrumentation-httpx-1 -- -ra
+
+ py310-test-instrumentation-httpx-0_ubuntu-latest:
+ name: instrumentation-httpx-0 3.10 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.10
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.10"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py310-test-instrumentation-httpx-0 -- -ra
+
+ py310-test-instrumentation-httpx-1_ubuntu-latest:
+ name: instrumentation-httpx-1 3.10 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.10
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.10"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py310-test-instrumentation-httpx-1 -- -ra
+
+ py311-test-instrumentation-httpx-0_ubuntu-latest:
+ name: instrumentation-httpx-0 3.11 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.11
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.11"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py311-test-instrumentation-httpx-0 -- -ra
+
+ py311-test-instrumentation-httpx-1_ubuntu-latest:
+ name: instrumentation-httpx-1 3.11 Ubuntu
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo @ SHA - ${{ github.sha }}
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3.11
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.11"
+
+ - name: Install tox
+ run: pip install tox-uv
+
+ - name: Run tests
+ run: tox -e py311-test-instrumentation-httpx-1 -- -ra
+
py312-test-instrumentation-httpx-0_ubuntu-latest:
name: instrumentation-httpx-0 3.12 Ubuntu
runs-on: ubuntu-latest
diff --git a/.pylintrc b/.pylintrc
index bc3b25c978..c00daa4fba 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -7,7 +7,7 @@ extension-pkg-whitelist=cassandra
# Add list of files or directories to be excluded. They should be base names, not
# paths.
-ignore=CVS,gen,Dockerfile,docker-compose.yml,README.md,requirements.txt,docs,.venv
+ignore=CVS,gen,Dockerfile,docker-compose.yml,README.md,requirements.txt,docs,.venv,site-packages,.tox
# Add files or directories matching the regex patterns to be excluded. The
# regex matches against base names, not paths.
diff --git a/eachdist.ini b/eachdist.ini
index bad0f8295a..83d00a2a09 100644
--- a/eachdist.ini
+++ b/eachdist.ini
@@ -50,6 +50,7 @@ packages=
opentelemetry-resource-detector-azure
opentelemetry-sdk-extension-aws
opentelemetry-propagator-aws-xray
+ opentelemetry-instrumentation-google-genai
opentelemetry-instrumentation-vertexai
opentelemetry-instrumentation-openai-v2
opentelemetry-instrumentation-test
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/.gitignore b/instrumentation-genai/opentelemetry-instrumentation-google-genai/.gitignore
new file mode 100644
index 0000000000..ecc125321e
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/.gitignore
@@ -0,0 +1,3 @@
+.build
+.test
+dist
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/CHANGELOG.md b/instrumentation-genai/opentelemetry-instrumentation-google-genai/CHANGELOG.md
new file mode 100644
index 0000000000..21e71b8854
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/CHANGELOG.md
@@ -0,0 +1,11 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## Unreleased
+
+Create an initial version of OpenTelemetry instrumentation for github.com/googleapis/python-genai.
+([#3256](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3256))
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/LICENSE b/instrumentation-genai/opentelemetry-instrumentation-google-genai/LICENSE
new file mode 100644
index 0000000000..f4f87bd4ed
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/LICENSE
@@ -0,0 +1,203 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
\ No newline at end of file
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/README.rst b/instrumentation-genai/opentelemetry-instrumentation-google-genai/README.rst
new file mode 100644
index 0000000000..45fc185525
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/README.rst
@@ -0,0 +1,92 @@
+OpenTelemetry Google GenAI SDK Instrumentation
+==============================================
+
+|pypi|
+
+.. |pypi| image:: https://badge.fury.io/py/opentelemetry-instrumentation-google-genai.svg
+ :target: https://pypi.org/project/opentelemetry-instrumentation-google-genai/
+
+This library adds instrumentation to the `Google GenAI SDK library <https://pypi.org/project/google-genai/>`_
+to emit telemetry data following `Semantic Conventions for GenAI systems <https://opentelemetry.io/docs/specs/semconv/gen-ai/>`_.
+It adds trace spans for GenAI operations, events/logs for recording prompts/responses, and emits metrics that describe the
+GenAI operations in aggregate.
+
+
+Experimental
+------------
+
+This package is still experimental. The instrumentation may not be complete or correct just yet.
+
+Please see "TODOS.md" for a list of known defects/TODOs that are blockers to package stability.
+
+
+Installation
+------------
+
+If your application is already instrumented with OpenTelemetry, add this
+package to your requirements.
+::
+
+ pip install opentelemetry-instrumentation-google-genai
+
+If you don't have a Google GenAI SDK application, yet, try our `examples `_.
+
+Check out `zero-code example `_ for a quick start.
+
+Usage
+-----
+
+This section describes how to set up Google GenAI SDK instrumentation if you're setting OpenTelemetry up manually.
+Check out the `manual example `_ for more details.
+
+Instrumenting all clients
+*************************
+
+When using the instrumentor, all clients will automatically trace GenAI `generate_content` operations.
+You can also optionally capture prompts and responses as log events.
+
+Make sure to configure OpenTelemetry tracing, logging, metrics, and events to capture all telemetry emitted by the instrumentation.
+
+.. code-block:: python
+
+ from opentelemetry.instrumentation.google_genai import GoogleGenAiSdkInstrumentor
+ from google.genai import Client
+
+ GoogleGenAiSdkInstrumentor().instrument()
+
+
+ client = Client()
+ response = client.models.generate_content(
+ model="gemini-1.5-flash-002",
+ contents="Write a short poem on OpenTelemetry.")
+
+Enabling message content
+*************************
+
+Message content such as the contents of the prompt and response
+are not captured by default. To capture message content as log events, set the environment variable
+`OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT` to `true`.
+
+Uninstrument
+************
+
+To uninstrument clients, call the uninstrument method:
+
+.. code-block:: python
+
+ from opentelemetry.instrumentation.google_genai import GoogleGenAiSdkInstrumentor
+
+ GoogleGenAiSdkInstrumentor().instrument()
+ # ...
+
+ # Uninstrument all clients
+ GoogleGenAiSdkInstrumentor().uninstrument()
+
+References
+----------
+* `Google Gen AI SDK Documentation `_
+* `Google Gen AI SDK on GitHub <https://github.com/googleapis/python-genai>`_
+* `Using Vertex AI with Google Gen AI SDK `_
+* `OpenTelemetry Project <https://opentelemetry.io/>`_
+* `OpenTelemetry Python Examples `_
+
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/TODOS.md b/instrumentation-genai/opentelemetry-instrumentation-google-genai/TODOS.md
new file mode 100644
index 0000000000..a7f6f66f41
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/TODOS.md
@@ -0,0 +1,22 @@
+# TODOs
+
+## Fundamentals
+
+Here are some TODO items required to achieve stability for this package:
+
+ 1. Add support for streaming interfaces
+ 2. Add support for async interfaces
+ 3. Add more span-level attributes for request configuration
+ 4. Add more span-level attributes for response information
+ 5. Verify and correct formatting of events:
+ - Including the 'role' field for message events
+ - Including tool invocation information
+ 6. Emit events for safety ratings when they block responses
+ 7. Additional cleanup/improvement tasks such as:
+ - Adoption of 'wrapt' instead of 'functools.wraps'
+ - Bolstering test coverage
+ 8. Migrate tests to use VCR.py
+## Future
+
+Beyond the above TODOs, it would also be desirable to extend the
+instrumentation beyond `generate_content` to other API surfaces.
\ No newline at end of file
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/manual/.env b/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/manual/.env
new file mode 100644
index 0000000000..02cc2e2045
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/manual/.env
@@ -0,0 +1,28 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Uncomment and change to your OTLP endpoint
+# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317
+# OTEL_EXPORTER_OTLP_PROTOCOL=grpc
+
+# Uncomment to change parameters used to configure 'google.genai'
+# GOOGLE_GENAI_USE_VERTEXAI=1
+# GOOGLE_API_KEY=
+# GOOGLE_CLOUD_PROJECT=
+# GOOGLE_CLOUD_LOCATION=
+
+OTEL_SERVICE_NAME=opentelemetry-python-google-genai
+
+# Change to 'false' to hide prompt and completion content
+OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/manual/README.rst b/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/manual/README.rst
new file mode 100644
index 0000000000..182c5fc11a
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/manual/README.rst
@@ -0,0 +1,44 @@
+OpenTelemetry Google GenAI SDK Manual Instrumentation Example
+=============================================================
+
+This is an example of how to instrument Google GenAI SDK calls when configuring
+OpenTelemetry SDK and Instrumentations manually.
+
+When `main.py `_ is run, it exports traces, logs, and metrics to an OTLP
+compatible endpoint. Traces include details such as the model used and the
+duration of the chat request. Logs capture the chat request and the generated
+response, providing a comprehensive view of the performance and behavior of
+your GenAI SDK requests. Metrics include aggregate statistics such as the aggregate
+token usage as well as the latency distribution of the GenAI operations.
+
+Note: `.env <.env>`_ file configures additional environment variables:
+
+- `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true`
+
+... configures Google GenAI SDK instrumentation to capture prompt/response content.
+
+Setup
+-----
+
+An OTLP compatible endpoint should be listening for traces, logs, and metrics on
+http://localhost:4317. If not, update "OTEL_EXPORTER_OTLP_ENDPOINT" as well.
+
+Next, set up a virtual environment like this:
+
+::
+
+ python3 -m venv .venv
+ source .venv/bin/activate
+ pip install "python-dotenv[cli]"
+ pip install -r requirements.txt
+
+Run
+---
+
+Run the example like this:
+
+::
+
+ export PROMPT="Your prompt here"
+ dotenv run -- python main.py
+
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/manual/main.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/manual/main.py
new file mode 100644
index 0000000000..5ba4f31c77
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/manual/main.py
@@ -0,0 +1,101 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# We skip linting this file with pylint, because the linter is not
+# configured with the "requirements.txt" dependencies and therefore
+# will give multiple "no-name-in-module" errors for the imports.
+#
+# pylint: skip-file
+
+import os
+
+import google.genai
+
+# NOTE: OpenTelemetry Python Logs and Events APIs are in beta
+from opentelemetry import _events as otel_events
+from opentelemetry import _logs as otel_logs
+from opentelemetry import metrics as otel_metrics
+from opentelemetry import trace as otel_trace
+from opentelemetry.exporter.otlp.proto.grpc._log_exporter import (
+ OTLPLogExporter,
+)
+from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import (
+ OTLPMetricExporter,
+)
+from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
+ OTLPSpanExporter,
+)
+from opentelemetry.instrumentation.google_genai import (
+ GoogleGenAiSdkInstrumentor,
+)
+from opentelemetry.instrumentation.requests import RequestsInstrumentor
+from opentelemetry.sdk._events import EventLoggerProvider
+from opentelemetry.sdk._logs import LoggerProvider
+from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
+from opentelemetry.sdk.metrics import MeterProvider
+from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+
+
+def setup_otel_tracing():
+ otel_trace.set_tracer_provider(TracerProvider())
+ otel_trace.get_tracer_provider().add_span_processor(
+ BatchSpanProcessor(OTLPSpanExporter())
+ )
+
+
+def setup_otel_logs_and_events():
+ otel_logs.set_logger_provider(LoggerProvider())
+ otel_logs.get_logger_provider().add_log_record_processor(
+ BatchLogRecordProcessor(OTLPLogExporter())
+ )
+ otel_events.set_event_logger_provider(EventLoggerProvider())
+
+
+def setup_otel_metrics():
+ meter_provider = MeterProvider(
+ metric_readers=[
+ PeriodicExportingMetricReader(
+ OTLPMetricExporter(),
+ ),
+ ]
+ )
+ otel_metrics.set_meter_provider(meter_provider)
+
+
+def setup_opentelemetry():
+ setup_otel_tracing()
+ setup_otel_logs_and_events()
+ setup_otel_metrics()
+
+
+def instrument_google_genai():
+ GoogleGenAiSdkInstrumentor().instrument()
+ RequestsInstrumentor().instrument()
+
+
+def main():
+ setup_opentelemetry()
+ instrument_google_genai()
+ client = google.genai.Client()
+ response = client.models.generate_content(
+ model=os.getenv("MODEL", "gemini-2.0-flash-001"),
+ contents=os.getenv("PROMPT", "Why is the sky blue?"),
+ )
+ print(response.text)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/manual/requirements.txt b/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/manual/requirements.txt
new file mode 100644
index 0000000000..d94e15af2c
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/manual/requirements.txt
@@ -0,0 +1,20 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+google-genai ~= 1.0.0
+opentelemetry-api ~= 1.30.0
+opentelemetry-sdk ~= 1.30.0
+opentelemetry-exporter-otlp-proto-grpc ~= 1.30.0
+opentelemetry-instrumentation-requests ~= 0.51b0
+opentelemetry-instrumentation-google-genai ~= 0.0.1.dev
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/zero-code/.env b/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/zero-code/.env
new file mode 100644
index 0000000000..66e50d2d5a
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/zero-code/.env
@@ -0,0 +1,30 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Update to change exporter configuration as desired.
+# See: https://opentelemetry.io/docs/zero-code/python/configuration/
+OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317
+OTEL_EXPORTER_OTLP_PROTOCOL=grpc
+
+# Uncomment to change parameters used to configure 'google.genai'
+# GOOGLE_GENAI_USE_VERTEXAI=1
+# GOOGLE_API_KEY=
+# GOOGLE_CLOUD_PROJECT=
+# GOOGLE_CLOUD_LOCATION=
+
+OTEL_SERVICE_NAME=opentelemetry-python-google-genai
+
+# Change to 'false' to hide prompt and completion content
+OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/zero-code/README.rst b/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/zero-code/README.rst
new file mode 100644
index 0000000000..b841930eb7
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/zero-code/README.rst
@@ -0,0 +1,46 @@
+OpenTelemetry Google GenAI SDK Zero-Code Instrumentation Example
+================================================================
+
+This is an example of how to instrument Google GenAI SDK calls with zero code changes,
+using `opentelemetry-instrument`.
+
+When `main.py `_ is run, it exports traces, logs, and metrics to an OTLP
+compatible endpoint. Traces include details such as the model used and the
+duration of the chat request. Logs capture the chat request and the generated
+response, providing a comprehensive view of the performance and behavior of
+your GenAI SDK requests. Metrics include aggregate statistics such as the aggregate
+token usage as well as the latency distribution of the GenAI operations.
+
+Note: `.env <.env>`_ file configures additional environment variables:
+
+- `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true`
+
+... configures Google GenAI SDK instrumentation to capture prompt/response content.
+
+Setup
+-----
+
+An OTLP compatible endpoint should be listening for traces, logs, and metrics on
+http://localhost:4317. If not, update "OTEL_EXPORTER_OTLP_ENDPOINT" as well.
+
+Next, set up a virtual environment like this:
+
+::
+
+ python3 -m venv .venv
+ source .venv/bin/activate
+ pip install "python-dotenv[cli]"
+ pip install -r requirements.txt
+ opentelemetry-bootstrap -a install
+
+
+Run
+---
+
+Run the example like this:
+
+::
+
+ export PROMPT="Your prompt here"
+ dotenv run -- opentelemetry-instrument python main.py
+
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/zero-code/main.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/zero-code/main.py
new file mode 100644
index 0000000000..9cffaa4468
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/zero-code/main.py
@@ -0,0 +1,30 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import google.genai
+
+
+def main():
+ client = google.genai.Client()
+ response = client.models.generate_content(
+ model=os.getenv("MODEL", "gemini-2.0-flash-001"),
+ contents=os.getenv("PROMPT", "Why is the sky blue?"),
+ )
+ print(response.text)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/zero-code/requirements.txt b/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/zero-code/requirements.txt
new file mode 100644
index 0000000000..54cc5619e5
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/examples/zero-code/requirements.txt
@@ -0,0 +1,23 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+google-genai ~= 1.0.0
+opentelemetry-api ~= 1.30.0
+opentelemetry-sdk ~= 1.30.0
+opentelemetry-exporter-otlp-proto-grpc ~= 1.30.0
+opentelemetry-instrumentation ~= 0.51b0
+opentelemetry-instrumentation-requests ~= 0.51b0
+opentelemetry-instrumentation-google-genai ~= 0.0.1.dev
+opentelemetry-contrib-instrumentations ~= 0.51b0
+opentelemetry-distro[otlp] ~= 0.51b0
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/pyproject.toml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/pyproject.toml
new file mode 100644
index 0000000000..9bb9e0e279
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/pyproject.toml
@@ -0,0 +1,79 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "opentelemetry-instrumentation-google-genai"
+dynamic = ["version"]
+description = "OpenTelemetry"
+readme = "README.rst"
+license = "Apache-2.0"
+requires-python = ">=3.8"
+authors = [
+ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
+]
+classifiers = [
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: Apache Software License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12"
+]
+dependencies = [
+ "opentelemetry-api >=1.30.0, <2",
+ "opentelemetry-instrumentation >=0.51b0, <2",
+ "opentelemetry-semantic-conventions >=0.51b0, <2"
+]
+
+[project.optional-dependencies]
+instruments = [
+ "google-genai >= 1.0.0"
+]
+
+[project.entry-points.opentelemetry_instrumentor]
+google-genai = "opentelemetry.instrumentation.google_genai:GoogleGenAiSdkInstrumentor"
+
+[project.urls]
+Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation-genai/opentelemetry-instrumentation-google-genai"
+Repository = "https://github.com/open-telemetry/opentelemetry-python-contrib"
+
+[tool.hatch.version]
+path = "src/opentelemetry/instrumentation/google_genai/version.py"
+
+[tool.hatch.build.targets.sdist]
+include = [
+ "/src",
+ "/tests",
+]
+
+[tool.hatch.build.targets.wheel]
+packages = ["src/opentelemetry"]
+
+[tool.pyright]
+include = [
+ "src",
+]
+exclude = [
+ "**/__pycache__",
+]
+stubPath = "types"
+reportMissingImports = "error"
+reportMissingTypeStubs = false
+pythonVersion = "3.9"
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/__init__.py
new file mode 100644
index 0000000000..0ea7f1dbdb
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/__init__.py
@@ -0,0 +1,47 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Google Gen AI SDK client instrumentation supporting the `google-genai` package.
+
+It can be enabled using ``GoogleGenAiSdkInstrumentor``.
+
+.. _google-genai: https://pypi.org/project/google-genai/
+
+Usage
+-----
+
+.. code:: python
+
+ import os
+ import google.genai
+ from opentelemetry.instrumentation.google_genai import GoogleGenAiSdkInstrumentor
+
+ GoogleGenAiSdkInstrumentor().instrument()
+ model = os.getenv('MODEL', 'gemini-2.0-flash-001')
+ client = google.genai.Client()
+ response = client.models.generate_content(
+ model=model,
+ contents='why is the sky blue?'
+ )
+ print(response.text)
+
+API
+---
+"""
+
+from .instrumentor import GoogleGenAiSdkInstrumentor
+from .version import __version__
+
+__all__ = ["GoogleGenAiSdkInstrumentor", "__version__"]
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/flags.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/flags.py
new file mode 100644
index 0000000000..541d9ab48f
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/flags.py
@@ -0,0 +1,23 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+_CONTENT_RECORDING_ENV_VAR = (
+ "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"
+)
+
+
+def is_content_recording_enabled():
+ return os.getenv(_CONTENT_RECORDING_ENV_VAR, "false").lower() == "true"
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py
new file mode 100644
index 0000000000..d267f23293
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py
@@ -0,0 +1,693 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+import json
+import logging
+import os
+import time
+from typing import Any, AsyncIterator, Awaitable, Iterator, Optional, Union
+
+from google.genai.models import AsyncModels, Models
+from google.genai.types import (
+ BlockedReason,
+ Candidate,
+ Content,
+ ContentListUnion,
+ ContentListUnionDict,
+ ContentUnion,
+ ContentUnionDict,
+ GenerateContentConfigOrDict,
+ GenerateContentResponse,
+)
+
+from opentelemetry import trace
+from opentelemetry.semconv._incubating.attributes import (
+ code_attributes,
+ gen_ai_attributes,
+)
+from opentelemetry.semconv.attributes import error_attributes
+
+from .flags import is_content_recording_enabled
+from .otel_wrapper import OTelWrapper
+
+_logger = logging.getLogger(__name__)
+
+
+# Constant used for the value of 'gen_ai.operation.name'.
+_GENERATE_CONTENT_OP_NAME = "generate_content"
+
+# Constant used to make the absence of content more understandable.
+_CONTENT_ELIDED = ""
+
+# Enable these after these cases are fully vetted and tested
+_INSTRUMENT_STREAMING = False
+_INSTRUMENT_ASYNC = False
+
+
+class _MethodsSnapshot:
+ def __init__(self):
+ self._original_generate_content = Models.generate_content
+ self._original_generate_content_stream = Models.generate_content_stream
+ self._original_async_generate_content = AsyncModels.generate_content
+ self._original_async_generate_content_stream = (
+ AsyncModels.generate_content_stream
+ )
+
+ @property
+ def generate_content(self):
+ return self._original_generate_content
+
+ @property
+ def generate_content_stream(self):
+ return self._original_generate_content_stream
+
+ @property
+ def async_generate_content(self):
+ return self._original_async_generate_content
+
+ @property
+ def async_generate_content_stream(self):
+ return self._original_async_generate_content_stream
+
+ def restore(self):
+ Models.generate_content = self._original_generate_content
+ Models.generate_content_stream = self._original_generate_content_stream
+ AsyncModels.generate_content = self._original_async_generate_content
+ AsyncModels.generate_content_stream = (
+ self._original_async_generate_content_stream
+ )
+
+
+def _get_vertexai_system_name():
+ return gen_ai_attributes.GenAiSystemValues.VERTEX_AI.name.lower()
+
+
+def _get_gemini_system_name():
+ return gen_ai_attributes.GenAiSystemValues.GEMINI.name.lower()
+
+
+def _guess_genai_system_from_env():
+ if os.environ.get("GOOGLE_GENAI_USE_VERTEXAI", "0").lower() in [
+ "true",
+ "1",
+ ]:
+ return _get_vertexai_system_name()
+ return _get_gemini_system_name()
+
+
+def _get_is_vertexai(models_object: Union[Models, AsyncModels]):
+ # Since commit 8e561de04965bb8766db87ad8eea7c57c1040442 of "googleapis/python-genai",
+ # it is possible to obtain the information using a documented property.
+ if hasattr(models_object, "vertexai"):
+ vertexai_attr = getattr(models_object, "vertexai")
+ if vertexai_attr is not None:
+ return vertexai_attr
+ # For earlier revisions, it is necessary to deeply inspect the internals.
+ if hasattr(models_object, "_api_client"):
+ client = getattr(models_object, "_api_client")
+ if not client:
+ return None
+ if hasattr(client, "vertexai"):
+ return getattr(client, "vertexai")
+ return None
+
+
+def _determine_genai_system(models_object: Union[Models, AsyncModels]):
+ vertexai_attr = _get_is_vertexai(models_object)
+ if vertexai_attr is None:
+ return _guess_genai_system_from_env()
+ if vertexai_attr:
+ return _get_vertexai_system_name()
+ return _get_gemini_system_name()
+
+
+def _get_config_property(
+ config: Optional[GenerateContentConfigOrDict], path: str
+) -> Any:
+ if config is None:
+ return None
+ path_segments = path.split(".")
+ current_context: Any = config
+ for path_segment in path_segments:
+ if current_context is None:
+ return None
+ if isinstance(current_context, dict):
+ current_context = current_context.get(path_segment)
+ else:
+ current_context = getattr(current_context, path_segment)
+ return current_context
+
+
+def _get_response_property(response: GenerateContentResponse, path: str):
+ path_segments = path.split(".")
+ current_context = response
+ for path_segment in path_segments:
+ if current_context is None:
+ return None
+ if isinstance(current_context, dict):
+ current_context = current_context.get(path_segment)
+ else:
+ current_context = getattr(current_context, path_segment)
+ return current_context
+
+
+def _get_temperature(config: Optional[GenerateContentConfigOrDict]):
+ return _get_config_property(config, "temperature")
+
+
+def _get_top_k(config: Optional[GenerateContentConfigOrDict]):
+ return _get_config_property(config, "top_k")
+
+
+def _get_top_p(config: Optional[GenerateContentConfigOrDict]):
+ return _get_config_property(config, "top_p")
+
+
+# A map from define attributes to the function that can obtain
+# the relevant information from the request object.
+#
+# TODO: expand this to cover a larger set of the available
+# span attributes from GenAI semantic conventions.
+#
+# TODO: define semantic conventions for attributes that
+# are relevant for the Google GenAI SDK which are not
+# currently covered by the existing semantic conventions.
+#
+# See also: TODOS.md
+_SPAN_ATTRIBUTE_TO_CONFIG_EXTRACTOR = {
+ gen_ai_attributes.GEN_AI_REQUEST_TEMPERATURE: _get_temperature,
+ gen_ai_attributes.GEN_AI_REQUEST_TOP_K: _get_top_k,
+ gen_ai_attributes.GEN_AI_REQUEST_TOP_P: _get_top_p,
+}
+
+
+def _to_dict(value: object):
+ if isinstance(value, dict):
+ return value
+ if hasattr(value, "model_dump"):
+ return value.model_dump()
+ return json.loads(json.dumps(value))
+
+
+class _GenerateContentInstrumentationHelper:
+ def __init__(
+ self,
+ models_object: Union[Models, AsyncModels],
+ otel_wrapper: OTelWrapper,
+ model: str,
+ ):
+ self._start_time = time.time_ns()
+ self._otel_wrapper = otel_wrapper
+ self._genai_system = _determine_genai_system(models_object)
+ self._genai_request_model = model
+ self._finish_reasons_set = set()
+ self._error_type = None
+ self._input_tokens = 0
+ self._output_tokens = 0
+ self._content_recording_enabled = is_content_recording_enabled()
+ self._response_index = 0
+ self._candidate_index = 0
+
+ def start_span_as_current_span(self, model_name, function_name):
+ return self._otel_wrapper.start_as_current_span(
+ f"{_GENERATE_CONTENT_OP_NAME} {model_name}",
+ start_time=self._start_time,
+ attributes={
+ code_attributes.CODE_FUNCTION_NAME: function_name,
+ gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
+ gen_ai_attributes.GEN_AI_REQUEST_MODEL: self._genai_request_model,
+ gen_ai_attributes.GEN_AI_OPERATION_NAME: _GENERATE_CONTENT_OP_NAME,
+ },
+ )
+
+ def process_request(
+ self,
+ contents: Union[ContentListUnion, ContentListUnionDict],
+ config: Optional[GenerateContentConfigOrDict],
+ ):
+ span = trace.get_current_span()
+ for (
+ attribute_key,
+ extractor,
+ ) in _SPAN_ATTRIBUTE_TO_CONFIG_EXTRACTOR.items():
+ attribute_value = extractor(config)
+ if attribute_value is not None:
+ span.set_attribute(attribute_key, attribute_value)
+ self._maybe_log_system_instruction(config=config)
+ self._maybe_log_user_prompt(contents)
+
+ def process_response(self, response: GenerateContentResponse):
+ # TODO: Determine if there are other response properties that
+ # need to be reflected back into the span attributes.
+ #
+ # See also: TODOS.md.
+ self._maybe_update_token_counts(response)
+ self._maybe_update_error_type(response)
+ self._maybe_log_response(response)
+ self._response_index += 1
+
+ def process_error(self, e: Exception):
+ self._error_type = str(e.__class__.__name__)
+
+ def finalize_processing(self):
+ span = trace.get_current_span()
+ span.set_attribute(
+ gen_ai_attributes.GEN_AI_USAGE_INPUT_TOKENS, self._input_tokens
+ )
+ span.set_attribute(
+ gen_ai_attributes.GEN_AI_USAGE_OUTPUT_TOKENS, self._output_tokens
+ )
+ span.set_attribute(
+ gen_ai_attributes.GEN_AI_RESPONSE_FINISH_REASONS,
+ sorted(self._finish_reasons_set),
+ )
+ self._record_token_usage_metric()
+ self._record_duration_metric()
+
+ def _maybe_update_token_counts(self, response: GenerateContentResponse):
+ input_tokens = _get_response_property(
+ response, "usage_metadata.prompt_token_count"
+ )
+ output_tokens = _get_response_property(
+ response, "usage_metadata.candidates_token_count"
+ )
+ if input_tokens and isinstance(input_tokens, int):
+ self._input_tokens += input_tokens
+ if output_tokens and isinstance(output_tokens, int):
+ self._output_tokens += output_tokens
+
+ def _maybe_update_error_type(self, response: GenerateContentResponse):
+ if response.candidates:
+ return
+ if (
+ (not response.prompt_feedback)
+ or (not response.prompt_feedback.block_reason)
+ or (
+ response.prompt_feedback.block_reason
+ == BlockedReason.BLOCKED_REASON_UNSPECIFIED
+ )
+ ):
+ self._error_type = "NO_CANDIDATES"
+ return
+ # TODO: in the case where there are no candidate responses due to
+ # safety settings like this, it might make sense to emit an event
+ # that contains more details regarding the safety settings, their
+ # thresholds, etc. However, this requires defining an associated
+ # semantic convention to capture this. Follow up with SemConv to
+ # establish appropriate data modelling to capture these details,
+ # and then emit those details accordingly. (For the time being,
+ # we use the defined 'error.type' semantic convention to relay
+ # just the minimum amount of error information here).
+ #
+ # See also: "TODOS.md"
+ block_reason = response.prompt_feedback.block_reason.name.upper()
+ self._error_type = f"BLOCKED_{block_reason}"
+
+ def _maybe_log_system_instruction(
+ self, config: Optional[GenerateContentConfigOrDict] = None
+ ):
+ system_instruction = _get_config_property(config, "system_instruction")
+ if not system_instruction:
+ return
+ attributes = {
+ gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
+ }
+ # TODO: determine if "role" should be reported here or not. It is unclear
+ # since the caller does not supply a "role" and since this comes through
+ # a property named "system_instruction" which would seem to align with
+ # the default "role" that is allowed to be omitted by default.
+ #
+ # See also: "TODOS.md"
+ body = {}
+ if self._content_recording_enabled:
+ body["content"] = _to_dict(system_instruction)
+ else:
+ body["content"] = _CONTENT_ELIDED
+ self._otel_wrapper.log_system_prompt(
+ attributes=attributes,
+ body=body,
+ )
+
+ def _maybe_log_user_prompt(
+ self, contents: Union[ContentListUnion, ContentListUnionDict]
+ ):
+ if isinstance(contents, list):
+ total = len(contents)
+ index = 0
+ for entry in contents:
+ self._maybe_log_single_user_prompt(
+ entry, index=index, total=total
+ )
+ index += 1
+ else:
+ self._maybe_log_single_user_prompt(contents)
+
+ def _maybe_log_single_user_prompt(
+ self, contents: Union[ContentUnion, ContentUnionDict], index=0, total=1
+ ):
+ # TODO: figure out how to report the index in a manner that is
+ # aligned with the OTel semantic conventions.
+ attributes = {
+ gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
+ }
+
+ # TODO: determine if "role" should be reported here or not and, if so,
+ # what the value ought to be. It is not clear whether there is always
+ # a role supplied (and it looks like there could be cases where there
+ # is more than one role present in the supplied contents)?
+ #
+ # See also: "TODOS.md"
+ body = {}
+ if self._content_recording_enabled:
+ logged_contents = contents
+ if isinstance(contents, list):
+ logged_contents = Content(parts=contents)
+ body["content"] = _to_dict(logged_contents)
+ else:
+ body["content"] = _CONTENT_ELIDED
+ self._otel_wrapper.log_user_prompt(
+ attributes=attributes,
+ body=body,
+ )
+
+ def _maybe_log_response_stats(self, response: GenerateContentResponse):
+ # TODO: Determine if there is a way that we can log a summary
+ # of the overall response in a manner that is aligned with
+ # Semantic Conventions. For example, it would be natural
+ # to report an event that looks something like:
+ #
+ # gen_ai.response.stats {
+ # response_index: 0,
+ # candidate_count: 3,
+ # parts_per_candidate: [
+ # 3,
+ # 1,
+ # 5
+ # ]
+ # }
+ #
+ pass
+
+ def _maybe_log_response_safety_ratings(
+ self, response: GenerateContentResponse
+ ):
+ # TODO: Determine if there is a way that we can log
+ # the "prompt_feedback". This would be especially useful
+ # in the case where the response is blocked.
+ pass
+
+ def _maybe_log_response(self, response: GenerateContentResponse):
+ self._maybe_log_response_stats(response)
+ self._maybe_log_response_safety_ratings(response)
+ if not response.candidates:
+ return
+ candidate_in_response_index = 0
+ for candidate in response.candidates:
+ self._maybe_log_response_candidate(
+ candidate,
+ flat_candidate_index=self._candidate_index,
+ candidate_in_response_index=candidate_in_response_index,
+ response_index=self._response_index,
+ )
+ self._candidate_index += 1
+ candidate_in_response_index += 1
+
+ def _maybe_log_response_candidate(
+ self,
+ candidate: Candidate,
+ flat_candidate_index: int,
+ candidate_in_response_index: int,
+ response_index: int,
+ ):
+ # TODO: Determine if there might be a way to report the
+ # response index and candidate response index.
+ attributes = {
+ gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
+ }
+ # TODO: determine if "role" should be reported here or not and, if so,
+ # what the value ought to be.
+ #
+ # TODO: extract tool information into a separate tool message.
+ #
+ # TODO: determine if/when we need to emit a 'gen_ai.assistant.message' event.
+ #
+ # TODO: determine how to report other relevant details in the candidate that
+ # are not presently captured by Semantic Conventions. For example, the
+ # "citation_metadata", "grounding_metadata", "logprobs_result", etc.
+ #
+ # See also: "TODOS.md"
+ body = {
+ "index": flat_candidate_index,
+ }
+ if self._content_recording_enabled:
+ if candidate.content:
+ body["content"] = _to_dict(candidate.content)
+ else:
+ body["content"] = _CONTENT_ELIDED
+ if candidate.finish_reason is not None:
+ body["finish_reason"] = candidate.finish_reason.name
+ self._otel_wrapper.log_response_content(
+ attributes=attributes,
+ body=body,
+ )
+
+ def _record_token_usage_metric(self):
+ self._otel_wrapper.token_usage_metric.record(
+ self._input_tokens,
+ attributes={
+ gen_ai_attributes.GEN_AI_TOKEN_TYPE: "input",
+ gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
+ gen_ai_attributes.GEN_AI_REQUEST_MODEL: self._genai_request_model,
+ gen_ai_attributes.GEN_AI_OPERATION_NAME: _GENERATE_CONTENT_OP_NAME,
+ },
+ )
+ self._otel_wrapper.token_usage_metric.record(
+ self._output_tokens,
+ attributes={
+ gen_ai_attributes.GEN_AI_TOKEN_TYPE: "output",
+ gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
+ gen_ai_attributes.GEN_AI_REQUEST_MODEL: self._genai_request_model,
+ gen_ai_attributes.GEN_AI_OPERATION_NAME: _GENERATE_CONTENT_OP_NAME,
+ },
+ )
+
+ def _record_duration_metric(self):
+ attributes = {
+ gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
+ gen_ai_attributes.GEN_AI_REQUEST_MODEL: self._genai_request_model,
+ gen_ai_attributes.GEN_AI_OPERATION_NAME: _GENERATE_CONTENT_OP_NAME,
+ }
+ if self._error_type is not None:
+ attributes[error_attributes.ERROR_TYPE] = self._error_type
+ duration_nanos = time.time_ns() - self._start_time
+ duration_seconds = duration_nanos / 1e9
+ self._otel_wrapper.operation_duration_metric.record(
+ duration_seconds,
+ attributes=attributes,
+ )
+
+
+def _create_instrumented_generate_content(
+ snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper
+):
+ wrapped_func = snapshot.generate_content
+
+ @functools.wraps(wrapped_func)
+ def instrumented_generate_content(
+ self: Models,
+ *,
+ model: str,
+ contents: Union[ContentListUnion, ContentListUnionDict],
+ config: Optional[GenerateContentConfigOrDict] = None,
+ **kwargs: Any,
+ ) -> GenerateContentResponse:
+ helper = _GenerateContentInstrumentationHelper(
+ self, otel_wrapper, model
+ )
+ with helper.start_span_as_current_span(
+ model, "google.genai.Models.generate_content"
+ ):
+ helper.process_request(contents, config)
+ try:
+ response = wrapped_func(
+ self,
+ model=model,
+ contents=contents,
+ config=config,
+ **kwargs,
+ )
+ helper.process_response(response)
+ return response
+ except Exception as error:
+ helper.process_error(error)
+ raise
+ finally:
+ helper.finalize_processing()
+
+ return instrumented_generate_content
+
+
+def _create_instrumented_generate_content_stream(
+ snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper
+):
+ wrapped_func = snapshot.generate_content_stream
+ if not _INSTRUMENT_STREAMING:
+ # TODO: remove once this case has been fully tested
+ return wrapped_func
+
+ @functools.wraps(wrapped_func)
+ def instrumented_generate_content_stream(
+ self: Models,
+ *,
+ model: str,
+ contents: Union[ContentListUnion, ContentListUnionDict],
+ config: Optional[GenerateContentConfigOrDict] = None,
+ **kwargs: Any,
+ ) -> Iterator[GenerateContentResponse]:
+ helper = _GenerateContentInstrumentationHelper(
+ self, otel_wrapper, model
+ )
+ with helper.start_span_as_current_span(
+ model, "google.genai.Models.generate_content_stream"
+ ):
+ helper.process_request(contents, config)
+ try:
+ for response in wrapped_func(
+ self,
+ model=model,
+ contents=contents,
+ config=config,
+ **kwargs,
+ ):
+ helper.process_response(response)
+ yield response
+ except Exception as error:
+ helper.process_error(error)
+ raise
+ finally:
+ helper.finalize_processing()
+
+ return instrumented_generate_content_stream
+
+
+def _create_instrumented_async_generate_content(
+ snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper
+):
+ wrapped_func = snapshot.async_generate_content
+ if not _INSTRUMENT_ASYNC:
+ # TODO: remove once this case has been fully tested
+ return wrapped_func
+
+ @functools.wraps(wrapped_func)
+ async def instrumented_generate_content(
+ self: AsyncModels,
+ *,
+ model: str,
+ contents: Union[ContentListUnion, ContentListUnionDict],
+ config: Optional[GenerateContentConfigOrDict] = None,
+ **kwargs: Any,
+ ) -> GenerateContentResponse:
+ helper = _GenerateContentInstrumentationHelper(
+ self, otel_wrapper, model
+ )
+ with helper.start_span_as_current_span(
+ model, "google.genai.AsyncModels.generate_content"
+ ):
+ helper.process_request(contents, config)
+ try:
+ response = await wrapped_func(
+ self,
+ model=model,
+ contents=contents,
+ config=config,
+ **kwargs,
+ )
+ helper.process_response(response)
+ return response
+ except Exception as error:
+ helper.process_error(error)
+ raise
+ finally:
+ helper.finalize_processing()
+
+ return instrumented_generate_content
+
+
+# Disabling type checking because this is not yet implemented and tested fully.
+def _create_instrumented_async_generate_content_stream( # pyright: ignore
+ snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper
+):
+ wrapped_func = snapshot.async_generate_content_stream
+ if not _INSTRUMENT_ASYNC or not _INSTRUMENT_STREAMING:
+ # TODO: remove once this case has been fully tested
+ return wrapped_func
+
+ @functools.wraps(wrapped_func)
+ async def instrumented_generate_content_stream(
+ self: AsyncModels,
+ *,
+ model: str,
+ contents: Union[ContentListUnion, ContentListUnionDict],
+ config: Optional[GenerateContentConfigOrDict] = None,
+ **kwargs: Any,
+    ) -> AsyncIterator[GenerateContentResponse]:
+ helper = _GenerateContentInstrumentationHelper(
+ self, otel_wrapper, model
+ )
+ with helper.start_span_as_current_span(
+ model, "google.genai.AsyncModels.generate_content_stream"
+ ):
+ helper.process_request(contents, config)
+ try:
+ async for response in await wrapped_func(
+ self,
+ model=model,
+ contents=contents,
+ config=config,
+ **kwargs,
+ ): # pyright: ignore
+ helper.process_response(response)
+ yield response # pyright: ignore
+ except Exception as error:
+ helper.process_error(error)
+ raise
+ finally:
+ helper.finalize_processing()
+
+ return instrumented_generate_content_stream
+
+
+def uninstrument_generate_content(snapshot: object):
+ assert isinstance(snapshot, _MethodsSnapshot)
+ snapshot.restore()
+
+
+def instrument_generate_content(otel_wrapper: OTelWrapper) -> object:
+ snapshot = _MethodsSnapshot()
+ Models.generate_content = _create_instrumented_generate_content(
+ snapshot, otel_wrapper
+ )
+ Models.generate_content_stream = (
+ _create_instrumented_generate_content_stream(snapshot, otel_wrapper)
+ )
+ AsyncModels.generate_content = _create_instrumented_async_generate_content(
+ snapshot, otel_wrapper
+ )
+ AsyncModels.generate_content_stream = (
+ _create_instrumented_async_generate_content_stream(
+ snapshot, otel_wrapper
+ )
+ )
+ return snapshot
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py
new file mode 100644
index 0000000000..ef57f5891c
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py
@@ -0,0 +1,56 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Collection
+
+from opentelemetry._events import get_event_logger_provider
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from opentelemetry.metrics import get_meter_provider
+from opentelemetry.trace import get_tracer_provider
+
+from .generate_content import (
+ instrument_generate_content,
+ uninstrument_generate_content,
+)
+from .otel_wrapper import OTelWrapper
+
+
+class GoogleGenAiSdkInstrumentor(BaseInstrumentor):
+ def __init__(self):
+ self._generate_content_snapshot = None
+
+ # Inherited, abstract function from 'BaseInstrumentor'. Even though 'self' is
+ # not used in the definition, a method is required per the API contract.
+ def instrumentation_dependencies(self) -> Collection[str]: # pylint: disable=no-self-use
+ return ["google-genai>=1.0.0,<2"]
+
+ def _instrument(self, **kwargs: Any):
+ tracer_provider = (
+ kwargs.get("tracer_provider") or get_tracer_provider()
+ )
+ event_logger_provider = (
+ kwargs.get("event_logger_provider") or get_event_logger_provider()
+ )
+ meter_provider = kwargs.get("meter_provider") or get_meter_provider()
+ otel_wrapper = OTelWrapper.from_providers(
+ tracer_provider=tracer_provider,
+ event_logger_provider=event_logger_provider,
+ meter_provider=meter_provider,
+ )
+ self._generate_content_snapshot = instrument_generate_content(
+ otel_wrapper
+ )
+
+ def _uninstrument(self, **kwargs: Any):
+ uninstrument_generate_content(self._generate_content_snapshot)
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py
new file mode 100644
index 0000000000..b7dbb5de41
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py
@@ -0,0 +1,92 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+import google.genai
+
+from opentelemetry._events import Event
+from opentelemetry.semconv._incubating.metrics import gen_ai_metrics
+from opentelemetry.semconv.schemas import Schemas
+
+from .version import __version__ as _LIBRARY_VERSION
+
+_logger = logging.getLogger(__name__)
+
+_SCOPE_NAME = "opentelemetry.instrumentation.google_genai"
+_PYPI_PACKAGE_NAME = "opentelemetry-instrumentation-google-genai"
+_SCHEMA_URL = Schemas.V1_30_0.value
+_SCOPE_ATTRIBUTES = {
+ "gcp.client.name": "google.genai",
+ "gcp.client.repo": "googleapis/python-genai",
+ "gcp.client.version": google.genai.__version__,
+ "pypi.package.name": _PYPI_PACKAGE_NAME,
+}
+
+
+class OTelWrapper:
+ def __init__(self, tracer, event_logger, meter):
+ self._tracer = tracer
+ self._event_logger = event_logger
+ self._meter = meter
+ self._operation_duration_metric = (
+ gen_ai_metrics.create_gen_ai_client_operation_duration(meter)
+ )
+ self._token_usage_metric = (
+ gen_ai_metrics.create_gen_ai_client_token_usage(meter)
+ )
+
+ @staticmethod
+ def from_providers(tracer_provider, event_logger_provider, meter_provider):
+ return OTelWrapper(
+ tracer_provider.get_tracer(
+ _SCOPE_NAME, _LIBRARY_VERSION, _SCHEMA_URL, _SCOPE_ATTRIBUTES
+ ),
+ event_logger_provider.get_event_logger(
+ _SCOPE_NAME, _LIBRARY_VERSION, _SCHEMA_URL, _SCOPE_ATTRIBUTES
+ ),
+ meter=meter_provider.get_meter(
+ _SCOPE_NAME, _LIBRARY_VERSION, _SCHEMA_URL, _SCOPE_ATTRIBUTES
+ ),
+ )
+
+ def start_as_current_span(self, *args, **kwargs):
+ return self._tracer.start_as_current_span(*args, **kwargs)
+
+ @property
+ def operation_duration_metric(self):
+ return self._operation_duration_metric
+
+ @property
+ def token_usage_metric(self):
+ return self._token_usage_metric
+
+ def log_system_prompt(self, attributes, body):
+ _logger.debug("Recording system prompt.")
+ event_name = "gen_ai.system.message"
+ self._log_event(event_name, attributes, body)
+
+ def log_user_prompt(self, attributes, body):
+ _logger.debug("Recording user prompt.")
+ event_name = "gen_ai.user.message"
+ self._log_event(event_name, attributes, body)
+
+ def log_response_content(self, attributes, body):
+ _logger.debug("Recording response.")
+ event_name = "gen_ai.choice"
+ self._log_event(event_name, attributes, body)
+
+ def _log_event(self, event_name, attributes, body):
+ event = Event(event_name, body=body, attributes=attributes)
+ self._event_logger.emit(event)
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/version.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/version.py
new file mode 100644
index 0000000000..1d3600364b
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/version.py
@@ -0,0 +1,20 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# **IMPORTANT**:
+#
+# This version should stay below "1.0" until the fundamentals
+# in "TODOS.md" have been addressed. Please revisit the TODOs
+# listed there before bumping to a stable version.
+__version__ = "0.0.1.dev"
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/base.py
new file mode 100644
index 0000000000..307dafda13
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/base.py
@@ -0,0 +1,82 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import unittest
+
+# Explicit import: "_FakeCredentials" subclasses
+# "google.auth.credentials.AnonymousCredentials"; do not rely on
+# "google.genai" importing "google.auth" transitively.
+import google.auth.credentials
+import google.genai
+
+from .instrumentation_context import InstrumentationContext
+from .otel_mocker import OTelMocker
+from .requests_mocker import RequestsMocker
+
+
+class _FakeCredentials(google.auth.credentials.AnonymousCredentials):
+ def refresh(self, request):
+ pass
+
+
+class TestCase(unittest.TestCase):
+ def setUp(self):
+ self._otel = OTelMocker()
+ self._otel.install()
+ self._requests = RequestsMocker()
+ self._requests.install()
+ self._instrumentation_context = None
+ self._api_key = "test-api-key"
+ self._project = "test-project"
+ self._location = "test-location"
+ self._client = None
+ self._uses_vertex = False
+ self._credentials = _FakeCredentials()
+
+ def _lazy_init(self):
+ self._instrumentation_context = InstrumentationContext()
+ self._instrumentation_context.install()
+
+ @property
+ def client(self):
+ if self._client is None:
+ self._client = self._create_client()
+ return self._client
+
+ @property
+ def requests(self):
+ return self._requests
+
+ @property
+ def otel(self):
+ return self._otel
+
+ def set_use_vertex(self, use_vertex):
+ self._uses_vertex = use_vertex
+
+ def _create_client(self):
+ self._lazy_init()
+ if self._uses_vertex:
+ os.environ["GOOGLE_API_KEY"] = self._api_key
+ return google.genai.Client(
+ vertexai=True,
+ project=self._project,
+ location=self._location,
+ credentials=self._credentials,
+ )
+ return google.genai.Client(api_key=self._api_key)
+
+ def tearDown(self):
+ if self._instrumentation_context is not None:
+ self._instrumentation_context.uninstall()
+ self._requests.uninstall()
+ self._otel.uninstall()
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/instrumentation_context.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/instrumentation_context.py
new file mode 100644
index 0000000000..6bd6ddd7aa
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/instrumentation_context.py
@@ -0,0 +1,28 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from opentelemetry.instrumentation.google_genai import (
+ GoogleGenAiSdkInstrumentor,
+)
+
+
+class InstrumentationContext:
+ def __init__(self):
+ self._instrumentor = GoogleGenAiSdkInstrumentor()
+
+ def install(self):
+ self._instrumentor.instrument()
+
+ def uninstall(self):
+ self._instrumentor.uninstrument()
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/otel_mocker.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/otel_mocker.py
new file mode 100644
index 0000000000..0cd04d1925
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/otel_mocker.py
@@ -0,0 +1,218 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import opentelemetry._events
+import opentelemetry._logs._internal
+import opentelemetry.metrics._internal
+import opentelemetry.trace
+from opentelemetry._events import (
+ get_event_logger_provider,
+ set_event_logger_provider,
+)
+from opentelemetry._logs import get_logger_provider, set_logger_provider
+from opentelemetry.metrics import get_meter_provider, set_meter_provider
+from opentelemetry.sdk._events import EventLoggerProvider
+from opentelemetry.sdk._logs import LoggerProvider
+from opentelemetry.sdk._logs.export import (
+ InMemoryLogExporter,
+ SimpleLogRecordProcessor,
+)
+from opentelemetry.sdk.metrics import MeterProvider
+from opentelemetry.sdk.metrics._internal.export import InMemoryMetricReader
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
+ InMemorySpanExporter,
+)
+from opentelemetry.trace import get_tracer_provider, set_tracer_provider
+from opentelemetry.util._once import Once
+
+
+def _bypass_otel_once():
+ opentelemetry.trace._TRACER_PROVIDER_SET_ONCE = Once()
+ opentelemetry._logs._internal._LOGGER_PROVIDER_SET_ONCE = Once()
+ opentelemetry._events._EVENT_LOGGER_PROVIDER_SET_ONCE = Once()
+ opentelemetry.metrics._internal._METER_PROVIDER_SET_ONCE = Once()
+
+
+class OTelProviderSnapshot:
+ def __init__(self):
+ self._tracer_provider = get_tracer_provider()
+ self._logger_provider = get_logger_provider()
+ self._event_logger_provider = get_event_logger_provider()
+ self._meter_provider = get_meter_provider()
+
+ def restore(self):
+ _bypass_otel_once()
+ set_tracer_provider(self._tracer_provider)
+ set_logger_provider(self._logger_provider)
+ set_event_logger_provider(self._event_logger_provider)
+ set_meter_provider(self._meter_provider)
+
+
+class _LogWrapper:
+ def __init__(self, log_data):
+ self._log_data = log_data
+
+ @property
+ def scope(self):
+ return self._log_data.instrumentation_scope
+
+ @property
+ def resource(self):
+ return self._log_data.log_record.resource
+
+ @property
+ def attributes(self):
+ return self._log_data.log_record.attributes
+
+ @property
+ def body(self):
+ return self._log_data.log_record.body
+
+ def __str__(self):
+ return self._log_data.log_record.to_json()
+
+
+class _MetricDataPointWrapper:
+ def __init__(self, resource, scope, metric):
+ self._resource = resource
+ self._scope = scope
+ self._metric = metric
+
+ @property
+ def resource(self):
+ return self._resource
+
+ @property
+ def scope(self):
+ return self._scope
+
+ @property
+ def metric(self):
+ return self._metric
+
+ @property
+ def name(self):
+ return self._metric.name
+
+ @property
+ def data(self):
+ return self._metric.data
+
+
+class OTelMocker:
+ def __init__(self):
+ self._snapshot = None
+ self._logs = InMemoryLogExporter()
+ self._traces = InMemorySpanExporter()
+ self._metrics = InMemoryMetricReader()
+ self._spans = []
+ self._finished_logs = []
+ self._metrics_data = []
+
+ def install(self):
+ self._snapshot = OTelProviderSnapshot()
+ _bypass_otel_once()
+ self._install_logs()
+ self._install_metrics()
+ self._install_traces()
+
+ def uninstall(self):
+ self._snapshot.restore()
+
+    def get_finished_logs(self):
+        # Rebuild (not append): the exporter's result is cumulative, so
+        # appending would duplicate entries on repeated calls.
+        logs = self._logs.get_finished_logs()
+        self._finished_logs = [_LogWrapper(entry) for entry in logs]
+        return self._finished_logs
+
+    def get_finished_spans(self):
+        self._spans = list(self._traces.get_finished_spans())
+        return self._spans
+
+ def get_metrics_data(self):
+ data = self._metrics.get_metrics_data()
+ if data is not None:
+ for resource_metric in data.resource_metrics:
+ resource = resource_metric.resource
+ for scope_metrics in resource_metric.scope_metrics:
+ scope = scope_metrics.scope
+ for metric in scope_metrics.metrics:
+ wrapper = _MetricDataPointWrapper(
+ resource, scope, metric
+ )
+ self._metrics_data.append(wrapper)
+ return self._metrics_data
+
+ def get_span_named(self, name):
+ for span in self.get_finished_spans():
+ if span.name == name:
+ return span
+ return None
+
+ def assert_has_span_named(self, name):
+ span = self.get_span_named(name)
+ finished_spans = [span.name for span in self.get_finished_spans()]
+ assert (
+ span is not None
+ ), f'Could not find span named "{name}"; finished spans: {finished_spans}'
+
+ def get_event_named(self, event_name):
+ for event in self.get_finished_logs():
+ event_name_attr = event.attributes.get("event.name")
+ if event_name_attr is None:
+ continue
+ if event_name_attr == event_name:
+ return event
+ return None
+
+ def assert_has_event_named(self, name):
+ event = self.get_event_named(name)
+ finished_logs = self.get_finished_logs()
+ assert (
+ event is not None
+ ), f'Could not find event named "{name}"; finished logs: {finished_logs}'
+
+ def assert_does_not_have_event_named(self, name):
+ event = self.get_event_named(name)
+ assert event is None, f"Unexpected event: {event}"
+
+ def get_metrics_data_named(self, name):
+ results = []
+ for entry in self.get_metrics_data():
+ if entry.name == name:
+ results.append(entry)
+ return results
+
+ def assert_has_metrics_data_named(self, name):
+ data = self.get_metrics_data_named(name)
+ assert len(data) > 0
+
+ def _install_logs(self):
+ provider = LoggerProvider()
+ provider.add_log_record_processor(SimpleLogRecordProcessor(self._logs))
+ set_logger_provider(provider)
+ event_provider = EventLoggerProvider(logger_provider=provider)
+ set_event_logger_provider(event_provider)
+
+ def _install_metrics(self):
+ provider = MeterProvider(metric_readers=[self._metrics])
+ set_meter_provider(provider)
+
+ def _install_traces(self):
+ provider = TracerProvider()
+ provider.add_span_processor(SimpleSpanProcessor(self._traces))
+ set_tracer_provider(provider)
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/requests_mocker.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/requests_mocker.py
new file mode 100644
index 0000000000..c39c9ebd38
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/requests_mocker.py
@@ -0,0 +1,176 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file defines a "RequestsMocker" that facilitates mocking the "requests"
+# API. There are a few reasons that we use this approach to testing:
+#
+# 1. Security - although "vcrpy" provides a means of filtering data,
+# it can be error-prone; use of this solution risks exposing API keys,
+# auth tokens, etc. It can also inadvertently record fields that are
+# visibility-restricted (such as fields that are returned and available
+# when recording using privileged API keys where such fields would not
+# ordinarily be returned to users with non-privileged API keys).
+#
+# 2. Reproducibility - although the tests may be reproducible once the
+# recording is present, updating the recording often has external
+# dependencies that may be difficult to reproduce.
+#
+# 3. Costs - there are both time costs and monetary costs to the external
+# dependencies required for a record/replay solution.
+#
+# Because the APIs that need to be mocked are simple enough and well documented
+# enough, it seems approachable to mock the requests library, instead.
+
+import copy
+import functools
+import http.client
+import io
+import json
+
+import requests
+import requests.sessions
+
+
+class RequestsCallArgs:
+ def __init__(
+ self,
+ session: requests.sessions.Session,
+ request: requests.PreparedRequest,
+ **kwargs,
+ ):
+ self._session = session
+ self._request = request
+ self._kwargs = kwargs
+
+ @property
+ def session(self):
+ return self._session
+
+ @property
+ def request(self):
+ return self._request
+
+ @property
+ def kwargs(self):
+ return self._kwargs
+
+
+class RequestsCall:
+ def __init__(self, args: RequestsCallArgs, response_generator):
+ self._args = args
+ self._response_generator = response_generator
+
+ @property
+ def args(self):
+ return self._args
+
+ @property
+ def response(self):
+ return self._response_generator(self._args)
+
+
+def _return_error_status(
+ args: RequestsCallArgs, status_code: int, reason: str = None
+):
+ result = requests.Response()
+ result.url = args.request.url
+ result.status_code = status_code
+ result.reason = reason or http.client.responses.get(status_code)
+ result.request = args.request
+ return result
+
+
+def _return_404(args: RequestsCallArgs):
+ return _return_error_status(args, 404, "Not Found")
+
+
+def _to_response_generator(response):
+    if response is None:
+        raise ValueError("response must not be None")
+    if isinstance(response, int):
+        return lambda args: _return_error_status(args, response)
+    if isinstance(response, requests.Response):
+        def generate_response_from_response(args):
+            new_response = copy.deepcopy(response)
+            new_response.request = args.request
+            new_response.url = args.request.url
+            return new_response
+
+        return generate_response_from_response
+    if isinstance(response, dict):
+        def generate_response_from_dict(args):
+            result = requests.Response()
+            # Mirror the Response branch: attach the originating request
+            # so callers can inspect "response.request"/"response.url".
+            result.request = args.request
+            result.url = args.request.url
+            result.status_code = 200
+            result.headers["content-type"] = "application/json"
+            result.encoding = "utf-8"
+            result.raw = io.BytesIO(json.dumps(response).encode())
+            return result
+
+        return generate_response_from_dict
+    raise ValueError(f"Unsupported response type: {type(response)}")
+
+
+class RequestsMocker:
+ def __init__(self):
+ self._original_send = requests.sessions.Session.send
+ self._calls = []
+ self._handlers = []
+
+ def install(self):
+ @functools.wraps(requests.sessions.Session.send)
+ def replacement_send(
+ s: requests.sessions.Session,
+ request: requests.PreparedRequest,
+ **kwargs,
+ ):
+ return self._do_send(s, request, **kwargs)
+
+ requests.sessions.Session.send = replacement_send
+
+ def uninstall(self):
+ requests.sessions.Session.send = self._original_send
+
+ def reset(self):
+ self._calls = []
+ self._handlers = []
+
+ def add_response(self, response, if_matches=None):
+ self._handlers.append((if_matches, _to_response_generator(response)))
+
+ @property
+ def calls(self):
+ return self._calls
+
+ def _do_send(
+ self,
+ session: requests.sessions.Session,
+ request: requests.PreparedRequest,
+ **kwargs,
+ ):
+ args = RequestsCallArgs(session, request, **kwargs)
+ response_generator = self._lookup_response_generator(args)
+ call = RequestsCall(args, response_generator)
+ result = call.response
+ self._calls.append(call)
+ return result
+
+ def _lookup_response_generator(self, args: RequestsCallArgs):
+ for matcher, response_generator in self._handlers:
+ if matcher is None:
+ return response_generator
+ if matcher(args):
+ return response_generator
+ return _return_404
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py
new file mode 100644
index 0000000000..ce0fbc243d
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py
@@ -0,0 +1,233 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+import unittest
+
+from ..common.base import TestCase
+
+
+def create_valid_response(
+ response_text="The model response", input_tokens=10, output_tokens=20
+):
+ return {
+ "modelVersion": "gemini-2.0-flash-test123",
+ "usageMetadata": {
+ "promptTokenCount": input_tokens,
+ "candidatesTokenCount": output_tokens,
+ "totalTokenCount": input_tokens + output_tokens,
+ },
+ "candidates": [
+ {
+ "content": {
+ "role": "model",
+ "parts": [
+ {
+ "text": response_text,
+ }
+ ],
+ }
+ }
+ ],
+ }
+
+
+class NonStreamingTestCase(TestCase):
+ # The "setUp" function is defined by "unittest.TestCase" and thus
+ # this name must be used. Uncertain why pylint doesn't seem to
+ # recognize that this is a unit test class for which this is inherited.
+ def setUp(self): # pylint: disable=invalid-name
+ super().setUp()
+ if self.__class__ == NonStreamingTestCase:
+ raise unittest.SkipTest("Skipping testcase base.")
+
+ def generate_content(self, *args, **kwargs):
+ raise NotImplementedError("Must implement 'generate_content'.")
+
+ def expected_function_name(self):
+ raise NotImplementedError("Must implement 'expected_function_name'.")
+
+ def configure_valid_response(
+ self,
+ response_text="The model_response",
+ input_tokens=10,
+ output_tokens=20,
+ ):
+ self.requests.add_response(
+ create_valid_response(
+ response_text=response_text,
+ input_tokens=input_tokens,
+ output_tokens=output_tokens,
+ )
+ )
+
+ def test_instrumentation_does_not_break_core_functionality(self):
+ self.configure_valid_response(response_text="Yep, it works!")
+ response = self.generate_content(
+ model="gemini-2.0-flash", contents="Does this work?"
+ )
+ self.assertEqual(response.text, "Yep, it works!")
+
+ def test_generates_span(self):
+ self.configure_valid_response(response_text="Yep, it works!")
+ response = self.generate_content(
+ model="gemini-2.0-flash", contents="Does this work?"
+ )
+ self.assertEqual(response.text, "Yep, it works!")
+ self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
+
+ def test_model_reflected_into_span_name(self):
+ self.configure_valid_response(response_text="Yep, it works!")
+ response = self.generate_content(
+ model="gemini-1.5-flash", contents="Does this work?"
+ )
+ self.assertEqual(response.text, "Yep, it works!")
+ self.otel.assert_has_span_named("generate_content gemini-1.5-flash")
+
+ def test_generated_span_has_minimal_genai_attributes(self):
+ self.configure_valid_response(response_text="Yep, it works!")
+ self.generate_content(
+ model="gemini-2.0-flash", contents="Does this work?"
+ )
+ self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
+ span = self.otel.get_span_named("generate_content gemini-2.0-flash")
+ self.assertEqual(span.attributes["gen_ai.system"], "gemini")
+ self.assertEqual(
+ span.attributes["gen_ai.operation.name"], "generate_content"
+ )
+
+ def test_generated_span_has_correct_function_name(self):
+ self.configure_valid_response(response_text="Yep, it works!")
+ self.generate_content(
+ model="gemini-2.0-flash", contents="Does this work?"
+ )
+ self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
+ span = self.otel.get_span_named("generate_content gemini-2.0-flash")
+ self.assertEqual(
+ span.attributes["code.function.name"], self.expected_function_name
+ )
+
+ def test_generated_span_has_vertex_ai_system_when_configured(self):
+ self.set_use_vertex(True)
+ self.configure_valid_response(response_text="Yep, it works!")
+ self.generate_content(
+ model="gemini-2.0-flash", contents="Does this work?"
+ )
+ self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
+ span = self.otel.get_span_named("generate_content gemini-2.0-flash")
+ self.assertEqual(span.attributes["gen_ai.system"], "vertex_ai")
+ self.assertEqual(
+ span.attributes["gen_ai.operation.name"], "generate_content"
+ )
+
+ def test_generated_span_counts_tokens(self):
+ self.configure_valid_response(input_tokens=123, output_tokens=456)
+ self.generate_content(model="gemini-2.0-flash", contents="Some input")
+ self.otel.assert_has_span_named("generate_content gemini-2.0-flash")
+ span = self.otel.get_span_named("generate_content gemini-2.0-flash")
+ self.assertEqual(span.attributes["gen_ai.usage.input_tokens"], 123)
+ self.assertEqual(span.attributes["gen_ai.usage.output_tokens"], 456)
+
+ def test_records_system_prompt_as_log(self):
+ os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = (
+ "true"
+ )
+ config = {"system_instruction": "foo"}
+ self.configure_valid_response()
+ self.generate_content(
+ model="gemini-2.0-flash", contents="Some input", config=config
+ )
+ self.otel.assert_has_event_named("gen_ai.system.message")
+ event_record = self.otel.get_event_named("gen_ai.system.message")
+ self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
+ self.assertEqual(event_record.body["content"], "foo")
+
+ def test_does_not_record_system_prompt_as_log_if_disabled_by_env(self):
+ os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = (
+ "false"
+ )
+ config = {"system_instruction": "foo"}
+ self.configure_valid_response()
+ self.generate_content(
+ model="gemini-2.0-flash", contents="Some input", config=config
+ )
+ self.otel.assert_has_event_named("gen_ai.system.message")
+ event_record = self.otel.get_event_named("gen_ai.system.message")
+ self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
+ self.assertEqual(event_record.body["content"], "")
+
+ def test_does_not_record_system_prompt_as_log_if_no_system_prompt_present(
+ self,
+ ):
+ os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = (
+ "true"
+ )
+ self.configure_valid_response()
+ self.generate_content(model="gemini-2.0-flash", contents="Some input")
+ self.otel.assert_does_not_have_event_named("gen_ai.system.message")
+
+ def test_records_user_prompt_as_log(self):
+ os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = (
+ "true"
+ )
+ self.configure_valid_response()
+ self.generate_content(model="gemini-2.0-flash", contents="Some input")
+ self.otel.assert_has_event_named("gen_ai.user.message")
+ event_record = self.otel.get_event_named("gen_ai.user.message")
+ self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
+ self.assertEqual(event_record.body["content"], "Some input")
+
+ def test_does_not_record_user_prompt_as_log_if_disabled_by_env(self):
+ os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = (
+ "false"
+ )
+ self.configure_valid_response()
+ self.generate_content(model="gemini-2.0-flash", contents="Some input")
+ self.otel.assert_has_event_named("gen_ai.user.message")
+ event_record = self.otel.get_event_named("gen_ai.user.message")
+ self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
+ self.assertEqual(event_record.body["content"], "")
+
+ def test_records_response_as_log(self):
+ os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = (
+ "true"
+ )
+ self.configure_valid_response(response_text="Some response content")
+ self.generate_content(model="gemini-2.0-flash", contents="Some input")
+ self.otel.assert_has_event_named("gen_ai.choice")
+ event_record = self.otel.get_event_named("gen_ai.choice")
+ self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
+ self.assertIn(
+ "Some response content", json.dumps(event_record.body["content"])
+ )
+
+ def test_does_not_record_response_as_log_if_disabled_by_env(self):
+ os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = (
+ "false"
+ )
+ self.configure_valid_response(response_text="Some response content")
+ self.generate_content(model="gemini-2.0-flash", contents="Some input")
+ self.otel.assert_has_event_named("gen_ai.choice")
+ event_record = self.otel.get_event_named("gen_ai.choice")
+ self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
+ self.assertEqual(event_record.body["content"], "")
+
+ def test_records_metrics_data(self):
+ self.configure_valid_response()
+ self.generate_content(model="gemini-2.0-flash", contents="Some input")
+ self.otel.assert_has_metrics_data_named("gen_ai.client.token.usage")
+ self.otel.assert_has_metrics_data_named(
+ "gen_ai.client.operation.duration"
+ )
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_async_nonstreaming.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_async_nonstreaming.py
new file mode 100644
index 0000000000..8a04638eb9
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_async_nonstreaming.py
@@ -0,0 +1,76 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# TODO: Once the async non-streaming case has been fully implemented,
+# reimplement this in terms of "nonstreaming_base.py".
+
+import asyncio
+
+from ..common.base import TestCase
+
+
+def create_valid_response(
+ response_text="The model response", input_tokens=10, output_tokens=20
+):
+ return {
+ "modelVersion": "gemini-2.0-flash-test123",
+ "usageMetadata": {
+ "promptTokenCount": input_tokens,
+ "candidatesTokenCount": output_tokens,
+ "totalTokenCount": input_tokens + output_tokens,
+ },
+ "candidates": [
+ {
+ "content": {
+ "role": "model",
+ "parts": [
+ {
+ "text": response_text,
+ }
+ ],
+ }
+ }
+ ],
+ }
+
+
+# Temporary test fixture just to ensure that the in-progress work to
+# implement this case doesn't break the original code.
+class TestGenerateContentAsyncNonstreaming(TestCase):
+ def configure_valid_response(
+ self,
+ response_text="The model_response",
+ input_tokens=10,
+ output_tokens=20,
+ ):
+ self.requests.add_response(
+ create_valid_response(
+ response_text=response_text,
+ input_tokens=input_tokens,
+ output_tokens=output_tokens,
+ )
+ )
+
+ def generate_content(self, *args, **kwargs):
+ return asyncio.run(
+ self.client.aio.models.generate_content(*args, **kwargs) # pylint: disable=missing-kwoa
+ )
+
+ def test_async_generate_content_not_broken_by_instrumentation(self):
+ self.configure_valid_response(response_text="Yep, it works!")
+ response = self.generate_content(
+ model="gemini-2.0-flash", contents="Does this work?"
+ )
+ self.assertEqual(response.text, "Yep, it works!")
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_async_streaming.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_async_streaming.py
new file mode 100644
index 0000000000..2059ea2f40
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_async_streaming.py
@@ -0,0 +1,85 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: once the async streaming case has been implemented, we should have
+# two different tests here that inherit from "streaming_base" and "nonstreaming_base",
+# covering the cases of one response and multiple streaming responses.
+
+import asyncio
+
+from ..common.base import TestCase
+
+
+def create_valid_response(
+ response_text="The model response", input_tokens=10, output_tokens=20
+):
+ return {
+ "modelVersion": "gemini-2.0-flash-test123",
+ "usageMetadata": {
+ "promptTokenCount": input_tokens,
+ "candidatesTokenCount": output_tokens,
+ "totalTokenCount": input_tokens + output_tokens,
+ },
+ "candidates": [
+ {
+ "content": {
+ "role": "model",
+ "parts": [
+ {
+ "text": response_text,
+ }
+ ],
+ }
+ }
+ ],
+ }
+
+
+# Temporary test fixture just to ensure that the in-progress work to
+# implement this case doesn't break the original code.
+class TestGenerateContentAsyncStreaming(TestCase):
+ def configure_valid_response(
+ self,
+ response_text="The model_response",
+ input_tokens=10,
+ output_tokens=20,
+ ):
+ self.requests.add_response(
+ create_valid_response(
+ response_text=response_text,
+ input_tokens=input_tokens,
+ output_tokens=output_tokens,
+ )
+ )
+
+ async def _generate_content_helper(self, *args, **kwargs):
+ result = []
+ async for (
+ response
+ ) in await self.client.aio.models.generate_content_stream( # pylint: disable=missing-kwoa
+ *args, **kwargs
+ ):
+ result.append(response)
+ return result
+
+ def generate_content(self, *args, **kwargs):
+ return asyncio.run(self._generate_content_helper(*args, **kwargs))
+
+ def test_async_generate_content_not_broken_by_instrumentation(self):
+ self.configure_valid_response(response_text="Yep, it works!")
+ responses = self.generate_content(
+ model="gemini-2.0-flash", contents="Does this work?"
+ )
+ self.assertEqual(len(responses), 1)
+ self.assertEqual(responses[0].text, "Yep, it works!")
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_sync_nonstreaming.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_sync_nonstreaming.py
new file mode 100644
index 0000000000..0243083695
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_sync_nonstreaming.py
@@ -0,0 +1,25 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from .nonstreaming_base import NonStreamingTestCase
+
+
+class TestGenerateContentSyncNonstreaming(NonStreamingTestCase):
+ def generate_content(self, *args, **kwargs):
+ return self.client.models.generate_content(*args, **kwargs)
+
+ @property
+ def expected_function_name(self):
+ return "google.genai.Models.generate_content"
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_sync_streaming.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_sync_streaming.py
new file mode 100644
index 0000000000..b27e9666a8
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_sync_streaming.py
@@ -0,0 +1,79 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: once the sync streaming case has been implemented, we should have
+# two different tests here that inherit from "streaming_base" and "nonstreaming_base",
+# covering the cases of one response and multiple streaming responses.
+
+
+from ..common.base import TestCase
+
+
+def create_valid_response(
+ response_text="The model response", input_tokens=10, output_tokens=20
+):
+ return {
+ "modelVersion": "gemini-2.0-flash-test123",
+ "usageMetadata": {
+ "promptTokenCount": input_tokens,
+ "candidatesTokenCount": output_tokens,
+ "totalTokenCount": input_tokens + output_tokens,
+ },
+ "candidates": [
+ {
+ "content": {
+ "role": "model",
+ "parts": [
+ {
+ "text": response_text,
+ }
+ ],
+ }
+ }
+ ],
+ }
+
+
+# Temporary test fixture just to ensure that the in-progress work to
+# implement this case doesn't break the original code.
+class TestGenerateContentSyncStreaming(TestCase):
+ def configure_valid_response(
+ self,
+ response_text="The model response",
+ input_tokens=10,
+ output_tokens=20,
+ ):
+ self.requests.add_response(
+ create_valid_response(
+ response_text=response_text,
+ input_tokens=input_tokens,
+ output_tokens=output_tokens,
+ )
+ )
+
+ def generate_content(self, *args, **kwargs):
+ result = []
+ for response in self.client.models.generate_content_stream( # pylint: disable=missing-kwoa
+ *args, **kwargs
+ ):
+ result.append(response)
+ return result
+
+ def test_sync_generate_content_not_broken_by_instrumentation(self):
+ self.configure_valid_response(response_text="Yep, it works!")
+ responses = self.generate_content(
+ model="gemini-2.0-flash", contents="Does this work?"
+ )
+ self.assertEqual(len(responses), 1)
+ self.assertEqual(responses[0].text, "Yep, it works!")
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.latest.txt b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.latest.txt
new file mode 100644
index 0000000000..32cf3422f5
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.latest.txt
@@ -0,0 +1,49 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# ********************************
+# WARNING: NOT HERMETIC !!!!!!!!!!
+# ********************************
+#
+# This "requirements.txt" is installed in conjunction
+# with multiple other dependencies in the top-level "tox.ini"
+# file. In particular, please see:
+#
+# google-genai-latest: {[testenv]test_deps}
+# google-genai-latest: -r {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.latest.txt
+#
+# This provides additional dependencies, namely:
+#
+# opentelemetry-api
+# opentelemetry-sdk
+# opentelemetry-semantic-conventions
+#
+# ... with a "dev" version based on the latest distribution.
+
+
+# This variant of the requirements aims to test the system using
+# the newest supported version of external dependencies.
+
+pytest==7.4.4
+pytest-asyncio==0.21.0
+pytest-vcr==1.0.2
+
+google-auth==2.38.0
+google-genai==1.0.0
+
+# Install locally from the folder. This path is relative to the
+# root directory, given invocation from "tox" at root level.
+-e opentelemetry-instrumentation
+-e instrumentation-genai/opentelemetry-instrumentation-google-genai
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt
new file mode 100644
index 0000000000..f04e668799
--- /dev/null
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt
@@ -0,0 +1,31 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This variant of the requirements aims to test the system using
+# the oldest supported version of external dependencies.
+
+pytest==7.4.4
+pytest-asyncio==0.21.0
+pytest-vcr==1.0.2
+
+google-auth==2.15.0
+google-genai==1.0.0
+opentelemetry-api==1.30.0
+opentelemetry-sdk==1.30.0
+opentelemetry-semantic-conventions==0.51b0
+opentelemetry-instrumentation==0.51b0
+
+# Install locally from the folder. This path is relative to the
+# root directory, given invocation from "tox" at root level.
+-e instrumentation-genai/opentelemetry-instrumentation-google-genai
diff --git a/pyproject.toml b/pyproject.toml
index 5a45c62949..89f415d77c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -144,6 +144,10 @@ members = [
"propagator/*",
"util/opentelemetry-util-http",
]
+# TODO: remove after https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3300
+exclude = [
+ "instrumentation-genai/opentelemetry-instrumentation-google-genai",
+]
[tool.ruff]
# https://docs.astral.sh/ruff/configuration/
diff --git a/scripts/generate_instrumentation_bootstrap.py b/scripts/generate_instrumentation_bootstrap.py
index 435f651f72..71469b1fb2 100755
--- a/scripts/generate_instrumentation_bootstrap.py
+++ b/scripts/generate_instrumentation_bootstrap.py
@@ -53,13 +53,17 @@
"bootstrap_gen.py",
)
-# AWS Lambda instrumentation is excluded from the default list because it often
-# requires specific configurations and dependencies that may not be set up
-# in all environments. Instead, users who need AWS Lambda support can opt-in
-# by manually adding it to their environment.
-# See https://github.com/open-telemetry/opentelemetry-python-contrib/issues/2787
packages_to_exclude = [
+ # AWS Lambda instrumentation is excluded from the default list because it often
+ # requires specific configurations and dependencies that may not be set up
+ # in all environments. Instead, users who need AWS Lambda support can opt-in
+ # by manually adding it to their environment.
+ # See https://github.com/open-telemetry/opentelemetry-python-contrib/issues/2787
"opentelemetry-instrumentation-aws-lambda",
+ # Google GenAI instrumentation is currently excluded because it is still in early
+ # development. This filter will get removed once it is further along in its
+ # development lifecycle and ready to be included by default.
+ "opentelemetry-instrumentation-google-genai",
"opentelemetry-instrumentation-vertexai", # not released yet
]
@@ -67,6 +71,7 @@
unversioned_packages = [
"opentelemetry-instrumentation-openai-v2",
"opentelemetry-instrumentation-vertexai",
+ "opentelemetry-instrumentation-google-genai",
]
diff --git a/tox.ini b/tox.ini
index c55482e780..6f74535ae4 100644
--- a/tox.ini
+++ b/tox.ini
@@ -19,6 +19,12 @@ envlist =
# pypy3-test-instrumentation-vertexai-{oldest,latest}
lint-instrumentation-vertexai
+ ; instrumentation-google-genai
+ py3{9,10,11,12,13}-test-instrumentation-google-genai-{oldest,latest}
+ # Disabling pypy3 as shapely does not have wheels and fails to compile
# pypy3-test-instrumentation-google-genai-{oldest,latest}
+ lint-instrumentation-google-genai
+
; opentelemetry-resource-detector-container
py3{8,9,10,11,12,13}-test-resource-detector-container
pypy3-test-resource-detector-container
@@ -446,6 +452,11 @@ deps =
vertexai-latest: -r {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/requirements.latest.txt
lint-instrumentation-vertexai: -r {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/requirements.oldest.txt
+ google-genai-oldest: -r {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt
+ google-genai-latest: {[testenv]test_deps}
+ google-genai-latest: -r {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.latest.txt
+ lint-instrumentation-google-genai: -r {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt
+
asgi: {[testenv]test_deps}
asgi: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-asgi/test-requirements.txt
@@ -810,6 +821,9 @@ commands =
test-instrumentation-vertexai: pytest {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai/tests --vcr-record=none {posargs}
lint-instrumentation-vertexai: sh -c "cd instrumentation-genai && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-vertexai"
+ test-instrumentation-google-genai: pytest {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests --vcr-record=none {posargs}
+ lint-instrumentation-google-genai: sh -c "cd instrumentation-genai && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-google-genai"
+
test-instrumentation-sio-pika: pytest {toxinidir}/instrumentation/opentelemetry-instrumentation-pika/tests {posargs}
lint-instrumentation-sio-pika: sh -c "cd instrumentation && pylint --rcfile ../.pylintrc opentelemetry-instrumentation-pika"
@@ -1020,5 +1034,6 @@ deps =
{toxinidir}/opentelemetry-instrumentation
{toxinidir}/util/opentelemetry-util-http
{toxinidir}/instrumentation-genai/opentelemetry-instrumentation-vertexai[instruments]
+ {toxinidir}/instrumentation-genai/opentelemetry-instrumentation-google-genai[instruments]
commands =
pyright