diff --git a/.github/workflows/test-integrations-ai.yml b/.github/workflows/test-integrations-ai.yml
index fcbb464078..65cc636cd9 100644
--- a/.github/workflows/test-integrations-ai.yml
+++ b/.github/workflows/test-integrations-ai.yml
@@ -82,6 +82,10 @@ jobs:
         run: |
           set -x # print commands that are executed
           ./scripts/runtox.sh "py${{ matrix.python-version }}-langgraph"
+      - name: Test google_genai
+        run: |
+          set -x # print commands that are executed
+          ./scripts/runtox.sh "py${{ matrix.python-version }}-google_genai"
       - name: Test openai_agents
         run: |
           set -x # print commands that are executed
diff --git a/pyproject.toml b/pyproject.toml
index 5b86531014..4441660c50 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -118,6 +118,10 @@ ignore_missing_imports = true
 module = "langgraph.*"
 ignore_missing_imports = true
 
+[[tool.mypy.overrides]]
+module = "google.genai.*"
+ignore_missing_imports = true
+
 [[tool.mypy.overrides]]
 module = "executing.*"
 ignore_missing_imports = true
diff --git a/scripts/populate_tox/config.py b/scripts/populate_tox/config.py
index f6b90e75e6..85988ac3cf 100644
--- a/scripts/populate_tox/config.py
+++ b/scripts/populate_tox/config.py
@@ -142,6 +142,13 @@
         "package": "gql[all]",
         "num_versions": 2,
     },
+    "google_genai": {
+        "package": "google-genai",
+        "deps": {
+            "*": ["pytest-asyncio"],
+        },
+        "python": ">=3.9",
+    },
     "graphene": {
         "package": "graphene",
         "deps": {
diff --git a/scripts/populate_tox/releases.jsonl b/scripts/populate_tox/releases.jsonl
index 9f937e5e77..5ee83b65bc 100644
--- a/scripts/populate_tox/releases.jsonl
+++ b/scripts/populate_tox/releases.jsonl
@@ -46,7 +46,7 @@
 {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Programming Language :: Python", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7"], "name": "boto3", "requires_python": "", "version": "1.12.49", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9"], "name": "boto3", "requires_python": ">= 3.6", "version": "1.20.54", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9"], "name": "boto3", "requires_python": ">= 3.7", "version": "1.28.85", "yanked": false}}
-{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.14", "Programming Language :: Python :: 3.9"], "name": "boto3", "requires_python": ">=3.9", "version": "1.40.46", "yanked": false}}
+{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.14", "Programming Language :: Python :: 3.9"], "name": "boto3", "requires_python": ">=3.9", "version": "1.40.48", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 2.5", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", "Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware", "Topic :: Internet :: WWW/HTTP :: WSGI :: Server", "Topic :: Software Development :: Libraries :: Application Frameworks"], "name": "bottle", "requires_python": "", "version": "0.12.25", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", "Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware", "Topic :: Internet :: WWW/HTTP :: WSGI :: Server", "Topic :: Software Development :: Libraries :: Application Frameworks"], "name": "bottle", "requires_python": null, "version": "0.13.4", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Object Brokering", "Topic :: System :: Distributed Computing"], "name": "celery", "requires_python": ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*", "version": "4.4.7", "yanked": false}}
@@ -66,9 +66,13 @@
 {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "Intended Audience :: System Administrators", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Cython", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Software Development :: Libraries :: Application Frameworks"], "name": "falcon", "requires_python": ">=3.5", "version": "3.1.3", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "Intended Audience :: System Administrators", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Cython", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.14", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Software Development :: Libraries :: Application Frameworks", "Typing :: Typed"], "name": "falcon", "requires_python": ">=3.8", "version": "4.1.0", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 4 - Beta", "Environment :: Web Environment", "Framework :: AsyncIO", "Framework :: FastAPI", "Framework :: Pydantic", "Framework :: Pydantic :: 1", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "fastapi", "requires_python": ">=3.8", "version": "0.105.0", "yanked": false}}
-{"info": {"classifiers": ["Development Status :: 4 - Beta", "Environment :: Web Environment", "Framework :: AsyncIO", "Framework :: FastAPI", "Framework :: Pydantic", "Framework :: Pydantic :: 1", "Framework :: Pydantic :: 2", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "fastapi", "requires_python": ">=3.8", "version": "0.118.0", "yanked": false}}
+{"info": {"classifiers": ["Development Status :: 4 - Beta", "Environment :: Web Environment", "Framework :: AsyncIO", "Framework :: FastAPI", "Framework :: Pydantic", "Framework :: Pydantic :: 1", "Framework :: Pydantic :: 2", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "fastapi", "requires_python": ">=3.8", "version": "0.118.2", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 4 - Beta", "Environment :: Web Environment", "Framework :: AsyncIO", "Framework :: FastAPI", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "fastapi", "requires_python": ">=3.6.1", "version": "0.79.1", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 4 - Beta", "Environment :: Web Environment", "Framework :: AsyncIO", "Framework :: FastAPI", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: HTTP Servers", "Topic :: Software Development", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Application Frameworks", "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed"], "name": "fastapi", "requires_python": ">=3.7", "version": "0.92.0", "yanked": false}}
+{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "google-genai", "requires_python": ">=3.9", "version": "1.29.0", "yanked": false}}
+{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "google-genai", "requires_python": ">=3.9", "version": "1.33.0", "yanked": false}}
+{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "google-genai", "requires_python": ">=3.9", "version": "1.37.0", "yanked": false}}
+{"info": {"classifiers": ["Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Topic :: Internet", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "google-genai", "requires_python": ">=3.9", "version": "1.42.0", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Libraries"], "name": "gql", "requires_python": "", "version": "3.4.1", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Libraries"], "name": "gql", "requires_python": ">=3.8.1", "version": "4.0.0", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Libraries"], "name": "gql", "requires_python": ">=3.8.1", "version": "4.2.0b0", "yanked": false}}
@@ -95,7 +99,7 @@
 {"info": {"classifiers": ["Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Scientific/Engineering :: Artificial Intelligence"], "name": "huggingface-hub", "requires_python": ">=3.8.0", "version": "0.28.1", "yanked": false}}
 {"info": {"classifiers": ["Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Scientific/Engineering :: Artificial Intelligence"], "name": "huggingface-hub", "requires_python": ">=3.8.0", "version": "0.32.6", "yanked": false}}
 {"info": {"classifiers": ["Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Scientific/Engineering :: Artificial Intelligence"], "name": "huggingface-hub", "requires_python": ">=3.8.0", "version": "0.35.3", "yanked": false}}
-{"info": {"classifiers": ["Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Topic :: Scientific/Engineering :: Artificial Intelligence"], "name": "huggingface-hub", "requires_python": ">=3.9.0", "version": "1.0.0rc2", "yanked": false}}
+{"info": {"classifiers": ["Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Topic :: Scientific/Engineering :: Artificial Intelligence"], "name": "huggingface-hub", "requires_python": ">=3.9.0", "version": "1.0.0rc4", "yanked": false}}
 {"info": {"classifiers": ["License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.9"], "name": "langchain", "requires_python": "<4.0,>=3.8.1", "version": "0.1.20", "yanked": false}}
 {"info": {"classifiers": ["License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.9"], "name": "langchain", "requires_python": "<4.0,>=3.8.1", "version": "0.2.17", "yanked": false}}
 {"info": {"classifiers": [], "name": "langchain", "requires_python": "<4.0,>=3.9", "version": "0.3.27", "yanked": false}}
@@ -128,7 +132,7 @@
 {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database"], "name": "pymongo", "requires_python": "", "version": "3.5.1", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database"], "name": "pymongo", "requires_python": "", "version": "3.6.1", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database"], "name": "pymongo", "requires_python": ">=3.6", "version": "4.0.2", "yanked": false}}
-{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database", "Typing :: Typed"], "name": "pymongo", "requires_python": ">=3.9", "version": "4.15.2", "yanked": false}}
+{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database", "Typing :: Typed"], "name": "pymongo", "requires_python": ">=3.9", "version": "4.15.3", "yanked": false}}
 {"info": {"classifiers": ["Framework :: Pylons", "Intended Audience :: Developers", "License :: Repoze Public License", "Programming Language :: Python", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI"], "name": "pyramid", "requires_python": null, "version": "1.0.2", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 6 - Mature", "Framework :: Pyramid", "Intended Audience :: Developers", "License :: Repoze Public License", "Programming Language :: Python", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI"], "name": "pyramid", "requires_python": ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", "version": "1.10.8", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 6 - Mature", "Framework :: Pyramid", "Intended Audience :: Developers", "License :: Repoze Public License", "Programming Language :: Python", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI"], "name": "pyramid", "requires_python": "", "version": "1.6.5", "yanked": false}}
@@ -155,7 +159,7 @@
 {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "name": "redis", "requires_python": ">=3.7", "version": "4.6.0", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "name": "redis", "requires_python": ">=3.8", "version": "5.3.1", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "name": "redis", "requires_python": ">=3.9", "version": "6.4.0", "yanked": false}}
-{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "name": "redis", "requires_python": ">=3.9", "version": "7.0.0b2", "yanked": false}}
+{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "name": "redis", "requires_python": ">=3.9", "version": "7.0.0b3", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 3 - Alpha", "Environment :: Web Environment", "Operating System :: POSIX", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4"], "name": "redis-py-cluster", "requires_python": null, "version": "0.1.0", "yanked": false}}
 {"info": {"classifiers": [], "name": "redis-py-cluster", "requires_python": null, "version": "1.1.0", "yanked": false}}
 {"info": {"classifiers": [], "name": "redis-py-cluster", "requires_python": null, "version": "1.2.0", "yanked": false}}
@@ -191,7 +195,7 @@
 {"info": {"classifiers": ["Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Topic :: Software Development :: Libraries"], "name": "statsig", "requires_python": ">=3.7", "version": "0.55.3", "yanked": false}}
 {"info": {"classifiers": ["Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Topic :: Software Development :: Libraries"], "name": "statsig", "requires_python": ">=3.7", "version": "0.65.0", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "strawberry-graphql", "requires_python": ">=3.8,<4.0", "version": "0.209.8", "yanked": false}}
-{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "strawberry-graphql", "requires_python": "<4.0,>=3.9", "version": "0.283.1", "yanked": false}}
+{"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Python Modules"], "name": "strawberry-graphql", "requires_python": "<4.0,>=3.9", "version": "0.283.2", "yanked": false}}
 {"info": {"classifiers": ["License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "name": "tornado", "requires_python": ">= 3.5", "version": "6.0.4", "yanked": false}}
 {"info": {"classifiers": ["License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "name": "tornado", "requires_python": ">=3.9", "version": "6.5.2", "yanked": false}}
 {"info": {"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: No Input/Output (Daemon)", "Intended Audience :: Developers", "License :: OSI Approved :: GNU General Public License (GPL)", "Natural Language :: English", "Natural Language :: French", "Natural Language :: German", "Natural Language :: Spanish", "Operating System :: OS Independent", "Programming Language :: Python", "Topic :: Software Development :: Libraries :: Application Frameworks"], "name": "trytond", "requires_python": null, "version": "1.2.10", "yanked": false}}
diff --git a/scripts/split_tox_gh_actions/split_tox_gh_actions.py b/scripts/split_tox_gh_actions/split_tox_gh_actions.py
index 81f887ad4f..abfa1b63cc 100755
--- a/scripts/split_tox_gh_actions/split_tox_gh_actions.py
+++ b/scripts/split_tox_gh_actions/split_tox_gh_actions.py
@@ -78,6 +78,7 @@
         "openai-base",
         "openai-notiktoken",
         "langgraph",
+        "google_genai",
        "openai_agents",
         "huggingface_hub",
     ],
diff --git a/sentry_sdk/integrations/__init__.py b/sentry_sdk/integrations/__init__.py
index 3f71f0f4ba..9e279b8345 100644
--- a/sentry_sdk/integrations/__init__.py
+++ b/sentry_sdk/integrations/__init__.py
@@ -140,6 +140,7 @@ def iter_default_integrations(with_auto_enabling_integrations):
     "flask": (1, 1, 4),
     "gql": (3, 4, 1),
     "graphene": (3, 3),
+    "google_genai": (1, 29, 0),  # google-genai
     "grpc": (1, 32, 0),  # grpcio
     "httpx": (0, 16, 0),
     "huggingface_hub": (0, 24, 7),
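With the minimum version registered above, the integration can also be set up explicitly. A minimal sketch (the DSN value is a placeholder; include_prompts is the only option the new integration exposes, and setting it to False keeps prompt and response text off spans even when send_default_pii is enabled):

    import sentry_sdk
    from sentry_sdk.integrations.google_genai import GoogleGenAIIntegration

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
        traces_sample_rate=1.0,
        integrations=[GoogleGenAIIntegration(include_prompts=False)],
    )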
diff --git a/sentry_sdk/integrations/google_genai/__init__.py b/sentry_sdk/integrations/google_genai/__init__.py
new file mode 100644
index 0000000000..7175b64340
--- /dev/null
+++ b/sentry_sdk/integrations/google_genai/__init__.py
@@ -0,0 +1,298 @@
+from functools import wraps
+from typing import (
+    Any,
+    AsyncIterator,
+    Callable,
+    Iterator,
+    List,
+)
+
+import sentry_sdk
+from sentry_sdk.ai.utils import get_start_span_function
+from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.tracing import SPANSTATUS
+
+
+try:
+    from google.genai.models import Models, AsyncModels
+except ImportError:
+    raise DidNotEnable("google-genai not installed")
+
+
+from .consts import IDENTIFIER, ORIGIN, GEN_AI_SYSTEM
+from .utils import (
+    set_span_data_for_request,
+    set_span_data_for_response,
+    _capture_exception,
+    prepare_generate_content_args,
+)
+from .streaming import (
+    set_span_data_for_streaming_response,
+    accumulate_streaming_response,
+)
+
+
+class GoogleGenAIIntegration(Integration):
+    identifier = IDENTIFIER
+    origin = ORIGIN
+
+    def __init__(self, include_prompts=True):
+        # type: (GoogleGenAIIntegration, bool) -> None
+        self.include_prompts = include_prompts
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        # Patch sync methods
+        Models.generate_content = _wrap_generate_content(Models.generate_content)
+        Models.generate_content_stream = _wrap_generate_content_stream(
+            Models.generate_content_stream
+        )
+
+        # Patch async methods
+        AsyncModels.generate_content = _wrap_async_generate_content(
+            AsyncModels.generate_content
+        )
+        AsyncModels.generate_content_stream = _wrap_async_generate_content_stream(
+            AsyncModels.generate_content_stream
+        )
+
+
+def _wrap_generate_content_stream(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    @wraps(f)
+    def new_generate_content_stream(self, *args, **kwargs):
+        # type: (Any, Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
+        if integration is None:
+            return f(self, *args, **kwargs)
+
+        _model, contents, model_name = prepare_generate_content_args(args, kwargs)
+
+        span = get_start_span_function()(
+            op=OP.GEN_AI_INVOKE_AGENT,
+            name="invoke_agent",
+            origin=ORIGIN,
+        )
+        span.__enter__()
+        span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
+        span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
+        set_span_data_for_request(span, integration, model_name, contents, kwargs)
+        span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
+
+        chat_span = sentry_sdk.start_span(
+            op=OP.GEN_AI_CHAT,
+            name=f"chat {model_name}",
+            origin=ORIGIN,
+        )
+        chat_span.__enter__()
+        chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
+        chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
+        chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
+        set_span_data_for_request(chat_span, integration, model_name, contents, kwargs)
+        chat_span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
+
+        try:
+            stream = f(self, *args, **kwargs)
+
+            # Create wrapper iterator to accumulate responses
+            def new_iterator():
+                # type: () -> Iterator[Any]
+                chunks = []  # type: List[Any]
+                try:
+                    for chunk in stream:
+                        chunks.append(chunk)
+                        yield chunk
+                except Exception as exc:
+                    _capture_exception(exc)
+                    chat_span.set_status(SPANSTATUS.ERROR)
+                    raise
+                finally:
+                    # Accumulate all chunks and set final response data on spans
+                    if chunks:
+                        accumulated_response = accumulate_streaming_response(chunks)
+                        set_span_data_for_streaming_response(
+                            chat_span, integration, accumulated_response
+                        )
+                        set_span_data_for_streaming_response(
+                            span, integration, accumulated_response
+                        )
+                    chat_span.__exit__(None, None, None)
+                    span.__exit__(None, None, None)
+
+            return new_iterator()
+
+        except Exception as exc:
+            _capture_exception(exc)
+            chat_span.__exit__(None, None, None)
+            span.__exit__(None, None, None)
+            raise
+
+    return new_generate_content_stream
+
+
+def _wrap_async_generate_content_stream(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    @wraps(f)
+    async def new_async_generate_content_stream(self, *args, **kwargs):
+        # type: (Any, Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
+        if integration is None:
+            return await f(self, *args, **kwargs)
+
+        _model, contents, model_name = prepare_generate_content_args(args, kwargs)
+
+        span = get_start_span_function()(
+            op=OP.GEN_AI_INVOKE_AGENT,
+            name="invoke_agent",
+            origin=ORIGIN,
+        )
+        span.__enter__()
+        span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
+        span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
+        set_span_data_for_request(span, integration, model_name, contents, kwargs)
+        span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
+
+        chat_span = sentry_sdk.start_span(
+            op=OP.GEN_AI_CHAT,
+            name=f"chat {model_name}",
+            origin=ORIGIN,
+        )
+        chat_span.__enter__()
+        chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
+        chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
+        chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
+        set_span_data_for_request(chat_span, integration, model_name, contents, kwargs)
+        chat_span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
+
+        try:
+            stream = await f(self, *args, **kwargs)
+
+            # Create wrapper async iterator to accumulate responses
+            async def new_async_iterator():
+                # type: () -> AsyncIterator[Any]
+                chunks = []  # type: List[Any]
+                try:
+                    async for chunk in stream:
+                        chunks.append(chunk)
+                        yield chunk
+                except Exception as exc:
+                    _capture_exception(exc)
+                    chat_span.set_status(SPANSTATUS.ERROR)
+                    raise
+                finally:
+                    # Accumulate all chunks and set final response data on spans
+                    if chunks:
+                        accumulated_response = accumulate_streaming_response(chunks)
+                        set_span_data_for_streaming_response(
+                            chat_span, integration, accumulated_response
+                        )
+                        set_span_data_for_streaming_response(
+                            span, integration, accumulated_response
+                        )
+                    chat_span.__exit__(None, None, None)
+                    span.__exit__(None, None, None)
+
+            return new_async_iterator()
+
+        except Exception as exc:
+            _capture_exception(exc)
+            chat_span.__exit__(None, None, None)
+            span.__exit__(None, None, None)
+            raise
+
+    return new_async_generate_content_stream
+
+
+def _wrap_generate_content(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    @wraps(f)
+    def new_generate_content(self, *args, **kwargs):
+        # type: (Any, Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
+        if integration is None:
+            return f(self, *args, **kwargs)
+
+        model, contents, model_name = prepare_generate_content_args(args, kwargs)
+
+        with get_start_span_function()(
+            op=OP.GEN_AI_INVOKE_AGENT,
+            name="invoke_agent",
+            origin=ORIGIN,
+        ) as span:
+            span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
+            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
+            set_span_data_for_request(span, integration, model_name, contents, kwargs)
+
+            with sentry_sdk.start_span(
+                op=OP.GEN_AI_CHAT,
+                name=f"chat {model_name}",
+                origin=ORIGIN,
+            ) as chat_span:
+                chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
+                chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
+                chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
+                set_span_data_for_request(
+                    chat_span, integration, model_name, contents, kwargs
+                )
+
+                try:
+                    response = f(self, *args, **kwargs)
+                except Exception as exc:
+                    _capture_exception(exc)
+                    chat_span.set_status(SPANSTATUS.ERROR)
+                    raise
+
+                set_span_data_for_response(chat_span, integration, response)
+                set_span_data_for_response(span, integration, response)
+
+                return response
+
+    return new_generate_content
+
+
+def _wrap_async_generate_content(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    @wraps(f)
+    async def new_async_generate_content(self, *args, **kwargs):
+        # type: (Any, Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
+        if integration is None:
+            return await f(self, *args, **kwargs)
+
+        model, contents, model_name = prepare_generate_content_args(args, kwargs)
+
+        with get_start_span_function()(
+            op=OP.GEN_AI_INVOKE_AGENT,
+            name="invoke_agent",
+            origin=ORIGIN,
+        ) as span:
+            span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
+            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
+            set_span_data_for_request(span, integration, model_name, contents, kwargs)
+
+            with sentry_sdk.start_span(
+                op=OP.GEN_AI_CHAT,
+                name=f"chat {model_name}",
+                origin=ORIGIN,
+            ) as chat_span:
+                chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
+                chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
+                chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
+                set_span_data_for_request(
+                    chat_span, integration, model_name, contents, kwargs
+                )
+                try:
+                    response = await f(self, *args, **kwargs)
+                except Exception as exc:
+                    _capture_exception(exc)
+                    chat_span.set_status(SPANSTATUS.ERROR)
+                    raise
+
+                set_span_data_for_response(chat_span, integration, response)
+                set_span_data_for_response(span, integration, response)
+
+                return response
+
+    return new_async_generate_content
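Note that the streaming wrappers above return a generator, so both the invoke_agent and chat spans stay open until the caller exhausts the stream; accumulation and span exit happen in the generator's finally block. A consumption sketch against the google-genai client (the model name and prompt are examples, not values from this PR):

    from google import genai

    client = genai.Client()  # assumes GOOGLE_API_KEY is set in the environment
    for chunk in client.models.generate_content_stream(
        model="gemini-2.0-flash",  # example model name
        contents="Write a haiku about tracing.",
    ):
        print(chunk.text, end="")
    # Both spans finish here, after the last chunk, carrying the accumulated
    # text, finish reasons, tool calls, and token usage.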
diff --git a/sentry_sdk/integrations/google_genai/consts.py b/sentry_sdk/integrations/google_genai/consts.py
new file mode 100644
index 0000000000..5b53ebf0e2
--- /dev/null
+++ b/sentry_sdk/integrations/google_genai/consts.py
@@ -0,0 +1,16 @@
+GEN_AI_SYSTEM = "gcp.gemini"
+
+# Mapping of tool attributes to their descriptions
+# These are all tools that are available in the Google GenAI API
+TOOL_ATTRIBUTES_MAP = {
+    "google_search_retrieval": "Google Search retrieval tool",
+    "google_search": "Google Search tool",
+    "retrieval": "Retrieval tool",
+    "enterprise_web_search": "Enterprise web search tool",
+    "google_maps": "Google Maps tool",
+    "code_execution": "Code execution tool",
+    "computer_use": "Computer use tool",
+}
+
+IDENTIFIER = "google_genai"
+ORIGIN = f"auto.ai.{IDENTIFIER}"
diff --git a/sentry_sdk/integrations/google_genai/streaming.py b/sentry_sdk/integrations/google_genai/streaming.py
new file mode 100644
index 0000000000..03d09aadf6
--- /dev/null
+++ b/sentry_sdk/integrations/google_genai/streaming.py
@@ -0,0 +1,163 @@
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    List,
+    TypedDict,
+    Optional,
+)
+
+from sentry_sdk.ai.utils import set_data_normalized
+from sentry_sdk.consts import SPANDATA
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.utils import (
+    safe_serialize,
+)
+from .utils import (
+    extract_tool_calls,
+    extract_finish_reasons,
+    extract_contents_text,
+    extract_usage_data,
+    UsageData,
+)
+
+if TYPE_CHECKING:
+    from sentry_sdk.tracing import Span
+    from google.genai.types import GenerateContentResponse
+
+
+class AccumulatedResponse(TypedDict):
+    id: Optional[str]
+    model: Optional[str]
+    text: str
+    finish_reasons: List[str]
+    tool_calls: List[dict[str, Any]]
+    usage_metadata: UsageData
+
+
+def accumulate_streaming_response(chunks):
+    # type: (List[GenerateContentResponse]) -> AccumulatedResponse
+    """Accumulate streaming chunks into a single response-like object."""
+    accumulated_text = []
+    finish_reasons = []
+    tool_calls = []
+    total_input_tokens = 0
+    total_output_tokens = 0
+    total_tokens = 0
+    total_cached_tokens = 0
+    total_reasoning_tokens = 0
+    response_id = None
+    model = None
+
+    for chunk in chunks:
+        # Carry over the response id and model version exposed on the chunks
+        # (the last chunk that provides them wins); without this, the "id"
+        # and "model" fields of the accumulated response would stay None.
+        if getattr(chunk, "response_id", None):
+            response_id = chunk.response_id
+        if getattr(chunk, "model_version", None):
+            model = chunk.model_version
+
+        # Extract text and tool calls
+        if getattr(chunk, "candidates", None):
+            for candidate in getattr(chunk, "candidates", []):
+                if hasattr(candidate, "content") and getattr(
+                    candidate.content, "parts", []
+                ):
+                    extracted_text = extract_contents_text(candidate.content)
+                    if extracted_text:
+                        accumulated_text.append(extracted_text)
+
+        extracted_finish_reasons = extract_finish_reasons(chunk)
+        if extracted_finish_reasons:
+            finish_reasons.extend(extracted_finish_reasons)
+
+        extracted_tool_calls = extract_tool_calls(chunk)
+        if extracted_tool_calls:
+            tool_calls.extend(extracted_tool_calls)
+
+        # Accumulate token usage
+        extracted_usage_data = extract_usage_data(chunk)
+        total_input_tokens += extracted_usage_data["input_tokens"]
+        total_output_tokens += extracted_usage_data["output_tokens"]
+        total_cached_tokens += extracted_usage_data["input_tokens_cached"]
+        total_reasoning_tokens += extracted_usage_data["output_tokens_reasoning"]
+        total_tokens += extracted_usage_data["total_tokens"]
+
+    accumulated_response = AccumulatedResponse(
+        text="".join(accumulated_text),
+        finish_reasons=finish_reasons,
+        tool_calls=tool_calls,
+        usage_metadata=UsageData(
+            input_tokens=total_input_tokens,
+            output_tokens=total_output_tokens,
+            input_tokens_cached=total_cached_tokens,
+            output_tokens_reasoning=total_reasoning_tokens,
+            total_tokens=total_tokens,
+        ),
+        id=response_id,
+        model=model,
+    )
+
+    return accumulated_response
+
+
+def set_span_data_for_streaming_response(span, integration, accumulated_response):
+    # type: (Span, Any, AccumulatedResponse) -> None
+    """Set span data for accumulated streaming response."""
+    if (
+        should_send_default_pii()
+        and integration.include_prompts
+        and accumulated_response.get("text")
+    ):
+        span.set_data(
+            SPANDATA.GEN_AI_RESPONSE_TEXT,
+            safe_serialize([accumulated_response["text"]]),
+        )
+
+    if accumulated_response.get("finish_reasons"):
+        set_data_normalized(
+            span,
+            SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS,
+            accumulated_response["finish_reasons"],
+        )
+
+    if accumulated_response.get("tool_calls"):
+        span.set_data(
+            SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
+            safe_serialize(accumulated_response["tool_calls"]),
+        )
+
+    if accumulated_response.get("id"):
+        span.set_data(SPANDATA.GEN_AI_RESPONSE_ID, accumulated_response["id"])
+    if accumulated_response.get("model"):
+        span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, accumulated_response["model"])
+
+    if accumulated_response["usage_metadata"]["input_tokens"]:
+        span.set_data(
+            SPANDATA.GEN_AI_USAGE_INPUT_TOKENS,
+            accumulated_response["usage_metadata"]["input_tokens"],
+        )
+
+    if accumulated_response["usage_metadata"]["input_tokens_cached"]:
+        span.set_data(
+            SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED,
+            accumulated_response["usage_metadata"]["input_tokens_cached"],
+        )
+
+    if accumulated_response["usage_metadata"]["output_tokens"]:
+        span.set_data(
+            SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS,
+            accumulated_response["usage_metadata"]["output_tokens"],
+        )
+
+    if accumulated_response["usage_metadata"]["output_tokens_reasoning"]:
+        span.set_data(
+            SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING,
+            accumulated_response["usage_metadata"]["output_tokens_reasoning"],
+        )
+
+    if accumulated_response["usage_metadata"]["total_tokens"]:
+        span.set_data(
+            SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS,
+            accumulated_response["usage_metadata"]["total_tokens"],
+        )
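Because extract_usage_data (defined in utils.py below) also accepts plain dicts, the token arithmetic used during accumulation is easy to check in isolation. A worked example with a hypothetical usage payload:

    from sentry_sdk.integrations.google_genai.utils import extract_usage_data

    usage = extract_usage_data(
        {
            "usage_metadata": {
                "prompt_token_count": 10,
                "tool_use_prompt_token_count": 2,
                "cached_content_token_count": 4,
                "thoughts_token_count": 3,
                "candidates_token_count": 7,
                "total_token_count": 22,
            }
        }
    )
    assert usage["input_tokens"] == 12  # prompt + tool-use prompt tokens
    assert usage["input_tokens_cached"] == 4
    assert usage["output_tokens_reasoning"] == 3
    assert usage["output_tokens"] == 10  # candidates + reasoning tokens
    assert usage["total_tokens"] == 22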
+ + Args: + response: The GenerateContentResponse object or dictionary containing usage metadata + + Returns: + UsageData: Dictionary with input_tokens, input_tokens_cached, + output_tokens, and output_tokens_reasoning fields + """ + usage_data = UsageData( + input_tokens=0, + input_tokens_cached=0, + output_tokens=0, + output_tokens_reasoning=0, + total_tokens=0, + ) + + # Handle dictionary response (from streaming) + if isinstance(response, dict): + usage = response.get("usage_metadata", {}) + if not usage: + return usage_data + + prompt_tokens = usage.get("prompt_token_count", 0) or 0 + tool_use_prompt_tokens = usage.get("tool_use_prompt_token_count", 0) or 0 + usage_data["input_tokens"] = prompt_tokens + tool_use_prompt_tokens + + cached_tokens = usage.get("cached_content_token_count", 0) or 0 + usage_data["input_tokens_cached"] = cached_tokens + + reasoning_tokens = usage.get("thoughts_token_count", 0) or 0 + usage_data["output_tokens_reasoning"] = reasoning_tokens + + candidates_tokens = usage.get("candidates_token_count", 0) or 0 + # python-genai reports output and reasoning tokens separately + # reasoning should be sub-category of output tokens + usage_data["output_tokens"] = candidates_tokens + reasoning_tokens + + total_tokens = usage.get("total_token_count", 0) or 0 + usage_data["total_tokens"] = total_tokens + + return usage_data + + if not hasattr(response, "usage_metadata"): + return usage_data + + usage = response.usage_metadata + + # Input tokens include both prompt and tool use prompt tokens + prompt_tokens = getattr(usage, "prompt_token_count", 0) or 0 + tool_use_prompt_tokens = getattr(usage, "tool_use_prompt_token_count", 0) or 0 + usage_data["input_tokens"] = prompt_tokens + tool_use_prompt_tokens + + # Cached input tokens + cached_tokens = getattr(usage, "cached_content_token_count", 0) or 0 + usage_data["input_tokens_cached"] = cached_tokens + + # Reasoning tokens + reasoning_tokens = getattr(usage, "thoughts_token_count", 0) or 0 + usage_data["output_tokens_reasoning"] = reasoning_tokens + + # output_tokens = candidates_tokens + reasoning_tokens + # google-genai reports output and reasoning tokens separately + candidates_tokens = getattr(usage, "candidates_token_count", 0) or 0 + usage_data["output_tokens"] = candidates_tokens + reasoning_tokens + + total_tokens = getattr(usage, "total_token_count", 0) or 0 + usage_data["total_tokens"] = total_tokens + + return usage_data + + +def _capture_exception(exc): + # type: (Any) -> None + """Capture exception with Google GenAI mechanism.""" + event, hint = event_from_exception( + exc, + client_options=sentry_sdk.get_client().options, + mechanism={"type": "google_genai", "handled": False}, + ) + sentry_sdk.capture_event(event, hint=hint) + + +def get_model_name(model): + # type: (Union[str, Model]) -> str + """Extract model name from model parameter.""" + if isinstance(model, str): + return model + # Handle case where model might be an object with a name attribute + if hasattr(model, "name"): + return str(model.name) + return str(model) + + +def extract_contents_text(contents): + # type: (ContentListUnion) -> Optional[str] + """Extract text from contents parameter which can have various formats.""" + if contents is None: + return None + + # Simple string case + if isinstance(contents, str): + return contents + + # List of contents or parts + if isinstance(contents, list): + texts = [] + for item in contents: + # Recursively extract text from each item + extracted = extract_contents_text(item) + if extracted: + 
texts.append(extracted) + return " ".join(texts) if texts else None + + # Dictionary case + if isinstance(contents, dict): + if "text" in contents: + return contents["text"] + # Try to extract from parts if present in dict + if "parts" in contents: + return extract_contents_text(contents["parts"]) + + # Content object with parts - recurse into parts + if getattr(contents, "parts", None): + return extract_contents_text(contents.parts) + + # Direct text attribute + if hasattr(contents, "text"): + return contents.text + + return None + + +def _format_tools_for_span(tools): + # type: (Iterable[Tool | Callable[..., Any]]) -> Optional[List[dict[str, Any]]] + """Format tools parameter for span data.""" + formatted_tools = [] + for tool in tools: + if callable(tool): + # Handle callable functions passed directly + formatted_tools.append( + { + "name": getattr(tool, "__name__", "unknown"), + "description": getattr(tool, "__doc__", None), + } + ) + elif ( + hasattr(tool, "function_declarations") + and tool.function_declarations is not None + ): + # Tool object with function declarations + for func_decl in tool.function_declarations: + formatted_tools.append( + { + "name": getattr(func_decl, "name", None), + "description": getattr(func_decl, "description", None), + } + ) + else: + # Check for predefined tool attributes - each of these tools + # is an attribute of the tool object, by default set to None + for attr_name, description in TOOL_ATTRIBUTES_MAP.items(): + if getattr(tool, attr_name, None): + formatted_tools.append( + { + "name": attr_name, + "description": description, + } + ) + break + + return formatted_tools if formatted_tools else None + + +def extract_tool_calls(response): + # type: (GenerateContentResponse) -> Optional[List[dict[str, Any]]] + """Extract tool/function calls from response candidates and automatic function calling history.""" + + tool_calls = [] + + # Extract from candidates, sometimes tool calls are nested under the content.parts object + if getattr(response, "candidates", []): + for candidate in response.candidates: + if not hasattr(candidate, "content") or not getattr( + candidate.content, "parts", [] + ): + continue + + for part in candidate.content.parts: + if getattr(part, "function_call", None): + function_call = part.function_call + tool_call = { + "name": getattr(function_call, "name", None), + "type": "function_call", + } + + # Extract arguments if available + if getattr(function_call, "args", None): + tool_call["arguments"] = safe_serialize(function_call.args) + + tool_calls.append(tool_call) + + # Extract from automatic_function_calling_history + # This is the history of tool calls made by the model + if getattr(response, "automatic_function_calling_history", None): + for content in response.automatic_function_calling_history: + if not getattr(content, "parts", None): + continue + + for part in getattr(content, "parts", []): + if getattr(part, "function_call", None): + function_call = part.function_call + tool_call = { + "name": getattr(function_call, "name", None), + "type": "function_call", + } + + # Extract arguments if available + if hasattr(function_call, "args"): + tool_call["arguments"] = safe_serialize(function_call.args) + + tool_calls.append(tool_call) + + return tool_calls if tool_calls else None + + +def _capture_tool_input(args, kwargs, tool): + # type: (tuple[Any, ...], dict[str, Any], Tool) -> dict[str, Any] + """Capture tool input from args and kwargs.""" + tool_input = kwargs.copy() if kwargs else {} + + # If we have positional args, try to 
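+    # Illustrative example (not in the original change): for
+    #     def get_weather(location, unit): ...
+    # called through the wrapper as get_weather("SF", unit="C"), this
+    # produces {"unit": "C", "location": "SF"}.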
map them to the function signature + if args: + try: + sig = inspect.signature(tool) + param_names = list(sig.parameters.keys()) + for i, arg in enumerate(args): + if i < len(param_names): + tool_input[param_names[i]] = arg + except Exception: + # Fallback if we can't get the signature + tool_input["args"] = args + + return tool_input + + +def _create_tool_span(tool_name, tool_doc): + # type: (str, Optional[str]) -> Span + """Create a span for tool execution.""" + span = sentry_sdk.start_span( + op=OP.GEN_AI_EXECUTE_TOOL, + name=f"execute_tool {tool_name}", + origin=ORIGIN, + ) + span.set_data(SPANDATA.GEN_AI_TOOL_NAME, tool_name) + span.set_data(SPANDATA.GEN_AI_TOOL_TYPE, "function") + if tool_doc: + span.set_data(SPANDATA.GEN_AI_TOOL_DESCRIPTION, tool_doc) + return span + + +def wrapped_tool(tool): + # type: (Tool | Callable[..., Any]) -> Tool | Callable[..., Any] + """Wrap a tool to emit execute_tool spans when called.""" + if not callable(tool): + # Not a callable function, return as-is (predefined tools) + return tool + + tool_name = getattr(tool, "__name__", "unknown") + tool_doc = tool.__doc__ + + if inspect.iscoroutinefunction(tool): + # Async function + @wraps(tool) + async def async_wrapped(*args, **kwargs): + # type: (Any, Any) -> Any + with _create_tool_span(tool_name, tool_doc) as span: + # Capture tool input + tool_input = _capture_tool_input(args, kwargs, tool) + with capture_internal_exceptions(): + span.set_data( + SPANDATA.GEN_AI_TOOL_INPUT, safe_serialize(tool_input) + ) + + try: + result = await tool(*args, **kwargs) + + # Capture tool output + with capture_internal_exceptions(): + span.set_data( + SPANDATA.GEN_AI_TOOL_OUTPUT, safe_serialize(result) + ) + + return result + except Exception as exc: + _capture_exception(exc) + raise + + return async_wrapped + else: + # Sync function + @wraps(tool) + def sync_wrapped(*args, **kwargs): + # type: (Any, Any) -> Any + with _create_tool_span(tool_name, tool_doc) as span: + # Capture tool input + tool_input = _capture_tool_input(args, kwargs, tool) + with capture_internal_exceptions(): + span.set_data( + SPANDATA.GEN_AI_TOOL_INPUT, safe_serialize(tool_input) + ) + + try: + result = tool(*args, **kwargs) + + # Capture tool output + with capture_internal_exceptions(): + span.set_data( + SPANDATA.GEN_AI_TOOL_OUTPUT, safe_serialize(result) + ) + + return result + except Exception as exc: + _capture_exception(exc) + raise + + return sync_wrapped + + +def wrapped_config_with_tools(config): + # type: (GenerateContentConfig) -> GenerateContentConfig + """Wrap tools in config to emit execute_tool spans. 
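+    For example (illustrative): if config.tools == [get_weather], the
+    returned shallow copy has tools == [wrapped_tool(get_weather)], so
+    invoking the tool during generation emits an execute_tool span.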
Tools are sometimes passed directly as + callable functions as a part of the config object.""" + + if not config or not getattr(config, "tools", None): + return config + + result = copy.copy(config) + result.tools = [wrapped_tool(tool) for tool in config.tools] + + return result + + +def _extract_response_text(response): + # type: (GenerateContentResponse) -> Optional[List[str]] + """Extract text from response candidates.""" + + if not response or not getattr(response, "candidates", []): + return None + + texts = [] + for candidate in response.candidates: + if not hasattr(candidate, "content") or not hasattr(candidate.content, "parts"): + continue + + for part in candidate.content.parts: + if getattr(part, "text", None): + texts.append(part.text) + + return texts if texts else None + + +def extract_finish_reasons(response): + # type: (GenerateContentResponse) -> Optional[List[str]] + """Extract finish reasons from response candidates.""" + if not response or not getattr(response, "candidates", []): + return None + + finish_reasons = [] + for candidate in response.candidates: + if getattr(candidate, "finish_reason", None): + # Convert enum value to string if necessary + reason = str(candidate.finish_reason) + # Remove enum prefix if present (e.g., "FinishReason.STOP" -> "STOP") + if "." in reason: + reason = reason.split(".")[-1] + finish_reasons.append(reason) + + return finish_reasons if finish_reasons else None + + +def set_span_data_for_request(span, integration, model, contents, kwargs): + # type: (Span, Any, str, ContentListUnion, dict[str, Any]) -> None + """Set span data for the request.""" + span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM) + span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model) + + if kwargs.get("stream", False): + span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True) + + config = kwargs.get("config") + + if config is None: + return + + config = cast(GenerateContentConfig, config) + + # Set input messages/prompts if PII is allowed + if should_send_default_pii() and integration.include_prompts: + messages = [] + + # Add system instruction if present + if hasattr(config, "system_instruction"): + system_instruction = config.system_instruction + if system_instruction: + system_text = extract_contents_text(system_instruction) + if system_text: + messages.append({"role": "system", "content": system_text}) + + # Add user message + contents_text = extract_contents_text(contents) + if contents_text: + messages.append({"role": "user", "content": contents_text}) + + if messages: + set_data_normalized( + span, + SPANDATA.GEN_AI_REQUEST_MESSAGES, + messages, + unpack=False, + ) + + # Extract parameters directly from config (not nested under generation_config) + for param, span_key in [ + ("temperature", SPANDATA.GEN_AI_REQUEST_TEMPERATURE), + ("top_p", SPANDATA.GEN_AI_REQUEST_TOP_P), + ("top_k", SPANDATA.GEN_AI_REQUEST_TOP_K), + ("max_output_tokens", SPANDATA.GEN_AI_REQUEST_MAX_TOKENS), + ("presence_penalty", SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY), + ("frequency_penalty", SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY), + ("seed", SPANDATA.GEN_AI_REQUEST_SEED), + ]: + if hasattr(config, param): + value = getattr(config, param) + if value is not None: + span.set_data(span_key, value) + + # Set tools if available + if hasattr(config, "tools"): + tools = config.tools + if tools: + formatted_tools = _format_tools_for_span(tools) + if formatted_tools: + set_data_normalized( + span, + SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, + formatted_tools, + unpack=False, + ) + + +def 
set_span_data_for_response(span, integration, response): + # type: (Span, Any, GenerateContentResponse) -> None + """Set span data for the response.""" + if not response: + return + + if should_send_default_pii() and integration.include_prompts: + response_texts = _extract_response_text(response) + if response_texts: + # Format as JSON string array as per documentation + span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, safe_serialize(response_texts)) + + tool_calls = extract_tool_calls(response) + if tool_calls: + # Tool calls should be JSON serialized + span.set_data(SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(tool_calls)) + + finish_reasons = extract_finish_reasons(response) + if finish_reasons: + set_data_normalized( + span, SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons + ) + + if getattr(response, "response_id", None): + span.set_data(SPANDATA.GEN_AI_RESPONSE_ID, response.response_id) + + if getattr(response, "model_version", None): + span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response.model_version) + + usage_data = extract_usage_data(response) + + if usage_data["input_tokens"]: + span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, usage_data["input_tokens"]) + + if usage_data["input_tokens_cached"]: + span.set_data( + SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED, + usage_data["input_tokens_cached"], + ) + + if usage_data["output_tokens"]: + span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, usage_data["output_tokens"]) + + if usage_data["output_tokens_reasoning"]: + span.set_data( + SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING, + usage_data["output_tokens_reasoning"], + ) + + if usage_data["total_tokens"]: + span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage_data["total_tokens"]) + + +def prepare_generate_content_args(args, kwargs): + # type: (tuple[Any, ...], dict[str, Any]) -> tuple[Any, Any, str] + """Extract and prepare common arguments for generate_content methods.""" + model = args[0] if args else kwargs.get("model", "unknown") + contents = args[1] if len(args) > 1 else kwargs.get("contents") + model_name = get_model_name(model) + + config = kwargs.get("config") + wrapped_config = wrapped_config_with_tools(config) + if wrapped_config is not config: + kwargs["config"] = wrapped_config + + return model, contents, model_name diff --git a/setup.py b/setup.py index fbb8694e5e..c0b0e42629 100644 --- a/setup.py +++ b/setup.py @@ -84,6 +84,7 @@ def get_file_text(file_name): "statsig": ["statsig>=0.55.3"], "tornado": ["tornado>=6"], "unleash": ["UnleashClient>=6.0.1"], + "google-genai": ["google-genai>=1.29.0"], }, entry_points={ "opentelemetry_propagator": [ diff --git a/tests/integrations/google_genai/__init__.py b/tests/integrations/google_genai/__init__.py new file mode 100644 index 0000000000..5143bf4536 --- /dev/null +++ b/tests/integrations/google_genai/__init__.py @@ -0,0 +1,4 @@ +import pytest + +pytest.importorskip("google") +pytest.importorskip("google.genai") diff --git a/tests/integrations/google_genai/test_google_genai.py b/tests/integrations/google_genai/test_google_genai.py new file mode 100644 index 0000000000..470be31944 --- /dev/null +++ b/tests/integrations/google_genai/test_google_genai.py @@ -0,0 +1,907 @@ +import json +import pytest +from unittest import mock + +from google import genai +from google.genai import types as genai_types + +from sentry_sdk import start_transaction +from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.integrations.google_genai import GoogleGenAIIntegration + + +@pytest.fixture +def mock_genai_client(): + 
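+    # (Illustrative, not part of this change) outside of tests the
+    # integration is enabled like any other:
+    #
+    #     import sentry_sdk
+    #     from sentry_sdk.integrations.google_genai import GoogleGenAIIntegration
+    #
+    #     sentry_sdk.init(
+    #         dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
+    #         traces_sample_rate=1.0,
+    #         send_default_pii=True,
+    #         integrations=[GoogleGenAIIntegration(include_prompts=True)],
+    #     )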
"""Fixture that creates a real genai.Client with mocked HTTP responses.""" + client = genai.Client(api_key="test-api-key") + return client + + +def create_mock_http_response(response_body): + """ + Create a mock HTTP response that the API client's request() method would return. + + Args: + response_body: The JSON body as a string or dict + + Returns: + An HttpResponse object with headers and body + """ + if isinstance(response_body, dict): + response_body = json.dumps(response_body) + + return genai_types.HttpResponse( + headers={ + "content-type": "application/json; charset=UTF-8", + }, + body=response_body, + ) + + +def create_mock_streaming_responses(response_chunks): + """ + Create a generator that yields mock HTTP responses for streaming. + + Args: + response_chunks: List of dicts, each representing a chunk's JSON body + + Returns: + A generator that yields HttpResponse objects + """ + for chunk in response_chunks: + yield create_mock_http_response(chunk) + + +# Sample API response JSON (based on real API format from user) +EXAMPLE_API_RESPONSE_JSON = { + "candidates": [ + { + "content": { + "role": "model", + "parts": [{"text": "Hello! How can I help you today?"}], + }, + "finishReason": "STOP", + } + ], + "usageMetadata": { + "promptTokenCount": 10, + "candidatesTokenCount": 20, + "totalTokenCount": 30, + "cachedContentTokenCount": 5, + "thoughtsTokenCount": 3, + }, + "modelVersion": "gemini-1.5-flash", + "responseId": "response-id-123", +} + + +def create_test_config( + temperature=None, + top_p=None, + top_k=None, + max_output_tokens=None, + presence_penalty=None, + frequency_penalty=None, + seed=None, + system_instruction=None, + tools=None, +): + """Create a GenerateContentConfig.""" + config_dict = {} + + if temperature is not None: + config_dict["temperature"] = temperature + if top_p is not None: + config_dict["top_p"] = top_p + if top_k is not None: + config_dict["top_k"] = top_k + if max_output_tokens is not None: + config_dict["max_output_tokens"] = max_output_tokens + if presence_penalty is not None: + config_dict["presence_penalty"] = presence_penalty + if frequency_penalty is not None: + config_dict["frequency_penalty"] = frequency_penalty + if seed is not None: + config_dict["seed"] = seed + if system_instruction is not None: + # Convert string to Content for system instruction + if isinstance(system_instruction, str): + system_instruction = genai_types.Content( + parts=[genai_types.Part(text=system_instruction)], role="system" + ) + config_dict["system_instruction"] = system_instruction + if tools is not None: + config_dict["tools"] = tools + + return genai_types.GenerateContentConfig(**config_dict) + + +@pytest.mark.parametrize( + "send_default_pii, include_prompts", + [ + (True, True), + (True, False), + (False, True), + (False, False), + ], +) +def test_nonstreaming_generate_content( + sentry_init, capture_events, send_default_pii, include_prompts, mock_genai_client +): + sentry_init( + integrations=[GoogleGenAIIntegration(include_prompts=include_prompts)], + traces_sample_rate=1.0, + send_default_pii=send_default_pii, + ) + events = capture_events() + + # Mock the HTTP response at the _api_client.request() level + mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) + + with mock.patch.object( + mock_genai_client._api_client, + "request", + return_value=mock_http_response, + ): + with start_transaction(name="google_genai"): + config = create_test_config(temperature=0.7, max_output_tokens=100) + mock_genai_client.models.generate_content( + 
model="gemini-1.5-flash", contents="Tell me a joke", config=config + ) + assert len(events) == 1 + (event,) = events + + assert event["type"] == "transaction" + assert event["transaction"] == "google_genai" + + # Should have 2 spans: invoke_agent and chat + assert len(event["spans"]) == 2 + invoke_span, chat_span = event["spans"] + + # Check invoke_agent span + assert invoke_span["op"] == OP.GEN_AI_INVOKE_AGENT + assert invoke_span["description"] == "invoke_agent" + assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "gemini-1.5-flash" + assert invoke_span["data"][SPANDATA.GEN_AI_SYSTEM] == "gcp.gemini" + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gemini-1.5-flash" + assert invoke_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "invoke_agent" + + # Check chat span + assert chat_span["op"] == OP.GEN_AI_CHAT + assert chat_span["description"] == "chat gemini-1.5-flash" + assert chat_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert chat_span["data"][SPANDATA.GEN_AI_SYSTEM] == "gcp.gemini" + assert chat_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gemini-1.5-flash" + + if send_default_pii and include_prompts: + # Messages are serialized as JSON strings + messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert messages == [{"role": "user", "content": "Tell me a joke"}] + + # Response text is stored as a JSON array + response_text = chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + # Parse the JSON array + response_texts = json.loads(response_text) + assert response_texts == ["Hello! How can I help you today?"] + else: + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in invoke_span["data"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_span["data"] + + # Check token usage + assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + # Output tokens now include reasoning tokens: candidates_token_count (20) + thoughts_token_count (3) = 23 + assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 23 + assert chat_span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 5 + assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 3 + + # Check configuration parameters + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + + +def test_generate_content_with_system_instruction( + sentry_init, capture_events, mock_genai_client +): + sentry_init( + integrations=[GoogleGenAIIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + config = create_test_config( + system_instruction="You are a helpful assistant", + temperature=0.5, + ) + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents="What is 2+2?", config=config + ) + + (event,) = events + invoke_span = event["spans"][0] + + # Check that system instruction is included in messages + # (PII is enabled and include_prompts is True in this test) + messages_str = invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + # Parse the JSON string to verify content + messages = json.loads(messages_str) + assert len(messages) == 2 + assert messages[0] == {"role": "system", 
"content": "You are a helpful assistant"} + assert messages[1] == {"role": "user", "content": "What is 2+2?"} + + +def test_generate_content_with_tools(sentry_init, capture_events, mock_genai_client): + sentry_init( + integrations=[GoogleGenAIIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + # Create a mock tool function + def get_weather(location: str) -> str: + """Get the weather for a location""" + return f"The weather in {location} is sunny" + + # Create a tool with function declarations using real types + function_declaration = genai_types.FunctionDeclaration( + name="get_weather_tool", + description="Get weather information (tool object)", + parameters=genai_types.Schema( + type=genai_types.Type.OBJECT, + properties={ + "location": genai_types.Schema( + type=genai_types.Type.STRING, + description="The location to get weather for", + ) + }, + required=["location"], + ), + ) + + mock_tool = genai_types.Tool(function_declarations=[function_declaration]) + + # API response for tool usage + tool_response_json = { + "candidates": [ + { + "content": { + "role": "model", + "parts": [{"text": "I'll check the weather."}], + }, + "finishReason": "STOP", + } + ], + "usageMetadata": { + "promptTokenCount": 15, + "candidatesTokenCount": 10, + "totalTokenCount": 25, + }, + } + + mock_http_response = create_mock_http_response(tool_response_json) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + config = create_test_config(tools=[get_weather, mock_tool]) + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents="What's the weather?", config=config + ) + + (event,) = events + invoke_span = event["spans"][0] + + # Check that tools are recorded (data is serialized as a string) + tools_data_str = invoke_span["data"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] + # Parse the JSON string to verify content + tools_data = json.loads(tools_data_str) + assert len(tools_data) == 2 + + # The order of tools may not be guaranteed, so sort by name and description for comparison + sorted_tools = sorted( + tools_data, key=lambda t: (t.get("name", ""), t.get("description", "")) + ) + + # The function tool + assert sorted_tools[0]["name"] == "get_weather" + assert sorted_tools[0]["description"] == "Get the weather for a location" + + # The FunctionDeclaration tool + assert sorted_tools[1]["name"] == "get_weather_tool" + assert sorted_tools[1]["description"] == "Get weather information (tool object)" + + +def test_tool_execution(sentry_init, capture_events): + sentry_init( + integrations=[GoogleGenAIIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + # Create a mock tool function + def get_weather(location: str) -> str: + """Get the weather for a location""" + return f"The weather in {location} is sunny" + + # Create wrapped version of the tool + from sentry_sdk.integrations.google_genai.utils import wrapped_tool + + wrapped_weather = wrapped_tool(get_weather) + + # Execute the wrapped tool + with start_transaction(name="test_tool"): + result = wrapped_weather("San Francisco") + + assert result == "The weather in San Francisco is sunny" + + (event,) = events + assert len(event["spans"]) == 1 + tool_span = event["spans"][0] + + assert tool_span["op"] == OP.GEN_AI_EXECUTE_TOOL + assert tool_span["description"] == "execute_tool get_weather" + assert tool_span["data"][SPANDATA.GEN_AI_TOOL_NAME] == 
"get_weather" + assert tool_span["data"][SPANDATA.GEN_AI_TOOL_TYPE] == "function" + assert ( + tool_span["data"][SPANDATA.GEN_AI_TOOL_DESCRIPTION] + == "Get the weather for a location" + ) + + +def test_error_handling(sentry_init, capture_events, mock_genai_client): + sentry_init( + integrations=[GoogleGenAIIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + # Mock an error at the HTTP level + with mock.patch.object( + mock_genai_client._api_client, "request", side_effect=Exception("API Error") + ): + with start_transaction(name="google_genai"): + with pytest.raises(Exception, match="API Error"): + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", + contents="This will fail", + config=create_test_config(), + ) + + # Should have both transaction and error events + assert len(events) == 2 + error_event, transaction_event = events + + assert error_event["level"] == "error" + assert error_event["exception"]["values"][0]["type"] == "Exception" + assert error_event["exception"]["values"][0]["value"] == "API Error" + assert error_event["exception"]["values"][0]["mechanism"]["type"] == "google_genai" + + +def test_streaming_generate_content(sentry_init, capture_events, mock_genai_client): + """Test streaming with generate_content_stream, verifying chunk accumulation.""" + sentry_init( + integrations=[GoogleGenAIIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + # Create streaming chunks - simulating a multi-chunk response + # Chunk 1: First part of text with partial usage metadata + chunk1_json = { + "candidates": [ + { + "content": { + "role": "model", + "parts": [{"text": "Hello! "}], + }, + # No finishReason in intermediate chunks + } + ], + "usageMetadata": { + "promptTokenCount": 10, + "candidatesTokenCount": 2, + "totalTokenCount": 12, # Not set in intermediate chunks + }, + "responseId": "response-id-stream-123", + "modelVersion": "gemini-1.5-flash", + } + + # Chunk 2: Second part of text with more usage metadata + chunk2_json = { + "candidates": [ + { + "content": { + "role": "model", + "parts": [{"text": "How can I "}], + }, + } + ], + "usageMetadata": { + "promptTokenCount": 10, + "candidatesTokenCount": 3, + "totalTokenCount": 13, + }, + } + + # Chunk 3: Final part with finish reason and complete usage metadata + chunk3_json = { + "candidates": [ + { + "content": { + "role": "model", + "parts": [{"text": "help you today?"}], + }, + "finishReason": "STOP", + } + ], + "usageMetadata": { + "promptTokenCount": 10, + "candidatesTokenCount": 7, + "totalTokenCount": 25, + "cachedContentTokenCount": 5, + "thoughtsTokenCount": 3, + }, + } + + # Create streaming mock responses + stream_chunks = [chunk1_json, chunk2_json, chunk3_json] + mock_stream = create_mock_streaming_responses(stream_chunks) + + with mock.patch.object( + mock_genai_client._api_client, "request_streamed", return_value=mock_stream + ): + with start_transaction(name="google_genai"): + config = create_test_config() + stream = mock_genai_client.models.generate_content_stream( + model="gemini-1.5-flash", contents="Stream me a response", config=config + ) + + # Consume the stream (this is what users do with the integration wrapper) + collected_chunks = list(stream) + + # Verify we got all chunks + assert len(collected_chunks) == 3 + assert collected_chunks[0].candidates[0].content.parts[0].text == "Hello! 
" + assert collected_chunks[1].candidates[0].content.parts[0].text == "How can I " + assert collected_chunks[2].candidates[0].content.parts[0].text == "help you today?" + + (event,) = events + + # There should be 2 spans: invoke_agent and chat + assert len(event["spans"]) == 2 + invoke_span = event["spans"][0] + chat_span = event["spans"][1] + + # Check that streaming flag is set on both spans + assert invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + + # Verify accumulated response text (all chunks combined) + expected_full_text = "Hello! How can I help you today?" + # Response text is stored as a JSON string + chat_response_text = json.loads(chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]) + invoke_response_text = json.loads( + invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + ) + assert chat_response_text == [expected_full_text] + assert invoke_response_text == [expected_full_text] + + # Verify finish reasons (only the final chunk has a finish reason) + # When there's a single finish reason, it's stored as a plain string (not JSON) + assert SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS in chat_span["data"] + assert SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS in invoke_span["data"] + assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == "STOP" + assert invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == "STOP" + + # Verify token counts - should reflect accumulated values + # Input tokens: max of all chunks = 10 + assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 30 + assert invoke_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 30 + + # Output tokens: candidates (2 + 3 + 7 = 12) + reasoning (3) = 15 + # Note: output_tokens includes both candidates and reasoning tokens + assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 15 + assert invoke_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 15 + + # Total tokens: from the last chunk + assert chat_span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 50 + assert invoke_span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 50 + + # Cached tokens: max of all chunks = 5 + assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 5 + assert invoke_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 5 + + # Reasoning tokens: sum of thoughts_token_count = 3 + assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 3 + assert invoke_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 3 + + # Verify model name + assert chat_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gemini-1.5-flash" + assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "gemini-1.5-flash" + + +def test_span_origin(sentry_init, capture_events, mock_genai_client): + sentry_init( + integrations=[GoogleGenAIIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + config = create_test_config() + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents="Test origin", config=config + ) + + (event,) = events + + assert event["contexts"]["trace"]["origin"] == "manual" + for span in event["spans"]: + assert span["origin"] == "auto.ai.google_genai" + + +def test_response_without_usage_metadata( + sentry_init, capture_events, 
mock_genai_client +): + """Test handling of responses without usage metadata""" + sentry_init( + integrations=[GoogleGenAIIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + # Response without usage metadata + response_json = { + "candidates": [ + { + "content": { + "role": "model", + "parts": [{"text": "No usage data"}], + }, + "finishReason": "STOP", + } + ], + } + + mock_http_response = create_mock_http_response(response_json) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + config = create_test_config() + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents="Test", config=config + ) + + (event,) = events + chat_span = event["spans"][1] + + # Usage data should not be present + assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS not in chat_span["data"] + assert SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS not in chat_span["data"] + assert SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS not in chat_span["data"] + + +def test_multiple_candidates(sentry_init, capture_events, mock_genai_client): + """Test handling of multiple response candidates""" + sentry_init( + integrations=[GoogleGenAIIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + # Response with multiple candidates + multi_candidate_json = { + "candidates": [ + { + "content": { + "role": "model", + "parts": [{"text": "Response 1"}], + }, + "finishReason": "STOP", + }, + { + "content": { + "role": "model", + "parts": [{"text": "Response 2"}], + }, + "finishReason": "MAX_TOKENS", + }, + ], + "usageMetadata": { + "promptTokenCount": 5, + "candidatesTokenCount": 15, + "totalTokenCount": 20, + }, + } + + mock_http_response = create_mock_http_response(multi_candidate_json) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + config = create_test_config() + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents="Generate multiple", config=config + ) + + (event,) = events + chat_span = event["spans"][1] + + # Should capture all responses + # Response text is stored as a JSON string when there are multiple responses + response_text = chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + if isinstance(response_text, str) and response_text.startswith("["): + # It's a JSON array + response_list = json.loads(response_text) + assert response_list == ["Response 1", "Response 2"] + else: + # It's concatenated + assert response_text == "Response 1\nResponse 2" + + # Finish reasons are serialized as JSON + finish_reasons = json.loads( + chat_span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] + ) + assert finish_reasons == ["STOP", "MAX_TOKENS"] + + +def test_all_configuration_parameters(sentry_init, capture_events, mock_genai_client): + """Test that all configuration parameters are properly recorded""" + sentry_init( + integrations=[GoogleGenAIIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + config = create_test_config( + temperature=0.8, + top_p=0.95, + top_k=40, + max_output_tokens=2048, + presence_penalty=0.1, + frequency_penalty=0.2, + seed=12345, + ) + 
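+            # (Illustrative note, not part of this change) real multi-candidate
+            # responses are requested via candidate_count in
+            # GenerateContentConfig; the mocked HTTP body here simply returns
+            # two candidates directly.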
mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents="Test all params", config=config + ) + + (event,) = events + invoke_span = event["spans"][0] + + # Check all parameters are recorded + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.8 + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.95 + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_TOP_K] == 40 + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 2048 + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_SEED] == 12345 + + +def test_empty_response(sentry_init, capture_events, mock_genai_client): + """Test handling of minimal response with no content""" + sentry_init( + integrations=[GoogleGenAIIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + # Minimal response with empty candidates array + minimal_response_json = {"candidates": []} + mock_http_response = create_mock_http_response(minimal_response_json) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + response = mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents="Test", config=create_test_config() + ) + + # Response will have an empty candidates list + assert response is not None + assert len(response.candidates) == 0 + + (event,) = events + # Should still create spans even with empty candidates + assert len(event["spans"]) == 2 + + +def test_response_with_different_id_fields( + sentry_init, capture_events, mock_genai_client +): + """Test handling of different response ID field names""" + sentry_init( + integrations=[GoogleGenAIIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + # Response with response_id and model_version + response_json = { + "candidates": [ + { + "content": { + "role": "model", + "parts": [{"text": "Test"}], + }, + "finishReason": "STOP", + } + ], + "responseId": "resp-456", + "modelVersion": "gemini-1.5-flash-001", + } + + mock_http_response = create_mock_http_response(response_json) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents="Test", config=create_test_config() + ) + + (event,) = events + chat_span = event["spans"][1] + + assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "resp-456" + assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "gemini-1.5-flash-001" + + +def test_tool_with_async_function(sentry_init, capture_events): + """Test that async tool functions are properly wrapped""" + sentry_init( + integrations=[GoogleGenAIIntegration()], + traces_sample_rate=1.0, + ) + capture_events() + + # Create an async tool function + async def async_tool(param: str) -> str: + """An async tool""" + return f"Async result: {param}" + + # Import is skipped in sync tests, but we can test the wrapping logic + from sentry_sdk.integrations.google_genai.utils import wrapped_tool + + # The wrapper should handle async functions + wrapped_async_tool = wrapped_tool(async_tool) + assert wrapped_async_tool != async_tool # Should be wrapped + assert hasattr(wrapped_async_tool, "__wrapped__") # Should preserve original + + +def 
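+# (Illustrative, not part of this change) actually awaiting the wrapped
+# coroutine also records tool input/output on an execute_tool span:
+#
+#     import asyncio
+#
+#     result = asyncio.run(wrapped_async_tool("x"))  # == "Async result: x"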
test_contents_as_none(sentry_init, capture_events, mock_genai_client): + """Test handling when contents parameter is None""" + sentry_init( + integrations=[GoogleGenAIIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", contents=None, config=create_test_config() + ) + + (event,) = events + invoke_span = event["spans"][0] + + # Should handle None contents gracefully + messages = invoke_span["data"].get(SPANDATA.GEN_AI_REQUEST_MESSAGES, []) + # Should only have system message if any, not user message + assert all(msg["role"] != "user" or msg["content"] is not None for msg in messages) + + +def test_tool_calls_extraction(sentry_init, capture_events, mock_genai_client): + """Test extraction of tool/function calls from response""" + sentry_init( + integrations=[GoogleGenAIIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + # Response with function calls + function_call_response_json = { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + {"text": "I'll help you with that."}, + { + "functionCall": { + "name": "get_weather", + "args": { + "location": "San Francisco", + "unit": "celsius", + }, + } + }, + { + "functionCall": { + "name": "get_time", + "args": {"timezone": "PST"}, + } + }, + ], + }, + "finishReason": "STOP", + } + ], + "usageMetadata": { + "promptTokenCount": 20, + "candidatesTokenCount": 30, + "totalTokenCount": 50, + }, + } + + mock_http_response = create_mock_http_response(function_call_response_json) + + with mock.patch.object( + mock_genai_client._api_client, "request", return_value=mock_http_response + ): + with start_transaction(name="google_genai"): + mock_genai_client.models.generate_content( + model="gemini-1.5-flash", + contents="What's the weather and time?", + config=create_test_config(), + ) + + (event,) = events + chat_span = event["spans"][1] # The chat span + + # Check that tool calls are extracted and stored + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in chat_span["data"] + + # Parse the JSON string to verify content + tool_calls = json.loads(chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS]) + + assert len(tool_calls) == 2 + + # First tool call + assert tool_calls[0]["name"] == "get_weather" + assert tool_calls[0]["type"] == "function_call" + # Arguments are serialized as JSON strings + assert json.loads(tool_calls[0]["arguments"]) == { + "location": "San Francisco", + "unit": "celsius", + } + + # Second tool call + assert tool_calls[1]["name"] == "get_time" + assert tool_calls[1]["type"] == "function_call" + # Arguments are serialized as JSON strings + assert json.loads(tool_calls[1]["arguments"]) == {"timezone": "PST"} diff --git a/tox.ini b/tox.ini index 2c77edd07c..1a2f8cb571 100644 --- a/tox.ini +++ b/tox.ini @@ -78,6 +78,11 @@ envlist = {py3.9,py3.12,py3.13}-langgraph-v0.6.8 {py3.10,py3.12,py3.13}-langgraph-v1.0.0a4 + {py3.9,py3.12,py3.13}-google_genai-v1.29.0 + {py3.9,py3.12,py3.13}-google_genai-v1.33.0 + {py3.9,py3.12,py3.13}-google_genai-v1.37.0 + {py3.9,py3.12,py3.13}-google_genai-v1.42.0 + {py3.10,py3.11,py3.12}-openai_agents-v0.0.19 {py3.10,py3.12,py3.13}-openai_agents-v0.1.0 {py3.10,py3.12,py3.13}-openai_agents-v0.2.11 @@ -87,14 +92,14 
@@ envlist = {py3.8,py3.12,py3.13}-huggingface_hub-v0.28.1 {py3.8,py3.12,py3.13}-huggingface_hub-v0.32.6 {py3.8,py3.12,py3.13}-huggingface_hub-v0.35.3 - {py3.9,py3.12,py3.13}-huggingface_hub-v1.0.0rc2 + {py3.9,py3.12,py3.13}-huggingface_hub-v1.0.0rc4 # ~~~ Cloud ~~~ {py3.6,py3.7}-boto3-v1.12.49 {py3.6,py3.9,py3.10}-boto3-v1.20.54 {py3.7,py3.11,py3.12}-boto3-v1.28.85 - {py3.9,py3.12,py3.13}-boto3-v1.40.46 + {py3.9,py3.12,py3.13}-boto3-v1.40.48 {py3.6,py3.7,py3.8}-chalice-v1.16.0 {py3.9,py3.12,py3.13}-chalice-v1.32.0 @@ -110,14 +115,14 @@ envlist = {py3.6}-pymongo-v3.5.1 {py3.6,py3.10,py3.11}-pymongo-v3.13.0 - {py3.9,py3.12,py3.13}-pymongo-v4.15.2 + {py3.9,py3.12,py3.13}-pymongo-v4.15.3 {py3.6}-redis-v2.10.6 {py3.6,py3.7,py3.8}-redis-v3.5.3 {py3.7,py3.10,py3.11}-redis-v4.6.0 {py3.8,py3.11,py3.12}-redis-v5.3.1 {py3.9,py3.12,py3.13}-redis-v6.4.0 - {py3.9,py3.12,py3.13}-redis-v7.0.0b2 + {py3.9,py3.12,py3.13}-redis-v7.0.0b3 {py3.6}-redis_py_cluster_legacy-v1.3.6 {py3.6,py3.7,py3.8}-redis_py_cluster_legacy-v2.1.3 @@ -153,7 +158,7 @@ envlist = {py3.8,py3.12,py3.13}-graphene-v3.4.3 {py3.8,py3.10,py3.11}-strawberry-v0.209.8 - {py3.9,py3.12,py3.13}-strawberry-v0.283.1 + {py3.9,py3.12,py3.13}-strawberry-v0.283.2 # ~~~ Network ~~~ @@ -222,7 +227,7 @@ envlist = {py3.6,py3.9,py3.10}-fastapi-v0.79.1 {py3.7,py3.10,py3.11}-fastapi-v0.92.0 {py3.8,py3.10,py3.11}-fastapi-v0.105.0 - {py3.8,py3.12,py3.13}-fastapi-v0.118.0 + {py3.8,py3.12,py3.13}-fastapi-v0.118.2 # ~~~ Web 2 ~~~ @@ -381,6 +386,12 @@ deps = langgraph-v0.6.8: langgraph==0.6.8 langgraph-v1.0.0a4: langgraph==1.0.0a4 + google_genai-v1.29.0: google-genai==1.29.0 + google_genai-v1.33.0: google-genai==1.33.0 + google_genai-v1.37.0: google-genai==1.37.0 + google_genai-v1.42.0: google-genai==1.42.0 + google_genai: pytest-asyncio + openai_agents-v0.0.19: openai-agents==0.0.19 openai_agents-v0.1.0: openai-agents==0.1.0 openai_agents-v0.2.11: openai-agents==0.2.11 @@ -391,7 +402,7 @@ deps = huggingface_hub-v0.28.1: huggingface_hub==0.28.1 huggingface_hub-v0.32.6: huggingface_hub==0.32.6 huggingface_hub-v0.35.3: huggingface_hub==0.35.3 - huggingface_hub-v1.0.0rc2: huggingface_hub==1.0.0rc2 + huggingface_hub-v1.0.0rc4: huggingface_hub==1.0.0rc4 huggingface_hub: responses huggingface_hub: pytest-httpx @@ -400,7 +411,7 @@ deps = boto3-v1.12.49: boto3==1.12.49 boto3-v1.20.54: boto3==1.20.54 boto3-v1.28.85: boto3==1.28.85 - boto3-v1.40.46: boto3==1.40.46 + boto3-v1.40.48: boto3==1.40.48 {py3.7,py3.8}-boto3: urllib3<2.0.0 chalice-v1.16.0: chalice==1.16.0 @@ -419,7 +430,7 @@ deps = pymongo-v3.5.1: pymongo==3.5.1 pymongo-v3.13.0: pymongo==3.13.0 - pymongo-v4.15.2: pymongo==4.15.2 + pymongo-v4.15.3: pymongo==4.15.3 pymongo: mockupdb redis-v2.10.6: redis==2.10.6 @@ -427,7 +438,7 @@ deps = redis-v4.6.0: redis==4.6.0 redis-v5.3.1: redis==5.3.1 redis-v6.4.0: redis==6.4.0 - redis-v7.0.0b2: redis==7.0.0b2 + redis-v7.0.0b3: redis==7.0.0b3 redis: fakeredis!=1.7.4 redis: pytest<8.0.0 redis-v4.6.0: fakeredis<2.31.0 @@ -477,7 +488,7 @@ deps = {py3.6}-graphene: aiocontextvars strawberry-v0.209.8: strawberry-graphql[fastapi,flask]==0.209.8 - strawberry-v0.283.1: strawberry-graphql[fastapi,flask]==0.283.1 + strawberry-v0.283.2: strawberry-graphql[fastapi,flask]==0.283.2 strawberry: httpx strawberry-v0.209.8: pydantic<2.11 @@ -604,7 +615,7 @@ deps = fastapi-v0.79.1: fastapi==0.79.1 fastapi-v0.92.0: fastapi==0.92.0 fastapi-v0.105.0: fastapi==0.105.0 - fastapi-v0.118.0: fastapi==0.118.0 + fastapi-v0.118.2: fastapi==0.118.2 fastapi: httpx fastapi: pytest-asyncio fastapi: 
python-multipart @@ -747,6 +758,7 @@ setenv = falcon: TESTPATH=tests/integrations/falcon fastapi: TESTPATH=tests/integrations/fastapi flask: TESTPATH=tests/integrations/flask + google_genai: TESTPATH=tests/integrations/google_genai gql: TESTPATH=tests/integrations/gql graphene: TESTPATH=tests/integrations/graphene grpc: TESTPATH=tests/integrations/grpc
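# (Illustrative, not part of the diff) one of the new pinned environments can
# be run locally with, e.g.:
#
#     tox -e py3.12-google_genai-v1.42.0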