Skip to content

Commit 6a94af2

Browse files
xuanyang15 and copybara-github

authored and committed
chore: Disable SetModelResponseTool workaround for Vertex AI Gemini 2+ models
Gemini models now [support Function calling being used together with structured output on Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/function-calling#structured-output-bp). Co-authored-by: Xuan Yang <[email protected]> PiperOrigin-RevId: 827709903
1 parent 8b6ee57 commit 6a94af2

File tree

6 files changed

+171
-13
lines changed

6 files changed

+171
-13
lines changed

src/google/adk/flows/llm_flows/_output_schema_processor.py

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
from ...events.event import Event
2626
from ...models.llm_request import LlmRequest
2727
from ...tools.set_model_response_tool import SetModelResponseTool
28+
from ...utils.output_schema_utils import can_use_output_schema_with_tools
2829
from ._base_llm_processor import BaseLlmRequestProcessor
2930

3031

@@ -39,8 +40,13 @@ async def run_async(
3940

4041
agent = invocation_context.agent
4142

42-
# Check if we need the processor: output_schema + tools
43-
if not agent.output_schema or not agent.tools:
43+
# Check if we need the processor: output_schema + tools + cannot use output
44+
# schema with tools
45+
if (
46+
not agent.output_schema
47+
or not agent.tools
48+
or can_use_output_schema_with_tools(agent.model)
49+
):
4450
return
4551

4652
# Add the set_model_response tool to handle structured output

src/google/adk/flows/llm_flows/basic.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
from ...agents.invocation_context import InvocationContext
2626
from ...events.event import Event
2727
from ...models.llm_request import LlmRequest
28+
from ...utils.output_schema_utils import can_use_output_schema_with_tools
2829
from ._base_llm_processor import BaseLlmRequestProcessor
2930

3031

@@ -52,8 +53,9 @@ async def run_async(
5253
# support output_schema and tools together. we have a workaround to support
5354
# both output_schema and tools at the same time. see
5455
# _output_schema_processor.py for details
55-
if agent.output_schema and not agent.tools:
56-
llm_request.set_output_schema(agent.output_schema)
56+
if agent.output_schema:
57+
if not agent.tools or can_use_output_schema_with_tools(agent.model):
58+
llm_request.set_output_schema(agent.output_schema)
5759

5860
llm_request.live_connect_config.response_modalities = (
5961
invocation_context.run_config.response_modalities
Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
# Copyright 2025 Google LLC
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
"""Utilities for Output Schema.
16+
17+
This module is for ADK internal use only.
18+
Please do not rely on the implementation details.
19+
"""
20+
21+
from __future__ import annotations
22+
23+
from typing import Union
24+
25+
from ..models.base_llm import BaseLlm
26+
from .model_name_utils import is_gemini_2_or_above
27+
from .variant_utils import get_google_llm_variant
28+
from .variant_utils import GoogleLLMVariant
29+
30+
31+
def can_use_output_schema_with_tools(model: Union[str, BaseLlm]) -> bool:
  """Returns whether an output schema can be used together with tools.

  Vertex AI Gemini 2+ models support function calling combined with
  structured output, so the SetModelResponseTool workaround is unnecessary
  for them.

  Args:
    model: Either a model name string, or a BaseLlm instance whose `.model`
      attribute holds the model name.

  Returns:
    True if the Vertex AI variant is active and the model is Gemini 2 or
    above; False otherwise.
  """
  # Normalize to the model-name string regardless of input form.
  model_string = model if isinstance(model, str) else model.model

  return (
      get_google_llm_variant() == GoogleLLMVariant.VERTEX_AI
      and is_gemini_2_or_above(model_string)
  )

tests/unittests/flows/llm_flows/test_basic_processor.py

Lines changed: 42 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,8 @@
1414

1515
"""Tests for basic LLM request processor."""
1616

17+
from unittest import mock
18+
1719
from google.adk.agents.invocation_context import InvocationContext
1820
from google.adk.agents.llm_agent import LlmAgent
1921
from google.adk.agents.run_config import RunConfig
@@ -80,7 +82,7 @@ async def test_sets_output_schema_when_no_tools(self):
8082
assert llm_request.config.response_mime_type == 'application/json'
8183

8284
@pytest.mark.asyncio
83-
async def test_skips_output_schema_when_tools_present(self):
85+
async def test_skips_output_schema_when_tools_present(self, mocker):
8486
"""Test that processor skips output_schema when agent has tools."""
8587
agent = LlmAgent(
8688
name='test_agent',
@@ -93,6 +95,11 @@ async def test_skips_output_schema_when_tools_present(self):
9395
llm_request = LlmRequest()
9496
processor = _BasicLlmRequestProcessor()
9597

98+
can_use_output_schema_with_tools = mocker.patch(
99+
'google.adk.flows.llm_flows.basic.can_use_output_schema_with_tools',
100+
mock.MagicMock(return_value=False),
101+
)
102+
96103
# Process the request
97104
events = []
98105
async for event in processor.run_async(invocation_context, llm_request):
@@ -102,6 +109,40 @@ async def test_skips_output_schema_when_tools_present(self):
102109
assert llm_request.config.response_schema is None
103110
assert llm_request.config.response_mime_type != 'application/json'
104111

112+
# Should have checked if output schema can be used with tools
113+
can_use_output_schema_with_tools.assert_called_once_with(agent.model)
114+
115+
@pytest.mark.asyncio
116+
async def test_sets_output_schema_when_tools_present(self, mocker):
117+
"""Test that processor skips output_schema when agent has tools."""
118+
agent = LlmAgent(
119+
name='test_agent',
120+
model='gemini-2.5-flash',
121+
output_schema=OutputSchema,
122+
tools=[FunctionTool(func=dummy_tool)], # Has tools
123+
)
124+
125+
invocation_context = await _create_invocation_context(agent)
126+
llm_request = LlmRequest()
127+
processor = _BasicLlmRequestProcessor()
128+
129+
can_use_output_schema_with_tools = mocker.patch(
130+
'google.adk.flows.llm_flows.basic.can_use_output_schema_with_tools',
131+
mock.MagicMock(return_value=True),
132+
)
133+
134+
# Process the request
135+
events = []
136+
async for event in processor.run_async(invocation_context, llm_request):
137+
events.append(event)
138+
139+
# Should have set response_schema since output schema can be used with tools
140+
assert llm_request.config.response_schema == OutputSchema
141+
assert llm_request.config.response_mime_type == 'application/json'
142+
143+
# Should have checked if output schema can be used with tools
144+
can_use_output_schema_with_tools.assert_called_once_with(agent.model)
145+
105146
@pytest.mark.asyncio
106147
async def test_no_output_schema_no_tools(self):
107148
"""Test that processor works normally when agent has no output_schema or tools."""

tests/unittests/flows/llm_flows/test_output_schema_processor.py

Lines changed: 29 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -14,14 +14,13 @@
1414

1515
"""Tests for output schema processor functionality."""
1616

17-
import json
17+
from unittest import mock
1818

1919
from google.adk.agents.invocation_context import InvocationContext
2020
from google.adk.agents.llm_agent import LlmAgent
2121
from google.adk.agents.run_config import RunConfig
2222
from google.adk.flows.llm_flows.single_flow import SingleFlow
2323
from google.adk.models.llm_request import LlmRequest
24-
from google.adk.models.llm_response import LlmResponse
2524
from google.adk.sessions.in_memory_session_service import InMemorySessionService
2625
from google.adk.tools.function_tool import FunctionTool
2726
from pydantic import BaseModel
@@ -145,7 +144,16 @@ async def test_basic_processor_sets_output_schema_without_tools():
145144

146145

147146
@pytest.mark.asyncio
148-
async def test_output_schema_request_processor():
147+
@pytest.mark.parametrize(
148+
'output_schema_with_tools_allowed',
149+
[
150+
False,
151+
True,
152+
],
153+
)
154+
async def test_output_schema_request_processor(
155+
output_schema_with_tools_allowed, mocker
156+
):
149157
"""Test that output schema processor adds set_model_response tool."""
150158
from google.adk.flows.llm_flows._output_schema_processor import _OutputSchemaRequestProcessor
151159

@@ -161,16 +169,29 @@ async def test_output_schema_request_processor():
161169
llm_request = LlmRequest()
162170
processor = _OutputSchemaRequestProcessor()
163171

172+
can_use_output_schema_with_tools = mocker.patch(
173+
'google.adk.flows.llm_flows._output_schema_processor.can_use_output_schema_with_tools',
174+
mock.MagicMock(return_value=output_schema_with_tools_allowed),
175+
)
176+
164177
# Process the request
165178
events = []
166179
async for event in processor.run_async(invocation_context, llm_request):
167180
events.append(event)
168181

169-
# Should have added set_model_response tool
170-
assert 'set_model_response' in llm_request.tools_dict
171-
172-
# Should have added instruction about using set_model_response
173-
assert 'set_model_response' in llm_request.config.system_instruction
182+
if not output_schema_with_tools_allowed:
183+
# Should have added set_model_response tool if output schema with tools is
184+
# NOT allowed (the workaround path)
185+
assert 'set_model_response' in llm_request.tools_dict
186+
# Should have added instruction about using set_model_response
187+
assert 'set_model_response' in llm_request.config.system_instruction
188+
else:
189+
# Should skip modifying LlmRequest
190+
assert not llm_request.tools_dict
191+
assert not llm_request.config.system_instruction
192+
193+
# Should have checked if output schema can be used with tools
194+
can_use_output_schema_with_tools.assert_called_once_with(agent.model)
174195

175196

176197
@pytest.mark.asyncio
Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
1+
# Copyright 2025 Google LLC
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
16+
from google.adk.models.anthropic_llm import Claude
17+
from google.adk.models.google_llm import Gemini
18+
from google.adk.utils.output_schema_utils import can_use_output_schema_with_tools
19+
import pytest
20+
21+
22+
@pytest.mark.parametrize(
    "model, env_value, expected",
    [
        ("gemini-2.5-pro", "1", True),
        ("gemini-2.5-pro", "0", False),
        ("gemini-2.5-pro", None, False),
        (Gemini(model="gemini-2.5-pro"), "1", True),
        (Gemini(model="gemini-2.5-pro"), "0", False),
        (Gemini(model="gemini-2.5-pro"), None, False),
        ("gemini-2.0-flash", "1", True),
        ("gemini-2.0-flash", "0", False),
        ("gemini-2.0-flash", None, False),
        ("gemini-1.5-pro", "1", False),
        ("gemini-1.5-pro", "0", False),
        ("gemini-1.5-pro", None, False),
        (Claude(model="claude-3.7-sonnet"), "1", False),
        (Claude(model="claude-3.7-sonnet"), "0", False),
        (Claude(model="claude-3.7-sonnet"), None, False),
    ],
)
def test_can_use_output_schema_with_tools(
    monkeypatch, model, env_value, expected
):
  """Test can_use_output_schema_with_tools."""
  # A None env_value simulates the variable being unset entirely.
  if env_value is None:
    monkeypatch.delenv("GOOGLE_GENAI_USE_VERTEXAI", raising=False)
  else:
    monkeypatch.setenv("GOOGLE_GENAI_USE_VERTEXAI", env_value)
  assert can_use_output_schema_with_tools(model) == expected

0 commit comments

Comments
 (0)