Skip to content

fix: Updated some flows to run on BaseAgent instances where supported #2510

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions src/google/adk/agents/readonly_context.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,17 +14,23 @@

from __future__ import annotations

import copy
from types import MappingProxyType
from typing import Any
from typing import List
from typing import Optional
from typing import TYPE_CHECKING
from typing import TypeVar

if TYPE_CHECKING:
from google.genai import types

from .invocation_context import InvocationContext


Event = TypeVar('Event')


class ReadonlyContext:

def __init__(
Expand Down Expand Up @@ -52,3 +58,8 @@ def agent_name(self) -> str:
def state(self) -> MappingProxyType[str, Any]:
"""The state of the current session. READONLY field."""
return MappingProxyType(self._invocation_context.session.state)

@property
def events(self) -> List[Event]:
  """Historical events from the current session. READONLY field.

  Returns:
    A deep copy of the session's event list, so callers cannot mutate
    the underlying session state through the returned events.
  """
  # NOTE(review): deepcopy runs on every property access; for long sessions
  # this may be expensive — confirm callers don't read this in a tight loop.
  return copy.deepcopy(self._invocation_context.session.events)
4 changes: 2 additions & 2 deletions src/google/adk/auth/auth_preprocessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ async def run_async(
from ..agents.llm_agent import LlmAgent

agent = invocation_context.agent
if not isinstance(agent, LlmAgent):
if not hasattr(agent, 'canonical_tools'):
return
events = invocation_context.session.events
if not events:
Expand Down Expand Up @@ -110,7 +110,7 @@ async def run_async(
event,
{
tool.name: tool
for tool in await agent.canonical_tools(
for tool in await getattr(agent, 'canonical_tools')(
ReadonlyContext(invocation_context)
)
},
Expand Down
6 changes: 4 additions & 2 deletions src/google/adk/flows/llm_flows/base_llm_flow.py
Original file line number Diff line number Diff line change
Expand Up @@ -378,14 +378,16 @@ async def _preprocess_async(
from ...agents.llm_agent import LlmAgent

agent = invocation_context.agent
if not isinstance(agent, LlmAgent):
return

# Runs processors.
for processor in self.request_processors:
logging.debug(f'Running processor: {type(processor).__name__}')
async for event in processor.run_async(invocation_context, llm_request):
yield event

if not isinstance(agent, LlmAgent):
return

# Run processors for tools.
for tool_union in agent.tools:
tool_context = ToolContext(invocation_context)
Expand Down
39 changes: 18 additions & 21 deletions src/google/adk/flows/llm_flows/basic.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@
from __future__ import annotations

from typing import AsyncGenerator
from typing import Generator

from google.genai import types
from typing_extensions import override
Expand All @@ -36,26 +35,7 @@ async def run_async(
) -> AsyncGenerator[Event, None]:
from ...agents.llm_agent import LlmAgent

agent = invocation_context.agent
if not isinstance(agent, LlmAgent):
return

llm_request.model = (
agent.canonical_model
if isinstance(agent.canonical_model, str)
else agent.canonical_model.model
)
llm_request.config = (
agent.generate_content_config.model_copy(deep=True)
if agent.generate_content_config
else types.GenerateContentConfig()
)
# Only set output_schema if no tools are specified. As of now, models don't
# support output_schema and tools together. We have a workaround to support
# both output_schema and tools at the same time. See
# _output_schema_processor.py for details.
if agent.output_schema and not agent.tools:
llm_request.set_output_schema(agent.output_schema)
llm_request.config = types.GenerateContentConfig()

llm_request.live_connect_config.response_modalities = (
invocation_context.run_config.response_modalities
Expand All @@ -81,6 +61,23 @@ async def run_async(
llm_request.live_connect_config.session_resumption = (
invocation_context.run_config.session_resumption
)
agent = invocation_context.agent
if not isinstance(agent, LlmAgent):
return

llm_request.model = (
agent.canonical_model
if isinstance(agent.canonical_model, str)
else agent.canonical_model.model
)
# Only set output_schema if no tools are specified. As of now, models don't
# support output_schema and tools together. We have a workaround to support
# both output_schema and tools at the same time. See
# _output_schema_processor.py for details.
if agent.output_schema and not agent.tools:
llm_request.set_output_schema(agent.output_schema)
if agent.generate_content_config:
llm_request.config = agent.generate_content_config.model_copy(deep=True)

# TODO: handle tool append here, instead of in BaseTool.process_llm_request.

Expand Down
20 changes: 13 additions & 7 deletions src/google/adk/flows/llm_flows/contents.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,22 +33,24 @@
class _ContentLlmRequestProcessor(BaseLlmRequestProcessor):
"""Builds the contents for the LLM request."""

def __init__(self, mutating: bool):
self.mutating = mutating

@override
async def run_async(
self, invocation_context: InvocationContext, llm_request: LlmRequest
) -> AsyncGenerator[Event, None]:
from ...agents.llm_agent import LlmAgent

agent = invocation_context.agent
if not isinstance(agent, LlmAgent):
return

if agent.include_contents == 'default':
if not isinstance(agent, LlmAgent) or agent.include_contents == 'default':
# Include full conversation history
llm_request.contents = _get_contents(
invocation_context.branch,
invocation_context.session.events,
agent.name,
self.mutating,
)
else:
# Include current turn context only (no conversation history)
Expand All @@ -63,7 +65,8 @@ async def run_async(
yield # This is a no-op but maintains generator structure


# Module-level processor instances.
# The default processor rewrites other agents' replies into user-voiced
# context ("mutating"); the non-mutating variant leaves events untouched.
# The old no-argument instantiation is removed: __init__ now requires
# `mutating`, so `_ContentLlmRequestProcessor()` would raise TypeError.
request_processor = _ContentLlmRequestProcessor(mutating=True)
non_mutating_request_processor = _ContentLlmRequestProcessor(mutating=False)


def _rearrange_events_for_async_function_responses_in_history(
Expand Down Expand Up @@ -203,7 +206,10 @@ def _rearrange_events_for_latest_function_response(


def _get_contents(
current_branch: Optional[str], events: list[Event], agent_name: str = ''
current_branch: Optional[str],
events: list[Event],
agent_name: str = '',
mutating: bool = True,
) -> list[types.Content]:
"""Get the contents for the LLM request.
Expand All @@ -213,6 +219,7 @@ def _get_contents(
current_branch: The current branch of the agent.
events: Events to process.
agent_name: The name of the agent.
mutating: Whether to rewrite all conversation turns as user verbalizations.
Returns:
A list of processed contents.
Expand Down Expand Up @@ -240,7 +247,7 @@ def _get_contents(
continue
filtered_events.append(
_convert_foreign_event(event)
if _is_other_agent_reply(agent_name, event)
if mutating and _is_other_agent_reply(agent_name, event)
else event
)

Expand Down Expand Up @@ -313,7 +320,6 @@ def _convert_foreign_event(event: Event) -> Event:
Returns:
The converted event.
"""
if not event.content or not event.content.parts:
return event
Expand Down
16 changes: 14 additions & 2 deletions src/google/adk/tools/agent_tool.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,11 +46,19 @@ class AgentTool(BaseTool):
Attributes:
agent: The agent to wrap.
skip_summarization: Whether to skip summarization of the agent output.
include_conversational_context: Whether to pass conversation history through
to the sub-agent.
"""

def __init__(self, agent: BaseAgent, skip_summarization: bool = False):
def __init__(
    self,
    agent: BaseAgent,
    skip_summarization: bool = False,
    include_conversational_context: bool = False,
):
  """Initializes the AgentTool.

  Args:
    agent: The agent to wrap as a tool.
    skip_summarization: Whether to skip summarization of the agent output.
    include_conversational_context: Whether to pass conversation history
      through to the sub-agent.
  """
  self.agent = agent
  # Single assignment here: the duplicated pre-refactor line that assigned
  # skip_summarization twice is removed.
  self.skip_summarization: bool = skip_summarization
  self.include_conversational_context: bool = include_conversational_context

  # The tool's name and description mirror the wrapped agent's.
  super().__init__(name=agent.name, description=agent.description)

Expand Down Expand Up @@ -140,6 +148,10 @@ async def run_async(
state=tool_context.state.to_dict(),
)

if self.include_conversational_context:
for event in tool_context.events[:-1]:
await runner.session_service.append_event(session, event)

last_event = None
async for event in runner.run_async(
user_id=session.user_id, session_id=session.id, new_message=content
Expand Down