diff --git a/src/google/adk/agents/readonly_context.py b/src/google/adk/agents/readonly_context.py index 548425367..2415994b0 100644 --- a/src/google/adk/agents/readonly_context.py +++ b/src/google/adk/agents/readonly_context.py @@ -14,10 +14,13 @@ from __future__ import annotations +import copy from types import MappingProxyType from typing import Any +from typing import List from typing import Optional from typing import TYPE_CHECKING +from typing import TypeVar if TYPE_CHECKING: from google.genai import types @@ -25,6 +28,9 @@ from .invocation_context import InvocationContext +Event = TypeVar('Event') + + class ReadonlyContext: def __init__( @@ -52,3 +58,8 @@ def agent_name(self) -> str: def state(self) -> MappingProxyType[str, Any]: """The state of the current session. READONLY field.""" return MappingProxyType(self._invocation_context.session.state) + + @property + def events(self) -> List[Event]: + """Historical events from the current session.""" + return copy.deepcopy(self._invocation_context.session.events) diff --git a/src/google/adk/auth/auth_preprocessor.py b/src/google/adk/auth/auth_preprocessor.py index b06774973..667c60223 100644 --- a/src/google/adk/auth/auth_preprocessor.py +++ b/src/google/adk/auth/auth_preprocessor.py @@ -44,7 +44,7 @@ async def run_async( from ..agents.llm_agent import LlmAgent agent = invocation_context.agent - if not isinstance(agent, LlmAgent): + if not hasattr(agent, 'canonical_tools'): return events = invocation_context.session.events if not events: @@ -110,7 +110,7 @@ async def run_async( event, { tool.name: tool - for tool in await agent.canonical_tools( + for tool in await getattr(agent, 'canonical_tools')( ReadonlyContext(invocation_context) ) }, diff --git a/src/google/adk/flows/llm_flows/base_llm_flow.py b/src/google/adk/flows/llm_flows/base_llm_flow.py index 90cf0fbcf..c628fbc93 100644 --- a/src/google/adk/flows/llm_flows/base_llm_flow.py +++ b/src/google/adk/flows/llm_flows/base_llm_flow.py @@ -378,14 +378,16 @@ 
async def _preprocess_async( from ...agents.llm_agent import LlmAgent agent = invocation_context.agent - if not isinstance(agent, LlmAgent): - return # Runs processors. for processor in self.request_processors: + logging.debug(f'Running processor: {type(processor).__name__}') async for event in processor.run_async(invocation_context, llm_request): yield event + if not isinstance(agent, LlmAgent): + return + # Run processors for tools. for tool_union in agent.tools: tool_context = ToolContext(invocation_context) diff --git a/src/google/adk/flows/llm_flows/basic.py b/src/google/adk/flows/llm_flows/basic.py index 549c6d875..84d81c247 100644 --- a/src/google/adk/flows/llm_flows/basic.py +++ b/src/google/adk/flows/llm_flows/basic.py @@ -17,7 +17,6 @@ from __future__ import annotations from typing import AsyncGenerator -from typing import Generator from google.genai import types from typing_extensions import override @@ -36,26 +35,7 @@ async def run_async( ) -> AsyncGenerator[Event, None]: from ...agents.llm_agent import LlmAgent - agent = invocation_context.agent - if not isinstance(agent, LlmAgent): - return - - llm_request.model = ( - agent.canonical_model - if isinstance(agent.canonical_model, str) - else agent.canonical_model.model - ) - llm_request.config = ( - agent.generate_content_config.model_copy(deep=True) - if agent.generate_content_config - else types.GenerateContentConfig() - ) - # Only set output_schema if no tools are specified. as of now, model don't - # support output_schema and tools together. we have a workaround to support - # both outoput_schema and tools at the same time. 
see - # _output_schema_processor.py for details - if agent.output_schema and not agent.tools: - llm_request.set_output_schema(agent.output_schema) + llm_request.config = types.GenerateContentConfig() llm_request.live_connect_config.response_modalities = ( invocation_context.run_config.response_modalities @@ -81,6 +61,23 @@ async def run_async( llm_request.live_connect_config.session_resumption = ( invocation_context.run_config.session_resumption ) + agent = invocation_context.agent + if not isinstance(agent, LlmAgent): + return + + llm_request.model = ( + agent.canonical_model + if isinstance(agent.canonical_model, str) + else agent.canonical_model.model + ) + # Only set output_schema if no tools are specified. as of now, models don't + # support output_schema and tools together. we have a workaround to support + # both output_schema and tools at the same time. see + # _output_schema_processor.py for details + if agent.output_schema and not agent.tools: + llm_request.set_output_schema(agent.output_schema) + if agent.generate_content_config: + llm_request.config = agent.generate_content_config.model_copy(deep=True) # TODO: handle tool append here, instead of in BaseTool.process_llm_request. 
diff --git a/src/google/adk/flows/llm_flows/contents.py b/src/google/adk/flows/llm_flows/contents.py index ae1bd44ad..d847c69b9 100644 --- a/src/google/adk/flows/llm_flows/contents.py +++ b/src/google/adk/flows/llm_flows/contents.py @@ -33,6 +33,9 @@ class _ContentLlmRequestProcessor(BaseLlmRequestProcessor): """Builds the contents for the LLM request.""" + def __init__(self, mutating: bool): + self.mutating = mutating + @override async def run_async( self, invocation_context: InvocationContext, llm_request: LlmRequest @@ -40,15 +43,14 @@ async def run_async( from ...agents.llm_agent import LlmAgent agent = invocation_context.agent - if not isinstance(agent, LlmAgent): - return - if agent.include_contents == 'default': + if not isinstance(agent, LlmAgent) or agent.include_contents == 'default': # Include full conversation history llm_request.contents = _get_contents( invocation_context.branch, invocation_context.session.events, agent.name, + self.mutating, ) else: # Include current turn context only (no conversation history) @@ -63,7 +65,8 @@ async def run_async( yield # This is a no-op but maintains generator structure -request_processor = _ContentLlmRequestProcessor() +request_processor = _ContentLlmRequestProcessor(mutating=True) +non_mutating_request_processor = _ContentLlmRequestProcessor(mutating=False) def _rearrange_events_for_async_function_responses_in_history( @@ -203,7 +206,10 @@ def _rearrange_events_for_latest_function_response( def _get_contents( - current_branch: Optional[str], events: list[Event], agent_name: str = '' + current_branch: Optional[str], + events: list[Event], + agent_name: str = '', + mutating: bool = True, ) -> list[types.Content]: """Get the contents for the LLM request. @@ -213,6 +219,7 @@ def _get_contents( current_branch: The current branch of the agent. events: Events to process. agent_name: The name of the agent. + mutating: Whether to rewrite all conversation turns as user verbalizations. Returns: A list of processed contents. 
@@ -240,7 +247,7 @@ def _get_contents( continue filtered_events.append( _convert_foreign_event(event) - if _is_other_agent_reply(agent_name, event) + if mutating and _is_other_agent_reply(agent_name, event) else event ) @@ -313,7 +320,6 @@ def _convert_foreign_event(event: Event) -> Event: Returns: The converted event. - """ if not event.content or not event.content.parts: return event diff --git a/src/google/adk/tools/agent_tool.py b/src/google/adk/tools/agent_tool.py index c0d07238d..6399a6e3e 100644 --- a/src/google/adk/tools/agent_tool.py +++ b/src/google/adk/tools/agent_tool.py @@ -46,11 +46,19 @@ class AgentTool(BaseTool): Attributes: agent: The agent to wrap. skip_summarization: Whether to skip summarization of the agent output. + include_conversational_context: Whether to pass conversation history through + to the sub-agent. """ - def __init__(self, agent: BaseAgent, skip_summarization: bool = False): + def __init__( + self, + agent: BaseAgent, + skip_summarization: bool = False, + include_conversational_context: bool = False, + ): self.agent = agent - self.skip_summarization: bool = skip_summarization + self.skip_summarization = skip_summarization + self.include_conversational_context = include_conversational_context super().__init__(name=agent.name, description=agent.description) @@ -140,6 +148,10 @@ async def run_async( state=tool_context.state.to_dict(), ) + if self.include_conversational_context: + for event in tool_context.events[:-1]: + await runner.session_service.append_event(session, event) + last_event = None async for event in runner.run_async( user_id=session.user_id, session_id=session.id, new_message=content