diff --git a/src/google/adk/flows/llm_flows/contents.py b/src/google/adk/flows/llm_flows/contents.py
index 93d9a332ae..42d41c873d 100644
--- a/src/google/adk/flows/llm_flows/contents.py
+++ b/src/google/adk/flows/llm_flows/contents.py
@@ -221,6 +221,27 @@ def _contains_empty_content(event: Event) -> bool:
       or event.content.parts[0].text == ''
   ) and (not event.output_transcription and not event.input_transcription)
 
 
+def _should_include_event_in_context(current_branch: Optional[str], event: Event) -> bool:
+  """Determines if an event should be included in the LLM context.
+
+  This filters out events that are considered empty (e.g., no text, function
+  calls, or transcriptions), do not belong to the current agent's branch, or
+  are internal events like authentication or confirmation requests.
+
+  Args:
+    current_branch: The current branch of the agent.
+    event: The event to filter.
+
+  Returns:
+    True if the event should be included in the context, False otherwise.
+  """
+  return not (
+      _contains_empty_content(event)
+      or not _is_event_belongs_to_branch(current_branch, event)
+      or _is_auth_event(event)
+      or _is_request_confirmation_event(event)
+  )
+
 def _get_contents(
     current_branch: Optional[str], events: list[Event], agent_name: str = ''
@@ -240,23 +261,10 @@ def _get_contents(
   accumulated_input_transcription = ''
   accumulated_output_transcription = ''
 
-  # Parse the events, leaving the contents and the function calls and
-  # responses from the current agent.
-  raw_filtered_events = []
-  for event in events:
-    if _contains_empty_content(event):
-      continue
-    if not _is_event_belongs_to_branch(current_branch, event):
-      # Skip events not belong to current branch.
-      continue
-    if _is_auth_event(event):
-      # Skip auth events.
-      continue
-    if _is_request_confirmation_event(event):
-      # Skip request confirmation events.
-      continue
-
-    raw_filtered_events.append(event)
+  raw_filtered_events = [
+      e for e in events
+      if _should_include_event_in_context(current_branch, e)
+  ]
 
   filtered_events = []
   # aggregate transcription events
@@ -343,7 +351,13 @@ def _get_current_turn_contents(
   # Find the latest event that starts the current turn and process from there
   for i in range(len(events) - 1, -1, -1):
     event = events[i]
-    if event.author == 'user' or _is_other_agent_reply(agent_name, event):
+    if (
+        _should_include_event_in_context(current_branch, event)
+        and (
+            event.author == 'user'
+            or _is_other_agent_reply(agent_name, event)
+        )
+    ):
       return _get_contents(current_branch, events[i:], agent_name)
 
   return []
diff --git a/tests/unittests/flows/llm_flows/test_contents.py b/tests/unittests/flows/llm_flows/test_contents.py
index 7283a0ce7f..e8cfd1b64e 100644
--- a/tests/unittests/flows/llm_flows/test_contents.py
+++ b/tests/unittests/flows/llm_flows/test_contents.py
@@ -197,6 +197,58 @@ async def test_include_contents_none_multi_agent_current_turn():
   assert llm_request.contents[1] == types.ModelContent("Current agent in turn")
 
 
+@pytest.mark.asyncio
+async def test_include_contents_none_multi_branch_current_turn():
+  """Test current turn detection in multi-branch scenarios with include_contents='none'."""
+  agent = Agent(
+      model="gemini-2.5-flash", name="current_agent", include_contents="none"
+  )
+  llm_request = LlmRequest(model="gemini-2.5-flash")
+  invocation_context = await testing_utils.create_invocation_context(
+      agent=agent
+  )
+  invocation_context.branch = "root.parent_agent"
+
+  # Create multi-branch conversation where current turn starts from user
+  # This can arise from having a Parallel Agent with two or more Sequential
+  # Agents as sub agents, each with two Llm Agents as sub agents
+  events = [
+      Event(
+          invocation_id="inv1",
+          branch="root",
+          author="user",
+          content=types.UserContent("First user message"),
+      ),
+      Event(
+          invocation_id="inv1",
+          branch="root.parent_agent",
+          author="sibling_agent",
+          content=types.ModelContent("Sibling agent response"),
+      ),
+      Event(
+          invocation_id="inv1",
+          branch="root.uncle_agent",
+          author="cousin_agent",
+          content=types.ModelContent("Cousin agent response"),
+      ),
+  ]
+  invocation_context.session.events = events
+
+  # Process the request
+  async for _ in contents.request_processor.run_async(
+      invocation_context, llm_request
+  ):
+    pass
+
+  # Verify current turn starts from the most recent other agent message of the current branch
+  assert len(llm_request.contents) == 1
+  assert llm_request.contents[0].role == "user"
+  assert llm_request.contents[0].parts == [
+      types.Part(text="For context:"),
+      types.Part(text="[sibling_agent] said: Sibling agent response"),
+  ]
+
+
 @pytest.mark.asyncio
 async def test_authentication_events_are_filtered():
   """Test that authentication function calls and responses are filtered out."""
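
# --- Illustrative sketch (not part of the patch) ---
# A minimal, self-contained demonstration that the new
# _should_include_event_in_context predicate keeps exactly the events the
# removed for-loop in _get_contents kept (De Morgan over the four skip
# conditions). The Event dataclass and the helper stubs below are simplified
# assumptions for illustration only, not the real ADK implementations.
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class Event:
  author: str
  branch: Optional[str] = None
  text: str = ''
  is_auth: bool = False
  is_confirmation: bool = False


def _contains_empty_content(event: Event) -> bool:
  return event.text == ''  # simplified stand-in for the real check


def _is_event_belongs_to_branch(current_branch: Optional[str], event: Event) -> bool:
  # Assumed prefix semantics: an event is visible when the current branch
  # descends from (or equals) the event's branch, or either branch is unset.
  return not current_branch or not event.branch or current_branch.startswith(event.branch)


def _is_auth_event(event: Event) -> bool:
  return event.is_auth


def _is_request_confirmation_event(event: Event) -> bool:
  return event.is_confirmation


def _should_include_event_in_context(current_branch: Optional[str], event: Event) -> bool:
  # Mirrors the patched helper: include the event only if none of the old
  # skip conditions apply.
  return not (
      _contains_empty_content(event)
      or not _is_event_belongs_to_branch(current_branch, event)
      or _is_auth_event(event)
      or _is_request_confirmation_event(event)
  )


def _old_filter(current_branch: Optional[str], events: List[Event]) -> List[Event]:
  # The pre-patch loop with early `continue`s, reproduced for comparison.
  kept = []
  for event in events:
    if _contains_empty_content(event):
      continue
    if not _is_event_belongs_to_branch(current_branch, event):
      continue
    if _is_auth_event(event):
      continue
    if _is_request_confirmation_event(event):
      continue
    kept.append(event)
  return kept


if __name__ == '__main__':
  # Same branch layout as the new multi-branch test above.
  events = [
      Event(author='user', branch='root', text='First user message'),
      Event(author='sibling_agent', branch='root.parent_agent', text='Sibling agent response'),
      Event(author='cousin_agent', branch='root.uncle_agent', text='Cousin agent response'),
      Event(author='tool', branch='root.parent_agent', text='auth request', is_auth=True),
  ]
  new = [e for e in events if _should_include_event_in_context('root.parent_agent', e)]
  assert new == _old_filter('root.parent_agent', events)
  print([e.author for e in new])  # ['user', 'sibling_agent']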