diff --git a/homeassistant/components/anthropic/conversation.py b/homeassistant/components/anthropic/conversation.py
index 846249b1cafac1..f17294fe0e7e01 100644
--- a/homeassistant/components/anthropic/conversation.py
+++ b/homeassistant/components/anthropic/conversation.py
@@ -366,11 +366,11 @@ async def _async_handle_message(
         options = self.entry.options
 
         try:
-            await chat_log.async_update_llm_data(
-                DOMAIN,
-                user_input,
+            await chat_log.async_provide_llm_data(
+                user_input.as_llm_context(DOMAIN),
                 options.get(CONF_LLM_HASS_API),
                 options.get(CONF_PROMPT),
+                user_input.extra_system_prompt,
             )
         except conversation.ConverseError as err:
             return err.as_conversation_result()
diff --git a/homeassistant/components/conversation/chat_log.py b/homeassistant/components/conversation/chat_log.py
index 7580b878d04efa..6322bdb44350c3 100644
--- a/homeassistant/components/conversation/chat_log.py
+++ b/homeassistant/components/conversation/chat_log.py
@@ -14,12 +14,11 @@
 from homeassistant.core import HomeAssistant, callback
 from homeassistant.exceptions import HomeAssistantError, TemplateError
-from homeassistant.helpers import chat_session, intent, llm, template
+from homeassistant.helpers import chat_session, frame, intent, llm, template
 from homeassistant.util.hass_dict import HassKey
 from homeassistant.util.json import JsonObjectType
 
 from . import trace
-from .const import DOMAIN
 from .models import ConversationInput, ConversationResult
 
 DATA_CHAT_LOGS: HassKey[dict[str, ChatLog]] = HassKey("conversation_chat_logs")
 
@@ -359,7 +358,7 @@ async def _async_expand_prompt_template(
         self,
         llm_context: llm.LLMContext,
         prompt: str,
-        language: str,
+        language: str | None,
         user_name: str | None = None,
     ) -> str:
         try:
@@ -373,7 +372,7 @@ async def _async_expand_prompt_template(
             )
         except TemplateError as err:
             LOGGER.error("Error rendering prompt: %s", err)
-            intent_response = intent.IntentResponse(language=language)
+            intent_response = intent.IntentResponse(language=language or "")
             intent_response.async_set_error(
                 intent.IntentResponseErrorCode.UNKNOWN,
                 "Sorry, I had a problem with my template",
@@ -392,14 +391,25 @@ async def async_update_llm_data(
         user_llm_prompt: str | None = None,
     ) -> None:
         """Set the LLM system prompt."""
-        llm_context = llm.LLMContext(
-            platform=conversing_domain,
-            context=user_input.context,
-            language=user_input.language,
-            assistant=DOMAIN,
-            device_id=user_input.device_id,
+        frame.report_usage(
+            "ChatLog.async_update_llm_data",
+            breaks_in_ha_version="2026.1",
+        )
+        return await self.async_provide_llm_data(
+            llm_context=user_input.as_llm_context(conversing_domain),
+            user_llm_hass_api=user_llm_hass_api,
+            user_llm_prompt=user_llm_prompt,
+            user_extra_system_prompt=user_input.extra_system_prompt,
         )
 
+    async def async_provide_llm_data(
+        self,
+        llm_context: llm.LLMContext,
+        user_llm_hass_api: str | list[str] | None = None,
+        user_llm_prompt: str | None = None,
+        user_extra_system_prompt: str | None = None,
+    ) -> None:
+        """Set the LLM system prompt."""
         llm_api: llm.APIInstance | None = None
 
         if user_llm_hass_api:
@@ -413,10 +423,12 @@ async def async_update_llm_data(
                 LOGGER.error(
                     "Error getting LLM API %s for %s: %s",
                     user_llm_hass_api,
-                    conversing_domain,
+                    llm_context.platform,
                     err,
                 )
-                intent_response = intent.IntentResponse(language=user_input.language)
+                intent_response = intent.IntentResponse(
+                    language=llm_context.language or ""
+                )
                 intent_response.async_set_error(
                     intent.IntentResponseErrorCode.UNKNOWN,
                     "Error preparing LLM API",
@@ -430,10 +442,10 @@ async def async_update_llm_data(
         user_name: str | None = None
 
         if (
-            user_input.context
-            and user_input.context.user_id
+            llm_context.context
+            and llm_context.context.user_id
             and (
-                user := await self.hass.auth.async_get_user(user_input.context.user_id)
+                user := await self.hass.auth.async_get_user(llm_context.context.user_id)
             )
         ):
             user_name = user.name
@@ -443,7 +455,7 @@ async def async_update_llm_data(
             await self._async_expand_prompt_template(
                 llm_context,
                 (user_llm_prompt or llm.DEFAULT_INSTRUCTIONS_PROMPT),
-                user_input.language,
+                llm_context.language,
                 user_name,
             )
         )
@@ -455,14 +467,14 @@ async def async_update_llm_data(
             await self._async_expand_prompt_template(
                 llm_context,
                 llm.BASE_PROMPT,
-                user_input.language,
+                llm_context.language,
                 user_name,
             )
         )
 
         if extra_system_prompt := (
             # Take new system prompt if one was given
-            user_input.extra_system_prompt or self.extra_system_prompt
+            user_extra_system_prompt or self.extra_system_prompt
         ):
             prompt_parts.append(extra_system_prompt)
 
diff --git a/homeassistant/components/conversation/models.py b/homeassistant/components/conversation/models.py
index 00097f5b4d3765..dac1fb862ec406 100644
--- a/homeassistant/components/conversation/models.py
+++ b/homeassistant/components/conversation/models.py
@@ -7,7 +7,9 @@
 from typing import Any, Literal
 
 from homeassistant.core import Context
-from homeassistant.helpers import intent
+from homeassistant.helpers import intent, llm
+
+from .const import DOMAIN
 
 
 @dataclass(frozen=True)
@@ -56,6 +58,16 @@ def as_dict(self) -> dict[str, Any]:
             "extra_system_prompt": self.extra_system_prompt,
         }
 
+    def as_llm_context(self, conversing_domain: str) -> llm.LLMContext:
+        """Return input as an LLM context."""
+        return llm.LLMContext(
+            platform=conversing_domain,
+            context=self.context,
+            language=self.language,
+            assistant=DOMAIN,
+            device_id=self.device_id,
+        )
+
 
 @dataclass(slots=True)
 class ConversationResult:
diff --git a/homeassistant/components/google_generative_ai_conversation/conversation.py b/homeassistant/components/google_generative_ai_conversation/conversation.py
index 726572fc5aee60..00199f5fe1f5ba 100644
--- a/homeassistant/components/google_generative_ai_conversation/conversation.py
+++ b/homeassistant/components/google_generative_ai_conversation/conversation.py
@@ -73,11 +73,11 @@ async def _async_handle_message(
         options = self.entry.options
 
         try:
-            await chat_log.async_update_llm_data(
-                DOMAIN,
-                user_input,
+            await chat_log.async_provide_llm_data(
+                user_input.as_llm_context(DOMAIN),
                 options.get(CONF_LLM_HASS_API),
                 options.get(CONF_PROMPT),
+                user_input.extra_system_prompt,
             )
         except conversation.ConverseError as err:
             return err.as_conversation_result()
diff --git a/homeassistant/components/ollama/conversation.py b/homeassistant/components/ollama/conversation.py
index 4b4f79d4eed3cf..e304a39f061f67 100644
--- a/homeassistant/components/ollama/conversation.py
+++ b/homeassistant/components/ollama/conversation.py
@@ -219,11 +219,11 @@ async def _async_handle_message(
         settings = {**self.entry.data, **self.entry.options}
 
         try:
-            await chat_log.async_update_llm_data(
-                DOMAIN,
-                user_input,
+            await chat_log.async_provide_llm_data(
+                user_input.as_llm_context(DOMAIN),
                 settings.get(CONF_LLM_HASS_API),
                 settings.get(CONF_PROMPT),
+                user_input.extra_system_prompt,
             )
         except conversation.ConverseError as err:
             return err.as_conversation_result()
diff --git a/homeassistant/components/openai_conversation/conversation.py b/homeassistant/components/openai_conversation/conversation.py
index a129400194b59f..8fea4613ce0400 100644
--- a/homeassistant/components/openai_conversation/conversation.py
+++ b/homeassistant/components/openai_conversation/conversation.py
@@ -279,11 +279,11 @@ async def _async_handle_message(
         options = self.entry.options
 
         try:
-            await chat_log.async_update_llm_data(
-                DOMAIN,
-                user_input,
+            await chat_log.async_provide_llm_data(
+                user_input.as_llm_context(DOMAIN),
                 options.get(CONF_LLM_HASS_API),
                 options.get(CONF_PROMPT),
+                user_input.extra_system_prompt,
             )
         except conversation.ConverseError as err:
             return err.as_conversation_result()
diff --git a/tests/components/assist_pipeline/test_pipeline.py b/tests/components/assist_pipeline/test_pipeline.py
index abdcb55054c6c0..9ea3802d9f6e1f 100644
--- a/tests/components/assist_pipeline/test_pipeline.py
+++ b/tests/components/assist_pipeline/test_pipeline.py
@@ -1779,11 +1779,11 @@ async def stream_llm_response():
             conversation_input,
         ) as chat_log,
     ):
-        await chat_log.async_update_llm_data(
-            conversing_domain="test",
-            user_input=conversation_input,
+        await chat_log.async_provide_llm_data(
+            conversation_input.as_llm_context("test"),
             user_llm_hass_api="assist",
             user_llm_prompt=None,
+            user_extra_system_prompt=conversation_input.extra_system_prompt,
         )
         async for _content in chat_log.async_add_delta_content_stream(
             agent_id, stream_llm_response()
diff --git a/tests/components/conversation/test_chat_log.py b/tests/components/conversation/test_chat_log.py
index c9e72ae5a032dc..0e2a384f1daa0e 100644
--- a/tests/components/conversation/test_chat_log.py
+++ b/tests/components/conversation/test_chat_log.py
@@ -106,9 +106,8 @@ async def test_llm_api(
         chat_session.async_get_chat_session(hass) as session,
         async_get_chat_log(hass, session, mock_conversation_input) as chat_log,
     ):
-        await chat_log.async_update_llm_data(
-            conversing_domain="test",
-            user_input=mock_conversation_input,
+        await chat_log.async_provide_llm_data(
+            mock_conversation_input.as_llm_context("test"),
             user_llm_hass_api="assist",
             user_llm_prompt=None,
         )
@@ -128,9 +127,8 @@ async def test_unknown_llm_api(
         async_get_chat_log(hass, session, mock_conversation_input) as chat_log,
         pytest.raises(ConverseError) as exc_info,
     ):
-        await chat_log.async_update_llm_data(
-            conversing_domain="test",
-            user_input=mock_conversation_input,
+        await chat_log.async_provide_llm_data(
+            mock_conversation_input.as_llm_context("test"),
             user_llm_hass_api="unknown-api",
             user_llm_prompt=None,
         )
@@ -170,9 +168,8 @@ async def async_get_api_instance(
         chat_session.async_get_chat_session(hass) as session,
         async_get_chat_log(hass, session, mock_conversation_input) as chat_log,
     ):
-        await chat_log.async_update_llm_data(
-            conversing_domain="test",
-            user_input=mock_conversation_input,
+        await chat_log.async_provide_llm_data(
+            mock_conversation_input.as_llm_context("test"),
             user_llm_hass_api=["assist", "my-api"],
             user_llm_prompt=None,
         )
@@ -192,9 +189,8 @@ async def test_template_error(
         async_get_chat_log(hass, session, mock_conversation_input) as chat_log,
         pytest.raises(ConverseError) as exc_info,
     ):
-        await chat_log.async_update_llm_data(
-            conversing_domain="test",
-            user_input=mock_conversation_input,
+        await chat_log.async_provide_llm_data(
+            mock_conversation_input.as_llm_context("test"),
             user_llm_hass_api=None,
             user_llm_prompt="{{ invalid_syntax",
         )
@@ -217,9 +213,8 @@ async def test_template_variables(
         async_get_chat_log(hass, session, mock_conversation_input) as chat_log,
         patch("homeassistant.auth.AuthManager.async_get_user", return_value=mock_user),
     ):
-        await chat_log.async_update_llm_data(
-            conversing_domain="test",
-            user_input=mock_conversation_input,
+        await chat_log.async_provide_llm_data(
+            mock_conversation_input.as_llm_context("test"),
             user_llm_hass_api=None,
             user_llm_prompt=(
                 "The instance name is {{ ha_name }}. "
@@ -249,11 +244,11 @@ async def test_extra_systen_prompt(
         chat_session.async_get_chat_session(hass) as session,
         async_get_chat_log(hass, session, mock_conversation_input) as chat_log,
     ):
-        await chat_log.async_update_llm_data(
-            conversing_domain="test",
-            user_input=mock_conversation_input,
+        await chat_log.async_provide_llm_data(
+            mock_conversation_input.as_llm_context("test"),
             user_llm_hass_api=None,
             user_llm_prompt=None,
+            user_extra_system_prompt=mock_conversation_input.extra_system_prompt,
         )
         chat_log.async_add_assistant_content_without_tools(
             AssistantContent(
@@ -273,11 +268,11 @@ async def test_extra_systen_prompt(
         chat_session.async_get_chat_session(hass, conversation_id) as session,
         async_get_chat_log(hass, session, mock_conversation_input) as chat_log,
     ):
-        await chat_log.async_update_llm_data(
-            conversing_domain="test",
-            user_input=mock_conversation_input,
+        await chat_log.async_provide_llm_data(
+            mock_conversation_input.as_llm_context("test"),
             user_llm_hass_api=None,
             user_llm_prompt=None,
+            user_extra_system_prompt=mock_conversation_input.extra_system_prompt,
         )
 
     assert chat_log.extra_system_prompt == extra_system_prompt
@@ -290,11 +285,11 @@ async def test_extra_systen_prompt(
         chat_session.async_get_chat_session(hass, conversation_id) as session,
         async_get_chat_log(hass, session, mock_conversation_input) as chat_log,
     ):
-        await chat_log.async_update_llm_data(
-            conversing_domain="test",
-            user_input=mock_conversation_input,
+        await chat_log.async_provide_llm_data(
+            mock_conversation_input.as_llm_context("test"),
             user_llm_hass_api=None,
             user_llm_prompt=None,
+            user_extra_system_prompt=mock_conversation_input.extra_system_prompt,
         )
         chat_log.async_add_assistant_content_without_tools(
             AssistantContent(
@@ -314,11 +309,11 @@ async def test_extra_systen_prompt(
         chat_session.async_get_chat_session(hass, conversation_id) as session,
         async_get_chat_log(hass, session, mock_conversation_input) as chat_log,
     ):
-        await chat_log.async_update_llm_data(
-            conversing_domain="test",
-            user_input=mock_conversation_input,
+        await chat_log.async_provide_llm_data(
+            mock_conversation_input.as_llm_context("test"),
             user_llm_hass_api=None,
             user_llm_prompt=None,
+            user_extra_system_prompt=mock_conversation_input.extra_system_prompt,
         )
 
     assert chat_log.extra_system_prompt == extra_system_prompt2
@@ -357,9 +352,8 @@ async def test_tool_call(
         chat_session.async_get_chat_session(hass) as session,
         async_get_chat_log(hass, session, mock_conversation_input) as chat_log,
     ):
-        await chat_log.async_update_llm_data(
-            conversing_domain="test",
-            user_input=mock_conversation_input,
+        await chat_log.async_provide_llm_data(
+            mock_conversation_input.as_llm_context("test"),
             user_llm_hass_api="assist",
             user_llm_prompt=None,
         )
@@ -434,9 +428,8 @@ async def test_tool_call_exception(
         async_get_chat_log(hass, session, mock_conversation_input) as chat_log,
     ):
         mock_get_tools.return_value = [mock_tool]
-        await chat_log.async_update_llm_data(
-            conversing_domain="test",
-            user_input=mock_conversation_input,
+        await chat_log.async_provide_llm_data(
+            mock_conversation_input.as_llm_context("test"),
             user_llm_hass_api="assist",
             user_llm_prompt=None,
         )
@@ -595,9 +588,8 @@ async def stream():
         ) as chat_log,
     ):
         mock_get_tools.return_value = [mock_tool]
-        await chat_log.async_update_llm_data(
-            conversing_domain="test",
-            user_input=mock_conversation_input,
+        await chat_log.async_provide_llm_data(
+            mock_conversation_input.as_llm_context("test"),
             user_llm_hass_api="assist",
             user_llm_prompt=None,
         )
diff --git a/tests/components/google_generative_ai_conversation/conftest.py b/tests/components/google_generative_ai_conversation/conftest.py
index 0d222c78472071..f499f18bc159d6 100644
--- a/tests/components/google_generative_ai_conversation/conftest.py
+++ b/tests/components/google_generative_ai_conversation/conftest.py
@@ -1,6 +1,7 @@
 """Tests helpers."""
 
-from unittest.mock import Mock, patch
+from collections.abc import Generator
+from unittest.mock import AsyncMock, Mock, patch
 
 import pytest
 
@@ -77,3 +78,22 @@ async def mock_init_component(
 async def setup_ha(hass: HomeAssistant) -> None:
     """Set up Home Assistant."""
     assert await async_setup_component(hass, "homeassistant", {})
+
+
+@pytest.fixture
+def mock_send_message_stream() -> Generator[AsyncMock]:
+    """Mock stream response."""
+
+    async def mock_generator(stream):
+        for value in stream:
+            yield value
+
+    with patch(
+        "google.genai.chats.AsyncChat.send_message_stream",
+        AsyncMock(),
+    ) as mock_send_message_stream:
+        mock_send_message_stream.side_effect = lambda **kwargs: mock_generator(
+            mock_send_message_stream.return_value.pop(0)
+        )
+
+        yield mock_send_message_stream
diff --git a/tests/components/google_generative_ai_conversation/test_conversation.py b/tests/components/google_generative_ai_conversation/test_conversation.py
index a55a86b67c92bc..92aa6f08d42a0d 100644
--- a/tests/components/google_generative_ai_conversation/test_conversation.py
+++ b/tests/components/google_generative_ai_conversation/test_conversation.py
@@ -1,6 +1,5 @@
 """Tests for the Google Generative AI Conversation integration conversation platform."""
 
-from collections.abc import Generator
 from unittest.mock import AsyncMock, patch
 
 from freezegun import freeze_time
@@ -41,25 +40,6 @@ def mock_ulid_tools():
     yield
 
 
-@pytest.fixture
-def mock_send_message_stream() -> Generator[AsyncMock]:
-    """Mock stream response."""
-
-    async def mock_generator(stream):
-        for value in stream:
-            yield value
-
-    with patch(
-        "google.genai.chats.AsyncChat.send_message_stream",
-        AsyncMock(),
-    ) as mock_send_message_stream:
-        mock_send_message_stream.side_effect = lambda **kwargs: mock_generator(
-            mock_send_message_stream.return_value.pop(0)
-        )
-
-        yield mock_send_message_stream
-
-
 @pytest.mark.parametrize(
     ("error"),
     [