 
 import inspect
 from collections.abc import Callable
-from typing import Literal
+from typing import Literal, Any
 
 from mellea.backends.aloras import Alora
 from mellea.backends.formatter import Formatter
@@ ... @@
 from mellea.stdlib.requirement import ALoraRequirement, LLMaJRequirement, Requirement
 
 # Chat = dict[Literal["role", "content"], str] # external apply_chat_template type hint is weaker
+# Chat = dict[str, str | list[dict[str, Any]]] # for multi-modal models
 Chat = dict[str, str]
 
 
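The commented-out aliases above record the design space: external `apply_chat_template` implementations type messages loosely, and a multi-modal message would carry a structured content list instead of a plain string. A minimal sketch of both message shapes follows; the multi-modal payload layout (`"type"`/`"text"`/`"image"` keys) is an assumption based on the commented-out alias, not something this commit implements:

```python
from typing import Any

# A plain text message, matching Chat = dict[str, str].
text_msg: dict[str, str] = {"role": "user", "content": "Summarize this document."}

# A hypothetical multi-modal message, matching the commented-out
# dict[str, str | list[dict[str, Any]]] alias; the key names are
# assumed for illustration only.
multimodal_msg: dict[str, Any] = {
    "role": "user",
    "content": [
        {"type": "text", "text": "What is in this image?"},
        {"type": "image", "url": "https://example.com/photo.png"},
    ],
}
```

Keeping the narrower `Chat = dict[str, str]` alias keeps type checking strict for the text-only backends, while the comments preserve the looser shapes that were considered.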
@@ -22,6 +23,11 @@ def to_chat(
     formatter: Formatter,
     system_prompt: str | None,
 ) -> list[Chat]:
+    """
+    Converts a context and an action into a series of dicts to be passed to apply_chat_template.
+
+    This function is used by local inference backends.
+    """
     assert ctx.is_chat_context
 
     linearized_ctx = ctx.view_for_generation()
@@ -46,7 +52,7 @@ def to_chat(
 
     # handle custom system prompts. It's important that we do this before the _parse_and_clean_model_options step.
     if system_prompt is not None:
-        system_msg: dict[str, str] = {"role": "system", "content": system_prompt}
+        system_msg: Chat = {"role": "system", "content": system_prompt}
         ctx_as_conversation.insert(0, system_msg)
 
     return ctx_as_conversation
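For context on how the returned `list[Chat]` is consumed, here is a hedged sketch of passing it to a Hugging Face tokenizer's `apply_chat_template`, as a local inference backend would. The model name and the hard-coded conversation are illustrative assumptions, not part of this commit:

```python
from transformers import AutoTokenizer

# Illustrative model choice; any tokenizer that ships a chat template works.
tokenizer = AutoTokenizer.from_pretrained("ibm-granite/granite-3.0-2b-instruct")

# Stand-in for the list[Chat] that to_chat() returns, with the custom
# system prompt already inserted at position 0.
ctx_as_conversation: list[dict[str, str]] = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What does to_chat do?"},
]

# Render the conversation into a single prompt string for local generation.
prompt = tokenizer.apply_chat_template(
    ctx_as_conversation,
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
```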