|
34 | 34 | logger = logging.getLogger(__name__) |
35 | 35 |
|
36 | 36 |
|
def final_response_tool(func=None):
    """
    Mark a tool so that its raw output becomes the agent's final response,
    skipping the post-tool LLM synthesis pass.

    Intended for tools that already produce fully formatted output (for
    example, large markdown tables) where having the LLM regenerate the
    text token by token adds cost but no value.

    Usage:

    .. code-block:: python

        from sdialog.agents import final_response_tool

        @final_response_tool
        def my_tool(...) -> str:
            ...

    :param func: The tool function to mark.
    :type func: Optional[callable]
    :return: Decorated function.
    :rtype: callable
    """
    if func is None:
        # Support the parenthesized form ``@final_response_tool()``:
        # return the decorator itself so the next call receives the function.
        return final_response_tool
    func._sdialog_final_response_tool = True
    return func


37 | 66 | class Agent: |
38 | 67 | """ |
39 | 68 | Agent that simulates a persona-driven conversational actor using an LLM. |
@@ -88,6 +117,8 @@ class Agent: |
88 | 117 | :type example_dialogs: Optional[List[Dialog]] |
89 | 118 | :param tools: List of functions to be used as tools by the agent (if supported by the LLM). |
90 | 119 | :type tools: Optional[List[callable]] |
| 120 | + Tools decorated with ``@final_response_tool`` return their |
| 121 | + raw output directly as the final agent response. |
91 | 122 | :param think: If True, enables "thinking" segments in responses (if supported by the LLM). |
92 | 123 | :type think: bool |
93 | 124 | :param thinking_pattern: Regex pattern to manually identify "thinking" segments in responses. |
@@ -161,6 +192,10 @@ def __init__(self, |
161 | 192 | # Private attributes |
162 | 193 | self._system_prompt_template = system_prompt_template |
163 | 194 | self._thinking_pattern = thinking_pattern |
| 195 | + self._final_response_tools = { |
| 196 | + fn.__name__ for fn in tools |
| 197 | + if getattr(fn, "_sdialog_final_response_tool", False) |
| 198 | + } if tools else set() |
164 | 199 | self._tools = {fn.__name__: tool(fn) for fn in tools} if tools else None |
165 | 200 | self._model_uri = model |
166 | 201 | self._context = context |
@@ -479,6 +514,14 @@ def _get_llm_response(self, messages, update_tool_memory: bool = False) -> Tuple |
479 | 514 | "output": tool_msg.content, |
480 | 515 | "call_id": tool_msg.tool_call_id}, |
481 | 516 | timestamp=int(time()))) |
| 517 | + |
| 518 | + # For tools explicitly marked as direct-response tools, |
| 519 | + # bypass the post-tool LLM step only if output is non-empty. |
| 520 | + # Empty outputs should be handled as regular tools. |
| 521 | + if tool_call["name"] in self._final_response_tools: |
| 522 | + output = tool_msg.content.strip() if isinstance(tool_msg.content, str) else tool_msg.content |
| 523 | + if output: |
| 524 | + return AIMessage(content=str(output)), events |
482 | 525 | else: |
483 | 526 | logger.warning(f"Tool '{tool_call['name']}' not found among bound tools.") |
484 | 527 |
|
|
0 commit comments