Skip to content

Commit 3fd1e59

Browse files
committed
Refactor action default value to None in LLMOutput and related models; improve code consistency and clarity.
1 parent 1f83b1c commit 3fd1e59

File tree

1 file changed

+11
-8
lines changed

1 file changed

+11
-8
lines changed

src/agentlab/llm/response_api.py

Lines changed: 11 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -36,7 +36,7 @@ class LLMOutput:
3636

3737
raw_response: Any = field(default_factory=dict)
3838
think: str = field(default="")
39-
action: str = field(default="noop()") # Default action if no tool call is made
39+
action: str = field(default=None) # Default action if no tool call is made
4040
tool_calls: Any = field(default=None) # This will hold the tool call response if any
4141

4242

@@ -190,8 +190,8 @@ def prepare_message(self) -> List[Message]:
190190
if self.role == "assistant":
191191
# Strip whitespace from assistant text responses. See anthropic error code 400.
192192
for c in output["content"]:
193-
if 'text' in c:
194-
c["text"] = c["text"].strip()
193+
if "text" in c:
194+
c["text"] = c["text"].strip()
195195

196196
return [output]
197197

@@ -351,7 +351,7 @@ def _parse_response(self, response: dict) -> dict:
351351
result = LLMOutput(
352352
raw_response=response,
353353
think="",
354-
action="noop()",
354+
action=None,
355355
tool_calls=None,
356356
)
357357
interesting_keys = ["output_text"]
@@ -429,7 +429,7 @@ def _parse_response(self, response: openai.types.chat.ChatCompletion) -> LLMOutp
429429
output = LLMOutput(
430430
raw_response=response,
431431
think="",
432-
action="noop()", # Default if no tool call
432+
action=None, # Default if no tool call
433433
tool_calls=None,
434434
)
435435
message = response.choices[0].message.to_dict()
@@ -513,11 +513,13 @@ def __init__(
513513

514514
self.client = Anthropic(api_key=api_key)
515515

516-
def _call_api(self, messages: list[dict | MessageBuilder], **kwargs) -> dict:
516+
def _call_api(
517+
self, messages: list[dict | MessageBuilder], tool_choice="auto", **kwargs
518+
) -> dict:
517519
input = []
518520

519521
sys_msg, other_msgs = self.filter_system_messages(messages)
520-
sys_msg_text = "\n".join(c['text'] for m in sys_msg for c in m.content)
522+
sys_msg_text = "\n".join(c["text"] for m in sys_msg for c in m.content)
521523
for msg in other_msgs:
522524
input.extend(msg.prepare_message() if isinstance(msg, MessageBuilder) else [msg])
523525

@@ -527,6 +529,7 @@ def _call_api(self, messages: list[dict | MessageBuilder], **kwargs) -> dict:
527529
"temperature": self.temperature,
528530
"max_tokens": self.max_tokens,
529531
"system": sys_msg_text, # Anthropic API expects system message as a string
532+
"tool_choice": {"type": tool_choice}, # Tool choice for Claude API
530533
**self.extra_kwargs, # Pass tools, tool_choice, etc. here
531534
}
532535
if self.tools is not None:
@@ -562,7 +565,7 @@ def _parse_response(self, response: dict) -> dict:
562565
result = LLMOutput(
563566
raw_response=response,
564567
think="",
565-
action="noop()",
568+
action=None,
566569
tool_calls={
567570
"role": "assistant",
568571
"content": response.content,

0 commit comments

Comments (0)