Skip to content

Commit 707462e

Browse files
Remove commented-out code in MessageBuilder and response models
1 parent f3eff52 commit 707462e

File tree

1 file changed

+4
-18
lines changed

1 file changed

+4
-18
lines changed

src/agentlab/llm/response_api.py

Lines changed: 4 additions & 18 deletions
Original file line number | Diff line number | Diff line change
@@ -73,10 +73,6 @@ def update_last_raw_response(self, last_raw_response: Any) -> "MessageBuilder":
7373
self.last_raw_response = last_raw_response
7474
return self
7575

76-
# def add_tool_id(self, id: str) -> "MessageBuilder":
77-
# self.tool_call_id = id
78-
# return self
79-
8076
def add_text(self, text: str) -> "MessageBuilder":
8177
self.content.append({"text": text})
8278
return self
@@ -96,10 +92,6 @@ def to_markdown(self) -> str:
9692
markdown = f"### {self.role.capitalize()}\n"
9793
markdown += "\n".join(parts)
9894

99-
# if self.role == "tool":
100-
# assert self.tool_call_id is not None, "Tool call ID is required for tool messages"
101-
# markdown += f"\n\n---\n\n**Tool Call ID:** `{self.tool_call_id}`"
102-
10395
return markdown
10496

10597
def add_image_url(self, image_url: str) -> "MessageBuilder":
@@ -176,7 +168,7 @@ def prepare_message(self) -> List[Message]:
176168
output["role"] = "user"
177169

178170
if self.role == "tool":
179-
# assert self.tool_call_id is not None, "Tool call ID is required for tool messages"
171+
180172
api_response = self.last_raw_response
181173
fn_calls = [content for content in api_response.content if content.type == "tool_use"]
182174
assert len(fn_calls) > 0, "No tool calls found in the last response"
@@ -197,7 +189,6 @@ def prepare_message(self) -> List[Message]:
197189
for c in output["content"]:
198190
if "text" in c:
199191
c["text"] = c["text"].strip()
200-
201192
return [output]
202193

203194
def transform_content(self, content: ContentItem) -> ContentItem:
@@ -377,7 +368,7 @@ def _parse_response(self, response: dict) -> dict:
377368
if len(output.summary) > 0:
378369
result.think += output.summary[0].text + "\n"
379370

380-
elif output.type == "message" and output.content: # Why did i add a 'message' here?
371+
elif output.type == "message" and output.content:
381372
result.think += output.content[0].text + "\n"
382373
for key in interesting_keys:
383374
if key_content := getattr(output, "output_text", None) is not None:
@@ -486,18 +477,13 @@ def extract_content_with_reasoning(message, wrap_tag="think"):
486477

487478
if reasoning_content:
488479
# Wrap reasoning in <think> tags with newlines for clarity
489-
reasoning_content = f"<{wrap_tag}>{reasoning_content}</{wrap_tag}>\n" # why I do need to enclose reasoning in <think> tags?
480+
reasoning_content = f"<{wrap_tag}>{reasoning_content}</{wrap_tag}>\n"
490481
logging.debug("Extracting content from response.choices[i].message.reasoning")
491482
else:
492483
reasoning_content = ""
493484
return f"{reasoning_content}{msg_content}{message.get('content', '')}"
494485

495486

496-
# To Do: Double check the expected action format by browsergym.
497-
# openai action output do not have parenthesis but the antropic action parsing does.
498-
# Confirm with allac if this is the expected format.
499-
500-
501487
class ClaudeResponseModel(BaseModelWithPricing):
502488
def __init__(
503489
self,
@@ -600,7 +586,7 @@ def _parse_response(self, response: dict) -> dict:
600586
def apply_cache_breakpoints(self, msg: Message, prepared_msg: dict) -> List[Message]:
601587
"""Apply cache breakpoints to the messages."""
602588
if getattr(msg, "_cache_breakpoint", False):
603-
prepared_msg[-1]["content"][-1]["cache_control"] = {"type": "ephemeral"}
589+
prepared_msg[-1]["content"][-1]["cache_control"] = {"type": "ephemeral"}
604590
return prepared_msg
605591

606592

0 commit comments

Comments (0)