Skip to content

Commit e611703

Browse files
refactor: significantly simplify ACP server.py by extracting core classes and utilities
- Reduced server.py from 607 to 296 lines (51% reduction)
- Extracted 3 new modules for better maintainability:
  - utils.py: tool kind mapping utilities (get_tool_kind function)
  - llm_config.py: LLM configuration validation and creation
  - events.py: EventSubscriber class with full event handling logic
- Updated server.py to use extracted modules with proper imports
- Fixed workspace field issue in newSession method
- Updated all test imports to use the new module structure
- All 16 ACP tests passing; functionality preserved

Co-authored-by: openhands <openhands@all-hands.dev>
1 parent 41412c6 commit e611703

5 files changed

Lines changed: 376 additions & 332 deletions

File tree

Lines changed: 217 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,217 @@
1+
"""Event handling for ACP server."""
2+
3+
import logging
4+
from typing import TYPE_CHECKING
5+
6+
from acp import SessionNotification
7+
from acp.schema import (
8+
ContentBlock1,
9+
ContentBlock2,
10+
SessionUpdate2,
11+
SessionUpdate4,
12+
SessionUpdate5,
13+
ToolCallContent1,
14+
)
15+
16+
from openhands.agent_server.pub_sub import Subscriber
17+
from openhands.sdk import ImageContent, TextContent
18+
from openhands.sdk.event.base import LLMConvertibleEvent
19+
from openhands.sdk.event.llm_convertible.action import ActionEvent
20+
from openhands.sdk.event.llm_convertible.observation import (
21+
AgentErrorEvent,
22+
ObservationEvent,
23+
UserRejectObservation,
24+
)
25+
26+
from .utils import get_tool_kind
27+
28+
29+
if TYPE_CHECKING:
30+
from acp import AgentSideConnection
31+
32+
logger = logging.getLogger(__name__)
33+
34+
35+
class EventSubscriber(Subscriber):
    """Subscriber that converts OpenHands events into ACP session notifications.

    Each supported OpenHands event type is mapped onto the corresponding ACP
    ``sessionUpdate`` payload and pushed through the ACP connection. All
    handlers are best-effort: failures are logged at debug level and never
    propagate, so a malformed event cannot break the event stream.
    """

    def __init__(self, session_id: str, conn: "AgentSideConnection"):
        """Initialize the event subscriber.

        Args:
            session_id: The ACP session ID.
            conn: The ACP connection for sending notifications.
        """
        self.session_id = session_id
        self.conn = conn

    async def __call__(self, event):
        """Dispatch an incoming event to the matching handler.

        ActionEvent and the observation events are checked before the generic
        LLMConvertibleEvent branch so the more specific handlers win.
        """
        if isinstance(event, ActionEvent):
            await self._handle_action_event(event)
        elif isinstance(
            event, (ObservationEvent, UserRejectObservation, AgentErrorEvent)
        ):
            await self._handle_observation_event(event)
        elif isinstance(event, LLMConvertibleEvent):
            await self._handle_llm_convertible_event(event)

    async def _handle_action_event(self, event: ActionEvent):
        """Handle ActionEvent by sending a ``tool_call`` notification."""
        try:
            tool_kind = get_tool_kind(event.tool_name)

            # Human-readable title, e.g. "ExecuteBashAction with bash".
            action_name = event.action.__class__.__name__
            title = f"{action_name} with {event.tool_name}"

            # Flatten the agent's thought items into a single text payload.
            thought_text = " ".join(t.text for t in event.thought)

            await self.conn.sessionUpdate(
                SessionNotification(
                    sessionId=self.session_id,
                    update=SessionUpdate4(
                        sessionUpdate="tool_call",
                        toolCallId=event.tool_call_id,
                        title=title,
                        kind=tool_kind,
                        status="pending",
                        # Only attach content when there is a non-blank thought;
                        # otherwise the client shows just the title.
                        content=[
                            ToolCallContent1(
                                type="content",
                                content=ContentBlock1(
                                    type="text",
                                    text=thought_text
                                    if thought_text.strip()
                                    else f"Executing {action_name}",
                                ),
                            )
                        ]
                        if thought_text.strip()
                        else None,
                        rawInput=event.tool_call.function.arguments
                        if hasattr(event.tool_call.function, "arguments")
                        else None,
                    ),
                )
            )
        except Exception as e:
            # Best-effort: never let a conversion error break the stream.
            logger.debug(f"Error processing ActionEvent: {e}")

    async def _handle_observation_event(
        self, event: ObservationEvent | UserRejectObservation | AgentErrorEvent
    ):
        """Handle observation events by sending a ``tool_call_update``.

        Maps successful observations to status "completed" and both user
        rejections and agent errors to status "failed".
        """
        try:
            if isinstance(event, ObservationEvent):
                # Successful tool execution: collect the textual parts of the
                # observation (images are skipped; unknown items stringified).
                status = "completed"
                content_parts = []
                for item in event.observation.agent_observation:
                    if isinstance(item, TextContent):
                        content_parts.append(item.text)
                    elif hasattr(item, "text") and not isinstance(item, ImageContent):
                        content_parts.append(item.text)
                    else:
                        content_parts.append(str(item))
                content_text = "".join(content_parts)
            elif isinstance(event, UserRejectObservation):
                # User rejected the action.
                status = "failed"
                content_text = f"User rejected: {event.rejection_reason}"
            else:  # AgentErrorEvent
                # Agent error.
                status = "failed"
                content_text = f"Error: {event.error}"

            await self.conn.sessionUpdate(
                SessionNotification(
                    sessionId=self.session_id,
                    update=SessionUpdate5(
                        sessionUpdate="tool_call_update",
                        toolCallId=event.tool_call_id,
                        status=status,
                        content=[
                            ToolCallContent1(
                                type="content",
                                content=ContentBlock1(
                                    type="text",
                                    text=content_text,
                                ),
                            )
                        ]
                        if content_text.strip()
                        else None,
                        rawOutput={"result": content_text}
                        if content_text.strip()
                        else None,
                    ),
                )
            )
        except Exception as e:
            logger.debug(f"Error processing observation event: {e}")

    async def _send_agent_chunk(self, content_block) -> None:
        """Send a single ``agent_message_chunk`` notification with the given block."""
        await self.conn.sessionUpdate(
            SessionNotification(
                sessionId=self.session_id,
                update=SessionUpdate2(
                    sessionUpdate="agent_message_chunk",
                    content=content_block,
                ),
            )
        )

    async def _handle_llm_convertible_event(self, event: LLMConvertibleEvent):
        """Handle other LLMConvertibleEvent events.

        Only assistant messages are surfaced to the client; each text item,
        image URL, or bare string becomes its own message chunk.
        """
        try:
            llm_message = event.to_llm_message()

            if llm_message.role != "assistant":
                return

            for content_item in llm_message.content:
                if isinstance(content_item, TextContent):
                    # Skip blank text; send the rest verbatim.
                    if content_item.text.strip():
                        await self._send_agent_chunk(
                            ContentBlock1(
                                type="text",
                                text=content_item.text,
                            )
                        )
                elif isinstance(content_item, ImageContent):
                    # Send each image URL as a separate chunk.
                    for image_url in content_item.image_urls:
                        # Distinguish remote URIs from inline base64 data.
                        # NOTE(review): mimeType is hard-coded to image/png and
                        # the URL is always passed via `data` -- confirm this
                        # matches ACP schema expectations for remote images.
                        is_uri = image_url.startswith(("http://", "https://"))
                        await self._send_agent_chunk(
                            ContentBlock2(
                                type="image",
                                data=image_url,
                                mimeType="image/png",
                                uri=image_url if is_uri else None,
                            )
                        )
                elif isinstance(content_item, str):
                    if content_item.strip():
                        await self._send_agent_chunk(
                            ContentBlock1(
                                type="text",
                                text=content_item,
                            )
                        )
        except Exception as e:
            logger.debug(f"Error processing LLMConvertibleEvent: {e}")
Lines changed: 118 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,118 @@
1+
"""LLM configuration management for ACP server."""
2+
3+
import logging
4+
from typing import Any
5+
6+
from pydantic import SecretStr
7+
8+
from openhands.sdk import LLM
9+
10+
11+
logger = logging.getLogger(__name__)
12+
13+
14+
def validate_llm_config(config: dict[str, Any]) -> dict[str, Any]:
    """Validate and sanitize LLM configuration.

    Keeps only parameters accepted by the LLM class whose value is not None;
    unknown parameter names are logged and discarded.
    """
    # Parameter names accepted by the LLM class constructor.
    allowed_params = frozenset(
        {
            "model",
            "api_key",
            "base_url",
            "api_version",
            "aws_access_key_id",
            "aws_secret_access_key",
            "aws_region_name",
            "openrouter_site_url",
            "openrouter_app_name",
            "num_retries",
            "retry_multiplier",
            "retry_min_wait",
            "retry_max_wait",
            "timeout",
            "max_message_chars",
            "temperature",
            "top_p",
            "top_k",
            "custom_llm_provider",
            "max_input_tokens",
            "max_output_tokens",
            "input_cost_per_token",
            "output_cost_per_token",
            "ollama_base_url",
            "drop_params",
            "modify_params",
            "disable_vision",
            "disable_stop_word",
            "caching_prompt",
            "log_completions",
            "log_completions_folder",
            "custom_tokenizer",
            "native_tool_calling",
            "reasoning_effort",
            "seed",
            "safety_settings",
        }
    )

    sanitized: dict[str, Any] = {}
    for name, value in config.items():
        if name not in allowed_params:
            # Unknown names are dropped with a warning so callers can notice
            # typos without the whole config being rejected.
            logger.warning(f"Unknown LLM parameter ignored: {name}")
            continue
        # Known names with a None value are silently skipped.
        if value is not None:
            sanitized[name] = value
    return sanitized
65+
66+
67+
def create_llm_from_config(llm_config: dict[str, Any]) -> LLM:
    """Create an LLM instance using stored configuration or defaults.

    Args:
        llm_config: Configuration supplied via authentication. When empty,
            the function falls back to environment variables and defaults.

    Returns:
        A configured LLM instance.
    """
    import os

    # Baseline configuration; may be overridden below.
    llm_kwargs: dict[str, Any] = {
        "service_id": "acp-agent",
        "model": "claude-sonnet-4-20250514",  # Default model
    }

    if llm_config:
        # Authenticated configuration takes precedence over defaults.
        llm_kwargs.update(llm_config)
        logger.info(f"Using authenticated LLM config: {list(llm_config.keys())}")
    else:
        # No auth config: derive settings from the environment.
        logger.info("No authenticated LLM config, using environment/defaults")

        env_api_key = os.getenv("LITELLM_API_KEY") or os.getenv("OPENAI_API_KEY")
        if env_api_key:
            llm_kwargs["api_key"] = env_api_key
            if os.getenv("LITELLM_API_KEY"):
                # Route through the litellm proxy when its key is present.
                llm_kwargs["model"] = (
                    "litellm_proxy/anthropic/claude-sonnet-4-5-20250929"
                )
                llm_kwargs["base_url"] = "https://llm-proxy.eval.all-hands.dev"
                llm_kwargs["drop_params"] = True
            else:
                llm_kwargs["model"] = "gpt-4o-mini"
        else:
            logger.warning("No API key found. Agent responses may not work.")
            llm_kwargs["api_key"] = "dummy-key"

    # Wrap any secret-bearing string fields in SecretStr before construction.
    for secret_field in ("api_key", "aws_access_key_id", "aws_secret_access_key"):
        if isinstance(llm_kwargs.get(secret_field), str):
            llm_kwargs[secret_field] = SecretStr(llm_kwargs[secret_field])

    logger.info(f"Creating LLM with model: {llm_kwargs.get('model', 'unknown')}")
    return LLM(**llm_kwargs)

0 commit comments

Comments
 (0)