Skip to content

Commit f7fa86b

Browse files
committed
feat: add MCP tool orchestration and server config support
Introduce server-side MCP config loading from /etc/pr-agent/mcp.json or MCP_CONFIG_PATH, including JSONC parsing and VS Code / Claude schema normalization. Add the MCP runtime, HTTP and stdio clients, structured tool-calling orchestration on the base AI handler, and wire /ask, /review, and /improve through the MCP-aware integration helper. Expose MCP runtime status in /config output, document the configuration flow and AWS Knowledge example, and add focused tests for config loading, runtime behavior, tool orchestration, integration, and discovery.
1 parent 0e37fc8 commit f7fa86b

18 files changed

Lines changed: 2415 additions & 17 deletions

docs/docs/usage-guide/additional_configurations.md

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,27 @@ To print all the available configurations as a comment on your PR, you can use t
99
/config
1010
```
1111

12+
When MCP is enabled, the `/config` comment also includes a small MCP runtime status block showing whether MCP is enabled and which servers are configured and connected.
13+
14+
## MCP runtime configuration
15+
16+
PR-Agent can load MCP servers from a server-side JSON or JSONC file. By default, it reads `/etc/pr-agent/mcp.json`, and you can override that path with `MCP_CONFIG_PATH` or the `[mcp].config_path` setting.
17+
18+
The file may use either the `servers` key, which matches the VS Code MCP schema, or `mcpServers`, which matches the Claude Desktop schema.
19+
20+
For example, an AWS Knowledge MCP server can be configured like this:
21+
22+
```json
23+
{
24+
"servers": {
25+
"AWS Knowledge": {
26+
"url": "https://knowledge-mcp.global.api.aws",
27+
"type": "http"
28+
}
29+
}
30+
}
31+
```
32+
1233
![possible_config1](https://codium.ai/images/pr_agent/possible_config1.png){width=512}
1334

1435
To view the **actual** configurations used for a specific tool, after all the user settings are applied, you can add for each tool a `--config.output_relevant_configurations=true` suffix.

docs/docs/usage-guide/automations_and_usage.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -75,6 +75,8 @@ For example, if you want to edit the `review` tool configurations, you can run:
7575

7676
Any configuration value in the [configuration file](https://github.com/the-pr-agent/pr-agent/blob/main/pr_agent/settings/configuration.toml) can be similarly edited. Comment `/config` to see the list of available configurations.
7777

78+
If you want PR-Agent to use MCP tools, mount a server-side MCP config file at `/etc/pr-agent/mcp.json` or point `MCP_CONFIG_PATH` at another JSON/JSONC file. The `/config` comment will show the active MCP runtime status when MCP is enabled.
79+
7880
## PR-Agent Automatic Feedback
7981

8082
### Disabling all automatic feedback

pr_agent/algo/ai_handlers/base_ai_handler.py

Lines changed: 223 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,10 @@
1+
import inspect
2+
import json
3+
import logging
14
from abc import ABC, abstractmethod
5+
from typing import Any, Awaitable, Callable, Optional
6+
7+
from pr_agent.mcp.runtime import MCPRuntimeError
28

39

410
class BaseAiHandler(ABC):
@@ -10,6 +16,8 @@ class BaseAiHandler(ABC):
1016
def __init__(self):
1117
pass
1218

19+
_logger = logging.getLogger(__name__)
20+
1321
@property
1422
@abstractmethod
1523
def deployment_id(self):
@@ -26,3 +34,218 @@ async def chat_completion(self, model: str, system: str, user: str, temperature:
2634
temperature (float): the temperature to use for the chat completion
2735
"""
2836
pass
37+
38+
    async def chat_completion_with_tools(
        self,
        model: str,
        system: str,
        user: str,
        tools: Optional[list[dict[str, Any]]] = None,
        tool_executor: Optional[Callable[[str, dict[str, Any]], Any | Awaitable[Any]]] = None,
        temperature: float = 0.2,
        img_path: Optional[str] = None,
        max_tool_turns: int = 4,
        max_tool_output_chars: int = 12000,
    ):
        """
        Run a structured tool-calling loop on top of plain chat completion.

        The model is instructed to emit JSON tool requests in the form:
            {"type": "tool_call", "tool": "server.tool", "arguments": {...}}
        and to finish with:
            {"type": "final", "content": "..."}

        Args:
            model: model identifier forwarded to ``chat_completion``.
            system: base system prompt; the tool catalog and JSON-protocol
                instructions are appended to it for every turn.
            user: initial user message; subsequent turns replay the full
                conversation (tool requests and results) as one user message.
            tools: tool catalog advertised to the model. If empty/None, the
                call degrades to a plain ``chat_completion``.
            tool_executor: callable invoked as ``tool_executor(name, args)``;
                may return a value or an awaitable. If None, the call degrades
                to a plain ``chat_completion``.
            temperature: sampling temperature forwarded to ``chat_completion``.
            img_path: optional image path; only sent on the first turn.
            max_tool_turns: budget of tool invocations before the loop gives up
                and returns the model's raw response.
            max_tool_output_chars: truncation limit applied per tool call, not
                across all tool calls.

        Returns:
            Whatever the concrete handler's ``chat_completion`` returns —
            assumed here to be a ``(response_text, finish_reason)`` pair
            (NOTE(review): the abstract ``chat_completion`` signature does not
            state this; confirm against the concrete handlers).

        max_tool_output_chars is applied per tool call, not across all tool calls.
        """
        # Without both a catalog and an executor there is nothing to orchestrate.
        if not tools or tool_executor is None:
            return await self.chat_completion(model, system, user, temperature=temperature, img_path=img_path)

        allowed_tool_names = self._extract_allowed_tool_names(tools)
        # Compact JSON examples embedded in the system prompt; the replace()
        # swaps the concrete example arguments for a "{...}" placeholder.
        tool_call_example = json.dumps(
            {
                "type": "tool_call",
                "tool": "server.tool",
                "arguments": {"example": "value"},
            },
            separators=(",", ":"),
        ).replace("{\"example\":\"value\"}", "{...}")
        final_response_example = json.dumps(
            {"type": "final", "content": "..."},
            separators=(",", ":"),
        )

        tool_catalog_text = json.dumps(tools, indent=2, sort_keys=True)
        structured_system = (
            f"{system}\n\n"
            f"Available MCP tools (JSON schema):\n{tool_catalog_text}\n\n"
            "Always inspect the available tools first and use them before responding "
            "whenever they can help answer the user's request.\n"
            "When you need a tool, respond with ONLY a JSON object exactly in this shape:\n"
            f"{tool_call_example}\n"
            "Do not include a final answer in the same message as a tool call.\n"
            "When you are finished, respond with ONLY a JSON object exactly in this shape:\n"
            f"{final_response_example}\n"
            "Do not wrap the JSON in markdown fences."
        )

        # The whole exchange is replayed as a single user message each turn,
        # since plain chat_completion has no multi-message history parameter.
        conversation_history = [user]
        remaining_turns = max_tool_turns
        current_img_path = img_path

        while True:
            current_user = "\n\n".join(conversation_history)
            response_text, finish_reason = await self.chat_completion(
                model=model,
                system=structured_system,
                user=current_user,
                temperature=temperature,
                img_path=current_img_path,
            )
            # The image is only attached on the first turn.
            current_img_path = None

            parsed_response = self._parse_tool_or_final_response(response_text)
            if parsed_response is None:
                # No recognizable JSON protocol object: treat as a plain answer.
                return response_text, finish_reason

            response_type = parsed_response.get("type", "final")
            if response_type == "final":
                return str(parsed_response.get("content", "")), finish_reason

            # Unknown "type" values fall back to returning the raw text.
            if response_type != "tool_call":
                return response_text, finish_reason

            # Turn budget is checked BEFORE executing, so the raw tool request
            # text is returned when the budget is exhausted.
            if remaining_turns <= 0:
                self._logger.warning("MCP tool orchestration exceeded the configured turn budget")
                return response_text, finish_reason

            tool_name = str(parsed_response.get("tool", "")).strip()
            arguments = parsed_response.get("arguments") or {}
            if not tool_name:
                self._logger.warning("MCP tool orchestration returned an empty tool name; aborting tool loop")
                return response_text, finish_reason
            if not isinstance(arguments, dict):
                self._logger.warning("MCP tool orchestration arguments must be a JSON object; aborting tool loop")
                return response_text, finish_reason

            if tool_name not in allowed_tool_names:
                # Never execute a tool the model was not told about; feed the
                # refusal back so the model can recover.
                self._logger.warning("MCP tool '%s' was not in the advertised tool catalog; skipping", tool_name)
                tool_result = f"Tool not available: {tool_name}"
            else:
                try:
                    tool_result = tool_executor(tool_name, arguments)
                    # Executors may be sync or async; await only when needed.
                    if inspect.isawaitable(tool_result):
                        tool_result = await tool_result
                except (MCPRuntimeError, TypeError, ValueError) as exc:
                    # Expected tool failures are surfaced to the model as text;
                    # any other exception type propagates to the caller.
                    self._logger.warning("MCP tool '%s' raised an exception: %s", tool_name, exc)
                    tool_result = f"Tool error: {exc}"

            tool_result_text = self._normalize_tool_result_text(
                tool_result,
                max_tool_output_chars=max_tool_output_chars,
                tool_name=tool_name,
            )
            conversation_history.append(f"Previous assistant tool request:\n{response_text}")
            conversation_history.append(f"Tool result for {tool_name}:\n{tool_result_text}")
            remaining_turns -= 1
@classmethod
152+
def _normalize_tool_result_text(
153+
cls,
154+
tool_result: Any,
155+
max_tool_output_chars: int,
156+
tool_name: str = "<unknown>",
157+
) -> str:
158+
if isinstance(tool_result, str):
159+
result_text = tool_result
160+
else:
161+
result_text = json.dumps(tool_result, indent=2, sort_keys=True, default=str)
162+
163+
if len(result_text) > max_tool_output_chars:
164+
cls._logger.warning(
165+
"Tool output for '%s' exceeded per-tool max_tool_output_chars (%s > %s); truncating output",
166+
tool_name,
167+
len(result_text),
168+
max_tool_output_chars,
169+
)
170+
if max_tool_output_chars <= 0:
171+
return ""
172+
suffix = "\n[tool output truncated]"
173+
if max_tool_output_chars <= len(suffix):
174+
return suffix[:max_tool_output_chars]
175+
truncated_prefix_len = max(0, max_tool_output_chars - len(suffix))
176+
return result_text[:truncated_prefix_len] + suffix
177+
return result_text
178+
179+
@staticmethod
180+
def _parse_tool_or_final_response(response_text: str) -> Optional[dict[str, Any]]:
181+
candidate = response_text.strip()
182+
if not candidate:
183+
return None
184+
185+
for json_candidate in BaseAiHandler._iter_json_object_candidates(candidate):
186+
try:
187+
parsed = json.loads(json_candidate)
188+
except json.JSONDecodeError:
189+
continue
190+
191+
if isinstance(parsed, dict):
192+
response_type = parsed.get("type")
193+
if response_type in {"tool_call", "final"}:
194+
return parsed
195+
196+
return None
197+
198+
@staticmethod
199+
def _iter_json_object_candidates(text: str) -> list[str]:
200+
candidates: list[str] = []
201+
depth = 0
202+
start_index: Optional[int] = None
203+
in_string = False
204+
is_escaped = False
205+
206+
for index, char in enumerate(text):
207+
if in_string:
208+
if is_escaped:
209+
is_escaped = False
210+
elif char == "\\":
211+
is_escaped = True
212+
elif char == '"':
213+
in_string = False
214+
continue
215+
216+
if char == '"':
217+
in_string = True
218+
continue
219+
220+
if char == "{":
221+
if depth == 0:
222+
start_index = index
223+
depth += 1
224+
continue
225+
226+
if char == "}" and depth > 0:
227+
depth -= 1
228+
if depth == 0 and start_index is not None:
229+
candidates.append(text[start_index : index + 1])
230+
start_index = None
231+
232+
return candidates
233+
234+
@staticmethod
235+
def _extract_allowed_tool_names(tools: list[dict[str, Any]]) -> set[str]:
236+
allowed: set[str] = set()
237+
for tool in tools:
238+
if not isinstance(tool, dict):
239+
continue
240+
241+
function_info = tool.get("function")
242+
if isinstance(function_info, dict):
243+
function_name = function_info.get("name")
244+
if isinstance(function_name, str) and function_name.strip():
245+
allowed.add(function_name.strip())
246+
247+
simple_name = tool.get("name")
248+
if isinstance(simple_name, str) and simple_name.strip():
249+
allowed.add(simple_name.strip())
250+
251+
return allowed

0 commit comments

Comments
 (0)