
Commit 31d3a0a

Author: mcp-release-bot
Fix get_user_feedback tool parameter mismatch
- Add missing parameters to the function signature: documentation_requests, success_criteria_questions, environment_context_questions, testing_requirements_questions
- Update UserFeedbackSchema to include response fields for the new question types
- Update the user prompt template to display the new question sections
- Switch from a hardcoded elicitation message to template-based rendering
- Update response processing to handle the new response types and include them in the requirements
- Fix f-string linting issues

This resolves the "Missing required parameter suggested_options" error in Cursor by ensuring the function signature matches the tool description exactly.
1 parent: d48b18b

File tree

2 files changed: +78 −20 lines
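Why a signature/description mismatch breaks clients: in the MCP Python SDK, the decorated function's signature is what generates the tool's input schema, so every parameter the tool description promises must also exist in the signature. A minimal sketch of the pattern, assuming a FastMCP-style server consistent with the `ctx: Context` parameter in the diff (the server name and tool body are illustrative, not the project's actual wiring):

```python
from mcp.server.fastmcp import Context, FastMCP

mcp = FastMCP("demo-server")  # hypothetical server name


@mcp.tool()
async def get_user_feedback(
    current_request: str,
    suggested_options: list[dict],
    # The four question lists added by this commit must appear here as
    # well: if the tool description advertises a parameter the signature
    # lacks, clients such as Cursor can fail with errors like
    # "Missing required parameter suggested_options".
    documentation_requests: list[str],
    ctx: Context,
) -> str:
    return f"Received {len(documentation_requests)} documentation requests"
```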

src/mcp_as_a_judge/prompts/user/get_user_feedback.md

Lines changed: 28 additions & 0 deletions
```diff
@@ -31,6 +31,34 @@
 {% endfor %}
 {% endfor %}
 
+## Documentation and Reference Requests
+{% if documentation_requests %}
+{% for request in documentation_requests %}
+- {{ request }}
+{% endfor %}
+{% endif %}
+
+## Success Criteria Questions
+{% if success_criteria_questions %}
+{% for question in success_criteria_questions %}
+- {{ question }}
+{% endfor %}
+{% endif %}
+
+## Environment and Context Questions
+{% if environment_context_questions %}
+{% for question in environment_context_questions %}
+- {{ question }}
+{% endfor %}
+{% endif %}
+
+## Testing Requirements Questions
+{% if testing_requirements_questions %}
+{% for question in testing_requirements_questions %}
+- {{ question }}
+{% endfor %}
+{% endif %}
+
 ## Task Context
 - **Task ID**: {{ task_id }}
 - **Current State**: Requirements feedback gathering
```
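The `{% if %}`/`{% for %}` tags indicate a Jinja2 template (assuming `prompt_loader` is Jinja2-backed, which the syntax suggests), so each new section renders only when its list is non-empty and disappears entirely otherwise. A standalone sketch of that behavior, using a trimmed stand-in for one section of the real file:

```python
from jinja2 import Template

# Trimmed stand-in for one section of get_user_feedback.md.
section = Template(
    "{% if documentation_requests %}"
    "## Documentation and Reference Requests\n"
    "{% for request in documentation_requests %}"
    "- {{ request }}\n"
    "{% endfor %}"
    "{% endif %}"
)

# Empty list: the whole section is omitted from the elicitation message.
assert section.render(documentation_requests=[]) == ""

# Non-empty list: heading plus one bullet per request.
print(section.render(documentation_requests=["Link to the existing API docs"]))
# ## Documentation and Reference Requests
# - Link to the existing API docs
```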

src/mcp_as_a_judge/server.py

Lines changed: 50 additions & 20 deletions
```diff
@@ -2551,6 +2551,10 @@ async def get_user_feedback(
     decision_areas: list[str],
     suggested_options: list[dict],
     repository_analysis: str,
+    documentation_requests: list[str],
+    success_criteria_questions: list[str],
+    environment_context_questions: list[str],
+    testing_requirements_questions: list[str],
     task_id: str,
     ctx: Context,
 ) -> ElicitationResult:
@@ -2598,29 +2602,43 @@ class UserFeedbackSchema(BaseModel):
             default="",
             description="Preferences that affect implementation approach",
         )
+        documentation_responses: dict[str, str] = Field(
+            default_factory=dict,
+            description="Responses to documentation and reference requests",
+        )
+        success_criteria_responses: dict[str, str] = Field(
+            default_factory=dict,
+            description="Responses to success criteria questions",
+        )
+        environment_context_responses: dict[str, str] = Field(
+            default_factory=dict,
+            description="Responses to environment and context questions",
+        )
+        testing_requirements_responses: dict[str, str] = Field(
+            default_factory=dict,
+            description="Responses to testing requirements questions",
+        )
 
-    # Format elicitation message
-    elicitation_message = f"""
-## Requirement Clarification Needed
-
-**Current Understanding:** {current_request}
-
-**Repository Analysis:** {repository_analysis}
-
-**Identified Gaps:**
-{chr(10).join(f"- {gap}" for gap in identified_gaps)}
-
-**Specific Questions:**
-{chr(10).join(f"- {question}" for question in specific_questions)}
-
-**Technical Decisions Needed:**
-{chr(10).join(f"- {area}" for area in decision_areas)}
+    # Format elicitation message using template
+    from mcp_as_a_judge.prompts import prompt_loader
 
-**Suggested Options:**
-{chr(10).join(f"- **{opt.get('area', 'Unknown')}**: {', '.join(o.get('name', 'Unknown') for o in opt.get('options', []))}" for opt in suggested_options)}
+    template_vars = {
+        "current_request": current_request,
+        "repository_analysis": repository_analysis,
+        "identified_gaps": identified_gaps,
+        "specific_questions": specific_questions,
+        "decision_areas": decision_areas,
+        "suggested_options": suggested_options,
+        "documentation_requests": documentation_requests,
+        "success_criteria_questions": success_criteria_questions,
+        "environment_context_questions": environment_context_questions,
+        "testing_requirements_questions": testing_requirements_questions,
+        "task_id": task_id,
+    }
 
-Please provide clarified requirements and make technical decisions to proceed with implementation.
-"""
+    elicitation_message = prompt_loader.render_prompt(
+        "get_user_feedback", "user", template_vars
+    )
 
     # Get user input through elicitation
     elicitation_result = await elicitation_provider.elicit_user_input(
@@ -2648,6 +2666,10 @@ class UserFeedbackSchema(BaseModel):
     technical_decisions = user_data.get("technical_decisions", {})
     additional_context = user_data.get("additional_context", "")
     workflow_preferences = user_data.get("workflow_preferences", "")
+    documentation_responses = user_data.get("documentation_responses", {})
+    success_criteria_responses = user_data.get("success_criteria_responses", {})
+    environment_context_responses = user_data.get("environment_context_responses", {})
+    testing_requirements_responses = user_data.get("testing_requirements_responses", {})
 
     # Update task metadata with new requirements
     combined_requirements = f"{task_metadata.user_requirements}\n\n## User Clarifications:\n{clarified_requirements}"
@@ -2657,6 +2679,14 @@ class UserFeedbackSchema(BaseModel):
         combined_requirements += (
             f"\n\n## Workflow Preferences:\n{workflow_preferences}"
         )
+    if documentation_responses:
+        combined_requirements += "\n\n## Documentation Responses:\n" + "\n".join(f"- {k}: {v}" for k, v in documentation_responses.items())
+    if success_criteria_responses:
+        combined_requirements += "\n\n## Success Criteria:\n" + "\n".join(f"- {k}: {v}" for k, v in success_criteria_responses.items())
+    if environment_context_responses:
+        combined_requirements += "\n\n## Environment Context:\n" + "\n".join(f"- {k}: {v}" for k, v in environment_context_responses.items())
+    if testing_requirements_responses:
+        combined_requirements += "\n\n## Testing Requirements:\n" + "\n".join(f"- {k}: {v}" for k, v in testing_requirements_responses.items())
 
     task_metadata.update_requirements(combined_requirements, source="user_feedback")
 
```