@@ -2543,6 +2543,138 @@ async def judge_testing_implementation(
         return error_result


+async def _handle_sequential_elicitation(
+    identified_gaps: list[str],
+    specific_questions: list[str],
+    decision_areas: list[str],
+    suggested_options: list[dict],
+    documentation_requests: list[str],
+    success_criteria_questions: list[str],
+    environment_context_questions: list[str],
+    testing_requirements_questions: list[str],
+    current_request: str,
+    repository_analysis: str,
+    task_metadata: "TaskMetadata",
+    task_id: str,
+    ctx: Context,
+) -> ElicitationResult:
+    """Handle sequential elicitation of user feedback, one question at a time."""
+    from pydantic import BaseModel, Field
+
+    from mcp_as_a_judge.elicitation import elicitation_provider
+
+    # Collect every question as a (category, key, question_text) tuple
+    all_questions = []
+
+    # Add identified gaps
+    for gap in identified_gaps:
+        all_questions.append(("gap", gap, f"Please clarify: {gap}"))
+
+    # Add specific questions
+    for question in specific_questions:
+        all_questions.append(("question", question, question))
+
+    # Add decision areas with options
+    for area in decision_areas:
+        options_for_area = [opt for opt in suggested_options if opt.get("area") == area]
+        if options_for_area:
+            options_text = "\n".join(
+                f"- **{choice.get('name', 'Unknown')}**: {', '.join(choice.get('pros', []))} (Cons: {', '.join(choice.get('cons', []))})"
+                for choice in options_for_area[0].get("options", [])
+            )
+            question_text = f"**{area}**: Please choose from these options:\n{options_text}"
+        else:
+            question_text = f"**{area}**: Please specify your preference"
+        all_questions.append(("decision", area, question_text))
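+    # Illustrative (assumed) shape of a suggested_options entry, inferred from the
+    # .get() calls above:
+    #   {"area": "database", "options": [
+    #       {"name": "PostgreSQL", "pros": ["mature"], "cons": ["ops overhead"]}]}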
+
+    # Add other question types
+    for request in documentation_requests:
+        all_questions.append(("documentation", request, f"Documentation needed: {request}"))
+
+    for question in success_criteria_questions:
+        all_questions.append(("success_criteria", question, question))
+
+    for question in environment_context_questions:
+        all_questions.append(("environment", question, question))
+
+    for question in testing_requirements_questions:
+        all_questions.append(("testing", question, question))
+
+    # Collect responses
+    all_responses = {}
+    technical_decisions = {}
+
+    # The response schema is identical for every question, so define it once
+    class SingleQuestionSchema(BaseModel):
+        answer: str = Field(description="Your answer to this question")
+        additional_notes: str = Field(
+            default="", description="Any additional notes or context"
+        )
+
+    # Process questions one by one
+    for i, (category, key, question_text) in enumerate(all_questions):
+        # Format the question with context
+        context_message = f"""
+## Question {i + 1} of {len(all_questions)}
+
+**Context**: {current_request}
+
+**Repository**: {repository_analysis}
+
+**Question**: {question_text}
+
+Please provide your answer:
+"""
+
+        # Elicit a single response
+        result = await elicitation_provider.elicit_user_input(
+            message=context_message,
+            schema=SingleQuestionSchema,
+            ctx=ctx,
+        )
+
+        if result.success and result.data:
+            # result.data is expected to be a dict of the schema's fields
+            answer = result.data.get("answer", "")
+            notes = result.data.get("additional_notes", "")
+
+            # Record decision answers separately as technical decisions
+            if category == "decision":
+                technical_decisions[key] = answer
+
+            all_responses[f"{category}_{key}"] = {
+                "question": question_text,
+                "answer": answer,
+                "notes": notes,
+            }
+        else:
+            # If elicitation fails, fall back to guidance
+            return ElicitationResult(
+                success=True,
+                clarified_requirements="Sequential elicitation not available. Please answer questions directly.",
+                technical_decisions={},
+                user_responses={},
+                repository_context=repository_analysis,
+                workflow_impact="AI assistant should ask the user the questions directly and collect responses.",
+                error_message="",
+            )
+
+    # Compile all responses into requirements
+    combined_requirements = f"{task_metadata.user_requirements}\n\n## Sequential User Feedback:\n"
+
+    for response_data in all_responses.values():
+        combined_requirements += f"\n**{response_data['question']}**\n"
+        combined_requirements += f"Answer: {response_data['answer']}\n"
+        if response_data["notes"]:
+            combined_requirements += f"Notes: {response_data['notes']}\n"
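+    # Illustrative compiled result (hypothetical answers):
+    #   <original user_requirements>
+    #
+    #   ## Sequential User Feedback:
+    #
+    #   **Which Python versions must be supported?**
+    #   Answer: 3.10 and newer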
+
+    return ElicitationResult(
+        success=True,
+        clarified_requirements=combined_requirements,
+        technical_decisions=technical_decisions,
+        user_responses=all_responses,
+        repository_context=repository_analysis,
+        workflow_impact="Sequential user feedback completed. AI assistant should now create detailed implementation plan.",
+    )
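+
+# Hypothetical usage sketch: with a single identified gap, the helper asks one
+# question and keys the answer as f"{category}_{key}":
+#   result = await _handle_sequential_elicitation(
+#       identified_gaps=["deployment target unclear"],
+#       specific_questions=[], decision_areas=[], suggested_options=[],
+#       documentation_requests=[], success_criteria_questions=[],
+#       environment_context_questions=[], testing_requirements_questions=[],
+#       current_request="...", repository_analysis="...",
+#       task_metadata=task_metadata, task_id="t1", ctx=ctx)
+#   result.user_responses["gap_deployment target unclear"]["answer"]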
+
+
 @mcp.tool(description=tool_description_provider.get_description("get_user_feedback"))  # type: ignore[misc,unused-ignore]
 async def get_user_feedback(
     current_request: str,
@@ -2557,6 +2689,7 @@ async def get_user_feedback(
     testing_requirements_questions: list[str],
     task_id: str,
     ctx: Context,
+    sequential_mode: bool = True,  # True: ask questions one at a time; False: single batch form
 ) -> ElicitationResult:
     """Get user feedback for requirement clarification - description loaded from tool_description_provider."""
     # Log tool execution start
@@ -2636,16 +2769,36 @@ class UserFeedbackSchema(BaseModel):
         "task_id": task_id,
     }

-    elicitation_message = prompt_loader.render_prompt(
-        "get_user_feedback", "user", template_vars
-    )
+    # Handle sequential vs batch elicitation
+    if sequential_mode:
+        # Sequential elicitation: ask questions one by one
+        return await _handle_sequential_elicitation(
+            identified_gaps=identified_gaps,
+            specific_questions=specific_questions,
+            decision_areas=decision_areas,
+            suggested_options=suggested_options,
+            documentation_requests=documentation_requests,
+            success_criteria_questions=success_criteria_questions,
+            environment_context_questions=environment_context_questions,
+            testing_requirements_questions=testing_requirements_questions,
+            current_request=current_request,
+            repository_analysis=repository_analysis,
+            task_metadata=task_metadata,
+            task_id=task_id,
+            ctx=ctx,
+        )
+    else:
+        # Original batch elicitation
+        elicitation_message = prompt_loader.render_prompt(
+            "get_user_feedback", "user", template_vars
+        )

-    # Get user input through elicitation
-    elicitation_result = await elicitation_provider.elicit_user_input(
-        message=elicitation_message,
-        schema=UserFeedbackSchema,
-        ctx=ctx,
-    )
+        # Get user input through elicitation
+        elicitation_result = await elicitation_provider.elicit_user_input(
+            message=elicitation_message,
+            schema=UserFeedbackSchema,
+            ctx=ctx,
+        )

     if not elicitation_result.success:
         # Instead of failing, provide a structured fallback that guides the AI assistant