Skip to content

Commit 441a94e

Browse files
Author: mcp-release-bot
Commit message: mypy
1 parent: 564b3ba — commit 441a94e

File tree

4 files changed

+36
-45
lines changed

4 files changed

+36
-45
lines changed

src/mcp_as_a_judge/server.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
import json
1111
import re
1212
import time
13+
from typing import Any
1314

1415
from mcp.server.fastmcp import Context, FastMCP
1516
from pydantic import ValidationError
@@ -2787,7 +2788,7 @@ class UserFeedbackSchema(BaseModel):
27872788
feedback_context = {
27882789
"repository_analysis": repository_analysis,
27892790
"task_description": task_metadata.description,
2790-
"task_size": task_metadata.size.value if task_metadata.size else "unknown",
2791+
"task_size": task_metadata.task_size.value if task_metadata.task_size else "unknown",
27912792
"workflow_state": task_metadata.state.value,
27922793
"elicitation_success": elicitation_result.success,
27932794
"questions_asked": specific_questions[:5], # Limit to avoid token bloat
@@ -3150,7 +3151,7 @@ async def get_user_approve_requirement(
31503151
plan: str,
31513152
design: str,
31523153
research: str,
3153-
technical_decisions: list[dict],
3154+
technical_decisions: list[dict | Any],
31543155
implementation_scope: dict,
31553156
language_specific_practices: list[str],
31563157
task_id: str,
@@ -3357,7 +3358,7 @@ class PlanApprovalSchema(BaseModel):
33573358
# Save plan approval interaction to database for better LLM context
33583359
approval_context = {
33593360
"task_description": task_metadata.description,
3360-
"task_size": task_metadata.size.value if task_metadata.size else "unknown",
3361+
"task_size": task_metadata.task_size.value if task_metadata.task_size else "unknown",
33613362
"workflow_state": task_metadata.state.value,
33623363
"plan_content": formatted_plan[:1000], # Limit to avoid token bloat
33633364
"user_approved": approved,

src/mcp_as_a_judge/utils/repository_analyzer.py

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -121,8 +121,8 @@ def analyze(self) -> dict:
121121

122122
def _detect_languages(self) -> dict:
123123
"""Detect programming languages used in the repository."""
124-
language_scores = {}
125-
file_counts = Counter()
124+
language_scores: dict[str, float] = {}
125+
file_counts: Counter[str] = Counter()
126126

127127
# Walk through repository
128128
for root, dirs, files in os.walk(self.repo_path):
@@ -177,7 +177,7 @@ def _detect_languages(self) -> dict:
177177

178178
def _detect_frameworks(self) -> dict:
179179
"""Detect frameworks and libraries used."""
180-
framework_scores = {}
180+
framework_scores: dict[str, float] = {}
181181

182182
for _root, dirs, files in os.walk(self.repo_path):
183183
dirs[:] = [
@@ -212,7 +212,7 @@ def _detect_frameworks(self) -> dict:
212212

213213
def _analyze_structure(self) -> dict:
214214
"""Analyze project structure and organization patterns."""
215-
structure = {"directories": [], "key_files": [], "patterns": []}
215+
structure: dict[str, list[str]] = {"directories": [], "key_files": [], "patterns": []}
216216

217217
# Get top-level directories and files
218218
if self.repo_path.exists():
@@ -383,36 +383,37 @@ def _generate_recommendations(self) -> dict:
383383
languages = self._detect_languages()
384384
frameworks = self._detect_frameworks()
385385

386+
reasoning_list: list[str] = []
386387
recommendations = {
387388
"technology_stack": "unclear",
388389
"suggested_approach": "ask_user",
389-
"reasoning": [],
390+
"reasoning": reasoning_list,
390391
}
391392

392393
if languages["confidence"] == "high" and languages["primary"]:
393394
recommendations["technology_stack"] = "detected"
394395
recommendations["suggested_approach"] = "confirm_or_choose_different"
395-
recommendations["reasoning"].append(
396+
reasoning_list.append(
396397
f"Clear {languages['primary']} project detected - recommend continuing with {languages['primary']} for consistency"
397398
)
398-
recommendations["reasoning"].append(
399+
reasoning_list.append(
399400
"However, user may choose different language for valid reasons (microservices, tooling, team expertise, etc.)"
400401
)
401402

402403
if frameworks["likely"]:
403-
recommendations["reasoning"].append(
404+
reasoning_list.append(
404405
f"Likely frameworks: {', '.join(frameworks['likely'])}"
405406
)
406407
elif languages["confidence"] == "medium":
407408
recommendations["technology_stack"] = "mixed_or_unclear"
408409
recommendations["suggested_approach"] = "clarify_with_user"
409-
recommendations["reasoning"].append(
410+
reasoning_list.append(
410411
"Multiple languages detected or unclear primary language"
411412
)
412413
else:
413414
recommendations["technology_stack"] = "empty_or_new"
414415
recommendations["suggested_approach"] = "ask_user_preferences"
415-
recommendations["reasoning"].append("No clear technology stack detected")
416+
reasoning_list.append("No clear technology stack detected")
416417

417418
return recommendations
418419

src/mcp_as_a_judge/workflow/workflow_guidance.py

Lines changed: 8 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -565,33 +565,19 @@ async def calculate_next_stage(
565565
)
566566

567567
# Fallback: if next_tool missing/None and not completed, route to get_current_coding_task
568-
if (
569-
workflow_guidance.next_tool is None
570-
and task_metadata.state != TaskState.COMPLETED
571-
):
568+
if workflow_guidance.next_tool is None and task_metadata.state != TaskState.COMPLETED:
572569
if "get_current_coding_task" in available_name_set:
573570
workflow_guidance.next_tool = "get_current_coding_task"
574571
else:
575572
# As a last resort, pick appropriate tool based on state
576-
if (
577-
task_metadata.state == TaskState.CREATED
578-
or task_metadata.state == TaskState.REQUIREMENTS_FEEDBACK
579-
):
580-
workflow_guidance.next_tool = "get_user_feedback"
581-
elif task_metadata.state == TaskState.USER_APPROVE_REQUIREMENTS:
582-
workflow_guidance.next_tool = (
583-
None # Let AI assistant create plan first
584-
)
585-
elif task_metadata.state == TaskState.PLANNING:
573+
# Note: CREATED, REQUIREMENTS_FEEDBACK, USER_APPROVE_REQUIREMENTS, PLAN_APPROVED,
574+
# IMPLEMENTING, REVIEW_READY, TESTING, and COMPLETED are handled by early returns
575+
current_state = task_metadata.state
576+
if current_state == TaskState.PLANNING:
586577
workflow_guidance.next_tool = "judge_coding_plan"
587-
elif task_metadata.state in (
588-
TaskState.PLAN_APPROVED,
589-
TaskState.IMPLEMENTING,
590-
TaskState.REVIEW_READY,
591-
):
592-
workflow_guidance.next_tool = "judge_code_change"
593-
elif task_metadata.state == TaskState.TESTING:
594-
workflow_guidance.next_tool = "judge_testing_implementation"
578+
elif current_state in (TaskState.BLOCKED, TaskState.CANCELLED):
579+
# For blocked/cancelled tasks, no specific tool recommendation
580+
workflow_guidance.next_tool = None
595581

596582
logger.info(
597583
f"Calculated next stage: next_tool={workflow_guidance.next_tool}, "

tests/conftest.py

Lines changed: 13 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
from unittest.mock import AsyncMock, MagicMock
1010

1111
import pytest
12+
from pydantic import BaseModel
1213

1314
from mcp_as_a_judge.models import JudgeResponse
1415

@@ -140,6 +141,15 @@ def mock_no_sampling_context():
140141
return mock_context
141142

142143

144+
class MockTextContent(BaseModel):
145+
type: str = "text"
146+
text: str
147+
148+
149+
class MockCreateMessageResult(BaseModel):
150+
content: MockTextContent
151+
152+
143153
class MockServerSession:
144154
"""Mock server session for testing."""
145155

@@ -148,25 +158,18 @@ def __init__(self, has_sampling: bool = True):
148158
self.has_sampling = has_sampling
149159

150160
async def create_message(self, **kwargs):
151-
"""Mock create_message method."""
161+
"""Mock create_message method returning Pydantic-like response objects."""
152162
if not self.has_sampling:
153163
raise RuntimeError("Context is not available outside of a request")
154164

155165
# Return proper JSON response for workflow guidance
156166
if "workflow" in str(kwargs).lower() or "guidance" in str(kwargs).lower():
157167
json_response = '{"next_tool": "judge_coding_plan", "reasoning": "Need to validate the coding plan", "preparation_needed": ["Gather requirements", "Research best practices"], "guidance": "Start by analyzing the requirements and creating a comprehensive plan"}'
158-
# Create a mock that mimics the MCP response structure
159-
mock_content = MagicMock()
160-
mock_content.type = "text"
161-
mock_content.text = json_response
162-
return MagicMock(content=mock_content)
168+
return MockCreateMessageResult(content=MockTextContent(text=json_response))
163169

164170
# Return proper JSON response for judge responses
165171
json_response = '{"approved": true, "feedback": "Mocked evaluation response"}'
166-
mock_content = MagicMock()
167-
mock_content.type = "text"
168-
mock_content.text = json_response
169-
return MagicMock(content=mock_content)
172+
return MockCreateMessageResult(content=MockTextContent(text=json_response))
170173

171174

172175
class MockContext:

0 commit comments

Comments (0)