Skip to content

Commit cf14423

Browse files
author
Zvi Fried
committed
fix for validation error
1 parent bd83fae commit cf14423

File tree

3 files changed

+106
-120
lines changed

3 files changed

+106
-120
lines changed

src/mcp_as_a_judge/models.py

Lines changed: 18 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
from pydantic import BaseModel, Field
99

1010
from mcp_as_a_judge.constants import MAX_TOKENS
11-
from mcp_as_a_judge.models.task_metadata import TaskMetadata
11+
from mcp_as_a_judge.models.task_metadata import TaskMetadata, TaskSize
1212
from mcp_as_a_judge.workflow import WorkflowGuidance
1313

1414

@@ -31,12 +31,24 @@ class JudgeResponse(BaseModel):
3131
description="Detailed explanation of the decision and recommendations"
3232
)
3333

34-
# Enhanced workflow fields
34+
# Enhanced workflow fields (defaults added for robustness)
3535
current_task_metadata: TaskMetadata = Field(
36-
description="Current state of task metadata after operation"
36+
default_factory=lambda: TaskMetadata(
37+
title="Unknown Task",
38+
description="No metadata provided",
39+
user_requirements="",
40+
task_size=TaskSize.M,
41+
),
42+
description="Current state of task metadata after operation",
3743
)
3844
workflow_guidance: WorkflowGuidance = Field(
39-
description="LLM-generated next steps and instructions"
45+
default_factory=lambda: WorkflowGuidance(
46+
next_tool=None,
47+
reasoning="Default guidance: insufficient context",
48+
preparation_needed=[],
49+
guidance="Provide required parameters and context",
50+
),
51+
description="LLM-generated next steps and instructions",
4052
)
4153

4254

@@ -61,26 +73,8 @@ class ObstacleResolutionDecision(BaseModel):
6173
# This allows for context-specific elicitation fields generated by LLM
6274

6375

64-
class WorkflowGuidance(BaseModel):
65-
"""Schema for workflow guidance responses.
66-
67-
Used by the build_workflow tool to provide
68-
structured guidance on which tools to use next.
69-
"""
70-
71-
next_tool: str = Field(
72-
description="The specific MCP tool that should be called next: 'judge_coding_plan', 'judge_code_change', 'raise_obstacle', or 'elicit_missing_requirements'"
73-
)
74-
reasoning: str = Field(
75-
description="Clear explanation of why this tool should be used next"
76-
)
77-
preparation_needed: list[str] = Field(
78-
default_factory=list,
79-
description="List of things that need to be prepared before calling the recommended tool",
80-
)
81-
guidance: str = Field(
82-
description="Detailed step-by-step guidance for the AI assistant"
83-
)
76+
# NOTE: WorkflowGuidance is defined in `workflow/workflow_guidance.py` and imported above.
77+
# This file intentionally does not redefine it to avoid duplication.
8478

8579

8680
class ResearchValidationResponse(BaseModel):
Lines changed: 80 additions & 89 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
11
"""
22
Models package for MCP as a Judge.
33
4-
This package contains all data models used throughout the application,
5-
including task metadata, enhanced responses, and workflow guidance models.
4+
This package re-exports models for convenient `from mcp_as_a_judge.models import ...`
5+
usage. The canonical `WorkflowGuidance` lives in `workflow/workflow_guidance.py` and
6+
is imported here for a single source of truth.
67
"""
78

89
# Task metadata models
@@ -49,97 +50,87 @@
4950
"WorkflowGuidanceUserVars",
5051
]
5152

52-
# Import additional models from the original models.py file
53-
# Import them here to avoid circular imports
54-
try:
55-
import importlib.util
56-
import os
53+
# Import additional models from the file `src/mcp_as_a_judge/models.py`.
54+
# We use a lightweight file-based import to avoid the Python package/module name
55+
# collision and keep downstream imports stable.
56+
import importlib.util
57+
import os
58+
from typing import Any
5759

58-
# Get the path to models.py
59-
current_dir = os.path.dirname(__file__)
60-
models_py_path = os.path.join(os.path.dirname(current_dir), "models.py")
61-
62-
if os.path.exists(models_py_path):
63-
spec = importlib.util.spec_from_file_location("models_py", models_py_path)
64-
if spec is not None:
65-
models_py = importlib.util.module_from_spec(spec)
66-
if spec.loader is not None:
67-
spec.loader.exec_module(models_py) # type: ignore[union-attr]
68-
69-
# Import the models we need
70-
ElicitationFallbackUserVars = models_py.ElicitationFallbackUserVars
71-
JudgeCodeChangeUserVars = models_py.JudgeCodeChangeUserVars
72-
JudgeCodingPlanUserVars = models_py.JudgeCodingPlanUserVars
73-
ResearchValidationResponse = models_py.ResearchValidationResponse
74-
ResearchValidationUserVars = models_py.ResearchValidationUserVars
75-
WorkflowGuidanceUserVars = models_py.WorkflowGuidanceUserVars
76-
DynamicSchemaUserVars = models_py.DynamicSchemaUserVars
77-
ValidationErrorUserVars = models_py.ValidationErrorUserVars
78-
SystemVars = models_py.SystemVars
79-
80-
# Import research-related models
81-
ResearchComplexityFactors = models_py.ResearchComplexityFactors
82-
ResearchRequirementsAnalysis = models_py.ResearchRequirementsAnalysis
83-
ResearchRequirementsAnalysisUserVars = (
84-
models_py.ResearchRequirementsAnalysisUserVars
85-
)
86-
URLValidationResult = models_py.URLValidationResult
87-
88-
except Exception:
89-
# Fallback if models.py doesn't exist or has issues
90-
# Create minimal fallback classes to prevent import errors
91-
from pydantic import BaseModel, Field
92-
93-
class ElicitationFallbackUserVars(BaseModel):
94-
pass
95-
96-
class JudgeCodeChangeUserVars(BaseModel):
97-
pass
60+
from pydantic import BaseModel, Field
9861

99-
class JudgeCodingPlanUserVars(BaseModel):
100-
pass
10162

102-
class ResearchValidationResponse(BaseModel):
103-
pass
104-
105-
class ResearchValidationUserVars(BaseModel):
106-
pass
107-
108-
class WorkflowGuidanceUserVars(BaseModel):
109-
pass
110-
111-
class DynamicSchemaUserVars(BaseModel):
112-
pass
113-
114-
class SystemVars(BaseModel):
115-
pass
116-
117-
class ResearchComplexityFactors(BaseModel):
118-
domain_specialization: str = Field(default="general")
119-
technology_maturity: str = Field(default="established")
120-
integration_scope: str = Field(default="moderate")
121-
existing_solutions: str = Field(default="limited")
122-
risk_level: str = Field(default="medium")
63+
def _load_models_py() -> Any | None:
64+
current_dir = os.path.dirname(__file__)
65+
models_py_path = os.path.join(os.path.dirname(current_dir), "models.py")
66+
if not os.path.exists(models_py_path):
67+
return None
68+
spec = importlib.util.spec_from_file_location("models_py", models_py_path)
69+
if spec is None or spec.loader is None: # type: ignore[truthy-function]
70+
return None
71+
module = importlib.util.module_from_spec(spec)
72+
spec.loader.exec_module(module) # type: ignore[attr-defined]
73+
return module
12374

124-
class ResearchRequirementsAnalysis(BaseModel):
125-
expected_url_count: int = Field(default=3)
126-
minimum_url_count: int = Field(default=2)
127-
reasoning: str = Field(default="Fallback analysis")
128-
complexity_factors: ResearchComplexityFactors = Field(
129-
default_factory=ResearchComplexityFactors
130-
)
131-
quality_requirements: list[str] = Field(default_factory=list)
13275

133-
class ResearchRequirementsAnalysisSystemVars(BaseModel):
134-
pass
76+
# Resolve the real models.py once at import time; ``None`` means the
# placeholder fallbacks below must be used instead.
models_py = _load_models_py()

# Public names re-exported from models.py.
_NAMES = [
    "ElicitationFallbackUserVars",
    "JudgeCodeChangeUserVars",
    "JudgeCodingPlanUserVars",
    "ResearchValidationResponse",
    "ResearchValidationUserVars",
    "WorkflowGuidanceUserVars",
    "DynamicSchemaUserVars",
    "ValidationErrorUserVars",
    "SystemVars",
    "ResearchComplexityFactors",
    "ResearchRequirementsAnalysis",
    "ResearchRequirementsAnalysisUserVars",
    "URLValidationResult",
]


def _fallback_model(name: str) -> type:
    """Build a minimal placeholder model for *name*.

    Only used when models.py could not be loaded; the research-related
    types get slightly richer defaults, everything else is an empty model.
    """
    if name == "ResearchComplexityFactors":
        class ResearchComplexityFactors(BaseModel):
            domain_specialization: str = Field(default="general")
            technology_maturity: str = Field(default="established")
            integration_scope: str = Field(default="moderate")
            existing_solutions: str = Field(default="limited")
            risk_level: str = Field(default="medium")

        return ResearchComplexityFactors

    if name == "ResearchRequirementsAnalysis":
        class ResearchRequirementsAnalysis(BaseModel):
            expected_url_count: int = Field(default=3)
            minimum_url_count: int = Field(default=1)
            reasoning: str = Field(default="Fallback analysis")
            complexity_factors: Any = Field(default=None)
            quality_requirements: list[str] = Field(default_factory=list)

        return ResearchRequirementsAnalysis

    if name == "URLValidationResult":
        class URLValidationResult(BaseModel):
            adequate: bool = Field(default=False)
            provided_count: int = Field(default=0)
            expected_count: int = Field(default=0)
            minimum_count: int = Field(default=0)
            feedback: str = Field(default="Fallback validation")
            meets_quality_standards: bool = Field(default=False)

        return URLValidationResult

    # Generic empty placeholder for every other name.
    return type(name, (BaseModel,), {})


for _name in _NAMES:
    if models_py is not None and hasattr(models_py, _name):
        # Normal path: re-export the real definition from models.py.
        globals()[_name] = getattr(models_py, _name)
    else:
        # Safe placeholders only used if models.py cannot be loaded.
        globals()[_name] = _fallback_model(_name)

src/mcp_as_a_judge/workflow/workflow_guidance.py

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -37,16 +37,16 @@ def should_skip_planning(task_metadata: TaskMetadata) -> bool:
3737

3838
class WorkflowGuidance(BaseModel):
3939
"""
40-
LLM-generated workflow guidance from shared calculate_next_stage method.
40+
Canonical workflow guidance model used across the system.
4141
42-
This model is returned by all tools to provide consistent next steps
43-
and instructions for the coding assistant.
44-
45-
Compatible with the original WorkflowGuidance model from models.py.
42+
Returned by tools to provide consistent next steps and instructions for
43+
the coding assistant. This is the single source of truth for the
44+
WorkflowGuidance schema.
4645
"""
4746

4847
next_tool: str | None = Field(
49-
description="Next tool to call, or None if workflow complete"
48+
default=None,
49+
description="Next tool to call, or None if workflow complete",
5050
)
5151
reasoning: str = Field(
5252
default="", description="Clear explanation of why this tool should be used next"
@@ -56,7 +56,8 @@ class WorkflowGuidance(BaseModel):
5656
description="List of things that need to be prepared before calling the recommended tool",
5757
)
5858
guidance: str = Field(
59-
description="Detailed step-by-step guidance for the AI assistant"
59+
default="",
60+
description="Detailed step-by-step guidance for the AI assistant",
6061
)
6162

6263
# Research requirement determination for new tasks (only populated when task is CREATED)

0 commit comments

Comments
 (0)