Skip to content

Commit d80a11c

Browse files
author
Zvi Fried
committed
align loggers
1 parent f990117 commit d80a11c

File tree

4 files changed

+54
-50
lines changed

4 files changed

+54
-50
lines changed

src/mcp_as_a_judge/coding_task_manager.py

Lines changed: 26 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -6,15 +6,14 @@
66
"""
77

88
import json
9-
import logging
109
import time
11-
from typing import Optional
1210

1311
from mcp_as_a_judge.db.conversation_history_service import ConversationHistoryService
12+
from mcp_as_a_judge.logging_config import get_logger
1413
from mcp_as_a_judge.models.task_metadata import TaskMetadata, TaskState
1514

16-
# Set up logger directly to avoid circular imports
17-
logger = logging.getLogger(__name__)
15+
# Set up logger using custom get_logger function
16+
logger = get_logger(__name__)
1817

1918

2019
async def create_new_coding_task(
@@ -40,7 +39,7 @@ async def create_new_coding_task(
4039
New TaskMetadata instance
4140
"""
4241
logger.info(f"📝 Creating new coding task: {task_title}")
43-
42+
4443
# Create new TaskMetadata with auto-generated UUID
4544
task_metadata = TaskMetadata(
4645
title=task_title,
@@ -49,11 +48,11 @@ async def create_new_coding_task(
4948
state=TaskState.CREATED, # Default state for new tasks
5049
tags=tags,
5150
)
52-
51+
5352
# Add initial requirements to history if provided
5453
if user_requirements:
5554
task_metadata.update_requirements(user_requirements, source="initial")
56-
55+
5756
logger.info(f"✅ Created new task metadata: {task_metadata.task_id}")
5857
return task_metadata
5958

@@ -63,8 +62,8 @@ async def update_existing_coding_task(
6362
user_request: str,
6463
task_title: str,
6564
task_description: str,
66-
user_requirements: Optional[str],
67-
state: Optional[TaskState],
65+
user_requirements: str | None,
66+
state: TaskState | None,
6867
tags: list[str],
6968
conversation_service: ConversationHistoryService,
7069
) -> TaskMetadata:
@@ -88,46 +87,46 @@ async def update_existing_coding_task(
8887
ValueError: If task not found or invalid state transition
8988
"""
9089
logger.info(f"📝 Updating existing coding task: {task_id}")
91-
90+
9291
# Load existing task metadata from conversation history
9392
existing_metadata = await load_task_metadata_from_history(
9493
task_id=task_id,
9594
conversation_service=conversation_service,
9695
)
97-
96+
9897
if not existing_metadata:
9998
raise ValueError(f"Task not found: {task_id}")
100-
99+
101100
# Update mutable fields
102101
existing_metadata.title = task_title
103102
existing_metadata.description = task_description
104103
existing_metadata.tags = tags
105104
existing_metadata.updated_at = int(time.time())
106-
105+
107106
# Update requirements if provided
108107
if user_requirements is not None:
109108
existing_metadata.update_requirements(user_requirements, source="update")
110-
109+
111110
# Update state if provided (with validation)
112111
if state is not None:
113112
validate_state_transition(existing_metadata.state, state)
114113
existing_metadata.update_state(state)
115-
114+
116115
logger.info(f"✅ Updated task metadata: {task_id}")
117116
return existing_metadata
118117

119118

120119
async def load_task_metadata_from_history(
121120
task_id: str,
122121
conversation_service: ConversationHistoryService,
123-
) -> Optional[TaskMetadata]:
122+
) -> TaskMetadata | None:
124123
"""
125124
Load TaskMetadata from conversation history using task_id as primary key.
126-
125+
127126
Args:
128127
task_id: Task ID to load
129128
conversation_service: Conversation service
130-
129+
131130
Returns:
132131
TaskMetadata if found, None otherwise
133132
"""
@@ -136,7 +135,7 @@ async def load_task_metadata_from_history(
136135
conversation_history = await conversation_service.get_conversation_history(
137136
session_id=task_id
138137
)
139-
138+
140139
# Look for the most recent task metadata record
141140
for record in reversed(conversation_history):
142141
if record.source == "set_coding_task" and "task_metadata" in record.output:
@@ -145,9 +144,9 @@ async def load_task_metadata_from_history(
145144
if "current_task_metadata" in output_data:
146145
metadata_dict = output_data["current_task_metadata"]
147146
return TaskMetadata.model_validate(metadata_dict)
148-
147+
149148
return None
150-
149+
151150
except Exception as e:
152151
logger.warning(f"⚠️ Failed to load task metadata from history: {e}")
153152
return None
@@ -161,7 +160,7 @@ async def save_task_metadata_to_history(
161160
) -> None:
162161
"""
163162
Save TaskMetadata to conversation history using task_id as primary key.
164-
163+
165164
Args:
166165
task_metadata: Task metadata to save
167166
user_request: Original user request
@@ -180,9 +179,9 @@ async def save_task_metadata_to_history(
180179
"timestamp": int(time.time()),
181180
}),
182181
)
183-
182+
184183
logger.info(f"💾 Saved task metadata to conversation history: {task_metadata.task_id}")
185-
184+
186185
except Exception as e:
187186
logger.error(f"❌ Failed to save task metadata to history: {e}")
188187
# Don't raise - this is not critical for tool operation
@@ -191,11 +190,11 @@ async def save_task_metadata_to_history(
191190
def validate_state_transition(current_state: TaskState, new_state: TaskState) -> None:
192191
"""
193192
Validate that the state transition is allowed.
194-
193+
195194
Args:
196195
current_state: Current TaskState
197196
new_state: Requested new TaskState
198-
197+
199198
Raises:
200199
ValueError: If transition is not allowed
201200
"""
@@ -210,7 +209,7 @@ def validate_state_transition(current_state: TaskState, new_state: TaskState) ->
210209
TaskState.BLOCKED: [TaskState.CREATED, TaskState.PLANNING, TaskState.PLAN_APPROVED, TaskState.IMPLEMENTING, TaskState.REVIEW_READY, TaskState.CANCELLED],
211210
TaskState.CANCELLED: [], # No transitions from cancelled state
212211
}
213-
212+
214213
if new_state not in valid_transitions.get(current_state, []):
215214
raise ValueError(
216215
f"Invalid state transition: {current_state.value} → {new_state.value}. "

src/mcp_as_a_judge/logging_config.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -88,13 +88,16 @@ def configure_application_loggers(level: int = logging.INFO) -> None:
8888
# List of application-specific loggers to configure
8989
app_loggers = [
9090
"mcp_as_a_judge.server",
91+
"mcp_as_a_judge.server_helpers",
9192
"mcp_as_a_judge.conversation_history_service",
9293
"mcp_as_a_judge.db.conversation_history_service",
9394
"mcp_as_a_judge.db.providers.in_memory",
9495
"mcp_as_a_judge.db.providers.sqlite_provider",
9596
"mcp_as_a_judge.messaging",
9697
"mcp_as_a_judge.llm_client",
9798
"mcp_as_a_judge.config",
99+
"mcp_as_a_judge.workflow.workflow_guidance",
100+
"mcp_as_a_judge.coding_task_manager",
98101
]
99102

100103
# Set level for each application logger

src/mcp_as_a_judge/server_helpers.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
from mcp_as_a_judge.constants import MAX_TOKENS
1414
from mcp_as_a_judge.llm_client import llm_manager
1515
from mcp_as_a_judge.llm_integration import load_llm_config_from_env
16+
from mcp_as_a_judge.logging_config import get_logger
1617
from mcp_as_a_judge.messaging.llm_provider import llm_provider
1718
from mcp_as_a_judge.prompt_loader import create_separate_messages
1819

@@ -27,17 +28,18 @@ def initialize_llm_configuration() -> None:
2728
2829
This function loads LLM configuration from environment variables and
2930
configures the LLM manager if a valid configuration is found.
30-
Prints status messages to inform users about the configuration state.
31+
Logs status messages to inform users about the configuration state.
3132
"""
33+
logger = get_logger(__name__)
3234
llm_config = load_llm_config_from_env()
3335
if llm_config:
3436
llm_manager.configure(llm_config)
3537
vendor_name = llm_config.vendor.value if llm_config.vendor else "unknown"
36-
print(
37-
f"LLM fallback configured: {vendor_name} with model {llm_config.model_name}"
38+
logger.info(
39+
f"🔧 LLM fallback configured: {vendor_name} with model {llm_config.model_name}"
3840
)
3941
else:
40-
print("No LLM API key found in environment. MCP sampling will be required.")
42+
logger.info("🔧 No LLM API key found in environment. MCP sampling will be required.")
4143

4244

4345
def extract_json_from_response(response_text: str) -> str:

src/mcp_as_a_judge/workflow/workflow_guidance.py

Lines changed: 19 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -7,19 +7,18 @@
77
"""
88

99
import json
10-
from typing import Any, Dict, Optional
10+
from typing import Any
1111

1212
from pydantic import BaseModel, Field
1313

14-
import logging
15-
1614
from mcp_as_a_judge.constants import MAX_TOKENS
1715
from mcp_as_a_judge.db.conversation_history_service import ConversationHistoryService
16+
from mcp_as_a_judge.logging_config import get_logger
1817
from mcp_as_a_judge.messaging.llm_provider import llm_provider
1918
from mcp_as_a_judge.models.task_metadata import TaskMetadata, TaskState
2019

21-
# Set up logger directly to avoid circular imports
22-
logger = logging.getLogger(__name__)
20+
# Set up logger using custom get_logger function
21+
logger = get_logger(__name__)
2322

2423

2524

@@ -34,7 +33,7 @@ class WorkflowGuidance(BaseModel):
3433
3534
Compatible with the original WorkflowGuidance model from models.py.
3635
"""
37-
next_tool: Optional[str] = Field(
36+
next_tool: str | None = Field(
3837
description="Next tool to call, or None if workflow complete"
3938
)
4039
reasoning: str = Field(
@@ -84,10 +83,10 @@ async def calculate_next_stage(
8483
task_metadata: TaskMetadata,
8584
current_operation: str,
8685
conversation_service: ConversationHistoryService,
87-
ctx: Optional[Any] = None, # MCP Context for llm_provider
88-
validation_result: Optional[Any] = None,
89-
completion_result: Optional[Any] = None,
90-
accumulated_changes: Optional[Dict] = None,
86+
ctx: Any | None = None, # MCP Context for llm_provider
87+
validation_result: Any | None = None,
88+
completion_result: Any | None = None,
89+
accumulated_changes: dict | None = None,
9190
) -> WorkflowGuidance:
9291
"""
9392
SHARED METHOD used by all tools to calculate next_tool and instructions.
@@ -110,14 +109,14 @@ async def calculate_next_stage(
110109
Exception: If LLM fails to generate valid navigation
111110
"""
112111
logger.info(f"🧠 Calculating next stage for task {task_metadata.task_id}")
113-
112+
114113
try:
115114
# Load conversation history using task_id as primary key
116115
# Note: For now we'll use task_id as session_id until we update the DB schema
117116
conversation_history = await conversation_service.get_conversation_history(
118117
session_id=task_metadata.task_id
119118
)
120-
119+
121120
# Format conversation history for LLM context
122121
conversation_context = _format_conversation_for_llm(conversation_history)
123122

@@ -174,9 +173,10 @@ async def calculate_next_stage(
174173
logger.info(f"📤 Sending navigation request to LLM for task {task_metadata.task_id}")
175174

176175
# Use the same messaging pattern as other tools
177-
from mcp_as_a_judge.prompt_loader import create_separate_messages
178176
from mcp.types import SamplingMessage
179177

178+
from mcp_as_a_judge.prompt_loader import create_separate_messages
179+
180180
# Create system and user variables for the workflow guidance
181181
system_vars = WorkflowGuidanceSystemVars(
182182
response_schema=json.dumps(WorkflowGuidance.model_json_schema())
@@ -227,7 +227,7 @@ async def calculate_next_stage(
227227
except (ValueError, json.JSONDecodeError) as e:
228228
logger.error(f"❌ Failed to parse LLM response: {e}")
229229
logger.error(f"❌ Raw response: {response[:500]}...")
230-
raise ValueError(f"Failed to parse workflow guidance response: {e}")
230+
raise ValueError(f"Failed to parse workflow guidance response: {e}") from e
231231

232232
# Validate required fields
233233
required_fields = ["next_tool", "reasoning", "preparation_needed", "guidance"]
@@ -278,31 +278,31 @@ async def calculate_next_stage(
278278
next_tool=None,
279279
reasoning="Error occurred during workflow calculation",
280280
preparation_needed=["Review the error and task state"],
281-
guidance=f"Error calculating next stage: {str(e)}. Please review task manually."
281+
guidance=f"Error calculating next stage: {e!s}. Please review task manually."
282282
)
283283

284284

285285
def _format_conversation_for_llm(conversation_history) -> str:
286286
"""
287287
Format conversation history for LLM context.
288-
288+
289289
Args:
290290
conversation_history: List of conversation records
291-
291+
292292
Returns:
293293
Formatted string for LLM prompt
294294
"""
295295
if not conversation_history:
296296
return "No previous conversation history."
297-
297+
298298
formatted_lines = []
299299
for record in conversation_history[-10:]: # Last 10 records
300300
formatted_lines.append(
301301
f"[{record.timestamp}] {record.source}:\n"
302302
f"Input: {record.input}\n"
303303
f"Output: {record.output}\n"
304304
)
305-
305+
306306
return "\n".join(formatted_lines)
307307

308308

0 commit comments

Comments (0)