Skip to content

Commit 91cb5ec

Browse files
Author: dori (committed)
Commit message: feat: try to fix build
Parent: c6bac7b · Commit: 91cb5ec

File tree: 3 files changed (+17 additions, −10 deletions)

src/mcp_as_a_judge/db/conversation_history_service.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@
 3. Managing session-based conversation history
 """
 
+from typing import Any
+
 from mcp_as_a_judge.db import (
     ConversationHistoryDB,
     ConversationRecord,
@@ -39,7 +41,7 @@ def __init__(
         self.db = db_provider or create_database_provider(config)
 
     async def load_filtered_context_for_enrichment(
-        self, session_id: str, current_prompt: str = "", ctx=None
+        self, session_id: str, current_prompt: str = "", ctx: Any = None
     ) -> list[ConversationRecord]:
         """
         Load recent conversation records for LLM context enrichment.

src/mcp_as_a_judge/db/token_utils.py

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,11 @@
 with fallback to character-based approximation.
 """
 
+from typing import TYPE_CHECKING, Any
+
+if TYPE_CHECKING:
+    from mcp_as_a_judge.db import ConversationRecord
+
 from mcp_as_a_judge.db.dynamic_token_limits import get_llm_input_limit
 from mcp_as_a_judge.logging_config import get_logger
 
@@ -16,7 +21,7 @@
 _cached_model_name: str | None = None
 
 
-async def detect_model_name(ctx=None) -> str | None:
+async def detect_model_name(ctx: Any = None) -> str | None:
     """
     Unified method to detect model name from either LLM config or MCP sampling.
 
@@ -62,7 +67,7 @@ async def detect_model_name(ctx=None) -> str | None:
 
         # Extract model name from response
         if hasattr(result, "model") and result.model:
-            return result.model
+            return str(result.model)
 
     except ImportError:
         logger.debug("MCP types not available for sampling")
@@ -74,7 +79,7 @@ async def detect_model_name(ctx=None) -> str | None:
     return None
 
 
-async def get_current_model_limits(ctx=None) -> tuple[int, int]:
+async def get_current_model_limits(ctx: Any = None) -> tuple[int, int]:
     """
     Simple wrapper: detect current model and return its token limits.
 
@@ -100,7 +105,7 @@ async def get_current_model_limits(ctx=None) -> tuple[int, int]:
 
 
 async def calculate_tokens_in_string(
-    text: str, model_name: str | None = None, ctx=None
+    text: str, model_name: str | None = None, ctx: Any = None
 ) -> int:
     """
     Calculate accurate token count from text using LiteLLM's token_counter.
@@ -145,7 +150,7 @@ async def calculate_tokens_in_string(
 
 
 async def calculate_tokens_in_record(
-    input_text: str, output_text: str, model_name: str | None = None, ctx=None
+    input_text: str, output_text: str, model_name: str | None = None, ctx: Any = None
 ) -> int:
     """
     Calculate total token count for input and output text.
@@ -181,7 +186,7 @@ def calculate_tokens_in_records(records: list) -> int:
 
 
 async def filter_records_by_token_limit(
-    records: list, current_prompt: str = "", ctx=None
+    records: list, current_prompt: str = "", ctx: Any = None
 ) -> list:
     """
     Filter conversation records to stay within token and record limits.

src/mcp_as_a_judge/server.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ async def build_workflow(
     # STEP 1: Load conversation history and format as JSON array
     conversation_history = (
         await conversation_service.load_filtered_context_for_enrichment(
-            session_id, original_input, ctx
+            session_id, current_prompt, ctx
         )
     )
     history_json_array = (
@@ -572,7 +572,7 @@ async def judge_coding_plan(
     # STEP 1: Load conversation history and format as JSON array
     conversation_history = (
         await conversation_service.load_filtered_context_for_enrichment(
-            session_id, original_input, ctx
+            session_id, current_prompt, ctx
         )
    )
     history_json_array = (
@@ -660,7 +660,7 @@ async def judge_code_change(
     # STEP 1: Load conversation history and format as JSON array
     conversation_history = (
         await conversation_service.load_filtered_context_for_enrichment(
-            session_id, original_input, ctx
+            session_id, current_prompt, ctx
         )
     )
     history_json_array = (

Comments: 0