Commit c6bac7b

Author: dori
feat: try to fix build

1 parent 3160bd1

File tree

src/mcp_as_a_judge/db/dynamic_token_limits.py
src/mcp_as_a_judge/db/token_utils.py

2 files changed, +35 -12 lines changed

src/mcp_as_a_judge/db/dynamic_token_limits.py
Lines changed: 11 additions & 3 deletions

@@ -9,6 +9,10 @@
 from dataclasses import dataclass
 
 from mcp_as_a_judge.constants import MAX_CONTEXT_TOKENS, MAX_RESPONSE_TOKENS
+from mcp_as_a_judge.logging_config import get_logger
+
+# Set up logger
+logger = get_logger(__name__)
 
 
 @dataclass
@@ -67,11 +71,15 @@ def get_model_limits(model_name: str | None = None) -> ModelLimits:
 
         # Cache and return what we have
         _model_limits_cache[model_name] = limits
+        logger.debug(
+            f"Retrieved model limits from LiteLLM for {model_name}: {limits.max_input_tokens} input tokens"
+        )
 
-    except Exception:
-        # LiteLLM not available or model info retrieval failed
+    except ImportError:
+        logger.debug("LiteLLM not available, using hardcoded defaults")
+    except Exception as e:
+        logger.debug(f"Failed to get model info from LiteLLM for {model_name}: {e}")
         # Continue with hardcoded defaults
-        pass
 
     return limits
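This hunk splits a blanket `except Exception: pass` into an ImportError branch (LiteLLM not installed) and a catch-all branch (the lookup itself failed), each logging at debug level before the function falls through to its hardcoded defaults. A minimal standalone sketch of the same pattern, assuming `litellm.get_model_info` is the underlying lookup and using an illustrative `DEFAULT_MAX_INPUT_TOKENS` constant rather than the repository's real values:

import logging

logger = logging.getLogger(__name__)

DEFAULT_MAX_INPUT_TOKENS = 128_000  # hypothetical fallback, not the repo's value


def lookup_max_input_tokens(model_name: str) -> int:
    """Return the model's input-token limit, falling back to a default."""
    limit = DEFAULT_MAX_INPUT_TOKENS
    try:
        import litellm  # optional dependency; raises ImportError if absent

        info = litellm.get_model_info(model_name)
        if info.get("max_input_tokens"):
            limit = info["max_input_tokens"]
        logger.debug(
            f"Retrieved model limits from LiteLLM for {model_name}: {limit} input tokens"
        )
    except ImportError:
        # Library missing entirely: keep the default and say so at debug level.
        logger.debug("LiteLLM not available, using hardcoded defaults")
    except Exception as e:
        # Library present but the lookup failed (e.g. unknown model name).
        logger.debug(f"Failed to get model info from LiteLLM for {model_name}: {e}")
    return limit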

src/mcp_as_a_judge/db/token_utils.py
Lines changed: 24 additions & 9 deletions

@@ -7,6 +7,10 @@
 """
 
 from mcp_as_a_judge.db.dynamic_token_limits import get_llm_input_limit
+from mcp_as_a_judge.logging_config import get_logger
+
+# Set up logger
+logger = get_logger(__name__)
 
 # Global cache for model name detection
 _cached_model_name: str | None = None
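Both files now create a module-level logger at import time. Assuming `get_logger` wraps the standard library's `logging.getLogger` (the repository's `logging_config` module is not shown in this diff), enabling debug output is enough to see which fallback path ran. A hypothetical usage sketch:

import logging

# Hypothetical: route debug records to stderr and lower the package threshold.
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("mcp_as_a_judge").setLevel(logging.DEBUG)

# Counting tokens without LiteLLM installed would then emit something like:
#   DEBUG:mcp_as_a_judge.db.token_utils:LiteLLM not available for token
#   counting, using approximation
# (the exact format depends on logging_config's handler setup).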
@@ -34,9 +38,12 @@ async def detect_model_name(ctx=None) -> str | None:
         client = llm_manager.get_client()
         if client and hasattr(client, "config") and client.config.model_name:
             return client.config.model_name
-    except Exception:
-        # LLM client not available or configuration error
-        pass
+    except ImportError:
+        logger.debug("LLM client module not available")
+    except AttributeError as e:
+        logger.debug(f"LLM client configuration incomplete: {e}")
+    except Exception as e:
+        logger.debug(f"Failed to get model name from LLM client: {e}")
 
     # Try MCP sampling if context available
     if ctx:
@@ -57,9 +64,12 @@
             if hasattr(result, "model") and result.model:
                 return result.model
 
-        except Exception:
-            # MCP sampling failed or not available
-            pass
+        except ImportError:
+            logger.debug("MCP types not available for sampling")
+        except AttributeError as e:
+            logger.debug(f"MCP sampling response missing expected attributes: {e}")
+        except Exception as e:
+            logger.debug(f"MCP sampling failed: {e}")
 
     return None

@@ -121,9 +131,14 @@ async def calculate_tokens_in_string(
         token_count = litellm.token_counter(model=model_name, text=text)
         return token_count
 
-    except Exception:
-        # Fall back to approximation if LiteLLM fails
-        pass
+    except ImportError:
+        logger.debug(
+            "LiteLLM not available for token counting, using approximation"
+        )
+    except Exception as e:
+        logger.debug(
+            f"LiteLLM token counting failed for model {model_name}: {e}, using approximation"
+        )
 
     # Fallback to character-based approximation
     return (len(text) + 3) // 4
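The unchanged last line is the character-based fallback: roughly four characters per token, with `(len(text) + 3) // 4` acting as ceiling division. A small self-contained sketch with worked values:

def approx_tokens(text: str) -> int:
    # (len + 3) // 4 == ceil(len / 4): every started group of four
    # characters counts as one token.
    return (len(text) + 3) // 4


assert approx_tokens("") == 0
assert approx_tokens("abcd") == 1         # exactly one 4-char group
assert approx_tokens("abcde") == 2        # a fifth char starts a new group
assert approx_tokens("hello world") == 3  # 11 chars -> ceil(11 / 4) = 3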
