Commit 6d992ac

Remove problematic token usage tracking
1 parent 3145c19 commit 6d992ac

2 files changed: +8 −45 lines


mxgo/agents/email_agent.py

Lines changed: 8 additions & 3 deletions
@@ -11,17 +11,22 @@
 from smolagents import Tool, ToolCallingAgent
 from smolagents.monitoring import TokenUsage
 
-# Monkey patch TokenUsage to handle None values
+# Monkey patch TokenUsage to handle None values robustly
 original_post_init = TokenUsage.__post_init__
 
 
 def patched_post_init(self):
-    # Handle None values by setting them to 0
+    # Handle None values by setting them to 0 before any operations
     if self.input_tokens is None:
         self.input_tokens = 0
     if self.output_tokens is None:
         self.output_tokens = 0
-    original_post_init(self)
+    # Call original post_init which will now work with non-None values
+    try:
+        original_post_init(self)
+    except Exception:
+        # If original post_init still fails, set total_tokens manually
+        self.total_tokens = self.input_tokens + self.output_tokens
 
 
 TokenUsage.__post_init__ = patched_post_init
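
For context, a minimal sketch (not part of the commit) of the behaviour this patch buys. It assumes, as the diff implies, that TokenUsage is a dataclass whose original __post_init__ derives total_tokens from input_tokens + output_tokens and therefore fails when either value is None:

from smolagents.monitoring import TokenUsage
import mxgo.agents.email_agent  # noqa: F401  (importing the module applies the module-level monkey patch)

# With the patch in place, None token counts are coerced to 0 instead of
# raising a TypeError inside the original __post_init__.
usage = TokenUsage(input_tokens=None, output_tokens=None)
print(usage.input_tokens, usage.output_tokens, usage.total_tokens)  # expected: 0 0 0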

mxgo/routed_litellm_model.py

Lines changed: 0 additions & 42 deletions
@@ -48,10 +48,6 @@ def __init__(self, current_handle: ProcessingInstructions | None = None, target_
             )
             raise exceptions.EnvironmentVariableNotFoundError(msg)
 
-        # Initialize token count attributes before calling super().__init__
-        self._last_input_token_count = 0
-        self._last_output_token_count = 0
-
         # Set environment variables for Azure OpenAI models before initializing the parent class
         # This is required because LiteLLM's Azure provider looks for specific environment variables
         self._set_azure_environment_variables(model_list)
@@ -240,15 +236,6 @@ def generate(
 
         response = self.client.completion(**completion_kwargs)
 
-        # Safely handle response usage tracking
-        if response and hasattr(response, "usage") and response.usage:
-            self._last_input_token_count = response.usage.prompt_tokens
-            self._last_output_token_count = response.usage.completion_tokens
-        else:
-            # Set default values if usage information is not available
-            self._last_input_token_count = 0
-            self._last_output_token_count = 0
-
         return ChatMessage.from_dict(
             response.choices[0].message.model_dump(include={"role", "content", "tool_calls"}),
             raw=response,
@@ -305,32 +292,3 @@ def __call__(
             raise RuntimeError(msg) from e
         else:
             return chat_message
-
-    @property
-    def last_input_token_count(self) -> int:
-        """Safely return the last input token count."""
-        return getattr(self, "_last_input_token_count", 0)
-
-    @last_input_token_count.setter
-    def last_input_token_count(self, value: int | None) -> None:
-        """Safely set the last input token count."""
-        self._last_input_token_count = value if value is not None else 0
-
-    @property
-    def last_output_token_count(self) -> int:
-        """Safely return the last output token count."""
-        return getattr(self, "_last_output_token_count", 0)
-
-    @last_output_token_count.setter
-    def last_output_token_count(self, value: int | None) -> None:
-        """Safely set the last output token count."""
-        self._last_output_token_count = value if value is not None else 0
-
-    def __getattr__(self, name: str):
-        """Handle any missing attribute access gracefully, especially for token-related properties."""
-        # Handle various token-related attribute names that might be accessed
-        if name in ("input_tokens", "output_tokens", "total_tokens", "prompt_tokens", "completion_tokens", "usage"):
-            return 0
-        # For other missing attributes, raise AttributeError as normal
-        msg = f"'{self.__class__.__name__}' object has no attribute '{name}'"
-        raise AttributeError(msg)
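
With the cached counters and the __getattr__ fallback gone, RoutedLiteLLMModel no longer tracks token usage itself. If downstream code still needs counts, one option is to read them defensively from the raw LiteLLM response that generate attaches to the returned ChatMessage. A hypothetical helper (not part of this commit), assuming the raw response exposes the usual usage object with the prompt_tokens / completion_tokens fields seen in the removed block:

def usage_from_raw(chat_message) -> tuple[int, int]:
    """Best-effort (prompt_tokens, completion_tokens) read from ChatMessage.raw."""
    usage = getattr(getattr(chat_message, "raw", None), "usage", None)
    prompt_tokens = getattr(usage, "prompt_tokens", None) or 0
    completion_tokens = getattr(usage, "completion_tokens", None) or 0
    return prompt_tokens, completion_tokens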
