feat: Improve logging for debugging and monitoring #118
thisisharsh7 wants to merge 1 commit into jentic:main
Conversation
Kindly review it and let me know if any changes are needed. Thanks!
Hey @rishikesh-jentic, could you let me know if any changes are needed?
@CodeRabbit review this PR |
✅ Actions performed: Review triggered.
📝 Walkthrough

This pull request adds structured logging instrumentation across the agent framework to enhance observability. Logs capture initialization events, LLM interactions with token-usage metrics, goal-preprocessing activities, and tool-execution details, without modifying control flow or function signatures.
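The keyword-argument call style in the snippets below (`logger.debug("event_name", key=value)`) matches a structlog-style bound logger. A minimal sketch of such a setup, purely illustrative since the PR's logging configuration is not shown in this diff:

```python
import structlog

# Illustrative configuration only -- the repo's actual logging setup is not part of this PR.
structlog.configure(
    processors=[
        structlog.processors.add_log_level,           # adds "level" to each event dict
        structlog.processors.TimeStamper(fmt="iso"),  # adds an ISO-8601 "timestamp"
        structlog.processors.JSONRenderer(),          # emits one JSON object per event
    ],
)

logger = structlog.get_logger()
logger.debug("llm_prompt", model="gpt-4o", prompt_preview="Summarize the ticket...")
```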
Estimated code review effort: 🎯 2 (Simple) | ⏱️ ~12 minutes
🚥 Pre-merge checks: ✅ 4 passed | ❌ 1 failed
❌ Failed checks (1 warning)
✅ Passed checks (4 passed)
🧹 Recent nitpick comments
📜 Recent review details: Configuration used: defaults | Review profile: CHILL | Plan: Pro
📒 Files selected for processing (7)
🧰 Additional context used
🧬 Code graph analysis (3)
- agents/goal_preprocessor/conversational.py (1)
- agents/llm/litellm.py (1)
- agents/llm/base_llm.py (2)
🪛 Ruff (0.14.11)
- agents/llm/litellm.py:147: Do not catch blind exception (BLE001)
🔇 Additional comments (11)
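On the Ruff BLE001 finding above: a hedged sketch of how a blind `except Exception` around the completion call could be narrowed while keeping failures observable. The wrapper name, exception choices, and event names are assumptions; litellm.py line 147 itself is not shown in this diff.

```python
import structlog

logger = structlog.get_logger()

def complete_with_logging(llm, messages, **kwargs):
    """Illustrative wrapper: log failures instead of a blind catch-and-swallow."""
    try:
        return llm.completion(messages, **kwargs)
    except (TimeoutError, ConnectionError) as exc:
        # Narrow, expected transport failures: warn and re-raise for retry logic upstream.
        logger.warning("llm_completion_retryable", error=str(exc))
        raise
    except Exception as exc:  # noqa: BLE001 -- last-resort guard; logged, never swallowed
        logger.error("llm_completion_failed", error=str(exc))
        raise
```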
@observe()
def process(self, goal: str, history: Sequence[Dict[str, Any]]) -> Tuple[str, str | None]:
    logger.debug("goal_preprocessor_entry",

Suggested change:
-    logger.debug("goal_preprocessor_entry",
+    logger.debug("goal_preprocessor_entry", goal=goal, history_length=len(history))
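For illustration, under a JSON-rendering structlog configuration like the sketch above, the collapsed call emits one self-describing event line (field values here are invented):

```python
import structlog

structlog.configure(processors=[structlog.processors.add_log_level,
                                structlog.processors.JSONRenderer()])
logger = structlog.get_logger()

# Invented values, for illustration only.
logger.debug("goal_preprocessor_entry", goal="Book a flight to Dublin", history_length=2)
# prints: {"goal": "Book a flight to Dublin", "history_length": 2,
#          "event": "goal_preprocessor_entry", "level": "debug"}
```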
    Returns:
        The assistant's response text.
    """
    logger.debug("llm_prompt",

Suggested change:
-    logger.debug("llm_prompt",
+    logger.debug("llm_prompt", model=self.model, prompt_preview=content[:100] + "..." if len(content) > 100 else content, kwargs=kwargs)

                 prompt_preview=content[:100] + "..." if len(content) > 100 else content,
                 kwargs=kwargs)
    resp = self.completion([{"role": "user", "content": content}], **kwargs)
    logger.debug("llm_response",

Suggested change:
-    logger.debug("llm_response",
+    logger.debug("llm_response", model=self.model, response_length=len(resp.text), prompt_tokens=resp.prompt_tokens, completion_tokens=resp.completion_tokens)
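The inline `[:100] + "..."` truncation recurs in `llm_prompt` here and in `agent_solve_start` below; a tiny helper would keep those collapsed calls short. The `preview` name is an assumption, not something in this PR:

```python
def preview(text: str, limit: int = 100) -> str:
    """Truncate long strings for log fields, mirroring the inline conditional in the diff."""
    return text[:limit] + "..." if len(text) > limit else text

# e.g. logger.debug("llm_prompt", model=self.model, prompt_preview=preview(content), kwargs=kwargs)
```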
    prompt_tokens, completion_tokens, total_tokens = self._extract_token_usage(resp)

    logger.debug("llm_completion_success",

Suggested change:
-    logger.debug("llm_completion_success",
+    logger.debug("llm_completion_success", model=self.model, prompt_tokens=prompt_tokens, completion_tokens=completion_tokens, total_tokens=total_tokens)
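`_extract_token_usage` is referenced but not shown in this diff; a plausible shape, assuming an OpenAI-style `usage` object on the response (the attribute names are assumptions):

```python
def _extract_token_usage(self, resp) -> tuple[int, int, int]:
    """Illustrative only: pull token counts from an OpenAI-style usage block, defaulting to 0."""
    usage = getattr(resp, "usage", None)
    prompt = getattr(usage, "prompt_tokens", 0) or 0
    completion = getattr(usage, "completion_tokens", 0) or 0
    total = getattr(usage, "total_tokens", 0) or (prompt + completion)
    return prompt, completion, total
```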
    self.max_turns = max_turns
    self.top_k = top_k

    logger.info("react_reasoner_initialized",

Suggested change:
-    logger.info("react_reasoner_initialized",
+    logger.info("react_reasoner_initialized", max_turns=self.max_turns, top_k=self.top_k)
    self.max_retries = max_retries
    self.top_k = top_k

    logger.info("rewoo_reasoner_initialized",

Suggested change:
-    logger.info("rewoo_reasoner_initialized",
+    logger.info("rewoo_reasoner_initialized", max_iterations=self.max_iterations, max_retries=self.max_retries, top_k=self.top_k)
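These one-shot `__init__` events make the effective reasoner configuration visible at construction time. A self-contained sketch of the pattern; the constructor signature and defaults are assumptions reconstructed from the diff context:

```python
import structlog

logger = structlog.get_logger()

class ReactReasoner:
    def __init__(self, max_turns: int = 10, top_k: int = 5):
        self.max_turns = max_turns
        self.top_k = top_k
        # One event per construction: confirms which config actually took effect.
        logger.info("react_reasoner_initialized", max_turns=self.max_turns, top_k=self.top_k)

ReactReasoner(max_turns=20)  # -> react_reasoner_initialized  max_turns=20 top_k=5
```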
    run_id = uuid4().hex
    start_time = time.perf_counter()

    logger.info("agent_solve_start",

Suggested change:
-    logger.info("agent_solve_start",
+    logger.info("agent_solve_start", run_id=run_id, goal_preview=goal[:100] + "..." if len(goal) > 100 else goal)
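Since a `run_id` and `start_time` are already in scope here, structlog's `bind()` could carry the run id into every subsequent event, and a completion event could report duration. Both the `agent_solve_complete` event and the `bind()` usage are suggestions, not part of this PR:

```python
import time
from uuid import uuid4

import structlog

logger = structlog.get_logger()

def solve(goal: str) -> str:
    run_id = uuid4().hex
    start_time = time.perf_counter()
    log = logger.bind(run_id=run_id)  # run_id now rides along on every event below
    log.info("agent_solve_start", goal_preview=goal[:100] + "..." if len(goal) > 100 else goal)
    result = "done"  # reasoning loop elided
    log.info("agent_solve_complete", duration_s=round(time.perf_counter() - start_time, 3))
    return result
```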
-    return [JenticTool(result.model_dump(exclude_none=False)) for result in response.results] if response.results else []
+    results = [JenticTool(result.model_dump(exclude_none=False)) for result in response.results] if response.results else []

(The direct return becomes an assignment so the result count can be logged before returning.)

    logger.info("tool_search_complete",

Suggested change:
-    logger.info("tool_search_complete",
+    logger.info("tool_search_complete", query=query, result_count=len(results), found_tools=len(results) > 0)
    raise ToolExecutionError(result.error, tool)

    logger.info("tool_execution_success",

Suggested change:
-    logger.info("tool_execution_success",
+    logger.info("tool_execution_success", tool_id=tool.id, output_length=len(str(result.output)) if result.output else 0)
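One gap worth noting: the error branch above raises without logging, so only successes produce events. A hedged sketch of mirroring the success log on the failure path; the `tool_execution_failed` event name and the stand-in types are assumptions:

```python
import structlog

logger = structlog.get_logger()

class ToolExecutionError(Exception):
    """Stand-in for the project's error type; the (message, tool) signature is taken from the diff."""
    def __init__(self, message, tool):
        super().__init__(message)
        self.tool = tool

def log_and_check(tool, result):
    if result.error:
        # Failure event mirrors the success event, so failed runs are observable too.
        logger.error("tool_execution_failed", tool_id=tool.id, error=result.error)
        raise ToolExecutionError(result.error, tool)
    logger.info("tool_execution_success",
                tool_id=tool.id,
                output_length=len(str(result.output)) if result.output else 0)
```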
Summary
Adds structured logging to key decision points throughout the agent execution flow.
Fixes #84
Summary by CodeRabbit
Release Notes