Skip to content

Commit 4561622

Browse files
committed
feat: visual prettifying outputs
1 parent 3069614 commit 4561622

File tree

1 file changed

+24
-8
lines changed

1 file changed

+24
-8
lines changed

agentic_rag/a2a_handler.py

Lines changed: 24 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -367,9 +367,13 @@ async def handle_agent_query(self, params: Dict[str, Any]) -> Dict[str, Any]:
367367
368368
Steps:"""
369369

370-
logger.info(f"Calling Planner with model: {model}")
370+
logger.info(f"🎯 Calling Planner with model: {model}")
371+
logger.info(f"📋 System Prompt:\n{system_prompt}")
372+
logger.info(f"💬 User Prompt:\n{user_prompt}")
373+
371374
plan = self._call_ollama_api(model, user_prompt, system_prompt)
372-
logger.info(f"Planner response: {plan[:200]}...")
375+
376+
logger.info(f"✅ Planner response: {plan[:200]}...")
373377

374378
# Extract steps from plan
375379
steps = []
@@ -415,9 +419,13 @@ async def handle_agent_query(self, params: Dict[str, Any]) -> Dict[str, Any]:
415419
416420
Key Findings:"""
417421

418-
logger.info(f"Calling Researcher with model: {model}")
422+
logger.info(f"🔍 Calling Researcher with model: {model}")
423+
logger.info(f"📋 System Prompt:\n{system_prompt}")
424+
logger.info(f"💬 User Prompt:\n{user_prompt}")
425+
419426
summary = self._call_ollama_api(model, user_prompt, system_prompt)
420-
logger.info(f"Researcher response: {summary[:200]}...")
427+
428+
logger.info(f"✅ Researcher response: {summary[:200]}...")
421429

422430
findings = [{"content": summary, "metadata": {"source": "Research Summary"}}]
423431
findings.extend(all_results[:3])
@@ -450,9 +458,13 @@ async def handle_agent_query(self, params: Dict[str, Any]) -> Dict[str, Any]:
450458
451459
Conclusion:"""
452460

453-
logger.info(f"Calling Reasoner with model: {model}")
461+
logger.info(f"🤔 Calling Reasoner with model: {model}")
462+
logger.info(f"📋 System Prompt:\n{system_prompt}")
463+
logger.info(f"💬 User Prompt:\n{user_prompt}")
464+
454465
conclusion = self._call_ollama_api(model, user_prompt, system_prompt)
455-
logger.info(f"Reasoner response: {conclusion[:200]}...")
466+
467+
logger.info(f"✅ Reasoner response: {conclusion[:200]}...")
456468

457469
return {
458470
"conclusion": conclusion,
@@ -481,9 +493,13 @@ async def handle_agent_query(self, params: Dict[str, Any]) -> Dict[str, Any]:
481493
482494
Final Answer:"""
483495

484-
logger.info(f"Calling Synthesizer with model: {model}")
496+
logger.info(f"📝 Calling Synthesizer with model: {model}")
497+
logger.info(f"📋 System Prompt:\n{system_prompt}")
498+
logger.info(f"💬 User Prompt:\n{user_prompt}")
499+
485500
answer = self._call_ollama_api(model, user_prompt, system_prompt)
486-
logger.info(f"Synthesizer response: {answer[:200]}...")
501+
502+
logger.info(f"✅ Synthesizer response: {answer[:200]}...")
487503

488504
return {
489505
"answer": answer,

0 commit comments

Comments (0)