Update #40

Merged: 5 commits, Aug 5, 2025
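This update hardens the chat pipeline against Ollama models that return a plain string instead of a structured dict: gradio_app.py now normalizes string responses and guards the source-formatting loops, local_rag_agent.py applies the same handling to the CoT synthesis result, and a new test_cot_chat.py exercises the chain-of-thought chat path with verbose debug logging.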
54 changes: 32 additions & 22 deletions agentic_rag/gradio_app.py
@@ -177,8 +177,16 @@ def chat(message: str, history: List[List[str]], agent_type: str, use_cot: bool,
response = agent.process_query(message)
print("Query processed successfully")

+ # Handle string responses from Ollama models
+ if isinstance(response, str):
+     response = {
+         "answer": response,
+         "reasoning_steps": [response] if use_cot else [],
+         "context": []
+     }

# Format response with reasoning steps if CoT is enabled
- if use_cot and "reasoning_steps" in response:
+ if use_cot and isinstance(response, dict) and "reasoning_steps" in response:
formatted_response = "🤔 Let me think about this step by step:\n\n"
print("\nChain of Thought Reasoning Steps:")
print("-" * 50)
@@ -195,7 +203,7 @@ def chat(message: str, history: List[List[str]], agent_type: str, use_cot: bool,
# Add final answer
print("\nFinal Answer:")
print("-" * 50)
- final_answer = "\n🎯 Final Answer:\n" + response["answer"]
+ final_answer = "\n🎯 Final Answer:\n" + response.get("answer", "No answer provided")
formatted_response += final_answer
print(final_answer)

@@ -208,43 +216,45 @@ def chat(message: str, history: List[List[str]], agent_type: str, use_cot: bool,
print(sources_text)

for ctx in response["context"]:
- source = ctx["metadata"].get("source", "Unknown")
- if "page_numbers" in ctx["metadata"]:
-     pages = ctx["metadata"].get("page_numbers", [])
-     source_line = f"- {source} (pages: {pages})\n"
- else:
-     file_path = ctx["metadata"].get("file_path", "Unknown")
-     source_line = f"- {source} (file: {file_path})\n"
- formatted_response += source_line
- print(source_line)
+ if isinstance(ctx, dict) and "metadata" in ctx:
+     source = ctx["metadata"].get("source", "Unknown")
+     if "page_numbers" in ctx["metadata"]:
+         pages = ctx["metadata"].get("page_numbers", [])
+         source_line = f"- {source} (pages: {pages})\n"
+     else:
+         file_path = ctx["metadata"].get("file_path", "Unknown")
+         source_line = f"- {source} (file: {file_path})\n"
+     formatted_response += source_line
+     print(source_line)

# Add final formatted response to history
history.append([message, formatted_response])
else:
# For standard response (no CoT)
- formatted_response = response["answer"]
+ formatted_response = response.get("answer", "No answer provided") if isinstance(response, dict) else str(response)
print("\nStandard Response:")
print("-" * 50)
print(formatted_response)

# Add sources if available
- if response.get("context"):
+ if isinstance(response, dict) and response.get("context"):
print("\nSources Used:")
print("-" * 50)
sources_text = "\n\n📚 Sources used:\n"
formatted_response += sources_text
print(sources_text)

for ctx in response["context"]:
- source = ctx["metadata"].get("source", "Unknown")
- if "page_numbers" in ctx["metadata"]:
-     pages = ctx["metadata"].get("page_numbers", [])
-     source_line = f"- {source} (pages: {pages})\n"
- else:
-     file_path = ctx["metadata"].get("file_path", "Unknown")
-     source_line = f"- {source} (file: {file_path})\n"
- formatted_response += source_line
- print(source_line)
+ if isinstance(ctx, dict) and "metadata" in ctx:
+     source = ctx["metadata"].get("source", "Unknown")
+     if "page_numbers" in ctx["metadata"]:
+         pages = ctx["metadata"].get("page_numbers", [])
+         source_line = f"- {source} (pages: {pages})\n"
+     else:
+         file_path = ctx["metadata"].get("file_path", "Unknown")
+         source_line = f"- {source} (file: {file_path})\n"
+     formatted_response += source_line
+     print(source_line)

history.append([message, formatted_response])

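The same defensive source-formatting loop now appears in both the CoT and the standard branch. A sketch of that logic as a shared helper (hypothetical; the PR keeps the two inline copies):

from typing import Any, List

def format_sources(context: List[Any]) -> str:
    # Build the "Sources used" footer, skipping malformed context entries.
    lines = ["\n\n📚 Sources used:\n"]
    for ctx in context:
        if not (isinstance(ctx, dict) and "metadata" in ctx):
            continue  # tolerate entries left over from string-only replies
        meta = ctx["metadata"]
        source = meta.get("source", "Unknown")
        if "page_numbers" in meta:
            lines.append(f"- {source} (pages: {meta.get('page_numbers', [])})\n")
        else:
            lines.append(f"- {source} (file: {meta.get('file_path', 'Unknown')})\n")
    return "".join(lines)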
11 changes: 10 additions & 1 deletion agentic_rag/local_rag_agent.py
@@ -358,8 +358,17 @@ def _process_query_with_cot(self, query: str) -> Dict[str, Any]:
logger.info("Falling back to general response")
return self._generate_general_response(query)

+ # Handle string response from synthesis
+ if isinstance(synthesis_result, str):
+     return {
+         "answer": synthesis_result,
+         "reasoning_steps": reasoning_steps,
+         "context": context
+     }

+ # Handle dictionary response
return {
-     "answer": synthesis_result["answer"],
+     "answer": synthesis_result.get("answer", synthesis_result) if isinstance(synthesis_result, dict) else synthesis_result,
"reasoning_steps": reasoning_steps,
"context": context
}
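After this change, _process_query_with_cot returns the same three-key dict whether synthesis yields a string or a dict. A self-contained sketch of that contract (function name hypothetical):

from typing import Any, Dict, List, Union

def package_synthesis(synthesis_result: Union[str, Dict[str, Any]],
                      reasoning_steps: List[str],
                      context: List[Dict[str, Any]]) -> Dict[str, Any]:
    # Normalize either synthesis shape into the dict the UI layer consumes.
    if isinstance(synthesis_result, dict):
        answer = synthesis_result.get("answer", synthesis_result)
    else:
        answer = synthesis_result
    return {"answer": answer, "reasoning_steps": reasoning_steps, "context": context}

# Both shapes satisfy the same contract:
assert package_synthesis("plain text", [], [])["answer"] == "plain text"
assert package_synthesis({"answer": "structured"}, [], [])["answer"] == "structured"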
132 changes: 132 additions & 0 deletions agentic_rag/tests/test_cot_chat.py
@@ -0,0 +1,132 @@
import sys
import logging
import json
from pathlib import Path

# Add parent directory to path to import modules
sys.path.append(str(Path(__file__).parent.parent))

from gradio_app import chat
from store import VectorStore
from local_rag_agent import LocalRAGAgent

# Configure logging
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s | %(name)s | %(levelname)s | %(message)s',
handlers=[
logging.StreamHandler(sys.stdout)
]
)

logger = logging.getLogger(__name__)

def debug_response_structure(response, prefix=""):
"""Helper function to debug response structure"""
logger.debug(f"{prefix}Response type: {type(response)}")
if isinstance(response, dict):
logger.debug(f"{prefix}Response keys: {list(response.keys())}")
for key, value in response.items():
logger.debug(f"{prefix}Key '{key}' type: {type(value)}")
if isinstance(value, list):
logger.debug(f"{prefix}List length: {len(value)}")
if value and isinstance(value[0], dict):
logger.debug(f"{prefix}First item keys: {list(value[0].keys())}")
elif isinstance(response, str):
logger.debug(f"{prefix}String length: {len(response)}")
logger.debug(f"{prefix}First 100 chars: {response[:100]}")

def test_cot_chat():
"""Test the CoT chat interface with detailed logging"""
try:
# Initialize components
logger.info("Initializing vector store...")
vector_store = VectorStore()

logger.info("Initializing local agent...")
agent = LocalRAGAgent(vector_store, model_name="ollama:phi3", use_cot=True)

# Test message
test_message = "What is self-instruct in AI?"
logger.info(f"Test message: {test_message}")

# Initialize empty chat history
history = []

# Log initial state
logger.info("Initial state:")
logger.info(f"History type: {type(history)}")
logger.info(f"History length: {len(history)}")

# Process the chat
logger.info("Processing chat...")
try:
# Get raw response from agent
logger.info("Getting raw response from agent...")
raw_response = agent.process_query(test_message)
logger.info("Raw response received")
debug_response_structure(raw_response, "Raw response: ")

# Verify response structure
if not isinstance(raw_response, dict):
logger.error(f"Unexpected response type: {type(raw_response)}")
raise TypeError(f"Expected dict response, got {type(raw_response)}")

required_keys = ["answer", "reasoning_steps", "context"]
missing_keys = [key for key in required_keys if key not in raw_response]
if missing_keys:
logger.error(f"Missing required keys in response: {missing_keys}")
raise KeyError(f"Response missing required keys: {missing_keys}")

# Process through chat function
logger.info("Processing through chat function...")
result = chat(
message=test_message,
history=history,
agent_type="ollama:phi3",
use_cot=True,
collection="PDF Collection"
)
logger.info("Chat processing completed")
debug_response_structure(result, "Final result: ")

except Exception as e:
logger.error(f"Error during processing: {str(e)}", exc_info=True)
raise

# Log final state
logger.info("Final state:")
logger.info(f"Result type: {type(result)}")
logger.info(f"Result length: {len(result)}")

# Save debug information to file
debug_info = {
"test_message": test_message,
"raw_response": {
"type": str(type(raw_response)),
"keys": list(raw_response.keys()) if isinstance(raw_response, dict) else None,
"content": str(raw_response)
},
"final_result": {
"type": str(type(result)),
"length": len(result) if isinstance(result, list) else None,
"content": str(result)
},
"history": {
"type": str(type(history)),
"length": len(history),
"content": str(history)
}
}

with open("cot_chat_debug.json", "w") as f:
json.dump(debug_info, f, indent=2)

logger.info("Debug information saved to cot_chat_debug.json")

except Exception as e:
logger.error(f"Test failed: {str(e)}", exc_info=True)
raise

if __name__ == "__main__":
test_cot_chat()
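To run the test directly (assuming the repository layout implied by the diff), execute python agentic_rag/tests/test_cot_chat.py from the repository root. The test presumably requires a local Ollama server with the phi3 model available, and it writes its debug dump to cot_chat_debug.json in the working directory.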