Skip to content

Commit f2c7897

Browse files
authored
Merge pull request #40 from oracle-devrel/update
Update
2 parents 66c091d + 6cdbfdd commit f2c7897

36 files changed

+1294
-727
lines changed

agentic_rag/gradio_app.py

Lines changed: 32 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -177,8 +177,16 @@ def chat(message: str, history: List[List[str]], agent_type: str, use_cot: bool,
177177
response = agent.process_query(message)
178178
print("Query processed successfully")
179179

180+
# Handle string responses from Ollama models
181+
if isinstance(response, str):
182+
response = {
183+
"answer": response,
184+
"reasoning_steps": [response] if use_cot else [],
185+
"context": []
186+
}
187+
180188
# Format response with reasoning steps if CoT is enabled
181-
if use_cot and "reasoning_steps" in response:
189+
if use_cot and isinstance(response, dict) and "reasoning_steps" in response:
182190
formatted_response = "🤔 Let me think about this step by step:\n\n"
183191
print("\nChain of Thought Reasoning Steps:")
184192
print("-" * 50)
@@ -195,7 +203,7 @@ def chat(message: str, history: List[List[str]], agent_type: str, use_cot: bool,
195203
# Add final answer
196204
print("\nFinal Answer:")
197205
print("-" * 50)
198-
final_answer = "\n🎯 Final Answer:\n" + response["answer"]
206+
final_answer = "\n🎯 Final Answer:\n" + response.get("answer", "No answer provided")
199207
formatted_response += final_answer
200208
print(final_answer)
201209

@@ -208,43 +216,45 @@ def chat(message: str, history: List[List[str]], agent_type: str, use_cot: bool,
208216
print(sources_text)
209217

210218
for ctx in response["context"]:
211-
source = ctx["metadata"].get("source", "Unknown")
212-
if "page_numbers" in ctx["metadata"]:
213-
pages = ctx["metadata"].get("page_numbers", [])
214-
source_line = f"- {source} (pages: {pages})\n"
215-
else:
216-
file_path = ctx["metadata"].get("file_path", "Unknown")
217-
source_line = f"- {source} (file: {file_path})\n"
218-
formatted_response += source_line
219-
print(source_line)
219+
if isinstance(ctx, dict) and "metadata" in ctx:
220+
source = ctx["metadata"].get("source", "Unknown")
221+
if "page_numbers" in ctx["metadata"]:
222+
pages = ctx["metadata"].get("page_numbers", [])
223+
source_line = f"- {source} (pages: {pages})\n"
224+
else:
225+
file_path = ctx["metadata"].get("file_path", "Unknown")
226+
source_line = f"- {source} (file: {file_path})\n"
227+
formatted_response += source_line
228+
print(source_line)
220229

221230
# Add final formatted response to history
222231
history.append([message, formatted_response])
223232
else:
224233
# For standard response (no CoT)
225-
formatted_response = response["answer"]
234+
formatted_response = response.get("answer", "No answer provided") if isinstance(response, dict) else str(response)
226235
print("\nStandard Response:")
227236
print("-" * 50)
228237
print(formatted_response)
229238

230239
# Add sources if available
231-
if response.get("context"):
240+
if isinstance(response, dict) and response.get("context"):
232241
print("\nSources Used:")
233242
print("-" * 50)
234243
sources_text = "\n\n📚 Sources used:\n"
235244
formatted_response += sources_text
236245
print(sources_text)
237246

238247
for ctx in response["context"]:
239-
source = ctx["metadata"].get("source", "Unknown")
240-
if "page_numbers" in ctx["metadata"]:
241-
pages = ctx["metadata"].get("page_numbers", [])
242-
source_line = f"- {source} (pages: {pages})\n"
243-
else:
244-
file_path = ctx["metadata"].get("file_path", "Unknown")
245-
source_line = f"- {source} (file: {file_path})\n"
246-
formatted_response += source_line
247-
print(source_line)
248+
if isinstance(ctx, dict) and "metadata" in ctx:
249+
source = ctx["metadata"].get("source", "Unknown")
250+
if "page_numbers" in ctx["metadata"]:
251+
pages = ctx["metadata"].get("page_numbers", [])
252+
source_line = f"- {source} (pages: {pages})\n"
253+
else:
254+
file_path = ctx["metadata"].get("file_path", "Unknown")
255+
source_line = f"- {source} (file: {file_path})\n"
256+
formatted_response += source_line
257+
print(source_line)
248258

249259
history.append([message, formatted_response])
250260

agentic_rag/local_rag_agent.py

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -358,8 +358,17 @@ def _process_query_with_cot(self, query: str) -> Dict[str, Any]:
358358
logger.info("Falling back to general response")
359359
return self._generate_general_response(query)
360360

361+
# Handle string response from synthesis
362+
if isinstance(synthesis_result, str):
363+
return {
364+
"answer": synthesis_result,
365+
"reasoning_steps": reasoning_steps,
366+
"context": context
367+
}
368+
369+
# Handle dictionary response
361370
return {
362-
"answer": synthesis_result["answer"],
371+
"answer": synthesis_result.get("answer", synthesis_result) if isinstance(synthesis_result, dict) else synthesis_result,
363372
"reasoning_steps": reasoning_steps,
364373
"context": context
365374
}

agentic_rag/tests/test_cot_chat.py

Lines changed: 132 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,132 @@
1+
import sys
2+
import logging
3+
import json
4+
from pathlib import Path
5+
6+
# Add parent directory to path to import modules
7+
sys.path.append(str(Path(__file__).parent.parent))
8+
9+
from gradio_app import chat
10+
from store import VectorStore
11+
from local_rag_agent import LocalRAGAgent
12+
13+
# Configure logging
14+
logging.basicConfig(
15+
level=logging.DEBUG,
16+
format='%(asctime)s | %(name)s | %(levelname)s | %(message)s',
17+
handlers=[
18+
logging.StreamHandler(sys.stdout)
19+
]
20+
)
21+
22+
logger = logging.getLogger(__name__)
23+
24+
def debug_response_structure(response, prefix=""):
    """Helper function to debug response structure"""
    logger.debug(f"{prefix}Response type: {type(response)}")

    # Strings: log length and a short preview, nothing else to inspect.
    if isinstance(response, str):
        logger.debug(f"{prefix}String length: {len(response)}")
        logger.debug(f"{prefix}First 100 chars: {response[:100]}")
        return

    if not isinstance(response, dict):
        return

    # Dicts: log the key set, then the type of every value; for list
    # values also log their length and the keys of the first dict item.
    logger.debug(f"{prefix}Response keys: {list(response.keys())}")
    for key, value in response.items():
        logger.debug(f"{prefix}Key '{key}' type: {type(value)}")
        if not isinstance(value, list):
            continue
        logger.debug(f"{prefix}List length: {len(value)}")
        if value and isinstance(value[0], dict):
            logger.debug(f"{prefix}First item keys: {list(value[0].keys())}")
def test_cot_chat():
    """Test the CoT chat interface with detailed logging"""
    try:
        # Set up the vector store and a CoT-enabled local agent.
        logger.info("Initializing vector store...")
        store = VectorStore()

        logger.info("Initializing local agent...")
        agent = LocalRAGAgent(store, model_name="ollama:phi3", use_cot=True)

        query = "What is self-instruct in AI?"
        logger.info(f"Test message: {query}")

        # Start from an empty conversation.
        history = []
        logger.info("Initial state:")
        logger.info(f"History type: {type(history)}")
        logger.info(f"History length: {len(history)}")

        logger.info("Processing chat...")
        try:
            # First query the agent directly so the raw structure can be
            # validated before it goes through the Gradio chat wrapper.
            logger.info("Getting raw response from agent...")
            raw_response = agent.process_query(query)
            logger.info("Raw response received")
            debug_response_structure(raw_response, "Raw response: ")

            if not isinstance(raw_response, dict):
                logger.error(f"Unexpected response type: {type(raw_response)}")
                raise TypeError(f"Expected dict response, got {type(raw_response)}")

            missing_keys = [
                key
                for key in ["answer", "reasoning_steps", "context"]
                if key not in raw_response
            ]
            if missing_keys:
                logger.error(f"Missing required keys in response: {missing_keys}")
                raise KeyError(f"Response missing required keys: {missing_keys}")

            # Now run the same message through the UI-facing chat function.
            logger.info("Processing through chat function...")
            result = chat(
                message=query,
                history=history,
                agent_type="ollama:phi3",
                use_cot=True,
                collection="PDF Collection",
            )
            logger.info("Chat processing completed")
            debug_response_structure(result, "Final result: ")
        except Exception as e:
            logger.error(f"Error during processing: {str(e)}", exc_info=True)
            raise

        logger.info("Final state:")
        logger.info(f"Result type: {type(result)}")
        logger.info(f"Result length: {len(result)}")

        # Persist a structured snapshot of everything for offline inspection.
        debug_info = {
            "test_message": query,
            "raw_response": {
                "type": str(type(raw_response)),
                "keys": list(raw_response.keys()) if isinstance(raw_response, dict) else None,
                "content": str(raw_response),
            },
            "final_result": {
                "type": str(type(result)),
                "length": len(result) if isinstance(result, list) else None,
                "content": str(result),
            },
            "history": {
                "type": str(type(history)),
                "length": len(history),
                "content": str(history),
            },
        }

        with open("cot_chat_debug.json", "w") as f:
            json.dump(debug_info, f, indent=2)

        logger.info("Debug information saved to cot_chat_debug.json")
    except Exception as e:
        logger.error(f"Test failed: {str(e)}", exc_info=True)
        raise
# Allow running this test directly as a script.
if __name__ == "__main__":
    test_cot_chat()

0 commit comments

Comments (0)