
Commit 27f514d

feat: Enhance CoT display in Gradio interface
- Add multi-agent CoT system integration
- Add detailed console logging
- Improve response formatting with emojis
- Show reasoning steps, final answer, and sources
1 parent ed54773 commit 27f514d

File tree

1 file changed: +66 -12 lines changed


agentic_rag/gradio_app.py

Lines changed: 66 additions & 12 deletions
@@ -79,10 +79,17 @@ def process_repo(repo_path: str) -> str:
 def chat(message: str, history: List[List[str]], agent_type: str, use_cot: bool, language: str, collection: str) -> List[List[str]]:
     """Process chat message using selected agent and collection"""
     try:
+        print("\n" + "="*50)
+        print(f"New message received: {message}")
+        print(f"Agent: {agent_type}, CoT: {use_cot}, Language: {language}, Collection: {collection}")
+        print("="*50 + "\n")
+
         # Select appropriate agent
         agent = local_agent if agent_type == "Local (Mistral)" else openai_agent
         if not agent:
-            return history + [[message, "Agent not available. Please check your configuration."]]
+            response_text = "Agent not available. Please check your configuration."
+            print(f"Error: {response_text}")
+            return history + [[message, response_text]]

         # Convert language selection to language code
         lang_code = "es" if language == "Spanish" else "en"
@@ -91,21 +98,68 @@ def chat(message: str, history: List[List[str]], agent_type: str, use_cot: bool,
         agent.use_cot = use_cot
         agent.language = lang_code

-        # Process query based on selected collection
-        if collection == "PDF Collection":
-            context = vector_store.query_pdf_collection(message)
-            response = agent._generate_response(message, context) if context else agent._generate_general_response(message)
-        elif collection == "Repository Collection":
-            context = vector_store.query_repo_collection(message)
-            response = agent._generate_response(message, context) if context else agent._generate_general_response(message)
-        else:  # General Knowledge
-            response = agent._generate_general_response(message)
+        # Process query and get response
+        print("Processing query...")
+        response = agent.process_query(message)
+        print("Query processed successfully")
+
+        # Format response with reasoning steps if CoT is enabled
+        if use_cot and "reasoning_steps" in response:
+            formatted_response = "🤔 Let me think about this step by step:\n\n"
+            print("\nChain of Thought Reasoning Steps:")
+            print("-" * 50)
+
+            # Add each reasoning step
+            for i, step in enumerate(response["reasoning_steps"], 1):
+                step_text = f"Step {i}:\n{step}\n"
+                formatted_response += step_text
+                print(step_text)
+
+            # Add final answer
+            print("\nFinal Answer:")
+            print("-" * 50)
+            final_answer = "🎯 Final Answer:\n" + response["answer"]
+            formatted_response += final_answer
+            print(final_answer)
+
+            # Add sources if available
+            if response.get("context"):
+                print("\nSources Used:")
+                print("-" * 50)
+                sources_text = "\n📚 Sources used:\n"
+                formatted_response += sources_text
+                print(sources_text)
+
+                for ctx in response["context"]:
+                    source = ctx["metadata"].get("source", "Unknown")
+                    if "page_numbers" in ctx["metadata"]:
+                        pages = ctx["metadata"].get("page_numbers", [])
+                        source_line = f"- {source} (pages: {pages})\n"
+                    else:
+                        file_path = ctx["metadata"].get("file_path", "Unknown")
+                        source_line = f"- {source} (file: {file_path})\n"
+                    formatted_response += source_line
+                    print(source_line)
+        else:
+            formatted_response = response["answer"]
+            print("\nStandard Response:")
+            print("-" * 50)
+            print(formatted_response)
+
+        print("\n" + "="*50)
+        print("Response complete")
+        print("="*50 + "\n")

         # Return updated history with new message pair
-        history.append([message, response["answer"]])
+        history.append([message, formatted_response])
         return history
     except Exception as e:
-        history.append([message, f"Error processing query: {str(e)}"])
+        error_msg = f"Error processing query: {str(e)}"
+        print(f"\nError occurred:")
+        print("-" * 50)
+        print(error_msg)
+        print("="*50 + "\n")
+        history.append([message, error_msg])
         return history

 def create_interface():
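
Note on the payload consumed above: the new formatting branch assumes agent.process_query(message) returns a dict with an "answer" string, an optional "reasoning_steps" list, and an optional "context" list whose items carry a "metadata" dict with "source" plus either "page_numbers" or "file_path". The sketch below is a hypothetical example of that shape inferred from the keys used in this diff, not output captured from the multi-agent CoT system; all values and file names are made up.

# Hypothetical response shape expected by the updated chat() formatting logic.
# Values and file names are illustrative only.
response = {
    "answer": "Retrieval-augmented generation grounds answers in retrieved context.",
    "reasoning_steps": [
        "Interpret the user's question.",
        "Retrieve relevant chunks from the selected collection.",
        "Compose an answer grounded in the retrieved context.",
    ],
    "context": [
        {"metadata": {"source": "example.pdf", "page_numbers": [3, 4]}},        # PDF-style source
        {"metadata": {"source": "example.py", "file_path": "repo/example.py"}}, # repo-style source
    ],
}

With use_cot enabled, chat() would render such a payload as the "🤔 Let me think about this step by step" steps, the "🎯 Final Answer" section, and a "📚 Sources used" list with one line per context entry.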
