Skip to content

Commit 314c7d6

Browse files
committed
feat: add Spanish language support
1 parent f934e53 commit 314c7d6

File tree

3 files changed

+135
-29
lines changed

3 files changed

+135
-29
lines changed

agentic_rag/gradio_app.py

Lines changed: 29 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -60,16 +60,17 @@ def process_url(url: str) -> str:
6060
except Exception as e:
6161
return f"✗ Error processing URL: {str(e)}"
6262

63-
def chat(message: str, history: List[List[str]], agent_type: str, use_cot: bool) -> List[List[str]]:
63+
def chat(message: str, history: List[List[str]], agent_type: str, use_cot: bool, language: str) -> List[List[str]]:
6464
"""Process chat message using selected agent"""
6565
try:
6666
# Select appropriate agent
6767
agent = local_agent if agent_type == "Local (Mistral)" else openai_agent
6868
if not agent:
6969
return history + [[message, "Agent not available. Please check your configuration."]]
7070

71-
# Set CoT option
71+
# Set CoT option and language
7272
agent.use_cot = use_cot
73+
agent.language = language
7374

7475
# Process query
7576
response = agent.process_query(message)
@@ -103,20 +104,38 @@ def create_interface():
103104
url_output = gr.Textbox(label="URL Processing Output")
104105

105106
with gr.Tab("Chat Interface"):
106-
agent_dropdown = gr.Dropdown(
107-
choices=["Local (Mistral)", "OpenAI"] if openai_key else ["Local (Mistral)"],
108-
value="Local (Mistral)",
109-
label="Select Agent"
110-
)
111-
cot_checkbox = gr.Checkbox(label="Enable Chain of Thought Reasoning", value=False)
107+
with gr.Row():
108+
with gr.Column():
109+
agent_dropdown = gr.Dropdown(
110+
choices=["Local (Mistral)", "OpenAI"] if openai_key else ["Local (Mistral)"],
111+
value="Local (Mistral)",
112+
label="Select Agent"
113+
)
114+
cot_checkbox = gr.Checkbox(label="Enable Chain of Thought Reasoning", value=False)
115+
with gr.Column():
116+
language_dropdown = gr.Dropdown(
117+
choices=["English", "Spanish"],
118+
value="English",
119+
label="Response Language"
120+
)
112121
chatbot = gr.Chatbot(height=400)
113122
msg = gr.Textbox(label="Your Message")
114123
clear = gr.Button("Clear Chat")
115124

116125
# Event handlers
117126
pdf_button.click(process_pdf, inputs=[pdf_file], outputs=[pdf_output])
118127
url_button.click(process_url, inputs=[url_input], outputs=[url_output])
119-
msg.submit(chat, inputs=[msg, chatbot, agent_dropdown, cot_checkbox], outputs=[chatbot])
128+
msg.submit(
129+
chat,
130+
inputs=[
131+
msg,
132+
chatbot,
133+
agent_dropdown,
134+
cot_checkbox,
135+
language_dropdown
136+
],
137+
outputs=[chatbot]
138+
)
120139
clear.click(lambda: None, None, chatbot, queue=False)
121140

122141
# Instructions
@@ -131,6 +150,7 @@ def create_interface():
131150
2. **Chat Interface**:
132151
- Select your preferred agent (Local Mistral or OpenAI)
133152
- Toggle Chain of Thought reasoning for more detailed responses
153+
- Choose your preferred response language (English or Spanish)
134154
- Chat with your documents using natural language
135155
136156
Note: OpenAI agent requires an API key in `.env` file

agentic_rag/local_rag_agent.py

Lines changed: 53 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -29,10 +29,11 @@ class QueryAnalysis(BaseModel):
2929
)
3030

3131
class LocalRAGAgent:
32-
def __init__(self, vector_store: VectorStore, model_name: str = "mistralai/Mistral-7B-Instruct-v0.2", use_cot: bool = False):
32+
def __init__(self, vector_store: VectorStore, model_name: str = "mistralai/Mistral-7B-Instruct-v0.2", use_cot: bool = False, language: str = "en"):
3333
"""Initialize local RAG agent with vector store and local LLM"""
3434
self.vector_store = vector_store
3535
self.use_cot = use_cot
36+
self.language = language
3637

3738
# Load HuggingFace token from config
3839
try:
@@ -195,8 +196,32 @@ def _generate_response(self, query: str, context: List[Dict[str, Any]]) -> Dict[
195196
for i, item in enumerate(context)])
196197

197198
logger.info("Building prompt with context...")
198-
if self.use_cot:
199-
prompt = f"""Answer the following query using the provided context and chain of thought reasoning.
199+
if self.language == "es":
200+
if self.use_cot:
201+
prompt = f"""Responde a la siguiente consulta en español usando el contexto proporcionado y razonamiento paso a paso.
202+
Primero divide el problema en pasos, luego usa el contexto para resolver cada paso y llegar a la respuesta final.
203+
Si el contexto no contiene suficiente información para responder con precisión, dilo explícitamente.
204+
205+
Contexto:
206+
{context_str}
207+
208+
Consulta: {query}
209+
210+
Pensemos en esto paso a paso:"""
211+
else:
212+
prompt = f"""Responde a la siguiente consulta en español usando el contexto proporcionado.
213+
Si el contexto no contiene suficiente información para responder con precisión,
214+
dilo explícitamente.
215+
216+
Contexto:
217+
{context_str}
218+
219+
Consulta: {query}
220+
221+
Respuesta:"""
222+
else:
223+
if self.use_cot:
224+
prompt = f"""Answer the following query using the provided context and chain of thought reasoning.
200225
First break down the problem into steps, then use the context to solve each step and arrive at the final answer.
201226
If the context doesn't contain enough information to answer accurately, say so explicitly.
202227
@@ -206,8 +231,8 @@ def _generate_response(self, query: str, context: List[Dict[str, Any]]) -> Dict[
206231
Query: {query}
207232
208233
Let's think about this step by step:"""
209-
else:
210-
prompt = f"""Answer the following query using the provided context.
234+
else:
235+
prompt = f"""Answer the following query using the provided context.
211236
If the context doesn't contain enough information to answer accurately,
212237
say so explicitly.
213238
@@ -231,16 +256,32 @@ def _generate_general_response(self, query: str) -> Dict[str, Any]:
231256
"""Generate a response using general knowledge when no context is available"""
232257
logger.info("Generating general knowledge response...")
233258

234-
if self.use_cot:
235-
prompt = f"""You are a helpful AI assistant. While I don't have specific information from my document collection about this query, I'll share what I generally know about it.
259+
if self.language == "es":
260+
if self.use_cot:
261+
prompt = f"""Eres un asistente de IA útil. Si bien no tengo información específica de mi colección de documentos sobre esta consulta, compartiré lo que sé al respecto.
262+
263+
Por favor, responde a la siguiente consulta en español usando razonamiento paso a paso:
264+
265+
Consulta: {query}
266+
267+
Pensemos en esto paso a paso:"""
268+
else:
269+
prompt = f"""Eres un asistente de IA útil. Si bien no tengo información específica de mi colección de documentos sobre esta consulta, compartiré lo que sé al respecto.
270+
271+
Consulta: {query}
272+
273+
Respuesta:"""
274+
else:
275+
if self.use_cot:
276+
prompt = f"""You are a helpful AI assistant. While I don't have specific information from my document collection about this query, I'll share what I know about it.
236277
237278
Please answer the following query using chain of thought reasoning:
238279
239280
Query: {query}
240281
241282
Let's think about this step by step:"""
242-
else:
243-
prompt = f"""You are a helpful AI assistant. While I don't have specific information from my document collection about this query, I'll share what I generally know about it.
283+
else:
284+
prompt = f"""You are a helpful AI assistant. While I don't have specific information from my document collection about this query, I'll share what I know about it.
244285
245286
Query: {query}
246287
@@ -250,8 +291,10 @@ def _generate_general_response(self, query: str) -> Dict[str, Any]:
250291
response = self._generate_text(prompt, max_length=1024)
251292
logger.info("Response generation complete")
252293

294+
prefix = "No encontré información específica en mis documentos, pero esto es lo que sé al respecto:\n\n" if self.language == "es" else "I didn't find specific information in my documents, but here's what I know about it:\n\n"
295+
253296
return {
254-
"answer": "I didn't find specific information in my documents, but here's what I know about it:\n\n" + response,
297+
"answer": prefix + response,
255298
"context": []
256299
}
257300

agentic_rag/rag_agent.py

Lines changed: 53 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -21,10 +21,11 @@ class QueryAnalysis(BaseModel):
2121
)
2222

2323
class RAGAgent:
24-
def __init__(self, vector_store: VectorStore, openai_api_key: str, use_cot: bool = False):
24+
def __init__(self, vector_store: VectorStore, openai_api_key: str, use_cot: bool = False, language: str = "en"):
2525
"""Initialize RAG agent with vector store and LLM"""
2626
self.vector_store = vector_store
2727
self.use_cot = use_cot
28+
self.language = language
2829
self.llm = ChatOpenAI(
2930
model="gpt-4-turbo-preview",
3031
temperature=0,
@@ -87,8 +88,32 @@ def _generate_response(self, query: str, context: List[Dict[str, Any]]) -> Dict[
8788
context_str = "\n\n".join([f"Context {i+1}:\n{item['content']}"
8889
for i, item in enumerate(context)])
8990

90-
if self.use_cot:
91-
template = """Answer the following query using the provided context and chain of thought reasoning.
91+
if self.language == "es":
92+
if self.use_cot:
93+
template = """Responde a la siguiente consulta en español usando el contexto proporcionado y razonamiento paso a paso.
94+
Primero divide el problema en pasos, luego usa el contexto para resolver cada paso y llegar a la respuesta final.
95+
Si el contexto no contiene suficiente información para responder con precisión, dilo explícitamente.
96+
97+
Contexto:
98+
{context}
99+
100+
Consulta: {query}
101+
102+
Pensemos en esto paso a paso:"""
103+
else:
104+
template = """Responde a la siguiente consulta en español usando el contexto proporcionado.
105+
Si el contexto no contiene suficiente información para responder con precisión,
106+
dilo explícitamente.
107+
108+
Contexto:
109+
{context}
110+
111+
Consulta: {query}
112+
113+
Respuesta:"""
114+
else:
115+
if self.use_cot:
116+
template = """Answer the following query using the provided context and chain of thought reasoning.
92117
First break down the problem into steps, then use the context to solve each step and arrive at the final answer.
93118
If the context doesn't contain enough information to answer accurately, say so explicitly.
94119
@@ -98,8 +123,8 @@ def _generate_response(self, query: str, context: List[Dict[str, Any]]) -> Dict[
98123
Query: {query}
99124
100125
Let's think about this step by step:"""
101-
else:
102-
template = """Answer the following query using the provided context.
126+
else:
127+
template = """Answer the following query using the provided context.
103128
If the context doesn't contain enough information to answer accurately,
104129
say so explicitly.
105130
@@ -122,16 +147,32 @@ def _generate_response(self, query: str, context: List[Dict[str, Any]]) -> Dict[
122147

123148
def _generate_general_response(self, query: str) -> Dict[str, Any]:
124149
"""Generate a response using general knowledge when no context is available"""
125-
if self.use_cot:
126-
template = """You are a helpful AI assistant. While I don't have specific information from my document collection about this query, I'll share what I generally know about it.
150+
if self.language == "es":
151+
if self.use_cot:
152+
template = """Eres un asistente de IA útil. Si bien no tengo información específica de mi colección de documentos sobre esta consulta, compartiré lo que sé al respecto.
153+
154+
Por favor, responde a la siguiente consulta en español usando razonamiento paso a paso:
155+
156+
Consulta: {query}
157+
158+
Pensemos en esto paso a paso:"""
159+
else:
160+
template = """Eres un asistente de IA útil. Si bien no tengo información específica de mi colección de documentos sobre esta consulta, compartiré lo que sé al respecto.
161+
162+
Consulta: {query}
163+
164+
Respuesta:"""
165+
else:
166+
if self.use_cot:
167+
template = """You are a helpful AI assistant. While I don't have specific information from my document collection about this query, I'll share what I know about it.
127168
128169
Please answer the following query using chain of thought reasoning:
129170
130171
Query: {query}
131172
132173
Let's think about this step by step:"""
133-
else:
134-
template = """You are a helpful AI assistant. While I don't have specific information from my document collection about this query, I'll share what I generally know about it.
174+
else:
175+
template = """You are a helpful AI assistant. While I don't have specific information from my document collection about this query, I'll share what I know about it.
135176
136177
Query: {query}
137178
@@ -141,8 +182,10 @@ def _generate_general_response(self, query: str) -> Dict[str, Any]:
141182
messages = prompt.format_messages(query=query)
142183
response = self.llm.invoke(messages)
143184

185+
prefix = "No encontré información específica en mis documentos, pero esto es lo que sé al respecto:\n\n" if self.language == "es" else "I didn't find specific information in my documents, but here's what I know about it:\n\n"
186+
144187
return {
145-
"answer": "I didn't find specific information in my documents, but here's what I know about it:\n\n" + response.content,
188+
"answer": prefix + response.content,
146189
"context": []
147190
}
148191

0 commit comments

Comments (0)