
Commit 3069614

fix: models not registering with prompt + query in a2a tests
1 parent: fd63db6

File tree

2 files changed: +167 -49 lines

agentic_rag/a2a_handler.py

Lines changed: 163 additions & 48 deletions
@@ -73,6 +73,18 @@ def _load_agent_endpoints(self) -> Dict[str, str]:
             "synthesizer_url": "http://localhost:8000"
         }
 
+    def _load_specialized_agent_model(self) -> str:
+        """Load specialized agent model from config"""
+        try:
+            with open('config.yaml', 'r') as f:
+                config = yaml.safe_load(f)
+            model = config.get('SPECIALIZED_AGENT_MODEL', 'deepseek-r1')
+            logger.info(f"Loaded specialized agent model: {model}")
+            return model
+        except Exception as e:
+            logger.warning(f"Could not load model from config: {str(e)}")
+            return "deepseek-r1"  # Default model
+
     def _register_self(self):
         """Register this agent in the agent registry"""
         try:
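
The new helper re-reads config.yaml on every call and falls back to a default on any failure. A standalone sketch of the same lookup, assuming config.yaml sits in the working directory and uses the key shown in config_example.yaml below:

import yaml

try:
    with open("config.yaml", "r") as f:
        cfg = yaml.safe_load(f) or {}
    model = cfg.get("SPECIALIZED_AGENT_MODEL", "deepseek-r1")
except OSError:
    model = "deepseek-r1"  # same default the handler falls back to

print(model)  # e.g. "deepseek-r1"
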
@@ -268,53 +280,41 @@ async def handle_document_upload(self, params: Dict[str, Any]) -> Dict[str, Any]:
                 "message": f"Unsupported document type: {upload_params.document_type}"
             }
 
-    def _get_specialized_agent(self, agent_id: str):
-        """Get or create a specialized agent instance"""
-        if agent_id in self._specialized_agents:
-            return self._specialized_agents[agent_id]
-
-        # Import agent factory
-        from agents.agent_factory import create_agents
-        from langchain_openai import ChatOpenAI
-        import os
-
-        # Create LLM (using OpenAI for now, can be configured)
-        openai_key = os.getenv("OPENAI_API_KEY")
-        if not openai_key:
-            # Try Ollama as fallback
-            try:
-                from langchain_community.llms import Ollama
-                llm = Ollama(model="qwq")
-                logger.info("Using Ollama qwq for specialized agents")
-            except Exception as e:
-                logger.error(f"Could not initialize LLM for specialized agents: {str(e)}")
-                raise ValueError("No LLM available for specialized agents")
-        else:
-            llm = ChatOpenAI(model="gpt-4", temperature=0.7, api_key=openai_key)
-            logger.info("Using OpenAI GPT-4 for specialized agents")
+    def _call_ollama_api(self, model: str, prompt: str, system_prompt: str = None) -> str:
+        """Call Ollama API directly for inference"""
+        import requests
 
-        # Create all agents
-        agents = create_agents(llm, self.vector_store)
+        url = "http://127.0.0.1:11434/api/generate"
 
-        # Map agent_id to agent type
-        agent_map = {
-            "planner_agent_v1": agents["planner"],
-            "researcher_agent_v1": agents["researcher"],
-            "reasoner_agent_v1": agents["reasoner"],
-            "synthesizer_agent_v1": agents["synthesizer"]
+        payload = {
+            "model": model,
+            "prompt": prompt,
+            "stream": False,
+            "options": {
+                "temperature": 0.7,
+                "num_predict": 512
+            }
         }
 
-        if agent_id not in agent_map:
-            raise ValueError(f"Unknown agent ID: {agent_id}")
+        if system_prompt:
+            payload["system"] = system_prompt
 
-        # Cache the agent
-        self._specialized_agents[agent_id] = agent_map[agent_id]
-        logger.info(f"Created and cached specialized agent: {agent_id}")
-
-        return agent_map[agent_id]
+        try:
+            response = requests.post(url, json=payload, timeout=120)
+            response.raise_for_status()
+            result = response.json()
+            return result.get("response", "")
+        except Exception as e:
+            logger.error(f"Error calling Ollama API: {str(e)}")
+            raise
+
+    def _get_specialized_agent_card(self, agent_id: str) -> dict:
+        """Get the agent card for a specialized agent"""
+        from specialized_agent_cards import get_agent_card_by_id
+        return get_agent_card_by_id(agent_id, self.agent_endpoints)
 
     async def handle_agent_query(self, params: Dict[str, Any]) -> Dict[str, Any]:
-        """Handle agent query requests - routes to specialized agents"""
+        """Handle agent query requests - routes to specialized agents using Ollama API"""
         try:
             # Extract agent_id from params
             agent_id = params.get("agent_id")
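
For reference, the payload built in _call_ollama_api matches Ollama's standard /api/generate endpoint, so the call can be exercised outside the handler. A minimal sketch, assuming a local Ollama server on the default port with the target model already pulled (model name and prompt here are illustrative):

import requests

payload = {
    "model": "deepseek-r1",  # any locally pulled model
    "system": "You are a concise technical assistant.",
    "prompt": "Summarize retrieval-augmented generation in one sentence.",
    "stream": False,
    "options": {"temperature": 0.7, "num_predict": 512},
}
resp = requests.post("http://127.0.0.1:11434/api/generate", json=payload, timeout=120)
resp.raise_for_status()
print(resp.json().get("response", ""))
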
@@ -324,41 +324,136 @@ async def handle_agent_query(self, params: Dict[str, Any]) -> Dict[str, Any]:
                     "message": "Must specify which agent to query"
                 }
 
-            # Get the specialized agent
-            agent = self._get_specialized_agent(agent_id)
+            # Get the agent card to extract personality and role
+            agent_card = self._get_specialized_agent_card(agent_id)
+            if not agent_card:
+                return {
+                    "error": f"Agent card not found for {agent_id}",
+                    "message": "Agent not registered"
+                }
+
+            # Extract agent metadata
+            metadata = agent_card.get("metadata", {})
+            personality = metadata.get("personality", "")
+            role = metadata.get("role", "")
+            expertise = metadata.get("expertise", [])
 
             # Extract query parameters
             query = params.get("query")
             step = params.get("step")
             context = params.get("context", [])
             reasoning_steps = params.get("reasoning_steps", [])
 
+            # Get model from config
+            model = self._load_specialized_agent_model()
+
             # Route to appropriate agent method based on agent_id
             if agent_id == "planner_agent_v1":
                 # Planner: Break down the query into steps
-                plan = agent.plan(query, context)
+                system_prompt = f"""You are a {role} with expertise in {', '.join(expertise)}.
+Your personality: {personality}
+
+Your task is to break down complex problems into 3-4 clear, manageable steps for systematic problem-solving.
+Be strategic, analytical, and methodical in your planning."""
+
+                user_prompt = f"""Query: {query}
+
+Break down this query into 3-4 clear, actionable steps. Format your response as:
+
+Step 1: [First step description]
+Step 2: [Second step description]
+Step 3: [Third step description]
+Step 4: [Fourth step description] (if needed)
+
+Steps:"""
+
+                logger.info(f"Calling Planner with model: {model}")
+                plan = self._call_ollama_api(model, user_prompt, system_prompt)
+                logger.info(f"Planner response: {plan[:200]}...")
+
+                # Extract steps from plan
+                steps = []
+                for line in plan.split("\n"):
+                    line = line.strip()
+                    if line and (line.startswith("Step") or line.startswith("-") or (len(line) > 10 and not line.startswith("Your"))):
+                        # Clean up the step
+                        clean_step = line.replace("Step 1:", "").replace("Step 2:", "").replace("Step 3:", "").replace("Step 4:", "").replace("-", "").strip()
+                        if clean_step and len(clean_step) > 10:
+                            steps.append(clean_step)
+
                 return {
                     "plan": plan,
-                    "steps": plan.split("\n") if plan else [],
+                    "steps": steps[:4],  # Limit to 4 steps
                     "agent_id": agent_id
                 }
 
             elif agent_id == "researcher_agent_v1":
                 # Researcher: Gather information for a specific step
                 if not step:
                     return {"error": "step is required for researcher agent"}
-                findings = agent.research(query, step)
+
+                system_prompt = f"""You are a {role} with expertise in {', '.join(expertise)}.
+Your personality: {personality}
+
+Your task is to gather and analyze relevant information from the provided context, extracting key findings for each research step."""
+
+                # Query vector store for relevant information
+                logger.info(f"Researching for step: {step}")
+                pdf_results = self.vector_store.query_pdf_collection(query) if self.vector_store else []
+                repo_results = self.vector_store.query_repo_collection(query) if self.vector_store else []
+                all_results = pdf_results + repo_results
+
+                context_str = "\n\n".join([f"Source {i+1}:\n{item['content']}" for i, item in enumerate(all_results[:3])])
+
+                user_prompt = f"""Original Query: {query}
+Research Step: {step}
+
+Context from knowledge base:
+{context_str if context_str else "No specific context found in knowledge base."}
+
+Based on this context, extract and summarize key findings relevant to this research step. Be thorough and detail-oriented.
+
+Key Findings:"""
+
+                logger.info(f"Calling Researcher with model: {model}")
+                summary = self._call_ollama_api(model, user_prompt, system_prompt)
+                logger.info(f"Researcher response: {summary[:200]}...")
+
+                findings = [{"content": summary, "metadata": {"source": "Research Summary"}}]
+                findings.extend(all_results[:3])
+
                 return {
                     "findings": findings,
-                    "summary": findings[0]["content"] if findings else "",
+                    "summary": summary,
                     "agent_id": agent_id
                 }
 
             elif agent_id == "reasoner_agent_v1":
                 # Reasoner: Apply logical reasoning to the step
                 if not step:
                     return {"error": "step is required for reasoner agent"}
-                conclusion = agent.reason(query, step, context)
+
+                system_prompt = f"""You are a {role} with expertise in {', '.join(expertise)}.
+Your personality: {personality}
+
+Your task is to apply logical reasoning and analysis to information, drawing clear conclusions for each step."""
+
+                context_str = "\n\n".join([f"Context {i+1}:\n{item.get('content', str(item))}" for i, item in enumerate(context)])
+
+                user_prompt = f"""Original Query: {query}
+Reasoning Step: {step}
+
+Research Findings:
+{context_str}
+
+Analyze this information and draw a clear, logical conclusion for this step. Be critical and analytical in your reasoning.
+
+Conclusion:"""
+
+                logger.info(f"Calling Reasoner with model: {model}")
+                conclusion = self._call_ollama_api(model, user_prompt, system_prompt)
+                logger.info(f"Reasoner response: {conclusion[:200]}...")
+
                 return {
                     "conclusion": conclusion,
                     "reasoning": conclusion,
@@ -369,7 +464,27 @@ async def handle_agent_query(self, params: Dict[str, Any]) -> Dict[str, Any]:
                 # Synthesizer: Combine all reasoning steps into final answer
                 if not reasoning_steps:
                     return {"error": "reasoning_steps is required for synthesizer agent"}
-                answer = agent.synthesize(query, reasoning_steps)
+
+                system_prompt = f"""You are a {role} with expertise in {', '.join(expertise)}.
+Your personality: {personality}
+
+Your task is to combine multiple reasoning steps into a clear, comprehensive final answer."""
+
+                steps_str = "\n\n".join([f"Step {i+1}:\n{step}" for i, step in enumerate(reasoning_steps)])
+
+                user_prompt = f"""Original Query: {query}
+
+Reasoning Steps:
+{steps_str}
+
+Combine these reasoning steps into a clear, comprehensive final answer. Be integrative and ensure the answer is coherent.
+
+Final Answer:"""
+
+                logger.info(f"Calling Synthesizer with model: {model}")
+                answer = self._call_ollama_api(model, user_prompt, system_prompt)
+                logger.info(f"Synthesizer response: {answer[:200]}...")
+
                 return {
                     "answer": answer,
                     "summary": answer,

agentic_rag/config_example.yaml

Lines changed: 4 additions & 1 deletion
@@ -17,4 +17,7 @@ AGENT_ENDPOINTS:
   planner_url: http://localhost:8000
   researcher_url: http://localhost:8000
   reasoner_url: http://localhost:8000
-  synthesizer_url: http://localhost:8000
+  synthesizer_url: http://localhost:8000
+
+# Model configuration for specialized agents (via Ollama API)
+SPECIALIZED_AGENT_MODEL: deepseek-r1  # Options: qwq, deepseek-r1, llama3.3, phi4, gemma3, etc.
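
Since the a2a tests depend on the configured model being pulled locally, a small pre-flight check can fail fast. A sketch using Ollama's /api/tags model listing (the endpoint and response shape are standard Ollama; the config path is an assumption):

import requests
import yaml

with open("config.yaml", "r") as f:  # assumed location of the runtime config
    model = yaml.safe_load(f).get("SPECIALIZED_AGENT_MODEL", "deepseek-r1")

tags = requests.get("http://127.0.0.1:11434/api/tags", timeout=10).json()
local = {m["name"].split(":")[0] for m in tags.get("models", [])}
print(f"{model} pulled locally: {model in local}")
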
