Commit 061c8d5
fix: add multi-provider support for AutoAgents
- Replace the OpenAI-specific beta.chat.completions.parse() call with the cross-provider LLM class
- Maintain backward compatibility by keeping structured output for OpenAI models
- Add support for all LiteLLM providers (Gemini, Anthropic, etc.)
- Include robust JSON parsing to handle markdown-wrapped responses
- Fixes #788: AttributeError when using AutoAgents with the Gemini provider

Co-authored-by: Mervin Praison <MervinPraison@users.noreply.github.com>
Parent: 35df788 · Commit: 061c8d5
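For context, a minimal usage sketch of the call this commit unblocks. The model name and task string are illustrative; the constructor arguments mirror the attributes the diff touches (`self.llm`, `self.max_agents`, `self.base_url`, `self.api_key`) rather than reproducing the full public API:

```python
# Sketch: AutoAgents driven by a non-OpenAI model via a LiteLLM provider string.
# Before this commit, a Gemini model name raised AttributeError because agent
# generation called OpenAI's beta.chat.completions.parse() unconditionally.
from praisonaiagents import AutoAgents

agents = AutoAgents(
    instructions="Research recent developments in AI agents",  # illustrative task
    llm="gemini/gemini-1.5-flash",  # illustrative LiteLLM model string
    max_agents=3,                   # mirrors self.max_agents in the diff
)
agents.start()
```

Model names with `gpt-`, `o1-`, or `o3-` prefixes still take the native OpenAI structured-output path, so existing callers see no behavior change.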

1 file changed: 65 additions & 23 deletions

src/praisonai-agents/praisonaiagents/agents/autoagents.py
```diff
@@ -13,7 +13,8 @@
 import os
 from pydantic import BaseModel, ConfigDict
 from ..main import display_instruction, display_tool_call, display_interaction
-from ..llm import get_openai_client
+from ..llm import get_openai_client, LLM
+import json
 
 # Define Pydantic models for structured output
 class TaskConfig(BaseModel):
@@ -238,33 +239,74 @@ def _generate_config(self) -> AutoAgentsConfig:
         """
 
         try:
-            # Get OpenAI client
+            # Try to use OpenAI's structured output if available
+            use_openai_structured = False
+            client = None
+
             try:
-                client = get_openai_client()
-            except ValueError as e:
-                # AutoAgents requires OpenAI for structured output generation
-                raise ValueError(
-                    "AutoAgents requires OpenAI API for automatic agent generation. "
-                    "Please set OPENAI_API_KEY environment variable or use PraisonAIAgents class directly "
-                    "with manually configured agents for non-OpenAI providers."
-                ) from e
+                # Check if we have OpenAI API and the model supports structured output
+                if self.llm and (self.llm.startswith('gpt-') or self.llm.startswith('o1-') or self.llm.startswith('o3-')):
+                    client = get_openai_client()
+                    use_openai_structured = True
+            except:
+                # If OpenAI client is not available, we'll use the LLM class
+                pass
+
+            if use_openai_structured and client:
+                # Use OpenAI's structured output for OpenAI models (backward compatibility)
+                response = client.beta.chat.completions.parse(
+                    model=self.llm,
+                    response_format=AutoAgentsConfig,
+                    messages=[
+                        {"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
+                        {"role": "user", "content": prompt}
+                    ]
+                )
+                config = response.choices[0].message.parsed
+            else:
+                # Use LLM class for all other providers (Gemini, Anthropic, etc.)
+                llm_instance = LLM(
+                    model=self.llm,
+                    base_url=self.base_url,
+                    api_key=self.api_key
+                )
 
-            response = client.beta.chat.completions.parse(
-                model=self.llm,
-                response_format=AutoAgentsConfig,
-                messages=[
-                    {"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
-                    {"role": "user", "content": prompt}
-                ]
-            )
+                response_text = llm_instance.response(
+                    prompt=prompt,
+                    system_prompt="You are a helpful assistant designed to generate AI agent configurations.",
+                    output_pydantic=AutoAgentsConfig,
+                    temperature=0.7,
+                    stream=False,
+                    verbose=False
+                )
+
+                # Parse the JSON response
+                try:
+                    # First try to parse as is
+                    config_dict = json.loads(response_text)
+                    config = AutoAgentsConfig(**config_dict)
+                except json.JSONDecodeError:
+                    # If that fails, try to extract JSON from the response
+                    # Handle cases where the model might wrap JSON in markdown blocks
+                    cleaned_response = response_text.strip()
+                    if cleaned_response.startswith("```json"):
+                        cleaned_response = cleaned_response[7:]
+                    if cleaned_response.startswith("```"):
+                        cleaned_response = cleaned_response[3:]
+                    if cleaned_response.endswith("```"):
+                        cleaned_response = cleaned_response[:-3]
+                    cleaned_response = cleaned_response.strip()
+
+                    config_dict = json.loads(cleaned_response)
+                    config = AutoAgentsConfig(**config_dict)
 
             # Ensure we have exactly max_agents number of agents
-            if len(response.choices[0].message.parsed.agents) > self.max_agents:
-                response.choices[0].message.parsed.agents = response.choices[0].message.parsed.agents[:self.max_agents]
-            elif len(response.choices[0].message.parsed.agents) < self.max_agents:
-                logging.warning(f"Generated {len(response.choices[0].message.parsed.agents)} agents, expected {self.max_agents}")
+            if len(config.agents) > self.max_agents:
+                config.agents = config.agents[:self.max_agents]
+            elif len(config.agents) < self.max_agents:
+                logging.warning(f"Generated {len(config.agents)} agents, expected {self.max_agents}")
 
-            return response.choices[0].message.parsed
+            return config
         except Exception as e:
             logging.error(f"Error generating configuration: {e}")
             raise
```
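The fallback branch's fence-stripping JSON parse is the most reusable piece of the change. Below is a self-contained sketch of the same technique; the helper name and sample payload are illustrative, not part of the commit:

```python
import json
from typing import Any

def parse_llm_json(response_text: str) -> dict[str, Any]:
    """Parse JSON from an LLM response, tolerating markdown code fences.

    Standalone sketch of the fallback parsing added in this commit;
    the function name is illustrative, not part of the codebase.
    """
    try:
        # Fast path: the response is already bare JSON.
        return json.loads(response_text)
    except json.JSONDecodeError:
        # Fallback: strip a surrounding markdown fence, then parse again.
        cleaned = response_text.strip()
        if cleaned.startswith("```json"):
            cleaned = cleaned[7:]   # drop the "```json" opener
        if cleaned.startswith("```"):
            cleaned = cleaned[3:]   # drop a bare "```" opener
        if cleaned.endswith("```"):
            cleaned = cleaned[:-3]  # drop the closing fence
        return json.loads(cleaned.strip())

# Example: a fence-wrapped response, as some providers return for structured output.
wrapped = '```json\n{"agents": []}\n```'
print(parse_llm_json(wrapped))  # {'agents': []}
```

Trying a bare `json.loads` first keeps the happy path cheap; the stripping only runs when a provider such as Gemini wraps its structured output in a markdown code block.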
