Skip to content

Commit 806ca0a

Browse files
fix: resolve IndentationError in autoagents.py

- Fixed indentation issues that caused a SyntaxError on line 360
- Removed duplicate OpenAI client initialization logic
- Properly indented the LLM instance creation and response parsing code
- Maintained backward compatibility with existing functionality

Co-authored-by: Mervin Praison <[email protected]>

1 parent 52281af commit 806ca0a

File tree

1 file changed

+49
-82
lines changed

1 file changed

+49
-82
lines changed

src/praisonai-agents/praisonaiagents/agents/autoagents.py

Lines changed: 49 additions & 82 deletions
Original file line numberDiff line numberDiff line change
@@ -349,94 +349,61 @@ def _generate_config(self) -> AutoAgentsConfig:
349349
config = response.choices[0].message.parsed
350350
else:
351351
# Use LLM class for all other providers (Gemini, Anthropic, etc.)
352-
llm_instance = LLM(
352+
llm_instance = LLM(
353353
model=self.llm,
354354
base_url=self.base_url,
355355
api_key=self.api_key
356-
)
357-
358-
try:
359-
# Check if we have OpenAI API and the model supports structured output
360-
if self.llm and (self.llm.startswith('gpt-') or self.llm.startswith('o1-') or self.llm.startswith('o3-')):
361-
# Create a new client instance if custom parameters are provided
362-
if self.api_key or self.base_url:
363-
client = OpenAIClient(api_key=self.api_key, base_url=self.base_url)
364-
else:
365-
client = get_openai_client()
366-
use_openai_structured = True
367-
except:
368-
# If OpenAI client is not available, we'll use the LLM class
369-
pass
370-
371-
if use_openai_structured and client:
372-
# Use OpenAI's structured output for OpenAI models (backward compatibility)
373-
config = client.parse_structured_output(
374-
messages=[
375-
{"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
376-
{"role": "user", "content": prompt}
377-
],
378-
response_format=AutoAgentsConfig,
379-
model=self.llm
380-
)
381-
# Store the response for potential retry
382-
last_response = json.dumps(config.model_dump(), indent=2)
383-
else:
384-
# Use LLM class for all other providers (Gemini, Anthropic, etc.)
385-
llm_instance = LLM(
386-
model=self.llm,
387-
base_url=self.base_url,
388-
api_key=self.api_key
389-
)
390-
391-
response_text = llm_instance.response(
392-
prompt=prompt,
393-
system_prompt="You are a helpful assistant designed to generate AI agent configurations.",
394-
output_pydantic=AutoAgentsConfig,
395-
temperature=0.7,
396-
stream=False,
397-
verbose=False
398-
)
399-
400-
# Store the raw response for potential retry
401-
last_response = response_text
402-
403-
# Parse the JSON response
404-
try:
405-
# First try to parse as is
406-
config_dict = json.loads(response_text)
407-
config = AutoAgentsConfig(**config_dict)
408-
except json.JSONDecodeError:
409-
# If that fails, try to extract JSON from the response
410-
# Handle cases where the model might wrap JSON in markdown blocks
411-
cleaned_response = response_text.strip()
412-
if cleaned_response.startswith("```json"):
413-
cleaned_response = cleaned_response[7:]
414-
if cleaned_response.startswith("```"):
415-
cleaned_response = cleaned_response[3:]
416-
if cleaned_response.endswith("```"):
417-
cleaned_response = cleaned_response[:-3]
418-
cleaned_response = cleaned_response.strip()
419-
420-
config_dict = json.loads(cleaned_response)
421-
config = AutoAgentsConfig(**config_dict)
356+
)
422357

423-
# Validate the configuration
424-
is_valid, error_msg = self._validate_config(config)
425-
if not is_valid:
426-
last_error = error_msg
427-
if attempt < max_retries - 1:
428-
logging.warning(f"Configuration validation failed (attempt {attempt + 1}/{max_retries}): {error_msg}")
429-
continue
430-
else:
431-
raise ValueError(f"Configuration validation failed after {max_retries} attempts: {error_msg}")
358+
response_text = llm_instance.response(
359+
prompt=prompt,
360+
system_prompt="You are a helpful assistant designed to generate AI agent configurations.",
361+
output_pydantic=AutoAgentsConfig,
362+
temperature=0.7,
363+
stream=False,
364+
verbose=False
365+
)
432366

433-
# Ensure we have exactly max_agents number of agents
434-
if len(config.agents) > self.max_agents:
435-
config.agents = config.agents[:self.max_agents]
436-
elif len(config.agents) < self.max_agents:
437-
logging.warning(f"Generated {len(config.agents)} agents, expected {self.max_agents}")
367+
# Store the raw response for potential retry
368+
last_response = response_text
438369

439-
return config
370+
# Parse the JSON response
371+
try:
372+
# First try to parse as is
373+
config_dict = json.loads(response_text)
374+
config = AutoAgentsConfig(**config_dict)
375+
except json.JSONDecodeError:
376+
# If that fails, try to extract JSON from the response
377+
# Handle cases where the model might wrap JSON in markdown blocks
378+
cleaned_response = response_text.strip()
379+
if cleaned_response.startswith("```json"):
380+
cleaned_response = cleaned_response[7:]
381+
if cleaned_response.startswith("```"):
382+
cleaned_response = cleaned_response[3:]
383+
if cleaned_response.endswith("```"):
384+
cleaned_response = cleaned_response[:-3]
385+
cleaned_response = cleaned_response.strip()
386+
387+
config_dict = json.loads(cleaned_response)
388+
config = AutoAgentsConfig(**config_dict)
389+
390+
# Validate the configuration
391+
is_valid, error_msg = self._validate_config(config)
392+
if not is_valid:
393+
last_error = error_msg
394+
if attempt < max_retries - 1:
395+
logging.warning(f"Configuration validation failed (attempt {attempt + 1}/{max_retries}): {error_msg}")
396+
continue
397+
else:
398+
raise ValueError(f"Configuration validation failed after {max_retries} attempts: {error_msg}")
399+
400+
# Ensure we have exactly max_agents number of agents
401+
if len(config.agents) > self.max_agents:
402+
config.agents = config.agents[:self.max_agents]
403+
elif len(config.agents) < self.max_agents:
404+
logging.warning(f"Generated {len(config.agents)} agents, expected {self.max_agents}")
405+
406+
return config
440407

441408
except ValueError as e:
442409
# Re-raise validation errors

0 commit comments

Comments (0)