
Commit 5df0c20

fix: resolve code quality issues and unreachable code in agent.py
- Remove unused variable final_response_text
- Fix multimodal prompt normalization fallback to use empty string instead of str(prompt)
- Fix inconsistent variable usage: use original_prompt instead of prompt in display_interaction and _apply_guardrail_with_retry calls
- Remove unnecessary f-string where no interpolation is needed
- Fix unused exception variable by removing 'as e' where e is not used

These changes improve code quality while maintaining backward compatibility and ensuring the handoff fix continues to work correctly.

Co-authored-by: Mervin Praison <[email protected]>
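For the multimodal-fallback bullet, a minimal standalone sketch of the behavior change, assuming an OpenAI-style list of content parts (the prompt value here is illustrative, not taken from the repository):

    # A multimodal prompt with no text part at all (illustrative shape).
    prompt = [
        {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
    ]

    normalized_content = prompt
    if isinstance(prompt, list):
        # The old fallback str(prompt) stored the list's repr in chat history;
        # the new fallback "" keeps the stored content a plain string.
        normalized_content = next(
            (item["text"] for item in prompt if item.get("type") == "text"), ""
        )

    print(repr(normalized_content))  # '' instead of "[{'type': 'image_url', ...}]"

The context lines in the diff below suggest why this matters: normalized_content feeds a duplicate-message check against chat_history, and a list repr would never usefully match a real user message.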
1 parent 857feb9 commit 5df0c20

File tree

  • src/praisonai-agents/praisonaiagents/agent

1 file changed (+9, -10 lines)

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 9 additions & 10 deletions
@@ -1163,7 +1163,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
         normalized_content = prompt
         if isinstance(prompt, list):
             # Extract text from multimodal prompts
-            normalized_content = next((item["text"] for item in prompt if item.get("type") == "text"), str(prompt))
+            normalized_content = next((item["text"] for item in prompt if item.get("type") == "text"), "")
 
         # Prevent duplicate messages
         if not (self.chat_history and
@@ -1230,7 +1230,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
         normalized_content = original_prompt
         if isinstance(original_prompt, list):
             # Extract text from multimodal prompts
-            normalized_content = next((item["text"] for item in original_prompt if item.get("type") == "text"), str(original_prompt))
+            normalized_content = next((item["text"] for item in original_prompt if item.get("type") == "text"), "")
 
         # Prevent duplicate messages
         if not (self.chat_history and
@@ -1239,7 +1239,6 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
         # Add user message to chat history BEFORE LLM call so handoffs can access it
         self.chat_history.append({"role": "user", "content": normalized_content})
 
-        final_response_text = None
         reflection_count = 0
         start_time = time.time()
 
@@ -1371,10 +1370,10 @@ def __init__(self, data):
                 self.chat_history.append({"role": "assistant", "content": response_text})
                 # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
                 if self.verbose and not self._using_custom_llm:
-                    display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+                    display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
                 # Apply guardrail validation after satisfactory reflection
                 try:
-                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed after reflection: {e}")
@@ -1390,10 +1389,10 @@ def __init__(self, data):
                 self.chat_history.append({"role": "assistant", "content": response_text})
                 # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
                 if self.verbose and not self._using_custom_llm:
-                    display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+                    display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
                 # Apply guardrail validation after max reflections
                 try:
-                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed after max reflections: {e}")
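On the original_prompt bullet the two hunks above implement: the commit message calls the old calls "inconsistent variable usage", and the usual hazard with reusing the working name is that it can be rebound to derived data partway through a long method. A hypothetical reduction (chat, display_interaction, and the rebinding step here are illustrative, not the real agent.py code):

    def display_interaction(shown_prompt, response_text):
        print(f"> {shown_prompt}\n{response_text}")

    def chat(prompt):
        original_prompt = prompt  # preserved before any reprocessing
        if isinstance(prompt, list):
            # Suppose later code rebinds `prompt` to a derived string...
            prompt = next((p["text"] for p in prompt if p.get("type") == "text"), "")
        response_text = "ok"
        # ...then passing `prompt` would display the derived value, while
        # `original_prompt` is exactly what the caller sent.
        display_interaction(original_prompt, response_text)

    chat([{"type": "text", "text": "hello"}, {"type": "image_url", "image_url": {}}])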
@@ -1417,7 +1416,7 @@ def __init__(self, data):
                     messages.append({"role": "assistant", "content": "Self Reflection failed."})
                     reflection_count += 1
                     continue  # Continue even after error to try again
-            except Exception as e:
+            except Exception:
                 # Catch any exception from the inner try block and re-raise to outer handler
                 raise
         except Exception as e:
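The hunk above also covers the unused-exception-variable bullet: when a handler never references the exception object, binding it with "as e" just trips linters (pyflakes reports it as an unused local, F841 under flake8's numbering), and a bare raise re-raises with the original traceback. A small self-contained example:

    def risky():
        raise ValueError("boom")

    def inner():
        try:
            risky()
        except Exception:  # no "as e": the handler never reads the object
            raise          # bare raise keeps the original traceback

    try:
        inner()
    except ValueError as err:  # here the binding IS used, so naming it is fine
        print(f"caught: {err}")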
@@ -1481,7 +1480,7 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
         normalized_content = prompt
         if isinstance(prompt, list):
             # Extract text from multimodal prompts
-            normalized_content = next((item["text"] for item in prompt if item.get("type") == "text"), str(prompt))
+            normalized_content = next((item["text"] for item in prompt if item.get("type") == "text"), "")
 
         # Prevent duplicate messages
         if not (self.chat_history and
@@ -1547,7 +1546,7 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
         normalized_content = original_prompt
         if isinstance(original_prompt, list):
             # Extract text from multimodal prompts
-            normalized_content = next((item["text"] for item in original_prompt if item.get("type") == "text"), str(original_prompt))
+            normalized_content = next((item["text"] for item in original_prompt if item.get("type") == "text"), "")
 
         # Prevent duplicate messages
         if not (self.chat_history and
