Skip to content

Commit 2c9e8b4

Browse files
committed
Release v3.9.23
1 parent ce3b25c commit 2c9e8b4

File tree

7 files changed

+113
-20
lines changed

7 files changed

+113
-20
lines changed

docker/Dockerfile.chat

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
1616
# Install Python packages (using latest versions)
1717
RUN pip install --no-cache-dir \
1818
praisonai_tools \
19-
"praisonai>=3.9.22" \
19+
"praisonai>=3.9.23" \
2020
"praisonai[chat]" \
2121
"embedchain[github,youtube]"
2222

docker/Dockerfile.dev

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ RUN mkdir -p /root/.praison
2020
# Install Python packages (using latest versions)
2121
RUN pip install --no-cache-dir \
2222
praisonai_tools \
23-
"praisonai>=3.9.22" \
23+
"praisonai>=3.9.23" \
2424
"praisonai[ui]" \
2525
"praisonai[chat]" \
2626
"praisonai[realtime]" \

docker/Dockerfile.ui

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
1616
# Install Python packages (using latest versions)
1717
RUN pip install --no-cache-dir \
1818
praisonai_tools \
19-
"praisonai>=3.9.22" \
19+
"praisonai>=3.9.23" \
2020
"praisonai[ui]" \
2121
"praisonai[crewai]"
2222

src/praisonai-agents/praisonaiagents/agent/deep_research_agent.py

Lines changed: 106 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -487,22 +487,85 @@ def _parse_openai_response(self, response: Any) -> DeepResearchResponse:
487487
raw_response=response
488488
)
489489

490-
def _parse_gemini_response(self, interaction: Any) -> DeepResearchResponse:
491-
"""Parse Gemini Deep Research API response."""
490+
def _parse_gemini_response(
491+
self,
492+
interaction: Any,
493+
fallback_text: str = "",
494+
fallback_reasoning: Optional[List[ReasoningStep]] = None
495+
) -> DeepResearchResponse:
496+
"""Parse Gemini Deep Research API response.
497+
498+
Args:
499+
interaction: The Gemini interaction object
500+
fallback_text: Accumulated text from streaming (used if outputs parsing fails)
501+
fallback_reasoning: Reasoning steps collected during streaming
502+
"""
492503
report = ""
493504
citations = []
494-
reasoning_steps = []
505+
reasoning_steps = fallback_reasoning or []
495506

496-
# Get the final output
507+
# Try multiple attribute paths for Gemini output structure
508+
# Path 1: Direct outputs with text attribute
497509
if hasattr(interaction, 'outputs') and interaction.outputs:
498510
last_output = interaction.outputs[-1]
499-
if hasattr(last_output, 'text'):
511+
if hasattr(last_output, 'text') and last_output.text:
500512
report = last_output.text
501513
elif hasattr(last_output, 'content'):
502-
report = str(last_output.content)
514+
# Gemini nested structure: content.parts[0].text
515+
content = last_output.content
516+
if hasattr(content, 'parts') and content.parts:
517+
first_part = content.parts[0]
518+
if hasattr(first_part, 'text'):
519+
report = first_part.text
520+
else:
521+
report = str(first_part)
522+
elif hasattr(content, 'text'):
523+
report = content.text
524+
else:
525+
report = str(content)
526+
527+
# Path 2: Direct result attribute
528+
if not report and hasattr(interaction, 'result'):
529+
result = interaction.result
530+
if hasattr(result, 'text'):
531+
report = result.text
532+
else:
533+
report = str(result)
534+
535+
# Path 3: Response attribute
536+
if not report and hasattr(interaction, 'response'):
537+
resp = interaction.response
538+
if hasattr(resp, 'text'):
539+
report = resp.text
540+
elif hasattr(resp, 'content'):
541+
report = str(resp.content)
542+
543+
# Path 4: Fallback to streamed content (critical fix)
544+
if not report and fallback_text:
545+
report = fallback_text
546+
if self.verbose:
547+
self.logger.debug("Using fallback streamed text for report")
503548

504-
# Gemini doesn't provide structured citations in the same way
505-
# but we can try to extract them from annotations if available
549+
# Try to extract citations from grounding metadata
550+
if hasattr(interaction, 'outputs') and interaction.outputs:
551+
for output in interaction.outputs:
552+
if hasattr(output, 'grounding_metadata'):
553+
metadata = output.grounding_metadata
554+
if hasattr(metadata, 'grounding_chunks'):
555+
for chunk in metadata.grounding_chunks:
556+
if hasattr(chunk, 'web') and chunk.web:
557+
citations.append(Citation(
558+
title=getattr(chunk.web, 'title', ''),
559+
url=getattr(chunk.web, 'uri', ''),
560+
))
561+
562+
# Log warning if report is empty
563+
if not report:
564+
self.logger.warning(
565+
"Gemini response parsing returned empty report. "
566+
f"Interaction ID: {getattr(interaction, 'id', 'unknown')}, "
567+
f"Status: {getattr(interaction, 'status', 'unknown')}"
568+
)
506569

507570
return DeepResearchResponse(
508571
report=report,
@@ -878,10 +941,32 @@ def _research_gemini_streaming(
878941
print("\n\n" + "=" * 60)
879942
print("✅ Research Complete")
880943
print("=" * 60 + "\n")
881-
# Get final interaction for full response
944+
945+
# Poll until interaction status is actually 'completed'
946+
# (workaround for timing issue where GET returns stale status)
882947
if interaction_id:
883-
final_interaction = self.gemini_client.interactions.get(interaction_id)
884-
return self._parse_gemini_response(final_interaction)
948+
max_poll_attempts = 30 # 30 seconds max
949+
for attempt in range(max_poll_attempts):
950+
final_interaction = self.gemini_client.interactions.get(interaction_id)
951+
if final_interaction.status == "completed":
952+
return self._parse_gemini_response(
953+
final_interaction,
954+
fallback_text=final_text,
955+
fallback_reasoning=reasoning_steps
956+
)
957+
elif final_interaction.status in ["failed", "cancelled"]:
958+
raise RuntimeError(f"Research {final_interaction.status}")
959+
time.sleep(1)
960+
961+
# If still not completed, use fallback
962+
if self.verbose:
963+
self.logger.warning("Interaction not completed after polling, using streamed content")
964+
return DeepResearchResponse(
965+
report=final_text,
966+
reasoning_steps=reasoning_steps,
967+
provider="gemini",
968+
interaction_id=interaction_id
969+
)
885970

886971
elif chunk.event_type == "error":
887972
error_msg = getattr(chunk, 'error', 'Unknown streaming error')
@@ -949,7 +1034,11 @@ def _resume_gemini_stream(
9491034
if self.verbose:
9501035
print("\n\n✅ Research Complete (resumed)")
9511036
final_interaction = self.gemini_client.interactions.get(interaction_id)
952-
return self._parse_gemini_response(final_interaction)
1037+
return self._parse_gemini_response(
1038+
final_interaction,
1039+
fallback_text=accumulated_text,
1040+
fallback_reasoning=reasoning_steps
1041+
)
9531042

9541043
except Exception as e:
9551044
retry_count += 1
@@ -963,7 +1052,11 @@ def _resume_gemini_stream(
9631052
while True:
9641053
interaction = self.gemini_client.interactions.get(interaction_id)
9651054
if interaction.status == "completed":
966-
return self._parse_gemini_response(interaction)
1055+
return self._parse_gemini_response(
1056+
interaction,
1057+
fallback_text=accumulated_text,
1058+
fallback_reasoning=reasoning_steps
1059+
)
9671060
elif interaction.status in ["failed", "cancelled"]:
9681061
raise RuntimeError(f"Research {interaction.status}")
9691062
time.sleep(self.poll_interval)

src/praisonai/praisonai.rb

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,8 @@ class Praisonai < Formula
33

44
desc "AI tools for various AI applications"
55
homepage "https://github.com/MervinPraison/PraisonAI"
6-
url "https://github.com/MervinPraison/PraisonAI/archive/refs/tags/v3.9.22.tar.gz"
7-
sha256 `curl -sL https://github.com/MervinPraison/PraisonAI/archive/refs/tags/v3.9.22.tar.gz | shasum -a 256`.split.first
6+
url "https://github.com/MervinPraison/PraisonAI/archive/refs/tags/v3.9.23.tar.gz"
7+
sha256 `curl -sL https://github.com/MervinPraison/PraisonAI/archive/refs/tags/v3.9.23.tar.gz | shasum -a 256`.split.first
88
license "MIT"
99

1010
depends_on "python@3.11"

src/praisonai/praisonai/deploy.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ def create_dockerfile(self):
5757
file.write("FROM python:3.11-slim\n")
5858
file.write("WORKDIR /app\n")
5959
file.write("COPY . .\n")
60-
file.write("RUN pip install flask praisonai==3.9.22 gunicorn markdown\n")
60+
file.write("RUN pip install flask praisonai==3.9.23 gunicorn markdown\n")
6161
file.write("EXPOSE 8080\n")
6262
file.write('CMD ["gunicorn", "-b", "0.0.0.0:8080", "api:app"]\n')
6363

src/praisonai/praisonai/version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = "3.9.22"
1+
__version__ = "3.9.23"

0 commit comments

Comments (0)