Skip to content

Commit 19d75af

Browse files
committed
fix: Resolve shell duplication and optimize Gemini streaming
1 parent 8d8e1c5 commit 19d75af

File tree

3 files changed

+62
-9
lines changed

3 files changed

+62
-9
lines changed

jdev_cli/core/providers/gemini.py

Lines changed: 52 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,25 @@
99

1010
# REMOVED top-level import: import google.generativeai as genai
1111

12+
# Anti-repetition and table formatting suffix added to all system prompts
13+
# Based on: https://ai.google.dev/gemini-api/docs/troubleshooting
14+
GEMINI_OUTPUT_RULES = """
15+
16+
CRITICAL OUTPUT RULES:
17+
- Be concise and direct
18+
- Never repeat yourself
19+
- Never duplicate content horizontally or vertically
20+
- Provide each answer only once
21+
- If you find yourself repeating, STOP and move on
22+
23+
MARKDOWN TABLES - CRITICAL:
24+
- Use EXACTLY 3 hyphens per column: |---|---|---|
25+
- NO extra spaces or padding for visual alignment
26+
- NO tabs - only single spaces
27+
- FOR TABLE HEADINGS, IMMEDIATELY ADD ' |' AFTER THE HEADING
28+
- Keep cell content short (under 30 chars)
29+
"""
30+
1231
class GeminiProvider:
1332
"""Google Gemini API provider."""
1433

@@ -232,10 +251,20 @@ async def stream_chat(
232251
# 2. Initialize Model (with System Instruction)
233252
# We create a specific instance for this chat to support dynamic system prompt
234253
# This is lightweight and ensures we use the native system_instruction
254+
255+
# Add anti-repetition instructions
256+
full_system_prompt = (system_prompt or "") + GEMINI_OUTPUT_RULES
257+
258+
# CRITICAL: Temperature MUST be 1.0 for Gemini 2.5+ to prevent looping
259+
safe_temperature = 1.0
260+
if temperature != 1.0:
261+
# We silently enforce 1.0 for stability as per DeepMind docs
262+
safe_temperature = 1.0
263+
235264
model = self._genai.GenerativeModel(
236265
self.model_name,
237266
tools=tools if tools else None,
238-
system_instruction=system_prompt
267+
system_instruction=full_system_prompt
239268
)
240269

241270
# 3. Prepare History
@@ -257,16 +286,35 @@ def _send():
257286
last_user_msg,
258287
generation_config={
259288
'max_output_tokens': max_tokens,
260-
'temperature': temperature,
289+
'temperature': safe_temperature,
261290
},
262291
stream=True
263292
)
264293

265294
loop = asyncio.get_event_loop()
266295
response = await loop.run_in_executor(None, _send)
267296

297+
# FIX: Convert iterable response to iterator
298+
response_iterator = iter(response)
299+
268300
# 6. Stream Response
269-
for chunk in response:
301+
# We iterate manually to avoid blocking the event loop
302+
def _next_chunk():
303+
try:
304+
return next(response_iterator)
305+
except StopIteration:
306+
return None
307+
except Exception as e:
308+
return e
309+
310+
while True:
311+
chunk = await loop.run_in_executor(None, _next_chunk)
312+
313+
if chunk is None:
314+
break
315+
if isinstance(chunk, Exception):
316+
raise chunk
317+
270318
try:
271319
# Handle Code Execution Parts
272320
if hasattr(chunk, 'parts'):
@@ -286,6 +334,7 @@ def _send():
286334
continue
287335

288336
await asyncio.sleep(0)
337+
289338

290339
except Exception as e:
291340
logger.error(f"Gemini streaming error: {e}")

jdev_core/language_detector.py

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -133,10 +133,13 @@ def get_prompt_instruction(cls, text: str) -> Optional[str]:
133133
Returns:
134134
Instruction string like "Respond in Portuguese." or None for English
135135
"""
136-
code, name = cls.detect_with_name(text)
136+
# DISABLE FORCED TRANSLATION (Fix for "OláOlá" duplication issue)
137+
# The model is smart enough to reply in the correct language without this.
138+
# code, name = cls.detect_with_name(text)
137139

138-
# Don't add instruction for English (default)
139-
if code == "en":
140-
return None
140+
# # Don't add instruction for English (default)
141+
# if code == "en":
142+
# return None
141143

142-
return f"Respond in {name}."
144+
# return f"Respond in {name}."
145+
return None

jdev_tui/core/streaming/gemini_stream.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -239,7 +239,8 @@ def _generate():
239239

240240
try:
241241
# 1. Start Request (Threaded)
242-
response_iterator = await loop.run_in_executor(None, _generate)
242+
response = await loop.run_in_executor(None, _generate)
243+
response_iterator = iter(response)
243244

244245
# 2. Iterate Chunks (Threaded to prevent UI freeze)
245246
def _next_chunk():

0 commit comments

Comments (0)