Skip to content

Commit 907a56c

Browse files
committed
[C7] UI streaming renderer (typed sequential ghost text; no乱序)
1 parent fc50a99 commit 907a56c

File tree

4 files changed

+255
-5
lines changed

4 files changed

+255
-5
lines changed

issues/2026-02-10_18-23-04-ui-autosave-streaming-v1.csv

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,4 +5,4 @@ C3,Bind EditorPanel documents to ProjectManager (autosave works),Add EditorPanel
55
C4,Restore last session (reopen recent project & last document),"Persist and restore last-open project/document when enabled: wire SettingsDialog restore_session/auto_save prefs into Config; on startup, if enabled, auto-open most recent project and re-open last document (fallback to first scene).",Restart restores the previous project and last document content instead of showing a blank scratch doc (when restore_session enabled).,manual,none,DONE,DONE,TODO,src/main.py | src/core/config_schema.py | src/gui/dialogs/settings_dialog.py | src/core/config.py | src/gui/main_window_parts/ui.py | src/gui/main_window_parts/integrations.py | src/gui/main_window_parts/dialogs.py | src/gui/editor/text_editor.py,C3,"manual_checklist: 1) Create/open a project, edit a scene, wait for autosave, close app. 2) Reopen app; expect project auto-open and edited content present. 3) Disable restore_session in settings; restart; expect no auto-open. | validation_limited:manual app restart not executed here | evidence:python -m compileall -q src PASS | risk:medium session-restore depends on runtime restart flow | done_at:2026-02-10"
66
C5,Streaming dispatch respects stream_response toggle,Plumb config ai.stream_response (default on) through AICompletionService/AIRequestDispatcher to use QtAIClient.complete_stream_async; include context/request_id in chunk signals; ensure cancellation prevents stale chunks from updating UI; add unit test for dispatch selection.,"When stream_response enabled, streaming client method is used and chunk signals are emitted; when disabled, non-stream method used; unit test passes.",python -m pytest -q tests/test_streaming_dispatcher.py,none,DONE,DONE,TODO,src/application/ai_completion_service.py | src/core/ai_qt_client.py | src/gui/ai/enhanced_ai_manager.py | src/gui/main_window_parts/integrations.py | tests/test_streaming_dispatcher.py | src/core/config_schema.py,none,test:pytest -q tests/test_streaming_dispatcher.py PASS | done_at:2026-02-10
77
C6,Provider streaming adaptation (Gemini endpoint + chunk parsing),Implement provider-specific streaming endpoint/headers/parsing: Gemini uses streamGenerateContent + SSE; add Gemini extract_stream_content; improve streaming parser robustness; ensure OpenAI/Claude continue working; add unit tests with fixture chunks.,Gemini streaming endpoint/path is correct and chunk parsing yields incremental text; unit tests pass.,python -m pytest -q tests/test_provider_streaming_gemini.py,none,DONE,DONE,TODO,src/core/ai_client.py | src/core/ai_providers/gemini.py | src/core/ai_providers/openai.py | src/core/ai_providers/claude.py | tests/test_provider_streaming_gemini.py,C5,test:pytest -q tests/test_provider_streaming_gemini.py PASS | done_at:2026-02-10
8-
C7,UI streaming renderer (typed sequential ghost text; no乱序),"Connect streaming chunk updates to SmartCompletionManager and render as ordered, typing-like ghost text; buffer and flush to UI to avoid乱序/抖动; ignore chunks from cancelled/old requests; finalize cleanly on completion; add unit/Qt test.","During streaming completion, suggestion grows in order like typing; cancel stops updates; no stale chunks applied; test passes.",python -m pytest -q tests/test_streaming_typed_renderer.py,none,TODO,TODO,TODO,src/gui/editor/smart_completion_manager.py | src/gui/main_window_parts/integrations.py | src/gui/ai/enhanced_ai_manager.py | tests/test_streaming_typed_renderer.py,C5|C6,none
8+
C7,UI streaming renderer (typed sequential ghost text; no乱序),"Connect streaming chunk updates to SmartCompletionManager and render as ordered, typing-like ghost text; buffer and flush to UI to avoid乱序/抖动; ignore chunks from cancelled/old requests; finalize cleanly on completion; add unit/Qt test.","During streaming completion, suggestion grows in order like typing; cancel stops updates; no stale chunks applied; test passes.",python -m pytest -q tests/test_streaming_typed_renderer.py,none,DONE,DONE,TODO,src/gui/editor/smart_completion_manager.py | src/gui/main_window_parts/integrations.py | src/gui/ai/enhanced_ai_manager.py | tests/test_streaming_typed_renderer.py,C5|C6,test:pytest -q tests/test_streaming_typed_renderer.py PASS | done_at:2026-02-10

src/gui/ai/enhanced_ai_manager.py

Lines changed: 39 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ class EnhancedAIManager(QObject):
5454
completionReady = pyqtSignal(str, str) # (completion_text, context)
5555
completionReceived = pyqtSignal(str, dict) # (response, metadata)
5656
completionError = pyqtSignal(str) # (error_message)
57-
streamUpdate = pyqtSignal(str) # (partial_text)
57+
streamUpdate = pyqtSignal(str, dict) # (partial_text, context)
5858
configChanged = pyqtSignal() # 配置更改信号
5959

6060
def __init__(self, config: Config, shared=None, parent: QWidget = None):
@@ -64,6 +64,7 @@ def __init__(self, config: Config, shared=None, parent: QWidget = None):
6464
self._parent = parent
6565
self._task_manager = getattr(shared, "task_manager", None) if shared else None
6666
self._cancelled_task_keys: set[str] = set()
67+
self._active_request_ids: dict[str, str] = {}
6768
self._ai_service = AICompletionService(
6869
config,
6970
shared,
@@ -156,6 +157,10 @@ def _init_ai_client(self):
156157
# 连接信号
157158
self._ai_client.responseReceived.connect(self._on_completion_ready)
158159
self._ai_client.errorOccurred.connect(self._on_completion_error)
160+
if hasattr(self._ai_client, "requestStarted"):
161+
self._ai_client.requestStarted.connect(self._on_request_started)
162+
if hasattr(self._ai_client, "requestCompleted"):
163+
self._ai_client.requestCompleted.connect(self._on_request_completed)
159164
if hasattr(self._ai_client, 'streamChunkReceived'):
160165
self._ai_client.streamChunkReceived.connect(self._on_stream_update)
161166

@@ -433,6 +438,23 @@ def _on_completion_error(self, error: str, context: dict):
433438
}
434439
self._task_manager.fail_external(task_key, error, details)
435440
self.completionError.emit(error)
441+
442+
@pyqtSlot(dict)
def _on_request_started(self, context: dict) -> None:
    """Latch the request_id that is now active for a task_key.

    The (task_key, request_id) pair comes from the AI client's
    ``requestStarted`` signal context; later stream chunks are checked
    against this mapping so stale requests cannot update the UI.
    """
    if not isinstance(context, dict):
        return
    task_key = context.get("task_key")
    request_id = context.get("request_id")
    if task_key and request_id:
        self._active_request_ids[str(task_key)] = str(request_id)
448+
449+
@pyqtSlot(dict)
def _on_request_completed(self, context: dict) -> None:
    """Drop the task->request mapping once its request finishes.

    The entry is removed only when the completed request_id matches the
    recorded one (or when the signal carries no request_id at all), so a
    late completion from a superseded request cannot clear the mapping
    of a newer in-flight request.
    """
    ctx = context if isinstance(context, dict) else {}
    task_key = ctx.get("task_key")
    if not task_key:
        return
    request_id = ctx.get("request_id")
    key = str(task_key)
    current = self._active_request_ids.get(key)
    if not request_id or current == str(request_id):
        self._active_request_ids.pop(key, None)
436458

437459
@pyqtSlot(str, dict)
438460
def _on_stream_update(self, partial_text: str, context: dict):
@@ -443,7 +465,21 @@ def _on_stream_update(self, partial_text: str, context: dict):
443465
task_key = context.get("task_key")
444466
if task_key and task_key in self._cancelled_task_keys:
445467
return
446-
self.streamUpdate.emit(partial_text)
468+
request_id = context.get("request_id")
469+
if task_key and request_id:
470+
active = self._active_request_ids.get(str(task_key))
471+
if active and active != str(request_id):
472+
return
473+
474+
try:
475+
editor = getattr(self, "_current_editor", None)
476+
smart_completion = getattr(editor, "_smart_completion", None) if editor else None
477+
if smart_completion and hasattr(smart_completion, "update_streaming_ai_completion"):
478+
smart_completion.update_streaming_ai_completion(partial_text, context)
479+
except Exception:
480+
logger.debug("Failed to forward stream update to SmartCompletionManager", exc_info=True)
481+
482+
self.streamUpdate.emit(partial_text, context)
447483

448484
# 兼容性方法 - 保持与SimpleAIManager的接口兼容
449485
def set_completion_enabled(self, enabled: bool):
@@ -947,7 +983,7 @@ def start_stream_response(self, text: str):
947983
try:
948984
logger.debug(f"开始流式响应: {text[:50]}...")
949985
# 发送流式更新信号
950-
self.streamUpdate.emit(text)
986+
self.streamUpdate.emit(text, {})
951987
except Exception as e:
952988
logger.error(f"流式响应失败: {e}")
953989

src/gui/editor/smart_completion_manager.py

Lines changed: 131 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
import logging
77
import re
88
import time
9-
from typing import Optional, List, Dict, Any, Tuple
9+
from typing import Optional, List, Dict, Any, Tuple, Callable
1010
from PyQt6.QtWidgets import QWidget, QLabel
1111
from PyQt6.QtCore import Qt, QTimer, pyqtSignal, QObject
1212
from PyQt6.QtGui import QFont, QTextCursor, QKeyEvent
@@ -35,6 +35,92 @@
3535
DEFAULT_AI_RENDERER_ORDER = ("ghost_text", "inline", "direct_insert")
3636

3737

38+
class TypedStreamingGhostTextController(QObject):
    """Accumulate streaming completion chunks and replay them as ghost text.

    The controller keeps an ordered text buffer: chunks are appended as
    they arrive, and a QTimer drains the buffer a few characters per tick
    so the suggestion appears to be "typed" rather than jumping.  Chunks
    whose request_id/task_key differ from the first-seen identifiers are
    dropped, shielding the UI from stale or out-of-order updates.
    """

    def __init__(
        self,
        *,
        render: Callable[[str], None],
        hide: Callable[[], None],
        flush_interval_ms: int = 15,
        max_chars_per_tick: int = 3,
        parent: QObject | None = None,
    ) -> None:
        super().__init__(parent)
        self._render = render
        self._hide = hide
        # Clamp both knobs to sane minimums so a misconfigured caller can
        # neither stall the timer (interval 0) nor flush zero chars per tick.
        self._flush_interval_ms = max(1, int(flush_interval_ms))
        self._max_chars_per_tick = max(1, int(max_chars_per_tick))

        # Identity of the stream currently being rendered; latched from the
        # first chunk that carries these fields.
        self._active_request_id: str | None = None
        self._active_task_key: str | None = None
        # Text received but not yet shown, and text already on screen.
        self._pending: str = ""
        self._displayed: str = ""

        self._timer = QTimer(self)
        self._timer.setInterval(self._flush_interval_ms)
        self._timer.timeout.connect(self._on_tick)

    def cancel(self) -> None:
        """Discard all buffered state and hide the ghost text (best effort)."""
        self._timer.stop()
        self._pending = ""
        self._displayed = ""
        self._active_request_id = None
        self._active_task_key = None
        try:
            self._hide()
        except Exception:
            # Hiding is cosmetic; a renderer error must not escape cancel().
            return

    def on_chunk(self, chunk_text: str, context: dict) -> None:
        """Queue *chunk_text* for typing-style display.

        Chunks belonging to a different task/request than the currently
        active one are silently ignored.
        """
        text = str(chunk_text or "")
        if not text:
            return

        ctx = context if isinstance(context, dict) else {}
        request_id = ctx.get("request_id")
        task_key = ctx.get("task_key")

        # Reject chunks that belong to another stream.
        if self._active_task_key and task_key and task_key != self._active_task_key:
            return
        if self._active_request_id and request_id and request_id != self._active_request_id:
            return

        # Latch identifiers from the first chunk that provides them.
        if request_id and self._active_request_id is None:
            self._active_request_id = str(request_id)
        if task_key and self._active_task_key is None:
            self._active_task_key = str(task_key)

        self._pending += text
        if not self._timer.isActive():
            self._timer.start()

    def _on_tick(self) -> None:
        """Move up to max_chars_per_tick characters from pending to screen."""
        if not self._pending:
            self._timer.stop()
            return

        step = self._max_chars_per_tick
        self._displayed += self._pending[:step]
        self._pending = self._pending[step:]

        try:
            self._render(self._displayed)
        except Exception:
            # A broken renderer must not keep the flush timer spinning.
            self.cancel()
            return

        if not self._pending:
            self._timer.stop()
122+
123+
38124
class SmartCompletionManager(QObject):
39125
"""智能补全管理器 - 统一所有补全功能"""
40126

@@ -91,6 +177,14 @@ def __init__(self, text_editor, completion_engine: CompletionEngine):
91177
# 纯状态机(domain/application 层)用于描述 completion 生命周期
92178
self._completion_state_machine = CompletionStateMachine()
93179

180+
self._streaming_typed_controller = TypedStreamingGhostTextController(
181+
render=self._render_streaming_ghost_text,
182+
hide=self._hide_streaming_ghost_text,
183+
flush_interval_ms=15,
184+
max_chars_per_tick=3,
185+
parent=self,
186+
)
187+
94188
# 连接 Ghost Text 接受/拒绝信号到状态机,确保 Esc/Tab/输入 等路径一致收口
95189
self._bind_ghost_signals()
96190
# 移除FloatingStatusIndicator以避免状态指示器冲突
@@ -205,6 +299,7 @@ def cancel_current_completion(self):
205299
self._ai_timeout_timer.stop()
206300
if hasattr(self, "_auto_completion_timer") and self._auto_completion_timer.isActive():
207301
self._auto_completion_timer.stop()
302+
self.stop_streaming_ai_completion()
208303
self.hide_all_completions()
209304
self._is_completing = False
210305
self._last_completion_pos = -1
@@ -463,6 +558,9 @@ def _ai_complete(self, text: str, position: int, trigger_type: str = 'auto'):
463558
self._is_completing = False
464559
return
465560

561+
# Reset any in-flight streaming render before starting a new request.
562+
self.stop_streaming_ai_completion()
563+
466564
try:
467565
self._completion_state_machine.reset()
468566
self._completion_state_machine.set_state(CompletionState.REQUESTING)
@@ -615,9 +713,41 @@ def _show_popup_completion(self, suggestions: List[CompletionSuggestion]):
615713
self.suggestionRendered.emit(suggestions[0].text)
616714
except Exception:
617715
self.suggestionRendered.emit("")
716+
717+
def update_streaming_ai_completion(self, chunk_text: str, context: dict) -> None:
    """Forward a streaming AI chunk to the typed ghost-text controller.

    Empty chunks are ignored, and nothing is rendered unless a ghost-text
    backend is available (one re-detect is attempted before giving up).
    Controller failures are logged at debug level and never propagate.
    """
    if not chunk_text:
        return

    backend_ready = bool(self._ghost_completion)
    if not backend_ready:
        # The ghost-text backend may appear late; try to (re)detect it once.
        self._redetect_ghost_text_system()
        backend_ready = bool(self._ghost_completion)
    if not backend_ready:
        return

    try:
        self._streaming_typed_controller.on_chunk(chunk_text, context or {})
    except Exception as exc:  # noqa: BLE001
        logger.debug("Streaming typed render failed: %s", exc)
729+
730+
def stop_streaming_ai_completion(self) -> None:
    """Cancel any in-flight streaming render; never raises."""
    try:
        self._streaming_typed_controller.cancel()
    except Exception:
        # Best effort: a missing or broken controller means nothing to stop.
        return
735+
736+
def _render_streaming_ghost_text(self, suggestion: str) -> None:
    """Render the accumulated streaming *suggestion* as ghost text.

    Renderer errors are allowed to propagate: the typed controller
    cancels the stream when a render call fails.
    """
    if not suggestion:
        return
    if not self._ghost_completion:
        self._redetect_ghost_text_system()
        # Fix: bail out when no ghost-text backend exists even after
        # re-detection, mirroring update_streaming_ai_completion().  The
        # original fell through and constructed a GhostTextRenderer
        # around None, which presumably fails at render time.
        if not self._ghost_completion:
            return
    renderer = GhostTextRenderer(self._ghost_completion, self._ghost_state_manager)
    renderer.render(suggestion)
743+
744+
def _hide_streaming_ghost_text(self) -> None:
    """Hide any streaming ghost text currently on screen."""
    # Fix: skip when no ghost-text backend exists.  The original built a
    # GhostTextRenderer around a None backend and relied on the caller's
    # broad except (controller.cancel) to swallow the resulting error.
    if not self._ghost_completion:
        return
    renderer = GhostTextRenderer(self._ghost_completion, self._ghost_state_manager)
    renderer.hide()
618747

619748
def show_ai_completion(self, suggestion: str):
620749
"""显示AI补全建议 - 增强版本,支持多种显示模式和状态同步"""
750+
self.stop_streaming_ai_completion()
621751
# 🔧 修复:标记请求已完成,防止超时处理冲突
622752
self._ai_request_completed = True
623753

Lines changed: 84 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,84 @@
1+
from __future__ import annotations
2+
3+
import os
4+
5+
import pytest
6+
7+
os.environ.setdefault("QT_QPA_PLATFORM", "offscreen")
8+
9+
pytest.importorskip("PyQt6")
10+
pytest.importorskip("pytestqt")
11+
12+
from gui.editor.smart_completion_manager import TypedStreamingGhostTextController
13+
14+
15+
def test_typed_streaming_renderer_types_in_order(qtbot) -> None:
    """A single chunk is revealed one character per tick, strictly in order."""
    frames: list[str] = []

    controller = TypedStreamingGhostTextController(
        render=frames.append,
        hide=lambda: None,
        flush_interval_ms=1,
        max_chars_per_tick=1,
    )

    controller.on_chunk("abc", {"request_id": "r1", "task_key": "t1"})

    # Each tick renders the cumulative text, so the final frame is "abc".
    qtbot.waitUntil(lambda: bool(frames) and frames[-1] == "abc", timeout=1000)
    assert frames == ["a", "ab", "abc"]
35+
36+
37+
def test_typed_streaming_renderer_cancel_stops_updates(qtbot) -> None:
    """cancel() hides the ghost text and no further frames are flushed."""
    rendered: list[str] = []

    def _render(text: str) -> None:
        rendered.append(text)

    def _hide() -> None:
        rendered.append("<hide>")

    controller = TypedStreamingGhostTextController(
        render=_render,
        hide=_hide,
        # Fix: a long interval keeps the second tick comfortably in the
        # future so cancelling after the first frame is not a race.  With
        # the original 10 ms interval, both ticks could flush before
        # waitUntil sampled the predicate, "ab" was rendered, and
        # `rendered[-1] == "a"` never became true -> flaky timeout.
        flush_interval_ms=200,
        max_chars_per_tick=1,
    )

    controller.on_chunk("ab", {"request_id": "r1"})
    qtbot.waitUntil(lambda: bool(rendered), timeout=1000)

    controller.cancel()
    # cancel() must hide the ghost text immediately.
    assert rendered[-1] == "<hide>"

    snapshot = list(rendered)
    qtbot.wait(450)

    # No new frames after cancel: the pending "b" is never flushed.
    assert rendered == snapshot
    assert "ab" not in rendered
60+
61+
62+
def test_typed_streaming_renderer_ignores_stale_request_id(qtbot) -> None:
    """Chunks carrying a request_id other than the latched one are dropped."""
    rendered: list[str] = []

    controller = TypedStreamingGhostTextController(
        render=rendered.append,
        hide=lambda: None,
        flush_interval_ms=1,
        max_chars_per_tick=10,
    )

    # The first chunk latches request_id "r2" as the active stream.
    controller.on_chunk("X", {"request_id": "r2"})
    qtbot.waitUntil(lambda: bool(rendered) and rendered[-1] == "X", timeout=1000)

    # A chunk from a different (stale) request must be ignored.
    controller.on_chunk("Y", {"request_id": "r1"})
    qtbot.wait(20)

    assert rendered[-1] == "X"

0 commit comments

Comments
 (0)