|
6 | 6 | import logging |
7 | 7 | import re |
8 | 8 | import time |
9 | | -from typing import Optional, List, Dict, Any, Tuple |
| 9 | +from typing import Optional, List, Dict, Any, Tuple, Callable |
10 | 10 | from PyQt6.QtWidgets import QWidget, QLabel |
11 | 11 | from PyQt6.QtCore import Qt, QTimer, pyqtSignal, QObject |
12 | 12 | from PyQt6.QtGui import QFont, QTextCursor, QKeyEvent |
|
35 | 35 | DEFAULT_AI_RENDERER_ORDER = ("ghost_text", "inline", "direct_insert") |
36 | 36 |
|
37 | 37 |
|
class TypedStreamingGhostTextController(QObject):
    """Accumulate streaming completion chunks and reveal them gradually.

    Incoming chunks are queued in arrival order and drained to the UI on a
    fixed-interval timer so the ghost text appears to be "typed" smoothly
    instead of jumping per network chunk. Chunks whose ``request_id`` /
    ``task_key`` differ from the bound stream are dropped as stale.
    """

    def __init__(
        self,
        *,
        render: Callable[[str], None],
        hide: Callable[[], None],
        flush_interval_ms: int = 15,
        max_chars_per_tick: int = 3,
        parent: QObject | None = None,
    ) -> None:
        super().__init__(parent)
        self._render = render
        self._hide = hide
        # Clamp tunables to >= 1 so a zero/negative config can never wedge
        # the timer or make a tick a no-op.
        self._flush_interval_ms = max(1, int(flush_interval_ms))
        self._max_chars_per_tick = max(1, int(max_chars_per_tick))

        # Identity of the stream currently being rendered (None = unbound;
        # the first chunk carrying ids binds them).
        self._active_request_id: str | None = None
        self._active_task_key: str | None = None
        self._pending: str = ""    # received but not yet shown
        self._displayed: str = ""  # already pushed to the UI

        self._timer = QTimer(self)
        self._timer.setInterval(self._flush_interval_ms)
        self._timer.timeout.connect(self._on_tick)

    def cancel(self) -> None:
        """Drop all buffered/displayed text, unbind the stream, hide the overlay."""
        self._timer.stop()
        self._pending = ""
        self._displayed = ""
        self._active_request_id = None
        self._active_task_key = None
        try:
            self._hide()
        except Exception:
            # Hiding is best-effort; never let UI errors escape teardown.
            return

    def on_chunk(self, chunk_text: str, context: dict) -> None:
        """Queue ``chunk_text`` for display unless it belongs to a stale stream."""
        chunk = str(chunk_text or "")
        if not chunk:
            return

        request_id = context.get("request_id") if isinstance(context, dict) else None
        task_key = context.get("task_key") if isinstance(context, dict) else None

        # Reject chunks tagged for a different task or request than the one
        # we are bound to; untagged chunks are always accepted.
        if self._active_task_key and task_key and task_key != self._active_task_key:
            return
        if self._active_request_id and request_id and request_id != self._active_request_id:
            return

        # Bind to the first identifiers observed on this stream.
        if request_id and self._active_request_id is None:
            self._active_request_id = str(request_id)
        if task_key and self._active_task_key is None:
            self._active_task_key = str(task_key)

        self._pending += chunk
        if not self._timer.isActive():
            self._timer.start()

    def _on_tick(self) -> None:
        """Move up to ``max_chars_per_tick`` characters from the queue to the UI."""
        if not self._pending:
            self._timer.stop()
            return

        step = self._max_chars_per_tick
        take, self._pending = self._pending[:step], self._pending[step:]
        self._displayed += take

        try:
            self._render(self._displayed)
        except Exception:
            # A failing renderer must not keep the tick loop alive.
            self.cancel()
            return

        if not self._pending:
            self._timer.stop()
| 123 | + |
38 | 124 | class SmartCompletionManager(QObject): |
39 | 125 | """智能补全管理器 - 统一所有补全功能""" |
40 | 126 |
|
@@ -91,6 +177,14 @@ def __init__(self, text_editor, completion_engine: CompletionEngine): |
91 | 177 | # 纯状态机(domain/application 层)用于描述 completion 生命周期 |
92 | 178 | self._completion_state_machine = CompletionStateMachine() |
93 | 179 |
|
| 180 | + self._streaming_typed_controller = TypedStreamingGhostTextController( |
| 181 | + render=self._render_streaming_ghost_text, |
| 182 | + hide=self._hide_streaming_ghost_text, |
| 183 | + flush_interval_ms=15, |
| 184 | + max_chars_per_tick=3, |
| 185 | + parent=self, |
| 186 | + ) |
| 187 | + |
94 | 188 | # 连接 Ghost Text 接受/拒绝信号到状态机,确保 Esc/Tab/输入 等路径一致收口 |
95 | 189 | self._bind_ghost_signals() |
96 | 190 | # 移除FloatingStatusIndicator以避免状态指示器冲突 |
@@ -205,6 +299,7 @@ def cancel_current_completion(self): |
205 | 299 | self._ai_timeout_timer.stop() |
206 | 300 | if hasattr(self, "_auto_completion_timer") and self._auto_completion_timer.isActive(): |
207 | 301 | self._auto_completion_timer.stop() |
| 302 | + self.stop_streaming_ai_completion() |
208 | 303 | self.hide_all_completions() |
209 | 304 | self._is_completing = False |
210 | 305 | self._last_completion_pos = -1 |
@@ -463,6 +558,9 @@ def _ai_complete(self, text: str, position: int, trigger_type: str = 'auto'): |
463 | 558 | self._is_completing = False |
464 | 559 | return |
465 | 560 |
|
| 561 | + # Reset any in-flight streaming render before starting a new request. |
| 562 | + self.stop_streaming_ai_completion() |
| 563 | + |
466 | 564 | try: |
467 | 565 | self._completion_state_machine.reset() |
468 | 566 | self._completion_state_machine.set_state(CompletionState.REQUESTING) |
@@ -615,9 +713,41 @@ def _show_popup_completion(self, suggestions: List[CompletionSuggestion]): |
615 | 713 | self.suggestionRendered.emit(suggestions[0].text) |
616 | 714 | except Exception: |
617 | 715 | self.suggestionRendered.emit("") |
| 716 | + |
| 717 | + def update_streaming_ai_completion(self, chunk_text: str, context: dict) -> None: |
| 718 | + """Render streaming chunks as typing-like Ghost Text.""" |
| 719 | + if not chunk_text: |
| 720 | + return |
| 721 | + if not self._ghost_completion: |
| 722 | + self._redetect_ghost_text_system() |
| 723 | + if not self._ghost_completion: |
| 724 | + return |
| 725 | + try: |
| 726 | + self._streaming_typed_controller.on_chunk(chunk_text, context or {}) |
| 727 | + except Exception as exc: # noqa: BLE001 |
| 728 | + logger.debug("Streaming typed render failed: %s", exc) |
| 729 | + |
| 730 | + def stop_streaming_ai_completion(self) -> None: |
| 731 | + try: |
| 732 | + self._streaming_typed_controller.cancel() |
| 733 | + except Exception: |
| 734 | + return |
| 735 | + |
| 736 | + def _render_streaming_ghost_text(self, suggestion: str) -> None: |
| 737 | + if not suggestion: |
| 738 | + return |
| 739 | + if not self._ghost_completion: |
| 740 | + self._redetect_ghost_text_system() |
| 741 | + renderer = GhostTextRenderer(self._ghost_completion, self._ghost_state_manager) |
| 742 | + renderer.render(suggestion) |
| 743 | + |
| 744 | + def _hide_streaming_ghost_text(self) -> None: |
| 745 | + renderer = GhostTextRenderer(self._ghost_completion, self._ghost_state_manager) |
| 746 | + renderer.hide() |
618 | 747 |
|
619 | 748 | def show_ai_completion(self, suggestion: str): |
620 | 749 | """显示AI补全建议 - 增强版本,支持多种显示模式和状态同步""" |
| 750 | + self.stop_streaming_ai_completion() |
621 | 751 | # 🔧 修复:标记请求已完成,防止超时处理冲突 |
622 | 752 | self._ai_request_completed = True |
623 | 753 |
|
|
0 commit comments