|
15 | 15 | from pydantic import BaseModel, Field |
16 | 16 | from typing_extensions import Optional |
17 | 17 |
|
| 18 | +import volcenginesdkarkruntime.types.chat.chat_completion_chunk as completion_chunk |
18 | 19 | from arkitect.core.component.llm import BaseChatLanguageModel |
19 | 20 | from arkitect.core.component.llm.model import ArkMessage, ArkChatRequest, ArkChatResponse, ArkChatCompletionChunk |
20 | 21 | from arkitect.core.component.prompts import CustomPromptTemplate |
|
23 | 24 | from search_engine import SearchEngine, SearchResult |
24 | 25 | from search_engine.volc_bot import VolcBotSearchEngine |
25 | 26 | from prompt import DEFAULT_PLANNING_PROMPT, DEFAULT_SUMMARY_PROMPT |
26 | | -from utils import get_current_date, cast_content_to_reasoning_content |
| 27 | +from utils import get_current_date, cast_content_to_reasoning_content, gen_metadata_chunk |
27 | 28 |
|
28 | 29 | """ |
29 | 30 | ResultsSummary is used to store the results searched so far |
@@ -110,7 +111,8 @@ async def arun_deep_research(self, request: ArkChatRequest, question: str) -> Ar |
110 | 111 | references=references |
111 | 112 | ) |
112 | 113 | # append the reasoning buffer |
113 | | - resp.choices[0].message.reasoning_content = (buffered_reasoning_content + resp.choices[0].message.reasoning_content) |
| 114 | + resp.choices[0].message.reasoning_content = ( |
| 115 | + buffered_reasoning_content + resp.choices[0].message.reasoning_content) |
114 | 116 | return resp |
115 | 117 |
|
116 | 118 | async def astream_deep_research(self, request: ArkChatRequest, question: str) \ |
@@ -153,8 +155,8 @@ async def astream_planning( |
153 | 155 | references: ResultsSummary |
154 | 156 | ) -> AsyncIterable[ArkChatCompletionChunk]: |
155 | 157 |
|
156 | | - planned_rounds = 0 |
157 | | - while planned_rounds < self.extra_config.max_planning_rounds: |
| 158 | + planned_rounds = 1 |
| 159 | + while planned_rounds <= self.extra_config.max_planning_rounds: |
158 | 160 | planned_rounds += 1 |
159 | 161 |
|
160 | 162 | llm = BaseChatLanguageModel( |
@@ -184,12 +186,35 @@ async def astream_planning( |
184 | 186 |
|
185 | 187 | new_queries = self.check_query(planning_result) |
186 | 188 | if not new_queries: |
| 189 | + # YIELD state with metadata |
| 190 | + yield gen_metadata_chunk( |
| 191 | + metadata={ |
| 192 | + 'search_state': 'finished' |
| 193 | + } |
| 194 | + ) |
187 | 195 | INFO("planning finished") |
188 | 196 | break |
189 | 197 | else: |
190 | 198 | INFO(f"searching: {new_queries}") |
| 199 | + # YIELD state with metadata |
| 200 | + yield gen_metadata_chunk( |
| 201 | + metadata={ |
| 202 | + 'search_rounds': planned_rounds, |
| 203 | + 'search_state': 'searching', |
| 204 | + 'search_keywords': new_queries |
| 205 | + } |
| 206 | + ) |
191 | 207 | search_results = await self.search_engine.asearch(new_queries) |
192 | 208 | INFO(f"search result: {search_results}") |
| 209 | + # YIELD state with metadata |
| 210 | + yield gen_metadata_chunk( |
| 211 | + metadata={ |
| 212 | + 'search_rounds': planned_rounds, |
| 213 | + 'search_state': 'searched', |
| 214 | + 'search_keywords': new_queries, |
| 215 | + 'search_results': search_results |
| 216 | + } |
| 217 | + ) |
193 | 218 | for search_result in search_results: |
194 | 219 | references.add_result(query=search_result.query, results=[search_result]) |
195 | 220 |
|
|
0 commit comments