Skip to content

Commit 7e7e786

Browse files
committed
Merge branch 'v2' into knowledge_workflow
# Conflicts:
#	apps/application/flow/workflow_manage.py
#	apps/common/utils/tool_code.py
#	ui/src/views/tool/component/ToolListContainer.vue
2 parents ada109d + d6a3a3c commit 7e7e786

File tree

201 files changed

+2574
-999
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

201 files changed

+2574
-999
lines changed

.github/workflows/build-and-push-vector-model.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ on:
55
inputs:
66
dockerImageTag:
77
description: 'Docker Image Tag'
8-
default: 'v2.0.2'
8+
default: 'v2.0.3'
99
required: true
1010
architecture:
1111
description: 'Architecture'

.github/workflows/build-and-push.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ on:
77
inputs:
88
dockerImageTag:
99
description: 'Image Tag'
10-
default: 'v2.3.0-dev'
10+
default: 'v2.4.0-dev'
1111
required: true
1212
dockerImageTagWithLatest:
1313
description: '是否发布latest tag(正式发版时选择,测试版本切勿选择)'

.gitignore

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -188,4 +188,5 @@ apps/models_provider/impl/*/icon/
188188
apps/models_provider/impl/tencent_model_provider/credential/stt.py
189189
apps/models_provider/impl/tencent_model_provider/model/stt.py
190190
tmp/
191-
config.yml
191+
config.yml
192+
.SANDBOX_BANNED_HOSTS

apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py

Lines changed: 16 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,7 @@ def event_content(response,
9393
reasoning_content_chunk = chunk.additional_kwargs.get('reasoning_content', '')
9494
else:
9595
reasoning_content_chunk = reasoning_chunk.get('reasoning_content')
96+
content_chunk = reasoning._normalize_content(content_chunk)
9697
all_text += content_chunk
9798
if reasoning_content_chunk is None:
9899
reasoning_content_chunk = ''
@@ -191,23 +192,17 @@ def execute(self, message_list: List[BaseMessage],
191192
manage, padding_problem_text, chat_user_id, chat_user_type,
192193
no_references_setting,
193194
model_setting,
194-
mcp_enable, mcp_tool_ids, mcp_servers, mcp_source, tool_enable, tool_ids, mcp_output_enable)
195+
mcp_enable, mcp_tool_ids, mcp_servers, mcp_source, tool_enable, tool_ids,
196+
mcp_output_enable)
195197
else:
196198
return self.execute_block(message_list, chat_id, problem_text, post_response_handler, chat_model,
197199
paragraph_list,
198200
manage, padding_problem_text, chat_user_id, chat_user_type, no_references_setting,
199201
model_setting,
200-
mcp_enable, mcp_tool_ids, mcp_servers, mcp_source, tool_enable, tool_ids, mcp_output_enable)
202+
mcp_enable, mcp_tool_ids, mcp_servers, mcp_source, tool_enable, tool_ids,
203+
mcp_output_enable)
201204

202205
def get_details(self, manage, **kwargs):
203-
# 删除临时生成的MCP代码文件
204-
if self.context.get('execute_ids'):
205-
executor = ToolExecutor(CONFIG.get('SANDBOX'))
206-
# 清理工具代码文件,延时删除,避免文件被占用
207-
for tool_id in self.context.get('execute_ids'):
208-
code_path = f'{executor.sandbox_path}/execute/{tool_id}.py'
209-
if os.path.exists(code_path):
210-
os.remove(code_path)
211206
return {
212207
'step_type': 'chat_step',
213208
'run_time': self.context['run_time'],
@@ -254,7 +249,6 @@ def _handle_mcp_request(self, mcp_enable, tool_enable, mcp_source, mcp_servers,
254249
if tool_enable:
255250
if tool_ids and len(tool_ids) > 0: # 如果有工具ID,则将其转换为MCP
256251
self.context['tool_ids'] = tool_ids
257-
self.context['execute_ids'] = []
258252
for tool_id in tool_ids:
259253
tool = QuerySet(Tool).filter(id=tool_id).first()
260254
if tool is None or tool.is_active is False:
@@ -264,17 +258,15 @@ def _handle_mcp_request(self, mcp_enable, tool_enable, mcp_source, mcp_servers,
264258
params = json.loads(rsa_long_decrypt(tool.init_params))
265259
else:
266260
params = {}
267-
_id, tool_config = executor.get_tool_mcp_config(tool.code, params)
261+
tool_config = executor.get_tool_mcp_config(tool.code, params)
268262

269-
self.context['execute_ids'].append(_id)
270263
mcp_servers_config[str(tool.id)] = tool_config
271264

272265
if len(mcp_servers_config) > 0:
273266
return mcp_response_generator(chat_model, message_list, json.dumps(mcp_servers_config), mcp_output_enable)
274267

275268
return None
276269

277-
278270
def get_stream_result(self, message_list: List[BaseMessage],
279271
chat_model: BaseChatModel = None,
280272
paragraph_list=None,
@@ -304,7 +296,8 @@ def get_stream_result(self, message_list: List[BaseMessage],
304296
else:
305297
# 处理 MCP 请求
306298
mcp_result = self._handle_mcp_request(
307-
mcp_enable, tool_enable, mcp_source, mcp_servers, mcp_tool_ids, tool_ids, mcp_output_enable, chat_model, message_list,
299+
mcp_enable, tool_enable, mcp_source, mcp_servers, mcp_tool_ids, tool_ids, mcp_output_enable, chat_model,
300+
message_list,
308301
)
309302
if mcp_result:
310303
return mcp_result, True
@@ -329,7 +322,8 @@ def execute_stream(self, message_list: List[BaseMessage],
329322
tool_ids=None,
330323
mcp_output_enable=True):
331324
chat_result, is_ai_chat = self.get_stream_result(message_list, chat_model, paragraph_list,
332-
no_references_setting, problem_text, mcp_enable, mcp_tool_ids, mcp_servers, mcp_source, tool_enable, tool_ids,
325+
no_references_setting, problem_text, mcp_enable, mcp_tool_ids,
326+
mcp_servers, mcp_source, tool_enable, tool_ids,
333327
mcp_output_enable)
334328
chat_record_id = uuid.uuid7()
335329
r = StreamingHttpResponse(
@@ -404,7 +398,9 @@ def execute_block(self, message_list: List[BaseMessage],
404398
# 调用模型
405399
try:
406400
chat_result, is_ai_chat = self.get_block_result(message_list, chat_model, paragraph_list,
407-
no_references_setting, problem_text, mcp_enable, mcp_tool_ids, mcp_servers, mcp_source, tool_enable, tool_ids, mcp_output_enable)
401+
no_references_setting, problem_text, mcp_enable,
402+
mcp_tool_ids, mcp_servers, mcp_source, tool_enable,
403+
tool_ids, mcp_output_enable)
408404
if is_ai_chat:
409405
request_token = chat_model.get_num_tokens_from_messages(message_list)
410406
response_token = chat_model.get_num_tokens(chat_result.content)
@@ -416,10 +412,10 @@ def execute_block(self, message_list: List[BaseMessage],
416412
reasoning_result_end = reasoning.get_end_reasoning_content()
417413
content = reasoning_result.get('content') + reasoning_result_end.get('content')
418414
if 'reasoning_content' in chat_result.response_metadata:
419-
reasoning_content = chat_result.response_metadata.get('reasoning_content', '')
415+
reasoning_content = (chat_result.response_metadata.get('reasoning_content', '') or '')
420416
else:
421-
reasoning_content = reasoning_result.get('reasoning_content') + reasoning_result_end.get(
422-
'reasoning_content')
417+
reasoning_content = (reasoning_result.get('reasoning_content') or "") + (reasoning_result_end.get(
418+
'reasoning_content') or "")
423419
post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text,
424420
content, manage, self, padding_problem_text,
425421
reasoning_content=reasoning_content)

apps/application/flow/i_step_node.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,7 @@ def handler(self, workflow):
9595
application_public_access_client.access_num = application_public_access_client.access_num + 1
9696
application_public_access_client.intraday_access_num = application_public_access_client.intraday_access_num + 1
9797
application_public_access_client.save()
98+
self.chat_info = None
9899

99100

100101
class KnowledgeWorkflowPostHandler(WorkFlowPostHandler):

apps/application/flow/step_node/ai_chat_step_node/impl/base_chat_node.py

Lines changed: 3 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -108,9 +108,9 @@ def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, wor
108108
content = reasoning_result.get('content') + reasoning_result_end.get('content')
109109
meta = {**response.response_metadata, **response.additional_kwargs}
110110
if 'reasoning_content' in meta:
111-
reasoning_content = meta.get('reasoning_content', '')
111+
reasoning_content = (meta.get('reasoning_content', '') or '')
112112
else:
113-
reasoning_content = reasoning_result.get('reasoning_content') + reasoning_result_end.get('reasoning_content')
113+
reasoning_content = (reasoning_result.get('reasoning_content') or '') + (reasoning_result_end.get('reasoning_content') or '')
114114
_write_context(node_variable, workflow_variable, node, workflow, content, reasoning_content)
115115

116116

@@ -233,7 +233,6 @@ def _handle_mcp_request(self, mcp_enable, tool_enable, mcp_source, mcp_servers,
233233
if tool_enable:
234234
if tool_ids and len(tool_ids) > 0: # 如果有工具ID,则将其转换为MCP
235235
self.context['tool_ids'] = tool_ids
236-
self.context['execute_ids'] = []
237236
for tool_id in tool_ids:
238237
tool = QuerySet(Tool).filter(id=tool_id).first()
239238
if not tool.is_active:
@@ -243,9 +242,8 @@ def _handle_mcp_request(self, mcp_enable, tool_enable, mcp_source, mcp_servers,
243242
params = json.loads(rsa_long_decrypt(tool.init_params))
244243
else:
245244
params = {}
246-
_id, tool_config = executor.get_tool_mcp_config(tool.code, params)
245+
tool_config = executor.get_tool_mcp_config(tool.code, params)
247246

248-
self.context['execute_ids'].append(_id)
249247
mcp_servers_config[str(tool.id)] = tool_config
250248

251249
if len(mcp_servers_config) > 0:
@@ -307,14 +305,6 @@ def reset_message_list(message_list: List[BaseMessage], answer_text):
307305
return result
308306

309307
def get_details(self, index: int, **kwargs):
310-
# 删除临时生成的MCP代码文件
311-
if self.context.get('execute_ids'):
312-
executor = ToolExecutor(CONFIG.get('SANDBOX'))
313-
# 清理工具代码文件,延时删除,避免文件被占用
314-
for tool_id in self.context.get('execute_ids'):
315-
code_path = f'{executor.sandbox_path}/execute/{tool_id}.py'
316-
if os.path.exists(code_path):
317-
os.remove(code_path)
318308
return {
319309
'name': self.node.properties.get('stepName'),
320310
"index": index,

apps/application/flow/step_node/image_to_video_step_node/impl/base_image_to_video_node.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,7 @@ def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_t
7474
def get_file_base64(self, image_url):
7575
try:
7676
if isinstance(image_url, list):
77-
image_url = image_url[0].get('file_id')
77+
image_url = image_url[0].get('file_id') if 'file_id' in image_url[0] else image_url[0].get('url')
7878
if isinstance(image_url, str) and not image_url.startswith('http'):
7979
file = QuerySet(File).filter(id=image_url).first()
8080
file_bytes = file.get_bytes()

apps/application/flow/step_node/image_understand_step_node/impl/base_image_understand_node.py

Lines changed: 32 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -131,11 +131,18 @@ def generate_history_human_message_for_details(self, chat_record):
131131
image_list = data['image_list']
132132
if len(image_list) == 0 or data['dialogue_type'] == 'WORKFLOW':
133133
return HumanMessage(content=chat_record.problem_text)
134-
file_id_list = [image.get('file_id') for image in image_list]
134+
135+
file_id_list = []
136+
url_list = []
137+
for image in image_list:
138+
if 'file_id' in image:
139+
file_id_list.append(image.get('file_id'))
140+
elif 'url' in image:
141+
url_list.append(image.get('url'))
135142
return HumanMessage(content=[
136143
{'type': 'text', 'text': data['question']},
137-
*[{'type': 'image_url', 'image_url': {'url': f'./oss/file/{file_id}'}} for file_id in file_id_list]
138-
144+
*[{'type': 'image_url', 'image_url': {'url': f'./oss/file/{file_id}'}} for file_id in file_id_list],
145+
*[{'type': 'image_url', 'image_url': {'url': url}} for url in url_list]
139146
])
140147
return HumanMessage(content=chat_record.problem_text)
141148

@@ -155,13 +162,22 @@ def generate_history_human_message(self, chat_record):
155162
image_list = data['image_list']
156163
if len(image_list) == 0 or data['dialogue_type'] == 'WORKFLOW':
157164
return HumanMessage(content=chat_record.problem_text)
158-
image_base64_list = [file_id_to_base64(image.get('file_id')) for image in image_list]
165+
file_id_list = []
166+
url_list = []
167+
for image in image_list:
168+
if 'file_id' in image:
169+
file_id_list.append(image.get('file_id'))
170+
elif 'url' in image:
171+
url_list.append(image.get('url'))
172+
image_base64_list = [file_id_to_base64(file_id) for file_id in file_id_list]
173+
159174
return HumanMessage(
160175
content=[
161176
{'type': 'text', 'text': data['question']},
162177
*[{'type': 'image_url',
163178
'image_url': {'url': f'data:image/{base64_image[1]};base64,{base64_image[0]}'}} for
164-
base64_image in image_base64_list]
179+
base64_image in image_base64_list],
180+
*[{'type': 'image_url', 'image_url': url} for url in url_list]
165181
])
166182
return HumanMessage(content=chat_record.problem_text)
167183

@@ -177,13 +193,17 @@ def _process_images(self, image):
177193
images.append({'type': 'image_url', 'image_url': {'url': image}})
178194
elif image is not None and len(image) > 0:
179195
for img in image:
180-
file_id = img['file_id']
181-
file = QuerySet(File).filter(id=file_id).first()
182-
image_bytes = file.get_bytes()
183-
base64_image = base64.b64encode(image_bytes).decode("utf-8")
184-
image_format = what(None, image_bytes)
185-
images.append(
186-
{'type': 'image_url', 'image_url': {'url': f'data:image/{image_format};base64,{base64_image}'}})
196+
if 'file_id' in img:
197+
file_id = img['file_id']
198+
file = QuerySet(File).filter(id=file_id).first()
199+
image_bytes = file.get_bytes()
200+
base64_image = base64.b64encode(image_bytes).decode("utf-8")
201+
image_format = what(None, image_bytes)
202+
images.append(
203+
{'type': 'image_url', 'image_url': {'url': f'data:image/{image_format};base64,{base64_image}'}})
204+
elif 'url' in img and img['url'].startswith('http'):
205+
images.append(
206+
{'type': 'image_url', 'image_url': {'url': img["url"]}})
187207
return images
188208

189209
def generate_message_list(self, image_model, system: str, prompt: str, history_message, image):

apps/application/flow/step_node/loop_node/impl/base_loop_node.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -226,11 +226,14 @@ class BaseLoopNode(ILoopNode):
226226
def save_context(self, details, workflow_manage):
227227
self.context['loop_context_data'] = details.get('loop_context_data')
228228
self.context['loop_answer_data'] = details.get('loop_answer_data')
229+
self.context['loop_node_data'] = details.get('loop_node_data')
229230
self.context['result'] = details.get('result')
230231
self.context['params'] = details.get('params')
231232
self.context['run_time'] = details.get('run_time')
232233
self.context['index'] = details.get('current_index')
233234
self.context['item'] = details.get('current_item')
235+
for key, value in (details.get('loop_context_data') or {}).items():
236+
self.context[key] = value
234237
self.answer_text = ""
235238

236239
def get_answer_list(self) -> List[Answer] | None:

apps/application/flow/step_node/text_to_video_step_node/impl/base_text_to_video_node.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,6 @@ def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_t
3737
self.context['dialogue_type'] = dialogue_type
3838
self.context['negative_prompt'] = self.generate_prompt_question(negative_prompt)
3939
video_urls = ttv_model.generate_video(question, negative_prompt)
40-
print('video_urls', video_urls)
4140
# 保存图片
4241
if video_urls is None:
4342
return NodeResult({'answer': gettext('Failed to generate video')}, {})

0 commit comments

Comments (0)