Skip to content

Commit 2e91480

Browse files
test(graph_engine): block response nodes during streaming (#26377)
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
1 parent d00a72a commit 2e91480

File tree

2 files changed

+250
-0
lines changed

2 files changed

+250
-0
lines changed
Lines changed: 222 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,222 @@
# Dify chatflow fixture: Start -> LLM -> Answer -> Answer 2.
# Exercises the ordering of two chained answer nodes: the combined streamed
# output must show answer 1's static text before answer 2's LLM-backed text
# (the description below spells out the expected shape).
app:
  description: 'this is a chatflow with 2 answer nodes.


    it''s outouts should like:


    ```

    --- answer 1 ---


    foo

    --- answer 2 ---


    <llm''s outputs>

    ```'
  icon: 🤖
  icon_background: '#FFEAD5'
  mode: advanced-chat
  name: test-answer-order
  use_icon_as_answer_icon: false
# OpenAI marketplace plugin pinned to an exact identifier+hash so the fixture
# resolves the same plugin build everywhere.
dependencies:
- current_identifier: null
  type: marketplace
  value:
    marketplace_plugin_unique_identifier: langgenius/openai:0.2.6@e2665624a156f52160927bceac9e169bd7e5ae6b936ae82575e14c90af390e6e
  version: null
kind: app
version: 0.4.0
workflow:
  conversation_variables: []
  environment_variables: []
  # All optional chat features are disabled; only the graph below matters
  # for this fixture.
  features:
    file_upload:
      allowed_file_extensions:
      - .JPG
      - .JPEG
      - .PNG
      - .GIF
      - .WEBP
      - .SVG
      allowed_file_types:
      - image
      allowed_file_upload_methods:
      - local_file
      - remote_url
      enabled: false
      fileUploadConfig:
        audio_file_size_limit: 50
        batch_count_limit: 5
        file_size_limit: 15
        image_file_size_limit: 10
        video_file_size_limit: 100
        workflow_file_upload_limit: 10
      image:
        enabled: false
        number_limits: 3
        transfer_methods:
        - local_file
        - remote_url
      number_limits: 3
    opening_statement: ''
    retriever_resource:
      enabled: true
    sensitive_word_avoidance:
      enabled: false
    speech_to_text:
      enabled: false
    suggested_questions: []
    suggested_questions_after_answer:
      enabled: false
    text_to_speech:
      enabled: false
      language: ''
      voice: ''
  graph:
    # Edge wiring: start -> llm -> answer 1 -> answer 2 (ids reference the
    # nodes declared under `nodes:` below).
    edges:
    # answer 1 (1759052466526) -> answer 2 (1759052469368)
    - data:
        isInIteration: false
        isInLoop: false
        sourceType: answer
        targetType: answer
      id: 1759052466526-source-1759052469368-target
      source: '1759052466526'
      sourceHandle: source
      target: '1759052469368'
      targetHandle: target
      type: custom
      zIndex: 0
    # start (1759052439553) -> llm (1759052580454)
    - data:
        isInIteration: false
        isInLoop: false
        sourceType: start
        targetType: llm
      id: 1759052439553-source-1759052580454-target
      source: '1759052439553'
      sourceHandle: source
      target: '1759052580454'
      targetHandle: target
      type: custom
      zIndex: 0
    # llm (1759052580454) -> answer 1 (1759052466526)
    - data:
        isInIteration: false
        isInLoop: false
        sourceType: llm
        targetType: answer
      id: 1759052580454-source-1759052466526-target
      source: '1759052580454'
      sourceHandle: source
      target: '1759052466526'
      targetHandle: target
      type: custom
      zIndex: 0
    nodes:
    # Start node: no input variables.
    - data:
        selected: false
        title: Start
        type: start
        variables: []
      height: 52
      id: '1759052439553'
      position:
        x: 30
        y: 242
      positionAbsolute:
        x: 30
        y: 242
      sourcePosition: right
      targetPosition: left
      type: custom
      width: 242
    # Answer 1: static text only. The single-quoted scalar folds to
    # "--- answer 1 ---\n\nfoo\n" (blank lines become newlines).
    - data:
        answer: '--- answer 1 ---


          foo

          '
        selected: false
        title: Answer
        type: answer
        variables: []
      height: 100
      id: '1759052466526'
      position:
        x: 632
        y: 242
      positionAbsolute:
        x: 632
        y: 242
      selected: true
      sourcePosition: right
      targetPosition: left
      type: custom
      width: 242
    # Answer 2: interpolates the LLM node's `text` output; folds to
    # "--- answer 2 ---\n\n<llm text>\n".
    - data:
        answer: '--- answer 2 ---


          {{#1759052580454.text#}}

          '
        selected: false
        title: Answer 2
        type: answer
        variables: []
      height: 103
      id: '1759052469368'
      position:
        x: 934
        y: 242
      positionAbsolute:
        x: 934
        y: 242
      selected: false
      sourcePosition: right
      targetPosition: left
      type: custom
      width: 242
    # LLM node: forwards the raw user query as the user prompt; mocked in
    # the companion test, so the concrete model config is incidental.
    - data:
        context:
          enabled: false
          variable_selector: []
        model:
          completion_params:
            temperature: 0.7
          mode: chat
          name: gpt-4o
          provider: langgenius/openai/openai
        prompt_template:
        - id: 5c1d873b-06b2-4dce-939e-672882bbd7c0
          role: system
          text: ''
        - role: user
          text: '{{#sys.query#}}'
        selected: false
        title: LLM
        type: llm
        vision:
          enabled: false
      height: 88
      id: '1759052580454'
      position:
        x: 332
        y: 242
      positionAbsolute:
        x: 332
        y: 242
      selected: false
      sourcePosition: right
      targetPosition: left
      type: custom
      width: 242
    # Editor viewport only; no runtime effect.
    viewport:
      x: 126.2797574512839
      y: 289.55932160537446
      zoom: 1.0743222672006216
  rag_pipeline_variables: []
Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
from .test_mock_config import MockConfigBuilder
from .test_table_runner import TableTestRunner, WorkflowTestCase

# Node id of the LLM node inside the `test-answer-order` fixture graph.
LLM_NODE_ID = "1759052580454"


def test_answer_nodes_emit_in_order() -> None:
    """Verify two chained answer nodes produce their text in graph order.

    The `test-answer-order` fixture wires Start -> LLM -> Answer -> Answer 2;
    the combined `answer` output must contain answer 1's static section
    before answer 2's LLM-backed section.
    """
    # Mock the LLM node's output so the run is deterministic; the generic
    # LLM response is never used because the node-level output takes over.
    builder = MockConfigBuilder().with_llm_response("unused default")
    builder = builder.with_node_output(LLM_NODE_ID, {"text": "mocked llm text"})
    llm_mock = builder.build()

    # Answer 1 contributes the static "foo" section; answer 2 appends the
    # mocked LLM text.
    expected_answer = "--- answer 1 ---\n\nfoo\n--- answer 2 ---\n\nmocked llm text\n"

    outcome = TableTestRunner().run_test_case(
        WorkflowTestCase(
            fixture_path="test-answer-order",
            query="",
            expected_outputs={"answer": expected_answer},
            use_auto_mock=True,
            mock_config=llm_mock,
        )
    )

    assert outcome.success, outcome.error

0 commit comments

Comments
 (0)