1
+ from unittest .mock import patch
2
+
1
3
from core .app .entities .app_invoke_entities import InvokeFrom
4
+ from core .workflow .entities .node_entities import NodeRunMetadataKey , NodeRunResult
2
5
from core .workflow .enums import SystemVariableKey
3
6
from core .workflow .graph_engine .entities .event import (
4
7
GraphRunPartialSucceededEvent ,
5
8
NodeRunExceptionEvent ,
9
+ NodeRunFailedEvent ,
6
10
NodeRunStreamChunkEvent ,
7
11
)
8
12
from core .workflow .graph_engine .entities .graph import Graph
9
13
from core .workflow .graph_engine .graph_engine import GraphEngine
14
+ from core .workflow .nodes .event .event import RunCompletedEvent , RunStreamChunkEvent
15
+ from core .workflow .nodes .llm .node import LLMNode
10
16
from models .enums import UserFrom
11
- from models .workflow import WorkflowType
17
+ from models .workflow import WorkflowNodeExecutionStatus , WorkflowType
12
18
13
19
14
20
class ContinueOnErrorTestHelper :
@@ -492,10 +498,7 @@ def test_no_node_in_fail_branch_continue_on_error():
492
498
"edges" : FAIL_BRANCH_EDGES [:- 1 ],
493
499
"nodes" : [
494
500
{"data" : {"title" : "Start" , "type" : "start" , "variables" : []}, "id" : "start" },
495
- {
496
- "data" : {"title" : "success" , "type" : "answer" , "answer" : "HTTP request successful" },
497
- "id" : "success" ,
498
- },
501
+ {"data" : {"title" : "success" , "type" : "answer" , "answer" : "HTTP request successful" }, "id" : "success" },
499
502
ContinueOnErrorTestHelper .get_http_node (),
500
503
],
501
504
}
@@ -506,3 +509,47 @@ def test_no_node_in_fail_branch_continue_on_error():
506
509
assert any (isinstance (e , NodeRunExceptionEvent ) for e in events )
507
510
assert any (isinstance (e , GraphRunPartialSucceededEvent ) and e .outputs == {} for e in events )
508
511
assert sum (1 for e in events if isinstance (e , NodeRunStreamChunkEvent )) == 0
512
+
513
+
514
def test_stream_output_with_fail_branch_continue_on_error():
    """Streamed chunks must survive the fail-branch error strategy.

    The LLM node is patched to stream one chunk and then complete
    successfully, so the run should produce exactly one stream-chunk
    event and no node failure/exception events.
    """
    config = {
        "edges": FAIL_BRANCH_EDGES,
        "nodes": [
            {"data": {"title": "Start", "type": "start", "variables": []}, "id": "start"},
            {"data": {"title": "success", "type": "answer", "answer": "LLM request successful"}, "id": "success"},
            {"data": {"title": "error", "type": "answer", "answer": "{{#node.text#}}"}, "id": "error"},
            ContinueOnErrorTestHelper.get_llm_node(),
        ],
    }
    engine = ContinueOnErrorTestHelper.create_test_graph_engine(config)

    def fake_llm_run(self):
        # Stand-in for LLMNode._run: emit a single stream chunk, then a
        # successful completion carrying token/price metadata.
        texts = ["hi", "bye", "good morning"]
        yield RunStreamChunkEvent(chunk_content=texts[0], from_variable_selector=[self.node_id, "text"])
        yield RunCompletedEvent(
            run_result=NodeRunResult(
                status=WorkflowNodeExecutionStatus.SUCCEEDED,
                inputs={},
                process_data={},
                outputs={},
                metadata={
                    NodeRunMetadataKey.TOTAL_TOKENS: 1,
                    NodeRunMetadataKey.TOTAL_PRICE: 1,
                    NodeRunMetadataKey.CURRENCY: "USD",
                },
            )
        )

    with patch.object(LLMNode, "_run", new=fake_llm_run):
        events = list(engine.run())
        # Exactly one chunk was streamed and nothing failed or raised.
        assert sum(isinstance(e, NodeRunStreamChunkEvent) for e in events) == 1
        assert all(not isinstance(e, NodeRunFailedEvent | NodeRunExceptionEvent) for e in events)
0 commit comments