@@ -66,279 +66,3 @@ fn parse_drv_path(path: &str) -> Option<Drv> {
 
     Some(Drv { name, hash })
 }
-
-#[tokio::test]
-async fn test_rebuild_cancel_tree_display() {
-    let mut state = JobsStateInner::default();
-
-    // Read log files to extract target info
-    let log1_path = env!("CARGO_MANIFEST_DIR")
-        .parse::<std::path::PathBuf>()
-        .unwrap()
-        .parent()
-        .unwrap()
-        .parent()
-        .unwrap()
-        .join("OUT1");
-
-    let log2_path = env!("CARGO_MANIFEST_DIR")
-        .parse::<std::path::PathBuf>()
-        .unwrap()
-        .parent()
-        .unwrap()
-        .parent()
-        .unwrap()
-        .join("OUT2");
-
-    let log1_content =
-        fs::read_to_string(&log1_path).expect("Failed to read OUT1");
-    let _log2_content =
-        fs::read_to_string(&log2_path).expect("Failed to read OUT2");
-
-    let (target_ref, root_drv) = extract_target_info(&log1_content)
-        .expect("Failed to extract target info from OUT1");
-
-    println!("\n=== Setting up state for rebuild+cancel test ===");
-    println!("Target: {}", target_ref);
-    println!("Root drv: {:?}", root_drv);
-
-    // Set up the dependency tree with the root drv
-    let mut node = DrvNode::default();
-    node.root = root_drv.clone();
-    state.dep_tree.nodes.insert(root_drv.clone(), node);
-    state.dep_tree.tree_roots.insert(root_drv.clone());
-
-    // Mark root drv as already built (simulates --rebuild)
-    state.already_built_drvs.insert(root_drv.clone());
-
-    // Create first target and simulate it being built
-    let requester1 = RequesterId(1);
-    state.register_requester(requester1);
-    let target1_id =
-        state.create_target(target_ref.clone(), root_drv.clone(), requester1);
-
-    // Simulate an active build job for the root drv (rebuilding)
-    let job1 = nix_btm::handle_internal_json::BuildJob {
-        jid: JobId(1),
-        rid: requester1,
-        drv: root_drv.clone(),
-        status: JobStatus::BuildPhaseType("buildPhase".to_string()),
-        start_time_ns: 1000,
-        stop_time_ns: None,
-    };
-
-    state.jid_to_job.insert(JobId(1), job1);
-    state
-        .drv_to_jobs
-        .entry(root_drv.clone())
-        .or_default()
-        .insert(JobId(1));
-
-    // Update target status
-    state.update_target_status(target1_id);
-
-    println!("\n=== Before cancellation ===");
-    {
-        let target1 = state
-            .targets
-            .get(&target1_id)
-            .expect("Target 1 should exist");
-        println!("Target 1 status: {:?}", target1.status);
-        assert_eq!(
-            target1.status,
-            TargetStatus::Active,
-            "Should be Active while building"
-        );
-
-        let root_status =
-            state.get_drv_status_for_target(&root_drv, target1_id);
-        println!("Root drv status: {:?}", root_status);
-        assert!(
-            matches!(root_status, JobStatus::BuildPhaseType(_)),
-            "Root should be BuildPhaseType"
-        );
-    }
-
-    // Simulate cancellation: mark the job as cancelled and clean up its requester
-    println!("\n=== Simulating cancellation of first build ===");
-    if let Some(job) = state.jid_to_job.get_mut(&JobId(1)) {
-        job.status = JobStatus::Cancelled;
-        job.stop_time_ns = Some(2000);
-    }
-
-    // Call cleanup_requester_inner to simulate what happens when the user cancels.
-    // This is the CRITICAL step that was missing; it is what triggers the bug!
-    state.cleanup_requester_inner(requester1);
-
-    println!("\n=== After cancellation and cleanup ===");
-    {
-        let target1 = state
-            .targets
-            .get(&target1_id)
-            .expect("Target 1 should exist");
-        println!("Target 1 status: {:?}", target1.status);
-        println!("Target 1 was_cancelled: {:?}", target1.was_cancelled);
-        assert_eq!(
-            target1.status,
-            TargetStatus::Cancelled,
-            "Target 1 should be Cancelled (not Queued!)"
-        );
-        assert!(
-            target1.was_cancelled,
-            "Target 1 was_cancelled flag should be true"
-        );
-
-        // After cleanup, jobs are removed, so the drv status should be derived
-        // from the was_cancelled flag
-        let root_status =
-            state.get_drv_status_for_target(&root_drv, target1_id);
-        println!("Root drv status: {:?}", root_status);
-        assert_eq!(
-            root_status,
-            JobStatus::Cancelled,
-            "Root drv should show Cancelled (was being rebuilt)"
-        );
-    }
-
-    // Generate tree and verify
-    println!("\n=== Generating tree for cancelled target 1 ===");
-    {
-        let mut cache = TreeCache::default();
-        let tree_items =
-            gen_drv_tree_leaves_from_state(&mut cache, &state, PruneType::None);
-
-        assert!(!tree_items.is_empty(), "Tree should have items");
-        println!("Tree items:");
-        for item in &tree_items {
-            println!("  {}", item.identifier());
-        }
-
-        // Verify the target is in the tree (tree display logic is separate from status)
-        let target_item = &tree_items[0];
-        assert!(
-            target_item.identifier().contains("github:nixos/nix"),
-            "Tree should contain the target"
-        );
-    }
-
-    // Now create second target (same drv, different requester)
-    println!("\n=== Creating second build of same target ===");
-    let requester2 = RequesterId(2);
-    state.register_requester(requester2);
-    let target2_id =
-        state.create_target(target_ref.clone(), root_drv.clone(), requester2);
-
-    // Verify different target IDs
-    assert_ne!(target1_id, target2_id, "Should be different target IDs");
-
-    // Verify they share the same root drv
-    let target1 = state.targets.get(&target1_id).unwrap();
-    let target2 = state.targets.get(&target2_id).unwrap();
-    assert_eq!(target1.root_drv, target2.root_drv, "Should share root drv");
-
-    // Simulate second build starting
-    let job2 = nix_btm::handle_internal_json::BuildJob {
-        jid: JobId(2),
-        rid: requester2,
-        drv: root_drv.clone(),
-        status: JobStatus::BuildPhaseType("buildPhase".to_string()),
-        start_time_ns: 3000,
-        stop_time_ns: None,
-    };
-
-    state.jid_to_job.insert(JobId(2), job2);
-    state
-        .drv_to_jobs
-        .entry(root_drv.clone())
-        .or_default()
-        .insert(JobId(2));
-    state.update_target_status(target2_id);
-
-    println!("\n=== Second build active ===");
-    {
-        // Target 1 should still be cancelled
-        let target1 = state.targets.get(&target1_id).unwrap();
-        assert_eq!(target1.status, TargetStatus::Cancelled);
-
-        // Target 2 should be active
-        let target2 = state.targets.get(&target2_id).unwrap();
-        assert_eq!(target2.status, TargetStatus::Active);
-
-        // Root drv status should be different for each target
-        let root1_status =
-            state.get_drv_status_for_target(&root_drv, target1_id);
-        let root2_status =
-            state.get_drv_status_for_target(&root_drv, target2_id);
-
-        println!("Root status in target1: {:?}", root1_status);
-        println!("Root status in target2: {:?}", root2_status);
-
-        assert_eq!(root1_status, JobStatus::Cancelled);
-        assert!(matches!(root2_status, JobStatus::BuildPhaseType(_)));
-    }
-
-    // Cancel second build
-    println!("\n=== Cancelling second build ===");
-    if let Some(job) = state.jid_to_job.get_mut(&JobId(2)) {
-        job.status = JobStatus::Cancelled;
-        job.stop_time_ns = Some(4000);
-    }
-    // Call cleanup_requester_inner for second requester
-    state.cleanup_requester_inner(requester2);
-
-    println!("\n=== Both builds cancelled ===");
-    {
-        let target1 = state.targets.get(&target1_id).unwrap();
-        let target2 = state.targets.get(&target2_id).unwrap();
-
-        assert_eq!(target1.status, TargetStatus::Cancelled);
-        assert_eq!(target2.status, TargetStatus::Cancelled);
-
-        // Both should show root drv as Cancelled
-        let root1_status =
-            state.get_drv_status_for_target(&root_drv, target1_id);
-        let root2_status =
-            state.get_drv_status_for_target(&root_drv, target2_id);
-
-        assert_eq!(root1_status, JobStatus::Cancelled);
-        assert_eq!(root2_status, JobStatus::Cancelled);
-    }
-
-    // Generate final tree
-    println!("\n=== Final tree with both cancelled targets ===");
-    {
-        // Debug: print target statuses in state before tree generation
-        println!("Target statuses in state before tree generation:");
-        for (id, target) in &state.targets {
-            println!("  {:?}: {:?}", id, target.status);
-        }
-
-        let mut cache = TreeCache::default();
-        let tree_items =
-            gen_drv_tree_leaves_from_state(&mut cache, &state, PruneType::None);
-
-        // Should have 2 targets in the tree (both cancelled)
-        assert_eq!(tree_items.len(), 2, "Should have 2 targets in tree");
-
-        println!("Total tree items: {}", tree_items.len());
-        for item in tree_items {
-            println!("  {}", item.identifier());
-        }
-
-        // The fix: update_target_status now increments state.version, which
-        // invalidates the TreeCache. This ensures that when the tree is
-        // regenerated, it uses the updated Cancelled status from the state
-        // (see the sketch after this diff).
-        //
-        // The tree generation logic at tree_generation.rs:491 creates the
-        // display text as: format!("{} - {:?}", target.reference, target.status)
-        //
-        // So with the cache properly invalidated, both targets show
-        // "Cancelled" in their display text, matching the state we verified
-        // above.
-    }
-
-    println!("\n=== Test passed! ===");
-}
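
The closing comment in the deleted test describes the fix it exercised: update_target_status bumps state.version, which makes the TreeCache stale, so the next tree generation re-reads target statuses instead of serving the old display text. Below is a minimal self-contained sketch of that version-counter invalidation pattern. The State and TreeCache shapes, field names, and the "github:nixos/nix" display string are illustrative stand-ins, not the actual nix-btm types.

// Minimal sketch of version-based cache invalidation (illustrative names,
// not the real nix-btm API): every status update bumps `version`, and the
// cached tree is rebuilt whenever its recorded version lags behind.
#[derive(Default)]
struct State {
    version: u64,
    target_status: &'static str, // stand-in for the real target map
}

impl State {
    fn update_target_status(&mut self, status: &'static str) {
        self.target_status = status;
        // The fix: bump the version so any cached tree becomes stale.
        self.version += 1;
    }
}

#[derive(Default)]
struct TreeCache {
    cached_version: Option<u64>,
    lines: Vec<String>,
}

impl TreeCache {
    fn tree(&mut self, state: &State) -> &[String] {
        if self.cached_version != Some(state.version) {
            // Stale or never built: regenerate display text from current
            // state, mirroring the `format!("{} - {:?}", reference, status)`
            // shape cited in the deleted test's comment.
            self.lines =
                vec![format!("github:nixos/nix - {}", state.target_status)];
            self.cached_version = Some(state.version);
        }
        &self.lines
    }
}

fn main() {
    let mut state = State::default();
    let mut cache = TreeCache::default();

    state.update_target_status("Active");
    assert_eq!(cache.tree(&state)[0], "github:nixos/nix - Active");

    // Without the version bump, this second call would keep serving the
    // cached "Active" line; with it, the cache regenerates and shows the
    // cancellation, matching the assertion flow of the deleted test.
    state.update_target_status("Cancelled");
    assert_eq!(cache.tree(&state)[0], "github:nixos/nix - Cancelled");
}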