@@ -1635,8 +1635,8 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
 				   !i915_request_completed(rq));

 			GEM_BUG_ON(i915_request_is_active(w));
-			if (list_empty(&w->sched.link))
-				continue; /* Not yet submitted; unready */
+			if (!i915_request_is_ready(w))
+				continue;

 			if (rq_prio(w) < rq_prio(rq))
 				continue;
@@ -2354,6 +2354,145 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
 	}
 }

+static void __execlists_hold(struct i915_request *rq)
+{
+	LIST_HEAD(list);
+
+	do {
+		struct i915_dependency *p;
+
+		if (i915_request_is_active(rq))
+			__i915_request_unsubmit(rq);
+
+		RQ_TRACE(rq, "on hold\n");
+		clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+		list_move_tail(&rq->sched.link, &rq->engine->active.hold);
+		i915_request_set_hold(rq);
+
+		list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+			struct i915_request *w =
+				container_of(p->waiter, typeof(*w), sched);
+
+			/* Leave semaphores spinning on the other engines */
+			if (w->engine != rq->engine)
+				continue;
+
+			if (!i915_request_is_ready(w))
+				continue;
+
+			if (i915_request_completed(w))
+				continue;
+
+			/* Skip waiters that are already on hold */
+			if (i915_request_on_hold(w))
+				continue;
+
+			list_move_tail(&w->sched.link, &list);
+		}
+
+		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+	} while (rq);
+}
+
+__maybe_unused
+static void execlists_hold(struct intel_engine_cs *engine,
+			   struct i915_request *rq)
+{
+	spin_lock_irq(&engine->active.lock);
+
+	/*
+	 * Transfer this request onto the hold queue to prevent it
+	 * being resubmitted to HW (and potentially completed) before we have
+	 * released it. Since we may have already submitted following
+	 * requests, we need to remove those as well.
+	 */
+	GEM_BUG_ON(i915_request_on_hold(rq));
+	GEM_BUG_ON(rq->engine != engine);
+	__execlists_hold(rq);
+
+	spin_unlock_irq(&engine->active.lock);
+}
+
+static bool hold_request(const struct i915_request *rq)
+{
+	struct i915_dependency *p;
+
+	/*
+	 * If one of our ancestors is on hold, we must also be on hold,
+	 * otherwise we will bypass it and execute before it.
+	 */
+	list_for_each_entry(p, &rq->sched.signalers_list, signal_link) {
+		const struct i915_request *s =
+			container_of(p->signaler, typeof(*s), sched);
+
+		if (s->engine != rq->engine)
+			continue;
+
+		if (i915_request_on_hold(s))
+			return true;
+	}
+
+	return false;
+}
+
+static void __execlists_unhold(struct i915_request *rq)
+{
+	LIST_HEAD(list);
+
+	do {
+		struct i915_dependency *p;
+
+		GEM_BUG_ON(!i915_request_on_hold(rq));
+		GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
+
+		i915_request_clear_hold(rq);
+		list_move_tail(&rq->sched.link,
+			       i915_sched_lookup_priolist(rq->engine,
+							  rq_prio(rq)));
+		set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+		RQ_TRACE(rq, "hold release\n");
+
+		/* Also release any children on this engine that are ready */
+		list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+			struct i915_request *w =
+				container_of(p->waiter, typeof(*w), sched);
+
+			if (w->engine != rq->engine)
+				continue;
+
+			if (!i915_request_on_hold(w))
+				continue;
+
+			/* Check that no other parents are also on hold */
+			if (hold_request(w))
+				continue;
+
+			list_move_tail(&w->sched.link, &list);
+		}
+
+		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+	} while (rq);
+}
+
+__maybe_unused
+static void execlists_unhold(struct intel_engine_cs *engine,
+			     struct i915_request *rq)
+{
+	spin_lock_irq(&engine->active.lock);
+
+	/*
+	 * Move this request back to the priority queue, and all of its
+	 * children and grandchildren that were suspended along with it.
+	 */
+	__execlists_unhold(rq);
+
+	if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
+		engine->execlists.queue_priority_hint = rq_prio(rq);
+		tasklet_hi_schedule(&engine->execlists.tasklet);
+	}
+
+	spin_unlock_irq(&engine->active.lock);
+}
+
 static noinline void preempt_reset(struct intel_engine_cs *engine)
 {
 	const unsigned int bit = I915_RESET_ENGINE + engine->id;
@@ -2466,6 +2605,13 @@ static void submit_queue(struct intel_engine_cs *engine,
 	__submit_queue_imm(engine);
 }

+static bool ancestor_on_hold(const struct intel_engine_cs *engine,
+			     const struct i915_request *rq)
+{
+	GEM_BUG_ON(i915_request_on_hold(rq));
+	return !list_empty(&engine->active.hold) && hold_request(rq);
+}
+
 static void execlists_submit_request(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
@@ -2474,12 +2620,17 @@ static void execlists_submit_request(struct i915_request *request)
 	/* Will be called from irq-context when using foreign fences. */
 	spin_lock_irqsave(&engine->active.lock, flags);

-	queue_request(engine, request);
+	if (unlikely(ancestor_on_hold(engine, request))) {
+		list_add_tail(&request->sched.link, &engine->active.hold);
+		i915_request_set_hold(request);
+	} else {
+		queue_request(engine, request);

-	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
-	GEM_BUG_ON(list_empty(&request->sched.link));
+		GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+		GEM_BUG_ON(list_empty(&request->sched.link));

-	submit_queue(engine, request);
+		submit_queue(engine, request);
+	}

 	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
@@ -3328,6 +3479,10 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 		i915_priolist_free(p);
 	}

+	/* On-hold requests will be flushed to timeline upon their release */
+	list_for_each_entry(rq, &engine->active.hold, sched.link)
+		mark_eio(rq);
+
 	/* Cancel all attached virtual engines */
 	while ((rb = rb_first_cached(&execlists->virtual))) {
 		struct virtual_engine *ve =
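
For context, a minimal sketch of how the hold/unhold pair added above might be used by a later caller, e.g. to park a guilty request while it is inspected away from the HW. The process_request_offline() helper is a hypothetical placeholder and is not part of this patch; only execlists_hold()/execlists_unhold() as introduced above are assumed.

/*
 * Illustrative only: suspend rq (and any ready waiters on this engine),
 * do some work while it cannot be resubmitted or completed behind our
 * back, then return it to the priority queue for rescheduling.
 */
static void process_request_on_hold(struct intel_engine_cs *engine,
				    struct i915_request *rq)
{
	execlists_hold(engine, rq);	/* moves rq onto engine->active.hold */

	process_request_offline(rq);	/* hypothetical offline work */

	execlists_unhold(engine, rq);	/* requeues rq and kicks the tasklet */
}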