@@ -6522,11 +6522,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 /*
  * Helper function for __schedule()
  *
- * If a task does not have signals pending, deactivate it
- * Otherwise marks the task's __state as RUNNING
+ * Tries to deactivate the task, unless the should_block arg
+ * is false or a signal is pending. If a signal is pending,
+ * marks the task's __state as RUNNING (and clears
+ * blocked_on).
  */
 static bool try_to_block_task(struct rq *rq, struct task_struct *p,
-			      unsigned long *task_state_p)
+			      unsigned long *task_state_p, bool should_block)
 {
 	unsigned long task_state = *task_state_p;
 	int flags = DEQUEUE_NOCLOCK;
@@ -6537,6 +6539,16 @@ static bool try_to_block_task(struct rq *rq, struct task_struct *p,
 		return false;
 	}
 
+	/*
+	 * We check should_block after signal_pending because we
+	 * will want to wake the task in that case. But if
+	 * should_block is false, it's likely due to the task being
+	 * blocked on a mutex, and we want to keep it on the runqueue
+	 * to be selectable for proxy-execution.
+	 */
+	if (!should_block)
+		return false;
+
 	p->sched_contributes_to_load =
 		(task_state & TASK_UNINTERRUPTIBLE) &&
 		!(task_state & TASK_NOLOAD) &&
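
For orientation, this is roughly how the head of try_to_block_task() reads once both hunks above are applied. The signal check is the context elided between the hunks; it is reconstructed here from the surrounding mainline code and should be treated as an approximation, not a quote of the patch:

	static bool try_to_block_task(struct rq *rq, struct task_struct *p,
				      unsigned long *task_state_p, bool should_block)
	{
		unsigned long task_state = *task_state_p;
		int flags = DEQUEUE_NOCLOCK;

		/* Approximated elided context: a pending signal wins and the
		 * task is left runnable rather than deactivated. */
		if (signal_pending_state(task_state, p)) {
			WRITE_ONCE(p->__state, TASK_RUNNING);
			*task_state_p = TASK_RUNNING;
			return false;
		}

		/* New in this patch: a mutex-blocked task stays queued so
		 * proxy execution can still select it. */
		if (!should_block)
			return false;

		/* ... sched_contributes_to_load accounting and the actual
		 * dequeue follow, unchanged by this patch ... */
	}

The ordering matters: the signal check must run first so that a task with a pending signal is woken (left RUNNING) rather than silently kept on the runqueue by the should_block bail-out.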
@@ -6560,6 +6572,88 @@ static bool try_to_block_task(struct rq *rq, struct task_struct *p,
 	return true;
 }
 
+#ifdef CONFIG_SCHED_PROXY_EXEC
+static inline void proxy_resched_idle(struct rq *rq)
+{
+	put_prev_set_next_task(rq, rq->donor, rq->idle);
+	rq_set_donor(rq, rq->idle);
+	set_tsk_need_resched(rq->idle);
+}
+
+static bool __proxy_deactivate(struct rq *rq, struct task_struct *donor)
+{
+	unsigned long state = READ_ONCE(donor->__state);
+
+	/* Don't deactivate if the state has been changed to TASK_RUNNING */
+	if (state == TASK_RUNNING)
+		return false;
+	/*
+	 * Because we got donor from pick_next_task(), it is *crucial*
+	 * that we call proxy_resched_idle() before we deactivate it.
+	 * Once we deactivate donor, donor->on_rq is set to zero,
+	 * which allows ttwu() to immediately try to wake the task on
+	 * another rq. So we cannot use *any* references to donor
+	 * after that point, and things like cfs_rq->curr or rq->donor
+	 * need to be changed from next *before* we deactivate.
+	 */
+	proxy_resched_idle(rq);
+	return try_to_block_task(rq, donor, &state, true);
+}
+
+static struct task_struct *proxy_deactivate(struct rq *rq, struct task_struct *donor)
+{
+	if (!__proxy_deactivate(rq, donor)) {
+		/*
+		 * XXX: For now, if deactivation failed, set donor
+		 * as unblocked, as we aren't doing proxy-migrations
+		 * yet (more logic will be needed then).
+		 */
+		donor->blocked_on = NULL;
+	}
+	return NULL;
+}
+
+/*
+ * Initial simple sketch that just deactivates the blocked task
+ * chosen by pick_next_task() so we can then pick something that
+ * isn't blocked.
+ */
+static struct task_struct *
+find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
+{
+	struct mutex *mutex;
+
+	mutex = donor->blocked_on;
+	/* Something changed in the chain, so pick again */
+	if (!mutex)
+		return NULL;
+	/*
+	 * By taking mutex->wait_lock we hold off concurrent mutex_unlock()
+	 * and ensure @owner sticks around.
+	 */
+	guard(raw_spinlock)(&mutex->wait_lock);
+
+	/* Check again that donor is blocked with wait_lock held */
+	if (!task_is_blocked(donor) || mutex != __get_task_blocked_on(donor)) {
+		/*
+		 * Something changed in the blocked_on chain and
+		 * we don't know if only at this level. So, let's
+		 * just bail out completely and let __schedule()
+		 * figure things out (pick_again loop).
+		 */
+		return NULL; /* do pick_next_task() again */
+	}
+	return proxy_deactivate(rq, donor);
+}
+#else /* SCHED_PROXY_EXEC */
+static struct task_struct *
+find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
+{
+	WARN_ONCE(1, "This should never be called in the !SCHED_PROXY_EXEC case\n");
+	return donor;
+}
+#endif /* SCHED_PROXY_EXEC */
+
 /*
  * __schedule() is the main scheduler function.
  *
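
Both find_proxy_task() and the __schedule() changes below hinge on task_is_blocked(), which is defined elsewhere in the series. A plausible shape for it, shown here as an assumption rather than a quote of the actual patch:

	/* Hypothetical sketch of the helper this hunk relies on. Without
	 * CONFIG_SCHED_PROXY_EXEC it is constant-false, which is why the
	 * !SCHED_PROXY_EXEC stub of find_proxy_task() above can never be
	 * reached from the pick path. */
	static inline bool task_is_blocked(struct task_struct *p)
	{
		if (!IS_ENABLED(CONFIG_SCHED_PROXY_EXEC))
			return false;
		return !!p->blocked_on;
	}

Also worth noting: guard(raw_spinlock)(&mutex->wait_lock) is the scope-based lock guard from <linux/cleanup.h>, so wait_lock is released automatically on each of the early returns in find_proxy_task().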
@@ -6672,12 +6766,25 @@ static void __sched notrace __schedule(int sched_mode)
 			goto picked;
 		}
 	} else if (!preempt && prev_state) {
-		try_to_block_task(rq, prev, &prev_state);
+		/*
+		 * We pass !task_is_blocked() as the should_block arg
+		 * in order to keep mutex-blocked tasks on the runqueue
+		 * for selection with proxy-exec (without proxy-exec
+		 * task_is_blocked() will always be false).
+		 */
+		try_to_block_task(rq, prev, &prev_state,
+				  !task_is_blocked(prev));
 		switch_count = &prev->nvcsw;
 	}
 
-	next = pick_next_task(rq, prev, &rf);
+pick_again:
+	next = pick_next_task(rq, rq->donor, &rf);
 	rq_set_donor(rq, next);
+	if (unlikely(task_is_blocked(next))) {
+		next = find_proxy_task(rq, next, &rf);
+		if (!next)
+			goto pick_again;
+	}
 picked:
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
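
The pick_again loop is the heart of the change: if the picked task turns out to be mutex-blocked, find_proxy_task() deactivates it (for now) and returns NULL, and __schedule() simply picks again until it finds something runnable. A minimal userspace model of that control flow, with hypothetical stand-ins for the scheduler pieces:

	#include <stdbool.h>
	#include <stdio.h>

	struct task {
		const char *name;
		bool blocked_on;	/* stand-in for p->blocked_on != NULL */
		bool queued;		/* stand-in for "task is on the runqueue" */
	};

	/* Stand-in for pick_next_task(): first queued task wins. */
	static struct task *pick_next(struct task *rq, int n)
	{
		for (int i = 0; i < n; i++)
			if (rq[i].queued)
				return &rq[i];
		return NULL;	/* idle */
	}

	/* Stand-in for find_proxy_task(): like proxy_deactivate() in this
	 * initial sketch, just dequeue the blocked pick and return NULL so
	 * the caller picks again. */
	static struct task *find_proxy(struct task *donor)
	{
		donor->queued = false;
		return NULL;
	}

	int main(void)
	{
		/* waiter blocks on a mutex held by owner; both start queued */
		struct task rq[] = {
			{ "waiter", true,  true },
			{ "owner",  false, true },
		};
		struct task *next;

	pick_again:
		next = pick_next(rq, 2);
		if (next && next->blocked_on) {
			next = find_proxy(next);
			if (!next)
				goto pick_again;
		}
		printf("running: %s\n", next ? next->name : "idle");
		return 0;
	}

Running this prints "running: owner". With proxy execution proper, find_proxy_task() would eventually return the mutex owner to run with the donor's scheduler context; this initial sketch only guarantees that __schedule() never leaves a blocked task as the final pick.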