Skip to content

Commit cd9626e

Browse files
Peter Zijlstra authored and Ingo Molnar committed
sched/fair: Fix external p->on_rq users
Sean noted that ever since commit 152e11f ("sched/fair: Implement delayed dequeue") KVM's preemption notifiers have started mis-classifying preemption vs blocking. Notably p->on_rq is no longer sufficient to determine if a task is runnable or blocked -- the aforementioned commit introduces tasks that remain on the runqueue even though they will not run again, and should be considered blocked for many cases. Add the task_is_runnable() helper to classify things and audit all external users of the p->on_rq state. Also add a few comments. Fixes: 152e11f ("sched/fair: Implement delayed dequeue") Reported-by: Sean Christopherson <[email protected]> Tested-by: Sean Christopherson <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Signed-off-by: Ingo Molnar <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent c650812 commit cd9626e

File tree

8 files changed

+38
-7
lines changed

8 files changed

+38
-7
lines changed

include/linux/sched.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2133,6 +2133,11 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
21332133

21342134
#endif /* CONFIG_SMP */
21352135

2136+
static inline bool task_is_runnable(struct task_struct *p)
2137+
{
2138+
return p->on_rq && !p->se.sched_delayed;
2139+
}
2140+
21362141
extern bool sched_task_on_rq(struct task_struct *p);
21372142
extern unsigned long get_wchan(struct task_struct *p);
21382143
extern struct task_struct *cpu_curr_snapshot(int cpu);

kernel/events/core.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9251,7 +9251,7 @@ static void perf_event_switch(struct task_struct *task,
92519251
},
92529252
};
92539253

9254-
if (!sched_in && task->on_rq) {
9254+
if (!sched_in && task_is_runnable(task)) {
92559255
switch_event.event_id.header.misc |=
92569256
PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
92579257
}

kernel/freezer.c

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,12 @@ static int __set_task_frozen(struct task_struct *p, void *arg)
109109
{
110110
unsigned int state = READ_ONCE(p->__state);
111111

112-
if (p->on_rq)
112+
/*
113+
* Allow freezing the sched_delayed tasks; they will not execute until
114+
* ttwu() fixes them up, so it is safe to swap their state now, instead
115+
* of waiting for them to get fully dequeued.
116+
*/
117+
if (task_is_runnable(p))
113118
return 0;
114119

115120
if (p != current && task_curr(p))

kernel/rcu/tasks.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -985,6 +985,15 @@ static bool rcu_tasks_is_holdout(struct task_struct *t)
985985
if (!READ_ONCE(t->on_rq))
986986
return false;
987987

988+
/*
989+
* t->on_rq && !t->se.sched_delayed *could* be considered sleeping but
990+
* since it is a spurious state (it will transition into the
991+
* traditional blocked state or get woken up without outside
992+
* dependencies), not considering it such should only affect timing.
993+
*
994+
* Be conservative for now and not include it.
995+
*/
996+
988997
/*
989998
* Idle tasks (or idle injection) within the idle loop are RCU-tasks
990999
* quiescent states. But CPU boot code performed by the idle task

kernel/sched/core.c

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -548,6 +548,11 @@ sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
548548
* ON_RQ_MIGRATING state is used for migration without holding both
549549
* rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
550550
*
551+
* Additionally it is possible to be ->on_rq but still be considered not
552+
* runnable when p->se.sched_delayed is true. These tasks are on the runqueue
553+
* but will be dequeued as soon as they get picked again. See the
554+
* task_is_runnable() helper.
555+
*
551556
* p->on_cpu <- { 0, 1 }:
552557
*
553558
* is set by prepare_task() and cleared by finish_task() such that it will be
@@ -4317,9 +4322,10 @@ static bool __task_needs_rq_lock(struct task_struct *p)
43174322
* @arg: Argument to function.
43184323
*
43194324
* Fix the task in it's current state by avoiding wakeups and or rq operations
4320-
* and call @func(@arg) on it. This function can use ->on_rq and task_curr()
4321-
* to work out what the state is, if required. Given that @func can be invoked
4322-
* with a runqueue lock held, it had better be quite lightweight.
4325+
* and call @func(@arg) on it. This function can use task_is_runnable() and
4326+
* task_curr() to work out what the state is, if required. Given that @func
4327+
* can be invoked with a runqueue lock held, it had better be quite
4328+
* lightweight.
43234329
*
43244330
* Returns:
43254331
* Whatever @func returns

kernel/time/tick-sched.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -434,6 +434,12 @@ static void tick_nohz_kick_task(struct task_struct *tsk)
434434
* smp_mb__after_spin_lock()
435435
* tick_nohz_task_switch()
436436
* LOAD p->tick_dep_mask
437+
*
438+
* XXX given a task picks up the dependency on schedule(), should we
439+
* only care about tasks that are currently on the CPU instead of all
440+
* that are on the runqueue?
441+
*
442+
* That is, does this want to be: task_on_cpu() / task_curr()?
437443
*/
438444
if (!sched_task_on_rq(tsk))
439445
return;

kernel/trace/trace_selftest.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1485,7 +1485,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
14851485
/* reset the max latency */
14861486
tr->max_latency = 0;
14871487

1488-
while (p->on_rq) {
1488+
while (task_is_runnable(p)) {
14891489
/*
14901490
* Sleep to make sure the -deadline thread is asleep too.
14911491
* On virtual machines we can't rely on timings,

virt/kvm/kvm_main.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6387,7 +6387,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
63876387

63886388
WRITE_ONCE(vcpu->scheduled_out, true);
63896389

6390-
if (current->on_rq && vcpu->wants_to_run) {
6390+
if (task_is_runnable(current) && vcpu->wants_to_run) {
63916391
WRITE_ONCE(vcpu->preempted, true);
63926392
WRITE_ONCE(vcpu->ready, true);
63936393
}

0 commit comments

Comments
 (0)