Skip to content

Commit 7b3d61f

Browse files
johnstultz-work authored and
Peter Zijlstra committed
sched: Split out __schedule() deactivate task logic into a helper
As we're going to re-use the deactivation logic, split it into a helper. Signed-off-by: John Stultz <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Reviewed-by: Metin Kaya <[email protected]> Reviewed-by: Qais Yousef <[email protected]> Tested-by: K Prateek Nayak <[email protected]> Tested-by: Metin Kaya <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 18adad1 commit 7b3d61f

File tree

1 file changed

+40
-27
lines changed

1 file changed

+40
-27
lines changed

kernel/sched/core.c

Lines changed: 40 additions & 27 deletions
Original file line number | Diff line number | Diff line change
@@ -6490,6 +6490,45 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
64906490
#define SM_PREEMPT 1
64916491
#define SM_RTLOCK_WAIT 2
64926492

6493+
/*
6494+
* Helper function for __schedule()
6495+
*
6496+
* If a task does not have signals pending, deactivate it
6497+
* Otherwise marks the task's __state as RUNNING
6498+
*/
6499+
static bool try_to_block_task(struct rq *rq, struct task_struct *p,
6500+
unsigned long task_state)
6501+
{
6502+
int flags = DEQUEUE_NOCLOCK;
6503+
6504+
if (signal_pending_state(task_state, p)) {
6505+
WRITE_ONCE(p->__state, TASK_RUNNING);
6506+
return false;
6507+
}
6508+
6509+
p->sched_contributes_to_load =
6510+
(task_state & TASK_UNINTERRUPTIBLE) &&
6511+
!(task_state & TASK_NOLOAD) &&
6512+
!(task_state & TASK_FROZEN);
6513+
6514+
if (unlikely(is_special_task_state(task_state)))
6515+
flags |= DEQUEUE_SPECIAL;
6516+
6517+
/*
6518+
* __schedule() ttwu()
6519+
* prev_state = prev->state; if (p->on_rq && ...)
6520+
* if (prev_state) goto out;
6521+
* p->on_rq = 0; smp_acquire__after_ctrl_dep();
6522+
* p->state = TASK_WAKING
6523+
*
6524+
* Where __schedule() and ttwu() have matching control dependencies.
6525+
*
6526+
* After this, schedule() must not care about p->state any more.
6527+
*/
6528+
block_task(rq, p, flags);
6529+
return true;
6530+
}
6531+
64936532
/*
64946533
* __schedule() is the main scheduler function.
64956534
*
@@ -6598,33 +6637,7 @@ static void __sched notrace __schedule(int sched_mode)
65986637
goto picked;
65996638
}
66006639
} else if (!preempt && prev_state) {
6601-
if (signal_pending_state(prev_state, prev)) {
6602-
WRITE_ONCE(prev->__state, TASK_RUNNING);
6603-
} else {
6604-
int flags = DEQUEUE_NOCLOCK;
6605-
6606-
prev->sched_contributes_to_load =
6607-
(prev_state & TASK_UNINTERRUPTIBLE) &&
6608-
!(prev_state & TASK_NOLOAD) &&
6609-
!(prev_state & TASK_FROZEN);
6610-
6611-
if (unlikely(is_special_task_state(prev_state)))
6612-
flags |= DEQUEUE_SPECIAL;
6613-
6614-
/*
6615-
* __schedule() ttwu()
6616-
* prev_state = prev->state; if (p->on_rq && ...)
6617-
* if (prev_state) goto out;
6618-
* p->on_rq = 0; smp_acquire__after_ctrl_dep();
6619-
* p->state = TASK_WAKING
6620-
*
6621-
* Where __schedule() and ttwu() have matching control dependencies.
6622-
*
6623-
* After this, schedule() must not care about p->state any more.
6624-
*/
6625-
block_task(rq, prev, flags);
6626-
block = true;
6627-
}
6640+
block = try_to_block_task(rq, prev, prev_state);
66286641
switch_count = &prev->nvcsw;
66296642
}
66306643

0 commit comments

Comments
 (0)