Commit df9e210

Merge tag 'sched_urgent_for_v6.13_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Borislav Petkov:

 - Remove wrong enqueueing of a task for a later wakeup when a task
   blocks on a RT mutex

 - Do not setup a new deadline entity on a boosted task as that has
   happened already

 - Update preempt= kernel command line param

 - Prevent needless softirqd wakeups in the idle task's context

 - Detect the case where the idle load balancer CPU becomes busy and
   avoid unnecessary load balancing invocation

 - Remove an unnecessary load balancing need_resched() call in
   nohz_csd_func()

 - Allow for raising of SCHED_SOFTIRQ softirq type on RT but retain the
   warning to catch any other cases

 - Remove a wrong warning when a cpuset update makes the task affinity
   no longer a subset of the cpuset

* tag 'sched_urgent_for_v6.13_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking: rtmutex: Fix wake_q logic in task_blocks_on_rt_mutex
  sched/deadline: Fix warning in migrate_enable for boosted tasks
  sched/core: Update kernel boot parameters for LAZY preempt.
  sched/core: Prevent wakeup of ksoftirqd during idle load balance
  sched/fair: Check idle_cpu() before need_resched() to detect ilb CPU turning busy
  sched/core: Remove the unnecessary need_resched() check in nohz_csd_func()
  softirq: Allow raising SCHED_SOFTIRQ from SMP-call-function on RT kernel
  sched: fix warning in sched_setaffinity
  sched/deadline: Fix replenish_dl_new_period dl_server condition
2 parents aeb6893 + 82f9cc0 commit df9e210

7 files changed (+22, -12 lines)


Documentation/admin-guide/kernel-parameters.txt

Lines changed: 5 additions & 0 deletions

@@ -4822,6 +4822,11 @@
 			    can be preempted anytime. Tasks will also yield
 			    contended spinlocks (if the critical section isn't
 			    explicitly preempt disabled beyond the lock itself).
+			lazy	  - Scheduler controlled. Similar to full but instead
+				    of preempting the task immediately, the task gets
+				    one HZ tick time to yield itself before the
+				    preemption will be forced. One preemption is when the
+				    task returns to user space.

 	print-fatal-signals=
 			[KNL] debug: print fatal signals
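For a rough feel of the lazy mode documented in this hunk, here is a standalone C sketch of the escalation it describes. The flag pair and helper names are assumptions modeled on the kernel's TIF_NEED_RESCHED / TIF_NEED_RESCHED_LAZY bits, not the real implementation:

#include <stdbool.h>
#include <stdio.h>

struct task {
	bool need_resched;      /* preempt as soon as possible */
	bool need_resched_lazy; /* preempt at the next convenient point */
};

/* The scheduler wants the task out, but not urgently: set only the lazy bit. */
static void resched_lazy(struct task *t)
{
	t->need_resched_lazy = true;
}

/*
 * Tick handler: the task had one HZ tick to yield on its own; if it is
 * still running, escalate the lazy request to a forced preemption.
 */
static void tick(struct task *t)
{
	if (t->need_resched_lazy)
		t->need_resched = true;
}

int main(void)
{
	struct task t = { false, false };

	resched_lazy(&t);  /* lazy request: no immediate preemption */
	printf("forced after request: %d\n", t.need_resched); /* 0 */
	tick(&t);          /* one tick later the request is promoted */
	printf("forced after tick:    %d\n", t.need_resched); /* 1 */
	return 0;
}

Returning to user space is one of the points where the lazy bit is honored before the tick forces it, which is what the last sentence of the new documentation refers to.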

kernel/locking/rtmutex.c

Lines changed: 0 additions & 3 deletions

@@ -1248,10 +1248,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,

 	/* Check whether the waiter should back out immediately */
 	rtm = container_of(lock, struct rt_mutex, rtmutex);
-	preempt_disable();
 	res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx, wake_q);
-	wake_up_q(wake_q);
-	preempt_enable();
 	if (res) {
 		raw_spin_lock(&task->pi_lock);
 		rt_mutex_dequeue(lock, waiter);
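The dropped wake_up_q() call flushed the caller's wake queue prematurely: with wake_q, wakeups are collected while locks are held and the queue's owner flushes them once afterwards. A simplified userspace analogue of that pattern, with invented names:

#include <stdio.h>

struct fake_task {
	const char *name;
	struct fake_task *wake_next;
};

struct fake_wake_q {
	struct fake_task *head;
};

/* Collect: just link the task into the queue, no wakeup yet. */
static void fake_wake_q_add(struct fake_wake_q *q, struct fake_task *t)
{
	t->wake_next = q->head;
	q->head = t;
}

/* Flush: performed once by the queue's owner, after locks are dropped. */
static void fake_wake_up_q(struct fake_wake_q *q)
{
	for (struct fake_task *t = q->head; t; t = t->wake_next)
		printf("waking %s\n", t->name);
	q->head = NULL;
}

int main(void)
{
	struct fake_wake_q q = { NULL };
	struct fake_task a = { "waiter-a", NULL };

	fake_wake_q_add(&q, &a); /* e.g. inside a lock-holding helper */
	fake_wake_up_q(&q);      /* exactly once, by the caller, at the end */
	return 0;
}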

kernel/sched/core.c

Lines changed: 2 additions & 2 deletions

@@ -1283,9 +1283,9 @@ static void nohz_csd_func(void *info)
 	WARN_ON(!(flags & NOHZ_KICK_MASK));

 	rq->idle_balance = idle_cpu(cpu);
-	if (rq->idle_balance && !need_resched()) {
+	if (rq->idle_balance) {
 		rq->nohz_idle_balance = flags;
-		raise_softirq_irqoff(SCHED_SOFTIRQ);
+		__raise_softirq_irqoff(SCHED_SOFTIRQ);
 	}
 }
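The two raise helpers differ in one step: both set the pending bit, but only the plain variant may additionally wake ksoftirqd, which is the wakeup this commit avoids from the idle task's context. A runnable toy model of that distinction (names prefixed model_ are invented; the real helpers live in kernel/softirq.c):

#include <stdio.h>

static unsigned int pending;   /* model of the per-CPU softirq bitmap */
static int ksoftirqd_wakeups;  /* count the wakeups the old code caused */

/* Model of __raise_softirq_irqoff(): only mark the softirq pending. */
static void model__raise_softirq_irqoff(unsigned int nr)
{
	pending |= 1U << nr;
}

/* Model of raise_softirq_irqoff(): may also wake ksoftirqd. */
static void model_raise_softirq_irqoff(unsigned int nr, int in_interrupt)
{
	model__raise_softirq_irqoff(nr);
	if (!in_interrupt)
		ksoftirqd_wakeups++;
}

int main(void)
{
	/* nohz_csd_func() runs where the pending bit alone is enough:
	 * the softirq is handled on the spot, no daemon wakeup needed. */
	model__raise_softirq_irqoff(7 /* SCHED_SOFTIRQ */);
	printf("pending=%#x wakeups=%d\n", pending, ksoftirqd_wakeups);
	return 0;
}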

kernel/sched/deadline.c

Lines changed: 2 additions & 1 deletion

@@ -781,7 +781,7 @@ static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
 	 * If it is a deferred reservation, and the server
 	 * is not handling an starvation case, defer it.
 	 */
-	if (dl_se->dl_defer & !dl_se->dl_defer_running) {
+	if (dl_se->dl_defer && !dl_se->dl_defer_running) {
 		dl_se->dl_throttled = 1;
 		dl_se->dl_defer_armed = 1;
 	}
@@ -2042,6 +2042,7 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
 	} else if (flags & ENQUEUE_REPLENISH) {
 		replenish_dl_entity(dl_se);
 	} else if ((flags & ENQUEUE_RESTORE) &&
+		   !is_dl_boosted(dl_se) &&
 		   dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) {
 		setup_new_dl_entity(dl_se);
 	}
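The first hunk turns a bitwise AND into the logical AND the comment intends; the second skips setting up a new deadline entity on a boosted task, since boosting already did that. As a standalone reminder of why `&` and `&&` are not interchangeable (plain C, not kernel code): with one-bit fields like dl_defer the two happen to agree, but the bitwise form silently inspects only bit 0 of its left operand.

#include <stdio.h>

int main(void)
{
	unsigned int a = 2, b = 0;

	printf("a & !b  = %u\n", a & !b);  /* 2 & 1 -> 0: condition lost */
	printf("a && !b = %d\n", a && !b); /* true && true -> 1 */
	return 0;
}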

kernel/sched/fair.c

Lines changed: 1 addition & 1 deletion

@@ -12574,7 +12574,7 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
 		 * work being done for other CPUs. Next load
 		 * balancing owner will pick it up.
 		 */
-		if (need_resched()) {
+		if (!idle_cpu(this_cpu) && need_resched()) {
 			if (flags & NOHZ_STATS_KICK)
 				has_blocked_load = true;
 			if (flags & NOHZ_NEXT_KICK)
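The reasoning behind the new condition: need_resched() can already be set while the balancer CPU is still idle, so by itself it is a poor signal that the CPU turned busy and the remote balancing should be abandoned. A toy model of the before/after bail-out decision (all model_ names are invented stand-ins for the kernel predicates):

#include <stdbool.h>
#include <stdio.h>

static bool model_idle_cpu(void)     { return true; } /* CPU still idle  */
static bool model_need_resched(void) { return true; } /* flag already set */

int main(void)
{
	/* Old bail-out: need_resched() alone aborted the remote balancing
	 * even though this CPU never actually turned busy. */
	bool old_bail = model_need_resched();

	/* New bail-out: only stop when the ilb CPU really has work, i.e.
	 * it is no longer idle AND a resched is pending. */
	bool new_bail = !model_idle_cpu() && model_need_resched();

	printf("old=%d new=%d\n", old_bail, new_bail); /* old=1 new=0 */
	return 0;
}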

kernel/sched/syscalls.c

Lines changed: 1 addition & 1 deletion

@@ -1200,7 +1200,7 @@ int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
 		bool empty = !cpumask_and(new_mask, new_mask,
 					  ctx->user_mask);

-		if (WARN_ON_ONCE(empty))
+		if (empty)
 			cpumask_copy(new_mask, cpus_allowed);
 	}
 	__set_cpus_allowed_ptr(p, ctx);
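Why the warning was wrong: when a cpuset update shrinks the allowed set, an empty intersection with the mask the user requested earlier is a legitimate state, and falling back to the full allowed mask is the intended behavior. A plain-C model of that fallback, using an unsigned long bitmap in place of cpumask_t:

#include <stdio.h>

int main(void)
{
	unsigned long cpus_allowed = 0x0f; /* what the cpuset now permits */
	unsigned long user_mask    = 0xf0; /* earlier sched_setaffinity() request */
	unsigned long new_mask     = cpus_allowed & user_mask;

	if (new_mask == 0) {
		/*
		 * A cpuset update made the user's mask disjoint from the
		 * allowed set. That is a valid situation, not a bug, so
		 * fall back to the full allowed mask instead of warning.
		 */
		new_mask = cpus_allowed;
	}
	printf("effective mask: %#lx\n", new_mask);
	return 0;
}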

kernel/softirq.c

Lines changed: 11 additions & 4 deletions

@@ -280,17 +280,24 @@ static inline void invoke_softirq(void)
 		wakeup_softirqd();
 }

+#define SCHED_SOFTIRQ_MASK	BIT(SCHED_SOFTIRQ)
+
 /*
  * flush_smp_call_function_queue() can raise a soft interrupt in a function
- * call. On RT kernels this is undesired and the only known functionality
- * in the block layer which does this is disabled on RT. If soft interrupts
- * get raised which haven't been raised before the flush, warn so it can be
+ * call. On RT kernels this is undesired and the only known functionalities
+ * are in the block layer which is disabled on RT, and in the scheduler for
+ * idle load balancing. If soft interrupts get raised which haven't been
+ * raised before the flush, warn if it is not a SCHED_SOFTIRQ so it can be
  * investigated.
  */
 void do_softirq_post_smp_call_flush(unsigned int was_pending)
 {
-	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
+	unsigned int is_pending = local_softirq_pending();
+
+	if (unlikely(was_pending != is_pending)) {
+		WARN_ON_ONCE(was_pending != (is_pending & ~SCHED_SOFTIRQ_MASK));
 		invoke_softirq();
+	}
 }

 #else /* CONFIG_PREEMPT_RT */
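The effect of the new check, in isolation: a flush that raises only SCHED_SOFTIRQ no longer trips the warning, while any other newly raised softirq still does. A standalone demonstration of the masked comparison (SCHED_SOFTIRQ is bit 7 in the kernel's enum, used here purely as a bit number):

#include <stdio.h>

#define SCHED_SOFTIRQ		7U
#define SCHED_SOFTIRQ_MASK	(1U << SCHED_SOFTIRQ)

int main(void)
{
	unsigned int was_pending = 0x01;
	unsigned int is_pending  = 0x01 | SCHED_SOFTIRQ_MASK; /* SCHED raised during flush */

	/* Old check: any newly raised bit, including SCHED_SOFTIRQ, warned. */
	printf("old check warns: %d\n", was_pending != is_pending);

	/* New check: SCHED_SOFTIRQ is tolerated, anything else still warns. */
	printf("new check warns: %d\n",
	       was_pending != (is_pending & ~SCHED_SOFTIRQ_MASK));
	return 0;
}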
