Commit 772b78c

Merge tag 'sched_urgent_for_v6.16_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Borislav Petkov:

 - Fix the calculation of the deadline server task's runtime as this mishap was preventing realtime tasks from running

 - Avoid a race condition during migrate-swapping two tasks

 - Fix the string reported for the "none" dynamic preemption option

* tag 'sched_urgent_for_v6.16_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/deadline: Fix dl_server runtime calculation formula
  sched/core: Fix migrate_swap() vs. hotplug
  sched: Fix preemption string of preempt_dynamic_none
2 parents 95eb0d3 + fc975cf commit 772b78c

3 files changed: +21 -16 lines

kernel/sched/core.c

Lines changed: 6 additions & 1 deletion
@@ -3943,6 +3943,11 @@ static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
 	if (!scx_allow_ttwu_queue(p))
 		return false;
 
+#ifdef CONFIG_SMP
+	if (p->sched_class == &stop_sched_class)
+		return false;
+#endif
+
 	/*
 	 * Do not complicate things with the async wake_list while the CPU is
 	 * in hotplug state.
@@ -7663,7 +7668,7 @@ const char *preempt_model_str(void)
 
 	if (IS_ENABLED(CONFIG_PREEMPT_DYNAMIC)) {
 		seq_buf_printf(&s, "(%s)%s",
-			       preempt_dynamic_mode > 0 ?
+			       preempt_dynamic_mode >= 0 ?
			       preempt_modes[preempt_dynamic_mode] : "undef",
			       brace ? "}" : "");
 		return seq_buf_str(&s);
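The one-character change in preempt_model_str() matters because the "none" dynamic preemption mode sits at index 0 of the mode table, so the old "> 0" test reported it as "undef". A minimal userspace sketch of the same off-by-one follows; the modes[] array below is an illustrative stand-in, not the kernel's actual preempt_modes[] contents.

#include <stdio.h>

/* Illustrative stand-in for the kernel's preempt_modes[] table. */
static const char *modes[] = { "none", "voluntary", "full" };

static const char *mode_str(int mode)
{
	/* Old check was "mode > 0", which wrongly treated index 0 ("none") as undefined. */
	return mode >= 0 ? modes[mode] : "undef";
}

int main(void)
{
	printf("%s\n", mode_str(0));	/* "none" -- previously reported as "undef" */
	printf("%s\n", mode_str(-1));	/* "undef" for a genuinely unset mode */
	return 0;
}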

kernel/sched/deadline.c

Lines changed: 5 additions & 5 deletions
@@ -1504,7 +1504,9 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
 	if (dl_entity_is_special(dl_se))
 		return;
 
-	scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec);
+	scaled_delta_exec = delta_exec;
+	if (!dl_server(dl_se))
+		scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec);
 
 	dl_se->runtime -= scaled_delta_exec;
 
@@ -1611,7 +1613,7 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
  */
 void dl_server_update_idle_time(struct rq *rq, struct task_struct *p)
 {
-	s64 delta_exec, scaled_delta_exec;
+	s64 delta_exec;
 
 	if (!rq->fair_server.dl_defer)
 		return;
@@ -1624,9 +1626,7 @@ void dl_server_update_idle_time(struct rq *rq, struct task_struct *p)
 	if (delta_exec < 0)
 		return;
 
-	scaled_delta_exec = dl_scaled_delta_exec(rq, &rq->fair_server, delta_exec);
-
-	rq->fair_server.runtime -= scaled_delta_exec;
+	rq->fair_server.runtime -= delta_exec;
 
 	if (rq->fair_server.runtime < 0) {
 		rq->fair_server.dl_defer_running = 0;
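The deadline.c change makes the dl_server (the fair server entity) consume its runtime in plain execution time, while ordinary deadline entities keep using dl_scaled_delta_exec(), which scales the charge by CPU frequency/capacity. A rough userspace sketch of that accounting split follows; scale_delta() is a made-up stand-in for the kernel's scaling, not its real formula.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for dl_scaled_delta_exec(): pretend the CPU runs at
 * half of its reference performance, so the scaled charge is smaller. */
static int64_t scale_delta(int64_t delta_ns)
{
	return delta_ns / 2;
}

/* Mirrors the fixed update_curr_dl_se() logic: only non-server entities
 * get the frequency/capacity-scaled charge. */
static int64_t charge(int64_t runtime_ns, int64_t delta_ns, bool is_server)
{
	int64_t scaled = delta_ns;

	if (!is_server)
		scaled = scale_delta(delta_ns);

	return runtime_ns - scaled;
}

int main(void)
{
	int64_t budget = 50 * 1000 * 1000;	/* 50ms of runtime budget */
	int64_t delta  = 10 * 1000 * 1000;	/* 10ms actually executed */

	printf("dl task  : %lld ns left\n", (long long)charge(budget, delta, false));
	printf("dl server: %lld ns left\n", (long long)charge(budget, delta, true));
	return 0;
}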

kernel/stop_machine.c

Lines changed: 10 additions & 10 deletions
@@ -82,31 +82,29 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done)
 }
 
 static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
-					struct cpu_stop_work *work,
-					struct wake_q_head *wakeq)
+					struct cpu_stop_work *work)
 {
 	list_add_tail(&work->list, &stopper->works);
-	wake_q_add(wakeq, stopper->thread);
 }
 
 /* queue @work to @stopper. if offline, @work is completed immediately */
 static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 {
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-	DEFINE_WAKE_Q(wakeq);
 	unsigned long flags;
 	bool enabled;
 
 	preempt_disable();
 	raw_spin_lock_irqsave(&stopper->lock, flags);
 	enabled = stopper->enabled;
 	if (enabled)
-		__cpu_stop_queue_work(stopper, work, &wakeq);
+		__cpu_stop_queue_work(stopper, work);
 	else if (work->done)
 		cpu_stop_signal_done(work->done);
 	raw_spin_unlock_irqrestore(&stopper->lock, flags);
 
-	wake_up_q(&wakeq);
+	if (enabled)
+		wake_up_process(stopper->thread);
 	preempt_enable();
 
 	return enabled;
@@ -264,7 +262,6 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
 {
 	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
 	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
-	DEFINE_WAKE_Q(wakeq);
 	int err;
 
 retry:
@@ -300,8 +297,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
 	}
 
 	err = 0;
-	__cpu_stop_queue_work(stopper1, work1, &wakeq);
-	__cpu_stop_queue_work(stopper2, work2, &wakeq);
+	__cpu_stop_queue_work(stopper1, work1);
+	__cpu_stop_queue_work(stopper2, work2);
 
 unlock:
 	raw_spin_unlock(&stopper2->lock);
@@ -316,7 +313,10 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
 		goto retry;
 	}
 
-	wake_up_q(&wakeq);
+	if (!err) {
+		wake_up_process(stopper1->thread);
+		wake_up_process(stopper2->thread);
+	}
 	preempt_enable();
 
 	return err;
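The stop_machine.c change drops the deferred wake_q and instead wakes the stopper thread(s) directly, only once the queueing outcome is known: "enabled" in the single-CPU path, "!err" in the two-CPU path, in both cases still inside the preempt-disabled region. A schematic userspace sketch of that "queue first, then wake only on success" shape follows; the types and helpers here are invented for illustration and are not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for a per-CPU stopper that may be offline. */
struct stopper { bool enabled; };

static bool queue_work(struct stopper *s)
{
	return s->enabled;	/* queueing only succeeds on an online stopper */
}

static void wake(const char *who)
{
	printf("wake %s\n", who);	/* stands in for wake_up_process() */
}

int main(void)
{
	struct stopper s1 = { .enabled = true };
	struct stopper s2 = { .enabled = true };
	int err = 0;

	if (!queue_work(&s1) || !queue_work(&s2))
		err = -1;

	/* As in the fix: wake the stopper threads directly, and only if the
	 * work was actually queued on both CPUs. */
	if (!err) {
		wake("stopper1");
		wake("stopper2");
	}
	return err ? 1 : 0;
}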
