
Commit cbb104f

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull misc scheduler fixes from Ingo Molnar:

 - Fix potential deadlock under CONFIG_DEBUG_OBJECTS=y

 - PELT metrics update ordering fix

 - uclamp logic fix

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/uclamp: Fix incorrect condition
  sched/pelt: Fix update of blocked PELT ordering
  sched/core: Avoid spurious lock dependencies
2 parents 6b27354 + 6e1ff07 commit cbb104f
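
The "Fix potential deadlock under CONFIG_DEBUG_OBJECTS=y" item corresponds to the init_idle() hunk in kernel/sched/core.c below, which calls __sched_fork() before taking pi_lock and rq->lock rather than after. The sketch that follows is a hedged userspace illustration of the general pattern being avoided, assuming the issue is that __sched_fork() can take unrelated locks internally when debug-object tracking is enabled; the lock and function names here are stand-ins, not kernel identifiers.

/*
 * Userspace pthreads sketch (not kernel code) of the lock-dependency
 * pattern the init_idle() reordering avoids. "debug_objects_lock" and
 * sched_fork_standin() are invented names for illustration only.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t debug_objects_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for __sched_fork(): with debug facilities enabled it may
 * need to take other locks internally (e.g. while allocating
 * tracking objects for the new task's state). */
static void sched_fork_standin(void)
{
	pthread_mutex_lock(&debug_objects_lock);
	/* ... set up per-task state ... */
	pthread_mutex_unlock(&debug_objects_lock);
}

int main(void)
{
	/*
	 * Fixed ordering: do the work that needs other locks first, so no
	 * rq_lock -> debug_objects_lock dependency is ever created.
	 */
	sched_fork_standin();

	pthread_mutex_lock(&rq_lock);
	puts("remaining idle-task setup runs under rq_lock only");
	pthread_mutex_unlock(&rq_lock);
	return 0;
}
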

2 files changed (+23, -11 lines)

kernel/sched/core.c

Lines changed: 3 additions & 2 deletions

@@ -1065,7 +1065,7 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
 	 * affecting a valid clamp bucket, the next time it's enqueued,
 	 * it will already see the updated clamp bucket value.
 	 */
-	if (!p->uclamp[clamp_id].active) {
+	if (p->uclamp[clamp_id].active) {
 		uclamp_rq_dec_id(rq, p, clamp_id);
 		uclamp_rq_inc_id(rq, p, clamp_id);
 	}
@@ -6019,10 +6019,11 @@ void init_idle(struct task_struct *idle, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+	__sched_fork(0, idle);
+
 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
 	raw_spin_lock(&rq->lock);
 
-	__sched_fork(0, idle);
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 	idle->flags |= PF_IDLE;
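
The first kernel/sched/core.c hunk above inverts the guard in uclamp_update_active(): the clamp-bucket accounting is refreshed only when p->uclamp[clamp_id].active is set, i.e. when the task is currently counted in a bucket; an inactive task is left alone and, as the surrounding comment notes, simply sees the updated bucket value at its next enqueue. Below is a minimal userspace sketch of that accounting rule; the structs and helpers are invented for illustration and are not the kernel's uclamp implementation.

/*
 * Toy model (not the kernel implementation) of the corrected
 * uclamp_update_active() guard: only a task that is currently
 * accounted in a clamp bucket is removed from its old bucket and
 * re-added to the new one; touching buckets for an inactive task
 * would corrupt the per-bucket task counts.
 */
#include <stdio.h>

struct clamp_bucket { unsigned int tasks; };
struct task         { int active; unsigned int bucket_id; };

static void bucket_dec(struct clamp_bucket *b) { b->tasks--; }
static void bucket_inc(struct clamp_bucket *b) { b->tasks++; }

int main(void)
{
	/* bucket 0: old clamp value, bucket 1: new clamp value */
	struct clamp_bucket buckets[2] = { { .tasks = 1 }, { .tasks = 0 } };
	struct task p = { .active = 0, .bucket_id = 0 };  /* not enqueued */
	unsigned int new_bucket_id = 1;                   /* bucket for the updated clamp */

	/* Corrected condition: refresh accounting only for an accounted task. */
	if (p.active) {
		bucket_dec(&buckets[p.bucket_id]);   /* leave the old bucket */
		p.bucket_id = new_bucket_id;
		bucket_inc(&buckets[p.bucket_id]);   /* enter the new bucket */
	}

	/* An inactive task is left alone; it picks up the new value on enqueue. */
	printf("bucket counts: %u %u\n", buckets[0].tasks, buckets[1].tasks);
	return 0;
}
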

kernel/sched/fair.c

Lines changed: 20 additions & 9 deletions

@@ -7547,6 +7547,19 @@ static void update_blocked_averages(int cpu)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 
+	/*
+	 * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+	 * that RT, DL and IRQ signals have been updated before updating CFS.
+	 */
+	curr_class = rq->curr->sched_class;
+	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
+	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
+	update_irq_load_avg(rq, 0);
+
+	/* Don't need periodic decay once load/util_avg are null */
+	if (others_have_blocked(rq))
+		done = false;
+
 	/*
 	 * Iterates the task_group tree in a bottom up fashion, see
 	 * list_add_leaf_cfs_rq() for details.
@@ -7574,14 +7587,6 @@ static void update_blocked_averages(int cpu)
 			done = false;
 	}
 
-	curr_class = rq->curr->sched_class;
-	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
-	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
-	update_irq_load_avg(rq, 0);
-	/* Don't need periodic decay once load/util_avg are null */
-	if (others_have_blocked(rq))
-		done = false;
-
 	update_blocked_load_status(rq, !done);
 	rq_unlock_irqrestore(rq, &rf);
 }
@@ -7642,12 +7647,18 @@ static inline void update_blocked_averages(int cpu)
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
 
+	/*
+	 * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+	 * that RT, DL and IRQ signals have been updated before updating CFS.
+	 */
 	curr_class = rq->curr->sched_class;
 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
 	update_irq_load_avg(rq, 0);
+
+	update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
+
 	update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
 	rq_unlock_irqrestore(rq, &rf);
 }
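
Both kernel/sched/fair.c hunks enforce the same ordering: the RT, DL and IRQ PELT signals are refreshed before update_cfs_rq_load_avg(), because the CFS update can call cpufreq_update_util() and the resulting frequency request should not mix a fresh CFS value with stale signals from the other classes. The sketch below is a simplified userspace model of that ordering constraint, not the kernel's PELT code; the decay function and struct are stand-ins.

/*
 * Toy model (not kernel code) of the ordering fixed here: the cpufreq
 * callback fires from the CFS update and sums the utilization of all
 * classes, so RT/DL/IRQ must already have been refreshed at that point.
 */
#include <stdio.h>

struct rq_util { unsigned long cfs, rt, dl, irq; };

/* Crude stand-in for PELT decay of a blocked signal. */
static void decay(unsigned long *sig) { *sig -= *sig / 2; }

/* Stand-in for a cpufreq_update_util()-style callback: it sees the sum. */
static void cpufreq_callback(const struct rq_util *u)
{
	printf("frequency request based on total util %lu\n",
	       u->cfs + u->rt + u->dl + u->irq);
}

int main(void)
{
	struct rq_util u = { .cfs = 300, .rt = 200, .dl = 100, .irq = 50 };

	/* Fixed ordering: refresh the non-CFS signals first ... */
	decay(&u.rt);
	decay(&u.dl);
	decay(&u.irq);

	/* ... so that when the CFS update triggers the callback, every
	 * class contributes an already-updated value. */
	decay(&u.cfs);
	cpufreq_callback(&u);
	return 0;
}
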
