
Commit c677124

Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
 "These were the main changes in this cycle:

   - More -rt motivated separation of CONFIG_PREEMPT and CONFIG_PREEMPTION.

   - Add more low level scheduling topology sanity checks and warnings to
     filter out nonsensical topologies that break scheduling.

   - Extend uclamp constraints to influence wakeup CPU placement

   - Make the RT scheduler more aware of asymmetric topologies and CPU
     capacities, via uclamp metrics, if CONFIG_UCLAMP_TASK=y

   - Make idle CPU selection more consistent

   - Various fixes, smaller cleanups, updates and enhancements - please
     see the git log for details"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (58 commits)
  sched/fair: Define sched_idle_cpu() only for SMP configurations
  sched/topology: Assert non-NUMA topology masks don't (partially) overlap
  idle: fix spelling mistake "iterrupts" -> "interrupts"
  sched/fair: Remove redundant call to cpufreq_update_util()
  sched/psi: create /proc/pressure and /proc/pressure/{io|memory|cpu} only when psi enabled
  sched/fair: Fix sgc->{min,max}_capacity calculation for SD_OVERLAP
  sched/fair: calculate delta runnable load only when it's needed
  sched/cputime: move rq parameter in irqtime_account_process_tick
  stop_machine: Make stop_cpus() static
  sched/debug: Reset watchdog on all CPUs while processing sysrq-t
  sched/core: Fix size of rq::uclamp initialization
  sched/uclamp: Fix a bug in propagating uclamp value in new cgroups
  sched/fair: Load balance aggressively for SCHED_IDLE CPUs
  sched/fair : Improve update_sd_pick_busiest for spare capacity case
  watchdog: Remove soft_lockup_hrtimer_cnt and related code
  sched/rt: Make RT capacity-aware
  sched/fair: Make EAS wakeup placement consider uclamp restrictions
  sched/fair: Make task_fits_capacity() consider uclamp restrictions
  sched/uclamp: Rename uclamp_util_with() into uclamp_rq_util_with()
  sched/uclamp: Make uclamp util helpers use and return UL values
  ...
2 parents c0e809e + afa70d9 commit c677124
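For readers skimming the diffs below: most of the per-architecture changes mechanically convert #ifdef CONFIG_PREEMPT tests to CONFIG_PREEMPTION. A minimal stand-alone C sketch of the intent follows; the CONFIG_* macros here are user-space stand-ins, not the kernel's Kconfig or autoconf.h, and the assumption (per the commit message and kernel/Kconfig.preempt) is that CONFIG_PREEMPTION is the umbrella symbol selected by both CONFIG_PREEMPT and CONFIG_PREEMPT_RT.

/*
 * Sketch only: CONFIG_PREEMPTION is set for both the ordinary preemptible
 * kernel and PREEMPT_RT, so code that must be safe on any preemptible
 * kernel now keys off CONFIG_PREEMPTION rather than CONFIG_PREEMPT.
 */
#include <stdio.h>

#define CONFIG_PREEMPT_RT 1			/* pretend this build is PREEMPT_RT=y */

#if defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_RT)
#define CONFIG_PREEMPTION 1			/* umbrella symbol */
#endif

int main(void)
{
#ifdef CONFIG_PREEMPTION
	/* e.g. disable IRQs around non-atomic register reads, hook
	 * preempt_schedule_irq() on IRQ return, as the diffs below do */
	printf("preemptible kernel: PREEMPTION-only paths compiled in\n");
#else
	printf("non-preemptible kernel: those paths are compiled out\n");
#endif
	return 0;
}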


72 files changed: +446, -329 lines changed


arch/arc/kernel/entry.S
Lines changed: 3 additions & 3 deletions

@@ -337,11 +337,11 @@ resume_user_mode_begin:
 resume_kernel_mode:

 	; Disable Interrupts from this point on
-	; CONFIG_PREEMPT: This is a must for preempt_schedule_irq()
-	; !CONFIG_PREEMPT: To ensure restore_regs is intr safe
+	; CONFIG_PREEMPTION: This is a must for preempt_schedule_irq()
+	; !CONFIG_PREEMPTION: To ensure restore_regs is intr safe
 	IRQ_DISABLE	r9

-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION

 	; Can't preempt if preemption disabled
 	GET_CURR_THR_INFO_FROM_SP   r10

arch/arm/include/asm/switch_to.h
Lines changed: 1 addition & 1 deletion

@@ -10,7 +10,7 @@
  * to ensure that the maintenance completes in case we migrate to another
  * CPU.
  */
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
+#if defined(CONFIG_PREEMPTION) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
 #define __complete_pending_tlbi()	dsb(ish)
 #else
 #define __complete_pending_tlbi()

arch/arm/kernel/entry-armv.S
Lines changed: 2 additions & 2 deletions

@@ -211,7 +211,7 @@ __irq_svc:
 	svc_entry
 	irq_handler

-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
 	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
 	teq	r8, #0				@ if preempt count != 0
@@ -226,7 +226,7 @@ ENDPROC(__irq_svc)

 	.ltorg

-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 svc_preempt:
 	mov	r8, lr
 1:	bl	preempt_schedule_irq		@ irq en/disable is done inside

arch/arm/kernel/traps.c
Lines changed: 2 additions & 0 deletions

@@ -248,6 +248,8 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)

 #ifdef CONFIG_PREEMPT
 #define S_PREEMPT " PREEMPT"
+#elif defined(CONFIG_PREEMPT_RT)
+#define S_PREEMPT " PREEMPT_RT"
 #else
 #define S_PREEMPT ""
 #endif

arch/arm/mm/cache-v7.S
Lines changed: 2 additions & 2 deletions

@@ -135,13 +135,13 @@ flush_levels:
 	and	r1, r1, #7			@ mask of the bits for current cache only
 	cmp	r1, #2				@ see what cache we have at this level
 	blt	skip				@ skip if no cache, or just i-cache
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	save_and_disable_irqs_notrace r9	@ make cssr&csidr read atomic
 #endif
 	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
 	isb					@ isb to sych the new cssr&csidr
 	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	restore_irqs_notrace r9
 #endif
 	and	r2, r1, #7			@ extract the length of the cache lines

arch/arm/mm/cache-v7m.S
Lines changed: 2 additions & 2 deletions

@@ -183,13 +183,13 @@ flush_levels:
 	and	r1, r1, #7			@ mask of the bits for current cache only
 	cmp	r1, #2				@ see what cache we have at this level
 	blt	skip				@ skip if no cache, or just i-cache
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	save_and_disable_irqs_notrace r9	@ make cssr&csidr read atomic
 #endif
 	write_csselr r10, r1			@ set current cache level
 	isb					@ isb to sych the new cssr&csidr
 	read_ccsidr r1				@ read the new csidr
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	restore_irqs_notrace r9
 #endif
 	and	r2, r1, #7			@ extract the length of the cache lines

arch/arm64/Kconfig
Lines changed: 26 additions & 26 deletions

@@ -34,32 +34,32 @@ config ARM64
 	select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
-	select ARCH_INLINE_READ_LOCK if !PREEMPT
-	select ARCH_INLINE_READ_LOCK_BH if !PREEMPT
-	select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT
-	select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT
-	select ARCH_INLINE_READ_UNLOCK if !PREEMPT
-	select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT
-	select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT
-	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT
-	select ARCH_INLINE_WRITE_LOCK if !PREEMPT
-	select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT
-	select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT
-	select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT
-	select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT
-	select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
-	select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
-	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
-	select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT
-	select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT
-	select ARCH_INLINE_SPIN_LOCK if !PREEMPT
-	select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT
-	select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT
-	select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT
-	select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT
-	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
-	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
-	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
+	select ARCH_INLINE_READ_LOCK if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION
+	select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION
+	select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
 	select ARCH_KEEP_MEMBLOCK
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_USE_QUEUED_RWLOCKS

arch/arm64/crypto/sha256-glue.c
Lines changed: 1 addition & 1 deletion

@@ -97,7 +97,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 		 * input when running on a preemptible kernel, but process the
 		 * data block by block instead.
 		 */
-		if (IS_ENABLED(CONFIG_PREEMPT) &&
+		if (IS_ENABLED(CONFIG_PREEMPTION) &&
 		    chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
 			chunk = SHA256_BLOCK_SIZE -
 				sctx->count % SHA256_BLOCK_SIZE;
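The IS_ENABLED() test above evaluates to a compile-time constant, so on !CONFIG_PREEMPTION builds the chunk-limiting branch is optimised away while still being parsed. A rough user-space sketch of the same pattern follows; the macro names, sizes, and values are illustrative stand-ins, not the kernel's kconfig.h or the real SHA-256 driver.

/*
 * Sketch only: on preemptible kernels, cap the amount of data processed per
 * NEON section so preemption is not held off for too long; otherwise process
 * the whole request at once.
 */
#include <stdio.h>

#define PREEMPTION_ENABLED 1		/* stand-in for IS_ENABLED(CONFIG_PREEMPTION) */
#define BLOCK_SIZE 64U			/* stand-in for SHA256_BLOCK_SIZE */

int main(void)
{
	unsigned int chunk = 4096, count = 80;

	if (PREEMPTION_ENABLED && chunk + count % BLOCK_SIZE > BLOCK_SIZE)
		chunk = BLOCK_SIZE - count % BLOCK_SIZE;	/* finish only the partial block */

	printf("bytes processed with preemption disabled: %u\n", chunk);
	return 0;
}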

arch/arm64/include/asm/assembler.h
Lines changed: 3 additions & 3 deletions

@@ -675,8 +675,8 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
  * where <label> is optional, and marks the point where execution will resume
  * after a yield has been performed. If omitted, execution resumes right after
  * the endif_yield_neon invocation. Note that the entire sequence, including
- * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
- * is not defined.
+ * the provided patchup code, will be omitted from the image if
+ * CONFIG_PREEMPTION is not defined.
  *
  * As a convenience, in the case where no patchup code is required, the above
  * sequence may be abbreviated to
@@ -704,7 +704,7 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
 	.endm

 	.macro		if_will_cond_yield_neon
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	get_current_task	x0
 	ldr		x0, [x0, #TSK_TI_PREEMPT]
 	sub		x0, x0, #PREEMPT_DISABLE_OFFSET

arch/arm64/include/asm/preempt.h
Lines changed: 2 additions & 2 deletions

@@ -79,11 +79,11 @@ static inline bool should_resched(int preempt_offset)
 	return pc == preempt_offset;
 }

-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 void preempt_schedule(void);
 #define __preempt_schedule() preempt_schedule()
 void preempt_schedule_notrace(void);
 #define __preempt_schedule_notrace() preempt_schedule_notrace()
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */

 #endif /* __ASM_PREEMPT_H */
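For context on why these declarations are only wanted under CONFIG_PREEMPTION: __preempt_schedule() is the hook the generic preempt_enable() path uses to reschedule as soon as the preempt count drops back to zero with a reschedule pending. The following is a toy user-space model of that behaviour, not kernel code; the function names are reused for illustration only and the bodies are stand-ins.

/* Toy model: preempt_enable() reschedules when the count hits zero. */
#include <stdbool.h>
#include <stdio.h>

static int preempt_count;
static bool need_resched = true;

static void __preempt_schedule(void)		/* stand-in for the real hook */
{
	printf("rescheduling at preempt_enable()\n");
	need_resched = false;
}

static void preempt_disable(void)
{
	preempt_count++;
}

static void preempt_enable(void)
{
	/* only reachable on a preemptible (CONFIG_PREEMPTION) kernel */
	if (--preempt_count == 0 && need_resched)
		__preempt_schedule();
}

int main(void)
{
	preempt_disable();
	preempt_enable();			/* triggers the model reschedule */
	return 0;
}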
