
Commit 0fa6831

npiggin authored and mpe committed
powerpc/64: Fix msr_check_and_set/clear MSR[EE] race
irq soft-masking means that when Linux irqs are disabled, the MSR[EE] value can change from 1 to 0 asynchronously: if a masked interrupt of the PACA_IRQ_MUST_HARD_MASK variety fires while irqs are disabled, the masked handler will return with MSR[EE]=0.

This means a sequence like mtmsr(mfmsr() | MSR_FP) is racy if it can be called with local irqs disabled, unless a hard_irq_disable has been done.

Reported-by: Sachin Sant <[email protected]>
Signed-off-by: Nicholas Piggin <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 8154850 commit 0fa6831
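
To make the race concrete, here is a small illustrative sketch of the racy read-modify-write pattern the commit message calls out. MSR_EE, MSR_FP, mfmsr(), mtmsr() and hard_irq_disable() are the real powerpc kernel symbols; the helper name and the sequence itself are an illustration, not code from this patch.

    /*
     * Illustration of the race described above; not part of the patch.
     * Assume this runs with local irqs soft-disabled, so MSR[EE] may
     * still be 1 when we start.
     */
    static void racy_fp_msr_update(void)        /* hypothetical helper */
    {
            unsigned long msr = mfmsr();        /* reads MSR with EE = 1 */

            /*
             * A masked PACA_IRQ_MUST_HARD_MASK interrupt can fire here;
             * its handler returns with MSR[EE] = 0 and records that a
             * hard disable has happened.
             */

            mtmsr(msr | MSR_FP);                /* writes the stale EE = 1 back,
                                                 * silently undoing the hard
                                                 * disable: this is the race */
    }

Doing a hard_irq_disable() first, or using the mtmsr_isync_irqsafe() helper added below, closes that window.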

2 files changed: +26 −2 lines


arch/powerpc/include/asm/hw_irq.h

Lines changed: 24 additions & 0 deletions
@@ -471,6 +471,30 @@ static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned l
 }
 #endif /* CONFIG_PPC64 */
 
+static inline unsigned long mtmsr_isync_irqsafe(unsigned long msr)
+{
+#ifdef CONFIG_PPC64
+        if (arch_irqs_disabled()) {
+                /*
+                 * With soft-masking, MSR[EE] can change from 1 to 0
+                 * asynchronously when irqs are disabled, and we don't want to
+                 * set MSR[EE] back to 1 here if that has happened. A race-free
+                 * way to do this is ensure EE is already 0. Another way it
+                 * could be done is with a RESTART_TABLE handler, but that's
+                 * probably overkill here.
+                 */
+                msr &= ~MSR_EE;
+                mtmsr_isync(msr);
+                irq_soft_mask_set(IRQS_ALL_DISABLED);
+                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+        } else
+#endif
+                mtmsr_isync(msr);
+
+        return msr;
+}
+
+
 #define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST
 
 #endif /* __ASSEMBLY__ */

arch/powerpc/kernel/process.c

Lines changed: 2 additions & 2 deletions
@@ -127,7 +127,7 @@ unsigned long notrace msr_check_and_set(unsigned long bits)
                 newmsr |= MSR_VSX;
 
         if (oldmsr != newmsr)
-                mtmsr_isync(newmsr);
+                newmsr = mtmsr_isync_irqsafe(newmsr);
 
         return newmsr;
 }
@@ -145,7 +145,7 @@ void notrace __msr_check_and_clear(unsigned long bits)
                 newmsr &= ~MSR_VSX;
 
         if (oldmsr != newmsr)
-                mtmsr_isync(newmsr);
+                mtmsr_isync_irqsafe(newmsr);
 }
 EXPORT_SYMBOL(__msr_check_and_clear);
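
For context on how the fixed helpers are used, here is a simplified, hypothetical caller sketch (the function name and body are illustrative; real users such as enable_kernel_fp() follow this pattern but do more work). With this change, calling it while irqs are soft-disabled can no longer flip MSR[EE] back on behind the soft-mask bookkeeping.

    /* Hypothetical caller sketch, not taken from the kernel tree. */
    static void kernel_fp_section(void)
    {
            preempt_disable();              /* stay on this CPU while MSR_FP is set */

            msr_check_and_set(MSR_FP);      /* may run with irqs soft-disabled;
                                             * mtmsr_isync_irqsafe() now keeps
                                             * MSR[EE] at 0 if a masked interrupt
                                             * already cleared it */

            /* ... use FP registers ... */

            __msr_check_and_clear(MSR_FP);  /* drop MSR_FP again */
            preempt_enable();
    }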
