
Commit 94f2be5

Merge branch 'printk-rework' into for-linus

2 parents: d8c0321 + 3342aa8

File tree: 3 files changed, 159 insertions(+), 36 deletions(-)


include/linux/printk.h

Lines changed: 41 additions & 0 deletions
@@ -282,6 +282,47 @@ static inline void printk_safe_flush_on_panic(void)
 }
 #endif
 
+#ifdef CONFIG_SMP
+extern int __printk_cpu_trylock(void);
+extern void __printk_wait_on_cpu_lock(void);
+extern void __printk_cpu_unlock(void);
+
+/**
+ * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning
+ *                             lock and disable interrupts.
+ * @flags: Stack-allocated storage for saving local interrupt state,
+ *         to be passed to printk_cpu_unlock_irqrestore().
+ *
+ * If the lock is owned by another CPU, spin until it becomes available.
+ * Interrupts are restored while spinning.
+ */
+#define printk_cpu_lock_irqsave(flags)		\
+	for (;;) {				\
+		local_irq_save(flags);		\
+		if (__printk_cpu_trylock())	\
+			break;			\
+		local_irq_restore(flags);	\
+		__printk_wait_on_cpu_lock();	\
+	}
+
+/**
+ * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning
+ *                                  lock and restore interrupts.
+ * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave().
+ */
+#define printk_cpu_unlock_irqrestore(flags)	\
+	do {					\
+		__printk_cpu_unlock();		\
+		local_irq_restore(flags);	\
+	} while (0)				\
+
+#else
+
+#define printk_cpu_lock_irqsave(flags) ((void)flags)
+#define printk_cpu_unlock_irqrestore(flags) ((void)flags)
+
+#endif /* CONFIG_SMP */
+
 extern int kptr_restrict;
 
 /**
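
For a sense of how the new pair is meant to be used, here is a minimal
sketch of a caller that keeps a multi-line report from interleaving with
output from other CPUs. The function and its messages are hypothetical,
not part of this commit; the pattern mirrors the dump_stack() conversion
further down.

	/* Hypothetical caller of the new cpu-reentrant printk lock. */
	static void emit_report(void)
	{
		unsigned long flags;

		/* Spin (with interrupts enabled) until the lock is ours. */
		printk_cpu_lock_irqsave(flags);

		pr_info("report from CPU %d:\n", smp_processor_id());
		pr_info("  state: ...\n");
		pr_info("  queue: ...\n");

		/* Release the lock and restore the saved interrupt state. */
		printk_cpu_unlock_irqrestore(flags);
	}

Because the lock is cpu-reentrant, the same pattern may safely appear in
code that can interrupt itself on one CPU (e.g. an NMI handler nesting
inside an interrupt handler).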

kernel/printk/printk.c

Lines changed: 116 additions & 0 deletions
@@ -3531,3 +3531,119 @@ void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
 EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
 
 #endif
+
+#ifdef CONFIG_SMP
+static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
+static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);
+
+/**
+ * __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
+ *                               spinning lock is not owned by any CPU.
+ *
+ * Context: Any context.
+ */
+void __printk_wait_on_cpu_lock(void)
+{
+	do {
+		cpu_relax();
+	} while (atomic_read(&printk_cpulock_owner) != -1);
+}
+EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
+
+/**
+ * __printk_cpu_trylock() - Try to acquire the printk cpu-reentrant
+ *                          spinning lock.
+ *
+ * If no processor has the lock, the calling processor takes the lock and
+ * becomes the owner. If the calling processor is already the owner of the
+ * lock, this function succeeds immediately.
+ *
+ * Context: Any context. Expects interrupts to be disabled.
+ * Return: 1 on success, otherwise 0.
+ */
+int __printk_cpu_trylock(void)
+{
+	int cpu;
+	int old;
+
+	cpu = smp_processor_id();
+
+	/*
+	 * Guarantee loads and stores from this CPU when it is the lock owner
+	 * are _not_ visible to the previous lock owner. This pairs with
+	 * __printk_cpu_unlock:B.
+	 *
+	 * Memory barrier involvement:
+	 *
+	 * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, then
+	 * __printk_cpu_unlock:A can never read from __printk_cpu_trylock:B.
+	 *
+	 * Relies on:
+	 *
+	 * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+	 * of the previous CPU
+	 *    matching
+	 * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
+	 * of this CPU
+	 */
+	old = atomic_cmpxchg_acquire(&printk_cpulock_owner, -1,
+				     cpu); /* LMM(__printk_cpu_trylock:A) */
+	if (old == -1) {
+		/*
+		 * This CPU is now the owner and begins loading/storing
+		 * data: LMM(__printk_cpu_trylock:B)
+		 */
+		return 1;
+
+	} else if (old == cpu) {
+		/* This CPU is already the owner. */
+		atomic_inc(&printk_cpulock_nested);
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(__printk_cpu_trylock);
+
+/**
+ * __printk_cpu_unlock() - Release the printk cpu-reentrant spinning lock.
+ *
+ * The calling processor must be the owner of the lock.
+ *
+ * Context: Any context. Expects interrupts to be disabled.
+ */
+void __printk_cpu_unlock(void)
+{
+	if (atomic_read(&printk_cpulock_nested)) {
+		atomic_dec(&printk_cpulock_nested);
+		return;
+	}
+
+	/*
+	 * This CPU is finished loading/storing data:
+	 * LMM(__printk_cpu_unlock:A)
+	 */
+
+	/*
+	 * Guarantee loads and stores from this CPU when it was the
+	 * lock owner are visible to the next lock owner. This pairs
+	 * with __printk_cpu_trylock:A.
+	 *
+	 * Memory barrier involvement:
+	 *
+	 * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B,
+	 * then __printk_cpu_trylock:B reads from __printk_cpu_unlock:A.
+	 *
+	 * Relies on:
+	 *
+	 * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+	 * of this CPU
+	 *    matching
+	 * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
+	 * of the next CPU
+	 */
+	atomic_set_release(&printk_cpulock_owner,
+			   -1); /* LMM(__printk_cpu_unlock:B) */
+}
+EXPORT_SYMBOL(__printk_cpu_unlock);
+#endif /* CONFIG_SMP */
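
The ownership/nesting scheme above is easy to experiment with outside the
kernel. Below is a user-space C11 analogue (an illustrative sketch, not
kernel code): a compare-exchange with acquire ordering stands in for
atomic_cmpxchg_acquire(), a release store stands in for
atomic_set_release(), and a caller-supplied thread id stands in for
smp_processor_id(). All names here are ours, not the kernel's.

	#include <stdatomic.h>

	static atomic_int lock_owner  = -1;	/* like printk_cpulock_owner */
	static atomic_int lock_nested = 0;	/* like printk_cpulock_nested */

	static int reentrant_trylock(int self)
	{
		int old = -1;

		/* ACQUIRE: pairs with the release store in reentrant_unlock(). */
		if (atomic_compare_exchange_strong_explicit(&lock_owner, &old,
							    self,
							    memory_order_acquire,
							    memory_order_relaxed))
			return 1;			/* now the owner */

		if (old == self) {
			atomic_fetch_add(&lock_nested, 1);
			return 1;			/* nested reacquire */
		}

		return 0;				/* owned by another thread */
	}

	static void reentrant_unlock(void)
	{
		if (atomic_load(&lock_nested)) {
			atomic_fetch_sub(&lock_nested, 1);
			return;
		}

		/* RELEASE: publish our stores to the next acquiring thread. */
		atomic_store_explicit(&lock_owner, -1, memory_order_release);
	}

As in the kernel code, only the current owner can ever observe
old == self, so the nesting counter needs no ordering of its own.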

lib/dump_stack.c

Lines changed: 2 additions & 36 deletions
@@ -84,50 +84,16 @@ static void __dump_stack(void)
  *
  * Architectures can override this implementation by implementing its own.
  */
-#ifdef CONFIG_SMP
-static atomic_t dump_lock = ATOMIC_INIT(-1);
-
 asmlinkage __visible void dump_stack(void)
 {
 	unsigned long flags;
-	int was_locked;
-	int old;
-	int cpu;
 
 	/*
 	 * Permit this cpu to perform nested stack dumps while serialising
 	 * against other CPUs
 	 */
-retry:
-	local_irq_save(flags);
-	cpu = smp_processor_id();
-	old = atomic_cmpxchg(&dump_lock, -1, cpu);
-	if (old == -1) {
-		was_locked = 0;
-	} else if (old == cpu) {
-		was_locked = 1;
-	} else {
-		local_irq_restore(flags);
-		/*
-		 * Wait for the lock to release before jumping to
-		 * atomic_cmpxchg() in order to mitigate the thundering herd
-		 * problem.
-		 */
-		do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
-		goto retry;
-	}
-
-	__dump_stack();
-
-	if (!was_locked)
-		atomic_set(&dump_lock, -1);
-
-	local_irq_restore(flags);
-}
-#else
-asmlinkage __visible void dump_stack(void)
-{
+	printk_cpu_lock_irqsave(flags);
 	__dump_stack();
+	printk_cpu_unlock_irqrestore(flags);
 }
-#endif
 EXPORT_SYMBOL(dump_stack);
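
Note that on !CONFIG_SMP builds the lock macros compile to ((void)flags)
(see the printk.h hunk above), so this single definition of dump_stack()
now covers both configurations; roughly, the UP build reduces to:

	asmlinkage __visible void dump_stack(void)
	{
		unsigned long flags;

		((void)flags);		/* printk_cpu_lock_irqsave(flags) */
		__dump_stack();
		((void)flags);		/* printk_cpu_unlock_irqrestore(flags) */
	}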
