Skip to content

Commit faebd69

Browse files
jogness authored and pmladek committed
printk: rename cpulock functions
Since the printk cpulock is CPU-reentrant and since it is used in all contexts, its usage must be carefully considered and most likely will require programming locklessly. To avoid mistaking the printk cpulock as a typical lock, rename it to cpu_sync. The main functions then become:

    printk_cpu_sync_get_irqsave(flags);
    printk_cpu_sync_put_irqrestore(flags);

Add extra notes of caution in the function description to help developers understand the requirements for correct usage.

Signed-off-by: John Ogness <[email protected]>
Reviewed-by: Petr Mladek <[email protected]>
Signed-off-by: Petr Mladek <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 3ef4ea3 commit faebd69

File tree

4 files changed

+73
-60
lines changed

4 files changed

+73
-60
lines changed

include/linux/printk.h

Lines changed: 33 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -277,43 +277,55 @@ static inline void printk_trigger_flush(void)
277277
#endif
278278

279279
#ifdef CONFIG_SMP
280-
extern int __printk_cpu_trylock(void);
281-
extern void __printk_wait_on_cpu_lock(void);
282-
extern void __printk_cpu_unlock(void);
280+
extern int __printk_cpu_sync_try_get(void);
281+
extern void __printk_cpu_sync_wait(void);
282+
extern void __printk_cpu_sync_put(void);
283283

284284
/**
285-
* printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning
286-
* lock and disable interrupts.
285+
* printk_cpu_sync_get_irqsave() - Acquire the printk cpu-reentrant spinning
286+
* lock and disable interrupts.
287287
* @flags: Stack-allocated storage for saving local interrupt state,
288-
* to be passed to printk_cpu_unlock_irqrestore().
288+
* to be passed to printk_cpu_sync_put_irqrestore().
289289
*
290290
* If the lock is owned by another CPU, spin until it becomes available.
291291
* Interrupts are restored while spinning.
292+
*
293+
* CAUTION: This function must be used carefully. It does not behave like a
294+
* typical lock. Here are important things to watch out for...
295+
*
296+
* * This function is reentrant on the same CPU. Therefore the calling
297+
* code must not assume exclusive access to data if code accessing the
298+
* data can run reentrant or within NMI context on the same CPU.
299+
*
300+
* * If there exists usage of this function from NMI context, it becomes
301+
* unsafe to perform any type of locking or spinning to wait for other
302+
* CPUs after calling this function from any context. This includes
303+
* using spinlocks or any other busy-waiting synchronization methods.
292304
*/
293-
#define printk_cpu_lock_irqsave(flags) \
294-
for (;;) { \
295-
local_irq_save(flags); \
296-
if (__printk_cpu_trylock()) \
297-
break; \
298-
local_irq_restore(flags); \
299-
__printk_wait_on_cpu_lock(); \
305+
#define printk_cpu_sync_get_irqsave(flags) \
306+
for (;;) { \
307+
local_irq_save(flags); \
308+
if (__printk_cpu_sync_try_get()) \
309+
break; \
310+
local_irq_restore(flags); \
311+
__printk_cpu_sync_wait(); \
300312
}
301313

302314
/**
303-
* printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning
304-
* lock and restore interrupts.
305-
* @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave().
315+
* printk_cpu_sync_put_irqrestore() - Release the printk cpu-reentrant spinning
316+
* lock and restore interrupts.
317+
* @flags: Caller's saved interrupt state, from printk_cpu_sync_get_irqsave().
306318
*/
307-
#define printk_cpu_unlock_irqrestore(flags) \
319+
#define printk_cpu_sync_put_irqrestore(flags) \
308320
do { \
309-
__printk_cpu_unlock(); \
321+
__printk_cpu_sync_put(); \
310322
local_irq_restore(flags); \
311-
} while (0) \
323+
} while (0)
312324

313325
#else
314326

315-
#define printk_cpu_lock_irqsave(flags) ((void)flags)
316-
#define printk_cpu_unlock_irqrestore(flags) ((void)flags)
327+
#define printk_cpu_sync_get_irqsave(flags) ((void)flags)
328+
#define printk_cpu_sync_put_irqrestore(flags) ((void)flags)
317329

318330
#endif /* CONFIG_SMP */
319331

kernel/printk/printk.c

Lines changed: 36 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -3667,26 +3667,26 @@ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
36673667
#endif
36683668

36693669
#ifdef CONFIG_SMP
3670-
static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
3671-
static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);
3670+
static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
3671+
static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);
36723672

36733673
/**
3674-
* __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
3675-
* spinning lock is not owned by any CPU.
3674+
* __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
3675+
* spinning lock is not owned by any CPU.
36763676
*
36773677
* Context: Any context.
36783678
*/
3679-
void __printk_wait_on_cpu_lock(void)
3679+
void __printk_cpu_sync_wait(void)
36803680
{
36813681
do {
36823682
cpu_relax();
3683-
} while (atomic_read(&printk_cpulock_owner) != -1);
3683+
} while (atomic_read(&printk_cpu_sync_owner) != -1);
36843684
}
3685-
EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
3685+
EXPORT_SYMBOL(__printk_cpu_sync_wait);
36863686

36873687
/**
3688-
* __printk_cpu_trylock() - Try to acquire the printk cpu-reentrant
3689-
* spinning lock.
3688+
* __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant
3689+
* spinning lock.
36903690
*
36913691
* If no processor has the lock, the calling processor takes the lock and
36923692
* becomes the owner. If the calling processor is already the owner of the
@@ -3695,7 +3695,7 @@ EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
36953695
* Context: Any context. Expects interrupts to be disabled.
36963696
* Return: 1 on success, otherwise 0.
36973697
*/
3698-
int __printk_cpu_trylock(void)
3698+
int __printk_cpu_sync_try_get(void)
36993699
{
37003700
int cpu;
37013701
int old;
@@ -3705,79 +3705,80 @@ int __printk_cpu_trylock(void)
37053705
/*
37063706
* Guarantee loads and stores from this CPU when it is the lock owner
37073707
* are _not_ visible to the previous lock owner. This pairs with
3708-
* __printk_cpu_unlock:B.
3708+
* __printk_cpu_sync_put:B.
37093709
*
37103710
* Memory barrier involvement:
37113711
*
3712-
* If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, then
3713-
* __printk_cpu_unlock:A can never read from __printk_cpu_trylock:B.
3712+
* If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
3713+
* then __printk_cpu_sync_put:A can never read from
3714+
* __printk_cpu_sync_try_get:B.
37143715
*
37153716
* Relies on:
37163717
*
3717-
* RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
3718+
* RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
37183719
* of the previous CPU
37193720
* matching
3720-
* ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
3721-
* of this CPU
3721+
* ACQUIRE from __printk_cpu_sync_try_get:A to
3722+
* __printk_cpu_sync_try_get:B of this CPU
37223723
*/
3723-
old = atomic_cmpxchg_acquire(&printk_cpulock_owner, -1,
3724-
cpu); /* LMM(__printk_cpu_trylock:A) */
3724+
old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1,
3725+
cpu); /* LMM(__printk_cpu_sync_try_get:A) */
37253726
if (old == -1) {
37263727
/*
37273728
* This CPU is now the owner and begins loading/storing
3728-
* data: LMM(__printk_cpu_trylock:B)
3729+
* data: LMM(__printk_cpu_sync_try_get:B)
37293730
*/
37303731
return 1;
37313732

37323733
} else if (old == cpu) {
37333734
/* This CPU is already the owner. */
3734-
atomic_inc(&printk_cpulock_nested);
3735+
atomic_inc(&printk_cpu_sync_nested);
37353736
return 1;
37363737
}
37373738

37383739
return 0;
37393740
}
3740-
EXPORT_SYMBOL(__printk_cpu_trylock);
3741+
EXPORT_SYMBOL(__printk_cpu_sync_try_get);
37413742

37423743
/**
3743-
* __printk_cpu_unlock() - Release the printk cpu-reentrant spinning lock.
3744+
* __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock.
37443745
*
37453746
* The calling processor must be the owner of the lock.
37463747
*
37473748
* Context: Any context. Expects interrupts to be disabled.
37483749
*/
3749-
void __printk_cpu_unlock(void)
3750+
void __printk_cpu_sync_put(void)
37503751
{
3751-
if (atomic_read(&printk_cpulock_nested)) {
3752-
atomic_dec(&printk_cpulock_nested);
3752+
if (atomic_read(&printk_cpu_sync_nested)) {
3753+
atomic_dec(&printk_cpu_sync_nested);
37533754
return;
37543755
}
37553756

37563757
/*
37573758
* This CPU is finished loading/storing data:
3758-
* LMM(__printk_cpu_unlock:A)
3759+
* LMM(__printk_cpu_sync_put:A)
37593760
*/
37603761

37613762
/*
37623763
* Guarantee loads and stores from this CPU when it was the
37633764
* lock owner are visible to the next lock owner. This pairs
3764-
* with __printk_cpu_trylock:A.
3765+
* with __printk_cpu_sync_try_get:A.
37653766
*
37663767
* Memory barrier involvement:
37673768
*
3768-
* If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B,
3769-
* then __printk_cpu_trylock:B reads from __printk_cpu_unlock:A.
3769+
* If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
3770+
* then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A.
37703771
*
37713772
* Relies on:
37723773
*
3773-
* RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
3774+
* RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
37743775
* of this CPU
37753776
* matching
3776-
* ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
3777-
* of the next CPU
3777+
* ACQUIRE from __printk_cpu_sync_try_get:A to
3778+
* __printk_cpu_sync_try_get:B of the next CPU
37783779
*/
3779-
atomic_set_release(&printk_cpulock_owner,
3780-
-1); /* LMM(__printk_cpu_unlock:B) */
3780+
atomic_set_release(&printk_cpu_sync_owner,
3781+
-1); /* LMM(__printk_cpu_sync_put:B) */
37813782
}
3782-
EXPORT_SYMBOL(__printk_cpu_unlock);
3783+
EXPORT_SYMBOL(__printk_cpu_sync_put);
37833784
#endif /* CONFIG_SMP */

lib/dump_stack.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -102,9 +102,9 @@ asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
102102
* Permit this cpu to perform nested stack dumps while serialising
103103
* against other CPUs
104104
*/
105-
printk_cpu_lock_irqsave(flags);
105+
printk_cpu_sync_get_irqsave(flags);
106106
__dump_stack(log_lvl);
107-
printk_cpu_unlock_irqrestore(flags);
107+
printk_cpu_sync_put_irqrestore(flags);
108108
}
109109
EXPORT_SYMBOL(dump_stack_lvl);
110110

lib/nmi_backtrace.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
9999
* Allow nested NMI backtraces while serializing
100100
* against other CPUs.
101101
*/
102-
printk_cpu_lock_irqsave(flags);
102+
printk_cpu_sync_get_irqsave(flags);
103103
if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
104104
pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
105105
cpu, (void *)instruction_pointer(regs));
@@ -110,7 +110,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
110110
else
111111
dump_stack();
112112
}
113-
printk_cpu_unlock_irqrestore(flags);
113+
printk_cpu_sync_put_irqrestore(flags);
114114
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
115115
return true;
116116
}

0 commit comments

Comments (0)