@@ -3531,3 +3531,119 @@ void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
 EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
 
 #endif
+
+#ifdef CONFIG_SMP
+static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
+static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);
+
+/**
+ * __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
+ *                               spinning lock is not owned by any CPU.
+ *
+ * Context: Any context.
+ */
+void __printk_wait_on_cpu_lock(void)
+{
+	do {
+		cpu_relax();
+	} while (atomic_read(&printk_cpulock_owner) != -1);
+}
+EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
+
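For context on how this wait primitive is meant to be combined with the trylock below: callers spin with interrupts enabled and only hold the lock with interrupts disabled. The companion wrapper added to include/linux/printk.h in the same series follows roughly this shape (a sketch reproduced from memory, so treat the exact macro body as approximate):

```c
/*
 * Caller-side pattern (cf. printk_cpu_lock_irqsave() in
 * include/linux/printk.h; illustrative sketch, not verbatim).
 */
#define printk_cpu_lock_irqsave(flags)		\
	for (;;) {				\
		local_irq_save(flags);		\
		if (__printk_cpu_trylock())	\
			break;			\
		local_irq_restore(flags);	\
		__printk_wait_on_cpu_lock();	\
	}
```

Restoring interrupts before spinning keeps the waiting CPU responsive to pending interrupts instead of busy-waiting with them disabled for an unbounded time.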
+/**
+ * __printk_cpu_trylock() - Try to acquire the printk cpu-reentrant
+ *                          spinning lock.
+ *
+ * If no processor has the lock, the calling processor takes the lock and
+ * becomes the owner. If the calling processor is already the owner of the
+ * lock, this function succeeds immediately.
+ *
+ * Context: Any context. Expects interrupts to be disabled.
+ * Return: 1 on success, otherwise 0.
+ */
+int __printk_cpu_trylock(void)
+{
+	int cpu;
+	int old;
+
+	cpu = smp_processor_id();
+
+	/*
+	 * Guarantee loads and stores from this CPU when it is the lock owner
+	 * are _not_ visible to the previous lock owner. This pairs with
+	 * __printk_cpu_unlock:B.
+	 *
+	 * Memory barrier involvement:
+	 *
+	 * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, then
+	 * __printk_cpu_unlock:A can never read from __printk_cpu_trylock:B.
+	 *
+	 * Relies on:
+	 *
+	 * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+	 * of the previous CPU
+	 *    matching
+	 * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
+	 * of this CPU
+	 */
+	old = atomic_cmpxchg_acquire(&printk_cpulock_owner, -1,
+				     cpu); /* LMM(__printk_cpu_trylock:A) */
+	if (old == -1) {
+		/*
+		 * This CPU is now the owner and begins loading/storing
+		 * data: LMM(__printk_cpu_trylock:B)
+		 */
+		return 1;
+
+	} else if (old == cpu) {
+		/* This CPU is already the owner. */
+		atomic_inc(&printk_cpulock_nested);
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(__printk_cpu_trylock);
+
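To make the acquire/release pairing concrete outside the kernel, here is a minimal user-space analogue of the trylock path using C11 atomics, with a caller-supplied thread id standing in for smp_processor_id(). All names below are hypothetical illustration, not kernel code:

```c
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int lock_owner  = ATOMIC_VAR_INIT(-1);
static atomic_int lock_nested = ATOMIC_VAR_INIT(0);

/* my_id: a stable, nonnegative id for the calling thread
 * (stands in for smp_processor_id()). */
static bool reentrant_trylock(int my_id)
{
	int expected = -1;

	/* Acquire on success pairs with the release in reentrant_unlock(),
	 * so the new owner sees everything the previous owner wrote. */
	if (atomic_compare_exchange_strong_explicit(&lock_owner, &expected,
						    my_id,
						    memory_order_acquire,
						    memory_order_relaxed))
		return true;		/* took ownership */

	/* On failure, `expected` now holds the observed owner. */
	if (expected == my_id) {
		/* Re-entry by the current owner: only bump nesting depth. */
		atomic_fetch_add_explicit(&lock_nested, 1,
					  memory_order_relaxed);
		return true;
	}

	return false;			/* owned by someone else */
}
```

Note how re-entry is detected after the compare-exchange fails: a failed cmpxchg leaves the observed owner in `expected`, mirroring the kernel's `old == cpu` check.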
+/**
+ * __printk_cpu_unlock() - Release the printk cpu-reentrant spinning lock.
+ *
+ * The calling processor must be the owner of the lock.
+ *
+ * Context: Any context. Expects interrupts to be disabled.
+ */
+void __printk_cpu_unlock(void)
+{
+	if (atomic_read(&printk_cpulock_nested)) {
+		atomic_dec(&printk_cpulock_nested);
+		return;
+	}
+
+	/*
+	 * This CPU is finished loading/storing data:
+	 * LMM(__printk_cpu_unlock:A)
+	 */
+
+	/*
+	 * Guarantee loads and stores from this CPU when it was the
+	 * lock owner are visible to the next lock owner. This pairs
+	 * with __printk_cpu_trylock:A.
+	 *
+	 * Memory barrier involvement:
+	 *
+	 * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B,
+	 * then __printk_cpu_trylock:B reads from __printk_cpu_unlock:A.
+	 *
+	 * Relies on:
+	 *
+	 * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+	 * of this CPU
+	 *    matching
+	 * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
+	 * of the next CPU
+	 */
+	atomic_set_release(&printk_cpulock_owner,
+			   -1); /* LMM(__printk_cpu_unlock:B) */
+}
+EXPORT_SYMBOL(__printk_cpu_unlock);
+#endif /* CONFIG_SMP */
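The matching release path for the user-space sketch above mirrors __printk_cpu_unlock(): nested exits only decrement the counter, and only the outermost unlock publishes the owner's stores with a release store (again, hypothetical names, not kernel code):

```c
/* Matching release path for the reentrant_trylock() sketch above. */
static void reentrant_unlock(void)
{
	if (atomic_load_explicit(&lock_nested, memory_order_relaxed) > 0) {
		atomic_fetch_sub_explicit(&lock_nested, 1,
					  memory_order_relaxed);
		return;		/* inner unlock; still owned */
	}

	/* Release pairs with the acquire in reentrant_trylock(), making
	 * this owner's writes visible to whoever acquires next. */
	atomic_store_explicit(&lock_owner, -1, memory_order_release);
}
```

Nested use then balances naturally: `reentrant_trylock(id); reentrant_trylock(id); reentrant_unlock(); reentrant_unlock();` leaves the lock free, just as the kernel lock survives a dump routine re-entered (for example from an NMI) on the owning CPU.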