#endif

#ifdef CONFIG_SMP

/* CPU id of the current owner of the printk cpu-reentrant spinning lock,
 * or -1 when the lock is free.
 */
static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
/* Re-entry depth of the owning CPU (0 = outermost acquisition). */
static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);
/**
3674
- * __printk_wait_on_cpu_lock () - Busy wait until the printk cpu-reentrant
3675
- * spinning lock is not owned by any CPU.
3674
+ * __printk_cpu_sync_wait () - Busy wait until the printk cpu-reentrant
3675
+ * spinning lock is not owned by any CPU.
3676
3676
*
3677
3677
* Context: Any context.
3678
3678
*/
3679
- void __printk_wait_on_cpu_lock (void )
3679
+ void __printk_cpu_sync_wait (void )
3680
3680
{
3681
3681
do {
3682
3682
cpu_relax ();
3683
- } while (atomic_read (& printk_cpulock_owner ) != -1 );
3683
+ } while (atomic_read (& printk_cpu_sync_owner ) != -1 );
3684
3684
}
3685
- EXPORT_SYMBOL (__printk_wait_on_cpu_lock );
3685
+ EXPORT_SYMBOL (__printk_cpu_sync_wait );
3686
3686
3687
3687
/**
3688
- * __printk_cpu_trylock () - Try to acquire the printk cpu-reentrant
3689
- * spinning lock.
3688
+ * __printk_cpu_sync_try_get () - Try to acquire the printk cpu-reentrant
3689
+ * spinning lock.
3690
3690
*
3691
3691
* If no processor has the lock, the calling processor takes the lock and
3692
3692
* becomes the owner. If the calling processor is already the owner of the
@@ -3695,7 +3695,7 @@ EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
3695
3695
* Context: Any context. Expects interrupts to be disabled.
3696
3696
* Return: 1 on success, otherwise 0.
3697
3697
*/
3698
- int __printk_cpu_trylock (void )
3698
+ int __printk_cpu_sync_try_get (void )
3699
3699
{
3700
3700
int cpu ;
3701
3701
int old ;
@@ -3705,79 +3705,80 @@ int __printk_cpu_trylock(void)
3705
3705
/*
3706
3706
* Guarantee loads and stores from this CPU when it is the lock owner
3707
3707
* are _not_ visible to the previous lock owner. This pairs with
3708
- * __printk_cpu_unlock :B.
3708
+ * __printk_cpu_sync_put :B.
3709
3709
*
3710
3710
* Memory barrier involvement:
3711
3711
*
3712
- * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, then
3713
- * __printk_cpu_unlock:A can never read from __printk_cpu_trylock:B.
3712
+ * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
3713
+ * then __printk_cpu_sync_put:A can never read from
3714
+ * __printk_cpu_sync_try_get:B.
3714
3715
*
3715
3716
* Relies on:
3716
3717
*
3717
- * RELEASE from __printk_cpu_unlock :A to __printk_cpu_unlock :B
3718
+ * RELEASE from __printk_cpu_sync_put :A to __printk_cpu_sync_put :B
3718
3719
* of the previous CPU
3719
3720
* matching
3720
- * ACQUIRE from __printk_cpu_trylock :A to __printk_cpu_trylock:B
3721
- * of this CPU
3721
+ * ACQUIRE from __printk_cpu_sync_try_get :A to
3722
+ * __printk_cpu_sync_try_get:B of this CPU
3722
3723
*/
3723
- old = atomic_cmpxchg_acquire (& printk_cpulock_owner , -1 ,
3724
- cpu ); /* LMM(__printk_cpu_trylock :A) */
3724
+ old = atomic_cmpxchg_acquire (& printk_cpu_sync_owner , -1 ,
3725
+ cpu ); /* LMM(__printk_cpu_sync_try_get :A) */
3725
3726
if (old == -1 ) {
3726
3727
/*
3727
3728
* This CPU is now the owner and begins loading/storing
3728
- * data: LMM(__printk_cpu_trylock :B)
3729
+ * data: LMM(__printk_cpu_sync_try_get :B)
3729
3730
*/
3730
3731
return 1 ;
3731
3732
3732
3733
} else if (old == cpu ) {
3733
3734
/* This CPU is already the owner. */
3734
- atomic_inc (& printk_cpulock_nested );
3735
+ atomic_inc (& printk_cpu_sync_nested );
3735
3736
return 1 ;
3736
3737
}
3737
3738
3738
3739
return 0 ;
3739
3740
}
3740
- EXPORT_SYMBOL (__printk_cpu_trylock );
3741
+ EXPORT_SYMBOL (__printk_cpu_sync_try_get );
3741
3742
3742
3743
/**
3743
- * __printk_cpu_unlock () - Release the printk cpu-reentrant spinning lock.
3744
+ * __printk_cpu_sync_put () - Release the printk cpu-reentrant spinning lock.
3744
3745
*
3745
3746
* The calling processor must be the owner of the lock.
3746
3747
*
3747
3748
* Context: Any context. Expects interrupts to be disabled.
3748
3749
*/
3749
- void __printk_cpu_unlock (void )
3750
+ void __printk_cpu_sync_put (void )
3750
3751
{
3751
- if (atomic_read (& printk_cpulock_nested )) {
3752
- atomic_dec (& printk_cpulock_nested );
3752
+ if (atomic_read (& printk_cpu_sync_nested )) {
3753
+ atomic_dec (& printk_cpu_sync_nested );
3753
3754
return ;
3754
3755
}
3755
3756
3756
3757
/*
3757
3758
* This CPU is finished loading/storing data:
3758
- * LMM(__printk_cpu_unlock :A)
3759
+ * LMM(__printk_cpu_sync_put :A)
3759
3760
*/
3760
3761
3761
3762
/*
3762
3763
* Guarantee loads and stores from this CPU when it was the
3763
3764
* lock owner are visible to the next lock owner. This pairs
3764
- * with __printk_cpu_trylock :A.
3765
+ * with __printk_cpu_sync_try_get :A.
3765
3766
*
3766
3767
* Memory barrier involvement:
3767
3768
*
3768
- * If __printk_cpu_trylock :A reads from __printk_cpu_unlock :B,
3769
- * then __printk_cpu_trylock :B reads from __printk_cpu_unlock :A.
3769
+ * If __printk_cpu_sync_try_get :A reads from __printk_cpu_sync_put :B,
3770
+ * then __printk_cpu_sync_try_get :B reads from __printk_cpu_sync_put :A.
3770
3771
*
3771
3772
* Relies on:
3772
3773
*
3773
- * RELEASE from __printk_cpu_unlock :A to __printk_cpu_unlock :B
3774
+ * RELEASE from __printk_cpu_sync_put :A to __printk_cpu_sync_put :B
3774
3775
* of this CPU
3775
3776
* matching
3776
- * ACQUIRE from __printk_cpu_trylock :A to __printk_cpu_trylock:B
3777
- * of the next CPU
3777
+ * ACQUIRE from __printk_cpu_sync_try_get :A to
3778
+ * __printk_cpu_sync_try_get:B of the next CPU
3778
3779
*/
3779
- atomic_set_release (& printk_cpulock_owner ,
3780
- -1 ); /* LMM(__printk_cpu_unlock :B) */
3780
+ atomic_set_release (& printk_cpu_sync_owner ,
3781
+ -1 ); /* LMM(__printk_cpu_sync_put :B) */
3781
3782
}
3782
- EXPORT_SYMBOL (__printk_cpu_unlock );
3783
+ EXPORT_SYMBOL (__printk_cpu_sync_put );
3783
3784
#endif /* CONFIG_SMP */