 static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
-        u32 val = atomic_fetch_add(1<<16, lock);
+        u32 val = atomic_fetch_add(1<<16, &lock->val);
         u16 ticket = val >> 16;
 
         if (ticket == (u16)val)
@@ -46,31 +46,31 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
          * have no outstanding writes due to the atomic_fetch_add() the extra
          * orderings are free.
          */
-        atomic_cond_read_acquire(lock, ticket == (u16)VAL);
+        atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL);
         smp_mb();
 }
 
 static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock)
 {
-        u32 old = atomic_read(lock);
+        u32 old = atomic_read(&lock->val);
 
         if ((old >> 16) != (old & 0xffff))
                 return false;
 
-        return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */
+        return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */
 }
 
 static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
         u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
-        u32 val = atomic_read(lock);
+        u32 val = atomic_read(&lock->val);
 
         smp_store_release(ptr, (u16)val + 1);
 }
 
 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
-        u32 val = lock.counter;
+        u32 val = lock.val.counter;
 
         return ((val >> 16) == (val & 0xffff));
 }
@@ -84,7 +84,7 @@ static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
 
 static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
-        u32 val = atomic_read(lock);
+        u32 val = atomic_read(&lock->val);
 
         return (s16)((val >> 16) - (val & 0xffff)) > 1;
 }
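
Every hunk above replaces a bare lock / lock.counter access with &lock->val / lock.val.counter, which only makes sense if arch_spinlock_t now wraps the 32-bit ticket word in a member named val instead of being a plain atomic_t. A rough sketch of the kind of layout this assumes follows; it is an illustrative guess, not the exact upstream definition, and the half-word order shown is the little-endian one (which is why arch_spin_unlock() adds IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) to the u16 pointer).

/* Hypothetical layout assumed by the diff, for illustration only. */
#include <linux/atomic.h>
#include <linux/types.h>

typedef struct {
        union {
                atomic_t val;           /* whole 32-bit ticket word */
                struct {                /* little-endian field order */
                        u16 owner;      /* low half: ticket now being served */
                        u16 next;       /* high half: next ticket to hand out */
                };
        };
} arch_spinlock_t;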
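
For readers following the algorithm rather than the accessor change, here is a minimal user-space sketch of the same single-word ticket lock written with C11 atomics. The names (ticket_lock_t, ticket_lock(), ticket_trylock(), ticket_unlock()) are illustrative, not kernel API, and the memory orderings are simplified: the kernel version pairs atomic_cond_read_acquire() with an extra smp_mb() to get RCsc lock ordering, while this sketch settles for acquire/release.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
        _Atomic uint32_t val;   /* low 16 bits: owner, high 16 bits: next ticket */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *lock)
{
        /* Take a ticket by bumping the high half of the word. */
        uint32_t val = atomic_fetch_add_explicit(&lock->val, 1u << 16,
                                                 memory_order_acquire);
        uint16_t ticket = (uint16_t)(val >> 16);

        if (ticket == (uint16_t)val)
                return;         /* uncontended fast path: owner == our ticket */

        /* Spin until the owner half catches up to our ticket. */
        while ((uint16_t)atomic_load_explicit(&lock->val,
                                              memory_order_acquire) != ticket)
                ;               /* a pause/yield hint would normally go here */
}

static bool ticket_trylock(ticket_lock_t *lock)
{
        uint32_t old = atomic_load_explicit(&lock->val, memory_order_relaxed);

        if ((old >> 16) != (old & 0xffffu))
                return false;   /* held, or waiters already queued */

        /* Claim the next ticket only if the word has not changed meanwhile. */
        return atomic_compare_exchange_strong_explicit(&lock->val, &old,
                                                       old + (1u << 16),
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
}

static void ticket_unlock(ticket_lock_t *lock)
{
        /*
         * The kernel releases the lock with a 16-bit smp_store_release() on
         * the owner half; C11 cannot portably store into half of a single
         * _Atomic uint32_t, so bump the owner half with a release CAS loop.
         */
        uint32_t cur = atomic_load_explicit(&lock->val, memory_order_relaxed);

        while (!atomic_compare_exchange_weak_explicit(&lock->val, &cur,
                        (cur & 0xffff0000u) | (uint16_t)(cur + 1),
                        memory_order_release, memory_order_relaxed))
                ;
}

Because arrivals take strictly increasing tickets from the high half and each unlock advances the owner half by one, the lock is FIFO-fair; the (next - owner) > 1 test in arch_spin_is_contended() above reads as "more than one ticket outstanding".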