@@ -34,13 +34,13 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 /* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {	\
-	arch_spinlock_t *s = ATOMIC_HASH(l);	\
+	arch_spinlock_t *s = ATOMIC_HASH(l);	\
 	local_irq_save(f);			\
 	arch_spin_lock(s);			\
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {	\
-	arch_spinlock_t *s = ATOMIC_HASH(l);		\
+	arch_spinlock_t *s = ATOMIC_HASH(l);		\
 	arch_spin_unlock(s);				\
 	local_irq_restore(f);				\
 } while(0)
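
The two changed lines in this hunk apparently differ only in the whitespace before the trailing line-continuation backslashes; the code itself is untouched. For context, ATOMIC_HASH() is defined earlier in arch/parisc/include/asm/atomic.h, outside this hunk. A rough sketch of the usual scheme, with the exact table size and cacheline constant assumed rather than taken from this diff:

/* Illustrative sketch only -- not the exact in-tree definition.
 * Hash the address of the atomic_t down to one of ATOMIC_HASH_SIZE
 * pre-allocated spinlocks, one lock per L1 cacheline's worth of
 * addresses, so unrelated atomics usually land on different locks.
 */
#define ATOMIC_HASH(a) \
	(&__atomic_hash[(((unsigned long)(a)) / L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE - 1)])

_atomic_spin_lock_irqsave() then takes that per-bucket lock with local interrupts disabled; this is how PA-RISC, which lacks general atomic read-modify-write instructions, emulates the atomic_t API.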
@@ -85,7 +85,7 @@ static __inline__ void atomic_##op(int i, atomic_t *v) \
 	_atomic_spin_lock_irqsave(v, flags);		\
 	v->counter c_op i;				\
 	_atomic_spin_unlock_irqrestore(v, flags);	\
-}							\
+}
 
 #define ATOMIC_OP_RETURN(op, c_op)				\
 static __inline__ int atomic_##op##_return(int i, atomic_t *v)	\
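
This change drops a stray line continuation after the generated function body: with the trailing backslash, the ATOMIC_OP() definition also swallowed the blank line following it, whereas now the macro ends at the closing brace. For reference, instantiating the template as, say, ATOMIC_OP(add, +=) expands to roughly:

/* Hypothetical expansion of ATOMIC_OP(add, +=) under this template. */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);
	v->counter += i;	/* "v->counter c_op i" with c_op = "+=" */
	_atomic_spin_unlock_irqrestore(v, flags);
}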
@@ -150,7 +150,7 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
 	_atomic_spin_lock_irqsave(v, flags);		\
 	v->counter c_op i;				\
 	_atomic_spin_unlock_irqrestore(v, flags);	\
-}							\
+}
 
 #define ATOMIC64_OP_RETURN(op, c_op)					\
 static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
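
The 64-bit template gets the identical fix: the stray backslash after atomic64_##op()'s closing brace is removed, and the s64 code otherwise mirrors the 32-bit version. The ATOMIC_OP_RETURN()/ATOMIC64_OP_RETURN() templates visible as context are untouched; their bodies lie outside these hunks, but they presumably follow the same lock, modify, unlock pattern while also returning the updated value. A sketch under that assumption, not the exact in-tree code:

/* Assumed shape of ATOMIC_OP_RETURN(op, c_op); the body is not shown
 * in this diff.
 */
#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}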