Skip to content

Commit 21689e4

Browse files
ubizjak authored and Ingo Molnar committed
locking/atomic/x86: Define arch_atomic_sub() family using arch_atomic_add() functions
There is no need to implement arch_atomic_sub() family of inline functions, corresponding macros can be directly implemented using arch_atomic_add() inlines with negated argument. No functional changes intended. Signed-off-by: Uros Bizjak <[email protected]> Signed-off-by: Ingo Molnar <[email protected]> Cc: Linus Torvalds <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 95ece48 commit 21689e4

File tree

2 files changed

+4
-20
lines changed

2 files changed

+4
-20
lines changed

arch/x86/include/asm/atomic.h

Lines changed: 2 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -86,23 +86,15 @@ static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
8686
}
8787
#define arch_atomic_add_return arch_atomic_add_return
8888

89-
static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
90-
{
91-
return arch_atomic_add_return(-i, v);
92-
}
93-
#define arch_atomic_sub_return arch_atomic_sub_return
89+
#define arch_atomic_sub_return(i, v) arch_atomic_add_return(-(i), v)
9490

9591
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
9692
{
9793
return xadd(&v->counter, i);
9894
}
9995
#define arch_atomic_fetch_add arch_atomic_fetch_add
10096

101-
static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
102-
{
103-
return xadd(&v->counter, -i);
104-
}
105-
#define arch_atomic_fetch_sub arch_atomic_fetch_sub
97+
#define arch_atomic_fetch_sub(i, v) arch_atomic_fetch_add(-(i), v)
10698

10799
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
108100
{

arch/x86/include/asm/atomic64_64.h

Lines changed: 2 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -80,23 +80,15 @@ static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
8080
}
8181
#define arch_atomic64_add_return arch_atomic64_add_return
8282

83-
static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
84-
{
85-
return arch_atomic64_add_return(-i, v);
86-
}
87-
#define arch_atomic64_sub_return arch_atomic64_sub_return
83+
#define arch_atomic64_sub_return(i, v) arch_atomic64_add_return(-(i), v)
8884

8985
static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
9086
{
9187
return xadd(&v->counter, i);
9288
}
9389
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
9490

95-
static __always_inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
96-
{
97-
return xadd(&v->counter, -i);
98-
}
99-
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
91+
#define arch_atomic64_fetch_sub(i, v) arch_atomic64_fetch_add(-(i), v)
10092

10193
static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
10294
{

0 commit comments

Comments
 (0)