Commit 01cac82

Ilya Leoshkevich (iii-i) authored and Vasily Gorbik committed
s390/atomic: mark all functions __always_inline
Atomic functions are quite ubiquitous and may be called by noinstr ones, introducing unwanted instrumentation. They are very small, so there are no significant downsides to force-inlining them.

Signed-off-by: Ilya Leoshkevich <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Heiko Carstens <[email protected]>
Signed-off-by: Vasily Gorbik <[email protected]>
1 parent e6ec07d commit 01cac82
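Why this matters, in brief: noinstr code must not call into instrumented functions. If an atomic helper is only "static inline", the compiler is free to emit an out-of-line, instrumented copy and call that; __always_inline guarantees the operation is folded into the caller. A minimal sketch of such a caller follows (hypothetical, not taken from this commit: the function name and counter are made up; only noinstr, __always_inline, ATOMIC_INIT and arch_atomic_add() are real kernel identifiers):

#include <linux/compiler_types.h>	/* noinstr, __always_inline */
#include <linux/atomic.h>		/* atomic_t, arch_atomic_add() */

static atomic_t entry_depth = ATOMIC_INIT(0);	/* made-up counter */

/*
 * Sketch only: a noinstr function may run before instrumentation
 * (KASAN, KCSAN, tracing hooks) is safe.  If arch_atomic_add() could
 * be emitted out of line, the call below would jump into instrumented
 * code; with __always_inline it is folded into this function body.
 */
noinstr void hypothetical_entry_hook(void)
{
	arch_atomic_add(1, &entry_depth);
}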

File tree

2 files changed: +33 / -33 lines


arch/s390/include/asm/atomic.h
Lines changed: 22 additions & 22 deletions

@@ -15,31 +15,31 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>

-static inline int arch_atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
 {
 	return __atomic_read(v);
 }
 #define arch_atomic_read arch_atomic_read

-static inline void arch_atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
 {
 	__atomic_set(v, i);
 }
 #define arch_atomic_set arch_atomic_set

-static inline int arch_atomic_add_return(int i, atomic_t *v)
+static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
 {
 	return __atomic_add_barrier(i, &v->counter) + i;
 }
 #define arch_atomic_add_return arch_atomic_add_return

-static inline int arch_atomic_fetch_add(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
 {
 	return __atomic_add_barrier(i, &v->counter);
 }
 #define arch_atomic_fetch_add arch_atomic_fetch_add

-static inline void arch_atomic_add(int i, atomic_t *v)
+static __always_inline void arch_atomic_add(int i, atomic_t *v)
 {
 	__atomic_add(i, &v->counter);
 }
@@ -50,11 +50,11 @@ static inline void arch_atomic_add(int i, atomic_t *v)
 #define arch_atomic_fetch_sub(_i, _v) arch_atomic_fetch_add(-(int)(_i), _v)

 #define ATOMIC_OPS(op) \
-static inline void arch_atomic_##op(int i, atomic_t *v) \
+static __always_inline void arch_atomic_##op(int i, atomic_t *v) \
 { \
 	__atomic_##op(i, &v->counter); \
 } \
-static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
+static __always_inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
 { \
 	return __atomic_##op##_barrier(i, &v->counter); \
 }
@@ -74,60 +74,60 @@ ATOMIC_OPS(xor)

 #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))

-static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	return __atomic_cmpxchg(&v->counter, old, new);
 }
 #define arch_atomic_cmpxchg arch_atomic_cmpxchg

 #define ATOMIC64_INIT(i) { (i) }

-static inline s64 arch_atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
 {
 	return __atomic64_read(v);
 }
 #define arch_atomic64_read arch_atomic64_read

-static inline void arch_atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
 	__atomic64_set(v, i);
 }
 #define arch_atomic64_set arch_atomic64_set

-static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 {
 	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
 }
 #define arch_atomic64_add_return arch_atomic64_add_return

-static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
 	return __atomic64_add_barrier(i, (long *)&v->counter);
 }
 #define arch_atomic64_fetch_add arch_atomic64_fetch_add

-static inline void arch_atomic64_add(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
 {
 	__atomic64_add(i, (long *)&v->counter);
 }
 #define arch_atomic64_add arch_atomic64_add

 #define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))

-static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
 	return __atomic64_cmpxchg((long *)&v->counter, old, new);
 }
 #define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

-#define ATOMIC64_OPS(op) \
-static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
-{ \
-	__atomic64_##op(i, (long *)&v->counter); \
-} \
-static inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
-{ \
-	return __atomic64_##op##_barrier(i, (long *)&v->counter); \
+#define ATOMIC64_OPS(op) \
+static __always_inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
+{ \
+	__atomic64_##op(i, (long *)&v->counter); \
+} \
+static __always_inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
+{ \
+	return __atomic64_##op##_barrier(i, (long *)&v->counter); \
 }

 ATOMIC64_OPS(and)

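For reference, ATOMIC_OPS(and) above expands to roughly the following after this change (hand-expanded from the macro body shown in the hunk, not a verbatim preprocessor dump):

static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
	__atomic_and(i, &v->counter);			/* non-barrier variant */
}

static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	return __atomic_and_barrier(i, &v->counter);	/* barrier form used for the fetch variant */
}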
arch/s390/include/asm/atomic_ops.h
Lines changed: 11 additions & 11 deletions

@@ -8,7 +8,7 @@
 #ifndef __ARCH_S390_ATOMIC_OPS__
 #define __ARCH_S390_ATOMIC_OPS__

-static inline int __atomic_read(const atomic_t *v)
+static __always_inline int __atomic_read(const atomic_t *v)
 {
 	int c;

@@ -18,14 +18,14 @@ static inline int __atomic_read(const atomic_t *v)
 	return c;
 }

-static inline void __atomic_set(atomic_t *v, int i)
+static __always_inline void __atomic_set(atomic_t *v, int i)
 {
 	asm volatile(
 		" st %1,%0\n"
 		: "=R" (v->counter) : "d" (i));
 }

-static inline s64 __atomic64_read(const atomic64_t *v)
+static __always_inline s64 __atomic64_read(const atomic64_t *v)
 {
 	s64 c;

@@ -35,7 +35,7 @@ static inline s64 __atomic64_read(const atomic64_t *v)
 	return c;
 }

-static inline void __atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void __atomic64_set(atomic64_t *v, s64 i)
 {
 	asm volatile(
 		" stg %1,%0\n"
@@ -45,7 +45,7 @@ static inline void __atomic64_set(atomic64_t *v, s64 i)
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

 #define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
-static inline op_type op_name(op_type val, op_type *ptr) \
+static __always_inline op_type op_name(op_type val, op_type *ptr) \
 { \
 	op_type old; \
 	\
@@ -96,7 +96,7 @@ __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
 #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

 #define __ATOMIC_OP(op_name, op_string) \
-static inline int op_name(int val, int *ptr) \
+static __always_inline int op_name(int val, int *ptr) \
 { \
 	int old, new; \
 	\
@@ -122,7 +122,7 @@ __ATOMIC_OPS(__atomic_xor, "xr")
 #undef __ATOMIC_OPS

 #define __ATOMIC64_OP(op_name, op_string) \
-static inline long op_name(long val, long *ptr) \
+static __always_inline long op_name(long val, long *ptr) \
 { \
 	long old, new; \
 	\
@@ -154,7 +154,7 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")

 #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

-static inline int __atomic_cmpxchg(int *ptr, int old, int new)
+static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new)
 {
 	asm volatile(
 		" cs %[old],%[new],%[ptr]"
@@ -164,7 +164,7 @@ static inline int __atomic_cmpxchg(int *ptr, int old, int new)
 	return old;
 }

-static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
+static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
 {
 	int old_expected = old;

@@ -176,7 +176,7 @@ static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
 	return old == old_expected;
 }

-static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
+static __always_inline long __atomic64_cmpxchg(long *ptr, long old, long new)
 {
 	asm volatile(
 		" csg %[old],%[new],%[ptr]"
@@ -186,7 +186,7 @@ static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
 	return old;
 }

-static inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
+static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
 {
 	long old_expected = old;

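For context, a usage sketch of one of the helpers touched above (the caller is hypothetical; only the __atomic_cmpxchg_bool() signature and its true-on-success behaviour come from this diff):

/*
 * Hypothetical trylock built on the compare-and-swap helper: attempt
 * a 0 -> 1 transition on the lock word and report whether the swap
 * succeeded.
 */
static __always_inline bool hypothetical_trylock(int *lock)
{
	return __atomic_cmpxchg_bool(lock, 0, 1);
}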
0 commit comments
