Commit 37f8173

Peter Zijlstra authored and KAGA-KOKO committed
locking/atomics: Flip fallbacks and instrumentation
Currently instrumentation of atomic primitives is done at the architecture level, while composites or fallbacks are provided at the generic level.

The result is that there are no uninstrumented variants of the fallbacks. Since there is now a need for such variants to isolate text poke from any form of instrumentation, invert this ordering.

Doing this means moving the instrumentation into the generic code as well as having (for now) two variants of the fallbacks.

Notes:

- the various *cond_read* primitives are not proper fallbacks and got moved into linux/atomic.c. No arch_ variants are generated because the base primitives smp_cond_load*() are instrumented.

- once all architectures are moved over to arch_atomic_ one of the fallback variants can be removed and some 2300 lines reclaimed.

- atomic_{read,set}*() are no longer double-instrumented

Reported-by: Thomas Gleixner <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Acked-by: Mark Rutland <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 765dcd2 commit 37f8173

28 files changed: +2403 -82 lines changed
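With this inversion, the sanitizer hooks live in the generic atomic_*() wrappers, which simply delegate to the uninstrumented arch_atomic_*() primitives that the diffs below switch the architectures over to. A minimal sketch of that wrapper shape, offered as an assumption: it is simplified from the generated asm-generic/atomic-instrumented.h (produced by scripts/atomic/gen-atomic-instrumented.sh) and names the instrument_atomic_*() helpers, neither of which is shown in this excerpt.

static __always_inline int
atomic_read(const atomic_t *v)
{
        instrument_atomic_read(v, sizeof(*v));  /* sanitizer hook, now at the generic level */
        return arch_atomic_read(v);             /* uninstrumented arch primitive            */
}

static __always_inline void
atomic_set(atomic_t *v, int i)
{
        instrument_atomic_write(v, sizeof(*v));
        arch_atomic_set(v, i);
}

This pairs with the READ_ONCE() to __READ_ONCE() changes below: with the check emitted once in the wrapper, the arch accessors can use the plain, unchecked loads and stores, which lines up with the "no longer double-instrumented" note above.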

arch/arm64/include/asm/atomic.h

Lines changed: 3 additions & 3 deletions

@@ -101,8 +101,8 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 
 #define ATOMIC_INIT(i) { (i) }
 
-#define arch_atomic_read(v) READ_ONCE((v)->counter)
-#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v) __READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) __WRITE_ONCE(((v)->counter), (i))
 
 #define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
 #define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
@@ -225,6 +225,6 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 
 #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 
-#include <asm-generic/atomic-instrumented.h>
+#define ARCH_ATOMIC
 
 #endif /* __ASM_ATOMIC_H */
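Defining ARCH_ATOMIC here, instead of including asm-generic/atomic-instrumented.h directly, signals that this architecture supplies arch_atomic_*() primitives and wants the generic layer to stack the arch_ fallbacks and the instrumented wrappers on top of them. A rough sketch of the dispatch this selects, stated as an assumption reconstructed from the commit description (the include/linux/atomic.h hunk is not part of this excerpt):

#ifdef ARCH_ATOMIC
#include <linux/atomic-arch-fallback.h>         /* arch_atomic_*() fallback variants      */
#include <asm-generic/atomic-instrumented.h>    /* instrumented atomic_*() wrappers       */
#else
#include <linux/atomic-fallback.h>              /* fallbacks for not-yet-converted arches */
#endif

This is also why the commit message speaks of "(for now) two variants of the fallbacks": one generated against arch_atomic_*() and one against atomic_*(), until every architecture defines ARCH_ATOMIC.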

arch/x86/include/asm/atomic.h

Lines changed: 13 additions & 4 deletions

@@ -28,7 +28,7 @@ static __always_inline int arch_atomic_read(const atomic_t *v)
         * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
         * it's non-inlined function that increases binary size and stack usage.
         */
-       return READ_ONCE((v)->counter);
+       return __READ_ONCE((v)->counter);
 }
 
 /**
@@ -40,7 +40,7 @@ static __always_inline int arch_atomic_read(const atomic_t *v)
  */
 static __always_inline void arch_atomic_set(atomic_t *v, int i)
 {
-       WRITE_ONCE(v->counter, i);
+       __WRITE_ONCE(v->counter, i);
 }
 
 /**
@@ -166,6 +166,7 @@ static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
 {
        return i + xadd(&v->counter, i);
 }
+#define arch_atomic_add_return arch_atomic_add_return
 
 /**
  * arch_atomic_sub_return - subtract integer and return
@@ -178,32 +179,37 @@ static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
 {
        return arch_atomic_add_return(-i, v);
 }
+#define arch_atomic_sub_return arch_atomic_sub_return
 
 static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
 {
        return xadd(&v->counter, i);
 }
+#define arch_atomic_fetch_add arch_atomic_fetch_add
 
 static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
 {
        return xadd(&v->counter, -i);
 }
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
 
 static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        return arch_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
 
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
 static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
 {
        return try_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
 
 static inline int arch_atomic_xchg(atomic_t *v, int new)
 {
        return arch_xchg(&v->counter, new);
 }
+#define arch_atomic_xchg arch_atomic_xchg
 
 static inline void arch_atomic_and(int i, atomic_t *v)
 {
@@ -221,6 +227,7 @@ static inline int arch_atomic_fetch_and(int i, atomic_t *v)
 
        return val;
 }
+#define arch_atomic_fetch_and arch_atomic_fetch_and
 
 static inline void arch_atomic_or(int i, atomic_t *v)
 {
@@ -238,6 +245,7 @@ static inline int arch_atomic_fetch_or(int i, atomic_t *v)
 
        return val;
 }
+#define arch_atomic_fetch_or arch_atomic_fetch_or
 
 static inline void arch_atomic_xor(int i, atomic_t *v)
 {
@@ -255,13 +263,14 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
 
        return val;
 }
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
 
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
 #else
 # include <asm/atomic64_64.h>
 #endif
 
-#include <asm-generic/atomic-instrumented.h>
+#define ARCH_ATOMIC
 
 #endif /* _ASM_X86_ATOMIC_H */
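The #define arch_atomic_<op> arch_atomic_<op> self-aliases added after each function exist because the generic fallback machinery probes for arch-provided operations with the preprocessor, and cpp cannot see C function definitions. A condensed illustration of the pattern, given as an assumption about the generated headers (the real ones come from scripts/atomic/gen-atomic-fallback.sh and are not shown here):

/* arch header: the function plus a same-named macro so #ifndef can detect it */
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
        return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

/* generated fallback header: only fill in the variants the arch left undefined */
#ifndef arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
#define arch_atomic_fetch_add_release arch_atomic_fetch_add
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add
#endif

The same reasoning applies to the atomic64_*() helpers in the two files that follow.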

arch/x86/include/asm/atomic64_32.h

Lines changed: 9 additions & 0 deletions

@@ -75,6 +75,7 @@ static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 {
        return arch_cmpxchg64(&v->counter, o, n);
 }
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
 
 /**
  * arch_atomic64_xchg - xchg atomic64 variable
@@ -94,6 +95,7 @@ static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
                             : "memory");
        return o;
 }
+#define arch_atomic64_xchg arch_atomic64_xchg
 
 /**
  * arch_atomic64_set - set atomic64 variable
@@ -138,6 +140,7 @@ static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
                             ASM_NO_INPUT_CLOBBER("memory"));
        return i;
 }
+#define arch_atomic64_add_return arch_atomic64_add_return
 
 /*
  * Other variants with different arithmetic operators:
@@ -149,6 +152,7 @@
                             ASM_NO_INPUT_CLOBBER("memory"));
        return i;
 }
+#define arch_atomic64_sub_return arch_atomic64_sub_return
 
 static inline s64 arch_atomic64_inc_return(atomic64_t *v)
 {
@@ -242,6 +246,7 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
                     "S" (v) : "memory");
        return (int)a;
 }
+#define arch_atomic64_add_unless arch_atomic64_add_unless
 
 static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
 {
@@ -281,6 +286,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
 
        return old;
 }
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
 
 static inline void arch_atomic64_or(s64 i, atomic64_t *v)
 {
@@ -299,6 +305,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
 
        return old;
 }
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
 
 static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
 {
@@ -317,6 +324,7 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
 
        return old;
 }
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
 
 static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
@@ -327,6 +335,7 @@ static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 
        return old;
 }
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
 
 #define arch_atomic64_fetch_sub(i, v) arch_atomic64_fetch_add(-(i), (v))
 
arch/x86/include/asm/atomic64_64.h

Lines changed: 12 additions & 3 deletions

@@ -19,7 +19,7 @@
  */
 static inline s64 arch_atomic64_read(const atomic64_t *v)
 {
-       return READ_ONCE((v)->counter);
+       return __READ_ONCE((v)->counter);
 }
 
 /**
@@ -31,7 +31,7 @@ static inline s64 arch_atomic64_read(const atomic64_t *v)
  */
 static inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
-       WRITE_ONCE(v->counter, i);
+       __WRITE_ONCE(v->counter, i);
 }
 
 /**
@@ -159,37 +159,43 @@ static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 {
        return i + xadd(&v->counter, i);
 }
+#define arch_atomic64_add_return arch_atomic64_add_return
 
 static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
 {
        return arch_atomic64_add_return(-i, v);
 }
+#define arch_atomic64_sub_return arch_atomic64_sub_return
 
 static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
        return xadd(&v->counter, i);
 }
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
 
 static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
 {
        return xadd(&v->counter, -i);
 }
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
 
 static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
        return arch_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
 
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
 static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
 {
        return try_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
 
 static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
 {
        return arch_xchg(&v->counter, new);
 }
+#define arch_atomic64_xchg arch_atomic64_xchg
 
 static inline void arch_atomic64_and(s64 i, atomic64_t *v)
 {
@@ -207,6 +213,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
        } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
        return val;
 }
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
 
 static inline void arch_atomic64_or(s64 i, atomic64_t *v)
 {
@@ -224,6 +231,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
        } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
        return val;
 }
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
 
 static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
 {
@@ -241,5 +249,6 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
        } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
        return val;
 }
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
 
 #endif /* _ASM_X86_ATOMIC64_64_H */
