@@ -196,22 +196,28 @@ ATOMIC_OPS(xor, xor, i)
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 
+#define _arch_atomic_fetch_add_unless(_prev, _rc, counter, _a, _u, sfx)	\
+({										\
+	__asm__ __volatile__ (							\
+		"0:	lr." sfx "     %[p],  %[c]\n"				\
+		"	beq      %[p],  %[u], 1f\n"				\
+		"	add      %[rc], %[p], %[a]\n"				\
+		"	sc." sfx ".rl  %[rc], %[rc], %[c]\n"			\
+		"	bnez     %[rc], 0b\n"					\
+		"	fence    rw, rw\n"					\
+		"1:\n"								\
+		: [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter)		\
+		: [a]"r" (_a), [u]"r" (_u)					\
+		: "memory");							\
+})
+
 /* This is required to provide a full barrier on success. */
 static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int prev, rc;
 
-	__asm__ __volatile__ (
-		"0:	lr.w     %[p],  %[c]\n"
-		"	beq      %[p],  %[u], 1f\n"
-		"	add      %[rc], %[p], %[a]\n"
-		"	sc.w.rl  %[rc], %[rc], %[c]\n"
-		"	bnez     %[rc], 0b\n"
-		"	fence    rw, rw\n"
-		"1:\n"
-		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-		: [a]"r" (a), [u]"r" (u)
-		: "memory");
+	_arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w");
+
 	return prev;
 }
 #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
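The new _arch_atomic_fetch_add_unless() helper stamps out the same LR/SC retry loop for both widths: it adds _a to the counter unless the current value equals _u, leaves the old value in _prev, and issues a full fence only on the successful store. As a reading aid only (not part of the patch, and with made-up names), the same semantics in portable C using the GCC/Clang __atomic builtins:

/* Not part of the patch: a standalone sketch of the semantics the LR/SC
 * loop above implements, using the GCC/Clang __atomic builtins. The name
 * sketch_fetch_add_unless() is invented for this example. */
static int sketch_fetch_add_unless(int *counter, int a, int u)
{
	int prev = __atomic_load_n(counter, __ATOMIC_RELAXED);

	/* Retry until either the observed value equals u (no change made)
	 * or the compare-and-swap installs prev + a.  SEQ_CST on success
	 * only approximates the trailing "fence rw, rw" in the asm. */
	while (prev != u &&
	       !__atomic_compare_exchange_n(counter, &prev, prev + a,
					    true, __ATOMIC_SEQ_CST,
					    __ATOMIC_RELAXED))
		;

	return prev;	/* old value, like %[p] in the asm */
}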
@@ -222,77 +228,86 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
 	s64 prev;
 	long rc;
 
-	__asm__ __volatile__ (
-		"0:	lr.d     %[p],  %[c]\n"
-		"	beq      %[p],  %[u], 1f\n"
-		"	add      %[rc], %[p], %[a]\n"
-		"	sc.d.rl  %[rc], %[rc], %[c]\n"
-		"	bnez     %[rc], 0b\n"
-		"	fence    rw, rw\n"
-		"1:\n"
-		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-		: [a]"r" (a), [u]"r" (u)
-		: "memory");
+	_arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "d");
+
 	return prev;
 }
 #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
 #endif
 
+#define _arch_atomic_inc_unless_negative(_prev, _rc, counter, sfx)	\
+({									\
+	__asm__ __volatile__ (						\
+		"0:	lr." sfx "     %[p],  %[c]\n"			\
+		"	bltz     %[p],  1f\n"				\
+		"	addi     %[rc], %[p], 1\n"			\
+		"	sc." sfx ".rl  %[rc], %[rc], %[c]\n"		\
+		"	bnez     %[rc], 0b\n"				\
+		"	fence    rw, rw\n"				\
+		"1:\n"							\
+		: [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter)	\
+		:							\
+		: "memory");						\
+})
+
 static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
 {
 	int prev, rc;
 
-	__asm__ __volatile__ (
-		"0:	lr.w     %[p],  %[c]\n"
-		"	bltz     %[p],  1f\n"
-		"	addi     %[rc], %[p], 1\n"
-		"	sc.w.rl  %[rc], %[rc], %[c]\n"
-		"	bnez     %[rc], 0b\n"
-		"	fence    rw, rw\n"
-		"1:\n"
-		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-		:
-		: "memory");
+	_arch_atomic_inc_unless_negative(prev, rc, v->counter, "w");
+
 	return !(prev < 0);
 }
 
 #define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
 
+#define _arch_atomic_dec_unless_positive(_prev, _rc, counter, sfx)	\
+({									\
+	__asm__ __volatile__ (						\
+		"0:	lr." sfx "     %[p],  %[c]\n"			\
+		"	bgtz     %[p],  1f\n"				\
+		"	addi     %[rc], %[p], -1\n"			\
+		"	sc." sfx ".rl  %[rc], %[rc], %[c]\n"		\
+		"	bnez     %[rc], 0b\n"				\
+		"	fence    rw, rw\n"				\
+		"1:\n"							\
+		: [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter)	\
+		:							\
+		: "memory");						\
+})
+
 static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
 {
 	int prev, rc;
 
-	__asm__ __volatile__ (
-		"0:	lr.w     %[p],  %[c]\n"
-		"	bgtz     %[p],  1f\n"
-		"	addi     %[rc], %[p], -1\n"
-		"	sc.w.rl  %[rc], %[rc], %[c]\n"
-		"	bnez     %[rc], 0b\n"
-		"	fence    rw, rw\n"
-		"1:\n"
-		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-		:
-		: "memory");
+	_arch_atomic_dec_unless_positive(prev, rc, v->counter, "w");
+
 	return !(prev > 0);
 }
 
 #define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
 
+#define _arch_atomic_dec_if_positive(_prev, _rc, counter, sfx)		\
+({									\
+	__asm__ __volatile__ (						\
+		"0:	lr." sfx "     %[p],  %[c]\n"			\
+		"	addi     %[rc], %[p], -1\n"			\
+		"	bltz     %[rc], 1f\n"				\
+		"	sc." sfx ".rl  %[rc], %[rc], %[c]\n"		\
+		"	bnez     %[rc], 0b\n"				\
+		"	fence    rw, rw\n"				\
+		"1:\n"							\
+		: [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter)	\
+		:							\
+		: "memory");						\
+})
+
 static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
 {
 	int prev, rc;
 
-	__asm__ __volatile__ (
-		"0:	lr.w     %[p],  %[c]\n"
-		"	addi     %[rc], %[p], -1\n"
-		"	bltz     %[rc], 1f\n"
-		"	sc.w.rl  %[rc], %[rc], %[c]\n"
-		"	bnez     %[rc], 0b\n"
-		"	fence    rw, rw\n"
-		"1:\n"
-		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-		:
-		: "memory");
+	_arch_atomic_dec_if_positive(prev, rc, v->counter, "w");
+
 	return prev - 1;
 }
 
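This hunk also factors the three conditional helpers into macros: inc_unless_negative skips the increment when the value is already negative (bltz on %[p]), dec_unless_positive skips the decrement when the value is already positive (bgtz on %[p]), and dec_if_positive decrements only when the result stays non-negative (addi then bltz on %[rc]). For illustration only (the names below are invented, not from the patch), a typical caller of the dec_if_positive operation through the generic wrapper, which returns the old value minus one whether or not the decrement happened:

/* Not part of the patch: an illustrative caller, with invented names,
 * built on the generic wrapper from <linux/atomic.h>. */
#include <linux/atomic.h>

/*
 * Try to take one unit of a counted resource.  atomic_dec_if_positive()
 * performs the decrement only when the result stays >= 0 and returns the
 * old value minus one either way, so a negative return means nothing was
 * taken and the counter is unchanged.
 */
static bool sketch_try_take(atomic_t *available)
{
	return atomic_dec_if_positive(available) >= 0;
}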
@@ -304,17 +319,8 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
 	s64 prev;
 	long rc;
 
-	__asm__ __volatile__ (
-		"0:	lr.d     %[p],  %[c]\n"
-		"	bltz     %[p],  1f\n"
-		"	addi     %[rc], %[p], 1\n"
-		"	sc.d.rl  %[rc], %[rc], %[c]\n"
-		"	bnez     %[rc], 0b\n"
-		"	fence    rw, rw\n"
-		"1:\n"
-		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-		:
-		: "memory");
+	_arch_atomic_inc_unless_negative(prev, rc, v->counter, "d");
+
 	return !(prev < 0);
 }
 
@@ -325,17 +331,8 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
 	s64 prev;
 	long rc;
 
-	__asm__ __volatile__ (
-		"0:	lr.d     %[p],  %[c]\n"
-		"	bgtz     %[p],  1f\n"
-		"	addi     %[rc], %[p], -1\n"
-		"	sc.d.rl  %[rc], %[rc], %[c]\n"
-		"	bnez     %[rc], 0b\n"
-		"	fence    rw, rw\n"
-		"1:\n"
-		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-		:
-		: "memory");
+	_arch_atomic_dec_unless_positive(prev, rc, v->counter, "d");
+
 	return !(prev > 0);
 }
 
@@ -346,17 +343,8 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 	s64 prev;
 	long rc;
 
-	__asm__ __volatile__ (
-		"0:	lr.d     %[p],  %[c]\n"
-		"	addi     %[rc], %[p], -1\n"
-		"	bltz     %[rc], 1f\n"
-		"	sc.d.rl  %[rc], %[rc], %[c]\n"
-		"	bnez     %[rc], 0b\n"
-		"	fence    rw, rw\n"
-		"1:\n"
-		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
-		:
-		: "memory");
+	_arch_atomic_dec_if_positive(prev, rc, v->counter, "d");
+
 	return prev - 1;
 }
 
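As a reading aid for the 64-bit conversions above (not part of the patch), the same dec-if-positive retry loop sketched in portable C with the __atomic builtins; sketch_dec_if_positive64() is an invented name and SEQ_CST only approximates the trailing fence rw, rw:

/* Not part of the patch: a portable sketch mirroring what the
 * lr.d/sc.d.rl loop does for the 64-bit variant. */
static long long sketch_dec_if_positive64(long long *counter)
{
	long long prev = __atomic_load_n(counter, __ATOMIC_RELAXED);
	long long next;

	do {
		next = prev - 1;
		if (next < 0)		/* would go negative: leave it alone */
			break;
	} while (!__atomic_compare_exchange_n(counter, &prev, next,
					      true, __ATOMIC_SEQ_CST,
					      __ATOMIC_RELAXED));

	return prev - 1;	/* matches the function's "return prev - 1" */
}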