@@ -227,69 +227,62 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
227
227
228
228
/*
 * Atomically AND @i into @v using a try_cmpxchg() retry loop.
 *
 * NOTE(review): arch_atomic64_read_nonatomic() presumably gives an
 * initial (possibly stale) snapshot; a stale value is harmless because
 * the CAS below validates it — confirm against its definition.
 */
static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	/*
	 * try_cmpxchg() refreshes @val with the current value of *v on
	 * failure, so the empty body simply retries with the new
	 * snapshot until the CAS succeeds.
	 */
	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
}
/*
 * Atomically AND @i into @v and return the value @v held immediately
 * before the update (fetch-and-AND semantics).
 */
static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	/*
	 * On failure try_cmpxchg() reloads @val from *v; on success @val
	 * still holds the pre-update value, which is exactly what a
	 * fetch_ op must return.
	 */
	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));

	return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and
/*
 * Atomically OR @i into @v using a try_cmpxchg() retry loop.
 */
static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	/*
	 * try_cmpxchg() refreshes @val with the current value of *v on
	 * failure, so the empty body retries until the CAS succeeds.
	 */
	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
}
/*
 * Atomically OR @i into @v and return the value @v held immediately
 * before the update (fetch-and-OR semantics).
 */
static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	/*
	 * On failure try_cmpxchg() reloads @val from *v; on success @val
	 * holds the pre-update value to be returned.
	 */
	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));

	return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or
/*
 * Atomically XOR @i into @v using a try_cmpxchg() retry loop.
 */
static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	/*
	 * try_cmpxchg() refreshes @val with the current value of *v on
	 * failure, so the empty body retries until the CAS succeeds.
	 */
	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
}
/*
 * Atomically XOR @i into @v and return the value @v held immediately
 * before the update (fetch-and-XOR semantics).
 */
static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	/*
	 * On failure try_cmpxchg() reloads @val from *v; on success @val
	 * holds the pre-update value to be returned.
	 */
	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));

	return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
/*
 * Atomically add @i to @v and return the value @v held immediately
 * before the update (fetch-and-add semantics).
 */
static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	/*
	 * On failure try_cmpxchg() reloads @val from *v; on success @val
	 * holds the pre-update value to be returned.
	 */
	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val + i));

	return val;
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
0 commit comments