Commit ffa164f

terminus authored and Kernel Patches Daemon committed

rqspinlock: Use smp_cond_load_acquire_timeout()

Switch out the conditional load interfaces used by rqspinlock to
atomic_cond_read_acquire_timeout() and smp_cond_load_acquire_timeout().
Both handle the timeout and amortize the check as needed, so
check_timeout() can be used directly. Also, for the spin-wait
implementations, redefine SMP_TIMEOUT_POLL_COUNT to 16k, similar to the
spin count used in RES_CHECK_TIMEOUT().

Cc: Kumar Kartikeya Dwivedi <[email protected]>
Cc: Alexei Starovoitov <[email protected]>
Signed-off-by: Ankur Arora <[email protected]>

1 parent 7bf3210 · commit ffa164f

1 file changed, 15 insertions(+), 17 deletions(-)

kernel/bpf/rqspinlock.c

@@ -238,20 +238,14 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
 }
 
 /*
- * Do not amortize with spins when res_smp_cond_load_acquire is defined,
- * as the macro does internal amortization for us.
+ * Amortize timeout check for busy-wait loops.
  */
-#ifndef res_smp_cond_load_acquire
 #define RES_CHECK_TIMEOUT(ts, ret, mask)                              \
	({                                                            \
		if (!(ts).spin++)                                     \
			(ret) = check_timeout((lock), (mask), &(ts)); \
		(ret);                                                \
	})
-#else
-#define RES_CHECK_TIMEOUT(ts, ret, mask)                              \
-	({ (ret) = check_timeout((lock), (mask), &(ts)); })
-#endif
 
 /*
  * Initialize the 'spin' member.
@@ -265,6 +259,16 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
  */
 #define RES_RESET_TIMEOUT(ts, _duration) ({ (ts).timeout_end = 0; (ts).duration = _duration; })
 
+/*
+ * Limit how often check_timeout() is invoked while spin-waiting in
+ * smp_cond_load_acquire_timeout() or atomic_cond_read_acquire_timeout().
+ * (ARM64 typically uses a waiting implementation, so we exclude it.)
+ */
+#ifndef CONFIG_ARM64
+#undef SMP_TIMEOUT_POLL_COUNT
+#define SMP_TIMEOUT_POLL_COUNT (16*1024)
+#endif
+
 /*
  * Provide a test-and-set fallback for cases when queued spin lock support is
  * absent from the architecture.
@@ -310,12 +314,6 @@ EXPORT_SYMBOL_GPL(resilient_tas_spin_lock);
  */
 static DEFINE_PER_CPU_ALIGNED(struct qnode, rqnodes[_Q_MAX_NODES]);
 
-#ifndef res_smp_cond_load_acquire
-#define res_smp_cond_load_acquire(v, c) smp_cond_load_acquire(v, c)
-#endif
-
-#define res_atomic_cond_read_acquire(v, c) res_smp_cond_load_acquire(&(v)->counter, (c))
-
 /**
  * resilient_queued_spin_lock_slowpath - acquire the queued spinlock
  * @lock: Pointer to queued spinlock structure
@@ -415,7 +413,8 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
	 */
	if (val & _Q_LOCKED_MASK) {
		RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT);
-		res_smp_cond_load_acquire(&lock->locked, !VAL || RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_MASK));
+		smp_cond_load_acquire_timeout(&lock->locked, !VAL,
+					      (ret = check_timeout(lock, _Q_LOCKED_MASK, &ts)));
	}
 
	if (ret) {
@@ -569,9 +568,8 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
	 * us.
	 */
	RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT * 2);
-	val = res_atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK) ||
-					   RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_PENDING_MASK));
-
+	val = atomic_cond_read_acquire_timeout(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK),
+					       (ret = check_timeout(lock, _Q_LOCKED_PENDING_MASK, &ts)));
 waitq_timeout:
	if (ret) {
		/*

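Editor's illustration (not part of the patch, and not kernel code): the sketch below shows the amortization idea behind the SMP_TIMEOUT_POLL_COUNT override above. The wait condition is polled cheaply on every iteration, while the comparatively expensive timeout check only runs once every POLL_COUNT spins. All names here (POLL_COUNT, spin_wait_timeout(), now_ns()) are invented for the example; the kernel interfaces named in the commit apply the same pattern with check_timeout() as the time-check expression.

/* Illustrative user-space sketch only -- names are hypothetical. */
#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

#define POLL_COUNT (16 * 1024)	/* mirrors the 16k spin count chosen in the patch */

static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Spin until *flag becomes nonzero; return false if timeout_ns elapses first. */
static bool spin_wait_timeout(atomic_int *flag, unsigned long long timeout_ns)
{
	unsigned long long deadline = 0;	/* set lazily on the first amortized check */
	unsigned int spins = 0;

	for (;;) {
		if (atomic_load_explicit(flag, memory_order_acquire))
			return true;
		/* Amortize: only consult the clock once every POLL_COUNT iterations. */
		if (++spins % POLL_COUNT)
			continue;
		if (!deadline)
			deadline = now_ns() + timeout_ns;
		else if (now_ns() >= deadline)
			return false;
	}
}

Lazily establishing the deadline on the first amortized check loosely mirrors how RES_RESET_TIMEOUT() zeroes timeout_end so that the first timeout check can set it.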