Skip to content

Commit 27eaa30

Browse files
terminusKernel Patches Daemon
authored and committed
rqspinlock: use smp_cond_load_acquire_timeout()
Switch out the conditional load interfaces used by rqspinlock to atomic_cond_read_acquire_timeout() and, smp_cond_read_acquire_timeout(). Both these handle the timeout and amortize as needed, so use check_timeout() directly. Also, when using spin-wait implementations, redefine SMP_TIMEOUT_POLL_COUNT to be 16k to be similar to the spin-count used in RES_CHECK_TIMEOUT(). Cc: Kumar Kartikeya Dwivedi <[email protected]> Cc: Alexei Starovoitov <[email protected]> Signed-off-by: Ankur Arora <[email protected]>
1 parent 1876a0f commit 27eaa30

File tree

1 file changed

+14
-17
lines changed

1 file changed

+14
-17
lines changed

kernel/bpf/rqspinlock.c

Lines changed: 14 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -238,20 +238,14 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
238238
}
239239

240240
/*
241-
* Do not amortize with spins when res_smp_cond_load_acquire is defined,
242-
* as the macro does internal amortization for us.
241+
* Amortize timeout check for busy-wait loops.
243242
*/
244-
#ifndef res_smp_cond_load_acquire
245243
#define RES_CHECK_TIMEOUT(ts, ret, mask) \
246244
({ \
247245
if (!(ts).spin++) \
248246
(ret) = check_timeout((lock), (mask), &(ts)); \
249247
(ret); \
250248
})
251-
#else
252-
#define RES_CHECK_TIMEOUT(ts, ret, mask) \
253-
({ (ret) = check_timeout((lock), (mask), &(ts)); })
254-
#endif
255249

256250
/*
257251
* Initialize the 'spin' member.
@@ -265,6 +259,15 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
265259
*/
266260
#define RES_RESET_TIMEOUT(ts, _duration) ({ (ts).timeout_end = 0; (ts).duration = _duration; })
267261

262+
/*
263+
* Limit how often check_timeout() is invoked while spin-waiting by
264+
* smp_cond_load_acquire_timeout() or atomic_cond_read_acquire_timeout().
265+
*/
266+
#ifndef CONFIG_ARM64
267+
#undef SMP_TIMEOUT_POLL_COUNT
268+
#define SMP_TIMEOUT_POLL_COUNT (16*1024)
269+
#endif
270+
268271
/*
269272
* Provide a test-and-set fallback for cases when queued spin lock support is
270273
* absent from the architecture.
@@ -310,12 +313,6 @@ EXPORT_SYMBOL_GPL(resilient_tas_spin_lock);
310313
*/
311314
static DEFINE_PER_CPU_ALIGNED(struct qnode, rqnodes[_Q_MAX_NODES]);
312315

313-
#ifndef res_smp_cond_load_acquire
314-
#define res_smp_cond_load_acquire(v, c) smp_cond_load_acquire(v, c)
315-
#endif
316-
317-
#define res_atomic_cond_read_acquire(v, c) res_smp_cond_load_acquire(&(v)->counter, (c))
318-
319316
/**
320317
* resilient_queued_spin_lock_slowpath - acquire the queued spinlock
321318
* @lock: Pointer to queued spinlock structure
@@ -415,7 +412,8 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
415412
*/
416413
if (val & _Q_LOCKED_MASK) {
417414
RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT);
418-
res_smp_cond_load_acquire(&lock->locked, !VAL || RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_MASK));
415+
smp_cond_load_acquire_timeout(&lock->locked, !VAL,
416+
(ret = check_timeout(lock, _Q_LOCKED_MASK, &ts)));
419417
}
420418

421419
if (ret) {
@@ -569,9 +567,8 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
569567
* us.
570568
*/
571569
RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT * 2);
572-
val = res_atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK) ||
573-
RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_PENDING_MASK));
574-
570+
val = atomic_cond_read_acquire_timeout(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK),
571+
(ret = check_timeout(lock, _Q_LOCKED_PENDING_MASK, &ts)));
575572
waitq_timeout:
576573
if (ret) {
577574
/*

0 commit comments

Comments
 (0)