Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 22 additions & 0 deletions arch/arm64/include/asm/barrier.h
Original file line number Diff line number Diff line change
Expand Up @@ -219,6 +219,28 @@ do { \
(typeof(*ptr))VAL; \
})

extern bool arch_timer_evtstrm_available(void);

/*
 * smp_cond_load_relaxed_timewait() - (Spin) wait for cond with no ordering
 * guarantees until @time_check_expr becomes true.
 *
 * When the timer event stream is available, __cmpwait_relaxed() (WFE) is
 * woken periodically by the event stream, which bounds how stale the
 * time check can get.  Without the event stream, WFE could stall
 * indefinitely if no store hits the watched location, so fall back to a
 * cpu_relax() spin loop; in that case the time check runs every
 * iteration and is not amortized.
 */
#define smp_cond_load_relaxed_timewait(ptr, cond_expr, time_check_expr)	\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	bool __wfe = arch_timer_evtstrm_available();			\
									\
	for (;;) {							\
		VAL = READ_ONCE(*__PTR);				\
		if (cond_expr)						\
			break;						\
		if (time_check_expr)					\
			break;						\
		if (likely(__wfe))					\
			__cmpwait_relaxed(__PTR, VAL);			\
		else							\
			cpu_relax();					\
	}								\
	(typeof(*ptr))VAL;						\
})

#include <asm-generic/barrier.h>

#endif /* __ASSEMBLY__ */
Expand Down
84 changes: 1 addition & 83 deletions arch/arm64/include/asm/rqspinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -4,89 +4,7 @@

#include <asm/barrier.h>

/*
* Hardcode res_smp_cond_load_acquire implementations for arm64 to a custom
* version based on [0]. In rqspinlock code, our conditional expression involves
* checking the value _and_ additionally a timeout. However, on arm64, the
* WFE-based implementation may never spin again if no stores occur to the
* locked byte in the lock word. As such, we may be stuck forever if
* event-stream based unblocking is not available on the platform for WFE spin
* loops (arch_timer_evtstrm_available).
*
* Once support for smp_cond_load_acquire_timewait [0] lands, we can drop this
* copy-paste.
*
* While we rely on the implementation to amortize the cost of sampling
* cond_expr for us, that amortization does not happen when event stream
* support is unavailable; there, it is the time_expr check that is
* amortized instead. This is not the common case, and it would be difficult
* to fit our logic into the time_expr_ns >= time_limit_ns comparison, hence
* just let it be. With the event stream, the loop is woken up at
* microsecond granularity.
*
* [0]: https://lore.kernel.org/lkml/[email protected]
*/

#ifndef smp_cond_load_acquire_timewait

/*
 * Number of cpu_relax() spins between evaluations of time_expr_ns;
 * amortizes the (possibly expensive) clock read against cheap spins.
 */
#define smp_cond_time_check_count 200

/*
 * Relaxed spin-wait for cond_expr with a timeout, without WFE: pure
 * cpu_relax() spinning.  *ptr is re-read and cond_expr re-checked on
 * every iteration; the time_expr_ns >= time_limit_ns bail-out is only
 * evaluated every smp_cond_time_check_count spins.  No ordering
 * guarantees beyond READ_ONCE().
 */
#define __smp_cond_load_relaxed_spinwait(ptr, cond_expr, time_expr_ns,	\
					 time_limit_ns) ({		\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	unsigned int __count = 0;					\
	for (;;) {							\
		VAL = READ_ONCE(*__PTR);				\
		if (cond_expr)						\
			break;						\
		cpu_relax();						\
		if (__count++ < smp_cond_time_check_count)		\
			continue;					\
		if ((time_expr_ns) >= (time_limit_ns))			\
			break;						\
		__count = 0;						\
	}								\
	(typeof(*ptr))VAL;						\
})

/*
 * Acquire-ordered wait for cond_expr with a timeout, sleeping in
 * __cmpwait_relaxed() (WFE on arm64) until *ptr changes.  The timeout
 * comparison only runs after each wakeup.
 * NOTE(review): without the timer event stream, WFE may never wake if no
 * store hits *ptr — callers must gate on arch_timer_evtstrm_available().
 */
#define __smp_cond_load_acquire_timewait(ptr, cond_expr,		\
					 time_expr_ns, time_limit_ns)	\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = smp_load_acquire(__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
		if ((time_expr_ns) >= (time_limit_ns))			\
			break;						\
	}								\
	(typeof(*ptr))VAL;						\
})

#define smp_cond_load_acquire_timewait(ptr, cond_expr, \
time_expr_ns, time_limit_ns) \
({ \
__unqual_scalar_typeof(*ptr) _val; \
int __wfe = arch_timer_evtstrm_available(); \
\
if (likely(__wfe)) { \
_val = __smp_cond_load_acquire_timewait(ptr, cond_expr, \
time_expr_ns, \
time_limit_ns); \
} else { \
_val = __smp_cond_load_relaxed_spinwait(ptr, cond_expr, \
time_expr_ns, \
time_limit_ns); \
smp_acquire__after_ctrl_dep(); \
} \
(typeof(*ptr))_val; \
})

#endif

#define res_smp_cond_load_acquire(v, c) smp_cond_load_acquire_timewait(v, c, 0, 1)
#define res_smp_cond_load_acquire_waiting() arch_timer_evtstrm_available()

#include <asm-generic/rqspinlock.h>

Expand Down
57 changes: 57 additions & 0 deletions include/asm-generic/barrier.h
Original file line number Diff line number Diff line change
Expand Up @@ -273,6 +273,63 @@ do { \
})
#endif

/*
 * Number of cpu_relax() spins between evaluations of time_check_expr in
 * the generic smp_cond_load_relaxed_timewait(); amortizes the cost of
 * the time check.  Architectures may override.
 */
#ifndef SMP_TIMEWAIT_SPIN_COUNT
#define SMP_TIMEWAIT_SPIN_COUNT 200
#endif

/**
 * smp_cond_load_relaxed_timewait() - (Spin) wait for cond with no ordering
 * guarantees until a timeout expires.
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 * @time_check_expr: expression to decide when to bail out
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Generic fallback: cpu_relax() spinning.  cond_expr is re-checked every
 * iteration; time_check_expr only every SMP_TIMEWAIT_SPIN_COUNT spins to
 * amortize its cost.
 */
#ifndef smp_cond_load_relaxed_timewait
#define smp_cond_load_relaxed_timewait(ptr, cond_expr, time_check_expr) \
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	u32 __n = 0, __spin = SMP_TIMEWAIT_SPIN_COUNT;			\
									\
	for (;;) {							\
		VAL = READ_ONCE(*__PTR);				\
		if (cond_expr)						\
			break;						\
		cpu_relax();						\
		if (++__n < __spin)					\
			continue;					\
		if (time_check_expr)					\
			break;						\
		__n = 0;						\
	}								\
	(typeof(*ptr))VAL;						\
})
#endif

/**
 * smp_cond_load_acquire_timewait() - (Spin) wait for cond with ACQUIRE ordering
 * until a timeout expires.
 *
 * Arguments: same as smp_cond_load_relaxed_timewait().
 *
 * Equivalent to using smp_cond_load_acquire() on the condition variable with
 * a timeout.
 */
#ifndef smp_cond_load_acquire_timewait
#define smp_cond_load_acquire_timewait(ptr, cond_expr, time_check_expr) \
({									\
	__unqual_scalar_typeof(*ptr) _val;				\
	_val = smp_cond_load_relaxed_timewait(ptr, cond_expr,		\
					      time_check_expr);		\
									\
	/* Depends on the control dependency of the wait above. */	\
	smp_acquire__after_ctrl_dep();					\
	(typeof(*ptr))_val;						\
})
#endif

/*
* pmem_wmb() ensures that all stores for which the modification
* are written to persistent storage by preceding instructions have
Expand Down
4 changes: 4 additions & 0 deletions include/asm-generic/rqspinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -247,4 +247,8 @@ static __always_inline void res_spin_unlock(rqspinlock_t *lock)

#define raw_res_spin_unlock_irqrestore(lock, flags) ({ raw_res_spin_unlock(lock); local_irq_restore(flags); })

/*
 * Default: no wait/wake primitive (e.g. WFE + event stream) is available,
 * so the timed-wait loop spins and must amortize its own timeout checks.
 */
#ifndef res_smp_cond_load_acquire_waiting
#define res_smp_cond_load_acquire_waiting() 0
#endif

#endif /* __ASM_GENERIC_RQSPINLOCK_H */
25 changes: 9 additions & 16 deletions kernel/bpf/rqspinlock.c
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,7 @@ struct rqspinlock_timeout {
u64 duration;
u64 cur;
u16 spin;
u8 wait;
};

#define RES_TIMEOUT_VAL 2
Expand Down Expand Up @@ -241,26 +242,20 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
}

/*
* Do not amortize with spins when res_smp_cond_load_acquire is defined,
* as the macro does internal amortization for us.
* Only amortize with spins when we don't have a waiting implementation.
*/
#ifndef res_smp_cond_load_acquire
#define RES_CHECK_TIMEOUT(ts, ret, mask) \
({ \
if (!(ts).spin++) \
if ((ts).wait || !(ts).spin++) \
(ret) = check_timeout((lock), (mask), &(ts)); \
(ret); \
})
#else
#define RES_CHECK_TIMEOUT(ts, ret, mask) \
({ (ret) = check_timeout((lock), (mask), &(ts)); })
#endif

/*
* Initialize the 'spin' member.
* Set spin member to 0 to trigger AA/ABBA checks immediately.
*/
#define RES_INIT_TIMEOUT(ts) ({ (ts).spin = 0; })
#define RES_INIT_TIMEOUT(ts) ({ (ts).spin = 0; (ts).wait = res_smp_cond_load_acquire_waiting(); })

/*
* We only need to reset 'timeout_end', 'spin' will just wrap around as necessary.
Expand Down Expand Up @@ -313,11 +308,8 @@ EXPORT_SYMBOL_GPL(resilient_tas_spin_lock);
*/
static DEFINE_PER_CPU_ALIGNED(struct qnode, rqnodes[_Q_MAX_NODES]);

#ifndef res_smp_cond_load_acquire
#define res_smp_cond_load_acquire(v, c) smp_cond_load_acquire(v, c)
#endif

#define res_atomic_cond_read_acquire(v, c) res_smp_cond_load_acquire(&(v)->counter, (c))
/*
 * Timed acquire waits on the atomic lock word; 't' is the bail-out
 * expression forwarded to smp_cond_load_acquire_timewait().
 */
#define res_atomic_cond_read_acquire(v, c, t) smp_cond_load_acquire_timewait(&(v)->counter, (c), (t))
#define res_smp_cond_load_acquire_timewait(v, c, t) smp_cond_load_acquire_timewait(v, (c), (t))

/**
* resilient_queued_spin_lock_slowpath - acquire the queued spinlock
Expand Down Expand Up @@ -418,7 +410,8 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
*/
if (val & _Q_LOCKED_MASK) {
RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT);
res_smp_cond_load_acquire(&lock->locked, !VAL || RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_MASK));
res_smp_cond_load_acquire_timewait(&lock->locked, !VAL,
RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_MASK));
}

if (ret) {
Expand Down Expand Up @@ -572,7 +565,7 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
* us.
*/
RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT * 2);
val = res_atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK) ||
val = res_atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK),
RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_PENDING_MASK));

waitq_timeout:
Expand Down
Loading