diff --git a/kernel/bpf/rqspinlock.c b/kernel/bpf/rqspinlock.c
index a00561b1d3e51..6ec4e97a73a26 100644
--- a/kernel/bpf/rqspinlock.c
+++ b/kernel/bpf/rqspinlock.c
@@ -89,15 +89,14 @@ struct rqspinlock_timeout {
 DEFINE_PER_CPU_ALIGNED(struct rqspinlock_held, rqspinlock_held_locks);
 EXPORT_SYMBOL_GPL(rqspinlock_held_locks);
 
-static bool is_lock_released(rqspinlock_t *lock, u32 mask, struct rqspinlock_timeout *ts)
+static bool is_lock_released(rqspinlock_t *lock, u32 mask)
 {
 	if (!(atomic_read_acquire(&lock->val) & (mask)))
 		return true;
 	return false;
 }
 
-static noinline int check_deadlock_AA(rqspinlock_t *lock, u32 mask,
-				      struct rqspinlock_timeout *ts)
+static noinline int check_deadlock_AA(rqspinlock_t *lock)
 {
 	struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
 	int cnt = min(RES_NR_HELD, rqh->cnt);
@@ -118,8 +117,7 @@ static noinline int check_deadlock_AA(rqspinlock_t *lock, u32 mask,
  * more locks, which reduce to ABBA). This is not exhaustive, and we rely on
  * timeouts as the final line of defense.
  */
-static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
-					struct rqspinlock_timeout *ts)
+static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask)
 {
 	struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
 	int rqh_cnt = min(RES_NR_HELD, rqh->cnt);
@@ -142,7 +140,7 @@ static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
 		 * Let's ensure to break out of this loop if the lock is available for
 		 * us to potentially acquire.
 		 */
-		if (is_lock_released(lock, mask, ts))
+		if (is_lock_released(lock, mask))
 			return 0;
 
 		/*
@@ -198,15 +196,14 @@ static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
 	return 0;
 }
 
-static noinline int check_deadlock(rqspinlock_t *lock, u32 mask,
-				   struct rqspinlock_timeout *ts)
+static noinline int check_deadlock(rqspinlock_t *lock, u32 mask)
 {
 	int ret;
 
-	ret = check_deadlock_AA(lock, mask, ts);
+	ret = check_deadlock_AA(lock);
 	if (ret)
 		return ret;
-	ret = check_deadlock_ABBA(lock, mask, ts);
+	ret = check_deadlock_ABBA(lock, mask);
 	if (ret)
 		return ret;
 
@@ -234,7 +231,7 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
 	 */
 	if (prev + NSEC_PER_MSEC < time) {
 		ts->cur = time;
-		return check_deadlock(lock, mask, ts);
+		return check_deadlock(lock, mask);
 	}
 
 	return 0;
@@ -350,7 +347,7 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	struct mcs_spinlock *prev, *next, *node;
 	struct rqspinlock_timeout ts;
 	int idx, ret = 0;
-	u32 old, tail;
+	u32 old, tail, mask = _Q_LOCKED_MASK;
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
 
@@ -359,6 +356,21 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 
 	RES_INIT_TIMEOUT(ts);
 
+	/*
+	 * Grab an entry in the held locks array, to enable deadlock detection
+	 */
+	grab_held_lock_entry(lock);
+
+	if (val & _Q_PENDING_VAL)
+		mask = _Q_LOCKED_PENDING_MASK;
+
+	/*
+	 * Do a deadlock check on the entry of the slowpath
+	 */
+	ret = check_deadlock(lock, mask);
+	if (ret)
+		goto err_release_entry;
+
 	/*
 	 * Wait for in-progress pending->locked hand-overs with a bounded
 	 * number of spins so that we guarantee forward progress.
@@ -400,11 +412,6 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 		goto queue;
 	}
 
-	/*
-	 * Grab an entry in the held locks array, to enable deadlock detection.
-	 */
-	grab_held_lock_entry(lock);
-
 	/*
 	 * We're pending, wait for the owner to go away.
 	 *
@@ -451,10 +458,6 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	 */
 queue:
 	lockevent_inc(lock_slowpath);
-	/*
-	 * Grab deadlock detection entry for the queue path.
-	 */
-	grab_held_lock_entry(lock);
 
 	node = this_cpu_ptr(&rqnodes[0].mcs);
 	idx = node->count++;
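For context, the sketch below illustrates the flow this patch establishes at slowpath entry: grab a held-locks entry for the lock first, then run the AA deadlock check before spinning, and release the entry again on error (mirroring the err_release_entry path). This is a minimal user-space model for illustration only; the table size, helper names, return conventions, and use of a single table instead of per-CPU storage are assumptions, not the kernel implementation.

```c
/*
 * Illustrative user-space sketch of entry-time AA deadlock detection.
 * Names and sizes here are assumptions for the sketch, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define RES_NR_HELD 31 /* assumed table size */

struct held_locks {
	int cnt;
	void *locks[RES_NR_HELD];
};

/* The kernel keeps one table per CPU; one table suffices for the sketch. */
static struct held_locks held;

static void grab_held_lock_entry(void *lock)
{
	if (held.cnt < RES_NR_HELD)
		held.locks[held.cnt] = lock;
	held.cnt++;
}

static void release_held_lock_entry(void)
{
	if (held.cnt > 0)
		held.cnt--;
}

/* AA check: are we already holding the lock we are about to spin on? */
static bool check_deadlock_AA(void *lock)
{
	int cnt = held.cnt < RES_NR_HELD ? held.cnt : RES_NR_HELD;

	/* The last entry is the lock we just recorded, so skip it. */
	for (int i = 0; i < cnt - 1; i++)
		if (held.locks[i] == lock)
			return true;
	return false;
}

int main(void)
{
	int lock_a;

	grab_held_lock_entry(&lock_a);	/* first (successful) acquisition */
	grab_held_lock_entry(&lock_a);	/* re-entry into the slowpath */
	if (check_deadlock_AA(&lock_a)) {
		puts("AA deadlock detected on slowpath entry");
		release_held_lock_entry();	/* mirrors err_release_entry */
	}
	return 0;
}
```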