45 changes: 24 additions & 21 deletions kernel/bpf/rqspinlock.c
@@ -89,15 +89,14 @@ struct rqspinlock_timeout {
DEFINE_PER_CPU_ALIGNED(struct rqspinlock_held, rqspinlock_held_locks);
EXPORT_SYMBOL_GPL(rqspinlock_held_locks);

-static bool is_lock_released(rqspinlock_t *lock, u32 mask, struct rqspinlock_timeout *ts)
+static bool is_lock_released(rqspinlock_t *lock, u32 mask)
{
if (!(atomic_read_acquire(&lock->val) & (mask)))
return true;
return false;
}

-static noinline int check_deadlock_AA(rqspinlock_t *lock, u32 mask,
-struct rqspinlock_timeout *ts)
+static noinline int check_deadlock_AA(rqspinlock_t *lock)
{
struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
int cnt = min(RES_NR_HELD, rqh->cnt);
@@ -118,8 +117,7 @@ static noinline int check_deadlock_AA(rqspinlock_t *lock, u32 mask,
* more locks, which reduce to ABBA). This is not exhaustive, and we rely on
* timeouts as the final line of defense.
*/
-static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
-struct rqspinlock_timeout *ts)
+static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask)
{
struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
int rqh_cnt = min(RES_NR_HELD, rqh->cnt);
@@ -142,7 +140,7 @@ static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
* Let's ensure to break out of this loop if the lock is available for
* us to potentially acquire.
*/
-if (is_lock_released(lock, mask, ts))
+if (is_lock_released(lock, mask))
return 0;

/*
@@ -198,15 +196,14 @@ static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
return 0;
}

-static noinline int check_deadlock(rqspinlock_t *lock, u32 mask,
-struct rqspinlock_timeout *ts)
+static noinline int check_deadlock(rqspinlock_t *lock, u32 mask)
{
int ret;

-ret = check_deadlock_AA(lock, mask, ts);
+ret = check_deadlock_AA(lock);
if (ret)
return ret;
-ret = check_deadlock_ABBA(lock, mask, ts);
+ret = check_deadlock_ABBA(lock, mask);
if (ret)
return ret;

@@ -234,7 +231,7 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
*/
if (prev + NSEC_PER_MSEC < time) {
ts->cur = time;
-return check_deadlock(lock, mask, ts);
+return check_deadlock(lock, mask);
}

return 0;
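For context on the hunk above (the enclosing check_timeout() is only partially visible here): the cheap deadline comparison runs on every poll, while the heavier deadlock pass is rate-limited to roughly once per millisecond via the `prev + NSEC_PER_MSEC < time` condition. The sketch below is a userspace approximation of that pattern, not the kernel implementation; the struct fields, the callback, and the clock source are assumptions for illustration.

```c
/*
 * Userspace approximation (assumed names, not the kernel code):
 * a cheap deadline check on every call, with the heavier deadlock
 * pass rate-limited to at most once per millisecond.
 */
#include <errno.h>
#include <stdint.h>
#include <time.h>

#define NSEC_PER_MSEC 1000000ULL

struct timeout_sketch {
	uint64_t deadline;	/* absolute give-up time, in ns */
	uint64_t cur;		/* last time the deadlock pass ran, in ns */
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/* Returns 0 to keep spinning, a negative errno value to give up. */
int check_timeout_sketch(struct timeout_sketch *ts,
			 int (*deadlock_pass)(void *ctx), void *ctx)
{
	uint64_t time = now_ns();

	if (time > ts->deadline)
		return -ETIMEDOUT;

	/* Re-run deadlock detection at most once per millisecond. */
	if (ts->cur + NSEC_PER_MSEC < time) {
		ts->cur = time;
		return deadlock_pass(ctx);
	}

	return 0;
}
```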
@@ -350,7 +347,7 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
struct mcs_spinlock *prev, *next, *node;
struct rqspinlock_timeout ts;
int idx, ret = 0;
-u32 old, tail;
+u32 old, tail, mask = _Q_LOCKED_MASK;

BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

@@ -359,6 +356,21 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)

RES_INIT_TIMEOUT(ts);

+/*
+ * Grab an entry in the held locks array, to enable deadlock detection
+ */
+grab_held_lock_entry(lock);
+
+if (val & _Q_PENDING_VAL)
+mask = _Q_LOCKED_PENDING_MASK;
+
+/*
+ * Do a deadlock check on the entry of the slowpath
+ */
+ret = check_deadlock(lock, mask);
+if (ret)
+goto err_release_entry;
+
/*
* Wait for in-progress pending->locked hand-overs with a bounded
* number of spins so that we guarantee forward progress.
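The hunk above is the core of this change: the held-locks entry is taken once at the top of the slowpath, the mask is widened to cover the pending bit when one was observed in `val`, and the deadlock check runs before any pending-bit or queue handling, bailing out through `err_release_entry` (which presumably drops the entry taken above) on failure. The sketch below is a simplified, self-contained userspace rendition of that ordering, with only an AA-style check and an illustrative held-locks table; the constants, helpers, and table layout are assumptions, not the kernel's definitions.

```c
/*
 * Simplified userspace sketch of the new slowpath entry sequence
 * (assumed semantics): record the lock in a held-locks table, widen
 * the mask when a pending waiter was observed, run the deadlock check
 * up front, and drop the table entry again if it fails.
 */
#include <errno.h>
#include <stdint.h>

#define Q_LOCKED_MASK		0x000000ffu	/* locked byte (illustrative) */
#define Q_PENDING_VAL		0x00000100u	/* pending bit (illustrative) */
#define Q_LOCKED_PENDING_MASK	(Q_LOCKED_MASK | Q_PENDING_VAL)

#define NR_HELD 31

struct held_locks {
	int cnt;
	void *locks[NR_HELD];
};

/* Stand-in for the kernel's per-CPU rqspinlock_held_locks array. */
static _Thread_local struct held_locks held;

static void grab_held_lock_entry(void *lock)
{
	if (held.cnt < NR_HELD)
		held.locks[held.cnt] = lock;
	held.cnt++;
}

static void release_held_lock_entry(void)
{
	if (held.cnt > 0)
		held.cnt--;
}

/* AA-only check: did this context already record the same lock? */
static int check_deadlock(void *lock, uint32_t mask)
{
	(void)mask;	/* the real code also feeds mask to an ABBA check */
	for (int i = 0; i < held.cnt - 1 && i < NR_HELD; i++)
		if (held.locks[i] == lock)
			return -EDEADLK;
	return 0;
}

/* Mirrors the ordering introduced at the top of the slowpath above. */
int slowpath_entry_sketch(void *lock, uint32_t val)
{
	uint32_t mask = Q_LOCKED_MASK;
	int ret;

	grab_held_lock_entry(lock);

	if (val & Q_PENDING_VAL)
		mask = Q_LOCKED_PENDING_MASK;

	ret = check_deadlock(lock, mask);
	if (ret) {
		/* err_release_entry: undo the entry taken above. */
		release_held_lock_entry();
		return ret;
	}

	/* ... continue with pending-bit handling and MCS queueing ... */
	return 0;
}
```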
@@ -400,11 +412,6 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
goto queue;
}

-/*
-* Grab an entry in the held locks array, to enable deadlock detection.
-*/
-grab_held_lock_entry(lock);
-
/*
* We're pending, wait for the owner to go away.
*
@@ -451,10 +458,6 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
*/
queue:
lockevent_inc(lock_slowpath);
-/*
-* Grab deadlock detection entry for the queue path.
-*/
-grab_held_lock_entry(lock);

node = this_cpu_ptr(&rqnodes[0].mcs);
idx = node->count++;