@@ -89,15 +89,14 @@ struct rqspinlock_timeout {
 DEFINE_PER_CPU_ALIGNED(struct rqspinlock_held, rqspinlock_held_locks);
 EXPORT_SYMBOL_GPL(rqspinlock_held_locks);
 
-static bool is_lock_released(rqspinlock_t *lock, u32 mask, struct rqspinlock_timeout *ts)
+static bool is_lock_released(rqspinlock_t *lock, u32 mask)
 {
 	if (!(atomic_read_acquire(&lock->val) & (mask)))
 		return true;
 	return false;
 }
 
-static noinline int check_deadlock_AA(rqspinlock_t *lock, u32 mask,
-				      struct rqspinlock_timeout *ts)
+static noinline int check_deadlock_AA(rqspinlock_t *lock)
 {
 	struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
 	int cnt = min(RES_NR_HELD, rqh->cnt);
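The hunk ends before the body of check_deadlock_AA. As a hedged sketch of what the AA check amounts to (the loop, the rqh->locks field, the cnt - 1 bound and the -EDEADLK return are assumptions based on the declarations visible above, not lines taken from this commit), it scans this CPU's held-locks table for the lock we are about to wait on:

	/*
	 * Sketch of the AA scan: if the lock being acquired already
	 * appears in this CPU's held-locks table, taking it again here
	 * can never succeed, so report a deadlock instead of spinning.
	 * The cnt - 1 bound assumes the newest entry is the lock
	 * currently being acquired and is skipped.
	 */
	for (int i = 0; i < cnt - 1; i++) {
		if (rqh->locks[i] == lock)
			return -EDEADLK;
	}
	return 0;

Skipping the newest entry matters because, with this patch, the entry for the lock being acquired is grabbed before check_deadlock() runs (see the slowpath hunk further down).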
@@ -118,8 +117,7 @@ static noinline int check_deadlock_AA(rqspinlock_t *lock, u32 mask,
  * more locks, which reduce to ABBA). This is not exhaustive, and we rely on
  * timeouts as the final line of defense.
  */
-static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
-					struct rqspinlock_timeout *ts)
+static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask)
 {
 	struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
 	int rqh_cnt = min(RES_NR_HELD, rqh->cnt);
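The rest of check_deadlock_ABBA is not visible in this hunk. As a simplified, hedged sketch of the idea (exact loop bounds, field names and READ_ONCE placement are assumptions, not this commit's code): walk every CPU's held-locks table; if some remote CPU already holds the lock we want while its newest recorded entry (presumably the lock it is currently trying to acquire) is one we hold, the two CPUs are waiting on each other:

	int cpu;

	for_each_possible_cpu(cpu) {
		struct rqspinlock_held *rqh_cpu = per_cpu_ptr(&rqspinlock_held_locks, cpu);
		int cnt = min(RES_NR_HELD, READ_ONCE(rqh_cpu->cnt));

		for (int i = 0; i < cnt; i++) {
			void *remote_lock;

			if (rqh_cpu->locks[i] != lock)
				continue;
			/*
			 * The remote CPU holds the lock we want. Does its
			 * latest acquisition attempt target a lock we already
			 * hold? If so, this is an ABBA deadlock. Our own
			 * newest entry (the lock being acquired) is skipped.
			 */
			remote_lock = READ_ONCE(rqh_cpu->locks[cnt - 1]);
			for (int j = 0; j < rqh_cnt - 1; j++) {
				if (rqh->locks[j] == remote_lock)
					return -EDEADLK;
			}
		}
	}
	return 0;

As the comment above notes, this scan is heuristic rather than exhaustive; the timeout path remains the final line of defense.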
@@ -142,7 +140,7 @@ static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
 		 * Let's ensure to break out of this loop if the lock is available for
 		 * us to potentially acquire.
 		 */
-		if (is_lock_released(lock, mask, ts))
+		if (is_lock_released(lock, mask))
 			return 0;
 
 		/*
@@ -198,15 +196,14 @@ static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
 	return 0;
 }
 
-static noinline int check_deadlock(rqspinlock_t *lock, u32 mask,
-				   struct rqspinlock_timeout *ts)
+static noinline int check_deadlock(rqspinlock_t *lock, u32 mask)
 {
 	int ret;
 
-	ret = check_deadlock_AA(lock, mask, ts);
+	ret = check_deadlock_AA(lock);
 	if (ret)
 		return ret;
-	ret = check_deadlock_ABBA(lock, mask, ts);
+	ret = check_deadlock_ABBA(lock, mask);
 	if (ret)
 		return ret;
 
@@ -234,7 +231,7 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
 	 */
 	if (prev + NSEC_PER_MSEC < time) {
 		ts->cur = time;
-		return check_deadlock(lock, mask, ts);
+		return check_deadlock(lock, mask);
 	}
 
 	return 0;
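For context, the millisecond throttle above lives in check_timeout(), which waiters poll while spinning. A hedged sketch of how the earlier part of that function presumably looks (the ts->timeout_end and ts->duration fields and the use of ktime_get_mono_fast_ns() are assumptions about code outside this hunk): deadlock checks run at most roughly once per millisecond, while the hard timeout stays the last resort:

	u64 time = ktime_get_mono_fast_ns();
	u64 prev = ts->cur;

	/* First invocation: record the start time and compute the deadline. */
	if (!ts->timeout_end) {
		ts->cur = time;
		ts->timeout_end = time + ts->duration;
		return 0;
	}

	/* Hard timeout: give up even if no deadlock was ever detected. */
	if (time > ts->timeout_end)
		return -ETIMEDOUT;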
@@ -350,7 +347,7 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	struct mcs_spinlock *prev, *next, *node;
 	struct rqspinlock_timeout ts;
 	int idx, ret = 0;
-	u32 old, tail;
+	u32 old, tail, mask = _Q_LOCKED_MASK;
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
 
@@ -359,6 +356,21 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 
 	RES_INIT_TIMEOUT(ts);
 
+	/*
+	 * Grab an entry in the held locks array, to enable deadlock detection.
+	 */
+	grab_held_lock_entry(lock);
+
+	if (val & _Q_PENDING_VAL)
+		mask = _Q_LOCKED_PENDING_MASK;
+
+	/*
+	 * Do a deadlock check on entry to the slowpath.
+	 */
+	ret = check_deadlock(lock, mask);
+	if (ret)
+		goto err_release_entry;
+
 	/*
 	 * Wait for in-progress pending->locked hand-overs with a bounded
 	 * number of spins so that we guarantee forward progress.
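The helpers used by the new entry-path check are not part of this diff. As a hedged sketch of how grab_held_lock_entry() presumably behaves (the name and call site come from the hunk; the body below is an assumption, not this commit's code), it records the lock in the per-CPU held-locks table so that check_deadlock() on this and other CPUs can see the acquisition attempt:

/* Record the lock being acquired in this CPU's held-locks table. */
static __always_inline void grab_held_lock_entry(void *lock)
{
	int cnt = this_cpu_inc_return(rqspinlock_held_locks.cnt);

	/* Table full: keep the increment so the later release stays paired. */
	if (unlikely(cnt > RES_NR_HELD))
		return;
	this_cpu_write(rqspinlock_held_locks.locks[cnt - 1], lock);
}

The err_release_entry label jumped to on failure is then presumably expected to drop that just-grabbed entry (release_held_lock_entry()) and return ret, so an early deadlock report leaves the per-CPU table balanced.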
@@ -400,11 +412,6 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 		goto queue;
 	}
 
-	/*
-	 * Grab an entry in the held locks array, to enable deadlock detection.
-	 */
-	grab_held_lock_entry(lock);
-
 	/*
 	 * We're pending, wait for the owner to go away.
 	 *
@@ -451,10 +458,6 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	 */
 queue:
 	lockevent_inc(lock_slowpath);
-	/*
-	 * Grab deadlock detection entry for the queue path.
-	 */
-	grab_held_lock_entry(lock);
 
 	node = this_cpu_ptr(&rqnodes[0].mcs);
 	idx = node->count++;