@@ -89,15 +89,14 @@ struct rqspinlock_timeout {
 DEFINE_PER_CPU_ALIGNED(struct rqspinlock_held, rqspinlock_held_locks);
 EXPORT_SYMBOL_GPL(rqspinlock_held_locks);
 
-static bool is_lock_released(rqspinlock_t *lock, u32 mask, struct rqspinlock_timeout *ts)
+static bool is_lock_released(rqspinlock_t *lock, u32 mask)
 {
 	if (!(atomic_read_acquire(&lock->val) & (mask)))
 		return true;
 	return false;
 }
 
-static noinline int check_deadlock_AA(rqspinlock_t *lock, u32 mask,
-				      struct rqspinlock_timeout *ts)
+static noinline int check_deadlock_AA(rqspinlock_t *lock)
 {
 	struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
 	int cnt = min(RES_NR_HELD, rqh->cnt);
@@ -118,8 +117,7 @@ static noinline int check_deadlock_AA(rqspinlock_t *lock, u32 mask,
  * more locks, which reduce to ABBA). This is not exhaustive, and we rely on
  * timeouts as the final line of defense.
  */
-static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
-					struct rqspinlock_timeout *ts)
+static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask)
 {
 	struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
 	int rqh_cnt = min(RES_NR_HELD, rqh->cnt);
@@ -142,7 +140,7 @@ static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
 		 * Let's ensure to break out of this loop if the lock is available for
 		 * us to potentially acquire.
 		 */
-		if (is_lock_released(lock, mask, ts))
+		if (is_lock_released(lock, mask))
 			return 0;
 
 		/*
@@ -198,15 +196,14 @@ static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
 	return 0;
 }
 
-static noinline int check_deadlock(rqspinlock_t *lock, u32 mask,
-				   struct rqspinlock_timeout *ts)
+static noinline int check_deadlock(rqspinlock_t *lock, u32 mask)
 {
 	int ret;
 
-	ret = check_deadlock_AA(lock, mask, ts);
+	ret = check_deadlock_AA(lock);
 	if (ret)
 		return ret;
-	ret = check_deadlock_ABBA(lock, mask, ts);
+	ret = check_deadlock_ABBA(lock, mask);
 	if (ret)
 		return ret;
 
@@ -234,7 +231,7 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
 	 */
 	if (prev + NSEC_PER_MSEC < time) {
 		ts->cur = time;
-		return check_deadlock(lock, mask);
+		return check_deadlock(lock, mask);
 	}
 
 	return 0;
0 commit comments