@@ -626,7 +626,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
  */
 static __always_inline bool
 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
-                      const bool use_ww_ctx, struct mutex_waiter *waiter)
+                      struct mutex_waiter *waiter)
 {
         if (!waiter) {
                 /*
@@ -702,7 +702,7 @@ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 #else
 static __always_inline bool
 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
-                      const bool use_ww_ctx, struct mutex_waiter *waiter)
+                      struct mutex_waiter *waiter)
 {
         return false;
 }
@@ -922,14 +922,17 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
         struct ww_mutex *ww;
         int ret;
 
+        if (!use_ww_ctx)
+                ww_ctx = NULL;
+
         might_sleep();
 
 #ifdef CONFIG_DEBUG_MUTEXES
         DEBUG_LOCKS_WARN_ON(lock->magic != lock);
 #endif
 
         ww = container_of(lock, struct ww_mutex, base);
-        if (use_ww_ctx && ww_ctx) {
+        if (ww_ctx) {
                 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
                         return -EALREADY;
 
@@ -946,10 +949,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
         mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
         if (__mutex_trylock(lock) ||
-            mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
+            mutex_optimistic_spin(lock, ww_ctx, NULL)) {
                 /* got the lock, yay! */
                 lock_acquired(&lock->dep_map, ip);
-                if (use_ww_ctx && ww_ctx)
+                if (ww_ctx)
                         ww_mutex_set_context_fastpath(ww, ww_ctx);
                 preempt_enable();
                 return 0;
@@ -960,7 +963,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
          * After waiting to acquire the wait_lock, try again.
          */
         if (__mutex_trylock(lock)) {
-                if (use_ww_ctx && ww_ctx)
+                if (ww_ctx)
                         __ww_mutex_check_waiters(lock, ww_ctx);
 
                 goto skip_wait;
@@ -1013,7 +1016,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                         goto err;
                 }
 
-                if (use_ww_ctx && ww_ctx) {
+                if (ww_ctx) {
                         ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
                         if (ret)
                                 goto err;
@@ -1026,7 +1029,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                  * ww_mutex needs to always recheck its position since its waiter
                  * list is not FIFO ordered.
                  */
-                if ((use_ww_ctx && ww_ctx) || !first) {
+                if (ww_ctx || !first) {
                         first = __mutex_waiter_is_first(lock, &waiter);
                         if (first)
                                 __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
@@ -1039,7 +1042,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                  * or we must see its unlock and acquire.
                  */
                 if (__mutex_trylock(lock) ||
-                    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
+                    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
                         break;
 
                 spin_lock(&lock->wait_lock);
@@ -1048,7 +1051,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 acquired:
         __set_current_state(TASK_RUNNING);
 
-        if (use_ww_ctx && ww_ctx) {
+        if (ww_ctx) {
                 /*
                  * Wound-Wait; we stole the lock (!first_waiter), check the
                  * waiters as anyone might want to wound us.
@@ -1068,7 +1071,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
         /* got the lock - cleanup and rejoice! */
         lock_acquired(&lock->dep_map, ip);
 
-        if (use_ww_ctx && ww_ctx)
+        if (ww_ctx)
                 ww_mutex_lock_acquired(ww, ww_ctx);
 
         spin_unlock(&lock->wait_lock);
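
Taken together, the hunks apply a single simplification: __mutex_lock_common() now folds the use_ww_ctx flag into the ww_ctx pointer once on entry (if (!use_ww_ctx) ww_ctx = NULL;), so every later "use_ww_ctx && ww_ctx" test collapses to a plain "ww_ctx" test, and mutex_optimistic_spin() can drop the flag argument entirely. Since use_ww_ctx is a compile-time constant at each call site of the always-inlined __mutex_lock_common(), the compiler should still be able to discard the NULL assignment and the dead branches in the non-ww paths. The sketch below is a minimal standalone illustration of the pattern, not kernel code; the names (struct ctx, struct lock, lock_common(), use_ctx) are invented stand-ins:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins, not the kernel's types. */
struct ctx  { int id; };
struct lock { struct ctx *owner_ctx; };

/*
 * Fold a boolean "use this pointer" flag into the pointer itself at
 * function entry, mirroring the "if (!use_ww_ctx) ww_ctx = NULL;" hunk
 * above. Every subsequent check then tests only the pointer.
 */
static int lock_common(struct lock *lock, bool use_ctx, struct ctx *ctx)
{
        if (!use_ctx)
                ctx = NULL;             /* flag folded into the pointer */

        /* formerly: if (use_ctx && ctx) { ... } */
        if (ctx)
                printf("ww-style path, ctx id=%d\n", ctx->id);
        else
                printf("plain mutex path\n");

        lock->owner_ctx = ctx;
        return 0;
}

int main(void)
{
        struct lock l = { 0 };
        struct ctx c = { .id = 42 };

        lock_common(&l, true, &c);      /* pointer honored */
        lock_common(&l, false, &c);     /* flag false: pointer ignored */
        return 0;
}

The payoff is the same as in the diff: one normalization point instead of a compound condition repeated at every use site, with no behavior change for callers that never pass a context.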