Skip to content

Commit 5de2055

Browse files
Waiman-Long authored and Ingo Molnar committed
locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling
The use_ww_ctx flag is passed to mutex_optimistic_spin(), but the function doesn't use it. The frequent use of the (use_ww_ctx && ww_ctx) combination is repetitive. In fact, ww_ctx should not be used at all if !use_ww_ctx. Simplify ww_mutex code by dropping use_ww_ctx from mutex_optimistic_spin() and clearing ww_ctx if !use_ww_ctx. In this way, we can replace (use_ww_ctx && ww_ctx) by just (ww_ctx). Signed-off-by: Waiman Long <[email protected]> Signed-off-by: Ingo Molnar <[email protected]> Acked-by: Davidlohr Bueso <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 1df2731 commit 5de2055

File tree

1 file changed

+14
-11
lines changed

1 file changed

+14
-11
lines changed

kernel/locking/mutex.c

Lines changed: 14 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -626,7 +626,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
626626
*/
627627
static __always_inline bool
628628
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
629-
const bool use_ww_ctx, struct mutex_waiter *waiter)
629+
struct mutex_waiter *waiter)
630630
{
631631
if (!waiter) {
632632
/*
@@ -702,7 +702,7 @@ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
702702
#else
703703
static __always_inline bool
704704
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
705-
const bool use_ww_ctx, struct mutex_waiter *waiter)
705+
struct mutex_waiter *waiter)
706706
{
707707
return false;
708708
}
@@ -922,14 +922,17 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
922922
struct ww_mutex *ww;
923923
int ret;
924924

925+
if (!use_ww_ctx)
926+
ww_ctx = NULL;
927+
925928
might_sleep();
926929

927930
#ifdef CONFIG_DEBUG_MUTEXES
928931
DEBUG_LOCKS_WARN_ON(lock->magic != lock);
929932
#endif
930933

931934
ww = container_of(lock, struct ww_mutex, base);
932-
if (use_ww_ctx && ww_ctx) {
935+
if (ww_ctx) {
933936
if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
934937
return -EALREADY;
935938

@@ -946,10 +949,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
946949
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
947950

948951
if (__mutex_trylock(lock) ||
949-
mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
952+
mutex_optimistic_spin(lock, ww_ctx, NULL)) {
950953
/* got the lock, yay! */
951954
lock_acquired(&lock->dep_map, ip);
952-
if (use_ww_ctx && ww_ctx)
955+
if (ww_ctx)
953956
ww_mutex_set_context_fastpath(ww, ww_ctx);
954957
preempt_enable();
955958
return 0;
@@ -960,7 +963,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
960963
* After waiting to acquire the wait_lock, try again.
961964
*/
962965
if (__mutex_trylock(lock)) {
963-
if (use_ww_ctx && ww_ctx)
966+
if (ww_ctx)
964967
__ww_mutex_check_waiters(lock, ww_ctx);
965968

966969
goto skip_wait;
@@ -1013,7 +1016,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
10131016
goto err;
10141017
}
10151018

1016-
if (use_ww_ctx && ww_ctx) {
1019+
if (ww_ctx) {
10171020
ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
10181021
if (ret)
10191022
goto err;
@@ -1026,7 +1029,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
10261029
* ww_mutex needs to always recheck its position since its waiter
10271030
* list is not FIFO ordered.
10281031
*/
1029-
if ((use_ww_ctx && ww_ctx) || !first) {
1032+
if (ww_ctx || !first) {
10301033
first = __mutex_waiter_is_first(lock, &waiter);
10311034
if (first)
10321035
__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
@@ -1039,7 +1042,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
10391042
* or we must see its unlock and acquire.
10401043
*/
10411044
if (__mutex_trylock(lock) ||
1042-
(first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
1045+
(first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
10431046
break;
10441047

10451048
spin_lock(&lock->wait_lock);
@@ -1048,7 +1051,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
10481051
acquired:
10491052
__set_current_state(TASK_RUNNING);
10501053

1051-
if (use_ww_ctx && ww_ctx) {
1054+
if (ww_ctx) {
10521055
/*
10531056
* Wound-Wait; we stole the lock (!first_waiter), check the
10541057
* waiters as anyone might want to wound us.
@@ -1068,7 +1071,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
10681071
/* got the lock - cleanup and rejoice! */
10691072
lock_acquired(&lock->dep_map, ip);
10701073

1071-
if (use_ww_ctx && ww_ctx)
1074+
if (ww_ctx)
10721075
ww_mutex_lock_acquired(ww, ww_ctx);
10731076

10741077
spin_unlock(&lock->wait_lock);

0 commit comments

Comments (0)