Skip to content

Commit 5ec5852

Browse files
jlelli authored and Peter Zijlstra committed
locking/mutex: Make mutex::wait_lock irq safe
With the proxy-execution series, we traverse the task->mutex->task blocked_on/owner chain in the scheduler core. We do this while holding the rq::lock to keep the structures in place while taking and releasing the alternating lock types. Since the mutex::wait_lock is one of the locks we will take in this way under the rq::lock in the scheduler core, we need to make sure that its usage elsewhere is irq safe. [rebase & fix {un,}lock_wait_lock helpers in ww_mutex.h] Signed-off-by: Juri Lelli <[email protected]> Signed-off-by: Connor O'Brien <[email protected]> Signed-off-by: John Stultz <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Reviewed-by: Metin Kaya <[email protected]> Reviewed-by: Valentin Schneider <[email protected]> Tested-by: K Prateek Nayak <[email protected]> Tested-by: Metin Kaya <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 894d1b3 commit 5ec5852

File tree

2 files changed

+21
-18
lines changed

2 files changed

+21
-18
lines changed

kernel/locking/mutex.c

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -578,6 +578,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
578578
DEFINE_WAKE_Q(wake_q);
579579
struct mutex_waiter waiter;
580580
struct ww_mutex *ww;
581+
unsigned long flags;
581582
int ret;
582583

583584
if (!use_ww_ctx)
@@ -620,7 +621,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
620621
return 0;
621622
}
622623

623-
raw_spin_lock(&lock->wait_lock);
624+
raw_spin_lock_irqsave(&lock->wait_lock, flags);
624625
/*
625626
* After waiting to acquire the wait_lock, try again.
626627
*/
@@ -681,7 +682,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
681682
goto err;
682683
}
683684

684-
raw_spin_unlock(&lock->wait_lock);
685+
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
685686
/* Make sure we do wakeups before calling schedule */
686687
wake_up_q(&wake_q);
687688
wake_q_init(&wake_q);
@@ -706,9 +707,9 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
706707
trace_contention_begin(lock, LCB_F_MUTEX);
707708
}
708709

709-
raw_spin_lock(&lock->wait_lock);
710+
raw_spin_lock_irqsave(&lock->wait_lock, flags);
710711
}
711-
raw_spin_lock(&lock->wait_lock);
712+
raw_spin_lock_irqsave(&lock->wait_lock, flags);
712713
acquired:
713714
__set_current_state(TASK_RUNNING);
714715

@@ -734,7 +735,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
734735
if (ww_ctx)
735736
ww_mutex_lock_acquired(ww, ww_ctx);
736737

737-
raw_spin_unlock(&lock->wait_lock);
738+
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
738739
wake_up_q(&wake_q);
739740
preempt_enable();
740741
return 0;
@@ -744,7 +745,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
744745
__mutex_remove_waiter(lock, &waiter);
745746
err_early_kill:
746747
trace_contention_end(lock, ret);
747-
raw_spin_unlock(&lock->wait_lock);
748+
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
748749
debug_mutex_free_waiter(&waiter);
749750
mutex_release(&lock->dep_map, ip);
750751
wake_up_q(&wake_q);
@@ -915,6 +916,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
915916
struct task_struct *next = NULL;
916917
DEFINE_WAKE_Q(wake_q);
917918
unsigned long owner;
919+
unsigned long flags;
918920

919921
mutex_release(&lock->dep_map, ip);
920922

@@ -941,7 +943,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
941943
}
942944
}
943945

944-
raw_spin_lock(&lock->wait_lock);
946+
raw_spin_lock_irqsave(&lock->wait_lock, flags);
945947
debug_mutex_unlock(lock);
946948
if (!list_empty(&lock->wait_list)) {
947949
/* get the first entry from the wait-list: */
@@ -959,7 +961,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
959961
__mutex_handoff(lock, next);
960962

961963
preempt_disable();
962-
raw_spin_unlock(&lock->wait_lock);
964+
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
963965
wake_up_q(&wake_q);
964966
preempt_enable();
965967
}

kernel/locking/ww_mutex.h

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -70,14 +70,14 @@ __ww_mutex_has_waiters(struct mutex *lock)
7070
return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
7171
}
7272

73-
static inline void lock_wait_lock(struct mutex *lock)
73+
static inline void lock_wait_lock(struct mutex *lock, unsigned long *flags)
7474
{
75-
raw_spin_lock(&lock->wait_lock);
75+
raw_spin_lock_irqsave(&lock->wait_lock, *flags);
7676
}
7777

78-
static inline void unlock_wait_lock(struct mutex *lock)
78+
static inline void unlock_wait_lock(struct mutex *lock, unsigned long *flags)
7979
{
80-
raw_spin_unlock(&lock->wait_lock);
80+
raw_spin_unlock_irqrestore(&lock->wait_lock, *flags);
8181
}
8282

8383
static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
@@ -144,14 +144,14 @@ __ww_mutex_has_waiters(struct rt_mutex *lock)
144144
return rt_mutex_has_waiters(&lock->rtmutex);
145145
}
146146

147-
static inline void lock_wait_lock(struct rt_mutex *lock)
147+
static inline void lock_wait_lock(struct rt_mutex *lock, unsigned long *flags)
148148
{
149-
raw_spin_lock(&lock->rtmutex.wait_lock);
149+
raw_spin_lock_irqsave(&lock->rtmutex.wait_lock, *flags);
150150
}
151151

152-
static inline void unlock_wait_lock(struct rt_mutex *lock)
152+
static inline void unlock_wait_lock(struct rt_mutex *lock, unsigned long *flags)
153153
{
154-
raw_spin_unlock(&lock->rtmutex.wait_lock);
154+
raw_spin_unlock_irqrestore(&lock->rtmutex.wait_lock, *flags);
155155
}
156156

157157
static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
@@ -380,6 +380,7 @@ static __always_inline void
380380
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
381381
{
382382
DEFINE_WAKE_Q(wake_q);
383+
unsigned long flags;
383384

384385
ww_mutex_lock_acquired(lock, ctx);
385386

@@ -408,10 +409,10 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
408409
* Uh oh, we raced in fastpath, check if any of the waiters need to
409410
* die or wound us.
410411
*/
411-
lock_wait_lock(&lock->base);
412+
lock_wait_lock(&lock->base, &flags);
412413
__ww_mutex_check_waiters(&lock->base, ctx, &wake_q);
413414
preempt_disable();
414-
unlock_wait_lock(&lock->base);
415+
unlock_wait_lock(&lock->base, &flags);
415416
wake_up_q(&wake_q);
416417
preempt_enable();
417418
}

0 commit comments

Comments (0)