@@ -34,13 +34,15 @@
 
 static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
 					struct rt_mutex *lock,
-					struct ww_acquire_ctx *ww_ctx)
+					struct ww_acquire_ctx *ww_ctx,
+					struct wake_q_head *wake_q)
 {
 	return 0;
 }
 
 static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
-					    struct ww_acquire_ctx *ww_ctx)
+					    struct ww_acquire_ctx *ww_ctx,
+					    struct wake_q_head *wake_q)
 {
 }
 
@@ -1201,7 +1203,8 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
 					   struct rt_mutex_waiter *waiter,
 					   struct task_struct *task,
 					   struct ww_acquire_ctx *ww_ctx,
-					   enum rtmutex_chainwalk chwalk)
+					   enum rtmutex_chainwalk chwalk,
+					   struct wake_q_head *wake_q)
 {
 	struct task_struct *owner = rt_mutex_owner(lock);
 	struct rt_mutex_waiter *top_waiter = waiter;
@@ -1245,7 +1248,10 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
 
 	/* Check whether the waiter should back out immediately */
 	rtm = container_of(lock, struct rt_mutex, rtmutex);
-	res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx);
+	preempt_disable();
+	res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx, wake_q);
+	wake_up_q(wake_q);
+	preempt_enable();
 	if (res) {
 		raw_spin_lock(&task->pi_lock);
 		rt_mutex_dequeue(lock, waiter);
@@ -1674,12 +1680,14 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
  * @state: The task state for sleeping
  * @chwalk: Indicator whether full or partial chainwalk is requested
  * @waiter: Initializer waiter for blocking
+ * @wake_q: The wake_q to wake tasks after we release the wait_lock
  */
 static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 				       struct ww_acquire_ctx *ww_ctx,
 				       unsigned int state,
 				       enum rtmutex_chainwalk chwalk,
-				       struct rt_mutex_waiter *waiter)
+				       struct rt_mutex_waiter *waiter,
+				       struct wake_q_head *wake_q)
 {
 	struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
 	struct ww_mutex *ww = ww_container_of(rtm);
@@ -1690,7 +1698,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 	/* Try to acquire the lock again: */
 	if (try_to_take_rt_mutex(lock, current, NULL)) {
 		if (build_ww_mutex() && ww_ctx) {
-			__ww_mutex_check_waiters(rtm, ww_ctx);
+			__ww_mutex_check_waiters(rtm, ww_ctx, wake_q);
 			ww_mutex_lock_acquired(ww, ww_ctx);
 		}
 		return 0;
@@ -1700,15 +1708,15 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 
 	trace_contention_begin(lock, LCB_F_RT);
 
-	ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk);
+	ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk, wake_q);
 	if (likely(!ret))
 		ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);
 
 	if (likely(!ret)) {
 		/* acquired the lock */
 		if (build_ww_mutex() && ww_ctx) {
 			if (!ww_ctx->is_wait_die)
-				__ww_mutex_check_waiters(rtm, ww_ctx);
+				__ww_mutex_check_waiters(rtm, ww_ctx, wake_q);
 			ww_mutex_lock_acquired(ww, ww_ctx);
 		}
 	} else {
@@ -1730,7 +1738,8 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 
 static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
 					     struct ww_acquire_ctx *ww_ctx,
-					     unsigned int state)
+					     unsigned int state,
+					     struct wake_q_head *wake_q)
 {
 	struct rt_mutex_waiter waiter;
 	int ret;
@@ -1739,7 +1748,7 @@ static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
 	waiter.ww_ctx = ww_ctx;
 
 	ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK,
-				  &waiter);
+				  &waiter, wake_q);
 
 	debug_rt_mutex_free_waiter(&waiter);
 	return ret;
@@ -1755,6 +1764,7 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
 				     struct ww_acquire_ctx *ww_ctx,
 				     unsigned int state)
 {
+	DEFINE_WAKE_Q(wake_q);
 	unsigned long flags;
 	int ret;
 
@@ -1776,8 +1786,11 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 * irqsave/restore variants.
 	 */
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
-	ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
+	ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state, &wake_q);
+	preempt_disable();
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+	wake_up_q(&wake_q);
+	preempt_enable();
 	rt_mutex_post_schedule();
 
 	return ret;
@@ -1803,8 +1816,10 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
 /**
  * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
  * @lock: The underlying RT mutex
+ * @wake_q: The wake_q to wake tasks after we release the wait_lock
  */
-static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
+static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
+					   struct wake_q_head *wake_q)
 {
 	struct rt_mutex_waiter waiter;
 	struct task_struct *owner;
@@ -1821,7 +1836,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
 
 	trace_contention_begin(lock, LCB_F_RT);
 
-	task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK);
+	task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK, wake_q);
 
 	for (;;) {
 		/* Try to acquire the lock again */
@@ -1832,7 +1847,11 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
 			owner = rt_mutex_owner(lock);
 		else
 			owner = NULL;
+		preempt_disable();
 		raw_spin_unlock_irq(&lock->wait_lock);
+		wake_up_q(wake_q);
+		wake_q_init(wake_q);
+		preempt_enable();
 
 		if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
 			schedule_rtlock();
@@ -1857,10 +1876,14 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
 static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
 {
 	unsigned long flags;
+	DEFINE_WAKE_Q(wake_q);
 
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
-	rtlock_slowlock_locked(lock);
+	rtlock_slowlock_locked(lock, &wake_q);
+	preempt_disable();
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+	wake_up_q(&wake_q);
+	preempt_enable();
 }
 
 #endif /* RT_MUTEX_BUILD_SPINLOCKS */
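
The diff threads a wake_q_head through the rtmutex slow paths so that wakeups are collected while lock->wait_lock is held but only issued after it is dropped, with preemption disabled across the unlock-then-wake window. Below is a minimal sketch of that deferral pattern in isolation, assuming a hypothetical "foo" lock; the struct, its fields, and foo_release() are illustrative and not part of this commit, though DEFINE_WAKE_Q(), wake_q_add(), and wake_up_q() are the real kernel APIs used above.

#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/sched/wake_q.h>
#include <linux/spinlock.h>

struct foo {
	raw_spinlock_t		wait_lock;
	struct task_struct	*waiter;	/* hypothetical single waiter */
};

static void foo_release(struct foo *f)
{
	DEFINE_WAKE_Q(wake_q);
	unsigned long flags;

	raw_spin_lock_irqsave(&f->wait_lock, flags);
	/* Collect the task(s) to wake while still holding wait_lock... */
	if (f->waiter) {
		wake_q_add(&wake_q, f->waiter);
		f->waiter = NULL;
	}
	/*
	 * ...but defer the actual wakeups until after wait_lock is
	 * dropped, so no wakeup runs under the raw spinlock. The
	 * preempt_disable()/preempt_enable() pair keeps this task from
	 * being preempted between the unlock and wake_up_q(), so the
	 * deferred wakeups are issued promptly.
	 */
	preempt_disable();
	raw_spin_unlock_irqrestore(&f->wait_lock, flags);
	wake_up_q(&wake_q);
	preempt_enable();
}

The same shape appears three times in the commit: rt_mutex_slowlock() and rtlock_slowlock() flush the queue once after releasing wait_lock, while the rtlock_slowlock_locked() loop additionally reinitializes the queue with wake_q_init() so it can be reused on the next iteration.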