
Commit b76b44f

Waiman Long authored and Ingo Molnar committed
locking/lock_events: Add locking events for rtmutex slow paths
Add locking events for rtlock_slowlock() and rt_mutex_slowlock() for
profiling the slow path behavior of rt_spin_lock() and rt_mutex_lock().

Signed-off-by: Waiman Long <[email protected]>
Signed-off-by: Boqun Feng <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent f23ecef · commit b76b44f
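For context on how these counters are consumed: with CONFIG_LOCK_EVENT_COUNTS=y,
each event declared in lock_events_list.h becomes a read-only counter file in
debugfs, conventionally under /sys/kernel/debug/lock_event_counts/ (reading,
say, the rtmutex_slowlock file there gives the total number of slow-path
entries summed over all CPUs). The rtlock_* events fire only on PREEMPT_RT
kernels, where spinlocks take the rtlock slow path.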

File tree

2 files changed (+45, −5 lines)


kernel/locking/lock_events_list.h

Lines changed: 21 additions & 0 deletions
@@ -67,3 +67,24 @@ LOCK_EVENT(rwsem_rlock_handoff)	/* # of read lock handoffs */
 LOCK_EVENT(rwsem_wlock)		/* # of write locks acquired */
 LOCK_EVENT(rwsem_wlock_fail)	/* # of failed write lock acquisitions */
 LOCK_EVENT(rwsem_wlock_handoff)	/* # of write lock handoffs */
+
+/*
+ * Locking events for rtlock_slowlock()
+ */
+LOCK_EVENT(rtlock_slowlock)	/* # of rtlock_slowlock() calls */
+LOCK_EVENT(rtlock_slow_acq1)	/* # of locks acquired after wait_lock */
+LOCK_EVENT(rtlock_slow_acq2)	/* # of locks acquired in for loop */
+LOCK_EVENT(rtlock_slow_sleep)	/* # of sleeps */
+LOCK_EVENT(rtlock_slow_wake)	/* # of wakeup's */
+
+/*
+ * Locking events for rt_mutex_slowlock()
+ */
+LOCK_EVENT(rtmutex_slowlock)	/* # of rt_mutex_slowlock() calls */
+LOCK_EVENT(rtmutex_slow_block)	/* # of rt_mutex_slowlock_block() calls */
+LOCK_EVENT(rtmutex_slow_acq1)	/* # of locks acquired after wait_lock */
+LOCK_EVENT(rtmutex_slow_acq2)	/* # of locks acquired at the end */
+LOCK_EVENT(rtmutex_slow_acq3)	/* # of locks acquired in *block() */
+LOCK_EVENT(rtmutex_slow_sleep)	/* # of sleeps */
+LOCK_EVENT(rtmutex_slow_wake)	/* # of wakeup's */
+LOCK_EVENT(rtmutex_deadlock)	/* # of rt_mutex_handle_deadlock()'s */
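The LOCK_EVENT() lines above are pure declarations: lock_events_list.h is an
x-macro list that the lock-events machinery re-includes under different
definitions of LOCK_EVENT() to generate both the event enum and the name table
exposed in debugfs. A minimal sketch of that pattern (simplified from
kernel/locking/lock_events.h and lock_events.c, not the exact kernel code):

/* Pass 1: generate one enum constant per event. */
#define LOCK_EVENT(name)	LOCKEVENT_ ## name,
enum lock_events {
#include "lock_events_list.h"
	lockevent_num,	/* total number of events */
};
#undef LOCK_EVENT

/* Pass 2: generate the matching name strings for debugfs. */
#define LOCK_EVENT(name)	#name,
static const char * const lockevent_names[] = {
#include "lock_events_list.h"
};
#undef LOCK_EVENT

Because both passes walk the same list, the enum values and the name strings
stay in sync automatically when new events like those above are appended.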
kernel/locking/rtmutex.c

Lines changed: 24 additions & 5 deletions
@@ -27,6 +27,7 @@
 #include <trace/events/lock.h>
 
 #include "rtmutex_common.h"
+#include "lock_events.h"
 
 #ifndef WW_RT
 # define build_ww_mutex()	(false)
@@ -1612,10 +1613,13 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
 	struct task_struct *owner;
 	int ret = 0;
 
+	lockevent_inc(rtmutex_slow_block);
 	for (;;) {
 		/* Try to acquire the lock: */
-		if (try_to_take_rt_mutex(lock, current, waiter))
+		if (try_to_take_rt_mutex(lock, current, waiter)) {
+			lockevent_inc(rtmutex_slow_acq3);
 			break;
+		}
 
 		if (timeout && !timeout->task) {
 			ret = -ETIMEDOUT;
@@ -1638,8 +1642,10 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
 			owner = NULL;
 		raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
-		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
+		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) {
+			lockevent_inc(rtmutex_slow_sleep);
 			rt_mutex_schedule();
+		}
 
 		raw_spin_lock_irq(&lock->wait_lock);
 		set_current_state(state);
@@ -1694,13 +1700,15 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 	int ret;
 
 	lockdep_assert_held(&lock->wait_lock);
+	lockevent_inc(rtmutex_slowlock);
 
 	/* Try to acquire the lock again: */
 	if (try_to_take_rt_mutex(lock, current, NULL)) {
 		if (build_ww_mutex() && ww_ctx) {
 			__ww_mutex_check_waiters(rtm, ww_ctx, wake_q);
 			ww_mutex_lock_acquired(ww, ww_ctx);
 		}
+		lockevent_inc(rtmutex_slow_acq1);
 		return 0;
 	}
 
@@ -1719,10 +1727,12 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 			__ww_mutex_check_waiters(rtm, ww_ctx, wake_q);
 			ww_mutex_lock_acquired(ww, ww_ctx);
 		}
+		lockevent_inc(rtmutex_slow_acq2);
 	} else {
 		__set_current_state(TASK_RUNNING);
 		remove_waiter(lock, waiter);
 		rt_mutex_handle_deadlock(ret, chwalk, lock, waiter);
+		lockevent_inc(rtmutex_deadlock);
 	}
 
 	/*
@@ -1751,6 +1761,7 @@ static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
 				  &waiter, wake_q);
 
 	debug_rt_mutex_free_waiter(&waiter);
+	lockevent_cond_inc(rtmutex_slow_wake, !wake_q_empty(wake_q));
 	return ret;
 }
 
@@ -1823,9 +1834,12 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
 	struct task_struct *owner;
 
 	lockdep_assert_held(&lock->wait_lock);
+	lockevent_inc(rtlock_slowlock);
 
-	if (try_to_take_rt_mutex(lock, current, NULL))
+	if (try_to_take_rt_mutex(lock, current, NULL)) {
+		lockevent_inc(rtlock_slow_acq1);
 		return;
+	}
 
 	rt_mutex_init_rtlock_waiter(&waiter);
 
@@ -1838,17 +1852,21 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
 
 	for (;;) {
 		/* Try to acquire the lock again */
-		if (try_to_take_rt_mutex(lock, current, &waiter))
+		if (try_to_take_rt_mutex(lock, current, &waiter)) {
+			lockevent_inc(rtlock_slow_acq2);
 			break;
+		}
 
 		if (&waiter == rt_mutex_top_waiter(lock))
 			owner = rt_mutex_owner(lock);
 		else
 			owner = NULL;
 		raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
-		if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
+		if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) {
+			lockevent_inc(rtlock_slow_sleep);
 			schedule_rtlock();
+		}
 
 		raw_spin_lock_irq(&lock->wait_lock);
 		set_current_state(TASK_RTLOCK_WAIT);
@@ -1865,6 +1883,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
 	debug_rt_mutex_free_waiter(&waiter);
 
 	trace_contention_end(lock, 0);
+	lockevent_cond_inc(rtlock_slow_wake, !wake_q_empty(wake_q));
 }
 
 static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
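A note on cost: lockevent_inc() and lockevent_cond_inc() bump a per-CPU counter
when CONFIG_LOCK_EVENT_COUNTS=y and compile to nothing otherwise, so these
hooks add no overhead to production slow paths. The *_slow_wake events use the
conditional form so they only count exits where the wake queue is actually
non-empty. Roughly, a simplified sketch of the idea (see
kernel/locking/lock_events.h for the real definitions):

#ifdef CONFIG_LOCK_EVENT_COUNTS
/* One counter per event, per CPU; summed over CPUs when read via debugfs. */
DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]);

#define lockevent_inc(ev)	this_cpu_inc(lockevents[LOCKEVENT_ ## ev])
#define lockevent_cond_inc(ev, c)		\
do {						\
	if (c)					\
		lockevent_inc(ev);		\
} while (0)
#else
/* Profiling disabled: the hooks vanish at compile time. */
#define lockevent_inc(ev)
#define lockevent_cond_inc(ev, c)
#endif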
