Skip to content

Commit 992caf7

Browse files
rostedt authored and Ingo Molnar committed
locking/rtmutex: Add adaptive spinwait mechanism
Going to sleep when locks are contended can be quite inefficient when the contention time is short and the lock owner is running on a different CPU. The MCS mechanism cannot be used because MCS is strictly FIFO ordered, while for rtmutex-based locks the waiter ordering is priority based.

Provide a simple adaptive spinwait mechanism which currently restricts the spinning to the top-priority waiter.

[ tglx: Provide a contemporary changelog, extended it to all rtmutex based locks and updated it to match the other spin on owner implementations ]

Originally-by: Gregory Haskins <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 48eb3f4 commit 992caf7

File tree

1 file changed

+65
-2
lines changed

1 file changed

+65
-2
lines changed

kernel/locking/rtmutex.c

Lines changed: 65 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,11 @@
88
* Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <[email protected]>
99
* Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
1010
* Copyright (C) 2006 Esben Nielsen
11+
* Adaptive Spinlocks:
12+
* Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
13+
* and Peter Morreale,
14+
* Adaptive Spinlocks simplification:
15+
* Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <[email protected]>
1116
*
1217
* See Documentation/locking/rt-mutex-design.rst for details.
1318
*/
@@ -1297,6 +1302,52 @@ static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock)
12971302
rt_mutex_slowunlock(lock);
12981303
}
12991304

1305+
#ifdef CONFIG_SMP
/*
 * Adaptive spinwait: instead of immediately going to sleep on a contended
 * lock, the top-priority waiter busy-waits as long as the current lock
 * owner is running on another CPU, on the expectation that the owner will
 * release the lock soon.
 *
 * @lock:   the contended lock
 * @waiter: our queued waiter entry; spinning stops if we are no longer
 *          the top waiter
 * @owner:  the lock owner observed before calling (may become stale;
 *          revalidated against rt_mutex_owner(lock) on each iteration)
 *
 * Returns true if the owner changed (caller should retry the trylock),
 * false if spinning had to stop for another reason (caller should sleep).
 *
 * Called without wait_lock held; @owner is kept alive only by the RCU
 * read-side critical section below.
 */
static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
				  struct rt_mutex_waiter *waiter,
				  struct task_struct *owner)
{
	bool res = true;

	rcu_read_lock();
	for (;;) {
		/* If owner changed, trylock again. */
		if (owner != rt_mutex_owner(lock))
			break;
		/*
		 * Ensure that @owner is dereferenced after checking that
		 * the lock owner still matches @owner. If that fails,
		 * @owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();
		/*
		 * Stop spinning when:
		 *  - the lock owner has been scheduled out
		 *  - current is no longer the top waiter
		 *  - current is requested to reschedule (redundant
		 *    for CONFIG_PREEMPT_RCU=y)
		 *  - the VCPU on which owner runs is preempted
		 */
		if (!owner->on_cpu || waiter != rt_mutex_top_waiter(lock) ||
		    need_resched() || vcpu_is_preempted(task_cpu(owner))) {
			res = false;
			break;
		}
		cpu_relax();
	}
	rcu_read_unlock();
	return res;
}
#else
/* UP stub: there is no other CPU the owner could be running on, so
 * spinning can never help — always tell the caller to sleep. */
static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
				  struct rt_mutex_waiter *waiter,
				  struct task_struct *owner)
{
	return false;
}
#endif
1350+
13001351
#ifdef RT_MUTEX_BUILD_MUTEX
13011352
/*
13021353
* Functions required for:
@@ -1381,6 +1432,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
13811432
struct rt_mutex_waiter *waiter)
13821433
{
13831434
struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
1435+
struct task_struct *owner;
13841436
int ret = 0;
13851437

13861438
for (;;) {
@@ -1403,9 +1455,14 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
14031455
break;
14041456
}
14051457

1458+
if (waiter == rt_mutex_top_waiter(lock))
1459+
owner = rt_mutex_owner(lock);
1460+
else
1461+
owner = NULL;
14061462
raw_spin_unlock_irq(&lock->wait_lock);
14071463

1408-
schedule();
1464+
if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
1465+
schedule();
14091466

14101467
raw_spin_lock_irq(&lock->wait_lock);
14111468
set_current_state(state);
@@ -1561,6 +1618,7 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
15611618
static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
15621619
{
15631620
struct rt_mutex_waiter waiter;
1621+
struct task_struct *owner;
15641622

15651623
lockdep_assert_held(&lock->wait_lock);
15661624

@@ -1579,9 +1637,14 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
15791637
if (try_to_take_rt_mutex(lock, current, &waiter))
15801638
break;
15811639

1640+
if (&waiter == rt_mutex_top_waiter(lock))
1641+
owner = rt_mutex_owner(lock);
1642+
else
1643+
owner = NULL;
15821644
raw_spin_unlock_irq(&lock->wait_lock);
15831645

1584-
schedule_rtlock();
1646+
if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
1647+
schedule_rtlock();
15851648

15861649
raw_spin_lock_irq(&lock->wait_lock);
15871650
set_current_state(TASK_RTLOCK_WAIT);

0 commit comments

Comments
 (0)