Commit 6c2787f

Authored by Yanfei Xu, committed by Peter Zijlstra
locking: Remove rcu_read_{,un}lock() for preempt_{dis,en}able()
preempt_disable()/preempt_enable() is equivalent to an RCU read-side critical section, and the spinning code in mutex and rwsem already runs with preemption disabled. So remove the unnecessary rcu_read_lock()/rcu_read_unlock() calls to save some cycles in these hot paths.

Signed-off-by: Yanfei Xu <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Waiman Long <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent: 7cdacc5
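The commit's premise is that, since the RCU flavors were consolidated, a region with preemption disabled counts as an RCU read-side critical section, and the owner's task_struct is only freed after a grace period. The sketch below is illustrative only and not kernel code: the owner_rec type and the owner_rec_* helpers are made-up names. It shows the freeing side deferring destruction through call_rcu() while the reading side relies on preempt_disable()/preempt_enable() instead of rcu_read_lock()/rcu_read_unlock().

#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct owner_rec {
	struct rcu_head rcu;
	int on_cpu;
};

static void owner_rec_free(struct rcu_head *head)
{
	kfree(container_of(head, struct owner_rec, rcu));
}

/*
 * Freeing side: defer the kfree() past every RCU reader, which since the
 * flavor consolidation includes every preempt-disabled region.
 */
static void owner_rec_release(struct owner_rec *rec)
{
	call_rcu(&rec->rcu, owner_rec_free);
}

/* Reading side: a preempt-disabled region stands in for rcu_read_lock(). */
static bool owner_rec_running(struct owner_rec __rcu **slot)
{
	struct owner_rec *rec;
	bool running = false;

	preempt_disable();
	rec = rcu_dereference_sched(*slot);	/* legal: preemption is off */
	if (rec)
		running = READ_ONCE(rec->on_cpu);
	preempt_enable();

	return running;
}

In the mutex and rwsem spin paths the preempt-disabled region already exists, so the explicit rcu_read_lock()/rcu_read_unlock() pair bought nothing, which is what the two diffs below remove.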

2 files changed: +24 additions, −12 deletions


kernel/locking/mutex.c

15 additions, 7 deletions

@@ -351,13 +351,16 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 {
 	bool ret = true;
 
-	rcu_read_lock();
+	lockdep_assert_preemption_disabled();
+
 	while (__mutex_owner(lock) == owner) {
 		/*
 		 * Ensure we emit the owner->on_cpu, dereference _after_
-		 * checking lock->owner still matches owner. If that fails,
-		 * owner might point to freed memory. If it still matches,
-		 * the rcu_read_lock() ensures the memory stays valid.
+		 * checking lock->owner still matches owner. We have already
+		 * disabled preemption, which is equivalent to an RCU
+		 * read-side critical section in the optimistic spinning
+		 * code, so the task_struct won't go away during the
+		 * spinning period.
 		 */
 		barrier();
 
@@ -377,7 +380,6 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 
 		cpu_relax();
 	}
-	rcu_read_unlock();
 
 	return ret;
 }
@@ -390,19 +392,25 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 	struct task_struct *owner;
 	int retval = 1;
 
+	lockdep_assert_preemption_disabled();
+
 	if (need_resched())
 		return 0;
 
-	rcu_read_lock();
+	/*
+	 * We have already disabled preemption, which is equivalent to an RCU
+	 * read-side critical section in the optimistic spinning code, so the
+	 * task_struct won't go away during the spinning period.
+	 */
 	owner = __mutex_owner(lock);
 
 	/*
 	 * As lock holder preemption issue, we both skip spinning if task is not
 	 * on cpu or its cpu is preempted
 	 */
+
 	if (owner)
 		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
-	rcu_read_unlock();
 
 	/*
 	 * If lock->owner is not set, the mutex has been released. Return true
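Both mutex hunks replace the lock/unlock pair with lockdep_assert_preemption_disabled(), turning the implicit assumption ("my caller spins with preemption off") into a checkable precondition. A minimal usage sketch of that convention, with made-up names (owner_is_running(), owner_is_running_any_context(), owner_slot) and assuming an SMP build where task_struct::on_cpu exists:

#include <linux/lockdep.h>
#include <linux/preempt.h>
#include <linux/sched.h>

/*
 * Must be called from a preempt-disabled (spinning) path; under lockdep a
 * preemptible caller triggers a one-shot warning.
 */
static bool owner_is_running(struct task_struct **owner_slot)
{
	struct task_struct *owner;

	lockdep_assert_preemption_disabled();

	owner = READ_ONCE(*owner_slot);
	return owner && READ_ONCE(owner->on_cpu);
}

/*
 * Callers that may be preemptible create the protected region themselves,
 * as rwsem_can_spin_on_owner() does in the next file.
 */
static bool owner_is_running_any_context(struct task_struct **owner_slot)
{
	bool ret;

	preempt_disable();
	ret = owner_is_running(owner_slot);
	preempt_enable();

	return ret;
}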

kernel/locking/rwsem.c

9 additions, 5 deletions

@@ -635,15 +635,17 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	}
 
 	preempt_disable();
-	rcu_read_lock();
+	/*
+	 * Disabling preemption is equivalent to an RCU read-side critical
+	 * section, so the task_struct won't go away.
+	 */
 	owner = rwsem_owner_flags(sem, &flags);
 	/*
 	 * Don't check the read-owner as the entry may be stale.
 	 */
 	if ((flags & RWSEM_NONSPINNABLE) ||
 	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
 		ret = false;
-	rcu_read_unlock();
 	preempt_enable();
 
 	lockevent_cond_inc(rwsem_opt_fail, !ret);
@@ -671,12 +673,13 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
 	unsigned long flags, new_flags;
 	enum owner_state state;
 
+	lockdep_assert_preemption_disabled();
+
 	owner = rwsem_owner_flags(sem, &flags);
 	state = rwsem_owner_state(owner, flags);
 	if (state != OWNER_WRITER)
 		return state;
 
-	rcu_read_lock();
 	for (;;) {
 		/*
 		 * When a waiting writer set the handoff flag, it may spin
@@ -694,7 +697,9 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
 		 * Ensure we emit the owner->on_cpu, dereference _after_
 		 * checking sem->owner still matches owner, if that fails,
 		 * owner might point to free()d memory, if it still matches,
-		 * the rcu_read_lock() ensures the memory stays valid.
+		 * our spinning context has already disabled preemption,
+		 * which is equivalent to an RCU read-side critical section
+		 * and ensures the memory stays valid.
 		 */
 		barrier();
 
@@ -705,7 +710,6 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
 
 		cpu_relax();
 	}
-	rcu_read_unlock();
 
 	return state;
 }
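Both files keep the same loop shape after the change: the pre-existing preempt-disabled region replaces the old rcu_read_lock()/rcu_read_unlock() bracket, and barrier() keeps the owner->on_cpu dereference after the re-check of the owner field, as the rewritten comments describe. A condensed, illustrative sketch (my_spin_on_owner() and owner_slot are made-up names, not the kernel's rwsem or mutex code):

#include <linux/lockdep.h>
#include <linux/preempt.h>
#include <linux/sched.h>

static bool my_spin_on_owner(struct task_struct **owner_slot,
			     struct task_struct *owner)
{
	/* Caller spins with preemption off; that is our RCU read side. */
	lockdep_assert_preemption_disabled();

	while (READ_ONCE(*owner_slot) == owner) {
		barrier();	/* keep the owner->on_cpu load after the check */

		if (!READ_ONCE(owner->on_cpu) || need_resched())
			return false;

		cpu_relax();
	}

	return true;
}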
