Skip to content

Commit fea0e18

Browse files
ubizjak authored and Ingo Molnar committed
locking/pvqspinlock: Use try_cmpxchg() in qspinlock_paravirt.h
Use try_cmpxchg(*ptr, &old, new) instead of cmpxchg(*ptr, old, new) == old in qspinlock_paravirt.h. The x86 CMPXCHG instruction returns success in the ZF flag, so this change saves a compare after cmpxchg. No functional change intended. Signed-off-by: Uros Bizjak <[email protected]> Signed-off-by: Ingo Molnar <[email protected]> Reviewed-by: Waiman Long <[email protected]> Cc: Linus Torvalds <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 6a97734 commit fea0e18

File tree

1 file changed

+8
-8
lines changed

1 file changed

+8
-8
lines changed

kernel/locking/qspinlock_paravirt.h

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -86,9 +86,10 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
8686
*/
8787
for (;;) {
8888
int val = atomic_read(&lock->val);
89+
u8 old = 0;
8990

9091
if (!(val & _Q_LOCKED_PENDING_MASK) &&
91-
(cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
92+
try_cmpxchg_acquire(&lock->locked, &old, _Q_LOCKED_VAL)) {
9293
lockevent_inc(pv_lock_stealing);
9394
return true;
9495
}
@@ -211,8 +212,9 @@ static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
211212
int hopcnt = 0;
212213

213214
for_each_hash_entry(he, offset, hash) {
215+
struct qspinlock *old = NULL;
214216
hopcnt++;
215-
if (!cmpxchg(&he->lock, NULL, lock)) {
217+
if (try_cmpxchg(&he->lock, &old, lock)) {
216218
WRITE_ONCE(he->node, node);
217219
lockevent_pv_hop(hopcnt);
218220
return &he->lock;
@@ -355,7 +357,7 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
355357
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
356358
{
357359
struct pv_node *pn = (struct pv_node *)node;
358-
360+
enum vcpu_state old = vcpu_halted;
359361
/*
360362
* If the vCPU is indeed halted, advance its state to match that of
361363
* pv_wait_node(). If OTOH this fails, the vCPU was running and will
@@ -372,8 +374,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
372374
* subsequent writes.
373375
*/
374376
smp_mb__before_atomic();
375-
if (cmpxchg_relaxed(&pn->state, vcpu_halted, vcpu_hashed)
376-
!= vcpu_halted)
377+
if (!try_cmpxchg_relaxed(&pn->state, &old, vcpu_hashed))
377378
return;
378379

379380
/*
@@ -541,15 +542,14 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
541542
#ifndef __pv_queued_spin_unlock
542543
__visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
543544
{
544-
u8 locked;
545+
u8 locked = _Q_LOCKED_VAL;
545546

546547
/*
547548
* We must not unlock if SLOW, because in that case we must first
548549
* unhash. Otherwise it would be possible to have multiple @lock
549550
* entries, which would be BAD.
550551
*/
551-
locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
552-
if (likely(locked == _Q_LOCKED_VAL))
552+
if (try_cmpxchg_release(&lock->locked, &locked, 0))
553553
return;
554554

555555
__pv_queued_spin_unlock_slowpath(lock, locked);

0 commit comments

Comments
 (0)