Skip to content

Commit fcf77d4

Browse files
npiggin authored and mpe committed
powerpc/qspinlock: don't propagate the not-sleepy state
To simplify things, don't propagate the not-sleepy condition back down the queue. Instead, have the waiters clear their own node->sleepy when finding the lock owner is not preempted.

Signed-off-by: Nicholas Piggin <[email protected]>
Tested-by: Shrikanth Hegde <[email protected]>
Reviewed-by: "Nysal Jan K.A" <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
Link: https://msgid.link/[email protected]
1 parent fd8fae5 commit fcf77d4

File tree

1 file changed

+8
-18
lines changed

1 file changed

+8
-18
lines changed

arch/powerpc/lib/qspinlock.c

Lines changed: 8 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -350,7 +350,7 @@ static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u
350350
return __yield_to_locked_owner(lock, val, paravirt, mustq);
351351
}
352352

353-
static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool *set_sleepy, bool paravirt)
353+
static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool paravirt)
354354
{
355355
struct qnode *next;
356356
int owner;
@@ -359,18 +359,17 @@ static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool *
359359
return;
360360
if (!pv_yield_propagate_owner)
361361
return;
362-
if (*set_sleepy)
363-
return;
364362

365363
next = READ_ONCE(node->next);
366364
if (!next)
367365
return;
368366

367+
if (next->sleepy)
368+
return;
369+
369370
owner = get_owner_cpu(val);
370-
if (vcpu_is_preempted(owner)) {
371+
if (vcpu_is_preempted(owner))
371372
next->sleepy = 1;
372-
*set_sleepy = true;
373-
}
374373
}
375374

376375
/* Called inside spin_begin() */
@@ -385,12 +384,7 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
385384
if (!pv_yield_propagate_owner)
386385
goto yield_prev;
387386

388-
if (!READ_ONCE(node->sleepy)) {
389-
/* Propagate back sleepy==false */
390-
if (node->next && node->next->sleepy)
391-
node->next->sleepy = 0;
392-
goto yield_prev;
393-
} else {
387+
if (node->sleepy) {
394388
u32 val = READ_ONCE(lock->val);
395389

396390
if (val & _Q_LOCKED_VAL) {
@@ -410,6 +404,7 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
410404
if (preempted)
411405
return preempted;
412406
}
407+
node->sleepy = false;
413408
}
414409

415410
yield_prev:
@@ -533,7 +528,6 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
533528
bool sleepy = false;
534529
bool mustq = false;
535530
int idx;
536-
bool set_sleepy = false;
537531
int iters = 0;
538532

539533
BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
@@ -591,10 +585,6 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
591585
spec_barrier();
592586
spin_end();
593587

594-
/* Clear out stale propagated sleepy */
595-
if (paravirt && pv_yield_propagate_owner && node->sleepy)
596-
node->sleepy = 0;
597-
598588
smp_rmb(); /* acquire barrier for the mcs lock */
599589

600590
/*
@@ -636,7 +626,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
636626
}
637627
}
638628

639-
propagate_sleepy(node, val, &set_sleepy, paravirt);
629+
propagate_sleepy(node, val, paravirt);
640630
preempted = yield_head_to_locked_owner(lock, val, paravirt);
641631
if (!maybe_stealers)
642632
continue;

0 commit comments

Comments (0)