
Commit 2180f21

Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:

 "Just a handful of changes in this cycle: an ARM64 performance
  optimization, a comment fix and a debug output fix"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/osq: Use optimized spinning loop for arm64
  locking/qspinlock: Fix inaccessible URL of MCS lock paper
  locking/lockdep: Fix lockdep_stats indentation problem
2 parents 634cd4b + f5bfdc8

File tree

4 files changed (+28, -21 lines)


arch/arm64/include/asm/spinlock.h

Lines changed: 9 additions & 0 deletions
@@ -11,4 +11,13 @@
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()    smp_mb()

+/*
+ * Changing this will break osq_lock() thanks to the call inside
+ * smp_cond_load_relaxed().
+ *
+ * See:
+ * https://lore.kernel.org/lkml/[email protected]
+ */
+#define vcpu_is_preempted(cpu)      false
+
 #endif /* __ASM_SPINLOCK_H */
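Why vcpu_is_preempted() must stay a compile-time false here: on arm64, smp_cond_load_relaxed() can wait with an exclusive-load/WFE pair, which only wakes when the watched location is written (or an event such as the IPI behind need_resched() arrives). A polling condition like a live vcpu_is_preempted() could become true without any store to the watched word, leaving the CPU parked in WFE. A simplified sketch of that arm64 shape, paraphrased from arch/arm64/include/asm/barrier.h (the in-tree macro additionally uses __unqual_scalar_typeof() and size-dispatched __cmpwait() helpers; this is not the verbatim kernel code):

#define smp_cond_load_relaxed(ptr, cond_expr)                   \
({                                                              \
        typeof(ptr) __PTR = (ptr);                              \
        typeof(*ptr) VAL;                                       \
        for (;;) {                                              \
                VAL = READ_ONCE(*__PTR);                        \
                if (cond_expr)  /* re-checked on every wakeup */\
                        break;                                  \
                /* LDXR arms the exclusive monitor on *__PTR;   \
                 * WFE then sleeps until that cache line is     \
                 * written or an event/interrupt fires. */      \
                __cmpwait_relaxed(__PTR, VAL);                  \
        }                                                       \
        VAL;                                                    \
})

With vcpu_is_preempted() fixed at false, the compiler can drop that term from cond_expr entirely, so nothing in the wait condition depends on polling.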

kernel/locking/lockdep_proc.c

Lines changed: 2 additions & 2 deletions
@@ -286,9 +286,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 	seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
 			nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
-	seq_printf(m, " number of stack traces: %llu\n",
+	seq_printf(m, " number of stack traces: %11llu\n",
 		   lockdep_stack_trace_count());
-	seq_printf(m, " number of stack hash chains: %llu\n",
+	seq_printf(m, " number of stack hash chains: %11llu\n",
 		   lockdep_stack_hash_count());
 #endif
 	seq_printf(m, " combined max dependencies: %11u\n",

kernel/locking/osq_lock.c

Lines changed: 10 additions & 13 deletions
@@ -134,20 +134,17 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	 * cmpxchg in an attempt to undo our queueing.
 	 */

-	while (!READ_ONCE(node->locked)) {
-		/*
-		 * If we need to reschedule bail... so we can block.
-		 * Use vcpu_is_preempted() to avoid waiting for a preempted
-		 * lock holder:
-		 */
-		if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
-			goto unqueue;
-
-		cpu_relax();
-	}
-	return true;
+	/*
+	 * Wait to acquire the lock or cancelation. Note that need_resched()
+	 * will come with an IPI, which will wake smp_cond_load_relaxed() if it
+	 * is implemented with a monitor-wait. vcpu_is_preempted() relies on
+	 * polling, be careful.
+	 */
+	if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
+				  vcpu_is_preempted(node_cpu(node->prev))))
+		return true;

-unqueue:
+	/* unqueue */
 	/*
 	 * Step - A -- stabilize @prev
 	 *
kernel/locking/qspinlock.c

Lines changed: 7 additions & 6 deletions
@@ -31,14 +31,15 @@
 /*
  * The basic principle of a queue-based spinlock can best be understood
  * by studying a classic queue-based spinlock implementation called the
- * MCS lock. The paper below provides a good description for this kind
- * of lock.
+ * MCS lock. A copy of the original MCS lock paper ("Algorithms for Scalable
+ * Synchronization on Shared-Memory Multiprocessors by Mellor-Crummey and
+ * Scott") is available at
  *
- * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
+ * https://bugzilla.kernel.org/show_bug.cgi?id=206115
  *
- * This queued spinlock implementation is based on the MCS lock, however to make
- * it fit the 4 bytes we assume spinlock_t to be, and preserve its existing
- * API, we must modify it somehow.
+ * This queued spinlock implementation is based on the MCS lock, however to
+ * make it fit the 4 bytes we assume spinlock_t to be, and preserve its
+ * existing API, we must modify it somehow.
  *
  * In particular; where the traditional MCS lock consists of a tail pointer
  * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to