Commit 364eeb7

Merge tag 'locking-core-2024-11-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:

 "Lockdep:
   - Enable PROVE_RAW_LOCK_NESTING with PROVE_LOCKING (Sebastian Andrzej Siewior)
   - Add lockdep_cleanup_dead_cpu() (David Woodhouse)

  futexes:
   - Use atomic64_inc_return() in get_inode_sequence_number() (Uros Bizjak)
   - Use atomic64_try_cmpxchg_relaxed() in get_inode_sequence_number() (Uros Bizjak)

  RT locking:
   - Add sparse annotation PREEMPT_RT's locking (Sebastian Andrzej Siewior)

  spinlocks:
   - Use atomic_try_cmpxchg_release() in osq_unlock() (Uros Bizjak)

  atomics:
   - x86: Use ALT_OUTPUT_SP() for __alternative_atomic64() (Uros Bizjak)
   - x86: Use ALT_OUTPUT_SP() for __arch_{,try_}cmpxchg64_emu() (Uros Bizjak)

  KCSAN, seqlocks:
   - Support seqcount_latch_t (Marco Elver)

  <linux/cleanup.h>:
   - Add if_not_guard() conditional guard helper (David Lechner)
   - Adjust scoped_guard() macros to avoid potential warning (Przemek Kitszel)
   - Remove address space of returned pointer (Uros Bizjak)

  WW mutexes:
   - locking/ww_mutex: Adjust to lockdep nest_lock requirements (Thomas Hellström)

  Rust integration:
   - Fix raw_spin_lock initialization on PREEMPT_RT (Eder Zulian)

  Misc cleanups & fixes:
   - lockdep: Fix wait-type check related warnings (Ahmed Ehab)
   - lockdep: Use info level for initial info messages (Jiri Slaby)
   - spinlocks: Make __raw_* lock ops static (Geert Uytterhoeven)
   - pvqspinlock: Convert fields of 'enum vcpu_state' to uppercase (Qiuxu Zhuo)
   - iio: magnetometer: Fix if () scoped_guard() formatting (Stephen Rothwell)
   - rtmutex: Fix misleading comment (Peter Zijlstra)
   - percpu-rw-semaphores: Fix grammar in percpu-rw-semaphore.rst (Xiu Jianfeng)"

* tag 'locking-core-2024-11-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (29 commits)
  locking/Documentation: Fix grammar in percpu-rw-semaphore.rst
  iio: magnetometer: fix if () scoped_guard() formatting
  rust: helpers: Avoid raw_spin_lock initialization for PREEMPT_RT
  kcsan, seqlock: Fix incorrect assumption in read_seqbegin()
  seqlock, treewide: Switch to non-raw seqcount_latch interface
  kcsan, seqlock: Support seqcount_latch_t
  time/sched_clock: Broaden sched_clock()'s instrumentation coverage
  time/sched_clock: Swap update_clock_read_data() latch writes
  locking/atomic/x86: Use ALT_OUTPUT_SP() for __arch_{,try_}cmpxchg64_emu()
  locking/atomic/x86: Use ALT_OUTPUT_SP() for __alternative_atomic64()
  cleanup: Add conditional guard helper
  cleanup: Adjust scoped_guard() macros to avoid potential warning
  locking/osq_lock: Use atomic_try_cmpxchg_release() in osq_unlock()
  cleanup: Remove address space of returned pointer
  locking/rtmutex: Fix misleading comment
  locking/rt: Annotate unlock followed by lock for sparse.
  locking/rt: Add sparse annotation for RCU.
  locking/rt: Remove one __cond_lock() in RT's spin_trylock_irqsave()
  locking/rt: Add sparse annotation PREEMPT_RT's sleeping locks.
  locking/pvqspinlock: Convert fields of 'enum vcpu_state' to uppercase
  ...
2 parents d8d78a9 + 3b49a34 commit 364eeb7

30 files changed, +355 -161 lines changed

Documentation/locking/percpu-rw-semaphore.rst

Lines changed: 2 additions & 2 deletions

@@ -16,8 +16,8 @@ writing is very expensive, it calls synchronize_rcu() that can take
 hundreds of milliseconds.
 
 The lock is declared with "struct percpu_rw_semaphore" type.
-The lock is initialized percpu_init_rwsem, it returns 0 on success and
--ENOMEM on allocation failure.
+The lock is initialized with percpu_init_rwsem, it returns 0 on success
+and -ENOMEM on allocation failure.
 The lock must be freed with percpu_free_rwsem to avoid memory leak.
 
 The lock is locked for read with percpu_down_read, percpu_up_read and

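For context, the API this documentation covers is used roughly as in the sketch below. This is illustrative only and not part of the diff; my_sem and the callers are made-up names.

    #include <linux/percpu-rwsem.h>

    static struct percpu_rw_semaphore my_sem;   /* hypothetical lock */

    static int my_init(void)
    {
            /* Returns 0 on success, -ENOMEM on allocation failure. */
            return percpu_init_rwsem(&my_sem);
    }

    static void my_reader(void)
    {
            percpu_down_read(&my_sem);          /* cheap per-CPU fast path */
            /* ... read-side critical section ... */
            percpu_up_read(&my_sem);
    }

    static void my_cleanup(void)
    {
            percpu_free_rwsem(&my_sem);         /* release the per-CPU counters */
    }
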
Documentation/locking/seqlock.rst

Lines changed: 1 addition & 1 deletion

@@ -153,7 +153,7 @@ Use seqcount_latch_t when the write side sections cannot be protected
 from interruption by readers. This is typically the case when the read
 side can be invoked from NMI handlers.
 
-Check `raw_write_seqcount_latch()` for more information.
+Check `write_seqcount_latch()` for more information.
 
 
 .. _seqlock_t:

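As background for the latch interface named above, here is a minimal writer/reader sketch using the non-raw API this series switches to. The names my_latch, my_data and struct my_state are made up, and the SEQCNT_LATCH_ZERO initializer is assumed; the real call sites are in arch/x86/kernel/tsc.c and include/linux/rbtree_latch.h further down.

    #include <linux/seqlock.h>

    struct my_state { u64 val; };

    static seqcount_latch_t my_latch = SEQCNT_LATCH_ZERO(my_latch);
    static struct my_state my_data[2];          /* two copies, one always stable */

    static void my_update(const struct my_state *val)
    {
            write_seqcount_latch_begin(&my_latch);  /* readers move to copy 1 */
            my_data[0] = *val;
            write_seqcount_latch(&my_latch);        /* readers move to copy 0 */
            my_data[1] = *val;
            write_seqcount_latch_end(&my_latch);
    }

    /* Safe even from NMI context: at least one copy is always consistent. */
    static struct my_state my_read(void)
    {
            struct my_state snap;
            unsigned int seq;

            do {
                    seq = read_seqcount_latch(&my_latch);
                    snap = my_data[seq & 1];
            } while (read_seqcount_latch_retry(&my_latch, seq));

            return snap;
    }
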
arch/x86/include/asm/atomic64_32.h

Lines changed: 2 additions & 1 deletion

@@ -51,7 +51,8 @@ static __always_inline s64 arch_atomic64_read_nonatomic(const atomic64_t *v)
 #ifdef CONFIG_X86_CMPXCHG64
 #define __alternative_atomic64(f, g, out, in...) \
 	asm volatile("call %c[func]" \
-		     : out : [func] "i" (atomic64_##g##_cx8), ## in)
+		     : ALT_OUTPUT_SP(out) \
+		     : [func] "i" (atomic64_##g##_cx8), ## in)
 
 #define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
 #else

arch/x86/include/asm/cmpxchg_32.h

Lines changed: 3 additions & 3 deletions

@@ -94,7 +94,7 @@ static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp,
 	asm volatile(ALTERNATIVE(_lock_loc \
 				 "call cmpxchg8b_emu", \
 				 _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
-		     : "+a" (o.low), "+d" (o.high) \
+		     : ALT_OUTPUT_SP("+a" (o.low), "+d" (o.high)) \
 		     : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr) \
 		     : "memory"); \
 \
@@ -123,8 +123,8 @@ static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64
 				 "call cmpxchg8b_emu", \
 				 _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
 		     CC_SET(e) \
-		     : CC_OUT(e) (ret), \
-		       "+a" (o.low), "+d" (o.high) \
+		     : ALT_OUTPUT_SP(CC_OUT(e) (ret), \
+				     "+a" (o.low), "+d" (o.high)) \
 		     : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr) \
 		     : "memory"); \
 \

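Both hunks above add ALT_OUTPUT_SP() because one of the alternatives expands to a call (atomic64_*_cx8 or cmpxchg8b_emu). Assuming ALT_OUTPUT_SP() simply prepends ASM_CALL_CONSTRAINT, i.e. "+r" (current_stack_pointer), to the output list, the effect is the pattern sketched below; my_emu_helper is hypothetical and current_stack_pointer comes from the x86 <asm/asm.h>.

    /*
     * Inline asm that may emit a "call" should name the stack pointer as an
     * output, so the compiler does not place it before frame setup.
     */
    static inline void my_call_asm(void)
    {
            asm volatile("call my_emu_helper"            /* hypothetical out-of-line helper */
                         : "+r" (current_stack_pointer)  /* what ALT_OUTPUT_SP() adds */
                         :
                         : "memory");
    }
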
arch/x86/kernel/tsc.c

Lines changed: 3 additions & 2 deletions

@@ -174,10 +174,11 @@ static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long ts
 
 	c2n = per_cpu_ptr(&cyc2ns, cpu);
 
-	raw_write_seqcount_latch(&c2n->seq);
+	write_seqcount_latch_begin(&c2n->seq);
 	c2n->data[0] = data;
-	raw_write_seqcount_latch(&c2n->seq);
+	write_seqcount_latch(&c2n->seq);
 	c2n->data[1] = data;
+	write_seqcount_latch_end(&c2n->seq);
 }
 
 static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)

drivers/iio/magnetometer/af8133j.c

Lines changed: 2 additions & 1 deletion

@@ -312,10 +312,11 @@ static int af8133j_set_scale(struct af8133j_data *data,
 	 * When suspended, just store the new range to data->range to be
 	 * applied later during power up.
 	 */
-	if (!pm_runtime_status_suspended(dev))
+	if (!pm_runtime_status_suspended(dev)) {
 		scoped_guard(mutex, &data->mutex)
 			ret = regmap_write(data->regmap,
 					   AF8133J_REG_RANGE, range);
+	}
 
 	pm_runtime_enable(dev);
 

include/linux/cleanup.h

Lines changed: 58 additions & 11 deletions

@@ -273,6 +273,12 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
  * an anonymous instance of the (guard) class, not recommended for
  * conditional locks.
  *
+ * if_not_guard(name, args...) { <error handling> }:
+ *	convenience macro for conditional guards that calls the statement that
+ *	follows only if the lock was not acquired (typically an error return).
+ *
+ *	Only for conditional locks.
+ *
  * scoped_guard (name, args...) { }:
  *	similar to CLASS(name, scope)(args), except the variable (with the
  *	explicit name 'scope') is declard in a for-loop such that its scope is
@@ -285,14 +291,20 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
  *	similar to scoped_guard(), except it does fail when the lock
  *	acquire fails.
  *
+ *	Only for conditional locks.
  */
 
+#define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond) \
+static __maybe_unused const bool class_##_name##_is_conditional = _is_cond
+
 #define DEFINE_GUARD(_name, _type, _lock, _unlock) \
+	__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
 	DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
 	static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
-	{ return *_T; }
+	{ return (void *)(__force unsigned long)*_T; }
 
 #define DEFINE_GUARD_COND(_name, _ext, _condlock) \
+	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
 	EXTEND_CLASS(_name, _ext, \
 		     ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
 		     class_##_name##_t _T) \
@@ -303,16 +315,48 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
 	CLASS(_name, __UNIQUE_ID(guard))
 
 #define __guard_ptr(_name) class_##_name##_lock_ptr
+#define __is_cond_ptr(_name) class_##_name##_is_conditional
 
-#define scoped_guard(_name, args...) \
-	for (CLASS(_name, scope)(args), \
-	     *done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1)
-
-#define scoped_cond_guard(_name, _fail, args...) \
-	for (CLASS(_name, scope)(args), \
-	     *done = NULL; !done; done = (void *)1) \
-		if (!__guard_ptr(_name)(&scope)) _fail; \
-		else
+/*
+ * Helper macro for scoped_guard().
+ *
+ * Note that the "!__is_cond_ptr(_name)" part of the condition ensures that
+ * compiler would be sure that for the unconditional locks the body of the
+ * loop (caller-provided code glued to the else clause) could not be skipped.
+ * It is needed because the other part - "__guard_ptr(_name)(&scope)" - is too
+ * hard to deduce (even if could be proven true for unconditional locks).
+ */
+#define __scoped_guard(_name, _label, args...) \
+	for (CLASS(_name, scope)(args); \
+	     __guard_ptr(_name)(&scope) || !__is_cond_ptr(_name); \
+	     ({ goto _label; })) \
+		if (0) { \
+_label: \
+			break; \
+		} else
+
+#define scoped_guard(_name, args...) \
+	__scoped_guard(_name, __UNIQUE_ID(label), args)
+
+#define __scoped_cond_guard(_name, _fail, _label, args...) \
+	for (CLASS(_name, scope)(args); true; ({ goto _label; })) \
+		if (!__guard_ptr(_name)(&scope)) { \
+			BUILD_BUG_ON(!__is_cond_ptr(_name)); \
+			_fail; \
+_label: \
+			break; \
+		} else
+
+#define scoped_cond_guard(_name, _fail, args...) \
+	__scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args)
+
+#define __if_not_guard(_name, _id, args...) \
+	BUILD_BUG_ON(!__is_cond_ptr(_name)); \
+	CLASS(_name, _id)(args); \
+	if (!__guard_ptr(_name)(&_id))
+
+#define if_not_guard(_name, args...) \
+	__if_not_guard(_name, __UNIQUE_ID(guard), args)
 
 /*
  * Additional helper macros for generating lock guards with types, either for
@@ -347,7 +391,7 @@ static inline void class_##_name##_destructor(class_##_name##_t *_T) \
 \
 static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
 { \
-	return _T->lock; \
+	return (void *)(__force unsigned long)_T->lock; \
 }
 
 
@@ -369,14 +413,17 @@ static inline class_##_name##_t class_##_name##_constructor(void) \
 }
 
 #define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \
+	__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
 	__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \
 	__DEFINE_LOCK_GUARD_1(_name, _type, _lock)
 
 #define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...) \
+	__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
 	__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
 	__DEFINE_LOCK_GUARD_0(_name, _lock)
 
 #define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \
+	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
 	EXTEND_CLASS(_name, _ext, \
 		     ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
 			if (_T->lock && !(_condlock)) _T->lock = NULL; \

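For readers less familiar with these macros, here is a hedged usage sketch of the guard forms touched above. The names my_lock and my_op are made up, and it assumes the unconditional mutex and conditional mutex_intr guard classes defined in <linux/mutex.h>.

    #include <linux/cleanup.h>
    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(my_lock);       /* hypothetical lock */

    static int my_op(void)
    {
            /* Unconditional form: my_lock is dropped when the block is left. */
            scoped_guard(mutex, &my_lock) {
                    /* ... critical section ... */
            }

            /*
             * New conditional helper: the statement that follows runs only if
             * the lock was NOT acquired; valid only for conditional guard
             * classes (BUILD_BUG_ON otherwise).
             */
            if_not_guard(mutex_intr, &my_lock)
                    return -EINTR;

            /* my_lock is held here and released on function exit. */
            return 0;
    }
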
include/linux/irqflags.h

Lines changed: 6 additions & 0 deletions

@@ -18,19 +18,25 @@
 #include <asm/irqflags.h>
 #include <asm/percpu.h>
 
+struct task_struct;
+
 /* Currently lockdep_softirqs_on/off is used only by lockdep */
 #ifdef CONFIG_PROVE_LOCKING
   extern void lockdep_softirqs_on(unsigned long ip);
   extern void lockdep_softirqs_off(unsigned long ip);
   extern void lockdep_hardirqs_on_prepare(void);
   extern void lockdep_hardirqs_on(unsigned long ip);
   extern void lockdep_hardirqs_off(unsigned long ip);
+  extern void lockdep_cleanup_dead_cpu(unsigned int cpu,
+				       struct task_struct *idle);
 #else
   static inline void lockdep_softirqs_on(unsigned long ip) { }
   static inline void lockdep_softirqs_off(unsigned long ip) { }
   static inline void lockdep_hardirqs_on_prepare(void) { }
   static inline void lockdep_hardirqs_on(unsigned long ip) { }
   static inline void lockdep_hardirqs_off(unsigned long ip) { }
+  static inline void lockdep_cleanup_dead_cpu(unsigned int cpu,
+					      struct task_struct *idle) {}
 #endif
 
 #ifdef CONFIG_TRACE_IRQFLAGS

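The new hook is intended for the CPU-hotplug teardown path, giving lockdep a chance to check and reset its state for the dead CPU's idle task. A heavily hedged sketch of the call shape; the actual call site lives in the hotplug core and is not part of this hunk.

    #include <linux/irqflags.h>
    #include <linux/sched.h>

    /* Hypothetical helper on the offline path of a dying CPU. */
    static void my_cpu_died(unsigned int cpu, struct task_struct *idle)
    {
            /* Stubbed to a no-op unless CONFIG_PROVE_LOCKING is enabled. */
            lockdep_cleanup_dead_cpu(cpu, idle);
    }
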
include/linux/lockdep.h

Lines changed: 1 addition & 1 deletion

@@ -173,7 +173,7 @@ static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
 			      (lock)->dep_map.lock_type)
 
 #define lockdep_set_subclass(lock, sub) \
-	lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
+	lockdep_init_map_type(&(lock)->dep_map, (lock)->dep_map.name, (lock)->dep_map.key, sub,\
 			      (lock)->dep_map.wait_type_inner, \
 			      (lock)->dep_map.wait_type_outer, \
 			      (lock)->dep_map.lock_type)

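The one-liner above makes lockdep_set_subclass() reuse the name already registered in the dep_map instead of re-stringifying the macro argument. A hedged illustration of why that matters; my_key and my_lock_init are made up.

    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    static struct lock_class_key my_key;        /* hypothetical class key */

    static void my_lock_init(spinlock_t *lock)
    {
            spin_lock_init(lock);
            lockdep_set_class_and_name(lock, &my_key, "my_lock");

            /*
             * Before this change, the call below re-registered the map under
             * the stringified argument ("lock"); now it keeps the "my_lock"
             * name set above and only changes the subclass.
             */
            lockdep_set_subclass(lock, SINGLE_DEPTH_NESTING);
    }
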
include/linux/rbtree_latch.h

Lines changed: 11 additions & 9 deletions

@@ -14,7 +14,7 @@
  *
  * If we need to allow unconditional lookups (say as required for NMI context
  * usage) we need a more complex setup; this data structure provides this by
- * employing the latch technique -- see @raw_write_seqcount_latch -- to
+ * employing the latch technique -- see @write_seqcount_latch_begin -- to
  * implement a latched RB-tree which does allow for unconditional lookups by
  * virtue of always having (at least) one stable copy of the tree.
  *
@@ -132,7 +132,7 @@ __lt_find(void *key, struct latch_tree_root *ltr, int idx,
  * @ops: operators defining the node order
  *
  * It inserts @node into @root in an ordered fashion such that we can always
- * observe one complete tree. See the comment for raw_write_seqcount_latch().
+ * observe one complete tree. See the comment for write_seqcount_latch_begin().
  *
  * The inserts use rcu_assign_pointer() to publish the element such that the
  * tree structure is stored before we can observe the new @node.
@@ -145,10 +145,11 @@ latch_tree_insert(struct latch_tree_node *node,
 		  struct latch_tree_root *root,
 		  const struct latch_tree_ops *ops)
 {
-	raw_write_seqcount_latch(&root->seq);
+	write_seqcount_latch_begin(&root->seq);
 	__lt_insert(node, root, 0, ops->less);
-	raw_write_seqcount_latch(&root->seq);
+	write_seqcount_latch(&root->seq);
 	__lt_insert(node, root, 1, ops->less);
+	write_seqcount_latch_end(&root->seq);
 }
 
 /**
@@ -159,7 +160,7 @@ latch_tree_insert(struct latch_tree_node *node,
  *
 * Removes @node from the trees @root in an ordered fashion such that we can
 * always observe one complete tree. See the comment for
- * raw_write_seqcount_latch().
+ * write_seqcount_latch_begin().
 *
 * It is assumed that @node will observe one RCU quiescent state before being
 * reused of freed.
@@ -172,10 +173,11 @@ latch_tree_erase(struct latch_tree_node *node,
 		 struct latch_tree_root *root,
 		 const struct latch_tree_ops *ops)
 {
-	raw_write_seqcount_latch(&root->seq);
+	write_seqcount_latch_begin(&root->seq);
 	__lt_erase(node, root, 0);
-	raw_write_seqcount_latch(&root->seq);
+	write_seqcount_latch(&root->seq);
 	__lt_erase(node, root, 1);
+	write_seqcount_latch_end(&root->seq);
 }
 
 /**
@@ -204,9 +206,9 @@ latch_tree_find(void *key, struct latch_tree_root *root,
 	unsigned int seq;
 
 	do {
-		seq = raw_read_seqcount_latch(&root->seq);
+		seq = read_seqcount_latch(&root->seq);
 		node = __lt_find(key, root, seq & 1, ops->comp);
-	} while (raw_read_seqcount_latch_retry(&root->seq, seq));
+	} while (read_seqcount_latch_retry(&root->seq, seq));
 
 	return node;
 }

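For orientation, a hedged sketch of how the latch-tree API above is typically wired up; struct my_node, my_root, the address key and the helpers are made-up names, and writers are assumed to serialize externally while readers may run from NMI context.

    #include <linux/rbtree_latch.h>

    struct my_node {
            unsigned long addr;                 /* hypothetical lookup key */
            struct latch_tree_node lt;          /* one rb_node per tree copy */
    };

    static bool my_less(struct latch_tree_node *a, struct latch_tree_node *b)
    {
            return container_of(a, struct my_node, lt)->addr <
                   container_of(b, struct my_node, lt)->addr;
    }

    static int my_comp(void *key, struct latch_tree_node *n)
    {
            unsigned long addr = *(unsigned long *)key;
            unsigned long node_addr = container_of(n, struct my_node, lt)->addr;

            if (addr < node_addr)
                    return -1;
            if (addr > node_addr)
                    return 1;
            return 0;
    }

    static const struct latch_tree_ops my_ops = {
            .less = my_less,
            .comp = my_comp,
    };

    static struct latch_tree_root my_root;

    static void my_insert(struct my_node *n)
    {
            latch_tree_insert(&n->lt, &my_root, &my_ops);   /* writer side */
    }

    static struct my_node *my_find(unsigned long addr)
    {
            struct latch_tree_node *lt;

            lt = latch_tree_find(&addr, &my_root, &my_ops); /* lockless reader */
            return lt ? container_of(lt, struct my_node, lt) : NULL;
    }
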