
Commit 859247d

a-darwish (Ahmed S. Darwish) authored and Peter Zijlstra committed
seqlock: lockdep assert non-preemptibility on seqcount_t write
Preemption must be disabled before entering a sequence count write side
critical section. Failing to do so, the seqcount read side can preempt the
write side section and spin for the entire scheduler tick. If that reader
belongs to a real-time scheduling class, it can spin forever and the kernel
will livelock.

Assert through lockdep that preemption is disabled for seqcount writers.

Signed-off-by: Ahmed S. Darwish <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 8fd8ad5 commit 859247d
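
To make the enforced rule concrete, here is a minimal writer/reader sketch. It is not part of the patch, and the names foo_seq, foo_a, foo_b, foo_update() and foo_read_sum() are hypothetical. The writer keeps preemption disabled across the write section, which is exactly what the new lockdep assertion checks; without it, a reader that preempts the writer keeps retrying until the writer is scheduled again.

/* Illustrative sketch only; names are hypothetical, not from the patch. */
#include <linux/seqlock.h>
#include <linux/preempt.h>
#include <linux/types.h>

static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
static u64 foo_a, foo_b;

static void foo_update(u64 a, u64 b)
{
        /* Required on the write side; now checked via lockdep. */
        preempt_disable();
        write_seqcount_begin(&foo_seq);
        foo_a = a;
        foo_b = b;
        write_seqcount_end(&foo_seq);
        preempt_enable();
}

static u64 foo_read_sum(void)
{
        unsigned int seq;
        u64 sum;

        do {
                /* Retries if a write was in progress or happened meanwhile. */
                seq = read_seqcount_begin(&foo_seq);
                sum = foo_a + foo_b;
        } while (read_seqcount_retry(&foo_seq, seq));

        return sum;
}

If the writer already runs under a lock that disables preemption (a spinlock, for example), the explicit preempt_disable()/preempt_enable() pair is unnecessary.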


include/linux/seqlock.h

Lines changed: 23 additions & 6 deletions
@@ -266,6 +266,12 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
         kcsan_nestable_atomic_end();
 }
 
+static inline void __write_seqcount_begin_nested(seqcount_t *s, int subclass)
+{
+        raw_write_seqcount_begin(s);
+        seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+}
+
 /**
  * write_seqcount_begin_nested() - start a seqcount_t write section with
  * custom lockdep nesting level
@@ -276,8 +282,19 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
  */
 static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
 {
-        raw_write_seqcount_begin(s);
-        seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+        lockdep_assert_preemption_disabled();
+        __write_seqcount_begin_nested(s, subclass);
+}
+
+/*
+ * A write_seqcount_begin() variant w/o lockdep non-preemptibility checks.
+ *
+ * Use for internal seqlock.h code where it's known that preemption is
+ * already disabled. For example, seqlock_t write side functions.
+ */
+static inline void __write_seqcount_begin(seqcount_t *s)
+{
+        __write_seqcount_begin_nested(s, 0);
 }
 
 /**
@@ -575,7 +592,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 static inline void write_seqlock(seqlock_t *sl)
 {
         spin_lock(&sl->lock);
-        write_seqcount_begin(&sl->seqcount);
+        __write_seqcount_begin(&sl->seqcount);
 }
 
 /**
@@ -601,7 +618,7 @@ static inline void write_sequnlock(seqlock_t *sl)
 static inline void write_seqlock_bh(seqlock_t *sl)
 {
         spin_lock_bh(&sl->lock);
-        write_seqcount_begin(&sl->seqcount);
+        __write_seqcount_begin(&sl->seqcount);
 }
 
 /**
@@ -628,7 +645,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
 static inline void write_seqlock_irq(seqlock_t *sl)
 {
         spin_lock_irq(&sl->lock);
-        write_seqcount_begin(&sl->seqcount);
+        __write_seqcount_begin(&sl->seqcount);
 }
 
 /**
@@ -649,7 +666,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
         unsigned long flags;
 
         spin_lock_irqsave(&sl->lock, flags);
-        write_seqcount_begin(&sl->seqcount);
+        __write_seqcount_begin(&sl->seqcount);
         return flags;
 }

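For the seqlock_t wrappers touched above, a minimal usage sketch (again not from the patch; bar_lock, bar_val, bar_set() and bar_get() are hypothetical names) shows why they can use the unchecked __write_seqcount_begin(): write_seqlock() takes the embedded spinlock first, and, as the new comment in the patch notes, that already leaves the write side non-preemptible, so repeating the lockdep assertion there would be redundant.

/* Illustrative sketch only; names are hypothetical, not from the patch. */
#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(bar_lock);
static u64 bar_val;

static void bar_set(u64 v)
{
        /* The spinlock taken here already disables preemption. */
        write_seqlock(&bar_lock);
        bar_val = v;
        write_sequnlock(&bar_lock);
}

static u64 bar_get(void)
{
        unsigned int seq;
        u64 v;

        do {
                seq = read_seqbegin(&bar_lock);
                v = bar_val;
        } while (read_seqretry(&bar_lock, seq));

        return v;
}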