Skip to content

Commit f4a27cb

Browse files
a-darwish authored and Peter Zijlstra committed
seqlock: Reorder seqcount_t and seqlock_t API definitions
The seqlock.h seqcount_t and seqlock_t API definitions are presented in the chronological order of their development rather than the order that makes most sense to readers. This makes it hard to follow and understand the header file code. Group and reorder all of the exported seqlock.h functions according to their function. First, group together the seqcount_t standard read path functions: - __read_seqcount_begin() - raw_read_seqcount_begin() - read_seqcount_begin() since each function is implemented exactly in terms of the one above it. Then, group the special-case seqcount_t readers on their own as: - raw_read_seqcount() - raw_seqcount_begin() since the only difference between the two functions is that the second one masks the sequence counter LSB while the first one does not. Note that raw_seqcount_begin() can actually be implemented in terms of raw_read_seqcount(), which will be done in a follow-up commit. Then, group the seqcount_t write path functions, instead of injecting unrelated seqcount_t latch functions between them, and order them as: - raw_write_seqcount_begin() - raw_write_seqcount_end() - write_seqcount_begin_nested() - write_seqcount_begin() - write_seqcount_end() - raw_write_seqcount_barrier() - write_seqcount_invalidate() which is the expected natural order. This also isolates the seqcount_t latch functions into their own area, at the end of the sequence counters section, and before jumping to the next one: sequential locks (seqlock_t). Do a similar grouping and reordering for seqlock_t "locking" readers vs. the "conditionally locking or lockless" ones. No implementation code was changed in any of the reordering above. Signed-off-by: Ahmed S. Darwish <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent d3b35b8 commit f4a27cb

File tree

1 file changed

+78
-80
lines changed

1 file changed

+78
-80
lines changed

include/linux/seqlock.h

Lines changed: 78 additions & 80 deletions
Original file line number | Diff line number | Diff line change
@@ -128,23 +128,6 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
128128
return ret;
129129
}
130130

131-
/**
132-
* raw_read_seqcount - Read the raw seqcount
133-
* @s: pointer to seqcount_t
134-
* Returns: count to be passed to read_seqcount_retry
135-
*
136-
* raw_read_seqcount opens a read critical section of the given
137-
* seqcount without any lockdep checking and without checking or
138-
* masking the LSB. Calling code is responsible for handling that.
139-
*/
140-
static inline unsigned raw_read_seqcount(const seqcount_t *s)
141-
{
142-
unsigned ret = READ_ONCE(s->sequence);
143-
smp_rmb();
144-
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
145-
return ret;
146-
}
147-
148131
/**
149132
* raw_read_seqcount_begin - start seq-read critical section w/o lockdep
150133
* @s: pointer to seqcount_t
@@ -176,6 +159,23 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
176159
return raw_read_seqcount_begin(s);
177160
}
178161

162+
/**
163+
* raw_read_seqcount - Read the raw seqcount
164+
* @s: pointer to seqcount_t
165+
* Returns: count to be passed to read_seqcount_retry
166+
*
167+
* raw_read_seqcount opens a read critical section of the given
168+
* seqcount without any lockdep checking and without checking or
169+
* masking the LSB. Calling code is responsible for handling that.
170+
*/
171+
static inline unsigned raw_read_seqcount(const seqcount_t *s)
172+
{
173+
unsigned ret = READ_ONCE(s->sequence);
174+
smp_rmb();
175+
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
176+
return ret;
177+
}
178+
179179
/**
180180
* raw_seqcount_begin - begin a seq-read critical section
181181
* @s: pointer to seqcount_t
@@ -234,8 +234,6 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
234234
return __read_seqcount_retry(s, start);
235235
}
236236

237-
238-
239237
static inline void raw_write_seqcount_begin(seqcount_t *s)
240238
{
241239
kcsan_nestable_atomic_begin();
@@ -250,6 +248,23 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
250248
kcsan_nestable_atomic_end();
251249
}
252250

251+
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
252+
{
253+
raw_write_seqcount_begin(s);
254+
seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
255+
}
256+
257+
static inline void write_seqcount_begin(seqcount_t *s)
258+
{
259+
write_seqcount_begin_nested(s, 0);
260+
}
261+
262+
static inline void write_seqcount_end(seqcount_t *s)
263+
{
264+
seqcount_release(&s->dep_map, _RET_IP_);
265+
raw_write_seqcount_end(s);
266+
}
267+
253268
/**
254269
* raw_write_seqcount_barrier - do a seq write barrier
255270
* @s: pointer to seqcount_t
@@ -300,6 +315,21 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
300315
kcsan_nestable_atomic_end();
301316
}
302317

318+
/**
319+
* write_seqcount_invalidate - invalidate in-progress read-side seq operations
320+
* @s: pointer to seqcount_t
321+
*
322+
* After write_seqcount_invalidate, no read-side seq operations will complete
323+
* successfully and see data older than this.
324+
*/
325+
static inline void write_seqcount_invalidate(seqcount_t *s)
326+
{
327+
smp_wmb();
328+
kcsan_nestable_atomic_begin();
329+
s->sequence+=2;
330+
kcsan_nestable_atomic_end();
331+
}
332+
303333
static inline int raw_read_seqcount_latch(seqcount_t *s)
304334
{
305335
/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
@@ -395,38 +425,6 @@ static inline void raw_write_seqcount_latch(seqcount_t *s)
395425
smp_wmb(); /* increment "sequence" before following stores */
396426
}
397427

398-
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
399-
{
400-
raw_write_seqcount_begin(s);
401-
seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
402-
}
403-
404-
static inline void write_seqcount_begin(seqcount_t *s)
405-
{
406-
write_seqcount_begin_nested(s, 0);
407-
}
408-
409-
static inline void write_seqcount_end(seqcount_t *s)
410-
{
411-
seqcount_release(&s->dep_map, _RET_IP_);
412-
raw_write_seqcount_end(s);
413-
}
414-
415-
/**
416-
* write_seqcount_invalidate - invalidate in-progress read-side seq operations
417-
* @s: pointer to seqcount_t
418-
*
419-
* After write_seqcount_invalidate, no read-side seq operations will complete
420-
* successfully and see data older than this.
421-
*/
422-
static inline void write_seqcount_invalidate(seqcount_t *s)
423-
{
424-
smp_wmb();
425-
kcsan_nestable_atomic_begin();
426-
s->sequence+=2;
427-
kcsan_nestable_atomic_end();
428-
}
429-
430428
/*
431429
* Sequential locks (seqlock_t)
432430
*
@@ -555,35 +553,6 @@ static inline void read_sequnlock_excl(seqlock_t *sl)
555553
spin_unlock(&sl->lock);
556554
}
557555

558-
/**
559-
* read_seqbegin_or_lock - begin a sequence number check or locking block
560-
* @lock: sequence lock
561-
* @seq : sequence number to be checked
562-
*
563-
* First try it once optimistically without taking the lock. If that fails,
564-
* take the lock. The sequence number is also used as a marker for deciding
565-
* whether to be a reader (even) or writer (odd).
566-
* N.B. seq must be initialized to an even number to begin with.
567-
*/
568-
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
569-
{
570-
if (!(*seq & 1)) /* Even */
571-
*seq = read_seqbegin(lock);
572-
else /* Odd */
573-
read_seqlock_excl(lock);
574-
}
575-
576-
static inline int need_seqretry(seqlock_t *lock, int seq)
577-
{
578-
return !(seq & 1) && read_seqretry(lock, seq);
579-
}
580-
581-
static inline void done_seqretry(seqlock_t *lock, int seq)
582-
{
583-
if (seq & 1)
584-
read_sequnlock_excl(lock);
585-
}
586-
587556
static inline void read_seqlock_excl_bh(seqlock_t *sl)
588557
{
589558
spin_lock_bh(&sl->lock);
@@ -621,6 +590,35 @@ read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
621590
spin_unlock_irqrestore(&sl->lock, flags);
622591
}
623592

593+
/**
594+
* read_seqbegin_or_lock - begin a sequence number check or locking block
595+
* @lock: sequence lock
596+
* @seq : sequence number to be checked
597+
*
598+
* First try it once optimistically without taking the lock. If that fails,
599+
* take the lock. The sequence number is also used as a marker for deciding
600+
* whether to be a reader (even) or writer (odd).
601+
* N.B. seq must be initialized to an even number to begin with.
602+
*/
603+
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
604+
{
605+
if (!(*seq & 1)) /* Even */
606+
*seq = read_seqbegin(lock);
607+
else /* Odd */
608+
read_seqlock_excl(lock);
609+
}
610+
611+
static inline int need_seqretry(seqlock_t *lock, int seq)
612+
{
613+
return !(seq & 1) && read_seqretry(lock, seq);
614+
}
615+
616+
static inline void done_seqretry(seqlock_t *lock, int seq)
617+
{
618+
if (seq & 1)
619+
read_sequnlock_excl(lock);
620+
}
621+
624622
static inline unsigned long
625623
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
626624
{

0 commit comments

Comments (0)