
Commit 025e82b

a-darwish authored and Peter Zijlstra committed
timekeeping: Use sequence counter with associated raw spinlock
A sequence counter write side critical section must be protected by some form of locking to serialize writers. A plain seqcount_t does not contain the information of which lock must be held when entering a write side critical section.

Use the new seqcount_raw_spinlock_t data type, which allows associating a raw spinlock with the sequence counter. This enables lockdep to verify that the raw spinlock used for writer serialization is held when the write side critical section is entered.

If lockdep is disabled, this lock association is compiled out and has neither storage size nor runtime overhead.

Signed-off-by: Ahmed S. Darwish <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
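For readers unfamiliar with the new type, here is a minimal sketch of the write/read pattern this patch applies. The names (my_lock, my_seq, my_data and the two functions) are illustrative only, not part of the patch; the calls are the stock <linux/seqlock.h> API:

/* Kernel code: needs <linux/seqlock.h> and <linux/spinlock.h>. */
static DEFINE_RAW_SPINLOCK(my_lock);

/* Associate my_lock with the counter. Under lockdep, entering a
 * write side critical section on my_seq without holding my_lock is
 * flagged; without lockdep, the association is compiled out. */
static seqcount_raw_spinlock_t my_seq =
	SEQCNT_RAW_SPINLOCK_ZERO(my_seq, &my_lock);

static u64 my_data;

static void my_write(u64 val)
{
	raw_spin_lock(&my_lock);	/* serialize writers */
	write_seqcount_begin(&my_seq);	/* lockdep: is my_lock held? */
	my_data = val;
	write_seqcount_end(&my_seq);
	raw_spin_unlock(&my_lock);
}

static u64 my_read(void)
{
	unsigned int seq;
	u64 val;

	/* Lockless read: retry if a writer raced with us. */
	do {
		seq = read_seqcount_begin(&my_seq);
		val = my_data;
	} while (read_seqcount_retry(&my_seq, seq));

	return val;
}

With lockdep disabled, the lock pointer inside seqcount_raw_spinlock_t is compiled out, so the type degenerates to a plain seqcount_t with no extra storage or runtime cost.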
1 parent 77cc278 · commit 025e82b

File tree

1 file changed: +11 −8 lines


kernel/time/timekeeping.c

Lines changed: 11 additions & 8 deletions

@@ -39,18 +39,19 @@ enum timekeeping_adv_mode {
 	TK_ADV_FREQ
 };
 
+static DEFINE_RAW_SPINLOCK(timekeeper_lock);
+
 /*
  * The most important data for readout fits into a single 64 byte
  * cache line.
  */
 static struct {
-	seqcount_t		seq;
+	seqcount_raw_spinlock_t	seq;
 	struct timekeeper	timekeeper;
 } tk_core ____cacheline_aligned = {
-	.seq = SEQCNT_ZERO(tk_core.seq),
+	.seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock),
 };
 
-static DEFINE_RAW_SPINLOCK(timekeeper_lock);
 static struct timekeeper shadow_timekeeper;
 
 /**
@@ -63,7 +64,7 @@ static struct timekeeper shadow_timekeeper;
  * See @update_fast_timekeeper() below.
  */
 struct tk_fast {
-	seqcount_t		seq;
+	seqcount_raw_spinlock_t	seq;
 	struct tk_read_base	base[2];
 };
 
@@ -80,11 +81,13 @@ static struct clocksource dummy_clock = {
 };
 
 static struct tk_fast tk_fast_mono ____cacheline_aligned = {
+	.seq     = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_mono.seq, &timekeeper_lock),
 	.base[0] = { .clock = &dummy_clock, },
 	.base[1] = { .clock = &dummy_clock, },
 };
 
 static struct tk_fast tk_fast_raw ____cacheline_aligned = {
+	.seq     = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_raw.seq, &timekeeper_lock),
 	.base[0] = { .clock = &dummy_clock, },
 	.base[1] = { .clock = &dummy_clock, },
 };
@@ -157,7 +160,7 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
  * tk_clock_read - atomic clocksource read() helper
  *
  * This helper is necessary to use in the read paths because, while the
- * seqlock ensures we don't return a bad value while structures are updated,
+ * seqcount ensures we don't return a bad value while structures are updated,
  * it doesn't protect from potential crashes. There is the possibility that
  * the tkr's clocksource may change between the read reference, and the
  * clock reference passed to the read function. This can cause crashes if
@@ -222,10 +225,10 @@ static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
 	unsigned int seq;
 
 	/*
-	 * Since we're called holding a seqlock, the data may shift
+	 * Since we're called holding a seqcount, the data may shift
 	 * under us while we're doing the calculation. This can cause
 	 * false positives, since we'd note a problem but throw the
-	 * results away. So nest another seqlock here to atomically
+	 * results away. So nest another seqcount here to atomically
 	 * grab the points we are checking with.
 	 */
 	do {
@@ -486,7 +489,7 @@ EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
  *
  * To keep it NMI safe since we're accessing from tracing, we're not using a
  * separate timekeeper with updates to monotonic clock and boot offset
- * protected with seqlocks. This has the following minor side effects:
+ * protected with seqcounts. This has the following minor side effects:
  *
  * (1) Its possible that a timestamp be taken after the boot offset is updated
  * but before the timekeeper is updated. If this happens, the new boot offset
