Commit 93190bc

melver authored and Peter Zijlstra committed
seqlock, treewide: Switch to non-raw seqcount_latch interface
Switch all instrumentable users of the seqcount_latch interface over to the non-raw interface.

Co-developed-by: "Peter Zijlstra (Intel)" <[email protected]>
Signed-off-by: "Peter Zijlstra (Intel)" <[email protected]>
Signed-off-by: Marco Elver <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 5c1806c commit 93190bc

5 files changed, +33 -25 lines

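For orientation, the conversion applied in every hunk below has the same shape. The following is an illustrative sketch only, with "s" and "data[]" standing in for each user's own seqcount_latch_t and data copies; it is not code from this commit.

	/* Before: raw latch interface, two bare latch flips, no explicit end. */
	raw_write_seqcount_latch(&s);	/* steer readers to data[1] */
	data[0] = new_val;
	raw_write_seqcount_latch(&s);	/* steer readers back to data[0] */
	data[1] = new_val;

	/* After: non-raw interface with an explicit begin/end pair. */
	write_seqcount_latch_begin(&s);
	data[0] = new_val;
	write_seqcount_latch(&s);
	data[1] = new_val;
	write_seqcount_latch_end(&s);

	/*
	 * Read side: raw_read_seqcount_latch() and raw_read_seqcount_latch_retry()
	 * become read_seqcount_latch() and read_seqcount_latch_retry().
	 */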

arch/x86/kernel/tsc.c

Lines changed: 3 additions & 2 deletions
@@ -174,10 +174,11 @@ static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long ts

	c2n = per_cpu_ptr(&cyc2ns, cpu);

-	raw_write_seqcount_latch(&c2n->seq);
+	write_seqcount_latch_begin(&c2n->seq);
	c2n->data[0] = data;
-	raw_write_seqcount_latch(&c2n->seq);
+	write_seqcount_latch(&c2n->seq);
	c2n->data[1] = data;
+	write_seqcount_latch_end(&c2n->seq);
 }

 static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
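A reader of this per-CPU latch pairs with the writer above roughly as follows. This is a hypothetical sketch using the non-raw read side; read_cyc2ns() is a made-up helper, and the file's actual readers access the per-CPU data differently.

	/*
	 * Hypothetical reader; shown only to illustrate how the non-raw read
	 * side selects the stable copy written by __set_cyc2ns_scale() above.
	 */
	static struct cyc2ns_data read_cyc2ns(struct cyc2ns *c2n)
	{
		struct cyc2ns_data data;
		unsigned int seq;

		do {
			seq  = read_seqcount_latch(&c2n->seq);
			data = c2n->data[seq & 1];
		} while (read_seqcount_latch_retry(&c2n->seq, seq));

		return data;
	}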

include/linux/rbtree_latch.h

Lines changed: 11 additions & 9 deletions
@@ -14,7 +14,7 @@
  *
  * If we need to allow unconditional lookups (say as required for NMI context
  * usage) we need a more complex setup; this data structure provides this by
- * employing the latch technique -- see @raw_write_seqcount_latch -- to
+ * employing the latch technique -- see @write_seqcount_latch_begin -- to
  * implement a latched RB-tree which does allow for unconditional lookups by
  * virtue of always having (at least) one stable copy of the tree.
  *
@@ -132,7 +132,7 @@ __lt_find(void *key, struct latch_tree_root *ltr, int idx,
  * @ops: operators defining the node order
  *
  * It inserts @node into @root in an ordered fashion such that we can always
- * observe one complete tree. See the comment for raw_write_seqcount_latch().
+ * observe one complete tree. See the comment for write_seqcount_latch_begin().
  *
  * The inserts use rcu_assign_pointer() to publish the element such that the
  * tree structure is stored before we can observe the new @node.
@@ -145,10 +145,11 @@ latch_tree_insert(struct latch_tree_node *node,
		  struct latch_tree_root *root,
		  const struct latch_tree_ops *ops)
 {
-	raw_write_seqcount_latch(&root->seq);
+	write_seqcount_latch_begin(&root->seq);
	__lt_insert(node, root, 0, ops->less);
-	raw_write_seqcount_latch(&root->seq);
+	write_seqcount_latch(&root->seq);
	__lt_insert(node, root, 1, ops->less);
+	write_seqcount_latch_end(&root->seq);
 }

 /**
@@ -159,7 +160,7 @@ latch_tree_insert(struct latch_tree_node *node,
  *
  * Removes @node from the trees @root in an ordered fashion such that we can
  * always observe one complete tree. See the comment for
- * raw_write_seqcount_latch().
+ * write_seqcount_latch_begin().
  *
  * It is assumed that @node will observe one RCU quiescent state before being
  * reused of freed.
@@ -172,10 +173,11 @@ latch_tree_erase(struct latch_tree_node *node,
		 struct latch_tree_root *root,
		 const struct latch_tree_ops *ops)
 {
-	raw_write_seqcount_latch(&root->seq);
+	write_seqcount_latch_begin(&root->seq);
	__lt_erase(node, root, 0);
-	raw_write_seqcount_latch(&root->seq);
+	write_seqcount_latch(&root->seq);
	__lt_erase(node, root, 1);
+	write_seqcount_latch_end(&root->seq);
 }

 /**
@@ -204,9 +206,9 @@ latch_tree_find(void *key, struct latch_tree_root *root,
	unsigned int seq;

	do {
-		seq = raw_read_seqcount_latch(&root->seq);
+		seq = read_seqcount_latch(&root->seq);
		node = __lt_find(key, root, seq & 1, ops->comp);
-	} while (raw_read_seqcount_latch_retry(&root->seq, seq));
+	} while (read_seqcount_latch_retry(&root->seq, seq));

	return node;
 }
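As a usage illustration of the interface whose comments are updated above, here is a hedged sketch of a latch-tree client. struct my_region, its fields, and the region_* helpers are invented for the example; only the latch_tree_* API is the one shown in the diff.

	/* Hypothetical client of <linux/rbtree_latch.h>; only the latch_tree_*
	 * API is real, the rest is invented for illustration. */
	#include <linux/container_of.h>
	#include <linux/rbtree_latch.h>

	struct my_region {
		unsigned long start, size;
		struct latch_tree_node ltn;
	};

	static __always_inline bool region_less(struct latch_tree_node *a,
						struct latch_tree_node *b)
	{
		return container_of(a, struct my_region, ltn)->start <
		       container_of(b, struct my_region, ltn)->start;
	}

	static __always_inline int region_comp(void *key, struct latch_tree_node *n)
	{
		unsigned long addr = (unsigned long)key;
		struct my_region *r = container_of(n, struct my_region, ltn);

		if (addr < r->start)
			return -1;
		if (addr >= r->start + r->size)
			return 1;
		return 0;
	}

	static const struct latch_tree_ops region_ops = {
		.less = region_less,
		.comp = region_comp,
	};

	static struct latch_tree_root region_root;

	/*
	 * Writers serialize among themselves (e.g. under a spinlock) and call
	 * latch_tree_insert(&r->ltn, &region_root, &region_ops) or
	 * latch_tree_erase(&r->ltn, &region_root, &region_ops).
	 * Lookups may run from any context, including NMI:
	 *	latch_tree_find((void *)addr, &region_root, &region_ops);
	 */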

kernel/printk/printk.c

Lines changed: 5 additions & 4 deletions
@@ -560,10 +560,11 @@ bool printk_percpu_data_ready(void)
 /* Must be called under syslog_lock. */
 static void latched_seq_write(struct latched_seq *ls, u64 val)
 {
-	raw_write_seqcount_latch(&ls->latch);
+	write_seqcount_latch_begin(&ls->latch);
	ls->val[0] = val;
-	raw_write_seqcount_latch(&ls->latch);
+	write_seqcount_latch(&ls->latch);
	ls->val[1] = val;
+	write_seqcount_latch_end(&ls->latch);
 }

 /* Can be called from any context. */
@@ -574,10 +575,10 @@ static u64 latched_seq_read_nolock(struct latched_seq *ls)
	u64 val;

	do {
-		seq = raw_read_seqcount_latch(&ls->latch);
+		seq = read_seqcount_latch(&ls->latch);
		idx = seq & 0x1;
		val = ls->val[idx];
-	} while (raw_read_seqcount_latch_retry(&ls->latch, seq));
+	} while (read_seqcount_latch_retry(&ls->latch, seq));

	return val;
 }
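The latched_seq helpers above operate on a two-copy structure; a minimal sketch of such a structure and its static initialization follows. The field names mirror the diff, SEQCNT_LATCH_ZERO() is the stock initializer from <linux/seqlock.h>, and the exact declaration in printk.c may differ, so treat this as illustrative.

	#include <linux/seqlock.h>

	/* Sketch of a two-copy latched value like the one used above. */
	struct latched_seq {
		seqcount_latch_t	latch;
		u64			val[2];
	};

	static struct latched_seq clear_seq = {
		.latch	= SEQCNT_LATCH_ZERO(clear_seq.latch),
		.val[0]	= 0,
		.val[1]	= 0,
	};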

kernel/time/sched_clock.c

Lines changed: 7 additions & 5 deletions
@@ -71,13 +71,13 @@ static __always_inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)

 notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
 {
-	*seq = raw_read_seqcount_latch(&cd.seq);
+	*seq = read_seqcount_latch(&cd.seq);
	return cd.read_data + (*seq & 1);
 }

 notrace int sched_clock_read_retry(unsigned int seq)
 {
-	return raw_read_seqcount_latch_retry(&cd.seq, seq);
+	return read_seqcount_latch_retry(&cd.seq, seq);
 }

 static __always_inline unsigned long long __sched_clock(void)
@@ -132,16 +132,18 @@ unsigned long long notrace sched_clock(void)
 static void update_clock_read_data(struct clock_read_data *rd)
 {
	/* steer readers towards the odd copy */
-	raw_write_seqcount_latch(&cd.seq);
+	write_seqcount_latch_begin(&cd.seq);

	/* now its safe for us to update the normal (even) copy */
	cd.read_data[0] = *rd;

	/* switch readers back to the even copy */
-	raw_write_seqcount_latch(&cd.seq);
+	write_seqcount_latch(&cd.seq);

	/* update the backup (odd) copy with the new data */
	cd.read_data[1] = *rd;
+
+	write_seqcount_latch_end(&cd.seq);
 }

 /*
@@ -279,7 +281,7 @@ void __init generic_sched_clock_init(void)
  */
 static u64 notrace suspended_sched_clock_read(void)
 {
-	unsigned int seq = raw_read_seqcount_latch(&cd.seq);
+	unsigned int seq = read_seqcount_latch(&cd.seq);

	return cd.read_data[seq & 1].epoch_cyc;
 }
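sched_clock_read_begin() and sched_clock_read_retry(), converted above, are meant to be paired in a retry loop. The sketch below is modeled on the loop in this file's own sched_clock() path; read_sched_clock_ns() is a made-up wrapper and the field accesses are illustrative.

	/*
	 * Illustrative consumer of the two helpers converted above; field
	 * names follow struct clock_read_data as used elsewhere in this file.
	 */
	static u64 read_sched_clock_ns(void)
	{
		struct clock_read_data *rd;
		unsigned int seq;
		u64 cyc, ns;

		do {
			rd  = sched_clock_read_begin(&seq);
			cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
			      rd->sched_clock_mask;
			ns  = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
		} while (sched_clock_read_retry(seq));

		return ns;
	}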

kernel/time/timekeeping.c

Lines changed: 7 additions & 5 deletions
@@ -411,7 +411,7 @@ static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
  * We want to use this from any context including NMI and tracing /
  * instrumenting the timekeeping code itself.
  *
- * Employ the latch technique; see @raw_write_seqcount_latch.
+ * Employ the latch technique; see @write_seqcount_latch.
  *
  * So if a NMI hits the update of base[0] then it will use base[1]
  * which is still consistent. In the worst case this can result is a
@@ -424,16 +424,18 @@ static void update_fast_timekeeper(const struct tk_read_base *tkr,
	struct tk_read_base *base = tkf->base;

	/* Force readers off to base[1] */
-	raw_write_seqcount_latch(&tkf->seq);
+	write_seqcount_latch_begin(&tkf->seq);

	/* Update base[0] */
	memcpy(base, tkr, sizeof(*base));

	/* Force readers back to base[0] */
-	raw_write_seqcount_latch(&tkf->seq);
+	write_seqcount_latch(&tkf->seq);

	/* Update base[1] */
	memcpy(base + 1, base, sizeof(*base));
+
+	write_seqcount_latch_end(&tkf->seq);
 }

 static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
@@ -443,11 +445,11 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
	u64 now;

	do {
-		seq = raw_read_seqcount_latch(&tkf->seq);
+		seq = read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);
		now = ktime_to_ns(tkr->base);
		now += __timekeeping_get_ns(tkr);
-	} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
+	} while (read_seqcount_latch_retry(&tkf->seq, seq));

	return now;
 }