Skip to content

Commit 253e1e9

Browse files
committed
Merge tag 'lkmm.2024.07.12a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu
Pull memory model updates from Paul McKenney: "lkmm: Fix corner-case locking bug and improve documentation A simple but odd single-process litmus test acquires and immediately releases a lock, then calls spin_is_locked(). LKMM acts as if it were a deadlock due to an assumption that spin_is_locked() will follow a spin_lock() or some other process's spin_unlock(). This litmus test manages to violate this assumption because the spin_is_locked() follows the same process's spin_unlock(). This series fixes this bug, reorganizes and optimizes the lock.cat model, and updates documentation" * tag 'lkmm.2024.07.12a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu: tools/memory-model: Code reorganization in lock.cat tools/memory-model: Fix bug in lock.cat tools/memory-model: Add access-marking.txt to README tools/memory-model: Add KCSAN LF mentorship session citation
2 parents c4b729b + ea6ee1b commit 253e1e9

File tree

3 files changed

+49
-27
lines changed

3 files changed

+49
-27
lines changed

tools/memory-model/Documentation/README

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,10 @@ DESCRIPTION OF FILES
4747
README
4848
This file.
4949

50+
access-marking.txt
51+
Guidelines for marking intentionally concurrent accesses to
52+
shared memory.
53+
5054
cheatsheet.txt
5155
Quick-reference guide to the Linux-kernel memory model.
5256

tools/memory-model/Documentation/access-marking.txt

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,8 @@ normal accesses to shared memory, that is "normal" as in accesses that do
66
not use read-modify-write atomic operations. It also describes how to
77
document these accesses, both with comments and with special assertions
88
processed by the Kernel Concurrency Sanitizer (KCSAN). This discussion
9-
builds on an earlier LWN article [1].
9+
builds on an earlier LWN article [1] and Linux Foundation mentorship
10+
session [2].
1011

1112

1213
ACCESS-MARKING OPTIONS
@@ -31,7 +32,7 @@ example:
3132
WRITE_ONCE(a, b + data_race(c + d) + READ_ONCE(e));
3233

3334
Neither plain C-language accesses nor data_race() (#1 and #2 above) place
34-
any sort of constraint on the compiler's choice of optimizations [2].
35+
any sort of constraint on the compiler's choice of optimizations [3].
3536
In contrast, READ_ONCE() and WRITE_ONCE() (#3 and #4 above) restrict the
3637
compiler's use of code-motion and common-subexpression optimizations.
3738
Therefore, if a given access is involved in an intentional data race,
@@ -594,5 +595,8 @@ REFERENCES
594595
[1] "Concurrency bugs should fear the big bad data-race detector (part 2)"
595596
https://lwn.net/Articles/816854/
596597

597-
[2] "Who's afraid of a big bad optimizing compiler?"
598+
[2] "The Kernel Concurrency Sanitizer"
599+
https://www.linuxfoundation.org/webinars/the-kernel-concurrency-sanitizer
600+
601+
[3] "Who's afraid of a big bad optimizing compiler?"
598602
https://lwn.net/Articles/793253/

tools/memory-model/lock.cat

Lines changed: 38 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,12 @@ flag ~empty LKR \ domain(lk-rmw) as unpaired-LKR
5454
*)
5555
empty ([LKW] ; po-loc ; [LKR]) \ (po-loc ; [UL] ; po-loc) as lock-nest
5656

57+
(*
58+
* In the same way, spin_is_locked() inside a critical section must always
59+
* return True (no RU events can be in a critical section for the same lock).
60+
*)
61+
empty ([LKW] ; po-loc ; [RU]) \ (po-loc ; [UL] ; po-loc) as nested-is-locked
62+
5763
(* The final value of a spinlock should not be tested *)
5864
flag ~empty [FW] ; loc ; [ALL-LOCKS] as lock-final
5965

@@ -79,42 +85,50 @@ empty ([UNMATCHED-LKW] ; loc ; [UNMATCHED-LKW]) \ id as unmatched-locks
7985
(* rfi for LF events: link each LKW to the LF events in its critical section *)
8086
let rfi-lf = ([LKW] ; po-loc ; [LF]) \ ([LKW] ; po-loc ; [UL] ; po-loc)
8187

82-
(* rfe for LF events *)
88+
(* Utility macro to convert a single pair to a single-edge relation *)
89+
let pair-to-relation p = p ++ 0
90+
91+
(*
92+
* If a given LF event e is outside a critical section, it cannot read
93+
* internally but it may read from an LKW event in another thread.
94+
* Compute the relation containing these possible edges.
95+
*)
96+
let possible-rfe-noncrit-lf e = (LKW * {e}) & loc & ext
97+
98+
(* Compute set of sets of possible rfe edges for LF events *)
8399
let all-possible-rfe-lf =
84100
(*
85-
* Given an LF event r, compute the possible rfe edges for that event
86-
* (all those starting from LKW events in other threads),
87-
* and then convert that relation to a set of single-edge relations.
101+
* Convert the possible-rfe-noncrit-lf relation for e
102+
* to a set of single edges
88103
*)
89-
let possible-rfe-lf r =
90-
let pair-to-relation p = p ++ 0
91-
in map pair-to-relation ((LKW * {r}) & loc & ext)
92-
(* Do this for each LF event r that isn't in rfi-lf *)
93-
in map possible-rfe-lf (LF \ range(rfi-lf))
104+
let set-of-singleton-rfe-lf e =
105+
map pair-to-relation (possible-rfe-noncrit-lf e)
106+
(* Do this for each LF event e that isn't in rfi-lf *)
107+
in map set-of-singleton-rfe-lf (LF \ range(rfi-lf))
94108

95109
(* Generate all rf relations for LF events *)
96110
with rfe-lf from cross(all-possible-rfe-lf)
97111
let rf-lf = rfe-lf | rfi-lf
98112

99113
(*
100-
* RU, i.e., spin_is_locked() returning False, is slightly different.
101-
* We rely on the memory model to rule out cases where spin_is_locked()
102-
* within one of the lock's critical sections returns False.
114+
* A given RU event e may read internally from the last po-previous UL,
115+
* or it may read from a UL event in another thread or the initial write.
116+
* Compute the relation containing these possible edges.
103117
*)
104-
105-
(* rfi for RU events: an RU may read from the last po-previous UL *)
106-
let rfi-ru = ([UL] ; po-loc ; [RU]) \ ([UL] ; po-loc ; [LKW] ; po-loc)
107-
108-
(* rfe for RU events: an RU may read from an external UL or the initial write *)
109-
let all-possible-rfe-ru =
110-
let possible-rfe-ru r =
111-
let pair-to-relation p = p ++ 0
112-
in map pair-to-relation (((UL | IW) * {r}) & loc & ext)
113-
in map possible-rfe-ru RU
118+
let possible-rf-ru e = (((UL * {e}) & po-loc) \
119+
([UL] ; po-loc ; [UL] ; po-loc)) |
120+
(((UL | IW) * {e}) & loc & ext)
121+
122+
(* Compute set of sets of possible rf edges for RU events *)
123+
let all-possible-rf-ru =
124+
(* Convert the possible-rf-ru relation for e to a set of single edges *)
125+
let set-of-singleton-rf-ru e =
126+
map pair-to-relation (possible-rf-ru e)
127+
(* Do this for each RU event e *)
128+
in map set-of-singleton-rf-ru RU
114129

115130
(* Generate all rf relations for RU events *)
116-
with rfe-ru from cross(all-possible-rfe-ru)
117-
let rf-ru = rfe-ru | rfi-ru
131+
with rf-ru from cross(all-possible-rf-ru)
118132

119133
(* Final rf relation *)
120134
let rf = rf | rf-lf | rf-ru

0 commit comments

Comments
 (0)