Skip to content

Commit 90c73cb

Browse files
mchehab and paulmckrcu
authored and committed
docs: RCU: Convert rcuref.txt to ReST
- Add a SPDX header; - Adjust document title; - Some whitespace fixes and new line breaks; - Mark literal blocks as such; - Add it to RCU/index.rst. Signed-off-by: Mauro Carvalho Chehab <[email protected]> Signed-off-by: Paul E. McKenney <[email protected]>
1 parent 43cb545 commit 90c73cb

File tree

2 files changed

+104
-96
lines changed

2 files changed

+104
-96
lines changed

Documentation/RCU/index.rst

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ RCU concepts
1818
whatisRCU
1919
rcu
2020
rculist_nulls
21+
rcuref
2122
torture
2223
listRCU
2324
NMI-RCU

Documentation/RCU/rcuref.txt renamed to Documentation/RCU/rcuref.rst

Lines changed: 103 additions & 96 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,8 @@
1-
Reference-count design for elements of lists/arrays protected by RCU.
1+
.. SPDX-License-Identifier: GPL-2.0
2+
3+
====================================================================
4+
Reference-count design for elements of lists/arrays protected by RCU
5+
====================================================================
26

37

48
Please note that the percpu-ref feature is likely your first
@@ -12,32 +16,33 @@ please read on.
1216
Reference counting on elements of lists which are protected by traditional
1317
reader/writer spinlocks or semaphores are straightforward:
1418

15-
CODE LISTING A:
16-
1. 2.
17-
add() search_and_reference()
18-
{ {
19-
alloc_object read_lock(&list_lock);
20-
... search_for_element
21-
atomic_set(&el->rc, 1); atomic_inc(&el->rc);
22-
write_lock(&list_lock); ...
23-
add_element read_unlock(&list_lock);
24-
... ...
25-
write_unlock(&list_lock); }
26-
}
27-
28-
3. 4.
29-
release_referenced() delete()
30-
{ {
31-
... write_lock(&list_lock);
32-
if(atomic_dec_and_test(&el->rc)) ...
33-
kfree(el);
34-
... remove_element
35-
} write_unlock(&list_lock);
36-
...
37-
if (atomic_dec_and_test(&el->rc))
38-
kfree(el);
39-
...
40-
}
19+
CODE LISTING A::
20+
21+
1. 2.
22+
add() search_and_reference()
23+
{ {
24+
alloc_object read_lock(&list_lock);
25+
... search_for_element
26+
atomic_set(&el->rc, 1); atomic_inc(&el->rc);
27+
write_lock(&list_lock); ...
28+
add_element read_unlock(&list_lock);
29+
... ...
30+
write_unlock(&list_lock); }
31+
}
32+
33+
3. 4.
34+
release_referenced() delete()
35+
{ {
36+
... write_lock(&list_lock);
37+
if(atomic_dec_and_test(&el->rc)) ...
38+
kfree(el);
39+
... remove_element
40+
} write_unlock(&list_lock);
41+
...
42+
if (atomic_dec_and_test(&el->rc))
43+
kfree(el);
44+
...
45+
}
4146

4247
If this list/array is made lock free using RCU as in changing the
4348
write_lock() in add() and delete() to spin_lock() and changing read_lock()
@@ -46,34 +51,35 @@ search_and_reference() could potentially hold reference to an element which
4651
has already been deleted from the list/array. Use atomic_inc_not_zero()
4752
in this scenario as follows:
4853

49-
CODE LISTING B:
50-
1. 2.
51-
add() search_and_reference()
52-
{ {
53-
alloc_object rcu_read_lock();
54-
... search_for_element
55-
atomic_set(&el->rc, 1); if (!atomic_inc_not_zero(&el->rc)) {
56-
spin_lock(&list_lock); rcu_read_unlock();
57-
return FAIL;
58-
add_element }
59-
... ...
60-
spin_unlock(&list_lock); rcu_read_unlock();
61-
} }
62-
3. 4.
63-
release_referenced() delete()
64-
{ {
65-
... spin_lock(&list_lock);
66-
if (atomic_dec_and_test(&el->rc)) ...
67-
call_rcu(&el->head, el_free); remove_element
68-
... spin_unlock(&list_lock);
69-
} ...
70-
if (atomic_dec_and_test(&el->rc))
71-
call_rcu(&el->head, el_free);
72-
...
73-
}
54+
CODE LISTING B::
55+
56+
1. 2.
57+
add() search_and_reference()
58+
{ {
59+
alloc_object rcu_read_lock();
60+
... search_for_element
61+
atomic_set(&el->rc, 1); if (!atomic_inc_not_zero(&el->rc)) {
62+
spin_lock(&list_lock); rcu_read_unlock();
63+
return FAIL;
64+
add_element }
65+
... ...
66+
spin_unlock(&list_lock); rcu_read_unlock();
67+
} }
68+
3. 4.
69+
release_referenced() delete()
70+
{ {
71+
... spin_lock(&list_lock);
72+
if (atomic_dec_and_test(&el->rc)) ...
73+
call_rcu(&el->head, el_free); remove_element
74+
... spin_unlock(&list_lock);
75+
} ...
76+
if (atomic_dec_and_test(&el->rc))
77+
call_rcu(&el->head, el_free);
78+
...
79+
}
7480

7581
Sometimes, a reference to the element needs to be obtained in the
76-
update (write) stream. In such cases, atomic_inc_not_zero() might be
82+
update (write) stream. In such cases, atomic_inc_not_zero() might be
7783
overkill, since we hold the update-side spinlock. One might instead
7884
use atomic_inc() in such cases.
7985

@@ -82,39 +88,40 @@ search_and_reference() code path. In such cases, the
8288
atomic_dec_and_test() may be moved from delete() to el_free()
8389
as follows:
8490

85-
CODE LISTING C:
86-
1. 2.
87-
add() search_and_reference()
88-
{ {
89-
alloc_object rcu_read_lock();
90-
... search_for_element
91-
atomic_set(&el->rc, 1); atomic_inc(&el->rc);
92-
spin_lock(&list_lock); ...
93-
94-
add_element rcu_read_unlock();
95-
... }
96-
spin_unlock(&list_lock); 4.
97-
} delete()
98-
3. {
99-
release_referenced() spin_lock(&list_lock);
100-
{ ...
101-
... remove_element
102-
if (atomic_dec_and_test(&el->rc)) spin_unlock(&list_lock);
103-
kfree(el); ...
104-
... call_rcu(&el->head, el_free);
105-
} ...
106-
5. }
107-
void el_free(struct rcu_head *rhp)
108-
{
109-
release_referenced();
110-
}
91+
CODE LISTING C::
92+
93+
1. 2.
94+
add() search_and_reference()
95+
{ {
96+
alloc_object rcu_read_lock();
97+
... search_for_element
98+
atomic_set(&el->rc, 1); atomic_inc(&el->rc);
99+
spin_lock(&list_lock); ...
100+
101+
add_element rcu_read_unlock();
102+
... }
103+
spin_unlock(&list_lock); 4.
104+
} delete()
105+
3. {
106+
release_referenced() spin_lock(&list_lock);
107+
{ ...
108+
... remove_element
109+
if (atomic_dec_and_test(&el->rc)) spin_unlock(&list_lock);
110+
kfree(el); ...
111+
... call_rcu(&el->head, el_free);
112+
} ...
113+
5. }
114+
void el_free(struct rcu_head *rhp)
115+
{
116+
release_referenced();
117+
}
111118

112119
The key point is that the initial reference added by add() is not removed
113120
until after a grace period has elapsed following removal. This means that
114121
search_and_reference() cannot find this element, which means that the value
115122
of el->rc cannot increase. Thus, once it reaches zero, there are no
116-
readers that can or ever will be able to reference the element. The
117-
element can therefore safely be freed. This in turn guarantees that if
123+
readers that can or ever will be able to reference the element. The
124+
element can therefore safely be freed. This in turn guarantees that if
118125
any reader finds the element, that reader may safely acquire a reference
119126
without checking the value of the reference counter.
120127

@@ -130,21 +137,21 @@ the eventual invocation of kfree(), which is usually not a problem on
130137
modern computer systems, even the small ones.
131138

132139
In cases where delete() can sleep, synchronize_rcu() can be called from
133-
delete(), so that el_free() can be subsumed into delete as follows:
134-
135-
4.
136-
delete()
137-
{
138-
spin_lock(&list_lock);
139-
...
140-
remove_element
141-
spin_unlock(&list_lock);
142-
...
143-
synchronize_rcu();
144-
if (atomic_dec_and_test(&el->rc))
145-
kfree(el);
146-
...
147-
}
140+
delete(), so that el_free() can be subsumed into delete as follows::
141+
142+
4.
143+
delete()
144+
{
145+
spin_lock(&list_lock);
146+
...
147+
remove_element
148+
spin_unlock(&list_lock);
149+
...
150+
synchronize_rcu();
151+
if (atomic_dec_and_test(&el->rc))
152+
kfree(el);
153+
...
154+
}
148155

149156
As additional examples in the kernel, the pattern in listing C is used by
150157
reference counting of struct pid, while the pattern in listing B is used by

0 commit comments

Comments
 (0)