@@ -21,7 +21,7 @@ static void rcu_exp_gp_seq_start(void)
 }
 
 /*
- * Return then value that expedited-grace-period counter will have
+ * Return the value that the expedited-grace-period counter will have
  * at the end of the current grace period.
  */
 static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
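/*
 * [Editorial aside, not part of the patch.] The expedited counter follows
 * the kernel's rcu_seq_* convention: the low-order bits hold grace-period
 * state and the upper bits count completed grace periods, so the "endval"
 * is the current value rounded up past the state bits. A minimal runnable
 * userspace model of that convention (a sketch with its own names, not the
 * kernel's code):
 */
#include <stdio.h>

#define SEQ_CTR_SHIFT	2			/* low bits carry GP state */
#define SEQ_STATE_MASK	((1UL << SEQ_CTR_SHIFT) - 1)

static unsigned long seq;			/* models expedited_sequence */

static void seq_start(void) { seq++; }		/* nonzero state: GP running */

static unsigned long seq_endval(void)		/* counter value at GP end */
{
	return (seq | SEQ_STATE_MASK) + 1;
}

static void seq_end(void) { seq = seq_endval(); }

int main(void)
{
	seq_start();
	printf("during GP: %lu, endval: %lu\n", seq, seq_endval());
	seq_end();
	printf("after GP: %lu\n", seq);		/* matches endval above */
	return 0;
}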
@@ -39,7 +39,9 @@ static void rcu_exp_gp_seq_end(void)
 }
 
 /*
- * Take a snapshot of the expedited-grace-period counter.
+ * Take a snapshot of the expedited-grace-period counter, which is the
+ * earliest value that will indicate that a full grace period has
+ * elapsed since the current time.
  */
 static unsigned long rcu_exp_gp_seq_snap(void)
 {
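/*
 * [Editorial aside, not part of the patch.] Extending the model above: a
 * snapshot must round up far enough that it only compares as "done" once a
 * full grace period beginning no earlier than the snapshot has ended, even
 * if one is already in flight. These helpers mirror the rcu_seq_snap()/
 * rcu_seq_done() arithmetic, but the names are the model's:
 */
static unsigned long seq_snap(void)
{
	/* Skip past any in-progress GP, then one full GP more. */
	return (seq + 2 * SEQ_STATE_MASK + 1) & ~SEQ_STATE_MASK;
}

static int seq_done(unsigned long s)
{
	return seq >= s;	/* kernel uses wraparound-safe ULONG_CMP_GE() */
}
/*
 * Typical pattern: s = seq_snap(); ... wait ...; seq_done(s) says whether
 * a full grace period has elapsed since the snapshot.
 */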
@@ -143,22 +145,18 @@ static void __maybe_unused sync_exp_reset_tree(void)
  * Return non-zero if there is no RCU expedited grace period in progress
  * for the specified rcu_node structure, in other words, if all CPUs and
  * tasks covered by the specified rcu_node structure have done their bit
- * for the current expedited grace period. Works only for preemptible
- * RCU -- other RCU implementation use other means.
- *
- * Caller must hold the specificed rcu_node structure's ->lock
+ * for the current expedited grace period.
  */
 static bool sync_rcu_exp_done(struct rcu_node *rnp)
 {
 	raw_lockdep_assert_held_rcu_node(rnp);
-
 	return rnp->exp_tasks == NULL &&
 	       READ_ONCE(rnp->expmask) == 0;
 }
 
 /*
- * Like sync_rcu_exp_done(), but this function assumes the caller doesn't
- * hold the rcu_node's ->lock, and will acquire and release the lock itself
+ * Like sync_rcu_exp_done(), but where the caller does not hold the
+ * rcu_node's ->lock.
  */
 static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
 {
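/*
 * [Editorial aside, not part of the patch.] The hunk cuts off at the
 * opening brace; the expected shape of the unlocked variant is simply
 * sync_rcu_exp_done() bracketed by the rcu_node lock, sketched here under
 * that assumption:
 *
 *	unsigned long flags;
 *	bool ret;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	ret = sync_rcu_exp_done(rnp);
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 *	return ret;
 */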
@@ -180,15 +178,14 @@ static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
  * which the task was queued or to one of that rcu_node structure's ancestors,
  * recursively up the tree. (Calm down, calm down, we do the recursion
  * iteratively!)
- *
- * Caller must hold the specified rcu_node structure's ->lock.
  */
 static void __rcu_report_exp_rnp(struct rcu_node *rnp,
 				 bool wake, unsigned long flags)
 	__releases(rnp->lock)
 {
 	unsigned long mask;
 
+	raw_lockdep_assert_held_rcu_node(rnp);
 	for (;;) {
 		if (!sync_rcu_exp_done(rnp)) {
 			if (!rnp->expmask)
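/*
 * [Editorial aside, not part of the patch.] The "iterative recursion" is a
 * plain loop: clear this node's bit in its parent's ->expmask and climb
 * until either work remains at the current level or the root is reached.
 * A skeleton, with lock hand-off and wakeups elided:
 *
 *	for (;;) {
 *		if (!sync_rcu_exp_done(rnp))
 *			break;			// this level not yet done
 *		if (!rnp->parent)
 *			break;			// root reached: wake waiters
 *		mask = rnp->grpmask;		// our bit in the parent
 *		rnp = rnp->parent;
 *		rnp->expmask &= ~mask;		// report completion upward
 *	}
 */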
@@ -452,6 +449,10 @@ static void sync_rcu_exp_select_cpus(void)
 		flush_work(&rnp->rew.rew_work);
 }
 
+/*
+ * Wait for the expedited grace period to elapse, issuing any needed
+ * RCU CPU stall warnings along the way.
+ */
 static void synchronize_sched_expedited_wait(void)
 {
 	int cpu;
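/*
 * [Editorial aside, not part of the patch.] The comment added above
 * describes the usual shape of such a wait: sleep on the expedited wait
 * queue with a stall-length timeout, and on each timeout print an RCU CPU
 * stall warning rather than give up. In pseudocode, with the helper names
 * hypothetical:
 *
 *	for (;;) {
 *		if (wait_for_exp_gp_with_timeout(stall_timeout))
 *			return;			// grace period elapsed
 *		print_exp_stall_warning();	// warn, then keep waiting
 *	}
 */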
@@ -781,7 +782,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * implementations, it is still unfriendly to real-time workloads, so is
  * thus not recommended for any sort of common-case code. In fact, if
  * you are using synchronize_rcu_expedited() in a loop, please restructure
- * your code to batch your updates, and then Use a single synchronize_rcu()
+ * your code to batch your updates, and then use a single synchronize_rcu()
  * instead.
  *
  * This has the same semantics as (but is more brutal than) synchronize_rcu().
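/*
 * [Editorial aside, not part of the patch.] The batching advice above, as a
 * sketch; struct my_item, mylist, and the second list_head used for staging
 * are hypothetical:
 *
 *	struct my_item {
 *		struct list_head node;		// on mylist, RCU-protected
 *		struct list_head free_node;	// staging for deferred kfree()
 *	};
 *	LIST_HEAD(to_free);
 *
 *	list_for_each_entry_safe(p, n, &mylist, node) {
 *		list_del_rcu(&p->node);		// unlink each item ...
 *		list_add(&p->free_node, &to_free);
 *	}
 *	synchronize_rcu();			// ... one GP covers them all
 *	list_for_each_entry_safe(p, n, &to_free, free_node)
 *		kfree(p);
 */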