7 files changed: +11 -11 lines changed

@@ -154,7 +154,7 @@ static inline void exit_tasks_rcu_finish(void) { }
  *
  * This macro resembles cond_resched(), except that it is defined to
  * report potential quiescent states to RCU-tasks even if the cond_resched()
- * machinery were to be shut off, as some advocate for PREEMPT kernels.
+ * machinery were to be shut off, as some advocate for PREEMPTION kernels.
  */
 #define cond_resched_tasks_rcu_qs() \
 do { \
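For context (not part of this patch), a minimal sketch of the kind of long-running kernel loop cond_resched_tasks_rcu_qs() is intended for; the kthread function and its work are hypothetical, not taken from this diff.

#include <linux/kthread.h>
#include <linux/rcupdate.h>

/* Hypothetical long-running kthread loop; illustrative only. */
static int scan_thread_fn(void *unused)
{
        while (!kthread_should_stop()) {
                /* ... process one batch of work ... */

                /*
                 * Report a potential RCU-tasks quiescent state and give the
                 * scheduler a chance, even on kernels where cond_resched()
                 * itself is compiled away.
                 */
                cond_resched_tasks_rcu_qs();
        }
        return 0;
}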
@@ -598,7 +598,7 @@ do { \
  *
  * You can avoid reading and understanding the next paragraph by
  * following this rule: don't put anything in an rcu_read_lock() RCU
- * read-side critical section that would block in a !PREEMPT kernel.
+ * read-side critical section that would block in a !PREEMPTION kernel.
  * But if you want the full story, read on!
  *
  * In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU),
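For context (not part of this patch), a minimal sketch of a read-side critical section that follows the rule above; struct foo and gp are hypothetical names.

#include <linux/rcupdate.h>

struct foo {
        int val;
};

static struct foo __rcu *gp;            /* Hypothetical RCU-protected pointer. */

static int read_foo_val(void)
{
        struct foo *p;
        int v = -1;

        rcu_read_lock();                /* Begin read-side critical section. */
        p = rcu_dereference(gp);        /* Legal only inside the critical section. */
        if (p)
                v = p->val;             /* No sleeping or blocking calls in here. */
        rcu_read_unlock();              /* End critical section without ever blocking. */
        return v;
}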
@@ -201,8 +201,8 @@ config RCU_NOCB_CPU
 	  specified at boot time by the rcu_nocbs parameter.  For each
 	  such CPU, a kthread ("rcuox/N") will be created to invoke
 	  callbacks, where the "N" is the CPU being offloaded, and where
-	  the "p" for RCU-preempt (PREEMPT kernels) and "s" for RCU-sched
-	  (!PREEMPT kernels).  Nothing prevents this kthread from running
+	  the "p" for RCU-preempt (PREEMPTION kernels) and "s" for RCU-sched
+	  (!PREEMPTION kernels).  Nothing prevents this kthread from running
 	  on the specified CPUs, but (1) the kthreads may be preempted
 	  between each callback, and (2) affinity or cgroups can be used
 	  to force the kthreads to run on whatever set of CPUs is desired.
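Usage note (not part of the diff; the CPU list is illustrative): the kthreads described in this help text exist only for CPUs named in the rcu_nocbs= boot parameter, so booting with, for example, rcu_nocbs=1-7 offloads callback invocation for CPUs 1-7 to the rcuox/N kthreads described above.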
@@ -1730,7 +1730,7 @@ static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
 // Give the scheduler a chance, even on nohz_full CPUs.
 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
 {
-	if (IS_ENABLED(CONFIG_PREEMPT) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
+	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
 		// Real call_rcu() floods hit userspace, so emulate that.
 		if (need_resched() || (iter & 0xfff))
 			schedule();
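Side note (illustrative, not part of the patch; the helper name is hypothetical): IS_ENABLED() from <linux/kconfig.h> is what makes the changed line a compile-time constant test, roughly as follows.

#include <linux/kconfig.h>

static bool want_fwd_prog_resched(void)
{
        /*
         * IS_ENABLED(CONFIG_FOO) expands to constant 1 when CONFIG_FOO is
         * set (=y, or =m for tristate options) and 0 otherwise, so the
         * compiler can drop the dead branch while still type-checking it.
         */
        return IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL);
}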
@@ -103,7 +103,7 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
 /*
  * Workqueue handler to drive one grace period and invoke any callbacks
- * that become ready as a result.  Single-CPU and !PREEMPT operation
+ * that become ready as a result.  Single-CPU and !PREEMPTION operation
  * means that we get away with murder on synchronization. ;-)
  */
 void srcu_drive_gp(struct work_struct *wp)
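Background sketch (illustrative, not part of the patch; my_srcu is a hypothetical SRCU domain): the grace period that srcu_drive_gp() drives is the one waited on by the usual SRCU API.

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);            /* Hypothetical SRCU domain. */

static void my_reader(void)
{
        int idx;

        idx = srcu_read_lock(&my_srcu); /* SRCU readers may sleep, unlike rcu_read_lock(). */
        /* ... access data protected by my_srcu ... */
        srcu_read_unlock(&my_srcu, idx);
}

static void my_updater(void)
{
        /* ... unpublish the old data ... */
        synchronize_srcu(&my_srcu);     /* On Tiny SRCU, driven by srcu_drive_gp() via a workqueue. */
        /* ... now safe to free the old data ... */
}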
@@ -2698,9 +2698,9 @@ EXPORT_SYMBOL_GPL(kfree_call_rcu);
 
 /*
  * During early boot, any blocking grace-period wait automatically
- * implies a grace period.  Later on, this is never the case for PREEMPT.
+ * implies a grace period.  Later on, this is never the case for PREEMPTION.
  *
- * Howevr, because a context switch is a grace period for !PREEMPT, any
+ * However, because a context switch is a grace period for !PREEMPTION, any
  * blocking grace-period wait automatically implies a grace period if
  * there is only one CPU online at any point time during execution of
  * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
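To make the comment's reasoning concrete (a sketch of the idea only, not the actual tree.c code; the helper name and early_boot flag are hypothetical): a blocking grace-period wait can itself be treated as a grace period when either condition below holds.

#include <linux/cpumask.h>
#include <linux/kconfig.h>

/* Hypothetical helper: when does "blocking wait" imply "grace period"? */
static bool blocking_wait_implies_gp(bool early_boot)
{
        /* During early boot, any blocking wait covers a grace period. */
        if (early_boot)
                return true;

        /* !PREEMPTION with one CPU online: our own context switch is a grace period. */
        return !IS_ENABLED(CONFIG_PREEMPTION) && num_online_cpus() <= 1;
}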
@@ -670,7 +670,7 @@ static void rcu_exp_handler(void *unused)
 	}
 }
 
-/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
+/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
 static void sync_sched_exp_online_cleanup(int cpu)
 {
 }
@@ -789,7 +789,7 @@ static void __init rcu_bootup_announce(void)
 }
 
 /*
- * Note a quiescent state for PREEMPT=n.  Because we do not need to know
+ * Note a quiescent state for PREEMPTION=n.  Because we do not need to know
  * how many quiescent states passed, just if there was at least one since
  * the start of the grace period, this just sets a flag.  The caller must
  * have disabled preemption.
@@ -839,7 +839,7 @@ void rcu_all_qs(void)
 EXPORT_SYMBOL_GPL(rcu_all_qs);
 
 /*
- * Note a PREEMPT=n context switch.  The caller must have disabled interrupts.
+ * Note a PREEMPTION=n context switch.  The caller must have disabled interrupts.
  */
 void rcu_note_context_switch(bool preempt)
 {