Skip to content

Commit 5a04848

Browse files
committed
rcu: Consolidate initialization and CPU-hotplug code
This commit consolidates the initialization and CPU-hotplug code at the end of kernel/rcu/tree.c. This is strictly a code-motion commit. No functionality has changed. Signed-off-by: Paul E. McKenney <[email protected]>
1 parent c004d23 commit 5a04848

File tree

1 file changed

+158
-156
lines changed

1 file changed

+158
-156
lines changed

kernel/rcu/tree.c

Lines changed: 158 additions & 156 deletions
Original file line number | Diff line number | Diff line change
@@ -144,14 +144,16 @@ static int rcu_scheduler_fully_active __read_mostly;
144144

145145
static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
146146
unsigned long gps, unsigned long flags);
147-
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
148-
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
149147
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
150148
static void invoke_rcu_core(void);
151149
static void rcu_report_exp_rdp(struct rcu_data *rdp);
152150
static void sync_sched_exp_online_cleanup(int cpu);
153151
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
154152
static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
153+
static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
154+
static bool rcu_init_invoked(void);
155+
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
156+
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
155157

156158
/*
157159
* rcuc/rcub/rcuop kthread realtime priority. The "rcuop"
@@ -214,27 +216,6 @@ EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
214216
*/
215217
#define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays for debugging. */
216218

217-
/*
218-
* Compute the mask of online CPUs for the specified rcu_node structure.
219-
* This will not be stable unless the rcu_node structure's ->lock is
220-
* held, but the bit corresponding to the current CPU will be stable
221-
* in most contexts.
222-
*/
223-
static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
224-
{
225-
return READ_ONCE(rnp->qsmaskinitnext);
226-
}
227-
228-
/*
229-
* Is the CPU corresponding to the specified rcu_data structure online
230-
* from RCU's perspective? This perspective is given by that structure's
231-
* ->qsmaskinitnext field rather than by the global cpu_online_mask.
232-
*/
233-
static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
234-
{
235-
return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
236-
}
237-
238219
/*
239220
* Return true if an RCU grace period is in progress. The READ_ONCE()s
240221
* permit this function to be invoked without holding the root rcu_node
@@ -734,46 +715,6 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
734715
smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
735716
}
736717

737-
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
738-
739-
/*
740-
* Is the current CPU online as far as RCU is concerned?
741-
*
742-
* Disable preemption to avoid false positives that could otherwise
743-
* happen due to the current CPU number being sampled, this task being
744-
* preempted, its old CPU being taken offline, resuming on some other CPU,
745-
* then determining that its old CPU is now offline.
746-
*
747-
* Disable checking if in an NMI handler because we cannot safely
748-
* report errors from NMI handlers anyway. In addition, it is OK to use
749-
* RCU on an offline processor during initial boot, hence the check for
750-
* rcu_scheduler_fully_active.
751-
*/
752-
bool rcu_lockdep_current_cpu_online(void)
753-
{
754-
struct rcu_data *rdp;
755-
bool ret = false;
756-
757-
if (in_nmi() || !rcu_scheduler_fully_active)
758-
return true;
759-
preempt_disable_notrace();
760-
rdp = this_cpu_ptr(&rcu_data);
761-
/*
762-
* Strictly, we care here about the case where the current CPU is
763-
* in rcu_cpu_starting() and thus has an excuse for rdp->grpmask
764-
* not being up to date. So arch_spin_is_locked() might have a
765-
* false positive if it's held by some *other* CPU, but that's
766-
* OK because that just means a false *negative* on the warning.
767-
*/
768-
if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
769-
ret = true;
770-
preempt_enable_notrace();
771-
return ret;
772-
}
773-
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
774-
775-
#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
776-
777718
/*
778719
* When trying to report a quiescent state on behalf of some other CPU,
779720
* it is our responsibility to check for and handle potential overflow
@@ -1350,13 +1291,6 @@ static void rcu_strict_gp_boundary(void *unused)
13501291
invoke_rcu_core();
13511292
}
13521293

1353-
// Has rcu_init() been invoked? This is used (for example) to determine
1354-
// whether spinlocks may be acquired safely.
1355-
static bool rcu_init_invoked(void)
1356-
{
1357-
return !!rcu_state.n_online_cpus;
1358-
}
1359-
13601294
// Make the polled API aware of the beginning of a grace period.
13611295
static void rcu_poll_gp_seq_start(unsigned long *snap)
13621296
{
@@ -2091,92 +2025,6 @@ rcu_check_quiescent_state(struct rcu_data *rdp)
20912025
rcu_report_qs_rdp(rdp);
20922026
}
20932027

2094-
/*
2095-
* Near the end of the offline process. Trace the fact that this CPU
2096-
* is going offline.
2097-
*/
2098-
int rcutree_dying_cpu(unsigned int cpu)
2099-
{
2100-
bool blkd;
2101-
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2102-
struct rcu_node *rnp = rdp->mynode;
2103-
2104-
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2105-
return 0;
2106-
2107-
blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
2108-
trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
2109-
blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
2110-
return 0;
2111-
}
2112-
2113-
/*
2114-
* All CPUs for the specified rcu_node structure have gone offline,
2115-
* and all tasks that were preempted within an RCU read-side critical
2116-
* section while running on one of those CPUs have since exited their RCU
2117-
* read-side critical section. Some other CPU is reporting this fact with
2118-
* the specified rcu_node structure's ->lock held and interrupts disabled.
2119-
* This function therefore goes up the tree of rcu_node structures,
2120-
* clearing the corresponding bits in the ->qsmaskinit fields. Note that
2121-
* the leaf rcu_node structure's ->qsmaskinit field has already been
2122-
* updated.
2123-
*
2124-
* This function does check that the specified rcu_node structure has
2125-
* all CPUs offline and no blocked tasks, so it is OK to invoke it
2126-
* prematurely. That said, invoking it after the fact will cost you
2127-
* a needless lock acquisition. So once it has done its work, don't
2128-
* invoke it again.
2129-
*/
2130-
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2131-
{
2132-
long mask;
2133-
struct rcu_node *rnp = rnp_leaf;
2134-
2135-
raw_lockdep_assert_held_rcu_node(rnp_leaf);
2136-
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2137-
WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2138-
WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2139-
return;
2140-
for (;;) {
2141-
mask = rnp->grpmask;
2142-
rnp = rnp->parent;
2143-
if (!rnp)
2144-
break;
2145-
raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2146-
rnp->qsmaskinit &= ~mask;
2147-
/* Between grace periods, so better already be zero! */
2148-
WARN_ON_ONCE(rnp->qsmask);
2149-
if (rnp->qsmaskinit) {
2150-
raw_spin_unlock_rcu_node(rnp);
2151-
/* irqs remain disabled. */
2152-
return;
2153-
}
2154-
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2155-
}
2156-
}
2157-
2158-
/*
2159-
* The CPU has been completely removed, and some other CPU is reporting
2160-
* this fact from process context. Do the remainder of the cleanup.
2161-
* There can only be one CPU hotplug operation at a time, so no need for
2162-
* explicit locking.
2163-
*/
2164-
int rcutree_dead_cpu(unsigned int cpu)
2165-
{
2166-
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2167-
struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
2168-
2169-
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2170-
return 0;
2171-
2172-
WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
2173-
/* Adjust any no-longer-needed kthreads. */
2174-
rcu_boost_kthread_setaffinity(rnp, -1);
2175-
// Stop-machine done, so allow nohz_full to disable tick.
2176-
tick_dep_clear(TICK_DEP_BIT_RCU);
2177-
return 0;
2178-
}
2179-
21802028
/*
21812029
* Invoke any RCU callbacks that have made it to the end of their grace
21822030
* period. Throttle as specified by rdp->blimit.
@@ -4079,6 +3927,160 @@ void rcu_barrier(void)
40793927
}
40803928
EXPORT_SYMBOL_GPL(rcu_barrier);
40813929

3930+
/*
3931+
* Compute the mask of online CPUs for the specified rcu_node structure.
3932+
* This will not be stable unless the rcu_node structure's ->lock is
3933+
* held, but the bit corresponding to the current CPU will be stable
3934+
* in most contexts.
3935+
*/
3936+
static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
3937+
{
3938+
return READ_ONCE(rnp->qsmaskinitnext);
3939+
}
3940+
3941+
/*
3942+
* Is the CPU corresponding to the specified rcu_data structure online
3943+
* from RCU's perspective? This perspective is given by that structure's
3944+
* ->qsmaskinitnext field rather than by the global cpu_online_mask.
3945+
*/
3946+
static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
3947+
{
3948+
return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
3949+
}
3950+
3951+
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
3952+
3953+
/*
3954+
* Is the current CPU online as far as RCU is concerned?
3955+
*
3956+
* Disable preemption to avoid false positives that could otherwise
3957+
* happen due to the current CPU number being sampled, this task being
3958+
* preempted, its old CPU being taken offline, resuming on some other CPU,
3959+
* then determining that its old CPU is now offline.
3960+
*
3961+
* Disable checking if in an NMI handler because we cannot safely
3962+
* report errors from NMI handlers anyway. In addition, it is OK to use
3963+
* RCU on an offline processor during initial boot, hence the check for
3964+
* rcu_scheduler_fully_active.
3965+
*/
3966+
bool rcu_lockdep_current_cpu_online(void)
3967+
{
3968+
struct rcu_data *rdp;
3969+
bool ret = false;
3970+
3971+
if (in_nmi() || !rcu_scheduler_fully_active)
3972+
return true;
3973+
preempt_disable_notrace();
3974+
rdp = this_cpu_ptr(&rcu_data);
3975+
/*
3976+
* Strictly, we care here about the case where the current CPU is
3977+
* in rcu_cpu_starting() and thus has an excuse for rdp->grpmask
3978+
* not being up to date. So arch_spin_is_locked() might have a
3979+
* false positive if it's held by some *other* CPU, but that's
3980+
* OK because that just means a false *negative* on the warning.
3981+
*/
3982+
if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
3983+
ret = true;
3984+
preempt_enable_notrace();
3985+
return ret;
3986+
}
3987+
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
3988+
3989+
#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
3990+
3991+
// Has rcu_init() been invoked? This is used (for example) to determine
3992+
// whether spinlocks may be acquired safely.
3993+
static bool rcu_init_invoked(void)
3994+
{
3995+
return !!rcu_state.n_online_cpus;
3996+
}
3997+
3998+
/*
3999+
* Near the end of the offline process. Trace the fact that this CPU
4000+
* is going offline.
4001+
*/
4002+
int rcutree_dying_cpu(unsigned int cpu)
4003+
{
4004+
bool blkd;
4005+
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4006+
struct rcu_node *rnp = rdp->mynode;
4007+
4008+
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
4009+
return 0;
4010+
4011+
blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
4012+
trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
4013+
blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
4014+
return 0;
4015+
}
4016+
4017+
/*
4018+
* All CPUs for the specified rcu_node structure have gone offline,
4019+
* and all tasks that were preempted within an RCU read-side critical
4020+
* section while running on one of those CPUs have since exited their RCU
4021+
* read-side critical section. Some other CPU is reporting this fact with
4022+
* the specified rcu_node structure's ->lock held and interrupts disabled.
4023+
* This function therefore goes up the tree of rcu_node structures,
4024+
* clearing the corresponding bits in the ->qsmaskinit fields. Note that
4025+
* the leaf rcu_node structure's ->qsmaskinit field has already been
4026+
* updated.
4027+
*
4028+
* This function does check that the specified rcu_node structure has
4029+
* all CPUs offline and no blocked tasks, so it is OK to invoke it
4030+
* prematurely. That said, invoking it after the fact will cost you
4031+
* a needless lock acquisition. So once it has done its work, don't
4032+
* invoke it again.
4033+
*/
4034+
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
4035+
{
4036+
long mask;
4037+
struct rcu_node *rnp = rnp_leaf;
4038+
4039+
raw_lockdep_assert_held_rcu_node(rnp_leaf);
4040+
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
4041+
WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
4042+
WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
4043+
return;
4044+
for (;;) {
4045+
mask = rnp->grpmask;
4046+
rnp = rnp->parent;
4047+
if (!rnp)
4048+
break;
4049+
raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4050+
rnp->qsmaskinit &= ~mask;
4051+
/* Between grace periods, so better already be zero! */
4052+
WARN_ON_ONCE(rnp->qsmask);
4053+
if (rnp->qsmaskinit) {
4054+
raw_spin_unlock_rcu_node(rnp);
4055+
/* irqs remain disabled. */
4056+
return;
4057+
}
4058+
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4059+
}
4060+
}
4061+
4062+
/*
4063+
* The CPU has been completely removed, and some other CPU is reporting
4064+
* this fact from process context. Do the remainder of the cleanup.
4065+
* There can only be one CPU hotplug operation at a time, so no need for
4066+
* explicit locking.
4067+
*/
4068+
int rcutree_dead_cpu(unsigned int cpu)
4069+
{
4070+
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4071+
struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
4072+
4073+
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
4074+
return 0;
4075+
4076+
WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
4077+
/* Adjust any no-longer-needed kthreads. */
4078+
rcu_boost_kthread_setaffinity(rnp, -1);
4079+
// Stop-machine done, so allow nohz_full to disable tick.
4080+
tick_dep_clear(TICK_DEP_BIT_RCU);
4081+
return 0;
4082+
}
4083+
40824084
/*
40834085
* Propagate ->qsinitmask bits up the rcu_node tree to account for the
40844086
* first CPU in a given leaf rcu_node structure coming online. The caller

0 commit comments

Comments (0)