Commit 7121dd9

Frederic Weisbecker authored and Neeraj Upadhyay committed
rcu/nocb: Introduce nocb mutex
The barrier_mutex is currently used to protect (de-)offloading operations, preventing nocb_lock locking imbalance in rcu_barrier() and the shrinker, as well as misordered RCU barrier invocation.

Since RCU (de-)offloading is going to happen on offline CPUs, an RCU barrier will have to be executed while transitioning from offloaded to de-offloaded state, and this can't happen while holding the barrier_mutex.

Introduce a NOCB mutex to protect (de-)offloading transitions. The barrier_mutex is still held for now where necessary to avoid reordering of barrier callbacks and nocb_lock imbalance.

Signed-off-by: Frederic Weisbecker <[email protected]>
Signed-off-by: Paul E. McKenney <[email protected]>
Reviewed-by: Paul E. McKenney <[email protected]>
Signed-off-by: Neeraj Upadhyay <[email protected]>
1 parent 7be88a8 commit 7121dd9

File tree

4 files changed: 17 additions & 8 deletions
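Before the per-file hunks, a condensed sketch of the lock nesting that the (de-)offload paths end up with after this change may help. It is only a sketch: the function name nocb_toggle_sketch() is invented for illustration, and the code would build only inside kernel/rcu/ where rcu_state is visible. The ordering itself, cpus_read_lock() first, then the new nocb_mutex, then barrier_mutex nested inside, is taken directly from the rcu_nocb_cpu_offload() and rcu_nocb_cpu_deoffload() hunks in kernel/rcu/tree_nocb.h below.

static int nocb_toggle_sketch(int cpu)
{
	int ret = 0;

	cpus_read_lock();
	/* New outermost mutex: serializes (de-)offloading transitions. */
	mutex_lock(&rcu_state.nocb_mutex);
	/* barrier_mutex is still taken, nested inside, to keep barrier callbacks ordered. */
	mutex_lock(&rcu_state.barrier_mutex);

	/* ... look up this CPU's rcu_data and offload or de-offload it here ... */

	mutex_unlock(&rcu_state.barrier_mutex);
	mutex_unlock(&rcu_state.nocb_mutex);
	cpus_read_unlock();

	return ret;
}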

kernel/rcu/tree.c

Lines changed: 3 additions & 0 deletions
@@ -97,6 +97,9 @@ static struct rcu_state rcu_state = {
 	.srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
 					       rcu_sr_normal_gp_cleanup_work),
 	.srs_cleanups_pending = ATOMIC_INIT(0),
+#ifdef CONFIG_RCU_NOCB_CPU
+	.nocb_mutex = __MUTEX_INITIALIZER(rcu_state.nocb_mutex),
+#endif
 };
 
 /* Dump rcu_node combining tree at boot to verify correct setup. */

kernel/rcu/tree.h

Lines changed: 1 addition & 0 deletions
@@ -421,6 +421,7 @@ struct rcu_state {
 	atomic_t srs_cleanups_pending;		/* srs inflight worker cleanups. */
 
 #ifdef CONFIG_RCU_NOCB_CPU
+	struct mutex nocb_mutex;		/* Guards (de-)offloading */
 	int nocb_is_setup;			/* nocb is setup from boot */
 #endif
 };

kernel/rcu/tree_nocb.h

Lines changed: 12 additions & 8 deletions
@@ -1141,6 +1141,7 @@ int rcu_nocb_cpu_deoffload(int cpu)
 	int ret = 0;
 
 	cpus_read_lock();
+	mutex_lock(&rcu_state.nocb_mutex);
 	mutex_lock(&rcu_state.barrier_mutex);
 	if (rcu_rdp_is_offloaded(rdp)) {
 		if (cpu_online(cpu)) {
@@ -1153,6 +1154,7 @@ int rcu_nocb_cpu_deoffload(int cpu)
 		}
 	}
 	mutex_unlock(&rcu_state.barrier_mutex);
+	mutex_unlock(&rcu_state.nocb_mutex);
 	cpus_read_unlock();
 
 	return ret;
@@ -1228,6 +1230,7 @@ int rcu_nocb_cpu_offload(int cpu)
 	int ret = 0;
 
 	cpus_read_lock();
+	mutex_lock(&rcu_state.nocb_mutex);
 	mutex_lock(&rcu_state.barrier_mutex);
 	if (!rcu_rdp_is_offloaded(rdp)) {
 		if (cpu_online(cpu)) {
@@ -1240,6 +1243,7 @@ int rcu_nocb_cpu_offload(int cpu)
 		}
 	}
 	mutex_unlock(&rcu_state.barrier_mutex);
+	mutex_unlock(&rcu_state.nocb_mutex);
 	cpus_read_unlock();
 
 	return ret;
@@ -1257,7 +1261,7 @@ lazy_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 		return 0;
 
 	/* Protect rcu_nocb_mask against concurrent (de-)offloading. */
-	if (!mutex_trylock(&rcu_state.barrier_mutex))
+	if (!mutex_trylock(&rcu_state.nocb_mutex))
 		return 0;
 
 	/* Snapshot count of all CPUs */
@@ -1267,7 +1271,7 @@ lazy_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 		count += READ_ONCE(rdp->lazy_len);
 	}
 
-	mutex_unlock(&rcu_state.barrier_mutex);
+	mutex_unlock(&rcu_state.nocb_mutex);
 
 	return count ? count : SHRINK_EMPTY;
 }
@@ -1285,9 +1289,9 @@ lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	 * Protect against concurrent (de-)offloading. Otherwise nocb locking
 	 * may be ignored or imbalanced.
 	 */
-	if (!mutex_trylock(&rcu_state.barrier_mutex)) {
+	if (!mutex_trylock(&rcu_state.nocb_mutex)) {
 		/*
-		 * But really don't insist if barrier_mutex is contended since we
+		 * But really don't insist if nocb_mutex is contended since we
 		 * can't guarantee that it will never engage in a dependency
 		 * chain involving memory allocation. The lock is seldom contended
 		 * anyway.
@@ -1326,7 +1330,7 @@ lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 			break;
 	}
 
-	mutex_unlock(&rcu_state.barrier_mutex);
+	mutex_unlock(&rcu_state.nocb_mutex);
 
 	return count ? count : SHRINK_STOP;
 }
@@ -1473,15 +1477,15 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
 	 * No need to protect against concurrent rcu_barrier()
 	 * because the number of callbacks should be 0 for a non-boot CPU,
 	 * therefore rcu_barrier() shouldn't even try to grab the nocb_lock.
-	 * But hold barrier_mutex to avoid nocb_lock imbalance from shrinker.
+	 * But hold nocb_mutex to avoid nocb_lock imbalance from shrinker.
 	 */
 	WARN_ON_ONCE(system_state > SYSTEM_BOOTING && rcu_segcblist_n_cbs(&rdp->cblist));
-	mutex_lock(&rcu_state.barrier_mutex);
+	mutex_lock(&rcu_state.nocb_mutex);
 	if (rcu_rdp_is_offloaded(rdp)) {
 		rcu_nocb_rdp_deoffload(rdp);
 		cpumask_clear_cpu(cpu, rcu_nocb_mask);
 	}
-	mutex_unlock(&rcu_state.barrier_mutex);
+	mutex_unlock(&rcu_state.nocb_mutex);
 }
 
 /* How many CB CPU IDs per GP kthread? Default of -1 for sqrt(nr_cpu_ids). */

kernel/rcu/tree_plugin.h

Lines changed: 1 addition & 0 deletions
@@ -28,6 +28,7 @@ static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
 		  !(lockdep_is_held(&rcu_state.barrier_mutex) ||
 		    (IS_ENABLED(CONFIG_HOTPLUG_CPU) && lockdep_is_cpus_held()) ||
 		    lockdep_is_held(&rdp->nocb_lock) ||
+		    lockdep_is_held(&rcu_state.nocb_mutex) ||
 		    (!(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible()) &&
 		     rdp == this_cpu_ptr(&rcu_data)) ||
 		    rcu_current_is_nocb_kthread(rdp)),
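One practical effect of the tree_plugin.h hunk above is that holding the new nocb_mutex now satisfies the lockdep assertion inside rcu_rdp_is_offloaded(), just like barrier_mutex or the per-CPU nocb_lock. As a hedged, hypothetical illustration (the helper name is invented and, again, this would only build inside kernel/rcu/):

static bool check_offloaded_under_nocb_mutex(struct rcu_data *rdp)
{
	bool offloaded;

	/* Holding nocb_mutex alone now satisfies the lockdep check in rcu_rdp_is_offloaded(). */
	mutex_lock(&rcu_state.nocb_mutex);
	offloaded = rcu_rdp_is_offloaded(rdp);
	mutex_unlock(&rcu_state.nocb_mutex);

	return offloaded;
}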
