Commit cfa6705

Sebastian Andrzej Siewior authored and Ingo Molnar committed
radix-tree: Use local_lock for protection
The radix-tree and idr preload mechanisms use preempt_disable() to protect the complete operation between xxx_preload() and xxx_preload_end().

As the code inside the preempt-disabled section acquires regular spinlocks, which are converted to 'sleeping' spinlocks on a PREEMPT_RT kernel, and eventually calls into a memory allocator, this conflicts with the RT semantics.

Convert it to a local_lock, which allows RT kernels to substitute it with a real per-CPU lock. On non-RT kernels this maps to preempt_disable() as before, but also provides lockdep coverage of the critical region. No functional change.

Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Acked-by: Peter Zijlstra <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
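In essence, the patch embeds a local_lock_t in the per-CPU preload pool and replaces the bare preempt_disable()/preempt_enable() pair with local_lock()/local_unlock() on it. A minimal sketch of that pattern follows; the structure and function names here are hypothetical, the real ones appear in the diff below.

/*
 * Minimal sketch of the preempt_disable() -> local_lock() conversion
 * described above. The names are illustrative only; the actual
 * structure is radix_tree_preloads in the diff below.
 */
#include <linux/local_lock.h>
#include <linux/percpu.h>

struct my_percpu_pool {
	local_lock_t lock;	/* protects nr on this CPU */
	unsigned int nr;
};

static DEFINE_PER_CPU(struct my_percpu_pool, my_pool) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void my_pool_bump(void)
{
	local_lock(&my_pool.lock);	/* was: preempt_disable() */
	this_cpu_inc(my_pool.nr);
	local_unlock(&my_pool.lock);	/* was: preempt_enable() */
}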
1 parent 9171072 commit cfa6705

3 files changed: +20, -13 lines

include/linux/idr.h

Lines changed: 1 addition & 1 deletion
@@ -171,7 +171,7 @@ static inline bool idr_is_empty(const struct idr *idr)
  */
 static inline void idr_preload_end(void)
 {
-	preempt_enable();
+	local_unlock(&radix_tree_preloads.lock);
 }
 
 /**
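For context, idr_preload()/idr_preload_end() are typically wrapped around an idr_alloc() performed under a caller-owned spinlock, which is exactly the kind of lock that sleeps on PREEMPT_RT. A rough sketch of that caller pattern follows; the idr, lock, and function below are assumptions for illustration, not part of this patch.

/*
 * Illustrative caller of the idr preload API; obj_idr, obj_lock and
 * obj_register() are assumed names, not from this patch.
 */
static DEFINE_IDR(obj_idr);
static DEFINE_SPINLOCK(obj_lock);

int obj_register(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);	/* now takes radix_tree_preloads.lock */
	spin_lock(&obj_lock);
	id = idr_alloc(&obj_idr, obj, 0, 0, GFP_NOWAIT);
	spin_unlock(&obj_lock);
	idr_preload_end();		/* local_unlock() after this patch */

	return id;
}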

include/linux/radix-tree.h

Lines changed: 10 additions & 1 deletion
@@ -16,11 +16,20 @@
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/xarray.h>
+#include <linux/local_lock.h>
 
 /* Keep unconverted code working */
 #define radix_tree_root xarray
 #define radix_tree_node xa_node
 
+struct radix_tree_preload {
+	local_lock_t lock;
+	unsigned nr;
+	/* nodes->parent points to next preallocated node */
+	struct radix_tree_node *nodes;
+};
+DECLARE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
+
 /*
  * The bottom two bits of the slot determine how the remaining bits in the
  * slot are interpreted:
@@ -245,7 +254,7 @@ int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
 
 static inline void radix_tree_preload_end(void)
 {
-	preempt_enable();
+	local_unlock(&radix_tree_preloads.lock);
 }
 
 void __rcu **idr_get_free(struct radix_tree_root *root,
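Moving struct radix_tree_preload and the DECLARE_PER_CPU() declaration into the header is what lets the inline radix_tree_preload_end() (and idr_preload_end() above) reference the per-CPU lock directly. Its callers pair it with radix_tree_preload() roughly as in this sketch; the tree, lock, and function names are assumptions for illustration.

/*
 * Illustrative radix_tree_preload() caller; my_tree, my_tree_lock and
 * my_tree_store() are assumed names, not taken from this patch.
 */
static RADIX_TREE(my_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(my_tree_lock);

int my_tree_store(unsigned long index, void *item)
{
	int err;

	err = radix_tree_preload(GFP_KERNEL);	/* local_lock() held on success */
	if (err)
		return err;

	spin_lock(&my_tree_lock);
	err = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_tree_lock);

	radix_tree_preload_end();		/* local_unlock() */
	return err;
}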

lib/radix-tree.c

Lines changed: 9 additions & 11 deletions
@@ -20,14 +20,14 @@
 #include <linux/kernel.h>
 #include <linux/kmemleak.h>
 #include <linux/percpu.h>
+#include <linux/local_lock.h>
 #include <linux/preempt.h>		/* in_interrupt() */
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/xarray.h>
 
-
 /*
  * Radix tree node cache.
  */
@@ -58,12 +58,10 @@ struct kmem_cache *radix_tree_node_cachep;
 /*
  * Per-cpu pool of preloaded nodes
  */
-struct radix_tree_preload {
-	unsigned nr;
-	/* nodes->parent points to next preallocated node */
-	struct radix_tree_node *nodes;
+DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = {
+	.lock = INIT_LOCAL_LOCK(lock),
 };
-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads);
 
 static inline struct radix_tree_node *entry_to_node(void *ptr)
 {
@@ -332,14 +330,14 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
 	 */
 	gfp_mask &= ~__GFP_ACCOUNT;
 
-	preempt_disable();
+	local_lock(&radix_tree_preloads.lock);
 	rtp = this_cpu_ptr(&radix_tree_preloads);
 	while (rtp->nr < nr) {
-		preempt_enable();
+		local_unlock(&radix_tree_preloads.lock);
 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 		if (node == NULL)
 			goto out;
-		preempt_disable();
+		local_lock(&radix_tree_preloads.lock);
 		rtp = this_cpu_ptr(&radix_tree_preloads);
 		if (rtp->nr < nr) {
 			node->parent = rtp->nodes;
@@ -381,7 +379,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
 	if (gfpflags_allow_blocking(gfp_mask))
 		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
 	/* Preloading doesn't help anything with this gfp mask, skip it */
-	preempt_disable();
+	local_lock(&radix_tree_preloads.lock);
 	return 0;
 }
 EXPORT_SYMBOL(radix_tree_maybe_preload);
@@ -1470,7 +1468,7 @@ EXPORT_SYMBOL(radix_tree_tagged);
 void idr_preload(gfp_t gfp_mask)
 {
 	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
-		preempt_disable();
+		local_lock(&radix_tree_preloads.lock);
 }
 EXPORT_SYMBOL(idr_preload);
 
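Note how __radix_tree_preload() keeps dropping and re-taking the lock around kmem_cache_alloc(), exactly as it previously toggled preemption, so the allocation itself never runs under the lock. As for why this is a no-op on mainline configurations: per the commit message, on non-RT kernels local_lock() maps to preempt_disable() plus lockdep coverage, while PREEMPT_RT substitutes a real per-CPU lock so the section stays preemptible. A heavily simplified sketch of that split follows; it is not the actual implementation in include/linux/local_lock_internal.h, which differs in detail.

/*
 * Heavily simplified sketch of the local_lock() behaviour relied on
 * above; the real definitions (with lockdep hooks and other details)
 * live in include/linux/local_lock_internal.h.
 */
#ifndef CONFIG_PREEMPT_RT
/* !PREEMPT_RT: same cost as the old preempt_disable() section */
#define sketch_local_lock(l)	preempt_disable()
#define sketch_local_unlock(l)	preempt_enable()
#else
/* PREEMPT_RT: a real per-CPU lock, so regular (now sleeping)
 * spinlocks and the memory allocator may be used inside it. */
#define sketch_local_lock(l)	spin_lock(this_cpu_ptr(l))
#define sketch_local_unlock(l)	spin_unlock(this_cpu_ptr(l))
#endif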
