|
20 | 20 | #include <linux/kernel.h>
|
21 | 21 | #include <linux/kmemleak.h>
|
22 | 22 | #include <linux/percpu.h>
|
| 23 | +#include <linux/local_lock.h> |
23 | 24 | #include <linux/preempt.h> /* in_interrupt() */
|
24 | 25 | #include <linux/radix-tree.h>
|
25 | 26 | #include <linux/rcupdate.h>
|
26 | 27 | #include <linux/slab.h>
|
27 | 28 | #include <linux/string.h>
|
28 | 29 | #include <linux/xarray.h>
|
29 | 30 |
|
30 |
| - |
31 | 31 | /*
|
32 | 32 | * Radix tree node cache.
|
33 | 33 | */
|
@@ -58,12 +58,10 @@ struct kmem_cache *radix_tree_node_cachep;
|
58 | 58 | /*
|
59 | 59 | * Per-cpu pool of preloaded nodes
|
60 | 60 | */
|
/*
 * Removed: the preload-pool struct was private to this file and the
 * per-CPU instance below was static; exclusive access relied solely on
 * the callers' preempt_disable().
 */
61 |
| -struct radix_tree_preload { |
62 |
| - unsigned nr; |
63 |
| - /* nodes->parent points to next preallocated node */ |
64 |
| - struct radix_tree_node *nodes; |
/*
 * Added: the per-CPU pool now embeds a local_lock member (initialised
 * with INIT_LOCAL_LOCK) and is exported GPL-only.  NOTE(review): the
 * struct definition itself is no longer in this file, so it presumably
 * moved to a shared header -- not visible in this hunk, confirm against
 * the full patch.
 */
| 61 | +DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { |
| 62 | + .lock = INIT_LOCAL_LOCK(lock), |
65 | 63 | };
|
66 |
| -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; |
| 64 | +EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads); |
67 | 65 |
|
68 | 66 | static inline struct radix_tree_node *entry_to_node(void *ptr)
|
69 | 67 | {
|
/*
 * Hunk: __radix_tree_preload() (partial -- function start/end outside
 * this view).  Each preempt_disable()/preempt_enable() pair that guarded
 * the per-CPU preload pool is replaced by local_lock()/local_unlock() on
 * radix_tree_preloads.lock.  The lock is dropped around the sleeping
 * kmem_cache_alloc() and re-taken before touching the pool again, with
 * rtp re-read via this_cpu_ptr() each time since the task may have
 * migrated CPUs while unlocked.  NOTE(review): on !PREEMPT_RT a
 * local_lock should compile down to preempt_disable(), keeping old
 * behaviour -- confirm against Documentation/locking/locktypes.rst.
 */
@@ -332,14 +330,14 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
|
332 | 330 | */
|
333 | 331 | gfp_mask &= ~__GFP_ACCOUNT;
|
334 | 332 |
|
335 |
| - preempt_disable(); |
| 333 | + local_lock(&radix_tree_preloads.lock); |
336 | 334 | rtp = this_cpu_ptr(&radix_tree_preloads);
|
337 | 335 | while (rtp->nr < nr) {
|
338 |
| - preempt_enable(); |
| 336 | + local_unlock(&radix_tree_preloads.lock); |
339 | 337 | node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
|
340 | 338 | if (node == NULL)
|
341 | 339 | goto out;
|
342 |
| - preempt_disable(); |
| 340 | + local_lock(&radix_tree_preloads.lock); |
343 | 341 | rtp = this_cpu_ptr(&radix_tree_preloads);
|
344 | 342 | if (rtp->nr < nr) {
|
345 | 343 | node->parent = rtp->nodes;
/*
 * Hunk: radix_tree_maybe_preload() (partial -- function start outside
 * this view).  The non-blocking path, which skips preloading, now takes
 * local_lock() instead of preempt_disable() before returning 0, so the
 * function still returns with the per-CPU pool locked either way --
 * presumably to keep the existing contract that a matching *_preload_end()
 * releases it; confirm against the callers.
 */
@@ -381,7 +379,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
|
381 | 379 | if (gfpflags_allow_blocking(gfp_mask))
|
382 | 380 | return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
|
383 | 381 | /* Preloading doesn't help anything with this gfp mask, skip it */
|
384 |
| - preempt_disable(); |
| 382 | + local_lock(&radix_tree_preloads.lock); |
385 | 383 | return 0;
|
386 | 384 | }
|
387 | 385 | EXPORT_SYMBOL(radix_tree_maybe_preload);
|
/*
 * Hunk: idr_preload().  When __radix_tree_preload() fails (returns
 * nonzero) the function now takes local_lock() where it used to call
 * preempt_disable(); on success __radix_tree_preload() itself already
 * returns with the lock held, so callers leave idr_preload() locked in
 * both cases -- presumably released later by idr_preload_end(), which is
 * not visible in this diff.
 */
@@ -1470,7 +1468,7 @@ EXPORT_SYMBOL(radix_tree_tagged);
|
1470 | 1468 | void idr_preload(gfp_t gfp_mask)
|
1471 | 1469 | {
|
1472 | 1470 | if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
1473 |
| - preempt_disable(); |
| 1471 | + local_lock(&radix_tree_preloads.lock); |
1474 | 1472 | }
|
1475 | 1473 | EXPORT_SYMBOL(idr_preload);
1475 | 1473 | EXPORT_SYMBOL(idr_preload);
|
1476 | 1474 |
|
|
0 commit comments