Commit fa417ab

mm, slub: move disabling irqs closer to get_partial() in ___slab_alloc()
Continue reducing the irq disabled scope. Check for per-cpu partial slabs first with irqs enabled, then recheck with irqs disabled before grabbing the slab page. Mostly preparatory for the following patches.

Signed-off-by: Vlastimil Babka <[email protected]>
1 parent 0b303fb commit fa417ab
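
Below is a minimal userspace sketch of the pattern this patch adopts: check the per-cpu partial list optimistically with irqs enabled, then recheck under local_irq_save() before actually taking a slab, because an irq handler may have installed c->page or stolen the partial list in the meantime. All struct and function names here are hypothetical, and the irq macros are stubbed so the file compiles outside the kernel; it illustrates the ordering only, not the SLUB code itself.

/* sketch.c: illustration only; all names and stubs below are hypothetical */
#include <stddef.h>

/* Stubs standing in for the kernel's irq-disabling macros. */
#define local_irq_save(flags)		((void)((flags) = 0))
#define local_irq_restore(flags)	((void)(flags))
#define unlikely(x)			(x)

struct cpu_cache {
	void *page;	/* current slab; an irq handler may install one */
	void *partial;	/* per-cpu partial slab; an irq handler may steal it */
};

/* Return a partial slab to use, or NULL if the caller must go elsewhere. */
static void *grab_percpu_partial(struct cpu_cache *c)
{
	unsigned long flags;

	/* Optimistic check with irqs still enabled: cheap, but racy. */
	if (!c->partial)
		return NULL;

	/* Recheck with irqs disabled before actually grabbing the slab. */
	local_irq_save(flags);
	if (unlikely(c->page) || unlikely(!c->partial)) {
		/* An irq handler won the race; caller retries or falls back. */
		local_irq_restore(flags);
		return NULL;
	}
	c->page = c->partial;
	c->partial = NULL;
	local_irq_restore(flags);
	return c->page;
}

int main(void)
{
	struct cpu_cache c = { .page = NULL, .partial = &c };	/* dummy slab */
	return grab_percpu_partial(&c) ? 0 : 1;
}

In this sketch the common miss (no partial slab available) never disables irqs at all, and only the rare hit pays for the save/recheck/restore sequence, which echoes the commit's stated goal of reducing the irq disabled scope.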

File tree

1 file changed: +25 -9 lines

mm/slub.c

Lines changed: 25 additions & 9 deletions
@@ -2706,11 +2706,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		if (unlikely(node != NUMA_NO_NODE &&
 			     !node_isset(node, slab_nodes)))
 			node = NUMA_NO_NODE;
-		local_irq_save(flags);
-		if (unlikely(c->page)) {
-			local_irq_restore(flags);
-			goto reread_page;
-		}
 		goto new_slab;
 	}
 redo:
@@ -2751,6 +2746,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 	if (!freelist) {
 		c->page = NULL;
+		local_irq_restore(flags);
 		stat(s, DEACTIVATE_BYPASS);
 		goto new_slab;
 	}
@@ -2780,19 +2776,36 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto reread_page;
 	}
 	deactivate_slab(s, page, c->freelist, c);
+	local_irq_restore(flags);
 
 new_slab:
 
-	lockdep_assert_irqs_disabled();
-
 	if (slub_percpu_partial(c)) {
+		local_irq_save(flags);
+		if (unlikely(c->page)) {
+			local_irq_restore(flags);
+			goto reread_page;
+		}
+		if (unlikely(!slub_percpu_partial(c)))
+			goto new_objects; /* stolen by an IRQ handler */
+
 		page = c->page = slub_percpu_partial(c);
 		slub_set_percpu_partial(c, page);
 		local_irq_restore(flags);
 		stat(s, CPU_PARTIAL_ALLOC);
 		goto redo;
 	}
 
+	local_irq_save(flags);
+	if (unlikely(c->page)) {
+		local_irq_restore(flags);
+		goto reread_page;
+	}
+
+new_objects:
+
+	lockdep_assert_irqs_disabled();
+
 	freelist = get_partial(s, gfpflags, node, &page);
 	if (freelist) {
 		c->page = page;
@@ -2825,15 +2838,18 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 check_new_page:
 
 	if (kmem_cache_debug(s)) {
-		if (!alloc_debug_processing(s, page, freelist, addr))
+		if (!alloc_debug_processing(s, page, freelist, addr)) {
 			/* Slab failed checks. Next slab needed */
+			c->page = NULL;
+			local_irq_restore(flags);
 			goto new_slab;
-		else
+		} else {
 			/*
 			 * For debug case, we don't load freelist so that all
 			 * allocations go through alloc_debug_processing()
 			 */
 			goto return_single;
+		}
 	}
 
 	if (unlikely(!pfmemalloc_match(page, gfpflags)))
