
Commit 4b1f449

mm, slub: stop disabling irqs around get_partial()
The function get_partial() does not need to have irqs disabled as a whole. It's sufficient to convert spin_lock operations to their irq saving/restoring versions.

As a result, it's now possible to reach the page allocator from the slab allocator without disabling and re-enabling interrupts on the way.

Signed-off-by: Vlastimil Babka <[email protected]>
1 parent 9f101ee commit 4b1f449
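
For readers skimming the diff below, the essence of the change in get_partial_node() is the standard conversion from a plain spin_lock()/spin_unlock() pair to the irq-saving variants, moving interrupt disabling from the caller into the locked region itself. The following is a minimal sketch of that pattern only, not the actual mm/slub.c code; the function names and parameters are made up for illustration:

#include <linux/spinlock.h>

/* Before: the caller had to enter with interrupts already disabled. */
static void scan_partial_list_old(spinlock_t *list_lock)
{
        spin_lock(list_lock);
        /* ... walk the list under the lock ... */
        spin_unlock(list_lock);
}

/*
 * After: interrupts are disabled only for the duration of the list walk,
 * so the caller can run (and call into the page allocator) with
 * interrupts enabled.
 */
static void scan_partial_list_new(spinlock_t *list_lock)
{
        unsigned long flags;

        spin_lock_irqsave(list_lock, flags);
        /* ... walk the list under the lock ... */
        spin_unlock_irqrestore(list_lock, flags);
}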

File tree

1 file changed: +8 −14 lines

mm/slub.c

Lines changed: 8 additions & 14 deletions
@@ -2010,11 +2010,12 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
  * Try to allocate a partial slab from a specific node.
  */
 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
-                              struct page **ret_page, gfp_t flags)
+                              struct page **ret_page, gfp_t gfpflags)
 {
         struct page *page, *page2;
         void *object = NULL;
         unsigned int available = 0;
+        unsigned long flags;
         int objects;
 
         /*
@@ -2026,11 +2027,11 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
         if (!n || !n->nr_partial)
                 return NULL;
 
-        spin_lock(&n->list_lock);
+        spin_lock_irqsave(&n->list_lock, flags);
         list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
                 void *t;
 
-                if (!pfmemalloc_match(page, flags))
+                if (!pfmemalloc_match(page, gfpflags))
                         continue;
 
                 t = acquire_slab(s, n, page, object == NULL, &objects);
@@ -2051,7 +2052,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
                         break;
 
         }
-        spin_unlock(&n->list_lock);
+        spin_unlock_irqrestore(&n->list_lock, flags);
         return object;
 }
 
@@ -2779,8 +2780,10 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                         local_irq_restore(flags);
                         goto reread_page;
                 }
-                if (unlikely(!slub_percpu_partial(c)))
+                if (unlikely(!slub_percpu_partial(c))) {
+                        local_irq_restore(flags);
                         goto new_objects; /* stolen by an IRQ handler */
+                }
 
                 page = c->page = slub_percpu_partial(c);
                 slub_set_percpu_partial(c, page);
@@ -2789,18 +2792,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                 goto redo;
         }
 
-        local_irq_save(flags);
-        if (unlikely(c->page)) {
-                local_irq_restore(flags);
-                goto reread_page;
-        }
-
 new_objects:
 
-        lockdep_assert_irqs_disabled();
-
         freelist = get_partial(s, gfpflags, node, &page);
-        local_irq_restore(flags);
         if (freelist)
                 goto check_new_page;
 
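
Read together, the ___slab_alloc() hunks mean the slow path no longer brackets get_partial() with local_irq_save()/local_irq_restore(): the branch that falls back to new_objects now restores interrupts first, and get_partial_node() takes care of its own irq-saving locking. A condensed, illustrative view of the resulting flow after this commit (heavily abbreviated, not the complete function):

        /*
         * Percpu partial list was emptied (e.g. stolen by an IRQ handler):
         * re-enable interrupts before falling back to the node partial
         * lists and, potentially, the page allocator.
         */
        if (unlikely(!slub_percpu_partial(c))) {
                local_irq_restore(flags);
                goto new_objects;
        }
        /* ... */

new_objects:
        /*
         * get_partial() now uses spin_lock_irqsave() internally, so it runs
         * with interrupts enabled here and can reach the page allocator
         * without an extra irq disable/enable round trip.
         */
        freelist = get_partial(s, gfpflags, node, &page);
        if (freelist)
                goto check_new_page;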
