Skip to content

Commit 45387b8

Browse files
committed
mm/slub: Convert alloc_slab_page() to return a struct slab
Preparatory, callers convert back to struct page for now. Also move setting page flags to alloc_slab_page() where we still operate on a struct page. This means the page->slab_cache pointer is now set later than the PageSlab flag, which could theoretically confuse some pfn walker assuming PageSlab means there would be a valid cache pointer. But as the code had no barriers and used __set_bit() anyway, it could have happened already, so there shouldn't be such a walker. Signed-off-by: Vlastimil Babka <[email protected]> Reviewed-by: Roman Gushchin <[email protected]> Reviewed-by: Hyeonggon Yoo <[email protected]> Tested-by: Hyeonggon Yoo <[email protected]>
1 parent fb012e2 commit 45387b8

File tree

1 file changed

+16
-10
lines changed

1 file changed

+16
-10
lines changed

mm/slub.c

Lines changed: 16 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -1788,18 +1788,27 @@ static void *setup_object(struct kmem_cache *s, struct page *page,
17881788
/*
17891789
* Slab allocation and freeing
17901790
*/
1791-
static inline struct page *alloc_slab_page(struct kmem_cache *s,
1791+
static inline struct slab *alloc_slab_page(struct kmem_cache *s,
17921792
gfp_t flags, int node, struct kmem_cache_order_objects oo)
17931793
{
1794-
struct page *page;
1794+
struct folio *folio;
1795+
struct slab *slab;
17951796
unsigned int order = oo_order(oo);
17961797

17971798
if (node == NUMA_NO_NODE)
1798-
page = alloc_pages(flags, order);
1799+
folio = (struct folio *)alloc_pages(flags, order);
17991800
else
1800-
page = __alloc_pages_node(node, flags, order);
1801+
folio = (struct folio *)__alloc_pages_node(node, flags, order);
18011802

1802-
return page;
1803+
if (!folio)
1804+
return NULL;
1805+
1806+
slab = folio_slab(folio);
1807+
__folio_set_slab(folio);
1808+
if (page_is_pfmemalloc(folio_page(folio, 0)))
1809+
slab_set_pfmemalloc(slab);
1810+
1811+
return slab;
18031812
}
18041813

18051814
#ifdef CONFIG_SLAB_FREELIST_RANDOM
@@ -1932,15 +1941,15 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
19321941
if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
19331942
alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
19341943

1935-
page = alloc_slab_page(s, alloc_gfp, node, oo);
1944+
page = slab_page(alloc_slab_page(s, alloc_gfp, node, oo));
19361945
if (unlikely(!page)) {
19371946
oo = s->min;
19381947
alloc_gfp = flags;
19391948
/*
19401949
* Allocation may have failed due to fragmentation.
19411950
* Try a lower order alloc if possible
19421951
*/
1943-
page = alloc_slab_page(s, alloc_gfp, node, oo);
1952+
page = slab_page(alloc_slab_page(s, alloc_gfp, node, oo));
19441953
if (unlikely(!page))
19451954
goto out;
19461955
stat(s, ORDER_FALLBACK);
@@ -1951,9 +1960,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
19511960
account_slab(page_slab(page), oo_order(oo), s, flags);
19521961

19531962
page->slab_cache = s;
1954-
__SetPageSlab(page);
1955-
if (page_is_pfmemalloc(page))
1956-
SetPageSlabPfmemalloc(page);
19571963

19581964
kasan_poison_slab(page);
19591965

0 commit comments

Comments (0)