Skip to content

Commit 4020b4a

Browse files
committed
mm/slub: Convert __free_slab() to use struct slab
__free_slab() is on the boundary of distinguishing struct slab and struct page, so start with struct slab but convert to folio for working with flags, and use folio_page() to call functions that require struct page.

Signed-off-by: Vlastimil Babka <[email protected]>
Reviewed-by: Roman Gushchin <[email protected]>
Reviewed-by: Hyeonggon Yoo <[email protected]>
Tested-by: Hyeonggon Yoo <[email protected]>
1 parent 45387b8 commit 4020b4a

File tree

1 file changed

+13
-14
lines changed

1 file changed

+13
-14
lines changed

mm/slub.c

Lines changed: 13 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -2005,43 +2005,42 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
20052005
flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
20062006
}
20072007

2008-
static void __free_slab(struct kmem_cache *s, struct page *page)
2008+
static void __free_slab(struct kmem_cache *s, struct slab *slab)
20092009
{
2010-
int order = compound_order(page);
2010+
struct folio *folio = slab_folio(slab);
2011+
int order = folio_order(folio);
20112012
int pages = 1 << order;
20122013

20132014
if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
20142015
void *p;
20152016

2016-
slab_pad_check(s, page);
2017-
for_each_object(p, s, page_address(page),
2018-
page->objects)
2019-
check_object(s, page, p, SLUB_RED_INACTIVE);
2017+
slab_pad_check(s, folio_page(folio, 0));
2018+
for_each_object(p, s, slab_address(slab), slab->objects)
2019+
check_object(s, folio_page(folio, 0), p, SLUB_RED_INACTIVE);
20202020
}
20212021

2022-
__ClearPageSlabPfmemalloc(page);
2023-
__ClearPageSlab(page);
2024-
/* In union with page->mapping where page allocator expects NULL */
2025-
page->slab_cache = NULL;
2022+
__slab_clear_pfmemalloc(slab);
2023+
__folio_clear_slab(folio);
2024+
folio->mapping = NULL;
20262025
if (current->reclaim_state)
20272026
current->reclaim_state->reclaimed_slab += pages;
2028-
unaccount_slab(page_slab(page), order, s);
2029-
__free_pages(page, order);
2027+
unaccount_slab(slab, order, s);
2028+
__free_pages(folio_page(folio, 0), order);
20302029
}
20312030

20322031
static void rcu_free_slab(struct rcu_head *h)
20332032
{
20342033
struct page *page = container_of(h, struct page, rcu_head);
20352034

2036-
__free_slab(page->slab_cache, page);
2035+
__free_slab(page->slab_cache, page_slab(page));
20372036
}
20382037

20392038
static void free_slab(struct kmem_cache *s, struct page *page)
20402039
{
20412040
if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
20422041
call_rcu(&page->rcu_head, rcu_free_slab);
20432042
} else
2044-
__free_slab(s, page);
2043+
__free_slab(s, page_slab(page));
20452044
}
20462045

20472046
static void discard_slab(struct kmem_cache *s, struct page *page)

0 commit comments

Comments
 (0)