
Commit d835eef

Matthew Wilcox (Oracle) authored and tehcaster committed
mm/slub: Convert kfree() to use a struct slab
Convert kfree(), kmem_cache_free() and ___cache_free() to resolve object
addresses to struct slab, using folio as intermediate step where needed.
Keep passing the result as struct page for now in preparation for mass
conversion of internal functions.

[ [email protected]: Use folio as intermediate step when checking for
  large kmalloc pages, and when freeing them - rename free_nonslab_page()
  to free_large_kmalloc() that takes struct folio ]

Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
Reviewed-by: Roman Gushchin <[email protected]>
1 parent: cc465c3
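Before the diff, some orientation: the new code leans on the slab lookup helper virt_to_slab(), added by this series in mm/slab.h. A simplified sketch of how an object address resolves to a struct slab (not the verbatim kernel source):

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);	/* address -> head page, as a folio */

	if (!folio_test_slab(folio))
		return NULL;		/* not slab memory (e.g. a large kmalloc page) */

	return folio_slab(folio);	/* a slab folio reinterpreted as struct slab */
}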

File tree: 1 file changed (+16 -13 lines)

mm/slub.c: 16 additions, 13 deletions
@@ -3517,7 +3517,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
 #ifdef CONFIG_KASAN_GENERIC
 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
 {
-	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
+	do_slab_free(cache, slab_page(virt_to_slab(x)), x, NULL, 1, addr);
 }
 #endif
 
@@ -3527,7 +3527,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	if (!s)
 		return;
 	trace_kmem_cache_free(_RET_IP_, x, s->name);
-	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
+	slab_free(s, slab_page(virt_to_slab(x)), x, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
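Note the transitional slab_page(virt_to_slab(x)) pattern in both hunks above: slab_free() still takes a struct page, per the commit message, so the freshly resolved struct slab is converted straight back. The converters are cheap casts because struct slab overlays struct page; a simplified sketch (the series' real macros in mm/slab.h use _Generic to preserve constness):

#define slab_folio(s)	((struct folio *)(s))		/* struct slab aliases the folio */
#define slab_page(s)	folio_page(slab_folio(s), 0)	/* the folio's head struct page */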
@@ -3539,16 +3539,17 @@ struct detached_freelist {
 	struct kmem_cache *s;
 };
 
-static inline void free_nonslab_page(struct page *page, void *object)
+static inline void free_large_kmalloc(struct folio *folio, void *object)
 {
-	unsigned int order = compound_order(page);
+	unsigned int order = folio_order(folio);
 
-	if (WARN_ON_ONCE(!PageCompound(page)))
+	if (WARN_ON_ONCE(order == 0))
 		pr_warn_once("object pointer: 0x%p\n", object);
 
 	kfree_hook(object);
-	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
-	__free_pages(page, order);
+	mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
+			      -(PAGE_SIZE << order));
+	__free_pages(folio_page(folio, 0), order);
 }
 
 /*
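A note on the changed sanity check: WARN_ON_ONCE(order == 0) can replace WARN_ON_ONCE(!PageCompound(page)) because every legitimate large kmalloc buffer is allocated as a compound page of order >= 1, so an order-0 folio reaching this function can only mean a bogus pointer. A sketch of the allocation side under that assumption (cf. the kmalloc_order() path in mm/slab_common.c; simplified, hypothetical function name):

static void *large_kmalloc_sketch(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);	/* size > KMALLOC_MAX_CACHE_SIZE, so order >= 1 */
	struct page *page;

	flags |= __GFP_COMP;			/* always allocated as a compound page */
	page = alloc_pages(flags, order);
	return page ? page_address(page) : NULL;
}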
@@ -3588,7 +3589,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (!s) {
 		/* Handle kalloc'ed objects */
 		if (unlikely(!folio_test_slab(folio))) {
-			free_nonslab_page(folio_page(folio, 0), object);
+			free_large_kmalloc(folio, object);
 			p[size] = NULL; /* mark object processed */
 			return size;
 		}
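This call site is the bulk-free path: kfree_bulk() funnels every pointer through build_detached_freelist() with s == NULL, so large kmalloc folios must be recognised and routed here rather than only in kfree(). A usage sketch (hypothetical sizes):

	void *objs[2];

	objs[0] = kmalloc(32, GFP_KERNEL);		/* slab-backed object */
	objs[1] = kmalloc(64 * 1024, GFP_KERNEL);	/* large kmalloc: compound page, no slab */
	kfree_bulk(2, objs);				/* second pointer takes free_large_kmalloc() */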
@@ -4547,20 +4548,22 @@ EXPORT_SYMBOL(__ksize);
 
 void kfree(const void *x)
 {
-	struct page *page;
+	struct folio *folio;
+	struct slab *slab;
 	void *object = (void *)x;
 
 	trace_kfree(_RET_IP_, x);
 
 	if (unlikely(ZERO_OR_NULL_PTR(x)))
 		return;
 
-	page = virt_to_head_page(x);
-	if (unlikely(!PageSlab(page))) {
-		free_nonslab_page(page, object);
+	folio = virt_to_folio(x);
+	if (unlikely(!folio_test_slab(folio))) {
+		free_large_kmalloc(folio, object);
 		return;
 	}
-	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
+	slab = folio_slab(folio);
+	slab_free(slab->slab_cache, slab_page(slab), object, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 
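Putting the pieces together, a worked trace of the converted kfree() for the two possible cases (hypothetical sizes; the deciding bit is the slab flag on the folio):

	void *p = kmalloc(32, GFP_KERNEL);	/* served from a kmalloc slab cache */
	kfree(p);
	/*	folio = virt_to_folio(p)   -> slab folio, folio_test_slab() is true
	 *	slab  = folio_slab(folio)
	 *	slab_free(slab->slab_cache, slab_page(slab), p, NULL, 1, _RET_IP_)
	 */

	void *q = kmalloc(64 * 1024, GFP_KERNEL);	/* beyond the kmalloc caches */
	kfree(q);
	/*	folio = virt_to_folio(q)   -> plain compound folio, folio_test_slab() is false
	 *	free_large_kmalloc(folio, q)
	 */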