Commit 5075701

Matthew Wilcox (Oracle) authored and tehcaster committed
mm/slob: Convert SLOB to use struct slab and struct folio
Use struct slab throughout the slob allocator. Where non-slab pages can
appear, use struct folio instead of struct page.

[ [email protected]: don't introduce wrappers for PageSlobFree in mm/slab.h just
  for the single callers being wrappers in mm/slob.c ]

[ Hyeonggon Yoo <[email protected]>: fix NULL pointer dereference ]

Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
Reviewed-by: Hyeonggon Yoo <[email protected]>
Tested-by: Hyeonggon Yoo <[email protected]>
1 parent 4b5f8d9 commit 5075701
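
In short, the patch makes kfree() tell small SLOB objects apart from large,
directly allocated compound pages through the folio API instead of struct
page flags. The following is a condensed sketch of that path, pulled together
from the kfree() hunk below; it is illustrative only, not a standalone
compilable excerpt, since it depends on kernel-internal headers and elides
the small-object branch.

/*
 * Sketch only: condensed from the mm/slob.c kfree() hunk below to show the
 * page -> folio conversion; not buildable outside the kernel tree.
 */
void kfree(const void *block)
{
	struct folio *folio = virt_to_folio(block);	/* was virt_to_page() */

	if (folio_test_slab(folio)) {			/* was PageSlab() */
		/* Small object: hand it back to the SLOB freelist (elided). */
	} else {
		/* Large object: a compound page handed out by alloc_pages(). */
		unsigned int order = folio_order(folio);	/* was compound_order() */

		mod_node_page_state(folio_pgdat(folio), NR_SLAB_UNRECLAIMABLE_B,
				    -(PAGE_SIZE << order));
		__free_pages(folio_page(folio, 0), order);	/* was __free_pages(sp, order) */
	}
}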

File tree

1 file changed (+27, -24 lines)

mm/slob.c

Lines changed: 27 additions & 24 deletions
@@ -30,7 +30,7 @@
  * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
  * alloc_pages() directly, allocating compound pages so the page order
  * does not have to be separately tracked.
- * These objects are detected in kfree() because PageSlab()
+ * These objects are detected in kfree() because folio_test_slab()
  * is false for them.
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
@@ -105,21 +105,21 @@ static LIST_HEAD(free_slob_large);
 /*
  * slob_page_free: true for pages on free_slob_pages list.
  */
-static inline int slob_page_free(struct page *sp)
+static inline int slob_page_free(struct slab *slab)
 {
-	return PageSlobFree(sp);
+	return PageSlobFree(slab_page(slab));
 }
 
-static void set_slob_page_free(struct page *sp, struct list_head *list)
+static void set_slob_page_free(struct slab *slab, struct list_head *list)
 {
-	list_add(&sp->slab_list, list);
-	__SetPageSlobFree(sp);
+	list_add(&slab->slab_list, list);
+	__SetPageSlobFree(slab_page(slab));
 }
 
-static inline void clear_slob_page_free(struct page *sp)
+static inline void clear_slob_page_free(struct slab *slab)
 {
-	list_del(&sp->slab_list);
-	__ClearPageSlobFree(sp);
+	list_del(&slab->slab_list);
+	__ClearPageSlobFree(slab_page(slab));
 }
 
 #define SLOB_UNIT sizeof(slob_t)
@@ -234,7 +234,7 @@ static void slob_free_pages(void *b, int order)
  * freelist, in this case @page_removed_from_list will be set to
  * true (set to false otherwise).
  */
-static void *slob_page_alloc(struct page *sp, size_t size, int align,
+static void *slob_page_alloc(struct slab *sp, size_t size, int align,
			      int align_offset, bool *page_removed_from_list)
 {
 	slob_t *prev, *cur, *aligned = NULL;
@@ -301,7 +301,8 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align,
 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
				int align_offset)
 {
-	struct page *sp;
+	struct folio *folio;
+	struct slab *sp;
 	struct list_head *slob_list;
 	slob_t *b = NULL;
 	unsigned long flags;
@@ -323,7 +324,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
 		 * If there's a node specification, search for a partial
 		 * page with a matching node id in the freelist.
 		 */
-		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
+		if (node != NUMA_NO_NODE && slab_nid(sp) != node)
 			continue;
 #endif
 		/* Enough room on this page? */
@@ -358,8 +359,9 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
 		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
 		if (!b)
 			return NULL;
-		sp = virt_to_page(b);
-		__SetPageSlab(sp);
+		folio = virt_to_folio(b);
+		__folio_set_slab(folio);
+		sp = folio_slab(folio);
 
 		spin_lock_irqsave(&slob_lock, flags);
 		sp->units = SLOB_UNITS(PAGE_SIZE);
@@ -381,7 +383,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
  */
 static void slob_free(void *block, int size)
 {
-	struct page *sp;
+	struct slab *sp;
 	slob_t *prev, *next, *b = (slob_t *)block;
 	slobidx_t units;
 	unsigned long flags;
@@ -391,7 +393,7 @@ static void slob_free(void *block, int size)
 		return;
 	BUG_ON(!size);
 
-	sp = virt_to_page(block);
+	sp = virt_to_slab(block);
 	units = SLOB_UNITS(size);
 
 	spin_lock_irqsave(&slob_lock, flags);
@@ -401,8 +403,8 @@ static void slob_free(void *block, int size)
 		if (slob_page_free(sp))
 			clear_slob_page_free(sp);
 		spin_unlock_irqrestore(&slob_lock, flags);
-		__ClearPageSlab(sp);
-		page_mapcount_reset(sp);
+		__folio_clear_slab(slab_folio(sp));
+		page_mapcount_reset(slab_page(sp));
 		slob_free_pages(b, 0);
 		return;
 	}
@@ -544,24 +546,25 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
 
 void kfree(const void *block)
 {
-	struct page *sp;
+	struct folio *sp;
 
 	trace_kfree(_RET_IP_, block);
 
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 	kmemleak_free(block);
 
-	sp = virt_to_page(block);
-	if (PageSlab(sp)) {
+	sp = virt_to_folio(block);
+	if (folio_test_slab(sp)) {
 		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
 	} else {
-		unsigned int order = compound_order(sp);
-		mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B,
+		unsigned int order = folio_order(sp);
+
+		mod_node_page_state(folio_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B,
 				    -(PAGE_SIZE << order));
-		__free_pages(sp, order);
+		__free_pages(folio_page(sp, 0), order);
 
 	}
 }
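
On the allocation side the conversion runs in the other direction: a page
freshly obtained from the page allocator is first marked as a slab through
its folio, and only then handled as a struct slab. A minimal sketch of that
sequence, condensed from the slob_alloc() hunk above (illustrative only, not
standalone code):

	/* New-page path in slob_alloc(), condensed from the hunk above. */
	b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
	if (!b)
		return NULL;

	folio = virt_to_folio(b);	/* the backing memory, seen as a folio */
	__folio_set_slab(folio);	/* mark it as slab memory ... */
	sp = folio_slab(folio);		/* ... then treat it as a struct slab */

	spin_lock_irqsave(&slob_lock, flags);
	sp->units = SLOB_UNITS(PAGE_SIZE);

The matching teardown in slob_free() goes back through slab_folio() and
slab_page() before clearing the slab flag and returning the page, as shown in
the slob_free() hunk above.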
