  * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
  * alloc_pages() directly, allocating compound pages so the page order
  * does not have to be separately tracked.
- * These objects are detected in kfree() because PageSlab()
+ * These objects are detected in kfree() because folio_test_slab()
  * is false for them.
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
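Two things the comment above relies on are worth making concrete: the order of a compound page travels with the page itself, and the slab page flag is the only discriminator kfree() needs. A minimal kernel-style sketch of that large-object round trip using this series' folio helpers (illustrative only, not code from this commit; big_alloc/big_free are made-up names and error handling is omitted):

/* Illustrative sketch: a >= PAGE_SIZE allocation needs no side-band size
 * bookkeeping, because __GFP_COMP makes the pages a compound page that
 * records its own order. */
static void *big_alloc(size_t size, gfp_t gfp)
{
	struct page *page = alloc_pages(gfp | __GFP_COMP, get_order(size));

	return page ? page_address(page) : NULL;
}

static void big_free(void *block)
{
	struct folio *folio = virt_to_folio(block);

	/* folio_test_slab() is false for these pages, which is how kfree()
	 * tells them apart from slab-backed objects; folio_order() recovers
	 * the order stored in the compound page. */
	__free_pages(folio_page(folio, 0), folio_order(folio));
}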
@@ -105,21 +105,21 @@ static LIST_HEAD(free_slob_large);
 /*
  * slob_page_free: true for pages on free_slob_pages list.
  */
-static inline int slob_page_free(struct page *sp)
+static inline int slob_page_free(struct slab *slab)
 {
-	return PageSlobFree(sp);
+	return PageSlobFree(slab_page(slab));
 }
 
-static void set_slob_page_free(struct page *sp, struct list_head *list)
+static void set_slob_page_free(struct slab *slab, struct list_head *list)
 {
-	list_add(&sp->slab_list, list);
-	__SetPageSlobFree(sp);
+	list_add(&slab->slab_list, list);
+	__SetPageSlobFree(slab_page(slab));
 }
 
-static inline void clear_slob_page_free(struct page *sp)
+static inline void clear_slob_page_free(struct slab *slab)
 {
-	list_del(&sp->slab_list);
-	__ClearPageSlobFree(sp);
+	list_del(&slab->slab_list);
+	__ClearPageSlobFree(slab_page(slab));
 }
 
 #define SLOB_UNIT sizeof(slob_t)
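The flag helpers above now take a struct slab, yet PG_slob_free still lives in the backing struct page, hence the slab_page() hops. In the struct slab series the slab/page/folio converters are, to a first approximation, casts between typed views of the same memory; a simplified sketch of that idea (the kernel's real definitions add _Generic-based type checking, so treat this as an approximation rather than the actual code):

/* Approximation only: a struct slab overlays the struct page(s) backing
 * the slab, so converting between the views costs nothing at runtime. */
#define slab_folio(s)	((struct folio *)(s))
#define slab_page(s)	folio_page(slab_folio(s), 0)
#define folio_slab(f)	((struct slab *)(f))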
@@ -234,7 +234,7 @@ static void slob_free_pages(void *b, int order)
  * freelist, in this case @page_removed_from_list will be set to
  * true (set to false otherwise).
  */
-static void *slob_page_alloc(struct page *sp, size_t size, int align,
+static void *slob_page_alloc(struct slab *sp, size_t size, int align,
 			      int align_offset, bool *page_removed_from_list)
 {
 	slob_t *prev, *cur, *aligned = NULL;
@@ -301,7 +301,8 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align,
 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
 							int align_offset)
 {
-	struct page *sp;
+	struct folio *folio;
+	struct slab *sp;
 	struct list_head *slob_list;
 	slob_t *b = NULL;
 	unsigned long flags;
@@ -323,7 +324,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
 		 * If there's a node specification, search for a partial
 		 * page with a matching node id in the freelist.
 		 */
-		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
+		if (node != NUMA_NO_NODE && slab_nid(sp) != node)
 			continue;
 #endif
 		/* Enough room on this page? */
@@ -358,8 +359,9 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
 		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
 		if (!b)
 			return NULL;
-		sp = virt_to_page(b);
-		__SetPageSlab(sp);
+		folio = virt_to_folio(b);
+		__folio_set_slab(folio);
+		sp = folio_slab(folio);
 
 		spin_lock_irqsave(&slob_lock, flags);
 		sp->units = SLOB_UNITS(PAGE_SIZE);
@@ -381,7 +383,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
  */
 static void slob_free(void *block, int size)
 {
-	struct page *sp;
+	struct slab *sp;
 	slob_t *prev, *next, *b = (slob_t *)block;
 	slobidx_t units;
 	unsigned long flags;
@@ -391,7 +393,7 @@ static void slob_free(void *block, int size)
 		return;
 	BUG_ON(!size);
 
-	sp = virt_to_page(block);
+	sp = virt_to_slab(block);
 	units = SLOB_UNITS(size);
 
 	spin_lock_irqsave(&slob_lock, flags);
@@ -401,8 +403,8 @@ static void slob_free(void *block, int size)
 		if (slob_page_free(sp))
 			clear_slob_page_free(sp);
 		spin_unlock_irqrestore(&slob_lock, flags);
-		__ClearPageSlab(sp);
-		page_mapcount_reset(sp);
+		__folio_clear_slab(slab_folio(sp));
+		page_mapcount_reset(slab_page(sp));
 		slob_free_pages(b, 0);
 		return;
 	}
@@ -544,24 +546,25 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
 
 void kfree(const void *block)
 {
-	struct page *sp;
+	struct folio *sp;
 
 	trace_kfree(_RET_IP_, block);
 
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 	kmemleak_free(block);
 
-	sp = virt_to_page(block);
-	if (PageSlab(sp)) {
+	sp = virt_to_folio(block);
+	if (folio_test_slab(sp)) {
 		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
 	} else {
-		unsigned int order = compound_order(sp);
-		mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B,
+		unsigned int order = folio_order(sp);
+
+		mod_node_page_state(folio_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B,
 				    -(PAGE_SIZE << order));
-		__free_pages(sp, order);
+		__free_pages(folio_page(sp, 0), order);
 
 	}
 }
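A usage-level view of the two kfree() branches above (illustrative; the cutoff follows the file's header comment, under which SLOB sends PAGE_SIZE-and-larger kmalloc() requests straight to the page allocator):

void *small = kmalloc(64, GFP_KERNEL);            /* carved out of a slab page    */
void *large = kmalloc(2 * PAGE_SIZE, GFP_KERNEL); /* alloc_pages(), compound page */

kfree(small);	/* folio_test_slab() true  -> slob_free() branch    */
kfree(large);	/* folio_test_slab() false -> __free_pages() branch */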