@@ -3532,7 +3532,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 EXPORT_SYMBOL(kmem_cache_free);
 
 struct detached_freelist {
-	struct page *page;
+	struct slab *slab;
 	void *tail;
 	void *freelist;
 	int cnt;
@@ -3554,8 +3554,8 @@ static inline void free_nonslab_page(struct page *page, void *object)
 /*
  * This function progressively scans the array with free objects (with
  * a limited look ahead) and extract objects belonging to the same
- * page. It builds a detached freelist directly within the given
- * page/objects. This can happen without any need for
+ * slab. It builds a detached freelist directly within the given
+ * slab/objects. This can happen without any need for
  * synchronization, because the objects are owned by running process.
  * The freelist is build up as a single linked list in the objects.
  * The idea is, that this detached freelist can then be bulk
@@ -3570,10 +3570,11 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	size_t first_skipped_index = 0;
 	int lookahead = 3;
 	void *object;
-	struct page *page;
+	struct folio *folio;
+	struct slab *slab;
 
 	/* Always re-init detached_freelist */
-	df->page = NULL;
+	df->slab = NULL;
 
 	do {
 		object = p[--size];
@@ -3583,17 +3584,19 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (!object)
 		return 0;
 
-	page = virt_to_head_page(object);
+	folio = virt_to_folio(object);
 	if (!s) {
 		/* Handle kalloc'ed objects */
-		if (unlikely(!PageSlab(page))) {
-			free_nonslab_page(page, object);
+		if (unlikely(!folio_test_slab(folio))) {
+			free_nonslab_page(folio_page(folio, 0), object);
 			p[size] = NULL; /* mark object processed */
 			return size;
 		}
 		/* Derive kmem_cache from object */
-		df->s = page->slab_cache;
+		slab = folio_slab(folio);
+		df->s = slab->slab_cache;
 	} else {
+		slab = folio_slab(folio);
 		df->s = cache_from_obj(s, object); /* Support for memcg */
 	}
 
@@ -3605,7 +3608,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	}
 
 	/* Start new detached freelist */
-	df->page = page;
+	df->slab = slab;
 	set_freepointer(df->s, object, NULL);
 	df->tail = object;
 	df->freelist = object;
@@ -3617,8 +3620,8 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 		if (!object)
 			continue; /* Skip processed objects */
 
-		/* df->page is always set at this point */
-		if (df->page == virt_to_head_page(object)) {
+		/* df->slab is always set at this point */
+		if (df->slab == virt_to_slab(object)) {
 			/* Opportunity build freelist */
 			set_freepointer(df->s, object, df->freelist);
 			df->freelist = object;
@@ -3650,10 +3653,10 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 		struct detached_freelist df;
 
 		size = build_detached_freelist(s, size, p, &df);
-		if (!df.page)
+		if (!df.slab)
 			continue;
 
-		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
+		slab_free(df.s, slab_page(df.slab), df.freelist, df.tail, df.cnt, _RET_IP_);
 	} while (likely(size));
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
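
The hunks above only swap the lookup type (struct page -> struct folio/struct slab); the batching strategy itself is unchanged. As a reader's aid, here is a minimal userspace sketch of that scan-and-batch loop, assuming a fake "slab" derived by masking the object address to a 4 KiB boundary and a free pointer stored in the object's first word. The names sketch_build(), obj_to_slab() and chain() are illustrative stand-ins for build_detached_freelist(), virt_to_slab() and set_freepointer(); none of them are kernel APIs.

/*
 * Illustrative userspace sketch of the detached-freelist strategy; the
 * "slab" is faked by masking the object address to a 4 KiB boundary and
 * the free pointer is kept in the object's first word.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct sketch_freelist {
	void *slab;		/* stand-in for df->slab */
	void *freelist;		/* head of the chained batch */
	void *tail;
	int cnt;
};

static void *obj_to_slab(void *obj)		/* stand-in for virt_to_slab() */
{
	return (void *)((uintptr_t)obj & ~(uintptr_t)4095);
}

static void chain(void *obj, void *next)	/* stand-in for set_freepointer() */
{
	*(void **)obj = next;
}

/* Batch objects from p[] that share one slab; NULL out processed slots. */
static size_t sketch_build(size_t size, void **p, struct sketch_freelist *df)
{
	size_t first_skipped_index = 0;
	int lookahead = 3;
	void *object;

	df->slab = NULL;
	do {
		object = p[--size];
	} while (!object && size);
	if (!object)
		return 0;

	/* Start a new detached freelist from this object's slab. */
	df->slab = obj_to_slab(object);
	chain(object, NULL);
	df->tail = object;
	df->freelist = object;
	df->cnt = 1;
	p[size] = NULL;				/* mark object processed */

	while (size) {
		object = p[--size];
		if (!object)
			continue;		/* skip processed slots */
		if (df->slab == obj_to_slab(object)) {
			/* Same slab: extend the batch. */
			chain(object, df->freelist);
			df->freelist = object;
			df->cnt++;
			p[size] = NULL;		/* mark object processed */
			continue;
		}
		if (!--lookahead)
			break;			/* limit the look ahead */
		if (!first_skipped_index)
			first_skipped_index = size + 1;
	}
	return first_skipped_index;		/* where the next pass resumes */
}

int main(void)
{
	/* Two fake 4 KiB "slabs", each carved into 64-byte objects. */
	char *slab_a = aligned_alloc(4096, 4096);
	char *slab_b = aligned_alloc(4096, 4096);
	void *p[] = { slab_a, slab_b + 64, slab_a + 128,
		      slab_b + 192, slab_a + 256, slab_b + 320 };
	size_t size = 6;

	do {
		struct sketch_freelist df;

		size = sketch_build(size, p, &df);
		if (!df.slab)
			continue;
		/* In the kernel, this batch would go to slab_free() as one unit. */
		printf("slab %p: batch of %d objects\n", df.slab, df.cnt);
	} while (size);

	free(slab_a);
	free(slab_b);
	return 0;
}

Built with a C11 compiler, the sketch should print one batch per fake slab, which mirrors the per-slab batches kmem_cache_free_bulk() hands to slab_free(): building the list inside the caller-owned objects first is what lets each batch be freed with a single synchronized operation.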