@@ -426,8 +426,9 @@ enum kmalloc_cache_type {
 	NR_KMALLOC_TYPES
 };
 
-extern struct kmem_cache *
-kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
+typedef struct kmem_cache * kmem_buckets[KMALLOC_SHIFT_HIGH + 1];
+
+extern kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES];
 
 /*
  * Define gfp bits that should not be set for KMALLOC_NORMAL.
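The typedef does not change the layout: a kmem_buckets is still an array of kmem_cache pointers indexed by kmalloc size class, and kmalloc_caches keeps its old two-dimensional shape. A minimal lookup sketch through the new type, using the existing kmalloc_type()/kmalloc_index() helpers (the pick_cache() wrapper itself is hypothetical):

static inline struct kmem_cache *pick_cache(size_t size, gfp_t flags)
{
	/* One kmem_buckets per kmalloc type (normal, dma, reclaimable, ...). */
	kmem_buckets *b = &kmalloc_caches[kmalloc_type(flags, _RET_IP_)];

	/* ...and one cache per size class within it. */
	return (*b)[kmalloc_index(size)];
}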
@@ -528,9 +529,6 @@ static_assert(PAGE_SHIFT <= 20);
 
 #include <linux/alloc_tag.h>
 
-void *__kmalloc_noprof(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
-#define __kmalloc(...)				alloc_hooks(__kmalloc_noprof(__VA_ARGS__))
-
 /**
  * kmem_cache_alloc - Allocate an object
  * @cachep: The cache to allocate from.
@@ -551,6 +549,10 @@ void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
 
 void kmem_cache_free(struct kmem_cache *s, void *objp);
 
+kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
+				  unsigned int useroffset, unsigned int usersize,
+				  void (*ctor)(void *));
+
 /*
  * Bulk allocation and freeing operations. These are accelerated in an
  * allocator specific way to avoid taking locks repeatedly or building
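For illustration, a subsystem might create its own bucket set at init time. This sketch is hypothetical (the foo_* names and the 0/UINT_MAX whitelist values are placeholders, not from this patch); useroffset/usersize describe the usercopy whitelist as in kmem_cache_create_usercopy():

static kmem_buckets *foo_buckets __ro_after_init;

static int __init foo_init(void)
{
	foo_buckets = kmem_buckets_create("foo_msg", SLAB_ACCOUNT,
					  0, UINT_MAX, NULL);
	/*
	 * A NULL result is tolerable here: passing a NULL bucket pointer
	 * to the bucket-aware allocators later falls back to the shared
	 * kmalloc caches.
	 */
	return 0;
}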
@@ -568,31 +570,49 @@ static __always_inline void kfree_bulk(size_t size, void **p)
 	kmem_cache_free_bulk(NULL, size, p);
 }
 
-void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
-						__alloc_size(1);
-#define __kmalloc_node(...)			alloc_hooks(__kmalloc_node_noprof(__VA_ARGS__))
-
 void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
 				   int node) __assume_slab_alignment __malloc;
 #define kmem_cache_alloc_node(...)	alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
 
-void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
-		__assume_kmalloc_alignment __alloc_size(3);
+/*
+ * These macros allow declaring a kmem_buckets * parameter alongside size, which
+ * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
+ * sites don't have to pass NULL.
+ */
+#ifdef CONFIG_SLAB_BUCKETS
+#define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size), kmem_buckets *(_b)
+#define PASS_BUCKET_PARAMS(_size, _b)	(_size), (_b)
+#define PASS_BUCKET_PARAM(_b)		(_b)
+#else
+#define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size)
+#define PASS_BUCKET_PARAMS(_size, _b)	(_size)
+#define PASS_BUCKET_PARAM(_b)		NULL
+#endif
+
+/*
+ * The following functions are not to be used directly and are intended only
+ * for internal use from kmalloc() and kmalloc_node()
+ * with the exception of kunit tests
+ */
+
+void *__kmalloc_noprof(size_t size, gfp_t flags)
+				__assume_kmalloc_alignment __alloc_size(1);
 
-void *kmalloc_node_trace_noprof(struct kmem_cache *s, gfp_t gfpflags,
-		int node, size_t size) __assume_kmalloc_alignment
-				__alloc_size(4);
-#define kmalloc_trace(...)		alloc_hooks(kmalloc_trace_noprof(__VA_ARGS__))
+void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
+				__assume_kmalloc_alignment __alloc_size(1);
 
-#define kmalloc_node_trace(...)		alloc_hooks(kmalloc_node_trace_noprof(__VA_ARGS__))
+void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
+				__assume_kmalloc_alignment __alloc_size(3);
 
-void *kmalloc_large_noprof(size_t size, gfp_t flags) __assume_page_alignment
-				__alloc_size(1);
-#define kmalloc_large(...)		alloc_hooks(kmalloc_large_noprof(__VA_ARGS__))
+void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
+				  int node, size_t size)
+				__assume_kmalloc_alignment __alloc_size(4);
 
-void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) __assume_page_alignment
-				__alloc_size(1);
-#define kmalloc_large_node(...)		alloc_hooks(kmalloc_large_node_noprof(__VA_ARGS__))
+void *__kmalloc_large_noprof(size_t size, gfp_t flags)
+				__assume_page_alignment __alloc_size(1);
+
+void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
+				__assume_page_alignment __alloc_size(1);
 
 /**
  * kmalloc - allocate kernel memory
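To make the compile-out behavior concrete, here is how the bucket-aware prototype above expands under each configuration (expansions written out by hand):

/* CONFIG_SLAB_BUCKETS=y:
 *   void *__kmalloc_node_noprof(size_t (size), kmem_buckets *(b),
 *				 gfp_t flags, int node);
 * CONFIG_SLAB_BUCKETS=n -- the bucket parameter vanishes entirely:
 *   void *__kmalloc_node_noprof(size_t (size), gfp_t flags, int node);
 * Correspondingly, PASS_BUCKET_PARAMS(size, NULL) at a call site expands
 * to "(size), (NULL)" or just "(size)", keeping callers in sync.
 */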
@@ -604,7 +624,8 @@ void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) __assume_pag
  *
  * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
  * bytes. For @size of power of two bytes, the alignment is also guaranteed
- * to be at least to the size.
+ * to be at least to the size. For other sizes, the alignment is guaranteed to
+ * be at least the largest power-of-two divisor of @size.
  *
  * The @flags argument may be one of the GFP flags defined at
  * include/linux/gfp_types.h and described at
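Two worked examples of the documented guarantee (my arithmetic, not part of the patch): kmalloc(1024, ...) requests a power-of-two size and is therefore aligned to at least 1024 bytes; kmalloc(96, ...) is aligned to at least 32 bytes, since 96 = 32 * 3 and 32 is its largest power-of-two divisor.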
@@ -654,31 +675,37 @@ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t f
 		unsigned int index;
 
 		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large_noprof(size, flags);
+			return __kmalloc_large_noprof(size, flags);
 
 		index = kmalloc_index(size);
-		return kmalloc_trace_noprof(
+		return __kmalloc_cache_noprof(
 				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
 				flags, size);
 	}
 	return __kmalloc_noprof(size, flags);
 }
 #define kmalloc(...)				alloc_hooks(kmalloc_noprof(__VA_ARGS__))
 
+#define kmem_buckets_alloc(_b, _size, _flags)	\
+	alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
+
+#define kmem_buckets_alloc_track_caller(_b, _size, _flags)	\
+	alloc_hooks(__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE, _RET_IP_))
+
 static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) && size) {
 		unsigned int index;
 
 		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large_node_noprof(size, flags, node);
+			return __kmalloc_large_node_noprof(size, flags, node);
 
 		index = kmalloc_index(size);
-		return kmalloc_node_trace_noprof(
+		return __kmalloc_cache_node_noprof(
 				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
 				flags, node, size);
 	}
-	return __kmalloc_node_noprof(size, flags, node);
+	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
 }
 #define kmalloc_node(...)			alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))
 
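A usage sketch for the new accessor, continuing the hypothetical foo_buckets from above. kmem_buckets_alloc() behaves like kmalloc() but draws from the given bucket set:

static void *foo_msg_alloc(size_t len)
{
	/*
	 * A length-controlled allocation comes from foo's dedicated
	 * buckets rather than the shared kmalloc caches; a NULL bucket
	 * pointer falls back to the normal buckets.
	 */
	return kmem_buckets_alloc(foo_buckets, len, GFP_KERNEL);
}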
@@ -729,8 +756,10 @@ static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(voi
  */
 #define kcalloc(n, size, flags)		kmalloc_array(n, size, (flags) | __GFP_ZERO)
 
-void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node,
-				       unsigned long caller) __alloc_size(1);
+void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node,
+					 unsigned long caller) __alloc_size(1);
+#define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
+	__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
 #define kmalloc_node_track_caller(...)		\
 	alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))
 
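The old kmalloc_node_track_caller_noprof() name is preserved as a macro, so existing callers compile unchanged. Written out by hand, kmalloc_node_track_caller(size, flags, node) now expands (roughly) to:

/*
 * alloc_hooks(__kmalloc_node_track_caller_noprof(
 *		PASS_BUCKET_PARAMS(size, NULL), flags, node, _RET_IP_));
 *
 * i.e. legacy callers implicitly pass a NULL bucket set.
 */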
@@ -756,7 +785,7 @@ static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_
 		return NULL;
 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
 		return kmalloc_node_noprof(bytes, flags, node);
-	return __kmalloc_node_noprof(bytes, flags, node);
+	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node);
 }
 #define kmalloc_array_node(...)			alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))
 
@@ -780,14 +809,18 @@ static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
 #define kzalloc(...)				alloc_hooks(kzalloc_noprof(__VA_ARGS__))
 #define kzalloc_node(_size, _flags, _node)	kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
 
-extern void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node) __alloc_size(1);
+void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) __alloc_size(1);
+#define kvmalloc_node_noprof(size, flags, node)	\
+	__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node)
 #define kvmalloc_node(...)			alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__))
 
 #define kvmalloc(_size, _flags)			kvmalloc_node(_size, _flags, NUMA_NO_NODE)
 #define kvmalloc_noprof(_size, _flags)		kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE)
 #define kvzalloc(_size, _flags)			kvmalloc(_size, (_flags)|__GFP_ZERO)
 
 #define kvzalloc_node(_size, _flags, _node)	kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
+#define kmem_buckets_valloc(_b, _size, _flags)	\
+	alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
 
 static inline __alloc_size(1, 2) void *
 kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
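Finally, a sketch for the kvmalloc-style entry point, again using the hypothetical foo_buckets. kmem_buckets_valloc() is the bucket-aware analogue of kvmalloc(), so requests too large for the slab buckets can still take the vmalloc fallback:

static void *foo_table_alloc(size_t size)
{
	/* Slab buckets first; vmalloc fallback for large sizes. */
	return kmem_buckets_valloc(foo_buckets, size, GFP_KERNEL);
}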