
Commit a0a44d9

mm, slab: don't wrap internal functions with alloc_hooks()
The functions __kmalloc_noprof(), kmalloc_large_noprof(), kmalloc_trace_noprof() and their _node variants are all internal to the implementations of kmalloc_noprof() and kmalloc_node_noprof(). They are only declared in the "public" slab.h and exported so that those implementations can be static inline and distinguish the build-time constant size variants. The only other users of some of the internal functions are the slub_kunit and fortify_kunit tests, which make very short-lived allocations. Therefore we can stop wrapping them with the alloc_hooks() macro.

Instead, add a __ prefix to all of them and a comment documenting these as internal. Also rename __kmalloc_trace() to __kmalloc_cache(), which is more descriptive - it is a variant of __kmalloc() where the exact kmalloc cache has already been determined.

The usage in fortify_kunit can be removed completely, as the internal functions should already be tested through kmalloc() tests in the test variant that passes a non-constant allocation size.

Reported-by: Kent Overstreet <[email protected]>
Cc: Suren Baghdasaryan <[email protected]>
Cc: Kees Cook <[email protected]>
Reviewed-by: Kent Overstreet <[email protected]>
Acked-by: David Rientjes <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
1 parent 1613e60 commit a0a44d9
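
For context on what is being dropped: alloc_hooks() is the glue between an allocation call site and memory allocation profiling. A rough sketch of the pattern, abridged from include/linux/alloc_tag.h (the real macros also handle the config-off stubs), looks like this:

#define alloc_hooks_tag(_tag, _do_alloc)				\
({									\
	struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag);	\
	typeof(_do_alloc) _res = _do_alloc;				\
	alloc_tag_restore(_tag, _old);					\
	_res;								\
})

#define alloc_hooks(_do_alloc)						\
({									\
	/* one static tag per call site, attributed to file:line */	\
	DEFINE_ALLOC_TAG(_alloc_tag);					\
	alloc_hooks_tag(&_alloc_tag, _do_alloc);			\
})

Because the internal functions touched by this commit are only ever reached from inside kmalloc_noprof() and kmalloc_node_noprof(), whose public kmalloc()/kmalloc_node() macro wrappers already apply alloc_hooks(), wrapping them a second time would only create redundant per-call-site tags.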

4 files changed: 38 additions, 43 deletions

include/linux/slab.h (24 additions, 24 deletions)
@@ -528,9 +528,6 @@ static_assert(PAGE_SHIFT <= 20);
 
 #include <linux/alloc_tag.h>
 
-void *__kmalloc_noprof(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
-#define __kmalloc(...)			alloc_hooks(__kmalloc_noprof(__VA_ARGS__))
-
 /**
  * kmem_cache_alloc - Allocate an object
  * @cachep: The cache to allocate from.
@@ -568,31 +565,34 @@ static __always_inline void kfree_bulk(size_t size, void **p)
 	kmem_cache_free_bulk(NULL, size, p);
 }
 
-void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
-							__alloc_size(1);
-#define __kmalloc_node(...)		alloc_hooks(__kmalloc_node_noprof(__VA_ARGS__))
-
 void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
 				   int node) __assume_slab_alignment __malloc;
 #define kmem_cache_alloc_node(...)	alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
 
-void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
-		    __assume_kmalloc_alignment __alloc_size(3);
+/*
+ * The following functions are not to be used directly and are intended only
+ * for internal use from kmalloc() and kmalloc_node()
+ * with the exception of kunit tests
+ */
+
+void *__kmalloc_noprof(size_t size, gfp_t flags)
+				__assume_kmalloc_alignment __alloc_size(1);
+
+void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node)
+				__assume_kmalloc_alignment __alloc_size(1);
 
-void *kmalloc_node_trace_noprof(struct kmem_cache *s, gfp_t gfpflags,
-		int node, size_t size) __assume_kmalloc_alignment
-						__alloc_size(4);
-#define kmalloc_trace(...)		alloc_hooks(kmalloc_trace_noprof(__VA_ARGS__))
+void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
+				__assume_kmalloc_alignment __alloc_size(3);
 
-#define kmalloc_node_trace(...)		alloc_hooks(kmalloc_node_trace_noprof(__VA_ARGS__))
+void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
+				  int node, size_t size)
+				__assume_kmalloc_alignment __alloc_size(4);
 
-void *kmalloc_large_noprof(size_t size, gfp_t flags) __assume_page_alignment
-						__alloc_size(1);
-#define kmalloc_large(...)		alloc_hooks(kmalloc_large_noprof(__VA_ARGS__))
+void *__kmalloc_large_noprof(size_t size, gfp_t flags)
+				__assume_page_alignment __alloc_size(1);
 
-void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) __assume_page_alignment
-						__alloc_size(1);
-#define kmalloc_large_node(...)	alloc_hooks(kmalloc_large_node_noprof(__VA_ARGS__))
+void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
+				__assume_page_alignment __alloc_size(1);
 
 /**
  * kmalloc - allocate kernel memory
@@ -654,10 +654,10 @@ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags)
 	unsigned int index;
 
 	if (size > KMALLOC_MAX_CACHE_SIZE)
-		return kmalloc_large_noprof(size, flags);
+		return __kmalloc_large_noprof(size, flags);
 
 	index = kmalloc_index(size);
-	return kmalloc_trace_noprof(
+	return __kmalloc_cache_noprof(
 			kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
 			flags, size);
 }
@@ -671,10 +671,10 @@ static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
 	unsigned int index;
 
 	if (size > KMALLOC_MAX_CACHE_SIZE)
-		return kmalloc_large_node_noprof(size, flags, node);
+		return __kmalloc_large_node_noprof(size, flags, node);
 
 	index = kmalloc_index(size);
-	return kmalloc_node_trace_noprof(
+	return __kmalloc_cache_node_noprof(
 			kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
 			flags, node, size);
 }
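
To illustrate what the reshuffled declarations mean for callers, here is a hypothetical allocation with a build-time constant size; nothing changes at the call site, and the single alloc_hooks() wrap stays at the outermost public macro:

/* Hypothetical caller, following the declarations in this patch. */
void *buf = kmalloc(64, GFP_KERNEL);
/*
 * kmalloc(64, GFP_KERNEL) expands to
 *     alloc_hooks(kmalloc_noprof(64, GFP_KERNEL))
 * and, since 64 is a build-time constant not exceeding
 * KMALLOC_MAX_CACHE_SIZE, the static inline kmalloc_noprof()
 * resolves the cache index at compile time and tail-calls
 *     __kmalloc_cache_noprof(kmalloc_caches[type][index], flags, 64);
 * the internal helper runs under the caller's tag and needs no
 * second alloc_hooks() wrap of its own.
 */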

lib/fortify_kunit.c (0 additions, 5 deletions)
@@ -233,11 +233,6 @@ static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \
 		      kfree(p));					\
 	checker(expected_size,						\
 		kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE),	\
-		      kfree(p));					\
-	checker(expected_size, __kmalloc(alloc_size, gfp),		\
-		      kfree(p));					\
-	checker(expected_size,						\
-		__kmalloc_node(alloc_size, gfp, NUMA_NO_NODE),		\
 		      kfree(p));					\
 									\
 	orig = kmalloc(alloc_size, gfp);				\
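
Dropping these checks loses no coverage; the commit message's reasoning, spelled out as a sketch (the call chain below is reconstructed from the kmalloc_noprof() inline shown earlier, it is not part of this patch):

/*
 * In the test variant where alloc_size is not a compile-time
 * constant, the static inline kmalloc_noprof() cannot resolve a
 * cache index and falls through to the internal function:
 *
 *     kmalloc(alloc_size, gfp)
 *       -> kmalloc_noprof(alloc_size, gfp)
 *         -> __kmalloc_noprof(alloc_size, gfp)
 *
 * so __kmalloc_noprof() (and the _node variant via kmalloc_node())
 * keeps being exercised without a direct call.
 */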

lib/slub_kunit.c (1 addition, 1 deletion)
@@ -140,7 +140,7 @@ static void test_kmalloc_redzone_access(struct kunit *test)
 {
 	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
 				SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
-	u8 *p = kmalloc_trace(s, GFP_KERNEL, 18);
+	u8 *p = __kmalloc_cache_noprof(s, GFP_KERNEL, 18);
 
 	kasan_disable_current();
 
mm/slub.c (13 additions, 13 deletions)
@@ -4053,7 +4053,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
  * directly to the page allocator. We use __GFP_COMP, because we will need to
  * know the allocation order to free the pages properly in kfree.
  */
-static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
+static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	struct folio *folio;
 	void *ptr = NULL;
@@ -4078,25 +4078,25 @@ static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
 	return ptr;
 }
 
-void *kmalloc_large_noprof(size_t size, gfp_t flags)
+void *__kmalloc_large_noprof(size_t size, gfp_t flags)
 {
-	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
+	void *ret = ___kmalloc_large_node(size, flags, NUMA_NO_NODE);
 
 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
 		      flags, NUMA_NO_NODE);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_large_noprof);
+EXPORT_SYMBOL(__kmalloc_large_noprof);
 
-void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
+void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
 {
-	void *ret = __kmalloc_large_node(size, flags, node);
+	void *ret = ___kmalloc_large_node(size, flags, node);
 
 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
 		      flags, node);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_large_node_noprof);
+EXPORT_SYMBOL(__kmalloc_large_node_noprof);
 
 static __always_inline
 void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
@@ -4106,7 +4106,7 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
 	void *ret;
 
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
-		ret = __kmalloc_large_node(size, flags, node);
+		ret = __kmalloc_large_node_noprof(size, flags, node);
 		trace_kmalloc(caller, ret, size,
 			      PAGE_SIZE << get_order(size), flags, node);
 		return ret;
@@ -4142,7 +4142,7 @@ void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags,
 }
 EXPORT_SYMBOL(kmalloc_node_track_caller_noprof);
 
-void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
 				    _RET_IP_, size);
@@ -4152,10 +4152,10 @@ void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_trace_noprof);
+EXPORT_SYMBOL(__kmalloc_cache_noprof);
 
-void *kmalloc_node_trace_noprof(struct kmem_cache *s, gfp_t gfpflags,
-		int node, size_t size)
+void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
+				  int node, size_t size)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
 
@@ -4164,7 +4164,7 @@ void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_node_trace_noprof);
+EXPORT_SYMBOL(__kmalloc_cache_node_noprof);
 
 static noinline void free_to_partial_list(
 	struct kmem_cache *s, struct slab *slab,
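
One naming wrinkle worth spelling out: with the exported page-allocator entry points now carrying the __ prefix, the static helper they share picks up a third underscore. The large-allocation call chain, as reconstructed from this diff, is roughly:

/*
 * kmalloc(size, flags)                 public macro, alloc_hooks() wrap
 *   -> kmalloc_noprof(size, flags)     static inline in slab.h
 *     -> __kmalloc_large_noprof()      exported, emits trace_kmalloc()
 *       -> ___kmalloc_large_node()     static, allocates __GFP_COMP pages
 */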
