Skip to content

Commit 26a4099

Browse files
hygoni authored and TehCaster committed
mm/sl[au]b: cleanup kmem_cache_alloc[_node]_trace()
Despite its name, kmem_cache_alloc[_node]_trace() is a hook for inlined kmalloc. So rename it to kmalloc[_node]_trace(). Move its implementation to slab_common.c by using __kmem_cache_alloc_node(), but keep CONFIG_TRACING=n variants to save a function call when CONFIG_TRACING=n. Use __assume_kmalloc_alignment for kmalloc[_node]_trace instead of __assume_slab_alignment. Generally kmalloc has larger alignment requirements. Suggested-by: Vlastimil Babka <[email protected]> Signed-off-by: Hyeonggon Yoo <[email protected]> Reviewed-by: Vlastimil Babka <[email protected]> Signed-off-by: Vlastimil Babka <[email protected]>
1 parent b140513 commit 26a4099

File tree

4 files changed

+41
-75
lines changed

4 files changed

+41
-75
lines changed

include/linux/slab.h

Lines changed: 14 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -449,25 +449,26 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assum
449449
__malloc;
450450

451451
#ifdef CONFIG_TRACING
452-
extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
453-
__assume_slab_alignment __alloc_size(3);
454-
455-
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
456-
int node, size_t size) __assume_slab_alignment
457-
__alloc_size(4);
452+
void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
453+
__assume_kmalloc_alignment __alloc_size(3);
458454

455+
void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
456+
int node, size_t size) __assume_kmalloc_alignment
457+
__alloc_size(4);
459458
#else /* CONFIG_TRACING */
460-
static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
461-
gfp_t flags, size_t size)
459+
/* Save a function call when CONFIG_TRACING=n */
460+
static __always_inline __alloc_size(3)
461+
void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
462462
{
463463
void *ret = kmem_cache_alloc(s, flags);
464464

465465
ret = kasan_kmalloc(s, ret, size, flags);
466466
return ret;
467467
}
468468

469-
static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
470-
int node, size_t size)
469+
static __always_inline __alloc_size(4)
470+
void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
471+
int node, size_t size)
471472
{
472473
void *ret = kmem_cache_alloc_node(s, gfpflags, node);
473474

@@ -550,7 +551,7 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
550551
if (!index)
551552
return ZERO_SIZE_PTR;
552553

553-
return kmem_cache_alloc_trace(
554+
return kmalloc_trace(
554555
kmalloc_caches[kmalloc_type(flags)][index],
555556
flags, size);
556557
#endif
@@ -572,9 +573,9 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
572573
if (!index)
573574
return ZERO_SIZE_PTR;
574575

575-
return kmem_cache_alloc_node_trace(
576+
return kmalloc_node_trace(
576577
kmalloc_caches[kmalloc_type(flags)][index],
577-
flags, node, size);
578+
flags, node, size);
578579
}
579580
return __kmalloc_node(size, flags, node);
580581
}

mm/slab.c

Lines changed: 0 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -3519,22 +3519,6 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
35193519
}
35203520
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
35213521

3522-
#ifdef CONFIG_TRACING
3523-
void *
3524-
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3525-
{
3526-
void *ret;
3527-
3528-
ret = slab_alloc(cachep, NULL, flags, size, _RET_IP_);
3529-
3530-
ret = kasan_kmalloc(cachep, ret, size, flags);
3531-
trace_kmalloc(_RET_IP_, ret, cachep,
3532-
size, cachep->size, flags);
3533-
return ret;
3534-
}
3535-
EXPORT_SYMBOL(kmem_cache_alloc_trace);
3536-
#endif
3537-
35383522
/**
35393523
* kmem_cache_alloc_node - Allocate an object on the specified node
35403524
* @cachep: The cache to allocate from.
@@ -3568,25 +3552,6 @@ void *__kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
35683552
orig_size, caller);
35693553
}
35703554

3571-
#ifdef CONFIG_TRACING
3572-
void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3573-
gfp_t flags,
3574-
int nodeid,
3575-
size_t size)
3576-
{
3577-
void *ret;
3578-
3579-
ret = slab_alloc_node(cachep, NULL, flags, nodeid, size, _RET_IP_);
3580-
3581-
ret = kasan_kmalloc(cachep, ret, size, flags);
3582-
trace_kmalloc_node(_RET_IP_, ret, cachep,
3583-
size, cachep->size,
3584-
flags, nodeid);
3585-
return ret;
3586-
}
3587-
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3588-
#endif
3589-
35903555
#ifdef CONFIG_PRINTK
35913556
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
35923557
{

mm/slab_common.c

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1000,6 +1000,33 @@ size_t __ksize(const void *object)
10001000
return slab_ksize(folio_slab(folio)->slab_cache);
10011001
}
10021002
EXPORT_SYMBOL(__ksize);
1003+
1004+
#ifdef CONFIG_TRACING
1005+
void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
1006+
{
1007+
void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
1008+
size, _RET_IP_);
1009+
1010+
trace_kmalloc_node(_RET_IP_, ret, s, size, s->size,
1011+
gfpflags, NUMA_NO_NODE);
1012+
1013+
ret = kasan_kmalloc(s, ret, size, gfpflags);
1014+
return ret;
1015+
}
1016+
EXPORT_SYMBOL(kmalloc_trace);
1017+
1018+
void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
1019+
int node, size_t size)
1020+
{
1021+
void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_);
1022+
1023+
trace_kmalloc_node(_RET_IP_, ret, s, size, s->size, gfpflags, node);
1024+
1025+
ret = kasan_kmalloc(s, ret, size, gfpflags);
1026+
return ret;
1027+
}
1028+
EXPORT_SYMBOL(kmalloc_node_trace);
1029+
#endif /* !CONFIG_TRACING */
10031030
#endif /* !CONFIG_SLOB */
10041031

10051032
gfp_t kmalloc_fix_flags(gfp_t flags)

mm/slub.c

Lines changed: 0 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -3270,17 +3270,6 @@ void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
32703270
caller, orig_size);
32713271
}
32723272

3273-
#ifdef CONFIG_TRACING
3274-
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
3275-
{
3276-
void *ret = slab_alloc(s, NULL, gfpflags, _RET_IP_, size);
3277-
trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags);
3278-
ret = kasan_kmalloc(s, ret, size, gfpflags);
3279-
return ret;
3280-
}
3281-
EXPORT_SYMBOL(kmem_cache_alloc_trace);
3282-
#endif
3283-
32843273
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
32853274
{
32863275
void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
@@ -3292,22 +3281,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
32923281
}
32933282
EXPORT_SYMBOL(kmem_cache_alloc_node);
32943283

3295-
#ifdef CONFIG_TRACING
3296-
void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
3297-
gfp_t gfpflags,
3298-
int node, size_t size)
3299-
{
3300-
void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
3301-
3302-
trace_kmalloc_node(_RET_IP_, ret, s,
3303-
size, s->size, gfpflags, node);
3304-
3305-
ret = kasan_kmalloc(s, ret, size, gfpflags);
3306-
return ret;
3307-
}
3308-
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3309-
#endif
3310-
33113284
/*
33123285
* Slow path handling. This may still be called frequently since objects
33133286
* have a longer lifetime than the cpu slabs in most processing loads.

0 commit comments

Comments
 (0)