Commit 3662c13
Merge branch 'slab/for-6.1/common_kmalloc' into slab/for-next
The "common kmalloc v4" series [1] by Hyeonggon Yoo. - Improves the mm/slab_common.c wrappers to allow deleting duplicated code between SLAB and SLUB. - Large kmalloc() allocations in SLAB are passed to page allocator like in SLUB, reducing number of kmalloc caches. - Removes the {kmem_cache_alloc,kmalloc}_node variants of tracepoints, node id parameter added to non-_node variants. - 8 files changed, 341 insertions(+), 651 deletions(-) [1] https://lore.kernel.org/all/[email protected]/ -- Merge resolves trivial conflict in mm/slub.c with commit 5373b8a ("kasan: call kasan_malloc() from __kmalloc_*track_caller()")
2 parents: 0467ca3 + d5eff73
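As a rough illustration of the size cutoff the series standardizes on, here is a minimal user-space sketch (assuming 4 KiB pages, i.e. PAGE_SHIFT = 12; the macros below only mirror the kernel's new constants and are not kernel code):

#include <stdio.h>

/* Illustrative mirrors of the new include/linux/slab.h constants,
 * assuming PAGE_SHIFT = 12 (4 KiB pages). */
#define PAGE_SHIFT             12
#define KMALLOC_SHIFT_HIGH     (PAGE_SHIFT + 1)            /* 13 */
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH) /* 8 KiB */

int main(void)
{
        /* Requests up to an order-1 page (8 KiB here) are served by
         * kmalloc caches; larger ones now go to the page allocator on
         * both SLAB and SLUB. */
        size_t sizes[] = { 4096, 8192, 16384, 1UL << 20 };

        for (int i = 0; i < 4; i++)
                printf("%zu -> %s\n", sizes[i],
                       sizes[i] <= KMALLOC_MAX_CACHE_SIZE ?
                       "kmalloc cache" : "page allocator");
        return 0;
}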

File tree

8 files changed: +341 −655 lines changed


include/linux/slab.h

Lines changed: 50 additions & 94 deletions
@@ -187,7 +187,6 @@ int kmem_cache_shrink(struct kmem_cache *s);
 void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
 void kfree(const void *objp);
 void kfree_sensitive(const void *objp);
-size_t __ksize(const void *objp);
 size_t ksize(const void *objp);
 #ifdef CONFIG_PRINTK
 bool kmem_valid_obj(void *object);
@@ -243,27 +242,17 @@ static inline unsigned int arch_slab_minalign(void)
 
 #ifdef CONFIG_SLAB
 /*
- * The largest kmalloc size supported by the SLAB allocators is
- * 32 megabyte (2^25) or the maximum allocatable page order if that is
- * less than 32 MB.
- *
- * WARNING: Its not easy to increase this value since the allocators have
- * to do various tricks to work around compiler limitations in order to
- * ensure proper constant folding.
+ * SLAB and SLUB directly allocates requests fitting in to an order-1 page
+ * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
  */
-#define KMALLOC_SHIFT_HIGH      ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
-                                (MAX_ORDER + PAGE_SHIFT - 1) : 25)
-#define KMALLOC_SHIFT_MAX       KMALLOC_SHIFT_HIGH
+#define KMALLOC_SHIFT_HIGH      (PAGE_SHIFT + 1)
+#define KMALLOC_SHIFT_MAX       (MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW       5
 #endif
 #endif
 
 #ifdef CONFIG_SLUB
-/*
- * SLUB directly allocates requests fitting in to an order-1 page
- * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
- */
 #define KMALLOC_SHIFT_HIGH      (PAGE_SHIFT + 1)
 #define KMALLOC_SHIFT_MAX       (MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
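Putting numbers on the new constants (assuming 4 KiB pages, PAGE_SHIFT = 12, and the default MAX_ORDER of 11): KMALLOC_SHIFT_HIGH = 13, so the largest kmalloc cache holds 2^13 = 8 KiB objects, exactly an order-1 page, while KMALLOC_SHIFT_MAX = 11 + 12 - 1 = 22 caps a single kmalloc() at 4 MiB served by the page allocator. Under the old SLAB-specific scheme the cache cutoff could be as high as 32 MiB (2^25).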
@@ -415,10 +404,6 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
        if (size <= 512 * 1024) return 19;
        if (size <= 1024 * 1024) return 20;
        if (size <= 2 * 1024 * 1024) return 21;
-       if (size <= 4 * 1024 * 1024) return 22;
-       if (size <= 8 * 1024 * 1024) return 23;
-       if (size <= 16 * 1024 * 1024) return 24;
-       if (size <= 32 * 1024 * 1024) return 25;
 
        if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
                BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
@@ -428,6 +413,7 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
        /* Will never be reached. Needed because the compiler may complain */
        return -1;
 }
+static_assert(PAGE_SHIFT <= 20);
 #define kmalloc_index(s) __kmalloc_index(s, true)
 #endif /* !CONFIG_SLOB */
 
@@ -456,51 +442,32 @@ static __always_inline void kfree_bulk(size_t size, void **p)
        kmem_cache_free_bulk(NULL, size, p);
 }
 
-#ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
                                                          __alloc_size(1);
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
                                                                          __malloc;
-#else
-static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-       return __kmalloc(size, flags);
-}
-
-static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
-{
-       return kmem_cache_alloc(s, flags);
-}
-#endif
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
-                                   __assume_slab_alignment __alloc_size(3);
-
-#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
-                                        int node, size_t size) __assume_slab_alignment
-                                                               __alloc_size(4);
-#else
-static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
-                                                gfp_t gfpflags, int node, size_t size)
-{
-       return kmem_cache_alloc_trace(s, gfpflags, size);
-}
-#endif /* CONFIG_NUMA */
+void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
+                   __assume_kmalloc_alignment __alloc_size(3);
 
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+                        int node, size_t size) __assume_kmalloc_alignment
+                                               __alloc_size(4);
 #else /* CONFIG_TRACING */
-static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
-                                                                   gfp_t flags, size_t size)
+/* Save a function call when CONFIG_TRACING=n */
+static __always_inline __alloc_size(3)
+void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
 {
        void *ret = kmem_cache_alloc(s, flags);
 
        ret = kasan_kmalloc(s, ret, size, flags);
        return ret;
 }
 
-static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
-                                                        int node, size_t size)
+static __always_inline __alloc_size(4)
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+                        int node, size_t size)
 {
        void *ret = kmem_cache_alloc_node(s, gfpflags, node);
 
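Net effect of the hunk above: kmem_cache_alloc_trace()/kmem_cache_alloc_node_trace() and their CONFIG_NUMA fallbacks collapse into kmalloc_trace()/kmalloc_node_trace(), now annotated __assume_kmalloc_alignment rather than __assume_slab_alignment. When CONFIG_TRACING=n the wrappers remain inline, so a constant-size kmalloc() costs a single out-of-line call (kmem_cache_alloc()) while keeping the kasan_kmalloc() annotation.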
@@ -509,25 +476,11 @@ static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 }
 #endif /* CONFIG_TRACING */
 
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
-                                                                        __alloc_size(1);
-
-#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-                                __assume_page_alignment __alloc_size(1);
-#else
-static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
-                                                                unsigned int order)
-{
-       return kmalloc_order(size, flags, order);
-}
-#endif
+void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
+                                             __alloc_size(1);
 
-static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
-{
-       unsigned int order = get_order(size);
-       return kmalloc_order_trace(size, flags, order);
-}
+void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
+                                                            __alloc_size(1);
 
 /**
  * kmalloc - allocate memory
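A hedged caller-side sketch of the large-allocation path (hypothetical caller, not from this patch; SZ_64K comes from include/linux/sizes.h, and the constant-size fold happens in the inline kmalloc() body, which this diff shows only partially):

/* Hypothetical caller: on a 4 KiB-page system, 64 KiB is above
 * KMALLOC_MAX_CACHE_SIZE, so the inline kmalloc() folds this into
 * kmalloc_large(SZ_64K, GFP_KERNEL), i.e. a page allocator request. */
void *buf = kmalloc(SZ_64K, GFP_KERNEL);
kfree(buf);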
@@ -597,31 +550,43 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
                if (!index)
                        return ZERO_SIZE_PTR;
 
-               return kmem_cache_alloc_trace(
+               return kmalloc_trace(
                                kmalloc_caches[kmalloc_type(flags)][index],
                                flags, size);
 #endif
        }
        return __kmalloc(size, flags);
 }
 
+#ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-#ifndef CONFIG_SLOB
-       if (__builtin_constant_p(size) &&
-               size <= KMALLOC_MAX_CACHE_SIZE) {
-               unsigned int i = kmalloc_index(size);
+       if (__builtin_constant_p(size)) {
+               unsigned int index;
+
+               if (size > KMALLOC_MAX_CACHE_SIZE)
+                       return kmalloc_large_node(size, flags, node);
+
+               index = kmalloc_index(size);
 
-               if (!i)
+               if (!index)
                        return ZERO_SIZE_PTR;
 
-               return kmem_cache_alloc_node_trace(
-                               kmalloc_caches[kmalloc_type(flags)][i],
-                               flags, node, size);
+               return kmalloc_node_trace(
+                               kmalloc_caches[kmalloc_type(flags)][index],
+                               flags, node, size);
        }
-#endif
        return __kmalloc_node(size, flags, node);
 }
+#else
+static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+       if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
+               return kmalloc_large_node(size, flags, node);
+
+       return __kmalloc_node(size, flags, node);
+}
+#endif
 
 /**
  * kmalloc_array - allocate memory for an array.
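A sketch of the three paths the reworked kmalloc_node() can now take at a call site (hypothetical callers; nid and runtime_len are placeholder variables, SZ_4M is from include/linux/sizes.h):

p = kmalloc_node(64, GFP_KERNEL, nid);          /* constant, fits a cache: inlined
                                                   kmalloc_node_trace() on the
                                                   matching kmalloc cache */
p = kmalloc_node(SZ_4M, GFP_KERNEL, nid);       /* constant, above
                                                   KMALLOC_MAX_CACHE_SIZE:
                                                   kmalloc_large_node() */
p = kmalloc_node(runtime_len, GFP_KERNEL, nid); /* not constant: out-of-line
                                                   __kmalloc_node() */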
@@ -671,6 +636,12 @@ static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
        return kmalloc_array(n, size, flags | __GFP_ZERO);
 }
 
+void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
+                                 unsigned long caller) __alloc_size(1);
+#define kmalloc_node_track_caller(size, flags, node) \
+       __kmalloc_node_track_caller(size, flags, node, \
+                                   _RET_IP_)
+
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
  * calling function of the routine calling it for slab leak tracking instead
@@ -679,9 +650,9 @@ static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
  * allocator where we care about the real place the memory allocation
  * request comes from.
  */
-extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
 #define kmalloc_track_caller(size, flags) \
-       __kmalloc_track_caller(size, flags, _RET_IP_)
+       __kmalloc_node_track_caller(size, flags, \
+                                   NUMA_NO_NODE, _RET_IP_)
 
 static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
                                                           int node)
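With __kmalloc_track_caller() removed, both macros funnel into a single implementation: kmalloc_track_caller(size, flags) now expands to __kmalloc_node_track_caller(size, flags, NUMA_NO_NODE, _RET_IP_), so NUMA and non-NUMA builds share one entry point.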
@@ -700,21 +671,6 @@ static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
        return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
 }
 
-
-#ifdef CONFIG_NUMA
-extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
-                                        unsigned long caller) __alloc_size(1);
-#define kmalloc_node_track_caller(size, flags, node) \
-       __kmalloc_node_track_caller(size, flags, node, \
-                                   _RET_IP_)
-
-#else /* CONFIG_NUMA */
-
-#define kmalloc_node_track_caller(size, flags, node) \
-       kmalloc_track_caller(size, flags)
-
-#endif /* CONFIG_NUMA */
-
 /*
  * Shortcuts
  */
