@@ -187,7 +187,6 @@ int kmem_cache_shrink(struct kmem_cache *s);
 void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
 void kfree(const void *objp);
 void kfree_sensitive(const void *objp);
-size_t __ksize(const void *objp);
 size_t ksize(const void *objp);
 #ifdef CONFIG_PRINTK
 bool kmem_valid_obj(void *object);
@@ -243,27 +242,17 @@ static inline unsigned int arch_slab_minalign(void)
 
 #ifdef CONFIG_SLAB
 /*
- * The largest kmalloc size supported by the SLAB allocators is
- * 32 megabyte (2^25) or the maximum allocatable page order if that is
- * less than 32 MB.
- *
- * WARNING: Its not easy to increase this value since the allocators have
- * to do various tricks to work around compiler limitations in order to
- * ensure proper constant folding.
+ * SLAB and SLUB directly allocates requests fitting in to an order-1 page
+ * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
  */
-#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
-				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
-#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
+#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
+#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	5
 #endif
 #endif
 
 #ifdef CONFIG_SLUB
-/*
- * SLUB directly allocates requests fitting in to an order-1 page
- * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
- */
 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
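For orientation, a minimal userspace sketch (not kernel code) of what the unified limits work out to; PAGE_SHIFT = 12 and MAX_ORDER = 11 are assumed here for illustration and are not part of this patch:

#include <stdio.h>

#define PAGE_SHIFT		12	/* assumed: 4 KiB pages */
#define MAX_ORDER		11	/* assumed: a common default */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)		/* 13 */
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)	/* 22 */

int main(void)
{
	/* Largest size still served from a kmalloc cache: 2^13 = 8 KiB (an order-1 page). */
	printf("max kmalloc cache size: %lu\n", 1UL << KMALLOC_SHIFT_HIGH);
	/* Largest kmalloc() request overall: 2^22 = 4 MiB, handed to the page allocator. */
	printf("max kmalloc size:       %lu\n", 1UL << KMALLOC_SHIFT_MAX);
	return 0;
}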
@@ -415,10 +404,6 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
 	if (size <=  512 * 1024) return 19;
 	if (size <= 1024 * 1024) return 20;
 	if (size <=  2 * 1024 * 1024) return 21;
-	if (size <=  4 * 1024 * 1024) return 22;
-	if (size <=  8 * 1024 * 1024) return 23;
-	if (size <= 16 * 1024 * 1024) return 24;
-	if (size <= 32 * 1024 * 1024) return 25;
 
 	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
 		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
@@ -428,6 +413,7 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
 	/* Will never be reached. Needed because the compiler may complain */
 	return -1;
 }
+static_assert(PAGE_SHIFT <= 20);
 #define kmalloc_index(s) __kmalloc_index(s, true)
 #endif /* !CONFIG_SLOB */
 
@@ -456,51 +442,32 @@ static __always_inline void kfree_bulk(size_t size, void **p)
 	kmem_cache_free_bulk(NULL, size, p);
 }
 
-#ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
							 __alloc_size(1);
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
									  __malloc;
-#else
-static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return __kmalloc(size, flags);
-}
-
-static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
-{
-	return kmem_cache_alloc(s, flags);
-}
-#endif
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
-				    __assume_slab_alignment __alloc_size(3);
-
-#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
-					 int node, size_t size) __assume_slab_alignment
-					 __alloc_size(4);
-#else
-static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
-						 gfp_t gfpflags, int node, size_t size)
-{
-	return kmem_cache_alloc_trace(s, gfpflags, size);
-}
-#endif /* CONFIG_NUMA */
+void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
+		    __assume_kmalloc_alignment __alloc_size(3);
 
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+			 int node, size_t size) __assume_kmalloc_alignment
						 __alloc_size(4);
 #else /* CONFIG_TRACING */
-static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
-								    gfp_t flags, size_t size)
+/* Save a function call when CONFIG_TRACING=n */
+static __always_inline __alloc_size(3)
+void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
 {
 	void *ret = kmem_cache_alloc(s, flags);
 
 	ret = kasan_kmalloc(s, ret, size, flags);
 	return ret;
 }
 
-static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
-							  int node, size_t size)
+static __always_inline __alloc_size(4)
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+			 int node, size_t size)
 {
 	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
 
@@ -509,25 +476,11 @@ static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, g
 }
 #endif /* CONFIG_TRACING */
 
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
-							__alloc_size(1);
-
-#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-				__assume_page_alignment __alloc_size(1);
-#else
-static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
-								 unsigned int order)
-{
-	return kmalloc_order(size, flags, order);
-}
-#endif
+void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
+					      __alloc_size(1);
 
-static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
-{
-	unsigned int order = get_order(size);
-	return kmalloc_order_trace(size, flags, order);
-}
+void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
+							     __alloc_size(1);
 
 /**
  * kmalloc - allocate memory
@@ -597,31 +550,43 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 		if (!index)
 			return ZERO_SIZE_PTR;
 
-		return kmem_cache_alloc_trace(
+		return kmalloc_trace(
 				kmalloc_caches[kmalloc_type(flags)][index],
 				flags, size);
 #endif
 	}
 	return __kmalloc(size, flags);
 }
 
+#ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-#ifndef CONFIG_SLOB
-	if (__builtin_constant_p(size) &&
-		size <= KMALLOC_MAX_CACHE_SIZE) {
-		unsigned int i = kmalloc_index(size);
+	if (__builtin_constant_p(size)) {
+		unsigned int index;
+
+		if (size > KMALLOC_MAX_CACHE_SIZE)
+			return kmalloc_large_node(size, flags, node);
+
+		index = kmalloc_index(size);
 
-		if (!i)
+		if (!index)
 			return ZERO_SIZE_PTR;
 
-		return kmem_cache_alloc_node_trace(
-				kmalloc_caches[kmalloc_type(flags)][i],
-				flags, node, size);
+		return kmalloc_node_trace(
+				kmalloc_caches[kmalloc_type(flags)][index],
+				flags, node, size);
 	}
-#endif
 	return __kmalloc_node(size, flags, node);
 }
+#else
+static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
+		return kmalloc_large_node(size, flags, node);
+
+	return __kmalloc_node(size, flags, node);
+}
+#endif
 
 /**
  * kmalloc_array - allocate memory for an array.
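As a hedged illustration of the new constant-size dispatch (hypothetical caller, not part of the patch): when the size is a compile-time constant above KMALLOC_MAX_CACHE_SIZE, the inline kmalloc_node() above now folds straight into kmalloc_large_node() instead of falling through to __kmalloc_node():

#include <linux/slab.h>

/* Hypothetical helper; any constant size larger than the order-1 cache limit
 * (e.g. 64 KiB on a 4 KiB-page build) takes the kmalloc_large_node() path. */
static void *example_alloc_on_node(int node)
{
	return kmalloc_node(64 * 1024, GFP_KERNEL, node);
}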
@@ -671,6 +636,12 @@ static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flag
 	return kmalloc_array(n, size, flags | __GFP_ZERO);
 }
 
+void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
+				  unsigned long caller) __alloc_size(1);
+#define kmalloc_node_track_caller(size, flags, node) \
+	__kmalloc_node_track_caller(size, flags, node, \
+				    _RET_IP_)
+
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
  * calling function of the routine calling it for slab leak tracking instead
@@ -679,9 +650,9 @@ static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flag
  * allocator where we care about the real place the memory allocation
  * request comes from.
  */
-extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
 #define kmalloc_track_caller(size, flags) \
-	__kmalloc_track_caller(size, flags, _RET_IP_)
+	__kmalloc_node_track_caller(size, flags, \
+				    NUMA_NO_NODE, _RET_IP_)
 
 static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
 							  int node)
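A hedged usage sketch of the track-caller path (the wrapper name here is hypothetical): a small helper calls kmalloc_track_caller() so that slab debugging attributes the memory to the helper's caller via _RET_IP_; after this change the macro expands to __kmalloc_node_track_caller() with NUMA_NO_NODE instead of the old __kmalloc_track_caller():

#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical wrapper in the spirit of kmemdup(): leak reports point at
 * whoever called example_dup(), not at example_dup() itself. */
static void *example_dup(const void *src, size_t len, gfp_t gfp)
{
	void *p = kmalloc_track_caller(len, gfp);

	if (p)
		memcpy(p, src, len);
	return p;
}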
@@ -700,21 +671,6 @@ static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t
 	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
 }
 
-
-#ifdef CONFIG_NUMA
-extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
-					 unsigned long caller) __alloc_size(1);
-#define kmalloc_node_track_caller(size, flags, node) \
-	__kmalloc_node_track_caller(size, flags, node, \
-				    _RET_IP_)
-
-#else /* CONFIG_NUMA */
-
-#define kmalloc_node_track_caller(size, flags, node) \
-	kmalloc_track_caller(size, flags)
-
-#endif /* CONFIG_NUMA */
-
 /*
  * Shortcuts
  */