@@ -723,6 +723,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 					unsigned int orig_size)
 {
 	unsigned int zero_size = s->object_size;
+	bool kasan_init = init;
 	size_t i;
 
 	flags &= gfp_allowed_mask;
@@ -739,6 +740,17 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 	    (s->flags & SLAB_KMALLOC))
 		zero_size = orig_size;
 
+	/*
+	 * When slub_debug is enabled, avoid memory initialization integrated
+	 * into KASAN and instead zero out the memory via the memset below with
+	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
+	 * cause false-positive reports. This does not lead to a performance
+	 * penalty on production builds, as slub_debug is not intended to be
+	 * enabled there.
+	 */
+	if (__slub_debug_enabled())
+		kasan_init = false;
+
 	/*
 	 * As memory initialization might be integrated into KASAN,
 	 * kasan_slab_alloc and initialization memset must be
@@ -747,8 +759,8 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
 	 */
 	for (i = 0; i < size; i++) {
-		p[i] = kasan_slab_alloc(s, p[i], flags, init);
-		if (p[i] && init && !kasan_has_integrated_init())
+		p[i] = kasan_slab_alloc(s, p[i], flags, kasan_init);
+		if (p[i] && init && (!kasan_init || !kasan_has_integrated_init()))
 			memset(p[i], 0, zero_size);
 		kmemleak_alloc_recursive(p[i], s->object_size, 1,
 					 s->flags, flags);
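
The snippet below is a small standalone userspace model, not part of the patch, of the zeroing decision the new condition encodes: an object requested with init=true must be zeroed exactly once, either by KASAN's integrated initialization or by the plain memset (which under slub_debug uses the redzone-aware zero_size). kasan_has_integrated_init() is stubbed here purely for illustration.

/*
 * Standalone model of the post-patch zeroing decision. Not kernel code:
 * kasan_has_integrated_init() is a stub; in the kernel it reports whether the
 * active KASAN mode (e.g. HW_TAGS) initializes objects itself.
 */
#include <stdbool.h>
#include <stdio.h>

/* Stub: pretend a KASAN mode with integrated init is active. */
static bool kasan_has_integrated_init(void)
{
	return true;
}

/* Mirrors: if (p[i] && init && (!kasan_init || !kasan_has_integrated_init())) */
static bool plain_memset_needed(bool init, bool kasan_init)
{
	/*
	 * KASAN zeroes the object only if it was asked to (kasan_init) and
	 * actually integrates initialization; otherwise the memset must run.
	 */
	bool kasan_zeroes = kasan_init && kasan_has_integrated_init();

	return init && !kasan_zeroes;
}

int main(void)
{
	/* slub_debug off: kasan_init == init, KASAN zeroes, memset skipped. */
	printf("slub_debug off: memset needed = %d\n",
	       plain_memset_needed(true, true));
	/*
	 * slub_debug on: kasan_init forced to false, so the memset does the
	 * zeroing and SLUB redzones are left untouched by KASAN.
	 */
	printf("slub_debug on:  memset needed = %d\n",
	       plain_memset_needed(true, false));
	return 0;
}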