Skip to content

Commit ee5b455

Browse files
committed
Merge tag 'slab-for-6.9-rc7-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab fixes from Vlastimil Babka:

 - Fix for cleanup infrastructure (Dan Carpenter). This makes the
   __free(kfree) cleanup hooks not crash on error pointers.

 - SLUB fix for freepointer checking (Nicolas Bouchinet). This fixes a
   recently introduced bug that manifests when init_on_free,
   CONFIG_SLAB_FREELIST_HARDENED and consistency checks (slub_debug=F)
   are all enabled, and results in false-positive freepointer corrupt
   reports for caches that store the freepointer outside of the object
   area.

* tag 'slab-for-6.9-rc7-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm/slab: make __free(kfree) accept error pointers
  mm/slub: avoid zeroing outside-object freepointer for single free
2 parents c73677c + cd7eb8f commit ee5b455

File tree

2 files changed

+31
-25
lines changed

2 files changed

+31
-25
lines changed

include/linux/slab.h

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -266,7 +266,7 @@ void kfree(const void *objp);
266266
void kfree_sensitive(const void *objp);
267267
size_t __ksize(const void *objp);
268268

269-
DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
269+
DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
270270

271271
/**
272272
* ksize - Report actual allocation size of associated object
@@ -792,7 +792,7 @@ static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t fla
792792
extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
793793
__realloc_size(3);
794794
extern void kvfree(const void *addr);
795-
DEFINE_FREE(kvfree, void *, if (_T) kvfree(_T))
795+
DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))
796796

797797
extern void kvfree_sensitive(const void *addr, size_t len);
798798

mm/slub.c

Lines changed: 29 additions & 23 deletions
Original file line number | Diff line number | Diff line change
@@ -557,6 +557,26 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
557557
*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
558558
}
559559

560+
/*
561+
* See comment in calculate_sizes().
562+
*/
563+
static inline bool freeptr_outside_object(struct kmem_cache *s)
564+
{
565+
return s->offset >= s->inuse;
566+
}
567+
568+
/*
569+
* Return offset of the end of info block which is inuse + free pointer if
570+
* not overlapping with object.
571+
*/
572+
static inline unsigned int get_info_end(struct kmem_cache *s)
573+
{
574+
if (freeptr_outside_object(s))
575+
return s->inuse + sizeof(void *);
576+
else
577+
return s->inuse;
578+
}
579+
560580
/* Loop over all objects in a slab */
561581
#define for_each_object(__p, __s, __addr, __objects) \
562582
for (__p = fixup_red_left(__s, __addr); \
@@ -845,26 +865,6 @@ static void print_section(char *level, char *text, u8 *addr,
845865
metadata_access_disable();
846866
}
847867

848-
/*
849-
* See comment in calculate_sizes().
850-
*/
851-
static inline bool freeptr_outside_object(struct kmem_cache *s)
852-
{
853-
return s->offset >= s->inuse;
854-
}
855-
856-
/*
857-
* Return offset of the end of info block which is inuse + free pointer if
858-
* not overlapping with object.
859-
*/
860-
static inline unsigned int get_info_end(struct kmem_cache *s)
861-
{
862-
if (freeptr_outside_object(s))
863-
return s->inuse + sizeof(void *);
864-
else
865-
return s->inuse;
866-
}
867-
868868
static struct track *get_track(struct kmem_cache *s, void *object,
869869
enum track_item alloc)
870870
{
@@ -2092,15 +2092,20 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
20922092
*
20932093
* The initialization memset's clear the object and the metadata,
20942094
* but don't touch the SLAB redzone.
2095+
*
2096+
* The object's freepointer is also avoided if stored outside the
2097+
* object.
20952098
*/
20962099
if (unlikely(init)) {
20972100
int rsize;
2101+
unsigned int inuse;
20982102

2103+
inuse = get_info_end(s);
20992104
if (!kasan_has_integrated_init())
21002105
memset(kasan_reset_tag(x), 0, s->object_size);
21012106
rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
2102-
memset((char *)kasan_reset_tag(x) + s->inuse, 0,
2103-
s->size - s->inuse - rsize);
2107+
memset((char *)kasan_reset_tag(x) + inuse, 0,
2108+
s->size - inuse - rsize);
21042109
}
21052110
/* KASAN might put x into memory quarantine, delaying its reuse. */
21062111
return !kasan_slab_free(s, x, init);
@@ -3722,7 +3727,8 @@ static void *__slab_alloc_node(struct kmem_cache *s,
37223727
static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
37233728
void *obj)
37243729
{
3725-
if (unlikely(slab_want_init_on_free(s)) && obj)
3730+
if (unlikely(slab_want_init_on_free(s)) && obj &&
3731+
!freeptr_outside_object(s))
37263732
memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
37273733
0, sizeof(void *));
37283734
}

0 commit comments

Comments (0)