Skip to content

Commit 6ed8bfd

Browse files
GeHao01994 authored and tehcaster committed
slab: Avoid race on slab->obj_exts in alloc_slab_obj_exts

If two competing threads enter alloc_slab_obj_exts() and one of them
fails to allocate the object extension vector, it might override the
valid slab->obj_exts allocated by the other thread with
OBJEXTS_ALLOC_FAIL. This will cause the thread that lost this race and
expects a valid pointer to dereference a NULL pointer later on.

Update slab->obj_exts atomically using cmpxchg() to avoid
slab->obj_exts overrides by racing threads.

Thanks to Vlastimil and Suren for their help with debugging.

Fixes: f7381b9 ("slab: mark slab->obj_exts allocation failures unconditionally")
Cc: <[email protected]>
Suggested-by: Suren Baghdasaryan <[email protected]>
Signed-off-by: Hao Ge <[email protected]>
Reviewed-by: Harry Yoo <[email protected]>
Reviewed-by: Suren Baghdasaryan <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Vlastimil Babka <[email protected]>
1 parent 86f54f9 commit 6ed8bfd

File tree

1 file changed

+6
-3
lines changed

1 file changed

+6
-3
lines changed

mm/slub.c

Lines changed: 6 additions & 3 deletions
@@ -2054,7 +2054,7 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
 
 static inline void mark_failed_objexts_alloc(struct slab *slab)
 {
-	slab->obj_exts = OBJEXTS_ALLOC_FAIL;
+	cmpxchg(&slab->obj_exts, 0, OBJEXTS_ALLOC_FAIL);
 }
 
 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
@@ -2136,6 +2136,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 #ifdef CONFIG_MEMCG
 	new_exts |= MEMCG_DATA_OBJEXTS;
 #endif
+retry:
 	old_exts = READ_ONCE(slab->obj_exts);
 	handle_failed_objexts_alloc(old_exts, vec, objects);
 	if (new_slab) {
@@ -2145,8 +2146,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 		 * be simply assigned.
 		 */
 		slab->obj_exts = new_exts;
-	} else if ((old_exts & ~OBJEXTS_FLAGS_MASK) ||
-		   cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
+	} else if (old_exts & ~OBJEXTS_FLAGS_MASK) {
 		/*
 		 * If the slab is already in use, somebody can allocate and
 		 * assign slabobj_exts in parallel. In this case the existing
@@ -2158,6 +2158,9 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 		else
 			kfree(vec);
 		return 0;
+	} else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
+		/* Retry if a racing thread changed slab->obj_exts from under us. */
+		goto retry;
 	}
 
 	if (allow_spin)

0 commit comments

Comments
 (0)