
Commit 401fb12

mm/sl*b: Differentiate struct slab fields by sl*b implementations
With a struct slab definition separate from struct page, we can go further and define only the fields that the chosen sl*b implementation uses. This means everything between the __page_flags and __page_refcount placeholders now depends on the chosen CONFIG_SL*B. Some fields exist in all implementations (slab_list), but can be part of a union in some of them, so it's simpler to repeat them than to complicate the definition with even more ifdefs.

The patch doesn't change the physical offsets of the fields, although that could be done later - for example, it's now clear that tighter packing in SLOB could be possible. It should also prevent accidental use of fields that don't exist in a given implementation. Before this patch, virt_to_cache() and cache_from_obj() were visible for SLOB (albeit unused), although they rely on the slab_cache field that SLOB never sets. With this patch that is a compile error, so these functions are now hidden behind #ifndef CONFIG_SLOB.

Signed-off-by: Vlastimil Babka <[email protected]>
Reviewed-by: Roman Gushchin <[email protected]>
Tested-by: Marco Elver <[email protected]> # kfence
Reviewed-by: Hyeonggon Yoo <[email protected]>
Tested-by: Hyeonggon Yoo <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Marco Elver <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: <[email protected]>
1 parent 8dae0cf commit 401fb12
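As a rough illustration of the layout strategy described above (a userspace sketch with hypothetical CFG_A/CFG_B configurations, not kernel code): only the members between the shared placeholders vary per configuration, and a compile-time offset check can pin the shared parts so the physical layout does not drift.

/* Userspace sketch with hypothetical CFG_A/CFG_B; build with -DCFG_A or -DCFG_B. */
#include <stddef.h>

struct demo {
	unsigned long common_flags;	/* shared head, like __page_flags */
#if defined(CFG_A)
	void *cache;			/* only configuration A has this */
	void *freelist;
#elif defined(CFG_B)
	void *freelist;
	void *unused;
#else
#error "Unexpected configuration"
#endif
	int common_refcount;		/* shared tail, like __page_refcount */
};

/* Keep the shared tail at the same offset regardless of configuration. */
_Static_assert(offsetof(struct demo, common_refcount) ==
	       sizeof(unsigned long) + 2 * sizeof(void *),
	       "layout drifted between configurations");

Referencing demo.cache while building with -DCFG_B fails to compile, which is the same property the commit relies on to catch accidental use of fields a given allocator does not have.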

2 files changed, +43 -14 lines changed

mm/kfence/core.c

Lines changed: 5 additions & 4 deletions
@@ -427,10 +427,11 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	/* Set required slab fields. */
 	slab = virt_to_slab((void *)meta->addr);
 	slab->slab_cache = cache;
-	if (IS_ENABLED(CONFIG_SLUB))
-		slab->objects = 1;
-	if (IS_ENABLED(CONFIG_SLAB))
-		slab->s_mem = addr;
+#if defined(CONFIG_SLUB)
+	slab->objects = 1;
+#elif defined(CONFIG_SLAB)
+	slab->s_mem = addr;
+#endif
 
 	/* Memory initialization. */
 	for_each_canary(meta, set_canary_byte);
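A note on the IS_ENABLED() to #if change: IS_ENABLED() only produces dead code, which the compiler still parses and type-checks, so once objects and s_mem exist only in their respective configurations the old form would no longer build. A standalone sketch of the difference, with hypothetical CFG_* macros and struct names standing in for the kernel ones (build with -DCFG_SLUB or -DCFG_SLAB):

/*
 * Hypothetical sketch, not kernel code: only the preprocessor can remove a
 * reference to a member that the current configuration does not declare; a
 * dead `if (0)` branch is still type-checked and would fail to compile.
 */
struct demo_slab {
#if defined(CFG_SLUB)
	unsigned int objects;
#elif defined(CFG_SLAB)
	void *s_mem;
#else
#error "pick -DCFG_SLUB or -DCFG_SLAB"
#endif
};

static void demo_set_fields(struct demo_slab *slab, void *addr)
{
#if defined(CFG_SLUB)
	slab->objects = 1;
	(void)addr;
#elif defined(CFG_SLAB)
	slab->s_mem = addr;
#endif
}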

mm/slab.h

Lines changed: 38 additions & 10 deletions
@@ -8,35 +8,57 @@
 /* Reuses the bits in struct page */
 struct slab {
 	unsigned long __page_flags;
+
+#if defined(CONFIG_SLAB)
+
 	union {
 		struct list_head slab_list;
-		struct { /* Partial pages */
+		struct rcu_head rcu_head;
+	};
+	struct kmem_cache *slab_cache;
+	void *freelist;	/* array of free object indexes */
+	void *s_mem;	/* first object */
+	unsigned int active;
+
+#elif defined(CONFIG_SLUB)
+
+	union {
+		struct list_head slab_list;
+		struct rcu_head rcu_head;
+		struct {
 			struct slab *next;
 #ifdef CONFIG_64BIT
 			int slabs;	/* Nr of slabs left */
 #else
 			short int slabs;
 #endif
 		};
-		struct rcu_head rcu_head;
 	};
-	struct kmem_cache *slab_cache; /* not slob */
+	struct kmem_cache *slab_cache;
 	/* Double-word boundary */
 	void *freelist;		/* first free object */
 	union {
-		void *s_mem;	/* slab: first object */
-		unsigned long counters;		/* SLUB */
-		struct {			/* SLUB */
+		unsigned long counters;
+		struct {
 			unsigned inuse:16;
 			unsigned objects:15;
 			unsigned frozen:1;
 		};
 	};
+	unsigned int __unused;
+
+#elif defined(CONFIG_SLOB)
+
+	struct list_head slab_list;
+	void *__unused_1;
+	void *freelist;		/* first free block */
+	void *__unused_2;
+	int units;
+
+#else
+#error "Unexpected slab allocator configured"
+#endif
 
-	union {
-		unsigned int active;		/* SLAB */
-		int units;			/* SLOB */
-	};
 	atomic_t __page_refcount;
 #ifdef CONFIG_MEMCG
 	unsigned long memcg_data;
@@ -48,10 +70,14 @@ struct slab {
 SLAB_MATCH(flags, __page_flags);
 SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
 SLAB_MATCH(slab_list, slab_list);
+#ifndef CONFIG_SLOB
 SLAB_MATCH(rcu_head, rcu_head);
 SLAB_MATCH(slab_cache, slab_cache);
+#endif
+#ifdef CONFIG_SLAB
 SLAB_MATCH(s_mem, s_mem);
 SLAB_MATCH(active, active);
+#endif
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG
 SLAB_MATCH(memcg_data, memcg_data);
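For reference, SLAB_MATCH() is the compile-time offset check introduced alongside struct slab earlier in mm/slab.h; paraphrased (it is not part of this diff), it is roughly:

/* Paraphrased: assert that a struct slab field sits at the same offset as
 * the struct page field whose bits it reuses. */
#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))

The new guards are needed because the struct slab side of the rcu_head/slab_cache and s_mem/active assertions now only exists under the matching CONFIG_SL*B, while the struct page fields they mirror are always present.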
@@ -599,6 +625,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s,
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
+#ifndef CONFIG_SLOB
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
 	struct slab *slab;
@@ -645,6 +672,7 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 	print_tracking(cachep, x);
 	return cachep;
 }
+#endif /* CONFIG_SLOB */
 
 static inline size_t slab_ksize(const struct kmem_cache *s)
 {
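These helpers are hidden rather than adapted because they depend on the slab_cache back-pointer, which the SLOB variant of struct slab no longer declares (and which SLOB never set anyway). A minimal sketch of that dependency, with a hypothetical helper name rather than the kernel implementation:

/*
 * Hypothetical sketch, not the kernel body: resolving an object back to its
 * owning cache means reading slab->slab_cache. Under CONFIG_SLOB that member
 * no longer exists, so keeping such a helper visible would now be a build
 * error; hence the #ifndef CONFIG_SLOB / #endif pair added above.
 */
static inline struct kmem_cache *cache_of_object(const void *obj)
{
	struct slab *slab = virt_to_slab(obj);

	return slab ? slab->slab_cache : NULL;
}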
