Commit cc61eb8

mm, slab: use an enum to define SLAB_ cache creation flags
The values of SLAB_ cache creation flags are defined by hand, which is
tedious and error-prone. Use an enum to assign the bit number and a
__SLAB_FLAG_BIT() macro to #define the final flags.

This renumbers the flag values, which is OK as they are only used
internally.

Also define a __SLAB_FLAG_UNUSED macro to assign value to flags disabled
by their respective config options in a unified and sparse-friendly way.

Reviewed-and-tested-by: Xiongwei Song <[email protected]>
Reviewed-by: Chengming Zhou <[email protected]>
Reviewed-by: Roman Gushchin <[email protected]>
Acked-by: David Rientjes <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
1 parent cdeeaab commit cc61eb8
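
To make the pattern easier to see outside the kernel headers, here is a
minimal, standalone C sketch of the same idea. It is illustrative only: the
names are hypothetical and a plain unsigned int stands in for the kernel's
slab_flags_t (which carries a sparse __force cast). The mechanics are the
ones this commit introduces: an enum hands out bit numbers, a macro turns a
bit number into a flag value, and a typed zero stands in for disabled flags.

#include <stdio.h>

typedef unsigned int demo_flags_t;	/* stand-in for slab_flags_t */

/* The enum assigns bit numbers automatically, in declaration order. */
enum demo_flag_bits {
	_DEMO_RED_ZONE,
	_DEMO_POISON,
	_DEMO_FLAGS_LAST_BIT	/* sentinel: number of bits in use */
};

/* Turn a bit number into a flag value; a typed zero marks unused flags. */
#define DEMO_FLAG_BIT(nr)	((demo_flags_t)(1U << (nr)))
#define DEMO_FLAG_UNUSED	((demo_flags_t)0U)

#define DEMO_RED_ZONE	DEMO_FLAG_BIT(_DEMO_RED_ZONE)
#define DEMO_POISON	DEMO_FLAG_BIT(_DEMO_POISON)

int main(void)
{
	/* Prints 0x1 0x2: the values follow the enum order. */
	printf("0x%x 0x%x\n", DEMO_RED_ZONE, DEMO_POISON);
	return 0;
}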

File tree

2 files changed: +70 −30 lines

include/linux/slab.h

Lines changed: 67 additions & 27 deletions
@@ -21,29 +21,69 @@
 #include <linux/cleanup.h>
 #include <linux/hash.h>
 
+enum _slab_flag_bits {
+	_SLAB_CONSISTENCY_CHECKS,
+	_SLAB_RED_ZONE,
+	_SLAB_POISON,
+	_SLAB_KMALLOC,
+	_SLAB_HWCACHE_ALIGN,
+	_SLAB_CACHE_DMA,
+	_SLAB_CACHE_DMA32,
+	_SLAB_STORE_USER,
+	_SLAB_PANIC,
+	_SLAB_TYPESAFE_BY_RCU,
+	_SLAB_TRACE,
+#ifdef CONFIG_DEBUG_OBJECTS
+	_SLAB_DEBUG_OBJECTS,
+#endif
+	_SLAB_NOLEAKTRACE,
+	_SLAB_NO_MERGE,
+#ifdef CONFIG_FAILSLAB
+	_SLAB_FAILSLAB,
+#endif
+#ifdef CONFIG_MEMCG_KMEM
+	_SLAB_ACCOUNT,
+#endif
+#ifdef CONFIG_KASAN_GENERIC
+	_SLAB_KASAN,
+#endif
+	_SLAB_NO_USER_FLAGS,
+#ifdef CONFIG_KFENCE
+	_SLAB_SKIP_KFENCE,
+#endif
+#ifndef CONFIG_SLUB_TINY
+	_SLAB_RECLAIM_ACCOUNT,
+#endif
+	_SLAB_OBJECT_POISON,
+	_SLAB_CMPXCHG_DOUBLE,
+	_SLAB_FLAGS_LAST_BIT
+};
+
+#define __SLAB_FLAG_BIT(nr)	((slab_flags_t __force)(1U << (nr)))
+#define __SLAB_FLAG_UNUSED	((slab_flags_t __force)(0U))
 
 /*
  * Flags to pass to kmem_cache_create().
  * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise are no-op
  */
 /* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
+#define SLAB_CONSISTENCY_CHECKS	__SLAB_FLAG_BIT(_SLAB_CONSISTENCY_CHECKS)
 /* DEBUG: Red zone objs in a cache */
-#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
+#define SLAB_RED_ZONE		__SLAB_FLAG_BIT(_SLAB_RED_ZONE)
 /* DEBUG: Poison objects */
-#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
+#define SLAB_POISON		__SLAB_FLAG_BIT(_SLAB_POISON)
 /* Indicate a kmalloc slab */
-#define SLAB_KMALLOC		((slab_flags_t __force)0x00001000U)
+#define SLAB_KMALLOC		__SLAB_FLAG_BIT(_SLAB_KMALLOC)
 /* Align objs on cache lines */
-#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
+#define SLAB_HWCACHE_ALIGN	__SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
 /* Use GFP_DMA memory */
-#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
+#define SLAB_CACHE_DMA		__SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
 /* Use GFP_DMA32 memory */
-#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
+#define SLAB_CACHE_DMA32	__SLAB_FLAG_BIT(_SLAB_CACHE_DMA32)
 /* DEBUG: Store the last owner for bug hunting */
-#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
+#define SLAB_STORE_USER		__SLAB_FLAG_BIT(_SLAB_STORE_USER)
 /* Panic if kmem_cache_create() fails */
-#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
+#define SLAB_PANIC		__SLAB_FLAG_BIT(_SLAB_PANIC)
 /*
  * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
  *
@@ -95,19 +135,19 @@
  * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
  */
 /* Defer freeing slabs to RCU */
-#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
+#define SLAB_TYPESAFE_BY_RCU	__SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
 /* Trace allocations and frees */
-#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)
+#define SLAB_TRACE		__SLAB_FLAG_BIT(_SLAB_TRACE)
 
 /* Flag to prevent checks on free */
 #ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
+# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_BIT(_SLAB_DEBUG_OBJECTS)
 #else
-# define SLAB_DEBUG_OBJECTS	0
+# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_UNUSED
 #endif
 
 /* Avoid kmemleak tracing */
-#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)
+#define SLAB_NOLEAKTRACE	__SLAB_FLAG_BIT(_SLAB_NOLEAKTRACE)
 
 /*
  * Prevent merging with compatible kmem caches. This flag should be used
@@ -119,51 +159,51 @@
  * - performance critical caches, should be very rare and consulted with slab
  *   maintainers, and not used together with CONFIG_SLUB_TINY
  */
-#define SLAB_NO_MERGE		((slab_flags_t __force)0x01000000U)
+#define SLAB_NO_MERGE		__SLAB_FLAG_BIT(_SLAB_NO_MERGE)
 
 /* Fault injection mark */
 #ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
+# define SLAB_FAILSLAB		__SLAB_FLAG_BIT(_SLAB_FAILSLAB)
 #else
-# define SLAB_FAILSLAB		0
+# define SLAB_FAILSLAB		__SLAB_FLAG_UNUSED
 #endif
 /* Account to memcg */
 #ifdef CONFIG_MEMCG_KMEM
-# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
+# define SLAB_ACCOUNT		__SLAB_FLAG_BIT(_SLAB_ACCOUNT)
 #else
-# define SLAB_ACCOUNT		0
+# define SLAB_ACCOUNT		__SLAB_FLAG_UNUSED
 #endif
 
 #ifdef CONFIG_KASAN_GENERIC
-#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
+#define SLAB_KASAN		__SLAB_FLAG_BIT(_SLAB_KASAN)
 #else
-#define SLAB_KASAN		0
+#define SLAB_KASAN		__SLAB_FLAG_UNUSED
 #endif
 
 /*
  * Ignore user specified debugging flags.
  * Intended for caches created for self-tests so they have only flags
  * specified in the code and other flags are ignored.
  */
-#define SLAB_NO_USER_FLAGS	((slab_flags_t __force)0x10000000U)
+#define SLAB_NO_USER_FLAGS	__SLAB_FLAG_BIT(_SLAB_NO_USER_FLAGS)
 
 #ifdef CONFIG_KFENCE
-#define SLAB_SKIP_KFENCE	((slab_flags_t __force)0x20000000U)
+#define SLAB_SKIP_KFENCE	__SLAB_FLAG_BIT(_SLAB_SKIP_KFENCE)
 #else
-#define SLAB_SKIP_KFENCE	__SLAB_FLAG_UNUSED
+#define SLAB_SKIP_KFENCE	__SLAB_FLAG_UNUSED
 #endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
 /* Objects are reclaimable */
 #ifndef CONFIG_SLUB_TINY
-#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
+#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
 #else
-#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0)
+#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_UNUSED
 #endif
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 
 /* Obsolete unused flag, to be removed */
-#define SLAB_MEM_SPREAD		((slab_flags_t __force)0U)
+#define SLAB_MEM_SPREAD		__SLAB_FLAG_UNUSED
 
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
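
Nothing changes for callers of these flags. The sketch below uses a
hypothetical cache (struct foo, foo_cache and foo_cache_init() are made up
for illustration; kmem_cache_create() is the real API) to show why the typed
zero matters: with CONFIG_MEMCG_KMEM disabled, SLAB_ACCOUNT expands to
__SLAB_FLAG_UNUSED, so OR-ing it in is a no-op that still satisfies sparse's
__bitwise checking.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

/* Hypothetical object and cache, for illustration only. */
struct foo {
	int a;
	int b;
};

static struct kmem_cache *foo_cache;

static int __init foo_cache_init(void)
{
	/*
	 * With CONFIG_MEMCG_KMEM=n, SLAB_ACCOUNT is __SLAB_FLAG_UNUSED,
	 * i.e. a zero already cast to slab_flags_t, so the OR below is a
	 * no-op and sparse does not warn about mixing plain integers with
	 * the __bitwise type.
	 */
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
				      NULL);
	return foo_cache ? 0 : -ENOMEM;
}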

mm/slub.c

Lines changed: 3 additions & 3 deletions
@@ -306,13 +306,13 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 
 /* Internal SLUB flags */
 /* Poison object */
-#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
+#define __OBJECT_POISON		__SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
 /* Use cmpxchg_double */
 
 #ifdef system_has_freelist_aba
-#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)
+#define __CMPXCHG_DOUBLE	__SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
 #else
-#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0U)
+#define __CMPXCHG_DOUBLE	__SLAB_FLAG_UNUSED
 #endif
 
 /*
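
A side benefit of the enum is that the trailing _SLAB_FLAGS_LAST_BIT
sentinel records how many bits are in use, which makes a build-time sanity
check possible. The check below is not part of this commit; it is only an
illustration using the kernel's static_assert() from <linux/build_bug.h>,
and it relies on slab_flags_t being a 32-bit unsigned int.

#include <linux/build_bug.h>

/* Illustration only: verify the flag bits still fit in slab_flags_t. */
static_assert(_SLAB_FLAGS_LAST_BIT <= 32,
	      "too many slab flag bits for a 32-bit slab_flags_t");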
