 #include <linux/cleanup.h>
 #include <linux/hash.h>
 
+enum _slab_flag_bits {
+	_SLAB_CONSISTENCY_CHECKS,
+	_SLAB_RED_ZONE,
+	_SLAB_POISON,
+	_SLAB_KMALLOC,
+	_SLAB_HWCACHE_ALIGN,
+	_SLAB_CACHE_DMA,
+	_SLAB_CACHE_DMA32,
+	_SLAB_STORE_USER,
+	_SLAB_PANIC,
+	_SLAB_TYPESAFE_BY_RCU,
+	_SLAB_TRACE,
+#ifdef CONFIG_DEBUG_OBJECTS
+	_SLAB_DEBUG_OBJECTS,
+#endif
+	_SLAB_NOLEAKTRACE,
+	_SLAB_NO_MERGE,
+#ifdef CONFIG_FAILSLAB
+	_SLAB_FAILSLAB,
+#endif
+#ifdef CONFIG_MEMCG_KMEM
+	_SLAB_ACCOUNT,
+#endif
+#ifdef CONFIG_KASAN_GENERIC
+	_SLAB_KASAN,
+#endif
+	_SLAB_NO_USER_FLAGS,
+#ifdef CONFIG_KFENCE
+	_SLAB_SKIP_KFENCE,
+#endif
+#ifndef CONFIG_SLUB_TINY
+	_SLAB_RECLAIM_ACCOUNT,
+#endif
+	_SLAB_OBJECT_POISON,
+	_SLAB_CMPXCHG_DOUBLE,
+	_SLAB_FLAGS_LAST_BIT
+};
+
+#define __SLAB_FLAG_BIT(nr)	((slab_flags_t __force)(1U << (nr)))
+#define __SLAB_FLAG_UNUSED	((slab_flags_t __force)(0U))
 
 /*
  * Flags to pass to kmem_cache_create().
  * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise are no-op
  */
 /* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
+#define SLAB_CONSISTENCY_CHECKS	__SLAB_FLAG_BIT(_SLAB_CONSISTENCY_CHECKS)
 /* DEBUG: Red zone objs in a cache */
-#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
+#define SLAB_RED_ZONE		__SLAB_FLAG_BIT(_SLAB_RED_ZONE)
 /* DEBUG: Poison objects */
-#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
+#define SLAB_POISON		__SLAB_FLAG_BIT(_SLAB_POISON)
 /* Indicate a kmalloc slab */
-#define SLAB_KMALLOC		((slab_flags_t __force)0x00001000U)
+#define SLAB_KMALLOC		__SLAB_FLAG_BIT(_SLAB_KMALLOC)
 /* Align objs on cache lines */
-#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
+#define SLAB_HWCACHE_ALIGN	__SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
 /* Use GFP_DMA memory */
-#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
+#define SLAB_CACHE_DMA		__SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
 /* Use GFP_DMA32 memory */
-#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
+#define SLAB_CACHE_DMA32	__SLAB_FLAG_BIT(_SLAB_CACHE_DMA32)
 /* DEBUG: Store the last owner for bug hunting */
-#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
+#define SLAB_STORE_USER		__SLAB_FLAG_BIT(_SLAB_STORE_USER)
 /* Panic if kmem_cache_create() fails */
-#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
+#define SLAB_PANIC		__SLAB_FLAG_BIT(_SLAB_PANIC)
 /*
  * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
  *
[...]
  * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
  */
 /* Defer freeing slabs to RCU */
-#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
+#define SLAB_TYPESAFE_BY_RCU	__SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
 /* Trace allocations and frees */
-#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)
+#define SLAB_TRACE		__SLAB_FLAG_BIT(_SLAB_TRACE)
 
 /* Flag to prevent checks on free */
 #ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
+# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_BIT(_SLAB_DEBUG_OBJECTS)
 #else
-# define SLAB_DEBUG_OBJECTS	0
+# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_UNUSED
 #endif
 
 /* Avoid kmemleak tracing */
-#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)
+#define SLAB_NOLEAKTRACE	__SLAB_FLAG_BIT(_SLAB_NOLEAKTRACE)
 
 /*
  * Prevent merging with compatible kmem caches. This flag should be used
[...]
  * - performance critical caches, should be very rare and consulted with slab
  *   maintainers, and not used together with CONFIG_SLUB_TINY
  */
-#define SLAB_NO_MERGE		((slab_flags_t __force)0x01000000U)
+#define SLAB_NO_MERGE		__SLAB_FLAG_BIT(_SLAB_NO_MERGE)
 
 /* Fault injection mark */
 #ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
+# define SLAB_FAILSLAB		__SLAB_FLAG_BIT(_SLAB_FAILSLAB)
 #else
-# define SLAB_FAILSLAB		0
+# define SLAB_FAILSLAB		__SLAB_FLAG_UNUSED
 #endif
 /* Account to memcg */
 #ifdef CONFIG_MEMCG_KMEM
-# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
+# define SLAB_ACCOUNT		__SLAB_FLAG_BIT(_SLAB_ACCOUNT)
 #else
-# define SLAB_ACCOUNT		0
+# define SLAB_ACCOUNT		__SLAB_FLAG_UNUSED
 #endif
 
 #ifdef CONFIG_KASAN_GENERIC
-#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
+#define SLAB_KASAN		__SLAB_FLAG_BIT(_SLAB_KASAN)
 #else
-#define SLAB_KASAN		__SLAB_FLAG_UNUSED
 #endif
 
 /*
  * Ignore user specified debugging flags.
  * Intended for caches created for self-tests so they have only flags
  * specified in the code and other flags are ignored.
  */
-#define SLAB_NO_USER_FLAGS	((slab_flags_t __force)0x10000000U)
+#define SLAB_NO_USER_FLAGS	__SLAB_FLAG_BIT(_SLAB_NO_USER_FLAGS)
 
 #ifdef CONFIG_KFENCE
-#define SLAB_SKIP_KFENCE	((slab_flags_t __force)0x20000000U)
+#define SLAB_SKIP_KFENCE	__SLAB_FLAG_BIT(_SLAB_SKIP_KFENCE)
 #else
-#define SLAB_SKIP_KFENCE	0
+#define SLAB_SKIP_KFENCE	__SLAB_FLAG_UNUSED
 #endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
 /* Objects are reclaimable */
 #ifndef CONFIG_SLUB_TINY
-#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
+#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
 #else
-#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0)
+#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_UNUSED
 #endif
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 
 /* Obsolete unused flag, to be removed */
-#define SLAB_MEM_SPREAD		((slab_flags_t __force)0U)
+#define SLAB_MEM_SPREAD		__SLAB_FLAG_UNUSED
 
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
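
Worked example (not part of the diff above): the following standalone userspace sketch reproduces the enum-plus-mask pattern introduced here. The DEMO_* names and CONFIG_DEMO_STORE_USER are invented for illustration, and the sparse slab_flags_t/__force annotations are dropped so it builds with a plain C compiler. It shows that each flag's value is now derived from its position in the enum rather than from a hand-maintained hex constant, and that a compiled-out flag collapses to a typed zero.

#include <stdio.h>

/* Bit *numbers* live in an enum, in declaration order... */
enum demo_flag_bits {
	_DEMO_RED_ZONE,		/* bit 0 */
	_DEMO_POISON,		/* bit 1 */
	_DEMO_STORE_USER,	/* bit 2 */
	_DEMO_FLAGS_LAST_BIT	/* number of bits in use */
};

/* ...and a single helper turns a bit number into a mask. */
#define DEMO_FLAG_BIT(nr)	(1U << (nr))
#define DEMO_FLAG_UNUSED	(0U)

#define DEMO_RED_ZONE		DEMO_FLAG_BIT(_DEMO_RED_ZONE)
#define DEMO_POISON		DEMO_FLAG_BIT(_DEMO_POISON)
#ifdef CONFIG_DEMO_STORE_USER		/* stand-in for a Kconfig option */
#define DEMO_STORE_USER		DEMO_FLAG_BIT(_DEMO_STORE_USER)
#else
#define DEMO_STORE_USER		DEMO_FLAG_UNUSED	/* feature compiled out */
#endif

int main(void)
{
	printf("DEMO_RED_ZONE   = 0x%x\n", DEMO_RED_ZONE);	/* 0x1 */
	printf("DEMO_POISON     = 0x%x\n", DEMO_POISON);	/* 0x2 */
	printf("DEMO_STORE_USER = 0x%x\n", DEMO_STORE_USER);	/* 0x0 when compiled out */
	printf("bits in use     = %d\n", _DEMO_FLAGS_LAST_BIT);	/* 3 */
	return 0;
}

In this pattern the masks stay dense, adding or removing a flag no longer requires renumbering the remaining hex constants by hand, and the trailing enumerator (here _DEMO_FLAGS_LAST_BIT, _SLAB_FLAGS_LAST_BIT in the patch) automatically tracks how many bits are in use.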
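These are the flags ORed together and passed to kmem_cache_create(), so callers that use the named flags are unaffected by the switch from hard-coded hex values to __SLAB_FLAG_BIT(). A minimal caller-side sketch follows; struct foo, foo_cachep and the module boilerplate are hypothetical names for illustration, not code from the patch.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical object type backed by its own kmem cache. */
struct foo {
	int id;
	char name[32];
};

static struct kmem_cache *foo_cachep;

static int __init foo_init(void)
{
	/* Flag values are now derived from enum _slab_flag_bits, but callers
	 * still just combine the named flags as before. */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
	if (!foo_cachep)
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	kmem_cache_destroy(foo_cachep);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");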