
Commit 1a1c4e4

Merge branch 'slab/for-6.9/slab-flag-cleanups' into slab/for-linus
Merge a series from myself that replaces hardcoded SLAB_ cache flag values with an enum, and explicitly deprecates the SLAB_MEM_SPREAD flag, which is a no-op since the removal of the SLAB allocator.
Parents: 466ed9e + 96d8dbb
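The shape of the cleanup, as a minimal self-contained user-space sketch (illustrative only: slab_flags_t, the __force sparse annotation, and the real SLAB_* names are simplified to DEMO_* stand-ins). Deriving flag values from an enum of bit numbers allocates bit positions automatically, so they cannot collide or leave accidental gaps the way hand-maintained hex constants could, and a deprecated flag stays source-compatible by expanding to an all-zeroes "unused" value:

#include <stdio.h>

enum _demo_flag_bits {
	_DEMO_RED_ZONE,		/* bit 0 */
	_DEMO_POISON,		/* bit 1 */
	_DEMO_STORE_USER,	/* bit 2 */
	_DEMO_FLAGS_LAST_BIT	/* number of bits in use */
};

#define __DEMO_FLAG_BIT(nr)	(1U << (nr))
#define __DEMO_FLAG_UNUSED	(0U)

#define DEMO_RED_ZONE	__DEMO_FLAG_BIT(_DEMO_RED_ZONE)
#define DEMO_POISON	__DEMO_FLAG_BIT(_DEMO_POISON)
/* A deprecated flag keeps compiling but ORs in nothing: */
#define DEMO_MEM_SPREAD	__DEMO_FLAG_UNUSED

int main(void)
{
	unsigned int flags = DEMO_RED_ZONE | DEMO_POISON | DEMO_MEM_SPREAD;

	/* prints: flags = 0x3, bits in use = 3 */
	printf("flags = %#x, bits in use = %d\n", flags, _DEMO_FLAGS_LAST_BIT);
	return 0;
}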

6 files changed: +79 −55 lines


include/linux/kasan.h

Lines changed: 0 additions & 6 deletions
@@ -429,7 +429,6 @@ struct kasan_cache {
 };
 
 size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
-slab_flags_t kasan_never_merge(void);
 void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);
 
@@ -446,11 +445,6 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache,
 {
	return 0;
 }
-/* And thus nothing prevents cache merging. */
-static inline slab_flags_t kasan_never_merge(void)
-{
-	return 0;
-}
 /* And no cache-related metadata initialization is required. */
 static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,

include/linux/slab.h

Lines changed: 69 additions & 28 deletions
@@ -21,29 +21,69 @@
 #include <linux/cleanup.h>
 #include <linux/hash.h>
 
+enum _slab_flag_bits {
+	_SLAB_CONSISTENCY_CHECKS,
+	_SLAB_RED_ZONE,
+	_SLAB_POISON,
+	_SLAB_KMALLOC,
+	_SLAB_HWCACHE_ALIGN,
+	_SLAB_CACHE_DMA,
+	_SLAB_CACHE_DMA32,
+	_SLAB_STORE_USER,
+	_SLAB_PANIC,
+	_SLAB_TYPESAFE_BY_RCU,
+	_SLAB_TRACE,
+#ifdef CONFIG_DEBUG_OBJECTS
+	_SLAB_DEBUG_OBJECTS,
+#endif
+	_SLAB_NOLEAKTRACE,
+	_SLAB_NO_MERGE,
+#ifdef CONFIG_FAILSLAB
+	_SLAB_FAILSLAB,
+#endif
+#ifdef CONFIG_MEMCG_KMEM
+	_SLAB_ACCOUNT,
+#endif
+#ifdef CONFIG_KASAN_GENERIC
+	_SLAB_KASAN,
+#endif
+	_SLAB_NO_USER_FLAGS,
+#ifdef CONFIG_KFENCE
+	_SLAB_SKIP_KFENCE,
+#endif
+#ifndef CONFIG_SLUB_TINY
+	_SLAB_RECLAIM_ACCOUNT,
+#endif
+	_SLAB_OBJECT_POISON,
+	_SLAB_CMPXCHG_DOUBLE,
+	_SLAB_FLAGS_LAST_BIT
+};
+
+#define __SLAB_FLAG_BIT(nr)	((slab_flags_t __force)(1U << (nr)))
+#define __SLAB_FLAG_UNUSED	((slab_flags_t __force)(0U))
 
 /*
  * Flags to pass to kmem_cache_create().
  * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise are no-op
  */
 /* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
+#define SLAB_CONSISTENCY_CHECKS	__SLAB_FLAG_BIT(_SLAB_CONSISTENCY_CHECKS)
 /* DEBUG: Red zone objs in a cache */
-#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
+#define SLAB_RED_ZONE		__SLAB_FLAG_BIT(_SLAB_RED_ZONE)
 /* DEBUG: Poison objects */
-#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
+#define SLAB_POISON		__SLAB_FLAG_BIT(_SLAB_POISON)
 /* Indicate a kmalloc slab */
-#define SLAB_KMALLOC		((slab_flags_t __force)0x00001000U)
+#define SLAB_KMALLOC		__SLAB_FLAG_BIT(_SLAB_KMALLOC)
 /* Align objs on cache lines */
-#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
+#define SLAB_HWCACHE_ALIGN	__SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
 /* Use GFP_DMA memory */
-#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
+#define SLAB_CACHE_DMA		__SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
 /* Use GFP_DMA32 memory */
-#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
+#define SLAB_CACHE_DMA32	__SLAB_FLAG_BIT(_SLAB_CACHE_DMA32)
 /* DEBUG: Store the last owner for bug hunting */
-#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
+#define SLAB_STORE_USER		__SLAB_FLAG_BIT(_SLAB_STORE_USER)
 /* Panic if kmem_cache_create() fails */
-#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
+#define SLAB_PANIC		__SLAB_FLAG_BIT(_SLAB_PANIC)
 /*
  * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
  *
@@ -95,21 +135,19 @@
  * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
  */
 /* Defer freeing slabs to RCU */
-#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
-/* Spread some memory over cpuset */
-#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
+#define SLAB_TYPESAFE_BY_RCU	__SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
 /* Trace allocations and frees */
-#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)
+#define SLAB_TRACE		__SLAB_FLAG_BIT(_SLAB_TRACE)
 
 /* Flag to prevent checks on free */
 #ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
+# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_BIT(_SLAB_DEBUG_OBJECTS)
 #else
-# define SLAB_DEBUG_OBJECTS	0
+# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_UNUSED
 #endif
 
 /* Avoid kmemleak tracing */
-#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)
+#define SLAB_NOLEAKTRACE	__SLAB_FLAG_BIT(_SLAB_NOLEAKTRACE)
 
 /*
  * Prevent merging with compatible kmem caches. This flag should be used
@@ -121,49 +159,52 @@
  * - performance critical caches, should be very rare and consulted with slab
  *   maintainers, and not used together with CONFIG_SLUB_TINY
  */
-#define SLAB_NO_MERGE		((slab_flags_t __force)0x01000000U)
+#define SLAB_NO_MERGE		__SLAB_FLAG_BIT(_SLAB_NO_MERGE)
 
 /* Fault injection mark */
 #ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
+# define SLAB_FAILSLAB		__SLAB_FLAG_BIT(_SLAB_FAILSLAB)
 #else
-# define SLAB_FAILSLAB		0
+# define SLAB_FAILSLAB		__SLAB_FLAG_UNUSED
 #endif
 /* Account to memcg */
 #ifdef CONFIG_MEMCG_KMEM
-# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
+# define SLAB_ACCOUNT		__SLAB_FLAG_BIT(_SLAB_ACCOUNT)
 #else
-# define SLAB_ACCOUNT		0
+# define SLAB_ACCOUNT		__SLAB_FLAG_UNUSED
 #endif
 
 #ifdef CONFIG_KASAN_GENERIC
-#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
+#define SLAB_KASAN		__SLAB_FLAG_BIT(_SLAB_KASAN)
 #else
-#define SLAB_KASAN		0
+#define SLAB_KASAN		__SLAB_FLAG_UNUSED
 #endif
 
 /*
  * Ignore user specified debugging flags.
  * Intended for caches created for self-tests so they have only flags
  * specified in the code and other flags are ignored.
  */
-#define SLAB_NO_USER_FLAGS	((slab_flags_t __force)0x10000000U)
+#define SLAB_NO_USER_FLAGS	__SLAB_FLAG_BIT(_SLAB_NO_USER_FLAGS)
 
 #ifdef CONFIG_KFENCE
-#define SLAB_SKIP_KFENCE	((slab_flags_t __force)0x20000000U)
+#define SLAB_SKIP_KFENCE	__SLAB_FLAG_BIT(_SLAB_SKIP_KFENCE)
 #else
-#define SLAB_SKIP_KFENCE	0
+#define SLAB_SKIP_KFENCE	__SLAB_FLAG_UNUSED
 #endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
 /* Objects are reclaimable */
 #ifndef CONFIG_SLUB_TINY
-#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
+#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
 #else
-#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0)
+#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_UNUSED
 #endif
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 
+/* Obsolete unused flag, to be removed */
+#define SLAB_MEM_SPREAD		__SLAB_FLAG_UNUSED
+
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  *
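The deprecation strategy is visible above: SLAB_MEM_SPREAD is no longer a real bit but __SLAB_FLAG_UNUSED, i.e. zero. A hypothetical call site (illustrative only, not taken from this diff) shows why existing users keep compiling and behaving identically:

#include <linux/init.h>
#include <linux/slab.h>

struct example { int payload; };

static struct kmem_cache *example_cachep;

static int __init example_init(void)
{
	/*
	 * SLAB_MEM_SPREAD now expands to __SLAB_FLAG_UNUSED (0U), so
	 * OR-ing it in is a no-op: this call is equivalent to passing
	 * SLAB_PANIC alone, and such call sites can be cleaned up later.
	 */
	example_cachep = kmem_cache_create("example_cache",
					   sizeof(struct example), 0,
					   SLAB_MEM_SPREAD | SLAB_PANIC, NULL);
	return example_cachep ? 0 : -ENOMEM;
}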

mm/kasan/generic.c

Lines changed: 6 additions & 16 deletions
@@ -334,14 +334,6 @@ DEFINE_ASAN_SET_SHADOW(f3);
 DEFINE_ASAN_SET_SHADOW(f5);
 DEFINE_ASAN_SET_SHADOW(f8);
 
-/* Only allow cache merging when no per-object metadata is present. */
-slab_flags_t kasan_never_merge(void)
-{
-	if (!kasan_requires_meta())
-		return 0;
-	return SLAB_KASAN;
-}
-
 /*
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
@@ -370,15 +362,13 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
		return;
 
	/*
-	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN
-	 * and that thus have per-object metadata.
-	 * Currently this flag is used in two places:
-	 * 1. In slab_ksize() to account for per-object metadata when
-	 *    calculating the size of the accessible memory within the object.
-	 * 2. In slab_common.c via kasan_never_merge() to prevent merging of
-	 *    caches with per-object metadata.
+	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN and
+	 * that thus have per-object metadata. Currently, this flag is used in
+	 * slab_ksize() to account for per-object metadata when calculating the
+	 * size of the accessible memory within the object. Additionally, we use
+	 * SLAB_NO_MERGE to prevent merging of caches with per-object metadata.
	 */
-	*flags |= SLAB_KASAN;
+	*flags |= SLAB_KASAN | SLAB_NO_MERGE;
 
	ok_size = *size;

mm/slab.h

Lines changed: 0 additions & 1 deletion
@@ -465,7 +465,6 @@ static inline bool is_kmalloc_cache(struct kmem_cache *s)
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
-			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \

mm/slab_common.c

Lines changed: 1 addition & 1 deletion
@@ -50,7 +50,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
-		SLAB_FAILSLAB | SLAB_NO_MERGE | kasan_never_merge())
+		SLAB_FAILSLAB | SLAB_NO_MERGE)
 
#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
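Taken together with the mm/kasan/generic.c change above, this is the point of removing kasan_never_merge(): kasan_cache_create() now tags metadata-carrying caches with SLAB_NO_MERGE directly, so the SLAB_NEVER_MERGE mask no longer needs a runtime function call and is once again a pure compile-time constant.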

mm/slub.c

Lines changed: 3 additions & 3 deletions
@@ -306,13 +306,13 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 
 /* Internal SLUB flags */
 /* Poison object */
-#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
+#define __OBJECT_POISON		__SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
 /* Use cmpxchg_double */
 
 #ifdef system_has_freelist_aba
-#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)
+#define __CMPXCHG_DOUBLE	__SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
 #else
-#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0U)
+#define __CMPXCHG_DOUBLE	__SLAB_FLAG_UNUSED
 #endif
 
 /*
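Note that the internal SLUB-only flags (__OBJECT_POISON, __CMPXCHG_DOUBLE) now draw their bits from the same enum as the public SLAB_* flags, so _SLAB_FLAGS_LAST_BIT bounds every bit in use and a future public flag cannot silently collide with the internal ones, which previously occupied hand-picked high bits (0x80000000U and 0x40000000U).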
