Skip to content

Commit 3090809

Browse files
Matthew Wilcox (Oracle) authored and tehcaster committed
slab: Rename slab->__page_flags to slab->flags
Slab has its own reasons for using flag bits; they aren't just the page bits. Maybe this won't be the ultimate solution, but we should be clear that these bits are in use.

Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Vlastimil Babka <[email protected]>
1 parent 262e086 commit 3090809

File tree

2 files changed

+17
-5
lines changed

2 files changed

+17
-5
lines changed

mm/slab.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ typedef union {
5050

5151
/* Reuses the bits in struct page */
5252
struct slab {
53-
unsigned long __page_flags;
53+
unsigned long flags;
5454

5555
struct kmem_cache *slab_cache;
5656
union {
@@ -99,7 +99,7 @@ struct slab {
9999

100100
#define SLAB_MATCH(pg, sl) \
101101
static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
102-
SLAB_MATCH(flags, __page_flags);
102+
SLAB_MATCH(flags, flags);
103103
SLAB_MATCH(compound_head, slab_cache); /* Ensure bit 0 is clear */
104104
SLAB_MATCH(_refcount, __page_refcount);
105105
#ifdef CONFIG_MEMCG

mm/slub.c

Lines changed: 15 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -183,6 +183,18 @@
183183
* the fast path and disables lockless freelists.
184184
*/
185185

186+
/**
187+
* enum slab_flags - How the slab flags bits are used.
188+
* @SL_locked: Is locked with slab_lock()
189+
*
190+
* The slab flags share space with the page flags but some bits have
191+
* different interpretations. The high bits are used for information
192+
* like zone/node/section.
193+
*/
194+
enum slab_flags {
195+
SL_locked = PG_locked,
196+
};
197+
186198
/*
187199
* We could simply use migrate_disable()/enable() but as long as it's a
188200
* function call even on !PREEMPT_RT, use inline preempt_disable() there.
@@ -639,12 +651,12 @@ static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
639651
*/
640652
static __always_inline void slab_lock(struct slab *slab)
641653
{
642-
bit_spin_lock(PG_locked, &slab->__page_flags);
654+
bit_spin_lock(SL_locked, &slab->flags);
643655
}
644656

645657
static __always_inline void slab_unlock(struct slab *slab)
646658
{
647-
bit_spin_unlock(PG_locked, &slab->__page_flags);
659+
bit_spin_unlock(SL_locked, &slab->flags);
648660
}
649661

650662
static inline bool
@@ -1010,7 +1022,7 @@ static void print_slab_info(const struct slab *slab)
10101022
{
10111023
pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
10121024
slab, slab->objects, slab->inuse, slab->freelist,
1013-
&slab->__page_flags);
1025+
&slab->flags);
10141026
}
10151027

10161028
void skip_orig_size_check(struct kmem_cache *s, const void *object)

0 commit comments

Comments (0)