  * the fast path and disables lockless freelists.
  */
 
+/**
+ * enum slab_flags - How the slab flags bits are used.
+ * @SL_locked: Is locked with slab_lock()
+ *
+ * The slab flags share space with the page flags but some bits have
+ * different interpretations. The high bits are used for information
+ * like zone/node/section.
+ */
+enum slab_flags {
+	SL_locked = PG_locked,
+};
+
 /*
  * We could simply use migrate_disable()/enable() but as long as it's a
  * function call even on !PREEMPT_RT, use inline preempt_disable() there.
@@ -639,12 +651,12 @@ static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
  */
 static __always_inline void slab_lock(struct slab *slab)
 {
-	bit_spin_lock(PG_locked, &slab->__page_flags);
+	bit_spin_lock(SL_locked, &slab->flags);
 }
 
 static __always_inline void slab_unlock(struct slab *slab)
 {
-	bit_spin_unlock(PG_locked, &slab->__page_flags);
+	bit_spin_unlock(SL_locked, &slab->flags);
 }
 
 static inline bool
@@ -1010,7 +1022,7 @@ static void print_slab_info(const struct slab *slab)
 {
 	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
 	       slab, slab->objects, slab->inuse, slab->freelist,
-	       &slab->__page_flags);
+	       &slab->flags);
 }
 
 void skip_orig_size_check(struct kmem_cache *s, const void *object)
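
Background note (not part of the patch): the conversion works because SL_locked is defined with the same bit number as PG_locked and, per the new kerneldoc, slab->flags shares storage with the page flags, so bit_spin_lock()/bit_spin_unlock() keep operating on the same bit, now reached through slab->flags instead of slab->__page_flags. The sketch below illustrates the general idea of using one bit of a flags word as a spin lock; it is a minimal userspace approximation assuming C11 atomics, and the demo_* and DEMO_* names are made up for illustration, not kernel API.

/*
 * Minimal sketch: one bit of a flags word doubles as a spin lock, which is
 * roughly what bit_spin_lock(SL_locked, &slab->flags) does (the real kernel
 * helper additionally disables preemption while the bit is held).
 * demo_slab, DEMO_LOCKED and the demo_* functions are hypothetical names.
 */
#include <stdatomic.h>
#include <stdio.h>

enum demo_flags {
	DEMO_LOCKED = 0,	/* plays the role of SL_locked/PG_locked */
};

struct demo_slab {
	atomic_ulong flags;	/* plays the role of slab->flags */
};

static void demo_slab_lock(struct demo_slab *slab)
{
	/* spin until the fetch_or observes the lock bit as previously clear */
	while (atomic_fetch_or(&slab->flags, 1UL << DEMO_LOCKED) &
	       (1UL << DEMO_LOCKED))
		;
}

static void demo_slab_unlock(struct demo_slab *slab)
{
	/* clear only the lock bit; any other flag bits are left untouched */
	atomic_fetch_and(&slab->flags, ~(1UL << DEMO_LOCKED));
}

int main(void)
{
	struct demo_slab s = { .flags = 0 };

	demo_slab_lock(&s);
	printf("locked,   flags = %#lx\n", (unsigned long)atomic_load(&s.flags));
	demo_slab_unlock(&s);
	printf("unlocked, flags = %#lx\n", (unsigned long)atomic_load(&s.flags));
	return 0;
}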