Skip to content

Commit 3df2991

Browse files
Matthew Wilcox (Oracle) authored and tehcaster committed
slab: Add SL_pfmemalloc flag
Give slab its own name for this flag. Move the implementation from slab.h to slub.c since it's only used inside slub.c. Signed-off-by: Matthew Wilcox (Oracle) <[email protected]> Acked-by: Harry Yoo <[email protected]> Link: https://patch.msgid.link/[email protected] Signed-off-by: Vlastimil Babka <[email protected]>
1 parent c5c4490 commit 3df2991

File tree

2 files changed

+21
-24
lines changed

2 files changed

+21
-24
lines changed

mm/slab.h

Lines changed: 0 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -167,30 +167,6 @@ static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)
167167
*/
168168
#define slab_page(s) folio_page(slab_folio(s), 0)
169169

170-
/*
171-
* If network-based swap is enabled, sl*b must keep track of whether pages
172-
* were allocated from pfmemalloc reserves.
173-
*/
174-
static inline bool slab_test_pfmemalloc(const struct slab *slab)
175-
{
176-
return folio_test_active(slab_folio(slab));
177-
}
178-
179-
static inline void slab_set_pfmemalloc(struct slab *slab)
180-
{
181-
folio_set_active(slab_folio(slab));
182-
}
183-
184-
static inline void slab_clear_pfmemalloc(struct slab *slab)
185-
{
186-
folio_clear_active(slab_folio(slab));
187-
}
188-
189-
static inline void __slab_clear_pfmemalloc(struct slab *slab)
190-
{
191-
__folio_clear_active(slab_folio(slab));
192-
}
193-
194170
static inline void *slab_address(const struct slab *slab)
195171
{
196172
return folio_address(slab_folio(slab));

mm/slub.c

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -187,6 +187,7 @@
187187
* enum slab_flags - How the slab flags bits are used.
188188
* @SL_locked: Is locked with slab_lock()
189189
* @SL_partial: On the per-node partial list
190+
* @SL_pfmemalloc: Was allocated from PF_MEMALLOC reserves
190191
*
191192
* The slab flags share space with the page flags but some bits have
192193
* different interpretations. The high bits are used for information
@@ -195,6 +196,7 @@
195196
/* Slab reuses page-flag bits; each alias must keep its PG_* bit position. */
enum slab_flags {
	SL_locked = PG_locked,
	SL_partial = PG_workingset,	/* Historical reasons for this bit */
	SL_pfmemalloc = PG_active,	/* Historical reasons for this bit */
};
199201

200202
/*
@@ -648,6 +650,25 @@ static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
648650
}
649651
#endif /* CONFIG_SLUB_CPU_PARTIAL */
650652

653+
/*
654+
* If network-based swap is enabled, slub must keep track of whether memory
655+
* were allocated from pfmemalloc reserves.
656+
*/
657+
static inline bool slab_test_pfmemalloc(const struct slab *slab)
658+
{
659+
return test_bit(SL_pfmemalloc, &slab->flags);
660+
}
661+
662+
static inline void slab_set_pfmemalloc(struct slab *slab)
663+
{
664+
set_bit(SL_pfmemalloc, &slab->flags);
665+
}
666+
667+
static inline void __slab_clear_pfmemalloc(struct slab *slab)
668+
{
669+
__clear_bit(SL_pfmemalloc, &slab->flags);
670+
}
671+
651672
/*
652673
* Per slab locking using the pagelock
653674
*/

0 commit comments

Comments (0)