Skip to content

Commit 01b34d1

Browse files
Matthew Wilcox (Oracle) authored and tehcaster (Vlastimil Babka) committed
mm/slub: Convert pfmemalloc_match() to take a struct slab
Preparatory for mass conversion. Use the new slab_test_pfmemalloc() helper. As it doesn't do VM_BUG_ON(!PageSlab()) we no longer need the pfmemalloc_match_unsafe() variant.

Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
Reviewed-by: Roman Gushchin <[email protected]>
Reviewed-by: Hyeonggon Yoo <[email protected]>
1 parent 4020b4a commit 01b34d1

File tree

1 file changed

+6
-19
lines changed

1 file changed

+6
-19
lines changed

mm/slub.c

Lines changed: 6 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -2128,7 +2128,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
21282128
static inline void put_cpu_partial(struct kmem_cache *s, struct page *page,
21292129
int drain) { }
21302130
#endif
2131-
static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
2131+
static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
21322132

21332133
/*
21342134
* Try to allocate a partial slab from a specific node.
@@ -2154,7 +2154,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
21542154
list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
21552155
void *t;
21562156

2157-
if (!pfmemalloc_match(page, gfpflags))
2157+
if (!pfmemalloc_match(page_slab(page), gfpflags))
21582158
continue;
21592159

21602160
t = acquire_slab(s, n, page, object == NULL);
@@ -2832,22 +2832,9 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
28322832
#endif
28332833
}
28342834

2835-
static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2835+
static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
28362836
{
2837-
if (unlikely(PageSlabPfmemalloc(page)))
2838-
return gfp_pfmemalloc_allowed(gfpflags);
2839-
2840-
return true;
2841-
}
2842-
2843-
/*
2844-
* A variant of pfmemalloc_match() that tests page flags without asserting
2845-
* PageSlab. Intended for opportunistic checks before taking a lock and
2846-
* rechecking that nobody else freed the page under us.
2847-
*/
2848-
static inline bool pfmemalloc_match_unsafe(struct page *page, gfp_t gfpflags)
2849-
{
2850-
if (unlikely(__PageSlabPfmemalloc(page)))
2837+
if (unlikely(slab_test_pfmemalloc(slab)))
28512838
return gfp_pfmemalloc_allowed(gfpflags);
28522839

28532840
return true;
@@ -2949,7 +2936,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
29492936
* PFMEMALLOC but right now, we are losing the pfmemalloc
29502937
* information when the page leaves the per-cpu allocator
29512938
*/
2952-
if (unlikely(!pfmemalloc_match_unsafe(page, gfpflags)))
2939+
if (unlikely(!pfmemalloc_match(page_slab(page), gfpflags)))
29532940
goto deactivate_slab;
29542941

29552942
/* must check again c->page in case we got preempted and it changed */
@@ -3061,7 +3048,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
30613048
}
30623049
}
30633050

3064-
if (unlikely(!pfmemalloc_match(page, gfpflags)))
3051+
if (unlikely(!pfmemalloc_match(page_slab(page), gfpflags)))
30653052
/*
30663053
* For !pfmemalloc_match() case we don't load freelist so that
30673054
* we don't make further mismatched allocations easier.

0 commit comments

Comments (0)