
Commit 67c2669

rgushchin authored and dennisszhou committed

percpu: split __pcpu_balance_workfn()
__pcpu_balance_workfn() became fairly big and hard to follow, but in fact it consists of two fully independent parts: one responsible for the destruction of excessive free chunks, the other for the population of the necessary amount of free pages.

To simplify the code and prepare for adding new functionality, split it into two functions:

1) pcpu_balance_free,
2) pcpu_balance_populated.

Move the taking/releasing of pcpu_alloc_mutex to an upper level to keep the current synchronization in place.

Signed-off-by: Roman Gushchin <[email protected]>
Reviewed-by: Dennis Zhou <[email protected]>
Signed-off-by: Dennis Zhou <[email protected]>
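For orientation, this is the resulting shape of the balance worker after the split, condensed from the hunks below (the trailing comments are annotations added here, not part of the commit):

	static void pcpu_balance_workfn(struct work_struct *work)
	{
		enum pcpu_chunk_type type;

		for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++) {
			mutex_lock(&pcpu_alloc_mutex);	/* hoisted to the caller */
			pcpu_balance_free(type);	/* reclaim excess free chunks */
			pcpu_balance_populated(type);	/* keep populated pages in reserve */
			mutex_unlock(&pcpu_alloc_mutex);
		}
	}

Taking the mutex per iteration, rather than once around the whole loop, preserves the previous behavior, where each __pcpu_balance_workfn(type) call acquired and released the mutex itself.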
1 parent ac9380f · commit 67c2669

File tree

1 file changed, 29 insertions(+), 17 deletions(-)


mm/percpu.c

@@ -1933,31 +1933,22 @@ void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 }
 
 /**
- * __pcpu_balance_workfn - manage the amount of free chunks and populated pages
+ * pcpu_balance_free - manage the amount of free chunks
  * @type: chunk type
  *
- * Reclaim all fully free chunks except for the first one. This is also
- * responsible for maintaining the pool of empty populated pages. However,
- * it is possible that this is called when physical memory is scarce causing
- * OOM killer to be triggered. We should avoid doing so until an actual
- * allocation causes the failure as it is possible that requests can be
- * serviced from already backed regions.
+ * Reclaim all fully free chunks except for the first one.
  */
-static void __pcpu_balance_workfn(enum pcpu_chunk_type type)
+static void pcpu_balance_free(enum pcpu_chunk_type type)
 {
-	/* gfp flags passed to underlying allocators */
-	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
 	LIST_HEAD(to_free);
 	struct list_head *pcpu_slot = pcpu_chunk_list(type);
 	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
 	struct pcpu_chunk *chunk, *next;
-	int slot, nr_to_pop, ret;
 
 	/*
 	 * There's no reason to keep around multiple unused chunks and VM
 	 * areas can be scarce. Destroy all free chunks except for one.
 	 */
-	mutex_lock(&pcpu_alloc_mutex);
 	spin_lock_irq(&pcpu_lock);
 
 	list_for_each_entry_safe(chunk, next, free_head, list) {
@@ -1985,6 +1976,25 @@ static void __pcpu_balance_workfn(enum pcpu_chunk_type type)
 		pcpu_destroy_chunk(chunk);
 		cond_resched();
 	}
+}
+
+/**
+ * pcpu_balance_populated - manage the amount of populated pages
+ * @type: chunk type
+ *
+ * Maintain a certain amount of populated pages to satisfy atomic allocations.
+ * It is possible that this is called when physical memory is scarce causing
+ * OOM killer to be triggered. We should avoid doing so until an actual
+ * allocation causes the failure as it is possible that requests can be
+ * serviced from already backed regions.
+ */
+static void pcpu_balance_populated(enum pcpu_chunk_type type)
+{
+	/* gfp flags passed to underlying allocators */
+	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
+	struct list_head *pcpu_slot = pcpu_chunk_list(type);
+	struct pcpu_chunk *chunk;
+	int slot, nr_to_pop, ret;
 
 	/*
 	 * Ensure there are certain number of free populated pages for
@@ -2054,22 +2064,24 @@ static void __pcpu_balance_workfn(enum pcpu_chunk_type type)
 			goto retry_pop;
 		}
 	}
-
-	mutex_unlock(&pcpu_alloc_mutex);
 }
 
 /**
  * pcpu_balance_workfn - manage the amount of free chunks and populated pages
  * @work: unused
  *
- * Call __pcpu_balance_workfn() for each chunk type.
+ * Call pcpu_balance_free() and pcpu_balance_populated() for each chunk type.
  */
 static void pcpu_balance_workfn(struct work_struct *work)
 {
 	enum pcpu_chunk_type type;
 
-	for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
-		__pcpu_balance_workfn(type);
+	for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++) {
+		mutex_lock(&pcpu_alloc_mutex);
+		pcpu_balance_free(type);
+		pcpu_balance_populated(type);
+		mutex_unlock(&pcpu_alloc_mutex);
+	}
 }
 
 /**
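A side note on the gfp mask that moves into pcpu_balance_populated(): __GFP_NORETRY tells the page allocator to give up early under memory pressure rather than invoking the OOM killer, and __GFP_NOWARN suppresses the allocation-failure warning. That matches the kernel-doc comment above: background population should fail quietly and let a real allocation surface the pressure. A minimal, hypothetical illustration of how such a mask behaves (not part of this commit; the helper name is made up):

	#include <linux/gfp.h>

	/* Hypothetical helper, not from mm/percpu.c: opportunistically grab one
	 * page for the empty-page reserve. With __GFP_NORETRY the allocator
	 * bails out early under memory pressure instead of triggering the OOM
	 * killer; with __GFP_NOWARN the failure is silent, so the balance work
	 * item can simply try again on its next run. */
	static struct page *pcpu_try_alloc_page(void)
	{
		const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;

		return alloc_page(gfp);	/* may return NULL; caller must cope */
	}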
