@@ -1933,31 +1933,22 @@ void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 }
 
 /**
- * __pcpu_balance_workfn - manage the amount of free chunks and populated pages
+ * pcpu_balance_free - manage the amount of free chunks
  * @type: chunk type
  *
- * Reclaim all fully free chunks except for the first one. This is also
- * responsible for maintaining the pool of empty populated pages. However,
- * it is possible that this is called when physical memory is scarce causing
- * OOM killer to be triggered. We should avoid doing so until an actual
- * allocation causes the failure as it is possible that requests can be
- * serviced from already backed regions.
+ * Reclaim all fully free chunks except for the first one.
  */
-static void __pcpu_balance_workfn(enum pcpu_chunk_type type)
+static void pcpu_balance_free(enum pcpu_chunk_type type)
 {
-	/* gfp flags passed to underlying allocators */
-	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
 	LIST_HEAD(to_free);
 	struct list_head *pcpu_slot = pcpu_chunk_list(type);
 	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
 	struct pcpu_chunk *chunk, *next;
-	int slot, nr_to_pop, ret;
 
 	/*
	 * There's no reason to keep around multiple unused chunks and VM
	 * areas can be scarce. Destroy all free chunks except for one.
	 */
-	mutex_lock(&pcpu_alloc_mutex);
 	spin_lock_irq(&pcpu_lock);
 
 	list_for_each_entry_safe(chunk, next, free_head, list) {
@@ -1985,6 +1976,25 @@ static void __pcpu_balance_workfn(enum pcpu_chunk_type type)
 		pcpu_destroy_chunk(chunk);
 		cond_resched();
 	}
+}
+
+/**
+ * pcpu_balance_populated - manage the amount of populated pages
+ * @type: chunk type
+ *
+ * Maintain a certain amount of populated pages to satisfy atomic allocations.
+ * It is possible that this is called when physical memory is scarce causing
+ * OOM killer to be triggered. We should avoid doing so until an actual
+ * allocation causes the failure as it is possible that requests can be
+ * serviced from already backed regions.
+ */
+static void pcpu_balance_populated(enum pcpu_chunk_type type)
+{
+	/* gfp flags passed to underlying allocators */
+	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
+	struct list_head *pcpu_slot = pcpu_chunk_list(type);
+	struct pcpu_chunk *chunk;
+	int slot, nr_to_pop, ret;
 
 	/*
	 * Ensure there are certain number of free populated pages for
@@ -2054,22 +2064,24 @@ static void __pcpu_balance_workfn(enum pcpu_chunk_type type)
 			goto retry_pop;
 		}
 	}
-
-	mutex_unlock(&pcpu_alloc_mutex);
 }
 
 /**
  * pcpu_balance_workfn - manage the amount of free chunks and populated pages
  * @work: unused
  *
- * Call __pcpu_balance_workfn() for each chunk type.
+ * Call pcpu_balance_free() and pcpu_balance_populated() for each chunk type.
  */
 static void pcpu_balance_workfn(struct work_struct *work)
 {
 	enum pcpu_chunk_type type;
 
-	for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
-		__pcpu_balance_workfn(type);
+	for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++) {
+		mutex_lock(&pcpu_alloc_mutex);
+		pcpu_balance_free(type);
+		pcpu_balance_populated(type);
+		mutex_unlock(&pcpu_alloc_mutex);
+	}
 }
 
 /**
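
To make the resulting control flow easy to see outside the kernel tree, here is a minimal stand-alone C sketch of the structure this change ends up with: the worker takes the allocator mutex once per chunk type and runs the free-reclaim phase and the repopulation phase back to back under it, rather than the single combined helper managing the lock internally. All names below (balance_free, balance_populated, NR_CHUNK_TYPES, alloc_mutex, the printf bodies) are illustrative stand-ins for the kernel's pcpu_* symbols, not the real implementation.

/*
 * Sketch only: mirrors the shape of the reworked pcpu_balance_workfn(),
 * with a pthread mutex standing in for pcpu_alloc_mutex.
 */
#include <pthread.h>
#include <stdio.h>

enum chunk_type { CHUNK_ROOT, CHUNK_MEMCG, NR_CHUNK_TYPES };

static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Phase 1: reclaim fully free chunks; never allocates memory. */
static void balance_free(enum chunk_type type)
{
	printf("type %d: reclaim fully free chunks\n", type);
}

/* Phase 2: top up the pool of populated pages; may allocate. */
static void balance_populated(enum chunk_type type)
{
	printf("type %d: repopulate empty pages\n", type);
}

/* Lock once per type so both phases share one critical section. */
static void balance_workfn(void)
{
	enum chunk_type type;

	for (type = 0; type < NR_CHUNK_TYPES; type++) {
		pthread_mutex_lock(&alloc_mutex);
		balance_free(type);
		balance_populated(type);
		pthread_mutex_unlock(&alloc_mutex);
	}
}

int main(void)
{
	balance_workfn();
	return 0;
}

The point of the sketch is only the locking shape: splitting the work into two helpers does not change what allocation-path callers observe, because each type still sees both phases serialized under a single mutex acquisition, as the old combined __pcpu_balance_workfn() did.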