@@ -1980,19 +1980,22 @@ void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
  * If empty_only is %false, reclaim all fully free chunks regardless of the
  * number of populated pages. Otherwise, only reclaim chunks that have no
  * populated pages.
+ *
+ * CONTEXT:
+ * pcpu_lock (can be dropped temporarily)
  */
 static void pcpu_balance_free(bool empty_only)
 {
         LIST_HEAD(to_free);
         struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot];
         struct pcpu_chunk *chunk, *next;

+        lockdep_assert_held(&pcpu_lock);
+
         /*
          * There's no reason to keep around multiple unused chunks and VM
          * areas can be scarce. Destroy all free chunks except for one.
          */
-        spin_lock_irq(&pcpu_lock);
-
         list_for_each_entry_safe(chunk, next, free_head, list) {
                 WARN_ON(chunk->immutable);

@@ -2004,8 +2007,10 @@ static void pcpu_balance_free(bool empty_only)
                         list_move(&chunk->list, &to_free);
         }

-        spin_unlock_irq(&pcpu_lock);
+        if (list_empty(&to_free))
+                return;

+        spin_unlock_irq(&pcpu_lock);
         list_for_each_entry_safe(chunk, next, &to_free, list) {
                 unsigned int rs, re;

@@ -2019,6 +2024,7 @@ static void pcpu_balance_free(bool empty_only)
                 pcpu_destroy_chunk(chunk);
                 cond_resched();
         }
+        spin_lock_irq(&pcpu_lock);
 }

 /**
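
Taken together, the hunks above convert pcpu_balance_free() from acquiring pcpu_lock itself to running under the caller's pcpu_lock and dropping it only around work that can block. Below is a condensed sketch of the resulting flow, not the verbatim function: the immutability WARN_ON, the keep-one-spare-chunk check and the per-chunk depopulation are elided.

static void pcpu_balance_free(bool empty_only)
{
        LIST_HEAD(to_free);
        struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot];
        struct pcpu_chunk *chunk, *next;

        lockdep_assert_held(&pcpu_lock);        /* caller holds pcpu_lock, irqs off */

        /*
         * Phase 1: still under pcpu_lock, move victim chunks to a private list
         * (the real selection keeps one spare chunk and honours empty_only).
         */
        list_for_each_entry_safe(chunk, next, free_head, list)
                list_move(&chunk->list, &to_free);

        if (list_empty(&to_free))
                return;                         /* nothing to do, lock never dropped */

        /* Phase 2: drop the lock, since destroying chunks can block. */
        spin_unlock_irq(&pcpu_lock);
        list_for_each_entry_safe(chunk, next, &to_free, list) {
                pcpu_destroy_chunk(chunk);      /* depopulation of the chunk elided */
                cond_resched();
        }
        spin_lock_irq(&pcpu_lock);              /* re-acquire before returning to the caller */
}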
@@ -2029,6 +2035,9 @@ static void pcpu_balance_free(bool empty_only)
  * OOM killer to be triggered. We should avoid doing so until an actual
  * allocation causes the failure as it is possible that requests can be
  * serviced from already backed regions.
+ *
+ * CONTEXT:
+ * pcpu_lock (can be dropped temporarily)
  */
 static void pcpu_balance_populated(void)
 {
@@ -2037,6 +2046,8 @@ static void pcpu_balance_populated(void)
         struct pcpu_chunk *chunk;
         int slot, nr_to_pop, ret;

+        lockdep_assert_held(&pcpu_lock);
+
         /*
          * Ensure there are certain number of free populated pages for
          * atomic allocs. Fill up from the most packed so that atomic
@@ -2064,13 +2075,11 @@ static void pcpu_balance_populated(void)
                 if (!nr_to_pop)
                         break;

-                spin_lock_irq(&pcpu_lock);
                 list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) {
                         nr_unpop = chunk->nr_pages - chunk->nr_populated;
                         if (nr_unpop)
                                 break;
                 }
-                spin_unlock_irq(&pcpu_lock);

                 if (!nr_unpop)
                         continue;
@@ -2080,12 +2089,13 @@ static void pcpu_balance_populated(void)
                                              chunk->nr_pages) {
                         int nr = min_t(int, re - rs, nr_to_pop);

+                        spin_unlock_irq(&pcpu_lock);
                         ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
+                        cond_resched();
+                        spin_lock_irq(&pcpu_lock);
                         if (!ret) {
                                 nr_to_pop -= nr;
-                                spin_lock_irq(&pcpu_lock);
                                 pcpu_chunk_populated(chunk, rs, rs + nr);
-                                spin_unlock_irq(&pcpu_lock);
                         } else {
                                 nr_to_pop = 0;
                         }
@@ -2097,11 +2107,12 @@ static void pcpu_balance_populated(void)

         if (nr_to_pop) {
                 /* ran out of chunks to populate, create a new one and retry */
+                spin_unlock_irq(&pcpu_lock);
                 chunk = pcpu_create_chunk(gfp);
+                cond_resched();
+                spin_lock_irq(&pcpu_lock);
                 if (chunk) {
-                        spin_lock_irq(&pcpu_lock);
                         pcpu_chunk_relocate(chunk, -1);
-                        spin_unlock_irq(&pcpu_lock);
                         goto retry_pop;
                 }
         }
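
pcpu_balance_populated() follows the same contract: the list walks stay under pcpu_lock, and the lock is dropped only across pcpu_populate_chunk() and pcpu_create_chunk(), both of which may block while allocating memory; cond_resched() runs while the lock is not held, and the lock is re-taken before any chunk metadata is touched. Stripped of the slot and retry bookkeeping, one populate step reduces to the shape below (the comments are added here for illustration).

        /*
         * One populate step inside pcpu_balance_populated(); pcpu_lock is held
         * on entry and on exit, but not across the potentially sleeping call.
         */
        spin_unlock_irq(&pcpu_lock);            /* pcpu_populate_chunk() may sleep */
        ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
        cond_resched();
        spin_lock_irq(&pcpu_lock);              /* relock before updating metadata */
        if (!ret) {
                nr_to_pop -= nr;
                pcpu_chunk_populated(chunk, rs, rs + nr);       /* now under pcpu_lock */
        } else {
                nr_to_pop = 0;                  /* allocation failed, stop populating */
        }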
@@ -2117,14 +2128,18 @@ static void pcpu_balance_populated(void)
  * populated pages threshold, reintegrate the chunk if it has empty free pages.
  * Each chunk is scanned in the reverse order to keep populated pages close to
  * the beginning of the chunk.
+ *
+ * CONTEXT:
+ * pcpu_lock (can be dropped temporarily)
+ *
  */
 static void pcpu_reclaim_populated(void)
 {
         struct pcpu_chunk *chunk;
         struct pcpu_block_md *block;
         int i, end;

-        spin_lock_irq(&pcpu_lock);
+        lockdep_assert_held(&pcpu_lock);

 restart:
         /*
@@ -2190,8 +2205,6 @@ static void pcpu_reclaim_populated(void)
                         list_move(&chunk->list,
                                   &pcpu_chunk_lists[pcpu_sidelined_slot]);
         }
-
-        spin_unlock_irq(&pcpu_lock);
 }

 /**
@@ -2212,10 +2225,14 @@ static void pcpu_balance_workfn(struct work_struct *work)
          * appropriate.
          */
         mutex_lock(&pcpu_alloc_mutex);
+        spin_lock_irq(&pcpu_lock);
+
         pcpu_balance_free(false);
         pcpu_reclaim_populated();
         pcpu_balance_populated();
         pcpu_balance_free(true);
+
+        spin_unlock_irq(&pcpu_lock);
         mutex_unlock(&pcpu_alloc_mutex);
 }

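
With the final hunk the whole locking scheme is visible at the call site: pcpu_balance_workfn() takes pcpu_alloc_mutex and then pcpu_lock once, and each helper is entered with pcpu_lock held (checked via lockdep_assert_held()) and may drop and re-take it around blocking work, always returning with it held. A condensed view of the caller as it stands after this patch, with explanatory comments added for this write-up:

static void pcpu_balance_workfn(struct work_struct *work)
{
        /*
         * pcpu_alloc_mutex serializes balance work; pcpu_lock protects the
         * chunk lists and is handed to the helpers below already held.
         */
        mutex_lock(&pcpu_alloc_mutex);
        spin_lock_irq(&pcpu_lock);

        pcpu_balance_free(false);       /* each callee asserts pcpu_lock and may */
        pcpu_reclaim_populated();       /* temporarily drop it, but it is always */
        pcpu_balance_populated();       /* held again by the time it returns     */
        pcpu_balance_free(true);

        spin_unlock_irq(&pcpu_lock);
        mutex_unlock(&pcpu_alloc_mutex);
}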