@@ -164,6 +164,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 	for (i = 0; i < folio_batch_count(fbatch); i++) {
 		struct folio *folio = fbatch->folios[i];

+		/* block memcg migration while the folio moves between lru */
+		if (move_fn != lru_add && !folio_test_clear_lru(folio))
+			continue;
+
 		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 		move_fn(lruvec, folio);

@@ -176,23 +180,19 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 }

 static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
-		struct folio *folio, move_fn_t move_fn,
-		bool on_lru, bool disable_irq)
+		struct folio *folio, move_fn_t move_fn, bool disable_irq)
 {
 	unsigned long flags;

-	if (on_lru && !folio_test_clear_lru(folio))
-		return;
-
 	folio_get(folio);

 	if (disable_irq)
 		local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
 	else
 		local_lock(&cpu_fbatches.lock);

-	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) ||
-	    lru_cache_disabled())
+	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) ||
+	    folio_test_large(folio) || lru_cache_disabled())
 		folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);

 	if (disable_irq)
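Note (not part of the diff): for readability, the sketch below assembles __folio_batch_add_and_move() as it reads once this hunk is applied. It is reconstructed only from the hunk's added and context lines; the matching IRQ-restoring unlock sits just past the hunk boundary and is assumed here, so treat this as a sketch rather than the exact upstream function.

static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
		struct folio *folio, move_fn_t move_fn, bool disable_irq)
{
	unsigned long flags;

	/* The LRU flag is now tested and cleared in folio_batch_move_lru(). */
	folio_get(folio);

	if (disable_irq)
		local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
	else
		local_lock(&cpu_fbatches.lock);

	/* Drain right away when the batch fills up, the folio is large, or
	 * per-CPU LRU caching is disabled. */
	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) ||
	    folio_test_large(folio) || lru_cache_disabled())
		folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);

	if (disable_irq)
		local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags); /* assumed; outside the hunk */
	else
		local_unlock(&cpu_fbatches.lock);
}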
@@ -201,13 +201,13 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
 		local_unlock(&cpu_fbatches.lock);
 }

-#define folio_batch_add_and_move(folio, op, on_lru)				\
-	__folio_batch_add_and_move(						\
-		&cpu_fbatches.op,						\
-		folio,								\
-		op,								\
-		on_lru,								\
-		offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq) \
+#define folio_batch_add_and_move(folio, op)					\
+	__folio_batch_add_and_move(						\
+		&cpu_fbatches.op,						\
+		folio,								\
+		op,								\
+		offsetof(struct cpu_fbatches, op) >=				\
+		offsetof(struct cpu_fbatches, lock_irq)				\
 	)

 static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
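The offsetof() comparison in the macro above decides at compile time whether a given batch needs the IRQ-disabling lock: any folio_batch member laid out at or after lock_irq in struct cpu_fbatches yields disable_irq == true, which lets the compiler fold the branch on disable_irq in __folio_batch_add_and_move(). The standalone program below only illustrates that field-ordering trick; the struct layout and names are hypothetical stand-ins, not the kernel's cpu_fbatches.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for struct cpu_fbatches: members before lock_irq
 * are drained under the plain local lock, members at or after it under
 * the IRQ-disabling lock. */
struct batches {
	int lock;
	int lru_add;
	int lru_deactivate;
	int lock_irq;
	int lru_move_tail;
};

/* Mirrors the macro's trick: a compile-time constant derived from field order. */
#define NEEDS_IRQ(member) \
	(offsetof(struct batches, member) >= offsetof(struct batches, lock_irq))

int main(void)
{
	printf("lru_add needs irq-disabled lock:       %d\n", (int)NEEDS_IRQ(lru_add));       /* prints 0 */
	printf("lru_move_tail needs irq-disabled lock: %d\n", (int)NEEDS_IRQ(lru_move_tail)); /* prints 1 */
	return 0;
}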
@@ -231,10 +231,10 @@ static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
 void folio_rotate_reclaimable(struct folio *folio)
 {
 	if (folio_test_locked(folio) || folio_test_dirty(folio) ||
-	    folio_test_unevictable(folio))
+	    folio_test_unevictable(folio) || !folio_test_lru(folio))
 		return;

-	folio_batch_add_and_move(folio, lru_move_tail, true);
+	folio_batch_add_and_move(folio, lru_move_tail);
 }

 void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
@@ -328,10 +328,11 @@ static void folio_activate_drain(int cpu)

 void folio_activate(struct folio *folio)
 {
-	if (folio_test_active(folio) || folio_test_unevictable(folio))
+	if (folio_test_active(folio) || folio_test_unevictable(folio) ||
+	    !folio_test_lru(folio))
 		return;

-	folio_batch_add_and_move(folio, lru_activate, true);
+	folio_batch_add_and_move(folio, lru_activate);
 }

 #else
@@ -507,7 +508,7 @@ void folio_add_lru(struct folio *folio)
 	    lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
 		folio_set_active(folio);

-	folio_batch_add_and_move(folio, lru_add, false);
+	folio_batch_add_and_move(folio, lru_add);
 }
 EXPORT_SYMBOL(folio_add_lru);

@@ -685,13 +686,13 @@ void lru_add_drain_cpu(int cpu)
 void deactivate_file_folio(struct folio *folio)
 {
 	/* Deactivating an unevictable folio will not accelerate reclaim */
-	if (folio_test_unevictable(folio))
+	if (folio_test_unevictable(folio) || !folio_test_lru(folio))
 		return;

 	if (lru_gen_enabled() && lru_gen_clear_refs(folio))
 		return;

-	folio_batch_add_and_move(folio, lru_deactivate_file, true);
+	folio_batch_add_and_move(folio, lru_deactivate_file);
 }

 /*
@@ -704,13 +705,13 @@ void deactivate_file_folio(struct folio *folio)
  */
 void folio_deactivate(struct folio *folio)
 {
-	if (folio_test_unevictable(folio))
+	if (folio_test_unevictable(folio) || !folio_test_lru(folio))
 		return;

 	if (lru_gen_enabled() ? lru_gen_clear_refs(folio) : !folio_test_active(folio))
 		return;

-	folio_batch_add_and_move(folio, lru_deactivate, true);
+	folio_batch_add_and_move(folio, lru_deactivate);
 }

 /**
@@ -723,10 +724,11 @@ void folio_deactivate(struct folio *folio)
 void folio_mark_lazyfree(struct folio *folio)
 {
 	if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
+	    !folio_test_lru(folio) ||
 	    folio_test_swapcache(folio) || folio_test_unevictable(folio))
 		return;

-	folio_batch_add_and_move(folio, lru_lazyfree, true);
+	folio_batch_add_and_move(folio, lru_lazyfree);
 }

 void lru_add_drain(void)