
Commit afb99e9

Hugh Dickins authored and akpm00 committed
mm: revert "mm/gup: clear the LRU flag of a page before adding to LRU batch"
This reverts commit 33dfe92: now that collect_longterm_unpinnable_folios() is checking ref_count instead of lru, and mlock/munlock do not participate in the revised LRU flag clearing, those changes are misleading, and enlarge the window during which mlock/munlock may miss an mlock_count update.

It is possible (I'd hesitate to claim probable) that the greater likelihood of missed mlock_count updates would explain the "Realtime threads delayed due to kcompactd0" observed on 6.12 in the Link below. If that is the case, this reversion will help; but a complete solution needs also a further patch, beyond the scope of this series.

Included some 80-column cleanup around folio_batch_add_and_move().

The role of folio_test_clear_lru() (before taking per-memcg lru_lock) is questionable since 6.13 removed mem_cgroup_move_account() etc; but perhaps there are still some races which need it - not examined here.

Link: https://lore.kernel.org/linux-mm/DU0PR01MB10385345F7153F334100981888259A@DU0PR01MB10385.eurprd01.prod.exchangelabs.com/
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: David Hildenbrand <[email protected]>
Cc: "Aneesh Kumar K.V" <[email protected]>
Cc: Axel Rasmussen <[email protected]>
Cc: Chris Li <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: Keir Fraser <[email protected]>
Cc: Konstantin Khlebnikov <[email protected]>
Cc: Li Zhe <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Peter Xu <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Shivank Garg <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Wei Xu <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: yangge <[email protected]>
Cc: Yuanchu Xie <[email protected]>
Cc: Yu Zhao <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent a09a8a1 commit afb99e9

File tree: 1 file changed, +26 −24 lines


mm/swap.c

Lines changed: 26 additions & 24 deletions
@@ -164,6 +164,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 	for (i = 0; i < folio_batch_count(fbatch); i++) {
 		struct folio *folio = fbatch->folios[i];
 
+		/* block memcg migration while the folio moves between lru */
+		if (move_fn != lru_add && !folio_test_clear_lru(folio))
+			continue;
+
 		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 		move_fn(lruvec, folio);
 
@@ -176,23 +180,19 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 }
 
 static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
-		struct folio *folio, move_fn_t move_fn,
-		bool on_lru, bool disable_irq)
+		struct folio *folio, move_fn_t move_fn, bool disable_irq)
 {
 	unsigned long flags;
 
-	if (on_lru && !folio_test_clear_lru(folio))
-		return;
-
 	folio_get(folio);
 
 	if (disable_irq)
 		local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
 	else
 		local_lock(&cpu_fbatches.lock);
 
-	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) ||
-	    lru_cache_disabled())
+	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) ||
+	    folio_test_large(folio) || lru_cache_disabled())
 		folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);
 
 	if (disable_irq)
@@ -201,13 +201,13 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
 		local_unlock(&cpu_fbatches.lock);
 }
 
-#define folio_batch_add_and_move(folio, op, on_lru)					\
-	__folio_batch_add_and_move(							\
-		&cpu_fbatches.op,							\
-		folio,									\
-		op,									\
-		on_lru,									\
-		offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq)	\
+#define folio_batch_add_and_move(folio, op)						\
+	__folio_batch_add_and_move(							\
+		&cpu_fbatches.op,							\
+		folio,									\
+		op,									\
+		offsetof(struct cpu_fbatches, op) >=					\
+			offsetof(struct cpu_fbatches, lock_irq)				\
 	)
 
 static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
@@ -231,10 +231,10 @@ static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
 void folio_rotate_reclaimable(struct folio *folio)
 {
 	if (folio_test_locked(folio) || folio_test_dirty(folio) ||
-	    folio_test_unevictable(folio))
+	    folio_test_unevictable(folio) || !folio_test_lru(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_move_tail, true);
+	folio_batch_add_and_move(folio, lru_move_tail);
 }
 
 void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
@@ -328,10 +328,11 @@ static void folio_activate_drain(int cpu)
 
 void folio_activate(struct folio *folio)
 {
-	if (folio_test_active(folio) || folio_test_unevictable(folio))
+	if (folio_test_active(folio) || folio_test_unevictable(folio) ||
+	    !folio_test_lru(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_activate, true);
+	folio_batch_add_and_move(folio, lru_activate);
 }
 
 #else
@@ -507,7 +508,7 @@ void folio_add_lru(struct folio *folio)
 	    lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
 		folio_set_active(folio);
 
-	folio_batch_add_and_move(folio, lru_add, false);
+	folio_batch_add_and_move(folio, lru_add);
 }
 EXPORT_SYMBOL(folio_add_lru);
 
@@ -685,13 +686,13 @@ void lru_add_drain_cpu(int cpu)
 void deactivate_file_folio(struct folio *folio)
 {
 	/* Deactivating an unevictable folio will not accelerate reclaim */
-	if (folio_test_unevictable(folio))
+	if (folio_test_unevictable(folio) || !folio_test_lru(folio))
 		return;
 
 	if (lru_gen_enabled() && lru_gen_clear_refs(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_deactivate_file, true);
+	folio_batch_add_and_move(folio, lru_deactivate_file);
 }
 
 /*
@@ -704,13 +705,13 @@ void deactivate_file_folio(struct folio *folio)
  */
 void folio_deactivate(struct folio *folio)
 {
-	if (folio_test_unevictable(folio))
+	if (folio_test_unevictable(folio) || !folio_test_lru(folio))
 		return;
 
 	if (lru_gen_enabled() ? lru_gen_clear_refs(folio) : !folio_test_active(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_deactivate, true);
+	folio_batch_add_and_move(folio, lru_deactivate);
 }
 
 /**
@@ -723,10 +724,11 @@ void folio_deactivate(struct folio *folio)
 void folio_mark_lazyfree(struct folio *folio)
 {
 	if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
+	    !folio_test_lru(folio) ||
 	    folio_test_swapcache(folio) || folio_test_unevictable(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_lazyfree, true);
+	folio_batch_add_and_move(folio, lru_lazyfree);
 }
 
 void lru_add_drain(void)
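For reference, a sketch of the batching helper and its wrapper macro as they read after this revert, assembled from the hunks above. The unlock lines near the end fall between the hunks and are assumed to mirror the lock path; everything else is taken from the "+" and context lines.

static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
		struct folio *folio, move_fn_t move_fn, bool disable_irq)
{
	unsigned long flags;

	folio_get(folio);

	if (disable_irq)
		local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
	else
		local_lock(&cpu_fbatches.lock);

	/* drain now if the batch is full, the folio is large, or LRU caching is disabled */
	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) ||
	    folio_test_large(folio) || lru_cache_disabled())
		folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);

	if (disable_irq)
		local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);	/* assumed: between the hunks, not shown above */
	else
		local_unlock(&cpu_fbatches.lock);			/* shown as context in the third hunk */
}

#define folio_batch_add_and_move(folio, op)				\
	__folio_batch_add_and_move(					\
		&cpu_fbatches.op,					\
		folio,							\
		op,							\
		offsetof(struct cpu_fbatches, op) >=			\
			offsetof(struct cpu_fbatches, lock_irq)		\
	)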
