Skip to content

Commit 61c663e

Browse files
Yu Zhao authored and akpm00 committed
mm/truncate: batch-clear shadow entries
Make clear_shadow_entry() clear shadow entries in `struct folio_batch` so that it can reduce contention on i_lock and i_pages locks, e.g., watchdog: BUG: soft lockup - CPU#29 stuck for 11s! [fio:2701649] clear_shadow_entry+0x3d/0x100 mapping_try_invalidate+0x117/0x1d0 invalidate_mapping_pages+0x10/0x20 invalidate_bdev+0x3c/0x50 blkdev_common_ioctl+0x5f7/0xa90 blkdev_ioctl+0x109/0x270 Also, rename clear_shadow_entry() to clear_shadow_entries() accordingly. [[email protected]: v2] Link: https://lkml.kernel.org/r/[email protected] Link: https://lkml.kernel.org/r/[email protected] Reported-by: Bharata B Rao <[email protected]> Closes: https://lore.kernel.org/[email protected]/ Signed-off-by: Yu Zhao <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Johannes Weiner <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent 8a78882 commit 61c663e

File tree

1 file changed

+31
-37
lines changed

1 file changed

+31
-37
lines changed

mm/truncate.c

Lines changed: 31 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -39,12 +39,25 @@ static inline void __clear_shadow_entry(struct address_space *mapping,
3939
xas_store(&xas, NULL);
4040
}
4141

42-
static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
43-
void *entry)
42+
static void clear_shadow_entries(struct address_space *mapping,
43+
struct folio_batch *fbatch, pgoff_t *indices)
4444
{
45+
int i;
46+
47+
/* Handled by shmem itself, or for DAX we do nothing. */
48+
if (shmem_mapping(mapping) || dax_mapping(mapping))
49+
return;
50+
4551
spin_lock(&mapping->host->i_lock);
4652
xa_lock_irq(&mapping->i_pages);
47-
__clear_shadow_entry(mapping, index, entry);
53+
54+
for (i = 0; i < folio_batch_count(fbatch); i++) {
55+
struct folio *folio = fbatch->folios[i];
56+
57+
if (xa_is_value(folio))
58+
__clear_shadow_entry(mapping, indices[i], folio);
59+
}
60+
4861
xa_unlock_irq(&mapping->i_pages);
4962
if (mapping_shrinkable(mapping))
5063
inode_add_lru(mapping->host);
@@ -105,36 +118,6 @@ static void truncate_folio_batch_exceptionals(struct address_space *mapping,
105118
fbatch->nr = j;
106119
}
107120

108-
/*
109-
* Invalidate exceptional entry if easily possible. This handles exceptional
110-
* entries for invalidate_inode_pages().
111-
*/
112-
static int invalidate_exceptional_entry(struct address_space *mapping,
113-
pgoff_t index, void *entry)
114-
{
115-
/* Handled by shmem itself, or for DAX we do nothing. */
116-
if (shmem_mapping(mapping) || dax_mapping(mapping))
117-
return 1;
118-
clear_shadow_entry(mapping, index, entry);
119-
return 1;
120-
}
121-
122-
/*
123-
* Invalidate exceptional entry if clean. This handles exceptional entries for
124-
* invalidate_inode_pages2() so for DAX it evicts only clean entries.
125-
*/
126-
static int invalidate_exceptional_entry2(struct address_space *mapping,
127-
pgoff_t index, void *entry)
128-
{
129-
/* Handled by shmem itself */
130-
if (shmem_mapping(mapping))
131-
return 1;
132-
if (dax_mapping(mapping))
133-
return dax_invalidate_mapping_entry_sync(mapping, index);
134-
clear_shadow_entry(mapping, index, entry);
135-
return 1;
136-
}
137-
138121
/**
139122
* folio_invalidate - Invalidate part or all of a folio.
140123
* @folio: The folio which is affected.
@@ -494,6 +477,7 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
494477
unsigned long ret;
495478
unsigned long count = 0;
496479
int i;
480+
bool xa_has_values = false;
497481

498482
folio_batch_init(&fbatch);
499483
while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
@@ -503,8 +487,8 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
503487
/* We rely upon deletion not changing folio->index */
504488

505489
if (xa_is_value(folio)) {
506-
count += invalidate_exceptional_entry(mapping,
507-
indices[i], folio);
490+
xa_has_values = true;
491+
count++;
508492
continue;
509493
}
510494

@@ -522,6 +506,10 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
522506
}
523507
count += ret;
524508
}
509+
510+
if (xa_has_values)
511+
clear_shadow_entries(mapping, &fbatch, indices);
512+
525513
folio_batch_remove_exceptionals(&fbatch);
526514
folio_batch_release(&fbatch);
527515
cond_resched();
@@ -616,6 +604,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
616604
int ret = 0;
617605
int ret2 = 0;
618606
int did_range_unmap = 0;
607+
bool xa_has_values = false;
619608

620609
if (mapping_empty(mapping))
621610
return 0;
@@ -629,8 +618,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
629618
/* We rely upon deletion not changing folio->index */
630619

631620
if (xa_is_value(folio)) {
632-
if (!invalidate_exceptional_entry2(mapping,
633-
indices[i], folio))
621+
xa_has_values = true;
622+
if (dax_mapping(mapping) &&
623+
!dax_invalidate_mapping_entry_sync(mapping, indices[i]))
634624
ret = -EBUSY;
635625
continue;
636626
}
@@ -666,6 +656,10 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
666656
ret = ret2;
667657
folio_unlock(folio);
668658
}
659+
660+
if (xa_has_values)
661+
clear_shadow_entries(mapping, &fbatch, indices);
662+
669663
folio_batch_remove_exceptionals(&fbatch);
670664
folio_batch_release(&fbatch);
671665
cond_resched();

0 commit comments

Comments (0)