
Commit 4a9e231

axboe authored and akpm00 committed
mm/truncate: add folio_unmap_invalidate() helper
Add a folio_unmap_invalidate() helper, which unmaps and invalidates a
given folio. The caller must already have locked the folio. Embed the
old invalidate_complete_folio2() helper in there as well, as nobody else
calls it. Use this new helper in invalidate_inode_pages2_range(), rather
than duplicate the code there.

In preparation for using this elsewhere as well, have it take a gfp_t
mask rather than assume GFP_KERNEL is the right choice. This bubbles
back to invalidate_complete_folio2() as well.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
Cc: Brian Foster <[email protected]>
Cc: Chris Mason <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 77d0752 commit 4a9e231
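To make the calling contract described above concrete, here is a minimal, hypothetical sketch of an mm-internal caller (the function below is illustrative only, not part of this commit): the folio must be locked on entry, and the gfp mask bounds what filemap_release_folio() may do while dropping private data.

/*
 * Illustrative sketch only -- not part of this commit. It shows the
 * contract folio_unmap_invalidate() expects: the caller holds the
 * folio lock and chooses the gfp mask passed down to
 * filemap_release_folio(). Returns 0 on success (a dirty folio is
 * skipped and also returns 0), or a negative errno such as -EBUSY.
 */
static int try_evict_folio(struct address_space *mapping, struct folio *folio)
{
	int ret;

	folio_lock(folio);
	ret = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
	folio_unlock(folio);
	return ret;
}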

2 files changed: +30, -25 lines

mm/internal.h

Lines changed: 2 additions & 0 deletions

@@ -392,6 +392,8 @@ void unmap_page_range(struct mmu_gather *tlb,
 			struct vm_area_struct *vma,
 			unsigned long addr, unsigned long end,
 			struct zap_details *details);
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+		gfp_t gfp);
 
 void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
 		unsigned int order);

mm/truncate.c

Lines changed: 28 additions & 25 deletions

@@ -525,21 +525,42 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 }
 EXPORT_SYMBOL(invalidate_mapping_pages);
 
+static int folio_launder(struct address_space *mapping, struct folio *folio)
+{
+	if (!folio_test_dirty(folio))
+		return 0;
+	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
+		return 0;
+	return mapping->a_ops->launder_folio(folio);
+}
+
 /*
  * This is like mapping_evict_folio(), except it ignores the folio's
  * refcount. We do this because invalidate_inode_pages2() needs stronger
  * invalidation guarantees, and cannot afford to leave folios behind because
  * shrink_folio_list() has a temp ref on them, or because they're transiently
  * sitting in the folio_add_lru() caches.
  */
-static int invalidate_complete_folio2(struct address_space *mapping,
-					struct folio *folio)
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+			   gfp_t gfp)
 {
-	if (folio->mapping != mapping)
-		return 0;
+	int ret;
+
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 
-	if (!filemap_release_folio(folio, GFP_KERNEL))
+	if (folio_test_dirty(folio))
 		return 0;
+	if (folio_mapped(folio))
+		unmap_mapping_folio(folio);
+	BUG_ON(folio_mapped(folio));
+
+	ret = folio_launder(mapping, folio);
+	if (ret)
+		return ret;
+	if (folio->mapping != mapping)
+		return -EBUSY;
+	if (!filemap_release_folio(folio, gfp))
+		return -EBUSY;
 
 	spin_lock(&mapping->host->i_lock);
 	xa_lock_irq(&mapping->i_pages);
@@ -558,16 +579,7 @@ static int invalidate_complete_folio2(struct address_space *mapping,
 failed:
 	xa_unlock_irq(&mapping->i_pages);
 	spin_unlock(&mapping->host->i_lock);
-	return 0;
-}
-
-static int folio_launder(struct address_space *mapping, struct folio *folio)
-{
-	if (!folio_test_dirty(folio))
-		return 0;
-	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
-		return 0;
-	return mapping->a_ops->launder_folio(folio);
+	return -EBUSY;
 }
 
 /**
@@ -631,16 +643,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 			}
 			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
 			folio_wait_writeback(folio);
-
-			if (folio_mapped(folio))
-				unmap_mapping_folio(folio);
-			BUG_ON(folio_mapped(folio));
-
-			ret2 = folio_launder(mapping, folio);
-			if (ret2 == 0) {
-				if (!invalidate_complete_folio2(mapping, folio))
-					ret2 = -EBUSY;
-			}
+			ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
 			if (ret2 < 0)
 				ret = ret2;
 			folio_unlock(folio);
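Since the gfp mask now bubbles all the way down to filemap_release_folio(), a future caller that cannot sleep could pass a non-blocking mask. The sketch below is a hypothetical illustration of that point, not code from this series:

/*
 * Hypothetical non-blocking caller, for illustration only. Passing
 * GFP_NOWAIT means that releasing private data in
 * filemap_release_folio() must not block; on failure the helper
 * returns -EBUSY and the caller can simply retry later.
 */
static int try_evict_folio_nowait(struct address_space *mapping,
				  struct folio *folio)
{
	int ret;

	if (!folio_trylock(folio))
		return -EAGAIN;
	ret = folio_unmap_invalidate(mapping, folio, GFP_NOWAIT);
	folio_unlock(folio);
	return ret;
}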
