|
58 | 58 | * Defaults to flushing at tlb_end_vma() to reset the range; helps when
|
59 | 59 | * there are large holes between the VMAs.
|
60 | 60 | *
|
| 61 | + * - tlb_free_vmas() |
| 62 | + * |
| 63 | + * tlb_free_vmas() marks the start of unlinking of one or more vmas |
| 64 | + * and freeing page-tables. |
| 65 | + * |
61 | 66 | * - tlb_remove_table()
|
62 | 67 | *
|
63 | 68 | * tlb_remove_table() is the basic primitive to free page-table directories
|
@@ -464,7 +469,12 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
|
464 | 469 | */
|
465 | 470 | tlb->vma_huge = is_vm_hugetlb_page(vma);
|
466 | 471 | tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
|
467 |
| - tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)); |
| 472 | + |
| 473 | + /* |
| 474 | + * Track if there's at least one VM_PFNMAP/VM_MIXEDMAP vma |
| 475 | + * in the tracked range, see tlb_free_vmas(). |
| 476 | + */ |
| 477 | + tlb->vma_pfn |= !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)); |
468 | 478 | }
|
469 | 479 |
|
470 | 480 | static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
|
@@ -547,23 +557,39 @@ static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *
|
547 | 557 | }
|
548 | 558 |
|
549 | 559 | static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
|
| 560 | +{ |
| 561 | + if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) |
| 562 | + return; |
| 563 | + |
| 564 | + /* |
| 565 | + * Do a TLB flush and reset the range at VMA boundaries; this avoids |
| 566 | + * the ranges growing with the unused space between consecutive VMAs, |
| 567 | + * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on |
| 568 | + * this. |
| 569 | + */ |
| 570 | + tlb_flush_mmu_tlbonly(tlb); |
| 571 | +} |
| 572 | + |
/*
 * tlb_free_vmas() - we are about to unlink one or more VMAs and free their
 * page-tables; force out any pending TLB invalidation for fragile mappings.
 */
static inline void tlb_free_vmas(struct mmu_gather *tlb)
{
	/* A full-mm teardown flushes everything at the end; nothing to do here. */
	if (tlb->fullmm)
		return;

	/*
	 * VM_PFNMAP is more fragile because the core mm will not track the
	 * page mapcount -- there might not be page-frames for these PFNs
	 * after all.
	 *
	 * Specifically, there is a race between munmap() and
	 * unmap_mapping_range(), where munmap() will unlink the VMA, such
	 * that unmap_mapping_range() will no longer observe the VMA and
	 * no-op, without observing the TLBI, returning prematurely.
	 *
	 * So if we're about to unlink such a VMA, and we have pending
	 * TLBI for such a vma, flush things now.
	 */
	if (tlb->vma_pfn)
		tlb_flush_mmu_tlbonly(tlb);
}
|
568 | 594 |
|
569 | 595 | /*
|
|
0 commit comments