
Commit 98b74bb

VMoola authored and akpm00 committed
mm/hugetlb.c: fix UAF of vma in hugetlb fault pathway
Syzbot reports a UAF in hugetlb_fault(). This happens because
vmf_anon_prepare() could drop the per-VMA lock and allow the current VMA
to be freed before hugetlb_vma_unlock_read() is called.

We can fix this by using a modified version of vmf_anon_prepare() that
doesn't release the VMA lock on failure, and then release it ourselves
after hugetlb_vma_unlock_read().

Link: https://lkml.kernel.org/r/[email protected]
Fixes: 9acad7b ("hugetlb: use vmf_anon_prepare() instead of anon_vma_prepare()")
Reported-by: [email protected]
Closes: https://lore.kernel.org/linux-mm/[email protected]/
Signed-off-by: Vishal Moola (Oracle) <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 2a058ab commit 98b74bb
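
The patch switches two hugetlb callers from vmf_anon_prepare() to
__vmf_anon_prepare(), a variant that does not release the per-VMA lock
when it fails, so the lock can instead be dropped after
hugetlb_vma_unlock_read(). The helper split itself lands outside
mm/hugetlb.c and is not part of this diff; the following is a minimal
sketch of the assumed relationship between the two functions, not the
verbatim change:

/*
 * Sketch only: vmf_anon_prepare() keeps its old contract of dropping
 * the per-VMA lock on failure by wrapping __vmf_anon_prepare(), which
 * never drops the lock itself. Callers that must order the release
 * after hugetlb_vma_unlock_read() call __vmf_anon_prepare() directly
 * and invoke vma_end_read() themselves, as the hunks below do.
 */
static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
{
	vm_fault_t ret = __vmf_anon_prepare(vmf);

	if (unlikely(ret & VM_FAULT_RETRY))
		vma_end_read(vmf->vma);
	return ret;
}

This ordering matters because once vma_end_read() runs, the VMA may be
freed; deferring it until after hugetlb_vma_unlock_read() guarantees the
hugetlb unlock never touches a freed VMA.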


mm/hugetlb.c

Lines changed: 18 additions & 2 deletions
@@ -6048,7 +6048,7 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
 	 * When the original hugepage is shared one, it does not have
 	 * anon_vma prepared.
 	 */
-	ret = vmf_anon_prepare(vmf);
+	ret = __vmf_anon_prepare(vmf);
 	if (unlikely(ret))
 		goto out_release_all;
 
@@ -6247,7 +6247,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 	}
 
 	if (!(vma->vm_flags & VM_MAYSHARE)) {
-		ret = vmf_anon_prepare(vmf);
+		ret = __vmf_anon_prepare(vmf);
 		if (unlikely(ret))
 			goto out;
 	}
@@ -6378,6 +6378,14 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 	folio_unlock(folio);
 out:
 	hugetlb_vma_unlock_read(vma);
+
+	/*
+	 * We must check to release the per-VMA lock. __vmf_anon_prepare() is
+	 * the only way ret can be set to VM_FAULT_RETRY.
+	 */
+	if (unlikely(ret & VM_FAULT_RETRY))
+		vma_end_read(vma);
+
 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 	return ret;
 
@@ -6599,6 +6607,14 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 out_mutex:
 	hugetlb_vma_unlock_read(vma);
+
+	/*
+	 * We must check to release the per-VMA lock. __vmf_anon_prepare() in
+	 * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY.
+	 */
+	if (unlikely(ret & VM_FAULT_RETRY))
+		vma_end_read(vma);
+
 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 	/*
 	 * Generally it's safe to hold refcount during waiting page lock. But
