Commit 2a058ab

VMoola authored and akpm00 committed
mm: change vmf_anon_prepare() to __vmf_anon_prepare()
Some callers of vmf_anon_prepare() may not want us to release the per-VMA lock ourselves. Rename vmf_anon_prepare() to __vmf_anon_prepare() and let the callers drop the lock when desired. Also, make vmf_anon_prepare() a wrapper that releases the per-VMA lock itself for any callers that don't care.

This is in preparation to fix this bug reported by syzbot: https://lore.kernel.org/linux-mm/[email protected]/

Link: https://lkml.kernel.org/r/[email protected]
Fixes: 9acad7b ("hugetlb: use vmf_anon_prepare() instead of anon_vma_prepare()")
Reported-by: [email protected]
Closes: https://lore.kernel.org/linux-mm/[email protected]/
Signed-off-by: Vishal Moola (Oracle) <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent b4afe41 commit 2a058ab
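
For context, a minimal sketch of the two call patterns this split is meant to support; both caller functions below are illustrative only and not part of the patch:

/* Existing callers keep using the wrapper and see no behavioural change:
 * on VM_FAULT_RETRY the wrapper has already dropped the per-VMA lock.
 */
static vm_fault_t example_plain_caller(struct vm_fault *vmf)
{
	return vmf_anon_prepare(vmf);
}

/* A caller that must release its own locks before the per-VMA lock can
 * call __vmf_anon_prepare() and handle the unwind itself.
 */
static vm_fault_t example_lock_ordering_caller(struct vm_fault *vmf)
{
	vm_fault_t ret = __vmf_anon_prepare(vmf);

	if (unlikely(ret & VM_FAULT_RETRY)) {
		/* drop any subsystem locks here first, in the required order */
		vma_end_read(vmf->vma);
	}
	return ret;
}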

File tree: mm/internal.h, mm/memory.c

2 files changed: +13, -6 lines


mm/internal.h

Lines changed: 10 additions & 1 deletion
@@ -310,7 +310,16 @@ static inline void wake_throttle_isolated(pg_data_t *pgdat)
 	wake_up(wqh);
 }
 
-vm_fault_t vmf_anon_prepare(struct vm_fault *vmf);
+vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
+static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
+{
+	vm_fault_t ret = __vmf_anon_prepare(vmf);
+
+	if (unlikely(ret & VM_FAULT_RETRY))
+		vma_end_read(vmf->vma);
+	return ret;
+}
+
 vm_fault_t do_swap_page(struct vm_fault *vmf);
 void folio_rotate_reclaimable(struct folio *folio);
 bool __folio_end_writeback(struct folio *folio);

mm/memory.c

Lines changed: 3 additions & 5 deletions
@@ -3259,7 +3259,7 @@ static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
 }
 
 /**
- * vmf_anon_prepare - Prepare to handle an anonymous fault.
+ * __vmf_anon_prepare - Prepare to handle an anonymous fault.
  * @vmf: The vm_fault descriptor passed from the fault handler.
  *
  * When preparing to insert an anonymous page into a VMA from a
@@ -3273,18 +3273,16 @@ static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
  * Return: 0 if fault handling can proceed. Any other value should be
  * returned to the caller.
  */
-vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
+vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	vm_fault_t ret = 0;
 
 	if (likely(vma->anon_vma))
 		return 0;
 	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
-		if (!mmap_read_trylock(vma->vm_mm)) {
-			vma_end_read(vma);
+		if (!mmap_read_trylock(vma->vm_mm))
 			return VM_FAULT_RETRY;
-		}
 	}
 	if (__anon_vma_prepare(vma))
 		ret = VM_FAULT_OOM;
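
Note that with vma_end_read() dropped from the trylock-failure path above, __vmf_anon_prepare() can return VM_FAULT_RETRY with the per-VMA lock still held; the vmf_anon_prepare() wrapper added in mm/internal.h keeps the old behaviour for existing callers, while the hugetlb path targeted by the syzbot report can release the lock in its own unwind order.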
