
Commit 8fde12c

mm: prevent get_user_pages() from overflowing page refcount
If the page refcount wraps around past zero, it will be freed while
there are still four billion references to it. One of the possible
avenues for an attacker to try to make this happen is by doing direct
IO on a page multiple times.

This patch makes get_user_pages() refuse to take a new page reference
if there are already more than two billion references to the page.

Reported-by: Jann Horn <[email protected]>
Acked-by: Matthew Wilcox <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
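For reference: the try_get_page() helper that this patch calls was introduced by the parent commit (88b1a17). A minimal sketch of its logic, as best reconstructed from that series (the explanatory comment is added here and is not part of the kernel source):

static inline __must_check bool try_get_page(struct page *page)
{
	page = compound_head(page);
	/*
	 * The refcount is a 32-bit signed atomic: once it has been
	 * incremented past 2^31 it goes negative, so refusing any
	 * page whose count is already non-positive keeps the count
	 * from ever wrapping back around to zero.
	 */
	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
		return false;
	page_ref_inc(page);
	return true;
}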
1 parent 88b1a17 commit 8fde12c

File tree: 2 files changed, 49 insertions(+), 12 deletions(-)


mm/gup.c

Lines changed: 36 additions & 12 deletions
@@ -157,8 +157,12 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 		goto retry;
 	}
 
-	if (flags & FOLL_GET)
-		get_page(page);
+	if (flags & FOLL_GET) {
+		if (unlikely(!try_get_page(page))) {
+			page = ERR_PTR(-ENOMEM);
+			goto out;
+		}
+	}
 	if (flags & FOLL_TOUCH) {
 		if ((flags & FOLL_WRITE) &&
 		    !pte_dirty(pte) && !PageDirty(page))
@@ -295,7 +299,10 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 			if (pmd_trans_unstable(pmd))
 				ret = -EBUSY;
 		} else {
-			get_page(page);
+			if (unlikely(!try_get_page(page))) {
+				spin_unlock(ptl);
+				return ERR_PTR(-ENOMEM);
+			}
 			spin_unlock(ptl);
 			lock_page(page);
 			ret = split_huge_page(page);
@@ -497,7 +504,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
 		if (is_device_public_page(*page))
 			goto unmap;
 	}
-	get_page(*page);
+	if (unlikely(!try_get_page(*page))) {
+		ret = -ENOMEM;
+		goto unmap;
+	}
 out:
 	ret = 0;
 unmap:
@@ -1393,6 +1403,20 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
 	}
 }
 
+/*
+ * Return the compound head page with ref appropriately incremented,
+ * or NULL if that failed.
+ */
+static inline struct page *try_get_compound_head(struct page *page, int refs)
+{
+	struct page *head = compound_head(page);
+	if (WARN_ON_ONCE(page_ref_count(head) < 0))
+		return NULL;
+	if (unlikely(!page_cache_add_speculative(head, refs)))
+		return NULL;
+	return head;
+}
+
 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 			 int write, struct page **pages, int *nr)
@@ -1427,9 +1451,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 
 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 		page = pte_page(pte);
-		head = compound_head(page);
 
-		if (!page_cache_get_speculative(head))
+		head = try_get_compound_head(page, 1);
+		if (!head)
 			goto pte_unmap;
 
 		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
@@ -1568,8 +1592,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
 		refs++;
 	} while (addr += PAGE_SIZE, addr != end);
 
-	head = compound_head(pmd_page(orig));
-	if (!page_cache_add_speculative(head, refs)) {
+	head = try_get_compound_head(pmd_page(orig), refs);
+	if (!head) {
 		*nr -= refs;
 		return 0;
 	}
@@ -1606,8 +1630,8 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
 		refs++;
 	} while (addr += PAGE_SIZE, addr != end);
 
-	head = compound_head(pud_page(orig));
-	if (!page_cache_add_speculative(head, refs)) {
+	head = try_get_compound_head(pud_page(orig), refs);
+	if (!head) {
 		*nr -= refs;
 		return 0;
 	}
@@ -1643,8 +1667,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
 		refs++;
 	} while (addr += PAGE_SIZE, addr != end);
 
-	head = compound_head(pgd_page(orig));
-	if (!page_cache_add_speculative(head, refs)) {
+	head = try_get_compound_head(pgd_page(orig), refs);
+	if (!head) {
 		*nr -= refs;
 		return 0;
 	}
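Why "count < 0" is the overflow test in try_get_compound_head() above: the counter is a 32-bit signed atomic, so it would have to pass through roughly two billion positive values before wrapping back to zero, going negative halfway there. A standalone, hypothetical demonstration of the wrap arithmetic (plain userspace C, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t refcount = INT32_MAX;	/* ~2 billion references taken */

	/* Simulate one more get_page(); the cast through uint32_t
	 * sidesteps signed-overflow undefined behaviour in C. */
	refcount = (int32_t)((uint32_t)refcount + 1);

	printf("refcount after wrap: %d\n", refcount);	/* negative */

	/* This is the condition try_get_compound_head() rejects: */
	if (refcount < 0)
		printf("reference refused, GUP fails with -ENOMEM\n");
	return 0;
}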

mm/hugetlb.c

Lines changed: 13 additions & 0 deletions
@@ -4298,6 +4298,19 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
 		page = pte_page(huge_ptep_get(pte));
+
+		/*
+		 * Instead of doing 'try_get_page()' below in the same_page
+		 * loop, just check the count once here.
+		 */
+		if (unlikely(page_count(page) <= 0)) {
+			if (pages) {
+				spin_unlock(ptl);
+				remainder = 0;
+				err = -ENOMEM;
+				break;
+			}
+		}
same_page:
 		if (pages) {
 			pages[i] = mem_map_offset(page, pfn_offset);
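A note on the hugetlb hunk above: the same_page loop that follows the new check takes its page references one subpage at a time, so checking page_count() once per huge page, while the page table lock is held, avoids paying a try_get_page() on every iteration. Presumably the single check is safe because one pass through the loop can only add on the order of pages_per_huge_page(h) references, nowhere near enough to push a healthy counter toward overflow.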
