@@ -157,8 +157,12 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
157
157
goto retry ;
158
158
}
159
159
160
- if (flags & FOLL_GET )
161
- get_page (page );
160
+ if (flags & FOLL_GET ) {
161
+ if (unlikely (!try_get_page (page ))) {
162
+ page = ERR_PTR (- ENOMEM );
163
+ goto out ;
164
+ }
165
+ }
162
166
if (flags & FOLL_TOUCH ) {
163
167
if ((flags & FOLL_WRITE ) &&
164
168
!pte_dirty (pte ) && !PageDirty (page ))
@@ -295,7 +299,10 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
295
299
if (pmd_trans_unstable (pmd ))
296
300
ret = - EBUSY ;
297
301
} else {
298
- get_page (page );
302
+ if (unlikely (!try_get_page (page ))) {
303
+ spin_unlock (ptl );
304
+ return ERR_PTR (- ENOMEM );
305
+ }
299
306
spin_unlock (ptl );
300
307
lock_page (page );
301
308
ret = split_huge_page (page );
@@ -497,7 +504,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
497
504
if (is_device_public_page (* page ))
498
505
goto unmap ;
499
506
}
500
- get_page (* page );
507
+ if (unlikely (!try_get_page (* page ))) {
508
+ ret = - ENOMEM ;
509
+ goto unmap ;
510
+ }
501
511
out :
502
512
ret = 0 ;
503
513
unmap :
@@ -1393,6 +1403,20 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
1393
1403
}
1394
1404
}
1395
1405
1406
+ /*
1407
+ * Return the compound head page with ref appropriately incremented,
1408
+ * or NULL if that failed.
1409
+ */
1410
+ static inline struct page * try_get_compound_head (struct page * page , int refs )
1411
+ {
1412
+ struct page * head = compound_head (page );
1413
+ /*
+ * Refuse to take references if the head page's refcount is already
+ * negative. NOTE(review): presumably this guards against refcount
+ * saturation/overflow (a negative count would indicate overflow or a
+ * page on its way to being freed) — confirm against page_ref.h.
+ */
+ if (WARN_ON_ONCE (page_ref_count (head ) < 0 ))
1414
+ return NULL ;
+ /* Speculative grab: fails (returns NULL) if the page is being freed. */
+ if (unlikely (!page_cache_add_speculative (head , refs )))
1416
+ return NULL ;
1417
+ return head ;
1418
+ }
1419
+
1396
1420
#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
1397
1421
static int gup_pte_range (pmd_t pmd , unsigned long addr , unsigned long end ,
1398
1422
int write , struct page * * pages , int * nr )
@@ -1427,9 +1451,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
1427
1451
1428
1452
VM_BUG_ON (!pfn_valid (pte_pfn (pte )));
1429
1453
page = pte_page (pte );
1430
- head = compound_head (page );
1431
1454
1432
- if (!page_cache_get_speculative (head ))
1455
+ head = try_get_compound_head (page , 1 );
1456
+ if (!head )
1433
1457
goto pte_unmap ;
1434
1458
1435
1459
if (unlikely (pte_val (pte ) != pte_val (* ptep ))) {
@@ -1568,8 +1592,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
1568
1592
refs ++ ;
1569
1593
} while (addr += PAGE_SIZE , addr != end );
1570
1594
1571
- head = compound_head (pmd_page (orig ));
1572
- if (!page_cache_add_speculative ( head , refs ) ) {
1595
+ head = try_get_compound_head (pmd_page (orig ), refs );
1596
+ if (!head ) {
1573
1597
* nr -= refs ;
1574
1598
return 0 ;
1575
1599
}
@@ -1606,8 +1630,8 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
1606
1630
refs ++ ;
1607
1631
} while (addr += PAGE_SIZE , addr != end );
1608
1632
1609
- head = compound_head (pud_page (orig ));
1610
- if (!page_cache_add_speculative ( head , refs ) ) {
1633
+ head = try_get_compound_head (pud_page (orig ), refs );
1634
+ if (!head ) {
1611
1635
* nr -= refs ;
1612
1636
return 0 ;
1613
1637
}
@@ -1643,8 +1667,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
1643
1667
refs ++ ;
1644
1668
} while (addr += PAGE_SIZE , addr != end );
1645
1669
1646
- head = compound_head (pgd_page (orig ));
1647
- if (!page_cache_add_speculative ( head , refs ) ) {
1670
+ head = try_get_compound_head (pgd_page (orig ), refs );
1671
+ if (!head ) {
1648
1672
* nr -= refs ;
1649
1673
return 0 ;
1650
1674
}
0 commit comments