
Commit 8efd6f5

arjunroy authored and torvalds committed
mm/memory.c: refactor insert_page to prepare for batched-lock insert
Add helper methods for vm_insert_page()/insert_page() to prepare for
vm_insert_pages(), which batch-inserts pages to reduce spinlock
operations when inserting multiple consecutive pages into the user
page table.

The intention of this patch-set is to reduce atomic ops for tcp
zerocopy receives, which normally hits the same spinlock multiple
times consecutively.

Signed-off-by: Arjun Roy <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: Soheil Hassas Yeganeh <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Cc: David Miller <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: Stephen Rothwell <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 09ef528 commit 8efd6f5
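For context on the spinlock cost being targeted: before vm_insert_pages() exists, mapping N consecutive pages (as TCP zerocopy receive does) means N separate vm_insert_page() calls, each of which takes and releases the page-table spinlock internally. A minimal caller-side sketch of that status-quo pattern; map_pages_one_by_one() is a hypothetical wrapper name used purely for illustration:

/*
 * Status quo: one lock/unlock cycle per page, done inside
 * vm_insert_page(). map_pages_one_by_one() is a hypothetical
 * wrapper for illustration, not kernel code.
 */
static int map_pages_one_by_one(struct vm_area_struct *vma,
				unsigned long addr,
				struct page **pages, int num)
{
	int i, err;

	for (i = 0; i < num; i++, addr += PAGE_SIZE) {
		/* Each call locks the PTE, inserts one page, unlocks. */
		err = vm_insert_page(vma, addr, pages[i]);
		if (err)
			return err;
	}
	return 0;	/* N pages => N spinlock acquisitions */
}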

File tree: 1 file changed (+24 −15)

mm/memory.c

Lines changed: 24 additions & 15 deletions
@@ -1442,6 +1442,27 @@ pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
 	return pte_alloc_map_lock(mm, pmd, addr, ptl);
 }
 
+static int validate_page_before_insert(struct page *page)
+{
+	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
+		return -EINVAL;
+	flush_dcache_page(page);
+	return 0;
+}
+
+static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
+			unsigned long addr, struct page *page, pgprot_t prot)
+{
+	if (!pte_none(*pte))
+		return -EBUSY;
+	/* Ok, finally just insert the thing.. */
+	get_page(page);
+	inc_mm_counter_fast(mm, mm_counter_file(page));
+	page_add_file_rmap(page, false);
+	set_pte_at(mm, addr, pte, mk_pte(page, prot));
+	return 0;
+}
+
 /*
  * This is the old fallback for page remapping.
  *
@@ -1457,26 +1478,14 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	retval = -EINVAL;
-	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
+	retval = validate_page_before_insert(page);
+	if (retval)
 		goto out;
 	retval = -ENOMEM;
-	flush_dcache_page(page);
 	pte = get_locked_pte(mm, addr, &ptl);
 	if (!pte)
 		goto out;
-	retval = -EBUSY;
-	if (!pte_none(*pte))
-		goto out_unlock;
-
-	/* Ok, finally just insert the thing.. */
-	get_page(page);
-	inc_mm_counter_fast(mm, mm_counter_file(page));
-	page_add_file_rmap(page, false);
-	set_pte_at(mm, addr, pte, mk_pte(page, prot));
-
-	retval = 0;
-out_unlock:
+	retval = insert_page_into_pte_locked(mm, pte, addr, page, prot);
 	pte_unmap_unlock(pte, ptl);
 out:
 	return retval;
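To show where this refactor is headed, here is a hedged sketch of how a batched insert could reuse the two new helpers so the PTE spinlock is taken once per batch rather than once per page. vm_insert_pages() itself arrives in a later patch of this series, so insert_pages_batched() below is an illustrative approximation under stated assumptions, not the kernel's implementation: it ignores PMD-boundary crossings and partial-success reporting, both of which the real code must handle.

/*
 * Illustrative sketch only: a batched insert reusing
 * validate_page_before_insert()/insert_page_into_pte_locked() so the
 * PTE spinlock is acquired once for N consecutive pages instead of N
 * times. insert_pages_batched() is a hypothetical name; it assumes
 * the whole batch fits in one PTE page (no PMD-boundary handling)
 * and does not report how many pages succeeded.
 */
static int insert_pages_batched(struct vm_area_struct *vma,
				unsigned long addr,
				struct page **pages, int num, pgprot_t prot)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *start_pte, *pte;
	spinlock_t *ptl;
	int i, err = 0;

	/* Validation and dcache flushing happen outside the lock. */
	for (i = 0; i < num; i++) {
		err = validate_page_before_insert(pages[i]);
		if (err)
			return err;
	}

	/* One lock acquisition covers the whole batch. */
	start_pte = get_locked_pte(mm, addr, &ptl);
	if (!start_pte)
		return -ENOMEM;
	for (i = 0, pte = start_pte; i < num;
	     i++, pte++, addr += PAGE_SIZE) {
		err = insert_page_into_pte_locked(mm, pte, addr,
						  pages[i], prot);
		if (err)
			break;
	}
	pte_unmap_unlock(start_pte, ptl);
	return err;
}

Splitting insert_page() this way is what makes such a loop possible: validation is lock-free and the per-PTE insert step takes the lock as a precondition instead of acquiring it itself.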
