
Commit 8cd3984

arjunroy authored and torvalds committed
mm/memory.c: add vm_insert_pages()
Add the ability to insert multiple pages at once to a user VM with lower
PTE spinlock operations.  The intention of this patchset is to reduce
atomic ops for tcp zerocopy receives, which normally hits the same
spinlock multiple times consecutively.

[[email protected]: pte_alloc() no longer takes the `addr' argument]
[[email protected]: add missing page_count() check to vm_insert_pages()]
Link: http://lkml.kernel.org/r/[email protected]
[[email protected]: vm_insert_pages() checks if pte_index defined]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Arjun Roy <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: Soheil Hassas Yeganeh <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Cc: David Miller <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: Stephen Rothwell <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
1 parent c97078b · commit 8cd3984
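For context, here is a minimal sketch of how a caller might use the new API. The helper name and setup are hypothetical, not part of this commit; the in/out semantics of `num' follow the kernel-doc added below, so on return it holds the count of pages that were *not* mapped.

/*
 * Hypothetical caller sketch (not from this commit).  Maps a batch of
 * kernel pages into a user vma with one call instead of a per-page
 * vm_insert_page() loop.  Assumes `vma', `pages' and `nr' were set up
 * by the caller and that mmap_sem is held, as vm_insert_page() requires.
 */
static int map_page_batch(struct vm_area_struct *vma, unsigned long uaddr,
			  struct page **pages, unsigned long nr)
{
	unsigned long remaining = nr;	/* in: pages to map; out: pages NOT mapped */
	int err = vm_insert_pages(vma, uaddr, pages, &remaining);

	if (err)
		pr_warn("mapped only %lu of %lu pages (err %d)\n",
			nr - remaining, nr, err);
	return err;
}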

2 files changed (+129, -2 lines)

include/linux/mm.h

Lines changed: 2 additions & 0 deletions
@@ -2689,6 +2689,8 @@ struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 			unsigned long pfn, unsigned long size, pgprot_t);
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
+			struct page **pages, unsigned long *num);
 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
 				unsigned long num);
 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,

mm/memory.c

Lines changed: 127 additions & 2 deletions
@@ -1419,8 +1419,7 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
-pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
-			spinlock_t **ptl)
+static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
@@ -1439,6 +1438,16 @@ pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
 		return NULL;
 
 	VM_BUG_ON(pmd_trans_huge(*pmd));
+	return pmd;
+}
+
+pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
+			spinlock_t **ptl)
+{
+	pmd_t *pmd = walk_to_pmd(mm, addr);
+
+	if (!pmd)
+		return NULL;
 	return pte_alloc_map_lock(mm, pmd, addr, ptl);
 }
 

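The hunk above splits the pgd → p4d → pud → pmd descent out of __get_locked_pte() into walk_to_pmd(), so the batched path added below can locate a PMD once and then fill many PTEs under it. The middle of the function is elided by the diff context; as a rough, non-literal sketch, such a walk with the standard mm allocation helpers looks like:

/*
 * Illustrative sketch only: the actual body of walk_to_pmd() is elided
 * by the diff context above.  Lower page-table levels are allocated on
 * demand; each helper returns NULL on allocation failure.
 */
static pmd_t *walk_to_pmd_sketch(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);	/* top level always exists */
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	return pmd_alloc(mm, pud, addr);
}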
@@ -1491,6 +1500,122 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 	return retval;
 }
 
+#ifdef pte_index
+static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
+			unsigned long addr, struct page *page, pgprot_t prot)
+{
+	int err;
+
+	if (!page_count(page))
+		return -EINVAL;
+	err = validate_page_before_insert(page);
+	return err ? err : insert_page_into_pte_locked(
+		mm, pte_offset_map(pmd, addr), addr, page, prot);
+}
+
+/* insert_pages() amortizes the cost of spinlock operations
+ * when inserting pages in a loop. Arch *must* define pte_index.
+ */
+static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
+			struct page **pages, unsigned long *num, pgprot_t prot)
+{
+	pmd_t *pmd = NULL;
+	spinlock_t *pte_lock = NULL;
+	struct mm_struct *const mm = vma->vm_mm;
+	unsigned long curr_page_idx = 0;
+	unsigned long remaining_pages_total = *num;
+	unsigned long pages_to_write_in_pmd;
+	int ret;
+more:
+	ret = -EFAULT;
+	pmd = walk_to_pmd(mm, addr);
+	if (!pmd)
+		goto out;
+
+	pages_to_write_in_pmd = min_t(unsigned long,
+		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
+
+	/* Allocate the PTE if necessary; takes PMD lock once only. */
+	ret = -ENOMEM;
+	if (pte_alloc(mm, pmd))
+		goto out;
+	pte_lock = pte_lockptr(mm, pmd);
+
+	while (pages_to_write_in_pmd) {
+		int pte_idx = 0;
+		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
+
+		spin_lock(pte_lock);
+		for (; pte_idx < batch_size; ++pte_idx) {
+			int err = insert_page_in_batch_locked(mm, pmd,
+				addr, pages[curr_page_idx], prot);
+			if (unlikely(err)) {
+				spin_unlock(pte_lock);
+				ret = err;
+				remaining_pages_total -= pte_idx;
+				goto out;
+			}
+			addr += PAGE_SIZE;
+			++curr_page_idx;
+		}
+		spin_unlock(pte_lock);
+		pages_to_write_in_pmd -= batch_size;
+		remaining_pages_total -= batch_size;
+	}
+	if (remaining_pages_total)
+		goto more;
+	ret = 0;
+out:
+	*num = remaining_pages_total;
+	return ret;
+}
+#endif  /* ifdef pte_index */
+
+/**
+ * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
+ * @vma: user vma to map to
+ * @addr: target start user address of these pages
+ * @pages: source kernel pages
+ * @num: in: number of pages to map. out: number of pages that were *not*
+ * mapped. (0 means all pages were successfully mapped).
+ *
+ * Preferred over vm_insert_page() when inserting multiple pages.
+ *
+ * In case of error, we may have mapped a subset of the provided
+ * pages. It is the caller's responsibility to account for this case.
+ *
+ * The same restrictions apply as in vm_insert_page().
+ */
+int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
+			struct page **pages, unsigned long *num)
+{
+#ifdef pte_index
+	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
+
+	if (addr < vma->vm_start || end_addr >= vma->vm_end)
+		return -EFAULT;
+	if (!(vma->vm_flags & VM_MIXEDMAP)) {
+		BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
+		BUG_ON(vma->vm_flags & VM_PFNMAP);
+		vma->vm_flags |= VM_MIXEDMAP;
+	}
+	/* Defer page refcount checking till we're about to map that page. */
+	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
+#else
+	unsigned long idx = 0, pgcount = *num;
+	int err = -EINVAL;
+
+	for (; idx < pgcount; ++idx) {
+		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
+		if (err)
+			break;
+	}
+	*num = pgcount - idx;
+	return err;
+#endif  /* ifdef pte_index */
+}
+EXPORT_SYMBOL(vm_insert_pages);
+
 /**
  * vm_insert_page - insert single page into user vma
  * @vma: user vma to map to
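To see what the batching buys: with the cap of 8 used in insert_pages(), inserting N pages that fall within one PMD costs a single pte_alloc() plus roughly ceil(N/8) PTE-lock acquisitions, versus N acquisitions for a vm_insert_page() loop. A back-of-envelope illustration (ordinary userspace C, not kernel code; the constant mirrors the min_t(int, ..., 8) cap above):

#include <stdio.h>

#define BATCH_SIZE 8	/* mirrors the batch cap in insert_pages() */

int main(void)
{
	unsigned long n = 32;	/* pages to insert, assumed to fit in one pmd */
	unsigned long per_page_locks = n;	/* one lock/unlock per vm_insert_page() */
	unsigned long batched_locks = (n + BATCH_SIZE - 1) / BATCH_SIZE;

	printf("%lu pages: %lu PTE-lock acquisitions unbatched, %lu batched\n",
	       n, per_page_locks, batched_locks);
	return 0;
}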
