@@ -1419,8 +1419,7 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
-pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
-                        spinlock_t **ptl)
+static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
 {
         pgd_t *pgd;
         p4d_t *p4d;
@@ -1439,6 +1438,16 @@ pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
                 return NULL;
 
         VM_BUG_ON(pmd_trans_huge(*pmd));
+        return pmd;
+}
+
+pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
+                        spinlock_t **ptl)
+{
+        pmd_t *pmd = walk_to_pmd(mm, addr);
+
+        if (!pmd)
+                return NULL;
         return pte_alloc_map_lock(mm, pmd, addr, ptl);
 }
 
@@ -1491,6 +1500,122 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
         return retval;
 }
 
+#ifdef pte_index
+static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
+                        unsigned long addr, struct page *page, pgprot_t prot)
+{
+        int err;
+
+        if (!page_count(page))
+                return -EINVAL;
+        err = validate_page_before_insert(page);
+        return err ? err : insert_page_into_pte_locked(
+                mm, pte_offset_map(pmd, addr), addr, page, prot);
+}
+
+/* insert_pages() amortizes the cost of spinlock operations
+ * when inserting pages in a loop. Arch *must* define pte_index.
+ */
+static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
+                        struct page **pages, unsigned long *num, pgprot_t prot)
+{
+        pmd_t *pmd = NULL;
+        spinlock_t *pte_lock = NULL;
+        struct mm_struct *const mm = vma->vm_mm;
+        unsigned long curr_page_idx = 0;
+        unsigned long remaining_pages_total = *num;
+        unsigned long pages_to_write_in_pmd;
+        int ret;
+more:
+        ret = -EFAULT;
+        pmd = walk_to_pmd(mm, addr);
+        if (!pmd)
+                goto out;
+
+        pages_to_write_in_pmd = min_t(unsigned long,
+                remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
+
+        /* Allocate the PTE if necessary; takes PMD lock once only. */
+        ret = -ENOMEM;
+        if (pte_alloc(mm, pmd))
+                goto out;
+        pte_lock = pte_lockptr(mm, pmd);
+
+        while (pages_to_write_in_pmd) {
+                int pte_idx = 0;
+                const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
+
+                spin_lock(pte_lock);
+                for (; pte_idx < batch_size; ++pte_idx) {
+                        int err = insert_page_in_batch_locked(mm, pmd,
+                                addr, pages[curr_page_idx], prot);
+                        if (unlikely(err)) {
+                                spin_unlock(pte_lock);
+                                ret = err;
+                                remaining_pages_total -= pte_idx;
+                                goto out;
+                        }
+                        addr += PAGE_SIZE;
+                        ++curr_page_idx;
+                }
+                spin_unlock(pte_lock);
+                pages_to_write_in_pmd -= batch_size;
+                remaining_pages_total -= batch_size;
+        }
+        if (remaining_pages_total)
+                goto more;
+        ret = 0;
+out:
+        *num = remaining_pages_total;
+        return ret;
+}
+#endif /* ifdef pte_index */
+
+/**
+ * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
+ * @vma: user vma to map to
+ * @addr: target start user address of these pages
+ * @pages: source kernel pages
+ * @num: in: number of pages to map. out: number of pages that were *not*
+ * mapped. (0 means all pages were successfully mapped).
+ *
+ * Preferred over vm_insert_page() when inserting multiple pages.
+ *
+ * In case of error, we may have mapped a subset of the provided
+ * pages. It is the caller's responsibility to account for this case.
+ *
+ * The same restrictions apply as in vm_insert_page().
+ */
+int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
+                        struct page **pages, unsigned long *num)
+{
+#ifdef pte_index
+        const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
+
+        if (addr < vma->vm_start || end_addr >= vma->vm_end)
+                return -EFAULT;
+        if (!(vma->vm_flags & VM_MIXEDMAP)) {
+                BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
+                BUG_ON(vma->vm_flags & VM_PFNMAP);
+                vma->vm_flags |= VM_MIXEDMAP;
+        }
+        /* Defer page refcount checking till we're about to map that page. */
+        return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
+#else
+        unsigned long idx = 0, pgcount = *num;
+        int err;
+
+        for (; idx < pgcount; ++idx) {
+                err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
+                if (err)
+                        break;
+        }
+        *num = pgcount - idx;
+        return err;
+#endif /* ifdef pte_index */
+}
+EXPORT_SYMBOL(vm_insert_pages);
+
 /**
  * vm_insert_page - insert single page into user vma
  * @vma: user vma to map to
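The kernel-doc above specifies the in/out behaviour of @num. As a rough illustration only (not part of this patch), a caller such as a driver ->mmap handler might use the new API as sketched below; my_dev_mmap(), struct my_buf, and its fields are hypothetical names, and the sketch assumes the driver already holds an array of refcounted kernel pages that fits inside the VMA.

/*
 * Hypothetical caller sketch (illustrative, not from this patch):
 * map a preallocated page array into userspace in one call,
 * batching the PTE lock instead of calling vm_insert_page() per page.
 */
#include <linux/fs.h>
#include <linux/mm.h>

struct my_buf {                         /* assumed driver-private state */
        struct page **pages;            /* refcounted kernel pages */
        unsigned long nr_pages;
};

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct my_buf *buf = file->private_data;
        unsigned long num = buf->nr_pages;
        int err;

        if (vma_pages(vma) < num)
                return -EINVAL;

        /* On return, num holds the number of pages that were *not* mapped. */
        err = vm_insert_pages(vma, vma->vm_start, buf->pages, &num);
        if (err)
                pr_warn("my_dev: mapped %lu of %lu pages, err=%d\n",
                        buf->nr_pages - num, buf->nr_pages, err);
        return err;
}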