@@ -537,25 +537,26 @@ __visible p4d_t xen_make_p4d(p4dval_t p4d)
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
 #endif	/* CONFIG_PGTABLE_LEVELS >= 5 */
 
-static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
-		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
-		bool last, unsigned long limit)
+static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
+			 void (*func)(struct mm_struct *mm, struct page *,
+				      enum pt_level),
+			 bool last, unsigned long limit)
 {
-	int i, nr, flush = 0;
+	int i, nr;
 
 	nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
 	for (i = 0; i < nr; i++) {
 		if (!pmd_none(pmd[i]))
-			flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE);
+			(*func)(mm, pmd_page(pmd[i]), PT_PTE);
 	}
-	return flush;
 }
 
-static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
-		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
-		bool last, unsigned long limit)
+static void xen_pud_walk(struct mm_struct *mm, pud_t *pud,
+			 void (*func)(struct mm_struct *mm, struct page *,
+				      enum pt_level),
+			 bool last, unsigned long limit)
 {
-	int i, nr, flush = 0;
+	int i, nr;
 
 	nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
 	for (i = 0; i < nr; i++) {
@@ -566,29 +567,26 @@ static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
 
 		pmd = pmd_offset(&pud[i], 0);
 		if (PTRS_PER_PMD > 1)
-			flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
-		flush |= xen_pmd_walk(mm, pmd, func,
-				last && i == nr - 1, limit);
+			(*func)(mm, virt_to_page(pmd), PT_PMD);
+		xen_pmd_walk(mm, pmd, func, last && i == nr - 1, limit);
 	}
-	return flush;
 }
 
-static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
-		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
-		bool last, unsigned long limit)
+static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
+			 void (*func)(struct mm_struct *mm, struct page *,
+				      enum pt_level),
+			 bool last, unsigned long limit)
 {
-	int flush = 0;
 	pud_t *pud;
 
 
 	if (p4d_none(*p4d))
-		return flush;
+		return;
 
 	pud = pud_offset(p4d, 0);
 	if (PTRS_PER_PUD > 1)
-		flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
-	flush |= xen_pud_walk(mm, pud, func, last, limit);
-	return flush;
+		(*func)(mm, virt_to_page(pud), PT_PUD);
+	xen_pud_walk(mm, pud, func, last, limit);
 }
 
 /*
@@ -603,12 +601,12 @@ static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
  * We must skip the Xen hole in the middle of the address space, just after
  * the big x86-64 virtual hole.
  */
-static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
-			  int (*func)(struct mm_struct *mm, struct page *,
-				      enum pt_level),
-			  unsigned long limit)
+static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
+			   void (*func)(struct mm_struct *mm, struct page *,
+					enum pt_level),
+			   unsigned long limit)
 {
-	int i, nr, flush = 0;
+	int i, nr;
 	unsigned hole_low = 0, hole_high = 0;
 
 	/* The limit is the last byte to be touched */
@@ -633,22 +631,20 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
 			continue;
 
 		p4d = p4d_offset(&pgd[i], 0);
-		flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
+		xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
 	}
 
 	/* Do the top level last, so that the callbacks can use it as
 	   a cue to do final things like tlb flushes. */
-	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
-
-	return flush;
+	(*func)(mm, virt_to_page(pgd), PT_PGD);
 }
 
-static int xen_pgd_walk(struct mm_struct *mm,
-			int (*func)(struct mm_struct *mm, struct page *,
-				    enum pt_level),
-			unsigned long limit)
+static void xen_pgd_walk(struct mm_struct *mm,
+			 void (*func)(struct mm_struct *mm, struct page *,
+				      enum pt_level),
+			 unsigned long limit)
 {
-	return __xen_pgd_walk(mm, mm->pgd, func, limit);
+	__xen_pgd_walk(mm, mm->pgd, func, limit);
 }
 
 /* If we're using split pte locks, then take the page's lock and
@@ -681,26 +677,17 @@ static void xen_do_pin(unsigned level, unsigned long pfn)
 	xen_extend_mmuext_op(&op);
 }
 
-static int xen_pin_page(struct mm_struct *mm, struct page *page,
-			enum pt_level level)
+static void xen_pin_page(struct mm_struct *mm, struct page *page,
+			 enum pt_level level)
 {
 	unsigned pgfl = TestSetPagePinned(page);
-	int flush;
-
-	if (pgfl)
-		flush = 0;		/* already pinned */
-	else if (PageHighMem(page))
-		/* kmaps need flushing if we found an unpinned
-		   highpage */
-		flush = 1;
-	else {
+
+	if (!pgfl) {
 		void *pt = lowmem_page_address(page);
 		unsigned long pfn = page_to_pfn(page);
 		struct multicall_space mcs = __xen_mc_entry(0);
 		spinlock_t *ptl;
 
-		flush = 0;
-
 		/*
 		 * We need to hold the pagetable lock between the time
 		 * we make the pagetable RO and when we actually pin
@@ -737,8 +724,6 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
 			xen_mc_callback(xen_pte_unlock, ptl);
 		}
 	}
-
-	return flush;
 }
 
 /* This is called just after a mm has been created, but it has not
@@ -752,14 +737,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 
 	xen_mc_batch();
 
-	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
-		/* re-enable interrupts for flushing */
-		xen_mc_issue(0);
-
-		kmap_flush_unused();
-
-		xen_mc_batch();
-	}
+	__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);
 
 	xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
 
@@ -803,11 +781,10 @@ void xen_mm_pin_all(void)
 	spin_unlock(&pgd_lock);
 }
 
-static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
-				  enum pt_level level)
+static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
+				   enum pt_level level)
 {
 	SetPagePinned(page);
-	return 0;
 }
 
 /*
@@ -823,12 +800,12 @@ static void __init xen_after_bootmem(void)
 	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
 }
 
-static int xen_unpin_page(struct mm_struct *mm, struct page *page,
-			  enum pt_level level)
+static void xen_unpin_page(struct mm_struct *mm, struct page *page,
+			   enum pt_level level)
 {
 	unsigned pgfl = TestClearPagePinned(page);
 
-	if (pgfl && !PageHighMem(page)) {
+	if (pgfl) {
 		void *pt = lowmem_page_address(page);
 		unsigned long pfn = page_to_pfn(page);
 		spinlock_t *ptl = NULL;
@@ -859,8 +836,6 @@ static int xen_unpin_page(struct mm_struct *mm, struct page *page,
 			xen_mc_callback(xen_pte_unlock, ptl);
 		}
 	}
-
-	return 0;		/* never need to flush on unpin */
 }
 
 /* Release a pagetables pages back as normal RW */
@@ -1554,20 +1529,14 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
 		if (static_branch_likely(&xen_struct_pages_ready))
 			SetPagePinned(page);
 
-		if (!PageHighMem(page)) {
-			xen_mc_batch();
+		xen_mc_batch();
 
-			__set_pfn_prot(pfn, PAGE_KERNEL_RO);
+		__set_pfn_prot(pfn, PAGE_KERNEL_RO);
 
-			if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
-				__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+			__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 
-			xen_mc_issue(PARAVIRT_LAZY_MMU);
-		} else {
-			/* make sure there are no stray mappings of
-			   this page */
-			kmap_flush_unused();
-		}
+		xen_mc_issue(PARAVIRT_LAZY_MMU);
 	}
 }
 
@@ -1590,16 +1559,15 @@ static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
 	trace_xen_mmu_release_ptpage(pfn, level, pinned);
 
 	if (pinned) {
-		if (!PageHighMem(page)) {
-			xen_mc_batch();
+		xen_mc_batch();
 
-			if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
-				__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+			__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
 
-			__set_pfn_prot(pfn, PAGE_KERNEL);
+		__set_pfn_prot(pfn, PAGE_KERNEL);
+
+		xen_mc_issue(PARAVIRT_LAZY_MMU);
 
-			xen_mc_issue(PARAVIRT_LAZY_MMU);
-		}
 		ClearPagePinned(page);
 	}
 }