Skip to content

Commit f2e39e8

Browse files
committed
x86/xen: drop tests for highmem in pv code
With support for 32-bit pv guests gone, pure pv code no longer needs to test for highmem. Dropping those tests removes the need for flushing in some places. Signed-off-by: Juergen Gross <[email protected]> Reviewed-by: Boris Ostrovsky <[email protected]> Signed-off-by: Juergen Gross <[email protected]>
1 parent 56415c4 commit f2e39e8

File tree

2 files changed

+57
-95
lines changed

2 files changed

+57
-95
lines changed

arch/x86/xen/enlighten_pv.c

Lines changed: 4 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -345,15 +345,13 @@ static void set_aliased_prot(void *v, pgprot_t prot)
345345
pte_t *ptep;
346346
pte_t pte;
347347
unsigned long pfn;
348-
struct page *page;
349348
unsigned char dummy;
349+
void *va;
350350

351351
ptep = lookup_address((unsigned long)v, &level);
352352
BUG_ON(ptep == NULL);
353353

354354
pfn = pte_pfn(*ptep);
355-
page = pfn_to_page(pfn);
356-
357355
pte = pfn_pte(pfn, prot);
358356

359357
/*
@@ -383,14 +381,10 @@ static void set_aliased_prot(void *v, pgprot_t prot)
383381
if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
384382
BUG();
385383

386-
if (!PageHighMem(page)) {
387-
void *av = __va(PFN_PHYS(pfn));
384+
va = __va(PFN_PHYS(pfn));
388385

389-
if (av != v)
390-
if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
391-
BUG();
392-
} else
393-
kmap_flush_unused();
386+
if (va != v && HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
387+
BUG();
394388

395389
preempt_enable();
396390
}

arch/x86/xen/mmu_pv.c

Lines changed: 53 additions & 85 deletions
Original file line numberDiff line numberDiff line change
@@ -537,25 +537,26 @@ __visible p4d_t xen_make_p4d(p4dval_t p4d)
537537
PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
538538
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
539539

540-
static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
541-
int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
542-
bool last, unsigned long limit)
540+
static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
541+
void (*func)(struct mm_struct *mm, struct page *,
542+
enum pt_level),
543+
bool last, unsigned long limit)
543544
{
544-
int i, nr, flush = 0;
545+
int i, nr;
545546

546547
nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
547548
for (i = 0; i < nr; i++) {
548549
if (!pmd_none(pmd[i]))
549-
flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE);
550+
(*func)(mm, pmd_page(pmd[i]), PT_PTE);
550551
}
551-
return flush;
552552
}
553553

554-
static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
555-
int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
556-
bool last, unsigned long limit)
554+
static void xen_pud_walk(struct mm_struct *mm, pud_t *pud,
555+
void (*func)(struct mm_struct *mm, struct page *,
556+
enum pt_level),
557+
bool last, unsigned long limit)
557558
{
558-
int i, nr, flush = 0;
559+
int i, nr;
559560

560561
nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
561562
for (i = 0; i < nr; i++) {
@@ -566,29 +567,26 @@ static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
566567

567568
pmd = pmd_offset(&pud[i], 0);
568569
if (PTRS_PER_PMD > 1)
569-
flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
570-
flush |= xen_pmd_walk(mm, pmd, func,
571-
last && i == nr - 1, limit);
570+
(*func)(mm, virt_to_page(pmd), PT_PMD);
571+
xen_pmd_walk(mm, pmd, func, last && i == nr - 1, limit);
572572
}
573-
return flush;
574573
}
575574

576-
static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
577-
int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
578-
bool last, unsigned long limit)
575+
static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
576+
void (*func)(struct mm_struct *mm, struct page *,
577+
enum pt_level),
578+
bool last, unsigned long limit)
579579
{
580-
int flush = 0;
581580
pud_t *pud;
582581

583582

584583
if (p4d_none(*p4d))
585-
return flush;
584+
return;
586585

587586
pud = pud_offset(p4d, 0);
588587
if (PTRS_PER_PUD > 1)
589-
flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
590-
flush |= xen_pud_walk(mm, pud, func, last, limit);
591-
return flush;
588+
(*func)(mm, virt_to_page(pud), PT_PUD);
589+
xen_pud_walk(mm, pud, func, last, limit);
592590
}
593591

594592
/*
@@ -603,12 +601,12 @@ static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
603601
* We must skip the Xen hole in the middle of the address space, just after
604602
* the big x86-64 virtual hole.
605603
*/
606-
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
607-
int (*func)(struct mm_struct *mm, struct page *,
608-
enum pt_level),
609-
unsigned long limit)
604+
static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
605+
void (*func)(struct mm_struct *mm, struct page *,
606+
enum pt_level),
607+
unsigned long limit)
610608
{
611-
int i, nr, flush = 0;
609+
int i, nr;
612610
unsigned hole_low = 0, hole_high = 0;
613611

614612
/* The limit is the last byte to be touched */
@@ -633,22 +631,20 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
633631
continue;
634632

635633
p4d = p4d_offset(&pgd[i], 0);
636-
flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
634+
xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
637635
}
638636

639637
/* Do the top level last, so that the callbacks can use it as
640638
a cue to do final things like tlb flushes. */
641-
flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
642-
643-
return flush;
639+
(*func)(mm, virt_to_page(pgd), PT_PGD);
644640
}
645641

646-
static int xen_pgd_walk(struct mm_struct *mm,
647-
int (*func)(struct mm_struct *mm, struct page *,
648-
enum pt_level),
649-
unsigned long limit)
642+
static void xen_pgd_walk(struct mm_struct *mm,
643+
void (*func)(struct mm_struct *mm, struct page *,
644+
enum pt_level),
645+
unsigned long limit)
650646
{
651-
return __xen_pgd_walk(mm, mm->pgd, func, limit);
647+
__xen_pgd_walk(mm, mm->pgd, func, limit);
652648
}
653649

654650
/* If we're using split pte locks, then take the page's lock and
@@ -681,26 +677,17 @@ static void xen_do_pin(unsigned level, unsigned long pfn)
681677
xen_extend_mmuext_op(&op);
682678
}
683679

684-
static int xen_pin_page(struct mm_struct *mm, struct page *page,
685-
enum pt_level level)
680+
static void xen_pin_page(struct mm_struct *mm, struct page *page,
681+
enum pt_level level)
686682
{
687683
unsigned pgfl = TestSetPagePinned(page);
688-
int flush;
689-
690-
if (pgfl)
691-
flush = 0; /* already pinned */
692-
else if (PageHighMem(page))
693-
/* kmaps need flushing if we found an unpinned
694-
highpage */
695-
flush = 1;
696-
else {
684+
685+
if (!pgfl) {
697686
void *pt = lowmem_page_address(page);
698687
unsigned long pfn = page_to_pfn(page);
699688
struct multicall_space mcs = __xen_mc_entry(0);
700689
spinlock_t *ptl;
701690

702-
flush = 0;
703-
704691
/*
705692
* We need to hold the pagetable lock between the time
706693
* we make the pagetable RO and when we actually pin
@@ -737,8 +724,6 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
737724
xen_mc_callback(xen_pte_unlock, ptl);
738725
}
739726
}
740-
741-
return flush;
742727
}
743728

744729
/* This is called just after a mm has been created, but it has not
@@ -752,14 +737,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
752737

753738
xen_mc_batch();
754739

755-
if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
756-
/* re-enable interrupts for flushing */
757-
xen_mc_issue(0);
758-
759-
kmap_flush_unused();
760-
761-
xen_mc_batch();
762-
}
740+
__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);
763741

764742
xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
765743

@@ -803,11 +781,10 @@ void xen_mm_pin_all(void)
803781
spin_unlock(&pgd_lock);
804782
}
805783

806-
static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
807-
enum pt_level level)
784+
static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
785+
enum pt_level level)
808786
{
809787
SetPagePinned(page);
810-
return 0;
811788
}
812789

813790
/*
@@ -823,12 +800,12 @@ static void __init xen_after_bootmem(void)
823800
xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
824801
}
825802

826-
static int xen_unpin_page(struct mm_struct *mm, struct page *page,
827-
enum pt_level level)
803+
static void xen_unpin_page(struct mm_struct *mm, struct page *page,
804+
enum pt_level level)
828805
{
829806
unsigned pgfl = TestClearPagePinned(page);
830807

831-
if (pgfl && !PageHighMem(page)) {
808+
if (pgfl) {
832809
void *pt = lowmem_page_address(page);
833810
unsigned long pfn = page_to_pfn(page);
834811
spinlock_t *ptl = NULL;
@@ -859,8 +836,6 @@ static int xen_unpin_page(struct mm_struct *mm, struct page *page,
859836
xen_mc_callback(xen_pte_unlock, ptl);
860837
}
861838
}
862-
863-
return 0; /* never need to flush on unpin */
864839
}
865840

866841
/* Release a pagetables pages back as normal RW */
@@ -1554,20 +1529,14 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
15541529
if (static_branch_likely(&xen_struct_pages_ready))
15551530
SetPagePinned(page);
15561531

1557-
if (!PageHighMem(page)) {
1558-
xen_mc_batch();
1532+
xen_mc_batch();
15591533

1560-
__set_pfn_prot(pfn, PAGE_KERNEL_RO);
1534+
__set_pfn_prot(pfn, PAGE_KERNEL_RO);
15611535

1562-
if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1563-
__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1536+
if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1537+
__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
15641538

1565-
xen_mc_issue(PARAVIRT_LAZY_MMU);
1566-
} else {
1567-
/* make sure there are no stray mappings of
1568-
this page */
1569-
kmap_flush_unused();
1570-
}
1539+
xen_mc_issue(PARAVIRT_LAZY_MMU);
15711540
}
15721541
}
15731542

@@ -1590,16 +1559,15 @@ static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
15901559
trace_xen_mmu_release_ptpage(pfn, level, pinned);
15911560

15921561
if (pinned) {
1593-
if (!PageHighMem(page)) {
1594-
xen_mc_batch();
1562+
xen_mc_batch();
15951563

1596-
if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1597-
__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1564+
if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1565+
__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
15981566

1599-
__set_pfn_prot(pfn, PAGE_KERNEL);
1567+
__set_pfn_prot(pfn, PAGE_KERNEL);
1568+
1569+
xen_mc_issue(PARAVIRT_LAZY_MMU);
16001570

1601-
xen_mc_issue(PARAVIRT_LAZY_MMU);
1602-
}
16031571
ClearPagePinned(page);
16041572
}
16051573
}

0 commit comments

Comments (0)