Skip to content

Commit 5bc8b0f

Browse files
jgross1 authored and
Ingo Molnar committed
x86/pat: Fix W^X violation false-positives when running as Xen PV guest
When running as a Xen PV guest, W^X violation WARN()s have been observed in some cases. Those WARN()s are produced by verify_rwx(), which looks into the PTE to verify that writable kernel pages have the NX bit set in order to avoid code modifications of the kernel by rogue code. As the NX bits of all levels of translation entries are OR-ed and the RW bits of all levels are AND-ed, looking just at the PTE isn't enough to decide that a writable page is also executable. When running as a Xen PV guest, the direct map PMDs and kernel high map PMDs share the same set of PTEs, and Xen kernel initialization sets the NX bit in the direct map PMD entries — not in the shared PTEs. Fixes: 652c5bf ("x86/mm: Refuse W^X violations") Reported-by: Jason Andryuk <[email protected]> Signed-off-by: Juergen Gross <[email protected]> Signed-off-by: Ingo Molnar <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 02eac06 commit 5bc8b0f

File tree

1 file changed

+18
-8
lines changed

1 file changed

+18
-8
lines changed

arch/x86/mm/pat/set_memory.c

Lines changed: 18 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -619,7 +619,8 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
619619
* Validate strict W^X semantics.
620620
*/
621621
static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long start,
622-
unsigned long pfn, unsigned long npg)
622+
unsigned long pfn, unsigned long npg,
623+
bool nx, bool rw)
623624
{
624625
unsigned long end;
625626

@@ -641,6 +642,10 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
641642
if ((pgprot_val(new) & (_PAGE_RW | _PAGE_NX)) != _PAGE_RW)
642643
return new;
643644

645+
/* Non-leaf translation entries can disable writing or execution. */
646+
if (!rw || nx)
647+
return new;
648+
644649
end = start + npg * PAGE_SIZE - 1;
645650
WARN_ONCE(1, "CPA detected W^X violation: %016llx -> %016llx range: 0x%016lx - 0x%016lx PFN %lx\n",
646651
(unsigned long long)pgprot_val(old),
@@ -742,7 +747,7 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
742747
EXPORT_SYMBOL_GPL(lookup_address);
743748

744749
static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
745-
unsigned int *level)
750+
unsigned int *level, bool *nx, bool *rw)
746751
{
747752
pgd_t *pgd;
748753

@@ -751,7 +756,7 @@ static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
751756
else
752757
pgd = cpa->pgd + pgd_index(address);
753758

754-
return lookup_address_in_pgd(pgd, address, level);
759+
return lookup_address_in_pgd_attr(pgd, address, level, nx, rw);
755760
}
756761

757762
/*
@@ -879,12 +884,13 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
879884
pgprot_t old_prot, new_prot, req_prot, chk_prot;
880885
pte_t new_pte, *tmp;
881886
enum pg_level level;
887+
bool nx, rw;
882888

883889
/*
884890
* Check for races, another CPU might have split this page
885891
* up already:
886892
*/
887-
tmp = _lookup_address_cpa(cpa, address, &level);
893+
tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
888894
if (tmp != kpte)
889895
return 1;
890896

@@ -995,7 +1001,8 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
9951001
new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
9961002
psize, CPA_DETECT);
9971003

998-
new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages);
1004+
new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages,
1005+
nx, rw);
9991006

10001007
/*
10011008
* If there is a conflict, split the large page.
@@ -1076,14 +1083,15 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
10761083
pte_t *pbase = (pte_t *)page_address(base);
10771084
unsigned int i, level;
10781085
pgprot_t ref_prot;
1086+
bool nx, rw;
10791087
pte_t *tmp;
10801088

10811089
spin_lock(&pgd_lock);
10821090
/*
10831091
* Check for races, another CPU might have split this page
10841092
* up for us already:
10851093
*/
1086-
tmp = _lookup_address_cpa(cpa, address, &level);
1094+
tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
10871095
if (tmp != kpte) {
10881096
spin_unlock(&pgd_lock);
10891097
return 1;
@@ -1624,10 +1632,11 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
16241632
int do_split, err;
16251633
unsigned int level;
16261634
pte_t *kpte, old_pte;
1635+
bool nx, rw;
16271636

16281637
address = __cpa_addr(cpa, cpa->curpage);
16291638
repeat:
1630-
kpte = _lookup_address_cpa(cpa, address, &level);
1639+
kpte = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
16311640
if (!kpte)
16321641
return __cpa_process_fault(cpa, address, primary);
16331642

@@ -1649,7 +1658,8 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
16491658
new_prot = static_protections(new_prot, address, pfn, 1, 0,
16501659
CPA_PROTECT);
16511660

1652-
new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1);
1661+
new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1,
1662+
nx, rw);
16531663

16541664
new_prot = pgprot_clear_protnone_bits(new_prot);
16551665

0 commit comments

Comments
 (0)