@@ -619,7 +619,8 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
  * Validate strict W^X semantics.
  */
 static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long start,
-				  unsigned long pfn, unsigned long npg)
+				  unsigned long pfn, unsigned long npg,
+				  bool nx, bool rw)
 {
 	unsigned long end;
 
@@ -641,6 +642,10 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
 	if ((pgprot_val(new) & (_PAGE_RW | _PAGE_NX)) != _PAGE_RW)
 		return new;
 
+	/* Non-leaf translation entries can disable writing or execution. */
+	if (!rw || nx)
+		return new;
+
 	end = start + npg * PAGE_SIZE - 1;
 	WARN_ONCE(1, "CPA detected W^X violation: %016llx -> %016llx range: 0x%016lx - 0x%016lx PFN %lx\n",
 		  (unsigned long long)pgprot_val(old),
@@ -657,20 +662,26 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
 
 /*
  * Lookup the page table entry for a virtual address in a specific pgd.
- * Return a pointer to the entry and the level of the mapping.
+ * Return a pointer to the entry, the level of the mapping, and the effective
+ * NX and RW bits of all page table levels.
  */
-pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
-			     unsigned int *level)
+pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
+				  unsigned int *level, bool *nx, bool *rw)
 {
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 
 	*level = PG_LEVEL_NONE;
+	*nx = false;
+	*rw = true;
 
 	if (pgd_none(*pgd))
 		return NULL;
 
+	*nx |= pgd_flags(*pgd) & _PAGE_NX;
+	*rw &= pgd_flags(*pgd) & _PAGE_RW;
+
 	p4d = p4d_offset(pgd, address);
 	if (p4d_none(*p4d))
 		return NULL;
@@ -679,6 +690,9 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
 	if (p4d_leaf(*p4d) || !p4d_present(*p4d))
 		return (pte_t *)p4d;
 
+	*nx |= p4d_flags(*p4d) & _PAGE_NX;
+	*rw &= p4d_flags(*p4d) & _PAGE_RW;
+
 	pud = pud_offset(p4d, address);
 	if (pud_none(*pud))
 		return NULL;
@@ -687,6 +701,9 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
 	if (pud_leaf(*pud) || !pud_present(*pud))
 		return (pte_t *)pud;
 
+	*nx |= pud_flags(*pud) & _PAGE_NX;
+	*rw &= pud_flags(*pud) & _PAGE_RW;
+
 	pmd = pmd_offset(pud, address);
 	if (pmd_none(*pmd))
 		return NULL;
@@ -695,11 +712,26 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
 	if (pmd_leaf(*pmd) || !pmd_present(*pmd))
 		return (pte_t *)pmd;
 
+	*nx |= pmd_flags(*pmd) & _PAGE_NX;
+	*rw &= pmd_flags(*pmd) & _PAGE_RW;
+
 	*level = PG_LEVEL_4K;
 
 	return pte_offset_kernel(pmd, address);
 }
 
+/*
+ * Lookup the page table entry for a virtual address in a specific pgd.
+ * Return a pointer to the entry and the level of the mapping.
+ */
+pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+			     unsigned int *level)
+{
+	bool nx, rw;
+
+	return lookup_address_in_pgd_attr(pgd, address, level, &nx, &rw);
+}
+
 /*
  * Lookup the page table entry for a virtual address. Return a pointer
  * to the entry and the level of the mapping.
@@ -715,13 +747,16 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
 EXPORT_SYMBOL_GPL(lookup_address);
 
 static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
-				  unsigned int *level)
+				  unsigned int *level, bool *nx, bool *rw)
 {
-	if (cpa->pgd)
-		return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
-					     address, level);
+	pgd_t *pgd;
+
+	if (!cpa->pgd)
+		pgd = pgd_offset_k(address);
+	else
+		pgd = cpa->pgd + pgd_index(address);
 
-	return lookup_address(address, level);
+	return lookup_address_in_pgd_attr(pgd, address, level, nx, rw);
 }
 
 /*
@@ -849,12 +884,13 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
 	pgprot_t old_prot, new_prot, req_prot, chk_prot;
 	pte_t new_pte, *tmp;
 	enum pg_level level;
+	bool nx, rw;
 
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up already:
 	 */
-	tmp = _lookup_address_cpa(cpa, address, &level);
+	tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
 	if (tmp != kpte)
 		return 1;
 
@@ -965,7 +1001,8 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
 	new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
 				      psize, CPA_DETECT);
 
-	new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages);
+	new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages,
+			      nx, rw);
 
 	/*
 	 * If there is a conflict, split the large page.
@@ -1046,14 +1083,15 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
 	pte_t *pbase = (pte_t *)page_address(base);
 	unsigned int i, level;
 	pgprot_t ref_prot;
+	bool nx, rw;
 	pte_t *tmp;
 
 	spin_lock(&pgd_lock);
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up for us already:
 	 */
-	tmp = _lookup_address_cpa(cpa, address, &level);
+	tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
 	if (tmp != kpte) {
 		spin_unlock(&pgd_lock);
 		return 1;
@@ -1594,10 +1632,11 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 	int do_split, err;
 	unsigned int level;
 	pte_t *kpte, old_pte;
+	bool nx, rw;
 
 	address = __cpa_addr(cpa, cpa->curpage);
 repeat:
-	kpte = _lookup_address_cpa(cpa, address, &level);
+	kpte = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
 	if (!kpte)
 		return __cpa_process_fault(cpa, address, primary);
 
@@ -1619,7 +1658,8 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 	new_prot = static_protections(new_prot, address, pfn, 1, 0,
 				      CPA_PROTECT);
 
-	new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1);
+	new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1,
+			      nx, rw);
 
 	new_prot = pgprot_clear_protnone_bits(new_prot);
 
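
Editor's note, not part of the patch: the nx/rw values that the diff threads into verify_rwx() are the permissions accumulated over every page table level, with _PAGE_RW AND-ed and _PAGE_NX OR-ed on the way down, so a leaf entry that looks writable and executable on its own is not a real W^X violation when an upper level already restricts it. The following standalone userspace C sketch (all names here are invented for illustration, not kernel APIs) models that accumulation under those assumptions:

/*
 * Illustrative sketch only. Models how effective NX/RW accumulate across
 * a page table walk: RW is AND-ed (any level can revoke write), NX is
 * OR-ed (any level can forbid execute). A leaf that looks W+X is harmless
 * if some non-leaf level cleared RW or set NX, which is what the added
 * nx/rw parameters let the kernel's verify_rwx() take into account.
 */
#include <stdbool.h>
#include <stdio.h>

struct level_flags {
	bool rw;	/* RW bit set at this level */
	bool nx;	/* NX bit set at this level */
};

/* Accumulate effective permissions over all levels of one walk. */
static void effective_perms(const struct level_flags *levels, int n,
			    bool *eff_rw, bool *eff_nx)
{
	*eff_rw = true;
	*eff_nx = false;

	for (int i = 0; i < n; i++) {
		*eff_rw &= levels[i].rw;
		*eff_nx |= levels[i].nx;
	}
}

int main(void)
{
	/* Hypothetical PGD, P4D, PUD, PMD, PTE flags for one mapping. */
	struct level_flags walk[] = {
		{ .rw = true,  .nx = false },	/* PGD */
		{ .rw = true,  .nx = false },	/* P4D */
		{ .rw = false, .nx = false },	/* PUD: write-protected here */
		{ .rw = true,  .nx = false },	/* PMD */
		{ .rw = true,  .nx = false },	/* PTE: W+X if viewed alone */
	};
	bool rw, nx;

	effective_perms(walk, 5, &rw, &nx);

	/* Effective mapping is read-only, so no W^X warning is warranted. */
	printf("effective rw=%d nx=%d -> %s\n", rw, nx,
	       (rw && !nx) ? "W+X (violation)" : "not W+X");
	return 0;
}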