@@ -37,6 +37,12 @@ struct s1_walk_result {
 			u8	APTable;
 			bool	UXNTable;
 			bool	PXNTable;
+			bool	ur;
+			bool	uw;
+			bool	ux;
+			bool	pr;
+			bool	pw;
+			bool	px;
 		};
 		struct {
 			u8	fst;
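
The six new booleans cache the effective stage-1 permissions computed during the walk, so later code can read them straight out of struct s1_walk_result instead of recomputing them from the raw descriptor. As a minimal sketch of how a caller might consult them, assuming a hypothetical helper that is not part of this patch:

/* Illustrative only -- s1_walk_allows() does not exist in the patch. */
static inline bool s1_walk_allows(const struct s1_walk_result *wr,
				  bool priv, bool write)
{
	if (priv)
		return write ? wr->pw : wr->pr;

	return write ? wr->uw : wr->ur;
}
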
@@ -764,111 +770,139 @@ static bool pan3_enabled(struct kvm_vcpu *vcpu, enum trans_regime regime)
 	return sctlr & SCTLR_EL1_EPAN;
 }
 
-static u64 handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
+static void compute_s1_direct_permissions(struct kvm_vcpu *vcpu,
+					  struct s1_walk_info *wi,
+					  struct s1_walk_result *wr)
 {
-	bool perm_fail, ur, uw, ux, pr, pw, px;
-	struct s1_walk_result wr = {};
-	struct s1_walk_info wi = {};
-	int ret, idx;
-
-	ret = setup_s1_walk(vcpu, op, &wi, &wr, vaddr);
-	if (ret)
-		goto compute_par;
-
-	if (wr.level == S1_MMU_DISABLED)
-		goto compute_par;
-
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-
-	ret = walk_s1(vcpu, &wi, &wr, vaddr);
-
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
-
-	if (ret)
-		goto compute_par;
-
-	/* FIXME: revisit when adding indirect permission support */
-	/* AArch64.S1DirectBasePermissions() */
-	if (wi.regime != TR_EL2) {
-		switch (FIELD_GET(PTE_USER | PTE_RDONLY, wr.desc)) {
+	/* Non-hierarchical part of AArch64.S1DirectBasePermissions() */
+	if (wi->regime != TR_EL2) {
+		switch (FIELD_GET(PTE_USER | PTE_RDONLY, wr->desc)) {
 		case 0b00:
-			pr = pw = true;
-			ur = uw = false;
+			wr->pr = wr->pw = true;
+			wr->ur = wr->uw = false;
 			break;
 		case 0b01:
-			pr = pw = ur = uw = true;
+			wr->pr = wr->pw = wr->ur = wr->uw = true;
 			break;
 		case 0b10:
-			pr = true;
-			pw = ur = uw = false;
+			wr->pr = true;
+			wr->pw = wr->ur = wr->uw = false;
 			break;
 		case 0b11:
-			pr = ur = true;
-			pw = uw = false;
+			wr->pr = wr->ur = true;
+			wr->pw = wr->uw = false;
 			break;
 		}
 
-		switch (wr.APTable) {
+		/* We don't use px for anything yet, but hey... */
+		wr->px = !((wr->desc & PTE_PXN) || wr->uw);
+		wr->ux = !(wr->desc & PTE_UXN);
+	} else {
+		wr->ur = wr->uw = wr->ux = false;
+
+		if (!(wr->desc & PTE_RDONLY)) {
+			wr->pr = wr->pw = true;
+		} else {
+			wr->pr = true;
+			wr->pw = false;
+		}
+
+		/* XN maps to UXN */
+		wr->px = !(wr->desc & PTE_UXN);
+	}
+}
+
+static void compute_s1_hierarchical_permissions(struct kvm_vcpu *vcpu,
+						struct s1_walk_info *wi,
+						struct s1_walk_result *wr)
+{
+	/* Hierarchical part of AArch64.S1DirectBasePermissions() */
+	if (wi->regime != TR_EL2) {
+		switch (wr->APTable) {
 		case 0b00:
 			break;
 		case 0b01:
-			ur = uw = false;
+			wr->ur = wr->uw = false;
 			break;
 		case 0b10:
-			pw = uw = false;
+			wr->pw = wr->uw = false;
 			break;
 		case 0b11:
-			pw = ur = uw = false;
+			wr->pw = wr->ur = wr->uw = false;
 			break;
 		}
 
-		/* We don't use px for anything yet, but hey... */
-		px = !((wr.desc & PTE_PXN) || wr.PXNTable || uw);
-		ux = !((wr.desc & PTE_UXN) || wr.UXNTable);
+		wr->px &= !wr->PXNTable;
+		wr->ux &= !wr->UXNTable;
+	} else {
+		if (wr->APTable & BIT(1))
+			wr->pw = false;
 
-		if (op == OP_AT_S1E1RP || op == OP_AT_S1E1WP) {
-			bool pan;
+		/* XN maps to UXN */
+		wr->px &= !wr->UXNTable;
+	}
+}
 
-			pan = *vcpu_cpsr(vcpu) & PSR_PAN_BIT;
-			pan &= ur || uw || (pan3_enabled(vcpu, wi.regime) && ux);
-			pw &= !pan;
-			pr &= !pan;
-		}
-	} else {
-		ur = uw = ux = false;
+static void compute_s1_permissions(struct kvm_vcpu *vcpu, u32 op,
+				   struct s1_walk_info *wi,
+				   struct s1_walk_result *wr)
+{
+	compute_s1_direct_permissions(vcpu, wi, wr);
 
-		if (!(wr.desc & PTE_RDONLY)) {
-			pr = pw = true;
-		} else {
-			pr = true;
-			pw = false;
-		}
+	if (!wi->hpd)
+		compute_s1_hierarchical_permissions(vcpu, wi, wr);
 
-		if (wr.APTable & BIT(1))
-			pw = false;
+	if (op == OP_AT_S1E1RP || op == OP_AT_S1E1WP) {
+		bool pan;
 
-		/* XN maps to UXN */
-		px = !((wr.desc & PTE_UXN) || wr.UXNTable);
+		pan = *vcpu_cpsr(vcpu) & PSR_PAN_BIT;
+		pan &= wr->ur || wr->uw || (pan3_enabled(vcpu, wi->regime) && wr->ux);
+		wr->pw &= !pan;
+		wr->pr &= !pan;
 	}
+}
+
+static u64 handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
+{
+	struct s1_walk_result wr = {};
+	struct s1_walk_info wi = {};
+	bool perm_fail = false;
+	int ret, idx;
+
+	ret = setup_s1_walk(vcpu, op, &wi, &wr, vaddr);
+	if (ret)
+		goto compute_par;
+
+	if (wr.level == S1_MMU_DISABLED)
+		goto compute_par;
+
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+	ret = walk_s1(vcpu, &wi, &wr, vaddr);
+
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+	if (ret)
+		goto compute_par;
 
-	perm_fail = false;
+	compute_s1_permissions(vcpu, op, &wi, &wr);
 
 	switch (op) {
 	case OP_AT_S1E1RP:
 	case OP_AT_S1E1R:
 	case OP_AT_S1E2R:
-		perm_fail = !pr;
+		perm_fail = !wr.pr;
 		break;
 	case OP_AT_S1E1WP:
 	case OP_AT_S1E1W:
 	case OP_AT_S1E2W:
-		perm_fail = !pw;
+		perm_fail = !wr.pw;
 		break;
 	case OP_AT_S1E0R:
-		perm_fail = !ur;
+		perm_fail = !wr.ur;
 		break;
 	case OP_AT_S1E0W:
-		perm_fail = !uw;
+		perm_fail = !wr.uw;
 		break;
 	case OP_AT_S1E1A:
 	case OP_AT_S1E2A:
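
For reference, the direct (non-hierarchical) decode in compute_s1_direct_permissions() follows the AP[2:1] encoding of a stage-1 block/page descriptor (AArch64.S1DirectBasePermissions() in the Arm ARM): FIELD_GET(PTE_USER | PTE_RDONLY, desc) yields a two-bit value with AP[1] (PTE_USER) in bit 0 and AP[2] (PTE_RDONLY) in bit 1. The standalone userspace sketch below reproduces that mapping; decode_ap() and struct s1_perms are illustrative names, not taken from the kernel:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative decode of the EL1&0 direct permissions, mirroring the
 * first switch in compute_s1_direct_permissions(). Bit 0 of 'ap' is
 * AP[1] (user access), bit 1 is AP[2] (read-only). */
struct s1_perms { bool pr, pw, ur, uw; };

static struct s1_perms decode_ap(unsigned int ap)
{
	switch (ap & 3) {
	case 0b00: return (struct s1_perms){ .pr = true, .pw = true };
	case 0b01: return (struct s1_perms){ .pr = true, .pw = true,
					     .ur = true, .uw = true };
	case 0b10: return (struct s1_perms){ .pr = true };
	default:   return (struct s1_perms){ .pr = true, .ur = true };
	}
}

int main(void)
{
	for (unsigned int ap = 0; ap < 4; ap++) {
		struct s1_perms p = decode_ap(ap);

		printf("AP[2:1]=%u%u: pr=%d pw=%d ur=%d uw=%d\n",
		       (ap >> 1) & 1, ap & 1, p.pr, p.pw, p.ur, p.uw);
	}

	return 0;
}

The hierarchical APTable/UXNTable/PXNTable restrictions and the PAN handling are then applied on top of this base decode, which is the split this patch makes explicit.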