Skip to content

Commit 59505b5

Browse files
Sean Christopherson authored and bonzini committed
KVM: x86/mmu: Add separate helper for shadow NPT root page role calc
Refactor the shadow NPT role calculation into a separate helper to better differentiate it from the non-nested shadow MMU; e.g. the NPT variant is never direct and derives its root level from the TDP level.

Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Reviewed-by: Vitaly Kuznetsov <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent f291a35 commit 59505b5

File tree

1 file changed

+25
-5
lines changed

1 file changed

+25
-5
lines changed

arch/x86/kvm/mmu/mmu.c

Lines changed: 25 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4908,17 +4908,27 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
49084908
}
49094909

49104910
/*
 * Compute the page-role bits common to all shadow MMUs (nested and
 * non-nested).  Starts from the generic role computed by
 * kvm_calc_mmu_role_common() and layers on the shadow-specific bits.
 *
 * @vcpu:      vCPU whose control-register state drives the role bits.
 * @base_only: forwarded to kvm_calc_mmu_role_common(); presumably limits
 *             the calculation to the base role — confirm against that
 *             helper's definition (not visible here).
 *
 * Note that this helper deliberately does NOT set role.base.direct or
 * role.base.level; those differ between the shadow-MMU and shadow-NPT
 * variants and are filled in by the respective callers.
 */
static union kvm_mmu_role
kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, bool base_only)
{
	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);

	/* SMEP/SMAP only matter to the shadow MMU when CR0.WP is clear. */
	role.base.smep_andnot_wp = role.ext.cr4_smep &&
		!is_write_protection(vcpu);
	role.base.smap_andnot_wp = role.ext.cr4_smap &&
		!is_write_protection(vcpu);
	/* Guest PTEs are 8 bytes iff PAE (or long mode, which implies PAE). */
	role.base.gpte_is_8_bytes = !!is_pae(vcpu);

	return role;
}
4923+
4924+
static union kvm_mmu_role
4925+
kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
4926+
{
4927+
union kvm_mmu_role role =
4928+
kvm_calc_shadow_root_page_role_common(vcpu, base_only);
4929+
4930+
role.base.direct = !is_paging(vcpu);
4931+
49224932
if (!is_long_mode(vcpu))
49234933
role.base.level = PT32E_ROOT_LEVEL;
49244934
else if (is_la57_mode(vcpu))
@@ -4956,14 +4966,24 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efe
49564966
shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
49574967
}
49584968

4969+
/*
 * Compute the root page role for a shadow MMU that shadows the guest's
 * NPT (nested paging) tables.
 *
 * @vcpu: vCPU running the L1 hypervisor whose NPT is being shadowed.
 *
 * Unlike the non-nested shadow MMU, the shadow NPT MMU is never direct
 * (L1's NPT is always shadowed) and derives its root level from the host
 * TDP level (vcpu->arch.tdp_level) rather than from guest paging mode.
 */
static union kvm_mmu_role
kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_role role =
		kvm_calc_shadow_root_page_role_common(vcpu, false);

	role.base.direct = false;
	role.base.level = vcpu->arch.tdp_level;

	return role;
}
4980+
49594981
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
49604982
gpa_t nested_cr3)
49614983
{
49624984
struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4963-
union kvm_mmu_role new_role =
4964-
kvm_calc_shadow_mmu_root_page_role(vcpu, false);
4985+
union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
49654986

4966-
new_role.base.level = vcpu->arch.tdp_level;
49674987
context->shadow_root_level = new_role.base.level;
49684988

49694989
__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);

0 commit comments

Comments
 (0)