Skip to content

Commit 0f04a2a

Browse files
vittyvk authored and bonzini committed
KVM: nSVM: split kvm_init_shadow_npt_mmu() from kvm_init_shadow_mmu()
As a preparatory change for moving kvm_mmu_new_pgd() from nested_prepare_vmcb_save() to nested_svm_init_mmu_context() split kvm_init_shadow_npt_mmu() from kvm_init_shadow_mmu(). This also makes the code look more like nVMX (kvm_init_shadow_ept_mmu()). No functional change intended. Signed-off-by: Vitaly Kuznetsov <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
1 parent d574c53 commit 0f04a2a

File tree

3 files changed

+28
-9
lines changed

3 files changed

+28
-9
lines changed

arch/x86/kvm/mmu.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,8 @@ void
5757
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
5858

5959
void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
60-
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer);
60+
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
61+
gpa_t nested_cr3);
6162
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
6263
bool accessed_dirty, gpa_t new_eptp);
6364
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);

arch/x86/kvm/mmu/mmu.c

Lines changed: 24 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -4918,14 +4918,10 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
49184918
return role;
49194919
}
49204920

4921-
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
4921+
static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4,
4922+
u32 efer, union kvm_mmu_role new_role)
49224923
{
49234924
struct kvm_mmu *context = vcpu->arch.mmu;
4924-
union kvm_mmu_role new_role =
4925-
kvm_calc_shadow_mmu_root_page_role(vcpu, false);
4926-
4927-
if (new_role.as_u64 == context->mmu_role.as_u64)
4928-
return;
49294925

49304926
if (!(cr0 & X86_CR0_PG))
49314927
nonpaging_init_context(vcpu, context);
@@ -4939,7 +4935,28 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
49394935
context->mmu_role.as_u64 = new_role.as_u64;
49404936
reset_shadow_zero_bits_mask(vcpu, context);
49414937
}
4942-
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
4938+
4939+
static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
4940+
{
4941+
struct kvm_mmu *context = vcpu->arch.mmu;
4942+
union kvm_mmu_role new_role =
4943+
kvm_calc_shadow_mmu_root_page_role(vcpu, false);
4944+
4945+
if (new_role.as_u64 != context->mmu_role.as_u64)
4946+
shadow_mmu_init_context(vcpu, cr0, cr4, efer, new_role);
4947+
}
4948+
4949+
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
4950+
gpa_t nested_cr3)
4951+
{
4952+
struct kvm_mmu *context = vcpu->arch.mmu;
4953+
union kvm_mmu_role new_role =
4954+
kvm_calc_shadow_mmu_root_page_role(vcpu, false);
4955+
4956+
if (new_role.as_u64 != context->mmu_role.as_u64)
4957+
shadow_mmu_init_context(vcpu, cr0, cr4, efer, new_role);
4958+
}
4959+
EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
49434960

49444961
static union kvm_mmu_role
49454962
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,

arch/x86/kvm/svm/nested.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,8 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
8787
WARN_ON(mmu_is_nested(vcpu));
8888

8989
vcpu->arch.mmu = &vcpu->arch.guest_mmu;
90-
kvm_init_shadow_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer);
90+
kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer,
91+
svm->nested.ctl.nested_cr3);
9192
vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
9293
vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
9394
vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;

0 commit comments

Comments
 (0)