Skip to content

Commit 8c00865

Browse files
committed
KVM: MMU: stop dereferencing vcpu->arch.mmu to get the context for MMU init
kvm_init_shadow_mmu() was actually the only function that could be called with different vcpu->arch.mmu values. Now that kvm_init_shadow_npt_mmu() is separated from kvm_init_shadow_mmu(), we always know the MMU context we need to use and there is no need to dereference vcpu->arch.mmu pointer. Based on a patch by Vitaly Kuznetsov <[email protected]>. Signed-off-by: Paolo Bonzini <[email protected]> Signed-off-by: Vitaly Kuznetsov <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 0f04a2a commit 8c00865

File tree

1 file changed

+10
-11
lines changed

1 file changed

+10
-11
lines changed

arch/x86/kvm/mmu/mmu.c

Lines changed: 10 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -4850,7 +4850,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
48504850

48514851
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
48524852
{
4853-
struct kvm_mmu *context = vcpu->arch.mmu;
4853+
struct kvm_mmu *context = &vcpu->arch.root_mmu;
48544854
union kvm_mmu_role new_role =
48554855
kvm_calc_tdp_mmu_root_page_role(vcpu, false);
48564856

@@ -4918,11 +4918,10 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
49184918
return role;
49194919
}
49204920

4921-
static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4,
4922-
u32 efer, union kvm_mmu_role new_role)
4921+
static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
4922+
u32 cr0, u32 cr4, u32 efer,
4923+
union kvm_mmu_role new_role)
49234924
{
4924-
struct kvm_mmu *context = vcpu->arch.mmu;
4925-
49264925
if (!(cr0 & X86_CR0_PG))
49274926
nonpaging_init_context(vcpu, context);
49284927
else if (efer & EFER_LMA)
@@ -4938,23 +4937,23 @@ static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4,
49384937

49394938
static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
49404939
{
4941-
struct kvm_mmu *context = vcpu->arch.mmu;
4940+
struct kvm_mmu *context = &vcpu->arch.root_mmu;
49424941
union kvm_mmu_role new_role =
49434942
kvm_calc_shadow_mmu_root_page_role(vcpu, false);
49444943

49454944
if (new_role.as_u64 != context->mmu_role.as_u64)
4946-
shadow_mmu_init_context(vcpu, cr0, cr4, efer, new_role);
4945+
shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
49474946
}
49484947

49494948
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
49504949
gpa_t nested_cr3)
49514950
{
4952-
struct kvm_mmu *context = vcpu->arch.mmu;
4951+
struct kvm_mmu *context = &vcpu->arch.guest_mmu;
49534952
union kvm_mmu_role new_role =
49544953
kvm_calc_shadow_mmu_root_page_role(vcpu, false);
49554954

49564955
if (new_role.as_u64 != context->mmu_role.as_u64)
4957-
shadow_mmu_init_context(vcpu, cr0, cr4, efer, new_role);
4956+
shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
49584957
}
49594958
EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
49604959

@@ -4990,7 +4989,7 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
49904989
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
49914990
bool accessed_dirty, gpa_t new_eptp)
49924991
{
4993-
struct kvm_mmu *context = vcpu->arch.mmu;
4992+
struct kvm_mmu *context = &vcpu->arch.guest_mmu;
49944993
u8 level = vmx_eptp_page_walk_level(new_eptp);
49954994
union kvm_mmu_role new_role =
49964995
kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
@@ -5024,7 +5023,7 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
50245023

50255024
static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
50265025
{
5027-
struct kvm_mmu *context = vcpu->arch.mmu;
5026+
struct kvm_mmu *context = &vcpu->arch.root_mmu;
50285027

50295028
kvm_init_shadow_mmu(vcpu,
50305029
kvm_read_cr0_bits(vcpu, X86_CR0_PG),

0 commit comments

Comments (0)