
Commit 2a40b90

Sean Christopherson authored and bonzini (Paolo Bonzini) committed
KVM: x86: Pull the PGD's level from the MMU instead of recalculating it
Use the shadow_root_level from the current MMU as the root level for the PGD, i.e. for VMX's EPTP. This eliminates the weird dependency between VMX and the MMU where both must independently calculate the same root level for things to work correctly. Temporarily keep VMX's calculation of the level and use it to WARN if the incoming level diverges.

Opportunistically refactor kvm_mmu_load_pgd() to avoid indentation hell, and rename a 'cr3' param in the load_mmu_pgd prototype that managed to survive the cr3 purge.

No functional change intended.

Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 812f805 commit 2a40b90

File tree

6 files changed: +27 -13 lines changed

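Before the per-file diffs, a minimal standalone sketch (plain C, not kernel code) of the idea at the heart of this change: the EPTP's page-walk-length bits are derived from the root level that common MMU code now passes down (mmu->shadow_root_level), instead of VMX recomputing that level itself. The constants follow the architectural EPTP encoding (memory type in bits 2:0, walk length minus one in bits 5:3); the names echo the kernel's VMX_EPTP_* macros but everything here is illustrative only.

/*
 * Illustrative sketch only: how a root level of 4 vs. 5 selects the
 * EPTP page-walk-length bits.  Constants follow the architectural
 * EPTP layout; names echo the kernel's but are redefined locally.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_EPTP_MT_WB  0x6ull        /* bits 2:0 - write-back memory type     */
#define DEMO_EPTP_PWL_4  (3ull << 3)   /* bits 5:3 - 4-level walk (length - 1)  */
#define DEMO_EPTP_PWL_5  (4ull << 3)   /* bits 5:3 - 5-level walk (length - 1)  */
#define DEMO_PAGE_MASK   (~0xfffull)

static uint64_t demo_construct_eptp(uint64_t root_hpa, int root_level)
{
        uint64_t eptp = DEMO_EPTP_MT_WB;

        /*
         * The level is supplied by the caller (the MMU's shadow_root_level),
         * not recalculated here - that is the point of the commit.
         */
        eptp |= (root_level == 5) ? DEMO_EPTP_PWL_5 : DEMO_EPTP_PWL_4;
        eptp |= root_hpa & DEMO_PAGE_MASK;

        return eptp;
}

int main(void)
{
        uint64_t root = 0x123456000ull;

        printf("4-level EPTP: %#llx\n",
               (unsigned long long)demo_construct_eptp(root, 4));
        printf("5-level EPTP: %#llx\n",
               (unsigned long long)demo_construct_eptp(root, 5));
        return 0;
}

With the level flowing in from the MMU, kvm_mmu_load_pgd() in the diffs below can hand the same shadow_root_level to both the SVM and VMX hooks; VMX's own get_ept_level() survives only to feed the transitional WARN_ON() in vmx_load_mmu_pgd().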

arch/x86/include/asm/kvm_host.h

Lines changed: 2 additions & 1 deletion

@@ -1136,7 +1136,8 @@ struct kvm_x86_ops {
         int (*get_tdp_level)(struct kvm_vcpu *vcpu);
         u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);

-        void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, unsigned long cr3);
+        void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, unsigned long pgd,
+                             int pgd_level);

         bool (*has_wbinvd_exit)(void);

arch/x86/kvm/mmu.h

Lines changed: 7 additions & 3 deletions

@@ -90,9 +90,13 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)

 static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
 {
-        if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
-                kvm_x86_ops.load_mmu_pgd(vcpu, vcpu->arch.mmu->root_hpa |
-                                               kvm_get_active_pcid(vcpu));
+        u64 root_hpa = vcpu->arch.mmu->root_hpa;
+
+        if (!VALID_PAGE(root_hpa))
+                return;
+
+        kvm_x86_ops.load_mmu_pgd(vcpu, root_hpa | kvm_get_active_pcid(vcpu),
+                                 vcpu->arch.mmu->shadow_root_level);
 }

 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,

arch/x86/kvm/svm/svm.c

Lines changed: 2 additions & 1 deletion

@@ -3541,7 +3541,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
         return exit_fastpath;
 }

-static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
+static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root,
+                             int root_level)
 {
         struct vcpu_svm *svm = to_svm(vcpu);
         unsigned long cr3;

arch/x86/kvm/vmx/nested.c

Lines changed: 2 additions & 1 deletion

@@ -2162,7 +2162,8 @@ static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
          * consistency checks.
          */
         if (enable_ept && nested_early_check)
-                vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));
+                vmcs_write64(EPT_POINTER,
+                             construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL));

         /* All VMFUNCs are currently emulated through L0 vmexits. */
         if (cpu_has_vmx_vmfunc())

arch/x86/kvm/vmx/vmx.c

Lines changed: 12 additions & 6 deletions

@@ -2933,14 +2933,16 @@ static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)

 static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
 {
-        u64 root_hpa = vcpu->arch.mmu->root_hpa;
+        struct kvm_mmu *mmu = vcpu->arch.mmu;
+        u64 root_hpa = mmu->root_hpa;

         /* No flush required if the current context is invalid. */
         if (!VALID_PAGE(root_hpa))
                 return;

         if (enable_ept)
-                ept_sync_context(construct_eptp(vcpu, root_hpa));
+                ept_sync_context(construct_eptp(vcpu, root_hpa,
+                                                mmu->shadow_root_level));
         else if (!is_guest_mode(vcpu))
                 vpid_sync_context(to_vmx(vcpu)->vpid);
         else

@@ -3078,11 +3080,12 @@ static int get_ept_level(struct kvm_vcpu *vcpu)
         return vmx_get_tdp_level(vcpu);
 }

-u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
+u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa,
+                   int root_level)
 {
         u64 eptp = VMX_EPTP_MT_WB;

-        eptp |= (get_ept_level(vcpu) == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;
+        eptp |= (root_level == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;

         if (enable_ept_ad_bits &&
             (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))

@@ -3092,15 +3095,18 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
         return eptp;
 }

-static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd)
+static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd,
+                             int pgd_level)
 {
         struct kvm *kvm = vcpu->kvm;
         bool update_guest_cr3 = true;
         unsigned long guest_cr3;
         u64 eptp;

         if (enable_ept) {
-                eptp = construct_eptp(vcpu, pgd);
+                WARN_ON(pgd_level != get_ept_level(vcpu));
+
+                eptp = construct_eptp(vcpu, pgd, pgd_level);
                 vmcs_write64(EPT_POINTER, eptp);

                 if (kvm_x86_ops.tlb_remote_flush) {

arch/x86/kvm/vmx/vmx.h

Lines changed: 2 additions & 1 deletion

@@ -341,7 +341,8 @@ void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
 void ept_save_pdptrs(struct kvm_vcpu *vcpu);
 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
-u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
+u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa,
+                   int root_level);
 void update_exception_bitmap(struct kvm_vcpu *vcpu);
 void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
 bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
