Skip to content

Commit 85aa888

Browse files
Junaid Shahid authored and
bonzini committed
kvm: vmx: Sync all matching EPTPs when injecting nested EPT fault
When a nested EPT violation/misconfig is injected into the guest, the shadow EPT PTEs associated with that address need to be synced. This is done by kvm_inject_emulated_page_fault() before it calls nested_ept_inject_page_fault(). However, that will only sync the shadow EPT PTE associated with the current L1 EPTP. Since the ASID is based on the EP4TA rather than the full EPTP, syncing the current EPTP is not enough. The SPTEs associated with any other L1 EPTPs in the prev_roots cache with the same EP4TA also need to be synced. Signed-off-by: Junaid Shahid <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 375d1ad commit 85aa888

File tree

1 file changed

+41
-12
lines changed

1 file changed

+41
-12
lines changed

arch/x86/kvm/vmx/nested.c

Lines changed: 41 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -330,6 +330,31 @@ void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
330330
vcpu_put(vcpu);
331331
}
332332

333+
#define EPTP_PA_MASK GENMASK_ULL(51, 12)
334+
335+
static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
336+
{
337+
return VALID_PAGE(root_hpa) &&
338+
((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
339+
}
340+
341+
/*
 * Invalidate @addr in every cached prev_roots entry whose EPTP shares
 * the same EP4TA as @eptp.  TLB entries are tagged by EP4TA rather than
 * the full EPTP, so all such roots may hold stale translations for the
 * faulting address, not just the current root.
 */
static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
				       gpa_t addr)
{
	struct kvm_mmu_root_info *root;
	uint i;

	/* Only meaningful while running with nested paging (L2 active). */
	WARN_ON_ONCE(!mmu_is_nested(vcpu));

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		root = &vcpu->arch.mmu->prev_roots[i];

		if (!nested_ept_root_matches(root->hpa, root->pgd, eptp))
			continue;

		vcpu->arch.mmu->invlpg(vcpu, addr, root->hpa);
	}
}
357+
333358
static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
334359
struct x86_exception *fault)
335360
{
@@ -342,10 +367,22 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
342367
vm_exit_reason = EXIT_REASON_PML_FULL;
343368
vmx->nested.pml_full = false;
344369
exit_qualification &= INTR_INFO_UNBLOCK_NMI;
345-
} else if (fault->error_code & PFERR_RSVD_MASK)
346-
vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
347-
else
348-
vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
370+
} else {
371+
if (fault->error_code & PFERR_RSVD_MASK)
372+
vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
373+
else
374+
vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
375+
376+
/*
377+
* Although the caller (kvm_inject_emulated_page_fault) would
378+
* have already synced the faulting address in the shadow EPT
379+
* tables for the current EPTP12, we also need to sync it for
380+
* any other cached EPTP02s based on the same EP4TA, since the
381+
* TLB associates mappings to the EP4TA rather than the full EPTP.
382+
*/
383+
nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
384+
fault->address);
385+
}
349386

350387
nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
351388
vmcs12->guest_physical_address = fault->address;
@@ -5325,14 +5362,6 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
53255362
return nested_vmx_succeed(vcpu);
53265363
}
53275364

5328-
#define EPTP_PA_MASK GENMASK_ULL(51, 12)
5329-
5330-
static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
5331-
{
5332-
return VALID_PAGE(root_hpa) &&
5333-
((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
5334-
}
5335-
53365365
/* Emulate the INVEPT instruction */
53375366
static int handle_invept(struct kvm_vcpu *vcpu)
53385367
{

0 commit comments

Comments
 (0)