Skip to content

Commit 61b05a9

Browse files
Lai Jiangshan authored and bonzini (Paolo Bonzini) committed
KVM: X86: Don't unload MMU in kvm_vcpu_flush_tlb_guest()
kvm_mmu_unload() destroys all the PGD caches. Use the lighter
kvm_mmu_sync_roots() and kvm_mmu_sync_prev_roots() instead.

Signed-off-by: Lai Jiangshan <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 264d3dc commit 61b05a9

File tree

3 files changed

+22
-6
lines changed

3 files changed

+22
-6
lines changed

arch/x86/kvm/mmu.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -79,6 +79,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);
 
 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 {

arch/x86/kvm/mmu/mmu.c

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3647,6 +3647,9 @@ static bool is_unsync_root(hpa_t root)
 {
 	struct kvm_mmu_page *sp;
 
+	if (!VALID_PAGE(root))
+		return false;
+
 	/*
 	 * The read barrier orders the CPU's read of SPTE.W during the page table
 	 * walk before the reads of sp->unsync/sp->unsync_children here.
@@ -3714,6 +3717,19 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 	write_unlock(&vcpu->kvm->mmu_lock);
 }
 
+void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
+{
+	unsigned long roots_to_free = 0;
+	int i;
+
+	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+		if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa))
+			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
+
+	/* sync prev_roots by simply freeing them */
+	kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
+}
+
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
 				  u32 access, struct x86_exception *exception)
 {

arch/x86/kvm/x86.c

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3245,15 +3245,14 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
 	++vcpu->stat.tlb_flush;
 
 	if (!tdp_enabled) {
-		/*
+		/*
 		 * A TLB flush on behalf of the guest is equivalent to
 		 * INVPCID(all), toggling CR4.PGE, etc., which requires
-		 * a forced sync of the shadow page tables. Unload the
-		 * entire MMU here and the subsequent load will sync the
-		 * shadow page tables, and also flush the TLB.
+		 * a forced sync of the shadow page tables. Ensure all the
+		 * roots are synced and the guest TLB in hardware is clean.
 		 */
-		kvm_mmu_unload(vcpu);
-		return;
+		kvm_mmu_sync_roots(vcpu);
+		kvm_mmu_sync_prev_roots(vcpu);
 	}
 
 	static_call(kvm_x86_tlb_flush_guest)(vcpu);

0 commit comments

Comments (0)