
Commit 4b85c92

sean-jc authored and bonzini committed
KVM: x86/mmu: Remove spurious TLB flushes in TDP MMU zap collapsible path
Drop the "flush" param and return values to/from the TDP MMU's helper for
zapping collapsible SPTEs. Because the helper runs with mmu_lock held for
read, not write, it uses tdp_mmu_zap_spte_atomic(), and the atomic zap
handles the necessary remote TLB flush.

Similarly, because mmu_lock is dropped and re-acquired between zapping
legacy MMUs and zapping TDP MMUs, kvm_mmu_zap_collapsible_sptes() must
handle remote TLB flushes from the legacy MMU before calling into the
TDP MMU.

Fixes: e220971 ("KVM: x86/mmu: Skip rmap operations if rmaps not allocated")
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 7533377 commit 4b85c92
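
As a concrete illustration of the flush contract described above, the following stand-alone C sketch models the two paths: the legacy MMU zaps under the write lock and must flush before that lock is released, while the TDP MMU zaps under the read lock and relies on each successful atomic zap to flush on its own. This is a toy model, not kernel code; every name in it is an illustrative stand-in for the helpers touched by this commit.

/*
 * Toy, self-contained model of the flush contract; nothing here is
 * kernel code, the names merely mirror the patch for readability.
 */
#include <stdbool.h>
#include <stdio.h>

static void flush_remote_tlbs(const char *who)
{
	printf("remote TLB flush (%s)\n", who);
}

/* Legacy-MMU stand-in: reports whether a flush is owed to the caller. */
static bool zap_legacy_sptes(void)
{
	return true;	/* pretend a SPTE was zapped */
}

/*
 * TDP-MMU stand-in: like tdp_mmu_zap_spte_atomic(), each successful
 * zap flushes immediately, so the caller carries no "flush" state.
 */
static void zap_spte_atomic(int gfn)
{
	printf("atomic zap of gfn %d -> ", gfn);
	flush_remote_tlbs("atomic zap");
}

int main(void)
{
	/* Under the (conceptual) write lock: flush before unlocking. */
	if (zap_legacy_sptes())
		flush_remote_tlbs("legacy MMU");

	/* Under the (conceptual) read lock: no deferred flush needed. */
	for (int gfn = 0; gfn < 3; gfn++)
		zap_spte_atomic(gfn);
	return 0;
}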

File tree

3 files changed: 11 additions & 25 deletions

arch/x86/kvm/mmu/mmu.c

Lines changed: 2 additions & 7 deletions
@@ -5848,26 +5848,21 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 				   const struct kvm_memory_slot *slot)
 {
-	bool flush;
-
 	if (kvm_memslots_have_rmaps(kvm)) {
 		write_lock(&kvm->mmu_lock);
 		/*
 		 * Zap only 4k SPTEs since the legacy MMU only supports dirty
 		 * logging at a 4k granularity and never creates collapsible
 		 * 2m SPTEs during dirty logging.
 		 */
-		flush = slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
-		if (flush)
+		if (slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true))
 			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
 		write_unlock(&kvm->mmu_lock);
 	}
 
 	if (is_tdp_mmu_enabled(kvm)) {
 		read_lock(&kvm->mmu_lock);
-		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, false);
-		if (flush)
-			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+		kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
 		read_unlock(&kvm->mmu_lock);
 	}
 }

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 7 additions & 15 deletions
@@ -1362,10 +1362,9 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
  * Clear leaf entries which could be replaced by large mappings, for
  * GFNs within the slot.
  */
-static bool zap_collapsible_spte_range(struct kvm *kvm,
+static void zap_collapsible_spte_range(struct kvm *kvm,
 				       struct kvm_mmu_page *root,
-				       const struct kvm_memory_slot *slot,
-				       bool flush)
+				       const struct kvm_memory_slot *slot)
 {
 	gfn_t start = slot->base_gfn;
 	gfn_t end = start + slot->npages;
@@ -1376,10 +1375,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
 
 	tdp_root_for_each_pte(iter, root, start, end) {
 retry:
-		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
-			flush = false;
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
 			continue;
-		}
 
 		if (!is_shadow_present_pte(iter.old_spte) ||
 		    !is_last_spte(iter.old_spte, iter.level))
@@ -1391,6 +1388,7 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
 							    pfn, PG_LEVEL_NUM))
 			continue;
 
+		/* Note, a successful atomic zap also does a remote TLB flush. */
 		if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
 			/*
 			 * The iter must explicitly re-read the SPTE because
@@ -1399,30 +1397,24 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
 			goto retry;
 		}
-		flush = true;
 	}
 
 	rcu_read_unlock();
-
-	return flush;
 }
 
 /*
  * Clear non-leaf entries (and free associated page tables) which could
  * be replaced by large mappings, for GFNs within the slot.
  */
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-				       const struct kvm_memory_slot *slot,
-				       bool flush)
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+				       const struct kvm_memory_slot *slot)
 {
 	struct kvm_mmu_page *root;
 
 	lockdep_assert_held_read(&kvm->mmu_lock);
 
 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
-		flush = zap_collapsible_spte_range(kvm, root, slot, flush);
-
-	return flush;
+		zap_collapsible_spte_range(kvm, root, slot);
 }
 
 /*
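
The goto retry in zap_collapsible_spte_range() above is a compare-and-swap retry loop: tdp_mmu_zap_spte_atomic() can fail when the SPTE is changed by a racing writer while mmu_lock is held only for read, in which case the iterator re-reads the SPTE and tries again. The stand-alone C11 sketch below models just that pattern; it is a toy, not the kernel's SPTE machinery, and the "SPTE" here is an ordinary atomic variable introduced purely for illustration.

/* Toy CAS retry loop in C11 atomics; not the kernel implementation. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t spte = 0x1234;	/* stand-in "SPTE" */

static void zap_spte(void)
{
	uint64_t old = atomic_load(&spte);

	/*
	 * Mirrors the diff's retry: if the CAS fails because another
	 * writer raced, "old" is refreshed with the current value (the
	 * analogue of the explicit READ_ONCE() re-read) and we try again.
	 */
	while (!atomic_compare_exchange_weak(&spte, &old, 0))
		;

	printf("zapped, old value 0x%llx\n", (unsigned long long)old);
}

int main(void)
{
	zap_spte();
	return 0;
}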

arch/x86/kvm/mmu/tdp_mmu.h

Lines changed: 2 additions & 3 deletions
@@ -64,9 +64,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 				       struct kvm_memory_slot *slot,
 				       gfn_t gfn, unsigned long mask,
 				       bool wrprot);
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-				       const struct kvm_memory_slot *slot,
-				       bool flush);
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+				       const struct kvm_memory_slot *slot);
 
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
 				   struct kvm_memory_slot *slot, gfn_t gfn,
