Commit 548f87f

KVM: x86/mmu: Honor NEED_RESCHED when zapping rmaps and blocking is allowed
Convert kvm_unmap_gfn_range(), which is the helper that zaps rmap SPTEs in
response to an mmu_notifier invalidation, to use __kvm_rmap_zap_gfn_range()
and feed in range->may_block. In other words, honor NEED_RESCHED by way of
cond_resched() when zapping rmaps. This fixes a long-standing issue where
KVM could process an absurd number of rmap entries without ever yielding,
e.g. if an mmu_notifier fired on a PUD (or larger) range.

Opportunistically rename __kvm_zap_rmap() to kvm_zap_rmap(), and drop the
old kvm_zap_rmap(). Ideally, the shuffling would be done in a different
patch, but that just makes the compiler unhappy, e.g.

  arch/x86/kvm/mmu/mmu.c:1462:13: error: 'kvm_zap_rmap' defined but not used

Reported-by: Peter Xu <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
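
[Editorial note, not part of the commit: range->may_block originates in KVM's
common mmu_notifier glue in virt/kvm/kvm_main.c, where the invalidation
handler is registered together with the blockability of the notifier range.
A sketch of that wiring, abbreviated to the relevant fields (the real range
struct also carries on_lock/flush_on_ret and friends):

/*
 * Sketch of how range->may_block is populated before kvm_unmap_gfn_range()
 * runs, based on the mmu_notifier_invalidate_range_start() glue in
 * virt/kvm/kvm_main.c.  Abbreviated; not the verbatim upstream struct.
 */
const struct kvm_mmu_notifier_range hva_range = {
        .start          = range->start,
        .end            = range->end,
        .handler        = kvm_unmap_gfn_range,
        /* Blocking is allowed iff the primary MMU says the range may block. */
        .may_block      = mmu_notifier_range_blockable(range),
};
]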
1 parent dd9eaad commit 548f87f

File tree

1 file changed: 6 additions, 10 deletions


arch/x86/kvm/mmu/mmu.c

Lines changed: 6 additions & 10 deletions
@@ -1435,16 +1435,10 @@ static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
 	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
 }
 
-static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-			   const struct kvm_memory_slot *slot)
-{
-	return kvm_zap_all_rmap_sptes(kvm, rmap_head);
-}
-
 static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-			 struct kvm_memory_slot *slot, gfn_t gfn, int level)
+			 const struct kvm_memory_slot *slot)
 {
-	return __kvm_zap_rmap(kvm, rmap_head, slot);
+	return kvm_zap_all_rmap_sptes(kvm, rmap_head);
 }
 
 struct slot_rmap_walk_iterator {
@@ -1578,7 +1572,7 @@ static bool __kvm_rmap_zap_gfn_range(struct kvm *kvm,
 				     gfn_t start, gfn_t end, bool can_yield,
 				     bool flush)
 {
-	return __walk_slot_rmaps(kvm, slot, __kvm_zap_rmap,
+	return __walk_slot_rmaps(kvm, slot, kvm_zap_rmap,
 				 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
 				 start, end - 1, can_yield, true, flush);
 }
@@ -1607,7 +1601,9 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 	bool flush = false;
 
 	if (kvm_memslots_have_rmaps(kvm))
-		flush = kvm_handle_gfn_range(kvm, range, kvm_zap_rmap);
+		flush = __kvm_rmap_zap_gfn_range(kvm, range->slot,
+						 range->start, range->end,
+						 range->may_block, flush);
 
 	if (tdp_mmu_enabled)
 		flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
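
[Editorial note, not part of the commit: the yield itself happens inside
__walk_slot_rmaps(), which this path can now reach with can_yield=true. A
simplified sketch of that pattern, mirroring the shape of the mmu.c walker
but omitting its flush-before-yield bookkeeping (flush_on_yield):

/*
 * Simplified sketch of the yielding rmap walk this commit enables for
 * mmu_notifier invalidations.  Not the verbatim __walk_slot_rmaps().
 */
static bool walk_and_zap_rmaps_sketch(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end, bool can_yield)
{
	struct slot_rmap_walk_iterator iterator;
	bool flush = false;

	for_each_slot_rmap_range(slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
				 start, end, &iterator) {
		if (iterator.rmap)
			flush |= kvm_zap_rmap(kvm, iterator.rmap, slot);

		/*
		 * Pre-patch, the mmu_notifier path came through with
		 * can_yield=false, so a PUD-or-larger invalidation zapped
		 * every rmap entry in one uninterruptible burst.
		 */
		if (can_yield &&
		    (need_resched() || rwlock_needbreak(&kvm->mmu_lock)))
			cond_resched_rwlock_write(&kvm->mmu_lock);
	}

	return flush;
}
]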
