
Commit 5b1fb11

KVM: x86/mmu: Plumb a @can_yield parameter into __walk_slot_rmaps()
Add a @can_yield param to __walk_slot_rmaps() to control whether or not dropping mmu_lock and conditionally rescheduling is allowed. This will allow using __walk_slot_rmaps() and thus cond_resched() to handle mmu_notifier invalidations, which usually allow blocking/yielding, but not when invoked by the OOM killer.

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
1 parent 0a37fff commit 5b1fb11

1 file changed, 8 insertions(+), 4 deletions(-)
arch/x86/kvm/mmu/mmu.c

@@ -1526,7 +1526,8 @@ static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
 						  slot_rmaps_handler fn,
 						  int start_level, int end_level,
 						  gfn_t start_gfn, gfn_t end_gfn,
-						  bool flush_on_yield, bool flush)
+						  bool can_yield, bool flush_on_yield,
+						  bool flush)
 {
 	struct slot_rmap_walk_iterator iterator;
 
@@ -1537,6 +1538,9 @@ static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
 		if (iterator.rmap)
 			flush |= fn(kvm, iterator.rmap, slot);
 
+		if (!can_yield)
+			continue;
+
 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
 			if (flush && flush_on_yield) {
 				kvm_flush_remote_tlbs_range(kvm, start_gfn,
@@ -1558,7 +1562,7 @@ static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
 {
 	return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
 				 slot->base_gfn, slot->base_gfn + slot->npages - 1,
-				 flush_on_yield, false);
+				 true, flush_on_yield, false);
 }
 
 static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
@@ -6600,7 +6604,7 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e
 
 		flush = __walk_slot_rmaps(kvm, memslot, __kvm_zap_rmap,
 					  PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
-					  start, end - 1, true, flush);
+					  start, end - 1, true, true, flush);
 	}
 }
 
@@ -6888,7 +6892,7 @@ static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm,
 	 */
 	for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--)
 		__walk_slot_rmaps(kvm, slot, shadow_mmu_try_split_huge_pages,
-				  level, level, start, end - 1, true, false);
+				  level, level, start, end - 1, true, true, false);
 }
 
 /* Must be called with the mmu_lock held in write-mode. */
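
For context, a minimal sketch of how a later mmu_notifier invalidation path might consume the new @can_yield parameter. The helper name and the may_block plumbing here are illustrative assumptions, not part of this commit; only the __walk_slot_rmaps() signature shown above is taken from the patch.

/*
 * Hypothetical caller (illustration only): forward the notifier's
 * may_block flag as @can_yield so the walk only drops mmu_lock and
 * reschedules when the caller is allowed to block, e.g. not when the
 * invalidation is issued from the OOM killer.
 */
static bool example_zap_rmaps_range(struct kvm *kvm,
				    const struct kvm_memory_slot *slot,
				    gfn_t start, gfn_t end,
				    bool may_block, bool flush)
{
	return __walk_slot_rmaps(kvm, slot, __kvm_zap_rmap,
				 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
				 start, end - 1,
				 may_block /* can_yield */,
				 true /* flush_on_yield */, flush);
}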
