
Commit 0a37fff

KVM: x86/mmu: Move walk_slot_rmaps() up near for_each_slot_rmap_range()

Move walk_slot_rmaps() and friends up near for_each_slot_rmap_range() so
that the walkers can be used to handle mmu_notifier invalidations, and so
that similar functionality has some amount of locality in code.

No functional change intended.

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
1 parent 98a69b9 commit 0a37fff
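
As a rough sketch of how the relocated walkers are consumed (illustration only, not part of this commit; the handler and caller names below are made up), a slot_rmaps_handler is invoked once per rmap head in the slot and returns whether a remote TLB flush is needed:

static bool example_handler(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			    const struct kvm_memory_slot *slot)
{
	/*
	 * Process the SPTEs chained off @rmap_head here; per the handler
	 * contract, return true if the changes require a remote TLB flush.
	 */
	return false;
}

static void example_walk(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	bool flush;

	/* walk_slot_rmaps() asserts that mmu_lock is held for write. */
	write_lock(&kvm->mmu_lock);
	flush = walk_slot_rmaps(kvm, slot, example_handler,
				PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
				true /* flush_on_yield */);
	write_unlock(&kvm->mmu_lock);

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

walk_slot_rmaps() itself handles yielding: with flush_on_yield=true it flushes the already-walked GFN range before dropping mmu_lock, which is what makes it suitable for long-running paths such as mmu_notifier invalidations.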

File tree

1 file changed (+53, -53 lines)

arch/x86/kvm/mmu/mmu.c

Lines changed: 53 additions & 53 deletions
@@ -1516,6 +1516,59 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
 	     slot_rmap_walk_okay(_iter_);				\
 	     slot_rmap_walk_next(_iter_))
 
+/* The return value indicates if tlb flush on all vcpus is needed. */
+typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
+				    struct kvm_rmap_head *rmap_head,
+				    const struct kvm_memory_slot *slot);
+
+static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
+					      const struct kvm_memory_slot *slot,
+					      slot_rmaps_handler fn,
+					      int start_level, int end_level,
+					      gfn_t start_gfn, gfn_t end_gfn,
+					      bool flush_on_yield, bool flush)
+{
+	struct slot_rmap_walk_iterator iterator;
+
+	lockdep_assert_held_write(&kvm->mmu_lock);
+
+	for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
+				 end_gfn, &iterator) {
+		if (iterator.rmap)
+			flush |= fn(kvm, iterator.rmap, slot);
+
+		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
+			if (flush && flush_on_yield) {
+				kvm_flush_remote_tlbs_range(kvm, start_gfn,
+							    iterator.gfn - start_gfn + 1);
+				flush = false;
+			}
+			cond_resched_rwlock_write(&kvm->mmu_lock);
+		}
+	}
+
+	return flush;
+}
+
+static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
+					    const struct kvm_memory_slot *slot,
+					    slot_rmaps_handler fn,
+					    int start_level, int end_level,
+					    bool flush_on_yield)
+{
+	return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
+				 slot->base_gfn, slot->base_gfn + slot->npages - 1,
+				 flush_on_yield, false);
+}
+
+static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
+					       const struct kvm_memory_slot *slot,
+					       slot_rmaps_handler fn,
+					       bool flush_on_yield)
+{
+	return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);
+}
+
 typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 			       struct kvm_memory_slot *slot, gfn_t gfn,
 			       int level);
@@ -6272,59 +6325,6 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
 }
 EXPORT_SYMBOL_GPL(kvm_configure_mmu);
 
-/* The return value indicates if tlb flush on all vcpus is needed. */
-typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
-				    struct kvm_rmap_head *rmap_head,
-				    const struct kvm_memory_slot *slot);
-
-static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
-					      const struct kvm_memory_slot *slot,
-					      slot_rmaps_handler fn,
-					      int start_level, int end_level,
-					      gfn_t start_gfn, gfn_t end_gfn,
-					      bool flush_on_yield, bool flush)
-{
-	struct slot_rmap_walk_iterator iterator;
-
-	lockdep_assert_held_write(&kvm->mmu_lock);
-
-	for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
-				 end_gfn, &iterator) {
-		if (iterator.rmap)
-			flush |= fn(kvm, iterator.rmap, slot);
-
-		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
-			if (flush && flush_on_yield) {
-				kvm_flush_remote_tlbs_range(kvm, start_gfn,
-							    iterator.gfn - start_gfn + 1);
-				flush = false;
-			}
-			cond_resched_rwlock_write(&kvm->mmu_lock);
-		}
-	}
-
-	return flush;
-}
-
-static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
-					    const struct kvm_memory_slot *slot,
-					    slot_rmaps_handler fn,
-					    int start_level, int end_level,
-					    bool flush_on_yield)
-{
-	return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
-				 slot->base_gfn, slot->base_gfn + slot->npages - 1,
-				 flush_on_yield, false);
-}
-
-static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
-					       const struct kvm_memory_slot *slot,
-					       slot_rmaps_handler fn,
-					       bool flush_on_yield)
-{
-	return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);
-}
-
 static void free_mmu_pages(struct kvm_mmu *mmu)
 {
 	if (!tdp_enabled && mmu->pae_root)
