@@ -1577,25 +1577,6 @@ static bool __kvm_rmap_zap_gfn_range(struct kvm *kvm,
 					 start, end - 1, can_yield, true, flush);
 }
 
-typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-			       struct kvm_memory_slot *slot, gfn_t gfn,
-			       int level);
-
-static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
-						 struct kvm_gfn_range *range,
-						 rmap_handler_t handler)
-{
-	struct slot_rmap_walk_iterator iterator;
-	bool ret = false;
-
-	for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
-				 range->start, range->end - 1, &iterator)
-		ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
-			       iterator.level);
-
-	return ret;
-}
-
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	bool flush = false;
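
The hunk above removes kvm_handle_gfn_range(), the generic walker that visited every rmap head covering a gfn range (from PG_LEVEL_4K up to KVM_MAX_HUGEPAGE_LEVEL) and dispatched to its caller's rmap_handler_t callback, OR-ing the results together. As a rough, self-contained sketch of that shape (toy names, not KVM code), the pattern being dropped looks like this:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for rmap_handler_t: one indirect call per entry walked. */
typedef bool (*toy_handler_t)(unsigned long *entry);

/* Mirrors kvm_handle_gfn_range(): walk a range, OR the handler results. */
static bool toy_walk_range(unsigned long *entries, int nr, toy_handler_t handler)
{
	bool ret = false;
	int i;

	for (i = 0; i < nr; i++)
		ret |= handler(&entries[i]);

	return ret;
}

/* Toy handler: bit 0 stands in for an Accessed bit; test and clear it. */
static bool toy_age(unsigned long *entry)
{
	bool young = *entry & 1;

	*entry &= ~1UL;
	return young;
}

int main(void)
{
	unsigned long entries[4] = { 1, 0, 1, 0 };

	printf("first pass young:  %d\n", toy_walk_range(entries, 4, toy_age));
	printf("second pass young: %d\n", toy_walk_range(entries, 4, toy_age));
	return 0;
}

With aging left as the only user, the indirection no longer earns its keep, which is presumably why it gives way to the dedicated helper added further down.
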
@@ -1615,31 +1596,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 	return flush;
 }
 
-static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-			 struct kvm_memory_slot *slot, gfn_t gfn, int level)
-{
-	u64 *sptep;
-	struct rmap_iterator iter;
-	int young = 0;
-
-	for_each_rmap_spte(rmap_head, &iter, sptep)
-		young |= mmu_spte_age(sptep);
-
-	return young;
-}
-
-static bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-			      struct kvm_memory_slot *slot, gfn_t gfn, int level)
-{
-	u64 *sptep;
-	struct rmap_iterator iter;
-
-	for_each_rmap_spte(rmap_head, &iter, sptep)
-		if (is_accessed_spte(*sptep))
-			return true;
-	return false;
-}
-
 #define RMAP_RECYCLE_THRESHOLD 1000
 
 static void __rmap_add(struct kvm *kvm,
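
The two callbacks removed here, kvm_age_rmap() and kvm_test_age_rmap(), were the handlers passed to the now-deleted kvm_handle_gfn_range(); both walks are folded into the single kvm_rmap_age_gfn_range() helper added in the next hunk.
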
@@ -1674,12 +1630,32 @@ static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
 	__rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
 }
 
+static bool kvm_rmap_age_gfn_range(struct kvm *kvm,
+				   struct kvm_gfn_range *range, bool test_only)
+{
+	struct slot_rmap_walk_iterator iterator;
+	struct rmap_iterator iter;
+	bool young = false;
+	u64 *sptep;
+
+	for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
+				 range->start, range->end - 1, &iterator) {
+		for_each_rmap_spte(iterator.rmap, &iter, sptep) {
+			if (test_only && is_accessed_spte(*sptep))
+				return true;
+
+			young = mmu_spte_age(sptep);
+		}
+	}
+
+	return young;
+}
+
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	bool young = false;
 
 	if (kvm_memslots_have_rmaps(kvm))
-		young = kvm_handle_gfn_range(kvm, range, kvm_age_rmap);
+		young = kvm_rmap_age_gfn_range(kvm, range, false);
 
 	if (tdp_mmu_enabled)
 		young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
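
The test_only flag is what lets one walker replace both callbacks: a test-only caller can return as soon as any accessed SPTE turns up, without writing anything, while an aging caller visits every SPTE and clears its Accessed bit through mmu_spte_age(). A minimal stand-alone sketch of that flag-splitting pattern (illustrative names, not KVM code; this sketch OR-accumulates results the way the removed kvm_age_rmap() did):

#include <stdbool.h>
#include <stdio.h>

/* One walker serves both queries, selected by test_only. */
static bool toy_age_range(unsigned long *entries, int nr, bool test_only)
{
	bool young = false;
	int i;

	for (i = 0; i < nr; i++) {
		bool entry_young = entries[i] & 1;	/* bit 0 mimics Accessed */

		/* Test-only callers bail at the first young entry, no writes. */
		if (test_only && entry_young)
			return true;

		/* Aging path: record and clear (a no-op for idle entries). */
		young |= entry_young;
		entries[i] &= ~1UL;
	}

	return young;
}

int main(void)
{
	unsigned long entries[4] = { 0, 1, 0, 0 };

	printf("test: %d\n", toy_age_range(entries, 4, true));  /* young, left intact */
	printf("age:  %d\n", toy_age_range(entries, 4, false)); /* young, now cleared */
	printf("test: %d\n", toy_age_range(entries, 4, true));  /* no longer young */
	return 0;
}

The asymmetry shows up in the callers: kvm_age_gfn() above passes false and pays for the full clearing walk, while kvm_test_age_gfn() in the final hunk passes true and gets the early exit.
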
@@ -1692,7 +1668,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 	bool young = false;
 
 	if (kvm_memslots_have_rmaps(kvm))
-		young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmap);
+		young = kvm_rmap_age_gfn_range(kvm, range, true);
 
 	if (tdp_mmu_enabled)
 		young |= kvm_tdp_mmu_test_age_gfn(kvm, range);