@@ -787,9 +787,6 @@ static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
 			continue;					\
 		else
 
-#define tdp_mmu_for_each_pte(_iter, _kvm, _root, _start, _end)	\
-	for_each_tdp_pte(_iter, _kvm, _root, _start, _end)
-
 static inline bool __must_check tdp_mmu_iter_need_resched(struct kvm *kvm,
							  struct tdp_iter *iter)
 {
@@ -1248,7 +1245,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 
 	rcu_read_lock();
 
-	tdp_mmu_for_each_pte(iter, kvm, root, fault->gfn, fault->gfn + 1) {
+	for_each_tdp_pte(iter, kvm, root, fault->gfn, fault->gfn + 1) {
 		int r;
 
 		if (fault->nx_huge_page_workaround_enabled)
@@ -1918,7 +1915,7 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
 
 	*root_level = vcpu->arch.mmu->root_role.level;
 
-	tdp_mmu_for_each_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
+	for_each_tdp_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
 		leaf = iter.level;
 		sptes[leaf] = iter.old_spte;
 	}
@@ -1945,7 +1942,7 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
 	struct tdp_iter iter;
 	tdp_ptep_t sptep = NULL;
 
-	tdp_mmu_for_each_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
+	for_each_tdp_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
 		*spte = iter.old_spte;
 		sptep = iter.sptep;
 	}
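Note on the change: as the first hunk shows, tdp_mmu_for_each_pte() was a pure one-line alias for for_each_tdp_pte() with no extra logic, so dropping the wrapper and invoking the underlying iterator at each call site is behavior-preserving. Below is a minimal, self-contained sketch of the same refactor pattern; the each_item()/wrapper_each_item() macros are hypothetical stand-ins, not the kernel's TDP iterators.

#include <stdio.h>

/* Underlying iterator macro, analogous to for_each_tdp_pte(). */
#define each_item(_i, _start, _end) \
	for (int _i = (_start); _i < (_end); _i++)

/* One-line alias, analogous to the removed tdp_mmu_for_each_pte():
 * it expands directly to the macro above and adds nothing, so call
 * sites can use each_item() directly. (Hypothetical example.) */
#define wrapper_each_item(_i, _start, _end) \
	each_item(_i, _start, _end)

int main(void)
{
	/* Before the refactor: iterate through the alias. */
	wrapper_each_item(i, 0, 3)
		printf("via wrapper: %d\n", i);

	/* After the refactor: identical expansion, one less indirection. */
	each_item(i, 0, 3)
		printf("direct:      %d\n", i);

	return 0;
}

Both loops expand to the same for statement, which is why the diff can s/tdp_mmu_for_each_pte/for_each_tdp_pte/ at every call site without changing the generated code.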