@@ -878,8 +878,8 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
 	return ret;
 }
 
-static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
-			  int writing, unsigned long *hpa,
+static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
+			  unsigned long gpa, int writing, unsigned long *hpa,
 			  struct kvm_memory_slot **memslot_p)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -898,7 +898,7 @@ static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
 	hva = __gfn_to_hva_memslot(memslot, gfn);
 
 	/* Try to find the host pte for that virtual address */
-	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+	ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
 	if (!ptep)
 		return H_TOO_HARD;
 	pte = kvmppc_read_update_linux_pte(ptep, writing);
@@ -933,16 +933,11 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
 	mmu_seq = kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	ret = kvmppc_get_hpa(vcpu, dest, 1, &pa, &memslot);
-	if (ret != H_SUCCESS)
-		return ret;
-
-	/* Check if we've been invalidated */
 	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
-	if (mmu_notifier_retry(kvm, mmu_seq)) {
-		ret = H_TOO_HARD;
+
+	ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &pa, &memslot);
+	if (ret != H_SUCCESS)
 		goto out_unlock;
-	}
 
 	/* Zero the page */
 	for (i = 0; i < SZ_4K; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES)
@@ -966,19 +961,14 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
 	mmu_seq = kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	ret = kvmppc_get_hpa(vcpu, dest, 1, &dest_pa, &dest_memslot);
-	if (ret != H_SUCCESS)
-		return ret;
-	ret = kvmppc_get_hpa(vcpu, src, 0, &src_pa, NULL);
+	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+	ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &dest_pa, &dest_memslot);
 	if (ret != H_SUCCESS)
-		return ret;
+		goto out_unlock;
 
-	/* Check if we've been invalidated */
-	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
-	if (mmu_notifier_retry(kvm, mmu_seq)) {
-		ret = H_TOO_HARD;
+	ret = kvmppc_get_hpa(vcpu, mmu_seq, src, 0, &src_pa, NULL);
+	if (ret != H_SUCCESS)
 		goto out_unlock;
-	}
 
 	/* Copy the page */
 	memcpy((void *)dest_pa, (void *)src_pa, SZ_4K);
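Note on why the explicit "Check if we've been invalidated" blocks can be dropped from both callers: find_kvm_host_pte() is assumed to fold the mmu_notifier_retry() check into the pte lookup itself, which means kvmppc_get_hpa() must now be entered with kvm->mmu_lock already held. A minimal sketch of those assumed semantics (not the verbatim helper, whose exact body lives elsewhere in this series):

/*
 * Sketch only: assumed semantics of find_kvm_host_pte(). The caller
 * holds kvm->mmu_lock and passes the mmu_notifier_seq value it sampled
 * (ordered by smp_rmb()) before starting the walk.
 */
static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
				       unsigned long ea, unsigned *shift)
{
	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
		"%s called with kvm mmu_lock not held\n", __func__);

	/* A racing invalidate would make the walk result stale: bail out. */
	if (mmu_notifier_retry(kvm, mmu_seq))
		return NULL;

	return __find_linux_pte(kvm->mm->pgd, ea, NULL, shift);
}

This is why kvmppc_do_h_page_init_zero() and kvmppc_do_h_page_init_copy() now take arch_spin_lock() before the first kvmppc_get_hpa() call and exit through out_unlock on any failure, rather than returning directly.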