@@ -948,7 +948,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
 		return ret;
 
 	/* Check if we've been invalidated */
-	raw_spin_lock(&kvm->mmu_lock.rlock);
+	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
 	if (mmu_notifier_retry(kvm, mmu_seq)) {
 		ret = H_TOO_HARD;
 		goto out_unlock;
@@ -960,7 +960,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
 	kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
 
 out_unlock:
-	raw_spin_unlock(&kvm->mmu_lock.rlock);
+	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
 	return ret;
 }
 
@@ -984,7 +984,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
 		return ret;
 
 	/* Check if we've been invalidated */
-	raw_spin_lock(&kvm->mmu_lock.rlock);
+	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
 	if (mmu_notifier_retry(kvm, mmu_seq)) {
 		ret = H_TOO_HARD;
 		goto out_unlock;
@@ -996,7 +996,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
 	kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
 
 out_unlock:
-	raw_spin_unlock(&kvm->mmu_lock.rlock);
+	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
 	return ret;
 }
 
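The change in both H_PAGE_INIT handlers is the same: instead of taking kvm->mmu_lock via raw_spin_lock() on the embedded rlock, the code now calls arch_spin_lock()/arch_spin_unlock() directly on the underlying arch_spinlock_t. A plausible reason, not stated in the hunks themselves, is that these handlers can be reached from real mode, where the lockdep, tracing, and preempt-accounting layers behind raw_spin_lock() are not safe to execute, whereas arch_spin_lock() is the bare, uninstrumented architecture lock. The sketch below shows how the lock types nest and mirrors the locking pattern from the diff; the struct layouts are simplified and check_invalidate_locked() is a hypothetical helper, not a function from this patch.

/*
 * Simplified view of how the lock types nest (field names as in mainline
 * <linux/spinlock_types.h>, other members omitted):
 *
 *   typedef struct spinlock {
 *           struct raw_spinlock rlock;      // kvm->mmu_lock.rlock
 *   } spinlock_t;
 *
 *   typedef struct raw_spinlock {
 *           arch_spinlock_t raw_lock;       // kvm->mmu_lock.rlock.raw_lock
 *   } raw_spinlock_t;
 *
 * Hypothetical helper mirroring the pattern in the diff: take the bare
 * arch lock, recheck the MMU notifier sequence, and return H_TOO_HARD so
 * the hypercall is retried in virtual mode if the memory map changed
 * underneath us.
 */
static long check_invalidate_locked(struct kvm *kvm, unsigned long mmu_seq)
{
	long ret = H_SUCCESS;

	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);	/* no lockdep, no tracing */
	if (mmu_notifier_retry(kvm, mmu_seq))
		ret = H_TOO_HARD;
	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);

	return ret;
}

Because arch_spin_lock() bypasses all debug bookkeeping, lockdep no longer sees these acquisitions; this kind of direct use is normally confined to code paths that cannot safely call the higher-level spinlock APIs.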