Skip to content

Commit 87013f9

Browse files
kvaneesh
authored and mpe committed
powerpc/kvm/book3s: switch from raw_spin_*lock to arch_spin_lock.
These functions can get called in realmode. Hence use low level arch_spin_lock which is safe to be called in realmode. Signed-off-by: Aneesh Kumar K.V <[email protected]> Signed-off-by: Michael Ellerman <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 15759cb commit 87013f9

File tree

1 file changed

+4
-4
lines changed

1 file changed

+4
-4
lines changed

arch/powerpc/kvm/book3s_hv_rm_mmu.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -948,7 +948,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
948948
return ret;
949949

950950
/* Check if we've been invalidated */
951-
raw_spin_lock(&kvm->mmu_lock.rlock);
951+
arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
952952
if (mmu_notifier_retry(kvm, mmu_seq)) {
953953
ret = H_TOO_HARD;
954954
goto out_unlock;
@@ -960,7 +960,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
960960
kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
961961

962962
out_unlock:
963-
raw_spin_unlock(&kvm->mmu_lock.rlock);
963+
arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
964964
return ret;
965965
}
966966

@@ -984,7 +984,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
984984
return ret;
985985

986986
/* Check if we've been invalidated */
987-
raw_spin_lock(&kvm->mmu_lock.rlock);
987+
arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
988988
if (mmu_notifier_retry(kvm, mmu_seq)) {
989989
ret = H_TOO_HARD;
990990
goto out_unlock;
@@ -996,7 +996,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
996996
kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
997997

998998
out_unlock:
999-
raw_spin_unlock(&kvm->mmu_lock.rlock);
999+
arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
10001000
return ret;
10011001
}
10021002

0 commit comments

Comments
 (0)