Skip to content

Commit dc89184

Browse files
kvaneeshmpe
authored and committed
powerpc/kvm/nested: Add helper to walk nested shadow linux page table.
The locking rules for walking a nested shadow Linux page table are different from those for a process-scoped table. Hence add a helper for the nested page table walk, and also add a check that we are holding the right locks. Signed-off-by: Aneesh Kumar K.V <[email protected]> Signed-off-by: Michael Ellerman <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 4b99412 commit dc89184

File tree

1 file changed

+21
-7
lines changed

1 file changed

+21
-7
lines changed

arch/powerpc/kvm/book3s_hv_nested.c

Lines changed: 21 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -750,6 +750,24 @@ static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
750750
return kvm->arch.nested_guests[lpid];
751751
}
752752

753+
static pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
754+
unsigned long ea, unsigned *hshift)
755+
{
756+
struct kvm_nested_guest *gp;
757+
pte_t *pte;
758+
759+
gp = kvmhv_find_nested(kvm, lpid);
760+
if (!gp)
761+
return NULL;
762+
763+
VM_WARN(!spin_is_locked(&kvm->mmu_lock),
764+
"%s called with kvm mmu_lock not held \n", __func__);
765+
pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);
766+
767+
return pte;
768+
}
769+
770+
753771
static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
754772
{
755773
return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
@@ -792,19 +810,15 @@ static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
792810
unsigned long clr, unsigned long set,
793811
unsigned long hpa, unsigned long mask)
794812
{
795-
struct kvm_nested_guest *gp;
796813
unsigned long gpa;
797814
unsigned int shift, lpid;
798815
pte_t *ptep;
799816

800817
gpa = n_rmap & RMAP_NESTED_GPA_MASK;
801818
lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
802-
gp = kvmhv_find_nested(kvm, lpid);
803-
if (!gp)
804-
return;
805819

806820
/* Find the pte */
807-
ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
821+
ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
808822
/*
809823
* If the pte is present and the pfn is still the same, update the pte.
810824
* If the pfn has changed then this is a stale rmap entry, the nested
@@ -854,7 +868,7 @@ static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
854868
return;
855869

856870
/* Find and invalidate the pte */
857-
ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
871+
ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
858872
/* Don't spuriously invalidate ptes if the pfn has changed */
859873
if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
860874
kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
@@ -921,7 +935,7 @@ static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
921935
int shift;
922936

923937
spin_lock(&kvm->mmu_lock);
924-
ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
938+
ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
925939
if (!shift)
926940
shift = PAGE_SHIFT;
927941
if (ptep && pte_present(*ptep)) {

0 commit comments

Comments
 (0)