Skip to content

Commit 3c164eb

Browse files
oupton authored and Marc Zyngier committed
KVM: arm64: nv: Do not block when unmapping stage-2 if disallowed
Right now the nested code allows unmap operations on a shadow stage-2 to block unconditionally. This is wrong in a couple places, such as a non-blocking MMU notifier or on the back of a sched_in() notifier as part of shadow MMU recycling. Carry through whether or not blocking is allowed to kvm_pgtable_stage2_unmap(). This 'fixes' an issue where stage-2 MMU reclaim would precipitate a stack overflow from a pile of kvm_sched_in() callbacks, all trying to recycle a stage-2 MMU. Signed-off-by: Oliver Upton <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Marc Zyngier <[email protected]>
1 parent 6ded46b commit 3c164eb

File tree

5 files changed

+17
-15
lines changed

5 files changed

+17
-15
lines changed

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -166,7 +166,8 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
166166
int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr);
167167
void __init free_hyp_pgds(void);
168168

169-
void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size);
169+
void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start,
170+
u64 size, bool may_block);
170171
void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end);
171172
void kvm_stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end);
172173

arch/arm64/include/asm/kvm_nested.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -124,7 +124,7 @@ extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
124124
struct kvm_s2_trans *trans);
125125
extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
126126
extern void kvm_nested_s2_wp(struct kvm *kvm);
127-
extern void kvm_nested_s2_unmap(struct kvm *kvm);
127+
extern void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block);
128128
extern void kvm_nested_s2_flush(struct kvm *kvm);
129129

130130
unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val);

arch/arm64/kvm/mmu.c

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -328,9 +328,10 @@ static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64
328328
may_block));
329329
}
330330

331-
void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
331+
void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start,
332+
u64 size, bool may_block)
332333
{
333-
__unmap_stage2_range(mmu, start, size, true);
334+
__unmap_stage2_range(mmu, start, size, may_block);
334335
}
335336

336337
void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
@@ -1015,7 +1016,7 @@ static void stage2_unmap_memslot(struct kvm *kvm,
10151016

10161017
if (!(vma->vm_flags & VM_PFNMAP)) {
10171018
gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
1018-
kvm_stage2_unmap_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
1019+
kvm_stage2_unmap_range(&kvm->arch.mmu, gpa, vm_end - vm_start, true);
10191020
}
10201021
hva = vm_end;
10211022
} while (hva < reg_end);
@@ -1042,7 +1043,7 @@ void stage2_unmap_vm(struct kvm *kvm)
10421043
kvm_for_each_memslot(memslot, bkt, slots)
10431044
stage2_unmap_memslot(kvm, memslot);
10441045

1045-
kvm_nested_s2_unmap(kvm);
1046+
kvm_nested_s2_unmap(kvm, true);
10461047

10471048
write_unlock(&kvm->mmu_lock);
10481049
mmap_read_unlock(current->mm);
@@ -1912,7 +1913,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
19121913
(range->end - range->start) << PAGE_SHIFT,
19131914
range->may_block);
19141915

1915-
kvm_nested_s2_unmap(kvm);
1916+
kvm_nested_s2_unmap(kvm, range->may_block);
19161917
return false;
19171918
}
19181919

@@ -2179,8 +2180,8 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
21792180
phys_addr_t size = slot->npages << PAGE_SHIFT;
21802181

21812182
write_lock(&kvm->mmu_lock);
2182-
kvm_stage2_unmap_range(&kvm->arch.mmu, gpa, size);
2183-
kvm_nested_s2_unmap(kvm);
2183+
kvm_stage2_unmap_range(&kvm->arch.mmu, gpa, size, true);
2184+
kvm_nested_s2_unmap(kvm, true);
21842185
write_unlock(&kvm->mmu_lock);
21852186
}
21862187

arch/arm64/kvm/nested.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -634,7 +634,7 @@ static struct kvm_s2_mmu *get_s2_mmu_nested(struct kvm_vcpu *vcpu)
634634

635635
/* Clear the old state */
636636
if (kvm_s2_mmu_valid(s2_mmu))
637-
kvm_stage2_unmap_range(s2_mmu, 0, kvm_phys_size(s2_mmu));
637+
kvm_stage2_unmap_range(s2_mmu, 0, kvm_phys_size(s2_mmu), false);
638638

639639
/*
640640
* The virtual VMID (modulo CnP) will be used as a key when matching
@@ -745,7 +745,7 @@ void kvm_nested_s2_wp(struct kvm *kvm)
745745
}
746746
}
747747

748-
void kvm_nested_s2_unmap(struct kvm *kvm)
748+
void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block)
749749
{
750750
int i;
751751

@@ -755,7 +755,7 @@ void kvm_nested_s2_unmap(struct kvm *kvm)
755755
struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
756756

757757
if (kvm_s2_mmu_valid(mmu))
758-
kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu));
758+
kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), may_block);
759759
}
760760
}
761761

arch/arm64/kvm/sys_regs.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2937,7 +2937,7 @@ static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
29372937
* Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the
29382938
* corresponding VMIDs.
29392939
*/
2940-
kvm_nested_s2_unmap(vcpu->kvm);
2940+
kvm_nested_s2_unmap(vcpu->kvm, true);
29412941

29422942
write_unlock(&vcpu->kvm->mmu_lock);
29432943

@@ -2989,7 +2989,7 @@ union tlbi_info {
29892989
static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu,
29902990
const union tlbi_info *info)
29912991
{
2992-
kvm_stage2_unmap_range(mmu, info->range.start, info->range.size);
2992+
kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true);
29932993
}
29942994

29952995
static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
@@ -3084,7 +3084,7 @@ static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu,
30843084
max_size = compute_tlb_inval_range(mmu, info->ipa.addr);
30853085
base_addr &= ~(max_size - 1);
30863086

3087-
kvm_stage2_unmap_range(mmu, base_addr, max_size);
3087+
kvm_stage2_unmap_range(mmu, base_addr, max_size, true);
30883088
}
30893089

30903090
static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,

0 commit comments

Comments (0)