
Commit 054056b

Merge branch kvm-arm64/misc into kvmarm/next
* kvm-arm64/misc:
  : Miscellaneous updates
  :
  : - Put an upper bound on the number of I-cache invalidations by
  :   cacheline to avoid soft lockups
  :
  : - Get rid of bogus reference count transfer for THP mappings
  :
  : - Do a local TLB invalidation on permission fault race
  :
  : - Fixes for page_fault_test KVM selftest
  :
  : - Add a tracepoint for detecting MMIO instructions unsupported by KVM
  KVM: arm64: Add tracepoint for MMIO accesses where ISV==0
  KVM: arm64: selftest: Perform ISB before reading PAR_EL1
  KVM: arm64: selftest: Add the missing .guest_prepare()
  KVM: arm64: Always invalidate TLB for stage-2 permission faults
  KVM: arm64: Do not transfer page refcount for THP adjustment
  KVM: arm64: Avoid soft lockups due to I-cache maintenance
  arm64: tlbflush: Rename MAX_TLBI_OPS
  KVM: arm64: Don't use kerneldoc comment for arm64_check_features()

Signed-off-by: Oliver Upton <[email protected]>
2 parents: 6465e26 + d11974d

8 files changed: 69 additions, 34 deletions

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 31 additions & 6 deletions
@@ -224,16 +224,41 @@ static inline void __clean_dcache_guest_page(void *va, size_t size)
     kvm_flush_dcache_to_poc(va, size);
 }
 
+static inline size_t __invalidate_icache_max_range(void)
+{
+    u8 iminline;
+    u64 ctr;
+
+    asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
+                                "movk %0, #0, lsl #16\n"
+                                "movk %0, #0, lsl #32\n"
+                                "movk %0, #0, lsl #48\n",
+                                ARM64_ALWAYS_SYSTEM,
+                                kvm_compute_final_ctr_el0)
+                 : "=r" (ctr));
+
+    iminline = SYS_FIELD_GET(CTR_EL0, IminLine, ctr) + 2;
+    return MAX_DVM_OPS << iminline;
+}
+
 static inline void __invalidate_icache_guest_page(void *va, size_t size)
 {
-    if (icache_is_aliasing()) {
-        /* any kind of VIPT cache */
+    /*
+     * VPIPT I-cache maintenance must be done from EL2. See comment in the
+     * nVHE flavor of __kvm_tlb_flush_vmid_ipa().
+     */
+    if (icache_is_vpipt() && read_sysreg(CurrentEL) != CurrentEL_EL2)
+        return;
+
+    /*
+     * Blow the whole I-cache if it is aliasing (i.e. VIPT) or the
+     * invalidation range exceeds our arbitrary limit on invalidations by
+     * cache line.
+     */
+    if (icache_is_aliasing() || size > __invalidate_icache_max_range())
        icache_inval_all_pou();
-    } else if (read_sysreg(CurrentEL) != CurrentEL_EL1 ||
-               !icache_is_vpipt()) {
-        /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
+    else
        icache_inval_pou((unsigned long)va, (unsigned long)va + size);
-    }
 }
 
 void kvm_set_way_flush(struct kvm_vcpu *vcpu);
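
For a feel for what the new cap evaluates to: CTR_EL0.IminLine is log2 of the smallest I-cache line size in 4-byte words, so the limit is MAX_DVM_OPS I-cache lines' worth of bytes. The following is a standalone sketch of the same arithmetic, assuming an aarch64 host where EL0 reads of CTR_EL0 are permitted (Linux sets SCTLR_EL1.UCT by default) and a MAX_DVM_OPS of 512 (PTRS_PER_PTE with 4K pages); it is illustrative, not the kernel code:

#include <stdint.h>
#include <stdio.h>

#define MAX_DVM_OPS 512UL   /* stand-in for PTRS_PER_PTE with 4K pages */

int main(void)
{
    uint64_t ctr;
    unsigned int iminline;

    /* CTR_EL0 is readable from EL0 when SCTLR_EL1.UCT is set. */
    asm volatile("mrs %0, ctr_el0" : "=r" (ctr));

    /* IminLine (bits [3:0]) is log2(words) of the smallest I-cache
     * line; +2 converts a word count into a byte shift. */
    iminline = (ctr & 0xf) + 2;

    printf("I-cache line: %u bytes, per-line invalidation cap: %lu bytes\n",
           1u << iminline, MAX_DVM_OPS << iminline);
    return 0;
}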

arch/arm64/include/asm/tlbflush.h

Lines changed: 4 additions & 4 deletions
@@ -333,7 +333,7 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
  * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
  * necessarily a performance improvement.
  */
-#define MAX_TLBI_OPS   PTRS_PER_PTE
+#define MAX_DVM_OPS    PTRS_PER_PTE
 
 /*
  * __flush_tlb_range_op - Perform TLBI operation upon a range
@@ -413,12 +413,12 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 
     /*
      * When not uses TLB range ops, we can handle up to
-     * (MAX_TLBI_OPS - 1) pages;
+     * (MAX_DVM_OPS - 1) pages;
      * When uses TLB range ops, we can handle up to
      * (MAX_TLBI_RANGE_PAGES - 1) pages.
      */
     if ((!system_supports_tlb_range() &&
-         (end - start) >= (MAX_TLBI_OPS * stride)) ||
+         (end - start) >= (MAX_DVM_OPS * stride)) ||
         pages >= MAX_TLBI_RANGE_PAGES) {
        flush_tlb_mm(vma->vm_mm);
        return;
@@ -451,7 +451,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 {
     unsigned long addr;
 
-    if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
+    if ((end - start) > (MAX_DVM_OPS * PAGE_SIZE)) {
        flush_tlb_all();
        return;
    }
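
The renamed constant still caps how many individual broadcast TLBI/DVM operations one range flush may issue before falling back to a full flush. Below is a hedged, userspace-compilable sketch of that decision; flush_whole_mm() and range_pages_cap are illustrative names rather than the kernel's, and the MAX_TLBI_RANGE_PAGES value is deliberately left as a parameter:

#include <stdbool.h>
#include <stdio.h>

#define MAX_DVM_OPS 512UL   /* stands in for PTRS_PER_PTE with 4K pages */

/* range_pages_cap stands in for MAX_TLBI_RANGE_PAGES; exact value omitted. */
static bool flush_whole_mm(unsigned long start, unsigned long end,
                           unsigned long stride, unsigned long pages,
                           bool have_tlb_range_ops,
                           unsigned long range_pages_cap)
{
    /* Without range TLBIs, one broadcast per page over a huge range
     * risks a soft lockup, so fall back to flushing the whole MM. */
    if (!have_tlb_range_ops && (end - start) >= MAX_DVM_OPS * stride)
        return true;

    /* Range TLBIs still have an architectural per-operation page limit. */
    return pages >= range_pages_cap;
}

int main(void)
{
    unsigned long stride = 4096, pages = 1024;
    unsigned long start = 0, end = pages * stride;

    /* 4MiB of 4KiB pages without range ops exceeds the 2MiB threshold. */
    printf("full flush? %d\n",
           flush_whole_mm(start, end, stride, pages, false, 1UL << 21));
    return 0;
}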

arch/arm64/kvm/hyp/pgtable.c

Lines changed: 1 addition & 1 deletion
@@ -1314,7 +1314,7 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
     ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level,
                                    KVM_PGTABLE_WALK_HANDLE_FAULT |
                                    KVM_PGTABLE_WALK_SHARED);
-    if (!ret)
+    if (!ret || ret == -EAGAIN)
        kvm_call_hyp(__kvm_tlb_flush_vmid_ipa_nsh, pgt->mmu, addr, level);
     return ret;
 }
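
For context on the -EAGAIN: under KVM_PGTABLE_WALK_SHARED, two vCPUs handling a permission fault on the same page race on a lock-free PTE update, and the loser reports -EAGAIN even though its TLB may still hold the old, more restrictive entry; hence the local invalidation now also runs on -EAGAIN. A minimal sketch of that update shape using C11 atomics; try_update_pte() is an illustrative stand-in, not the kernel helper:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Returns false if another walker updated the PTE first. */
static bool try_update_pte(_Atomic uint64_t *ptep, uint64_t old, uint64_t new)
{
    return atomic_compare_exchange_strong(ptep, &old, new);
}

int main(void)
{
    _Atomic uint64_t pte = 0x40000741;  /* illustrative PTE value */

    /* The first update wins; a second identical attempt loses. */
    return !(try_update_pte(&pte, 0x40000741, 0x400007c1) &&
             !try_update_pte(&pte, 0x40000741, 0x400007c1));
}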

arch/arm64/kvm/mmio.c

Lines changed: 3 additions & 1 deletion
@@ -135,6 +135,9 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
      * volunteered to do so, and bail out otherwise.
      */
     if (!kvm_vcpu_dabt_isvalid(vcpu)) {
+        trace_kvm_mmio_nisv(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
+                            kvm_vcpu_get_hfar(vcpu), fault_ipa);
+
         if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
                      &vcpu->kvm->arch.flags)) {
             run->exit_reason = KVM_EXIT_ARM_NISV;
@@ -143,7 +146,6 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
             return 0;
         }
 
-        kvm_pr_unimpl("Data abort outside memslots with no valid syndrome info\n");
         return -ENOSYS;
     }
 
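
The tracepoint fires on the ISV==0 path, i.e. when the ESR carries no usable instruction syndrome for the data abort, so KVM can neither emulate the access itself nor describe it to userspace beyond the raw fault information. A small sketch of that gate, using the architectural ISV bit position (bit [24] of a data-abort ESR); the ESR values in main() are illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_ISV (UINT64_C(1) << 24)   /* instruction syndrome valid */

static bool dabt_syndrome_valid(uint64_t esr)
{
    return esr & ESR_ELx_ISV;
}

int main(void)
{
    /* Illustrative ESR values: one with ISV set, one without. */
    printf("ISV=1: %d, ISV=0: %d\n",
           dabt_syndrome_valid(UINT64_C(0x93013045)),
           dabt_syndrome_valid(UINT64_C(0x92000045)));
    return 0;
}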

arch/arm64/kvm/mmu.c

Lines changed: 0 additions & 20 deletions
@@ -1298,28 +1298,8 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
     if (sz < PMD_SIZE)
        return PAGE_SIZE;
 
-    /*
-     * The address we faulted on is backed by a transparent huge
-     * page. However, because we map the compound huge page and
-     * not the individual tail page, we need to transfer the
-     * refcount to the head page. We have to be careful that the
-     * THP doesn't start to split while we are adjusting the
-     * refcounts.
-     *
-     * We are sure this doesn't happen, because mmu_invalidate_retry
-     * was successful and we are holding the mmu_lock, so if this
-     * THP is trying to split, it will be blocked in the mmu
-     * notifier before touching any of the pages, specifically
-     * before being able to call __split_huge_page_refcount().
-     *
-     * We can therefore safely transfer the refcount from PG_tail
-     * to PG_head and switch the pfn from a tail page to the head
-     * page accordingly.
-     */
     *ipap &= PMD_MASK;
-    kvm_release_pfn_clean(pfn);
     pfn &= ~(PTRS_PER_PMD - 1);
-    get_page(pfn_to_page(pfn));
     *pfnp = pfn;
 
     return PMD_SIZE;
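
With the refcount transfer gone, the surviving lines only align the fault back to the huge-page block: the IPA is masked to a PMD boundary and the pfn is rounded down to the head page of the backing THP. A standalone sketch of that arithmetic, assuming 4K pages (PTRS_PER_PMD == 512, PMD_SIZE == 2MiB) and example IPA/pfn values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define PTRS_PER_PMD 512UL                          /* 4K pages */
#define PMD_SIZE     (PTRS_PER_PMD << PAGE_SHIFT)   /* 2 MiB */
#define PMD_MASK     (~(PMD_SIZE - 1))

int main(void)
{
    uint64_t ipa = 0x40123000;  /* example faulting IPA inside a THP */
    uint64_t pfn = 0x89abc;     /* example host pfn backing that IPA */

    ipa &= PMD_MASK;                /* block-aligned IPA */
    pfn &= ~(PTRS_PER_PMD - 1);     /* head-page pfn of the 2MiB block */

    printf("map IPA %#lx -> pfn %#lx as one PMD-level block\n",
           (unsigned long)ipa, (unsigned long)pfn);
    return 0;
}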

arch/arm64/kvm/sys_regs.c

Lines changed: 1 addition & 1 deletion
@@ -1228,7 +1228,7 @@ static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
     return arm64_ftr_safe_value(&kvm_ftr, new, cur);
 }
 
-/**
+/*
  * arm64_check_features() - Check if a feature register value constitutes
  * a subset of features indicated by the idreg's KVM sanitised limit.
  *

arch/arm64/kvm/trace_arm.h

Lines changed: 25 additions & 0 deletions
@@ -136,6 +136,31 @@ TRACE_EVENT(kvm_mmio_emulate,
              __entry->vcpu_pc, __entry->instr, __entry->cpsr)
 );
 
+TRACE_EVENT(kvm_mmio_nisv,
+    TP_PROTO(unsigned long vcpu_pc, unsigned long esr,
+             unsigned long far, unsigned long ipa),
+    TP_ARGS(vcpu_pc, esr, far, ipa),
+
+    TP_STRUCT__entry(
+        __field( unsigned long, vcpu_pc )
+        __field( unsigned long, esr )
+        __field( unsigned long, far )
+        __field( unsigned long, ipa )
+    ),
+
+    TP_fast_assign(
+        __entry->vcpu_pc = vcpu_pc;
+        __entry->esr = esr;
+        __entry->far = far;
+        __entry->ipa = ipa;
+    ),
+
+    TP_printk("ipa %#016lx, esr %#016lx, far %#016lx, pc %#016lx",
+              __entry->ipa, __entry->esr,
+              __entry->far, __entry->vcpu_pc)
+);
+
+
 TRACE_EVENT(kvm_set_way_flush,
     TP_PROTO(unsigned long vcpu_pc, bool cache),
     TP_ARGS(vcpu_pc, cache),

tools/testing/selftests/kvm/aarch64/page_fault_test.c

Lines changed: 4 additions & 1 deletion
@@ -135,8 +135,8 @@ static void guest_at(void)
     uint64_t par;
 
     asm volatile("at s1e1r, %0" :: "r" (guest_test_memory));
-    par = read_sysreg(par_el1);
     isb();
+    par = read_sysreg(par_el1);
 
     /* Bit 1 indicates whether the AT was successful */
     GUEST_ASSERT_EQ(par & 1, 0);
@@ -842,6 +842,7 @@ static void help(char *name)
     .name = SCAT2(ro_memslot_no_syndrome, _access), \
     .data_memslot_flags = KVM_MEM_READONLY, \
     .pt_memslot_flags = KVM_MEM_READONLY, \
+    .guest_prepare = { _PREPARE(_access) }, \
     .guest_test = _access, \
     .fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
     .expected_events = { .fail_vcpu_runs = 1 }, \
@@ -865,6 +866,7 @@ static void help(char *name)
     .name = SCAT2(ro_memslot_no_syn_and_dlog, _access), \
     .data_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
     .pt_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
+    .guest_prepare = { _PREPARE(_access) }, \
     .guest_test = _access, \
     .guest_test_check = { _test_check }, \
     .fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
@@ -894,6 +896,7 @@ static void help(char *name)
     .data_memslot_flags = KVM_MEM_READONLY, \
     .pt_memslot_flags = KVM_MEM_READONLY, \
     .mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
+    .guest_prepare = { _PREPARE(_access) }, \
     .guest_test = _access, \
     .uffd_data_handler = _uffd_data_handler, \
     .uffd_pt_handler = uffd_pt_handler, \
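
On the first hunk: the result of an AT instruction only becomes visible in PAR_EL1 after a context synchronization event, so the ISB has to sit between the AT and the PAR_EL1 read rather than after it. A minimal sketch of the corrected sequence as guest/EL1 code (plain inline asm instead of the selftest's read_sysreg()/isb() helpers); PAR_EL1.F, bit [0], reads 0 on a successful translation:

#include <stdint.h>

/* Translate a VA as if for a stage-1 EL1 read and return PAR_EL1.
 * Only meaningful when executed at EL1, e.g. as selftest guest code. */
static inline uint64_t at_s1e1r(uint64_t va)
{
    uint64_t par;

    asm volatile("at s1e1r, %0" :: "r" (va));
    asm volatile("isb");                          /* make the AT result visible */
    asm volatile("mrs %0, par_el1" : "=r" (par));
    return par;                                   /* bit [0] set => translation aborted */
}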
