Commit a040adf

Merge branch kvm-arm64/misc into kvmarm/next
* kvm-arm64/misc:
  : Miscellaneous updates
  :
  : - Fix handling of features w/ nonzero safe values in set_id_regs
  :   selftest
  :
  : - Cleanup the unused kern_hyp_va() asm macro
  :
  : - Differentiate nVHE and hVHE in boot-time message
  :
  : - Several selftests cleanups
  :
  : - Drop bogus return value from kvm_arch_create_vm_debugfs()
  :
  : - Make save/restore of SPE and TRBE control registers affect EL1 state
  :   in hVHE mode
  :
  : - Typos
  KVM: arm64: Fix TRFCR_EL1/PMSCR_EL1 access in hVHE mode
  KVM: selftests: aarch64: Remove unused functions from vpmu test
  KVM: arm64: Fix typos
  KVM: Get rid of return value from kvm_arch_create_vm_debugfs()
  KVM: selftests: Print timer ctl register in ISTATUS assertion
  KVM: selftests: Fix GUEST_PRINTF() format warnings in ARM code
  KVM: arm64: removed unused kern_hyp_va asm macro
  KVM: arm64: add comments to __kern_hyp_va
  KVM: arm64: print Hyp mode
  KVM: arm64: selftests: Handle feature fields with nonzero minimum value correctly

Signed-off-by: Oliver Upton <[email protected]>
2 parents 262cd16 + 9a3bfb2 commit a040adf

21 files changed: 66 additions, 89 deletions


arch/arm64/include/asm/kvm_hyp.h

Lines changed: 1 addition & 1 deletion
@@ -70,7 +70,7 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 /*
  * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
  * static inline can allow the compiler to out-of-line this. KVM always wants
- * the macro version as its always inlined.
+ * the macro version as it's always inlined.
  */
 #define __kvm_swab32(x)	___constant_swab32(x)

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 20 additions & 26 deletions
@@ -53,27 +53,6 @@
 
 #include <asm/alternative.h>
 
-/*
- * Convert a kernel VA into a HYP VA.
- * reg: VA to be converted.
- *
- * The actual code generation takes place in kvm_update_va_mask, and
- * the instructions below are only there to reserve the space and
- * perform the register allocation (kvm_update_va_mask uses the
- * specific registers encoded in the instructions).
- */
-.macro kern_hyp_va	reg
-#ifndef __KVM_VHE_HYPERVISOR__
-alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
-	and	\reg, \reg, #1		/* mask with va_mask */
-	ror	\reg, \reg, #1		/* rotate to the first tag bit */
-	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
-	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
-	ror	\reg, \reg, #63		/* rotate back */
-alternative_cb_end
-#endif
-.endm
-
 /*
  * Convert a hypervisor VA to a PA
  * reg: hypervisor address to be converted in place
@@ -127,14 +106,29 @@ void kvm_apply_hyp_relocations(void);
 
 #define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)
 
+/*
+ * Convert a kernel VA into a HYP VA.
+ *
+ * Can be called from hyp or non-hyp context.
+ *
+ * The actual code generation takes place in kvm_update_va_mask(), and
+ * the instructions below are only there to reserve the space and
+ * perform the register allocation (kvm_update_va_mask() uses the
+ * specific registers encoded in the instructions).
+ */
 static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 {
+	/*
+	 * This #ifndef is an optimisation for when this is called from VHE hyp
+	 * context. When called from a VHE non-hyp context, kvm_update_va_mask() will
+	 * replace the instructions with `nop`s.
+	 */
 #ifndef __KVM_VHE_HYPERVISOR__
-	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
-				    "ror %0, %0, #1\n"
-				    "add %0, %0, #0\n"
-				    "add %0, %0, #0, lsl 12\n"
-				    "ror %0, %0, #63\n",
+	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"		/* mask with va_mask */
+				    "ror %0, %0, #1\n"		/* rotate to the first tag bit */
+				    "add %0, %0, #0\n"		/* insert the low 12 bits of the tag */
+				    "add %0, %0, #0, lsl 12\n"	/* insert the top 12 bits of the tag */
+				    "ror %0, %0, #63\n",	/* rotate back */
 				    ARM64_ALWAYS_SYSTEM,
 				    kvm_update_va_mask)
 		     : "+r" (v));

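As a side note on how the patched sequence behaves: kvm_update_va_mask() fills in a mask and a tag at boot, and the and/ror/add/add/ror instructions end up computing roughly (va & va_mask) | tag. The standalone sketch below models that effect in plain C; the mask and tag values and the model_kern_hyp_va() helper are invented for illustration and are not taken from the kernel.

/* Standalone model of the kern_hyp_va transformation (illustrative only).
 * The real mask and tag are computed at boot by kvm_update_va_mask(); the
 * values below are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

static const uint64_t example_va_mask = (1ULL << 48) - 1; /* keep the low bits */
static const uint64_t example_tag     = 0xa5ULL << 48;    /* hypothetical hyp VA tag */

static uint64_t model_kern_hyp_va(uint64_t va)
{
	/* Net effect of the patched and/ror/add/add/ror sequence:
	 * drop the kernel VA tag bits, then insert the hyp VA tag. */
	return (va & example_va_mask) | example_tag;
}

int main(void)
{
	uint64_t kva = 0xffff800012345678ULL;

	printf("kernel VA 0x%016llx -> hyp VA 0x%016llx\n",
	       (unsigned long long)kva,
	       (unsigned long long)model_kern_hyp_va(kva));
	return 0;
}
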
arch/arm64/kvm/arch_timer.c

Lines changed: 1 addition & 1 deletion
@@ -745,7 +745,7 @@ static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
 	WARN_ON_ONCE(ret);
 
 	/*
-	 * The virtual offset behaviour is "interresting", as it
+	 * The virtual offset behaviour is "interesting", as it
 	 * always applies when HCR_EL2.E2H==0, but only when
 	 * accessed from EL1 when HCR_EL2.E2H==1. So make sure we
 	 * track E2H when putting the HV timer in "direct" mode.

arch/arm64/kvm/arm.c

Lines changed: 2 additions & 1 deletion
@@ -2591,7 +2591,8 @@ static __init int kvm_arm_init(void)
 	} else if (in_hyp_mode) {
 		kvm_info("VHE mode initialized successfully\n");
 	} else {
-		kvm_info("Hyp mode initialized successfully\n");
+		char mode = cpus_have_final_cap(ARM64_KVM_HVHE) ? 'h' : 'n';
+		kvm_info("Hyp mode (%cVHE) initialized successfully\n", mode);
 	}
 
 	/*

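For illustration, a minimal userspace sketch of the resulting boot message; the hvhe flag stands in for cpus_have_final_cap(ARM64_KVM_HVHE), and in the kernel kvm_info() adds its own log prefix:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool hvhe = true;	/* stand-in for cpus_have_final_cap(ARM64_KVM_HVHE) */
	char mode = hvhe ? 'h' : 'n';

	/* Prints "Hyp mode (hVHE) initialized successfully" (or nVHE). */
	printf("Hyp mode (%cVHE) initialized successfully\n", mode);
	return 0;
}
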
arch/arm64/kvm/fpsimd.c

Lines changed: 1 addition & 1 deletion
@@ -117,7 +117,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 }
 
 /*
- * Called just before entering the guest once we are no longer preemptable
+ * Called just before entering the guest once we are no longer preemptible
  * and interrupts are disabled. If we have managed to run anything using
  * FP while we were preemptible (such as off the back of an interrupt),
  * then neither the host nor the guest own the FP hardware (and it was the

arch/arm64/kvm/hyp/nvhe/debug-sr.c

Lines changed: 6 additions & 6 deletions
@@ -31,8 +31,8 @@ static void __debug_save_spe(u64 *pmscr_el1)
 		return;
 
 	/* Yes; save the control register and disable data generation */
-	*pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
-	write_sysreg_s(0, SYS_PMSCR_EL1);
+	*pmscr_el1 = read_sysreg_el1(SYS_PMSCR);
+	write_sysreg_el1(0, SYS_PMSCR);
 	isb();
 
 	/* Now drain all buffered data to memory */
@@ -48,7 +48,7 @@ static void __debug_restore_spe(u64 pmscr_el1)
 	isb();
 
 	/* Re-enable data generation */
-	write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
+	write_sysreg_el1(pmscr_el1, SYS_PMSCR);
 }
 
 static void __debug_save_trace(u64 *trfcr_el1)
@@ -63,8 +63,8 @@ static void __debug_save_trace(u64 *trfcr_el1)
 	 * Since access to TRFCR_EL1 is trapped, the guest can't
 	 * modify the filtering set by the host.
 	 */
-	*trfcr_el1 = read_sysreg_s(SYS_TRFCR_EL1);
-	write_sysreg_s(0, SYS_TRFCR_EL1);
+	*trfcr_el1 = read_sysreg_el1(SYS_TRFCR);
+	write_sysreg_el1(0, SYS_TRFCR);
 	isb();
 	/* Drain the trace buffer to memory */
 	tsb_csync();
@@ -76,7 +76,7 @@ static void __debug_restore_trace(u64 trfcr_el1)
 		return;
 
 	/* Restore trace filter controls */
-	write_sysreg_s(trfcr_el1, SYS_TRFCR_EL1);
+	write_sysreg_el1(trfcr_el1, SYS_TRFCR);
 }
 
 void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)

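The substitution above matters because, in hVHE mode, the hypervisor runs at EL2 with HCR_EL2.E2H set, and FEAT_VHE then redirects unsuffixed *_EL1 system-register accesses made at EL2 to the EL2 registers. The read_sysreg_el1()/write_sysreg_el1() accessors pick the _EL12 encoding in that case, so the save/restore really operates on EL1 state. A standalone C sketch of that redirection follows; the struct, function names and values are invented for the example and only model the behaviour:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of one CPU's SPE control registers. */
struct cpu_model {
	bool e2h;		/* HCR_EL2.E2H */
	uint64_t pmscr_el1;	/* architectural PMSCR_EL1 */
	uint64_t pmscr_el2;	/* architectural PMSCR_EL2 */
};

/* Model of read_sysreg_s(SYS_PMSCR_EL1) executed at EL2:
 * with E2H == 1 the access is redirected to PMSCR_EL2. */
static uint64_t read_unsuffixed_pmscr_el1(const struct cpu_model *c)
{
	return c->e2h ? c->pmscr_el2 : c->pmscr_el1;
}

/* Model of read_sysreg_el1(SYS_PMSCR): uses the PMSCR_EL12 encoding when
 * E2H is set, so it always reaches the EL1 register. */
static uint64_t read_pmscr_el1_accessor(const struct cpu_model *c)
{
	return c->pmscr_el1;
}

int main(void)
{
	struct cpu_model hvhe = { .e2h = true, .pmscr_el1 = 0x1, .pmscr_el2 = 0x2 };

	printf("unsuffixed EL1 access sees 0x%llx (EL2 state)\n",
	       (unsigned long long)read_unsuffixed_pmscr_el1(&hvhe));
	printf("read_sysreg_el1() sees     0x%llx (EL1 state)\n",
	       (unsigned long long)read_pmscr_el1_accessor(&hvhe));
	return 0;
}
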
arch/arm64/kvm/hyp/nvhe/host.S

Lines changed: 1 addition & 1 deletion
@@ -110,7 +110,7 @@ SYM_FUNC_END(__host_enter)
  * u64 elr, u64 par);
  */
 SYM_FUNC_START(__hyp_do_panic)
-	/* Prepare and exit to the host's panic funciton. */
+	/* Prepare and exit to the host's panic function. */
 	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
 		      PSR_MODE_EL1h)
 	msr	spsr_el2, lr

arch/arm64/kvm/hyp/nvhe/mm.c

Lines changed: 2 additions & 2 deletions
@@ -155,7 +155,7 @@ int hyp_back_vmemmap(phys_addr_t back)
 		start = hyp_memory[i].base;
 		start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
 		/*
-		 * The begining of the hyp_vmemmap region for the current
+		 * The beginning of the hyp_vmemmap region for the current
 		 * memblock may already be backed by the page backing the end
 		 * the previous region, so avoid mapping it twice.
 		 */
@@ -408,7 +408,7 @@ static void *admit_host_page(void *arg)
 	return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
 }
 
-/* Refill our local memcache by poping pages from the one provided by the host. */
+/* Refill our local memcache by popping pages from the one provided by the host. */
 int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
 		    struct kvm_hyp_memcache *host_mc)
 {

arch/arm64/kvm/inject_fault.c

Lines changed: 1 addition & 1 deletion
@@ -134,7 +134,7 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
 	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
 		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
 	} else {
-		/* no need to shuffle FS[4] into DFSR[10] as its 0 */
+		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
 		fsr = DFSR_FSC_EXTABT_nLPAE;
 	}

arch/arm64/kvm/vgic/vgic-init.c

Lines changed: 1 addition & 1 deletion
@@ -309,7 +309,7 @@ int vgic_init(struct kvm *kvm)
 	vgic_lpi_translation_cache_init(kvm);
 
 	/*
-	 * If we have GICv4.1 enabled, unconditionnaly request enable the
+	 * If we have GICv4.1 enabled, unconditionally request enable the
 	 * v4 support so that we get HW-accelerated vSGIs. Otherwise, only
 	 * enable it if we present a virtual ITS to the guest.
	 */
