
Commit 9d0c8e7

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
 "More fixes for ARM and x86"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: LAPIC: Advancing the timer expiration on guest initiated write
  KVM: x86/mmu: Skip !MMU-present SPTEs when removing SP in exclusive mode
  KVM: kvmclock: Fix vCPUs > 64 can't be online/hotpluged
  kvm: x86: annotate RCU pointers
  KVM: arm64: Fix exclusive limit for IPA size
  KVM: arm64: Reject VM creation when the default IPA size is unsupported
  KVM: arm64: Ensure I-cache isolation between vcpus of a same VM
  KVM: arm64: Don't use cbz/adr with external symbols
  KVM: arm64: Fix range alignment when walking page tables
  KVM: arm64: Workaround firmware wrongly advertising GICv2-on-v3 compatibility
  KVM: arm64: Rename __vgic_v3_get_ich_vtr_el2() to __vgic_v3_get_gic_config()
  KVM: arm64: Don't access PMSELR_EL0/PMUSERENR_EL0 when no PMU is available
  KVM: arm64: Turn kvm_arm_support_pmu_v3() into a static key
  KVM: arm64: Fix nVHE hyp panic host context restore
  KVM: arm64: Avoid corrupting vCPU context register in guest exit
  KVM: arm64: nvhe: Save the SPE context early
  kvm: x86: use NULL instead of using plain integer as pointer
  KVM: SVM: Connect 'npt' module param to KVM's internal 'npt_enabled'
  KVM: x86: Ensure deadline timer has truly expired before posting its IRQ
2 parents 50eb842 + 35737d2 commit 9d0c8e7

Showing 27 changed files with 194 additions and 81 deletions.

Documentation/virt/kvm/api.rst

Lines changed: 3 additions & 0 deletions
@@ -182,6 +182,9 @@ is dependent on the CPU capability and the kernel configuration. The limit can
 be retrieved using KVM_CAP_ARM_VM_IPA_SIZE of the KVM_CHECK_EXTENSION
 ioctl() at run-time.
 
+Creation of the VM will fail if the requested IPA size (whether it is
+implicit or explicit) is unsupported on the host.
+
 Please note that configuring the IPA size does not affect the capability
 exposed by the guest CPUs in ID_AA64MMFR0_EL1[PARange]. It only affects
 size of the address translated by the stage2 level (guest physical to
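
For context, the IPA size is selected through the type argument of KVM_CREATE_VM, and this series makes an unsupported request fail at creation time. A minimal user-space sketch (not part of this diff; error handling trimmed, assuming an arm64 host exposing /dev/kvm):

/* Hedged sketch: query the maximum supported IPA size and request it
 * explicitly at VM creation, using only documented uAPI symbols
 * (KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE, KVM_VM_TYPE_ARM_IPA_SIZE). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        if (kvm < 0)
                return 1;

        int max_ipa = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
        if (max_ipa <= 0) {
                fprintf(stderr, "IPA size selection not supported\n");
                return 1;
        }

        /* With this fix, an unsupported size makes KVM_CREATE_VM fail
         * instead of producing a VM the host cannot actually run. */
        int vm = ioctl(kvm, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(max_ipa));
        if (vm < 0)
                perror("KVM_CREATE_VM");

        return vm < 0;
}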

arch/arm64/include/asm/kvm_asm.h

Lines changed: 4 additions & 4 deletions
@@ -47,10 +47,10 @@
 #define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context 2
 #define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa 3
 #define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid 4
-#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid 5
+#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context 5
 #define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff 6
 #define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs 7
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2 8
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config 8
 #define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr 9
 #define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr 10
 #define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs 11
@@ -183,16 +183,16 @@ DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
 
 extern void __kvm_flush_vm_context(void);
+extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
                                      int level);
 extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);
 
 extern void __kvm_timer_set_cntvoff(u64 cntvoff);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
-extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern u64 __vgic_v3_get_gic_config(void);
 extern u64 __vgic_v3_read_vmcr(void);
 extern void __vgic_v3_write_vmcr(u32 vmcr);
 extern void __vgic_v3_init_lrs(void);
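
The function id and the extern prototype have to move together: under nVHE, kvm_call_hyp() turns the named symbol into an HVC whose first argument is the matching __KVM_HOST_SMCCC_FUNC_* value, which is why the #define and the declaration are renamed in one go. A simplified sketch of that existing machinery (the real definitions live in kvm_asm.h and kvm_host.h; per-CPU offsets and the VHE fast path are omitted):

/* Sketch only: how a host-side kvm_call_hyp() becomes an nVHE hypercall. */
#define KVM_HOST_SMCCC_ID(id)                                           \
        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
                           ARM_SMCCC_SMC_64,                            \
                           ARM_SMCCC_OWNER_VENDOR_HYP,                  \
                           (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)

#define kvm_call_hyp_nvhe(f, ...)                                       \
({                                                                      \
        struct arm_smccc_res res;                                       \
                                                                        \
        arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f), ##__VA_ARGS__, &res); \
        WARN_ON(res.a0 != SMCCC_RET_SUCCESS);                           \
                                                                        \
        res.a1;                                                         \
})

/* So kvm_call_hyp(__kvm_flush_cpu_context, mmu) issues an HVC with function
 * id 5, dispatched at EL2 by handle___kvm_flush_cpu_context(). */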

arch/arm64/include/asm/kvm_hyp.h

Lines changed: 7 additions & 1 deletion
@@ -83,6 +83,11 @@ void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
 void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
 void __debug_switch_to_host(struct kvm_vcpu *vcpu);
 
+#ifdef __KVM_NVHE_HYPERVISOR__
+void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
+void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
+#endif
+
 void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
 void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
 
@@ -97,7 +102,8 @@ bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);
 
 void __noreturn hyp_panic(void);
 #ifdef __KVM_NVHE_HYPERVISOR__
-void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
+void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
+                               u64 elr, u64 par);
 #endif
 
 #endif /* __ARM64_KVM_HYP_H__ */

arch/arm64/kernel/image-vars.h

Lines changed: 3 additions & 0 deletions
@@ -101,6 +101,9 @@ KVM_NVHE_ALIAS(__stop___kvm_ex_table);
 /* Array containing bases of nVHE per-CPU memory regions. */
 KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base);
 
+/* PMU available static key */
+KVM_NVHE_ALIAS(kvm_arm_pmu_available);
+
 #endif /* CONFIG_KVM */
 
 #endif /* __ARM64_KERNEL_IMAGE_VARS_H */

arch/arm64/kvm/arm.c

Lines changed: 6 additions & 1 deletion
@@ -385,11 +385,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
 
         /*
+         * We guarantee that both TLBs and I-cache are private to each
+         * vcpu. If detecting that a vcpu from the same VM has
+         * previously run on the same physical CPU, call into the
+         * hypervisor code to nuke the relevant contexts.
+         *
          * We might get preempted before the vCPU actually runs, but
          * over-invalidation doesn't affect correctness.
          */
         if (*last_ran != vcpu->vcpu_id) {
-                kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
+                kvm_call_hyp(__kvm_flush_cpu_context, mmu);
                 *last_ran = vcpu->vcpu_id;
         }

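The renamed hypercall is expected to do more than the old local TLB flush: per the "Ensure I-cache isolation between vcpus of a same VM" subject, it must also invalidate the I-cache on that physical CPU. A hedged sketch of the nVHE-side helper, built from the existing helpers in arch/arm64/kvm/hyp/nvhe/tlb.c (the exact body in this merge may differ slightly):

/* Sketch only: nuke this CPU's TLB and I-cache for the given stage-2 MMU. */
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
        struct tlb_inv_context cxt;

        __tlb_switch_to_guest(mmu, &cxt);       /* install the guest's VMID */

        __tlbi(vmalle1);                        /* local stage-1&2 TLB flush */
        asm volatile("ic iallu");               /* invalidate the I-cache */
        dsb(nsh);
        isb();

        __tlb_switch_to_host(&cxt);
}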
arch/arm64/kvm/hyp/entry.S

Lines changed: 5 additions & 3 deletions
@@ -85,16 +85,18 @@ SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
 
         // If the hyp context is loaded, go straight to hyp_panic
         get_loaded_vcpu x0, x1
-        cbz     x0, hyp_panic
+        cbnz    x0, 1f
+        b       hyp_panic
 
+1:
         // The hyp context is saved so make sure it is restored to allow
         // hyp_panic to run at hyp and, subsequently, panic to run in the host.
         // This makes use of __guest_exit to avoid duplication but sets the
         // return address to tail call into hyp_panic. As a side effect, the
         // current state is saved to the guest context but it will only be
         // accurate if the guest had been completely restored.
         adr_this_cpu x0, kvm_hyp_ctxt, x1
-        adr     x1, hyp_panic
+        adr_l   x1, hyp_panic
         str     x1, [x0, #CPU_XREG_OFFSET(30)]
 
         get_vcpu_ptr x1, x0
@@ -146,7 +148,7 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
         // Now restore the hyp regs
         restore_callee_saved_regs x2
 
-        set_loaded_vcpu xzr, x1, x2
+        set_loaded_vcpu xzr, x2, x3
 
 alternative_if ARM64_HAS_RAS_EXTN
         // If we have the RAS extensions we can consume a pending error

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 6 additions & 3 deletions
@@ -90,15 +90,18 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
          * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
          * EL1 instead of being trapped to EL2.
          */
-        write_sysreg(0, pmselr_el0);
-        write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
+        if (kvm_arm_support_pmu_v3()) {
+                write_sysreg(0, pmselr_el0);
+                write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
+        }
         write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
 }
 
 static inline void __deactivate_traps_common(void)
 {
         write_sysreg(0, hstr_el2);
-        write_sysreg(0, pmuserenr_el0);
+        if (kvm_arm_support_pmu_v3())
+                write_sysreg(0, pmuserenr_el0);
 }
 
 static inline void ___activate_traps(struct kvm_vcpu *vcpu)
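
This check is only cheap and safe in hyp code because the series turns kvm_arm_support_pmu_v3() into a static key ("Turn kvm_arm_support_pmu_v3() into a static key"), which is also why image-vars.h now aliases kvm_arm_pmu_available into the nVHE object. A plausible shape for the helper, sketched with the standard static_branch API (the in-tree placement of the declaration may differ):

/* Sketch: static-key-backed capability check, patched at boot rather than
 * read from host memory at EL2. */
#include <linux/jump_label.h>

DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static __always_inline bool kvm_arm_support_pmu_v3(void)
{
        return static_branch_likely(&kvm_arm_pmu_available);
}

/* Host-side PMU probe code would then do, once a PMU is found:
 *         static_branch_enable(&kvm_arm_pmu_available);
 */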

arch/arm64/kvm/hyp/nvhe/debug-sr.c

Lines changed: 10 additions & 2 deletions
@@ -58,16 +58,24 @@ static void __debug_restore_spe(u64 pmscr_el1)
         write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
 }
 
-void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
+void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
 {
         /* Disable and flush SPE data generation */
         __debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
+}
+
+void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
+{
         __debug_switch_to_guest_common(vcpu);
 }
 
-void __debug_switch_to_host(struct kvm_vcpu *vcpu)
+void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
 {
         __debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
+}
+
+void __debug_switch_to_host(struct kvm_vcpu *vcpu)
+{
         __debug_switch_to_host_common(vcpu);
 }

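Splitting the SPE save/restore out of the debug switch lets the nVHE world switch drain and disable the profiling buffer before the EL1&0 translation regime is replaced with the guest's, and re-enable it only once the host context is back. A simplified ordering sketch of the caller (run_guest_once() is an illustrative wrapper, not an in-tree symbol; the surrounding helper names are the existing ones in arch/arm64/kvm/hyp/nvhe/switch.c, with PMU/RAS/FP bookkeeping omitted):

/* Sketch only: where the new hooks are meant to sit in the nVHE run path. */
static void run_guest_once(struct kvm_vcpu *vcpu,
                           struct kvm_cpu_context *host_ctxt)
{
        struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;

        __sysreg_save_state_nvhe(host_ctxt);
        /* SPE must stop tracing before stage 1&2 start pointing at the guest. */
        __debug_save_host_buffers_nvhe(vcpu);

        __activate_traps(vcpu);
        __sysreg_restore_state_nvhe(guest_ctxt);
        __debug_switch_to_guest(vcpu);

        __guest_enter(vcpu);                    /* returns on guest exit */

        __sysreg_save_state_nvhe(guest_ctxt);
        __deactivate_traps(vcpu);
        __sysreg_restore_state_nvhe(host_ctxt);

        __debug_switch_to_host(vcpu);
        /* Only after the host sysregs are back may SPE write host buffers. */
        __debug_restore_host_buffers_nvhe(vcpu);
}
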
arch/arm64/kvm/hyp/nvhe/host.S

Lines changed: 8 additions & 7 deletions
@@ -71,7 +71,8 @@ SYM_FUNC_START(__host_enter)
 SYM_FUNC_END(__host_enter)
 
 /*
- * void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
+ * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
+ *                                u64 elr, u64 par);
  */
 SYM_FUNC_START(__hyp_do_panic)
         /* Prepare and exit to the host's panic funciton. */
@@ -82,9 +83,11 @@ SYM_FUNC_START(__hyp_do_panic)
         hyp_kimg_va lr, x6
         msr     elr_el2, lr
 
-        /* Set the panic format string. Use the, now free, LR as scratch. */
-        ldr     lr, =__hyp_panic_string
-        hyp_kimg_va lr, x6
+        mov     x29, x0
+
+        /* Load the format string into x0 and arguments into x1-7 */
+        ldr     x0, =__hyp_panic_string
+        hyp_kimg_va x0, x6
 
         /* Load the format arguments into x1-7. */
         mov     x6, x3
@@ -94,9 +97,7 @@ SYM_FUNC_START(__hyp_do_panic)
         mrs     x5, hpfar_el2
 
         /* Enter the host, conditionally restoring the host context. */
-        cmp     x0, xzr
-        mov     x0, lr
-        b.eq    __host_enter_without_restoring
+        cbz     x29, __host_enter_without_restoring
         b       __host_enter_for_panic
 SYM_FUNC_END(__hyp_do_panic)

arch/arm64/kvm/hyp/nvhe/hyp-main.c

Lines changed: 6 additions & 6 deletions
@@ -46,11 +46,11 @@ static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
         __kvm_tlb_flush_vmid(kern_hyp_va(mmu));
 }
 
-static void handle___kvm_tlb_flush_local_vmid(struct kvm_cpu_context *host_ctxt)
+static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
 {
         DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
 
-        __kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
+        __kvm_flush_cpu_context(kern_hyp_va(mmu));
 }
 
 static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
@@ -67,9 +67,9 @@ static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
         write_sysreg_el2(tmp, SYS_SCTLR);
 }
 
-static void handle___vgic_v3_get_ich_vtr_el2(struct kvm_cpu_context *host_ctxt)
+static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
 {
-        cpu_reg(host_ctxt, 1) = __vgic_v3_get_ich_vtr_el2();
+        cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
 }
 
 static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
@@ -115,10 +115,10 @@ static const hcall_t host_hcall[] = {
         HANDLE_FUNC(__kvm_flush_vm_context),
         HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
         HANDLE_FUNC(__kvm_tlb_flush_vmid),
-        HANDLE_FUNC(__kvm_tlb_flush_local_vmid),
+        HANDLE_FUNC(__kvm_flush_cpu_context),
         HANDLE_FUNC(__kvm_timer_set_cntvoff),
         HANDLE_FUNC(__kvm_enable_ssbs),
-        HANDLE_FUNC(__vgic_v3_get_ich_vtr_el2),
+        HANDLE_FUNC(__vgic_v3_get_gic_config),
         HANDLE_FUNC(__vgic_v3_read_vmcr),
         HANDLE_FUNC(__vgic_v3_write_vmcr),
         HANDLE_FUNC(__vgic_v3_init_lrs),

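The host_hcall[] table is indexed by the __KVM_HOST_SMCCC_FUNC_* ids from kvm_asm.h, so the renamed handlers keep occupying slots 5 and 8. The dispatch works roughly as follows (a simplified sketch of the existing hyp-main.c handler, not a change in this merge; the hyp-VA handling of the table entry is omitted):

/* Sketch only: EL2 side of a host hypercall. */
typedef void (*hcall_t)(struct kvm_cpu_context *);

#define HANDLE_FUNC(x)  [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x

static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
        DECLARE_REG(unsigned long, id, host_ctxt, 0);
        hcall_t hfn;

        id -= KVM_HOST_SMCCC_ID(0);

        if (unlikely(id >= ARRAY_SIZE(host_hcall)))
                goto inval;

        hfn = host_hcall[id];
        if (unlikely(!hfn))
                goto inval;

        cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
        hfn(host_ctxt);
        return;
inval:
        cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
}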