
Commit f080815

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:
 "ARM64:

   - Fix constant sign extension affecting TCR_EL2 and preventing
     running on ARMv8.7 models due to spurious bits being set

   - Fix use of helpers using PSTATE early on exit by always sampling
     it as soon as the exit takes place

   - Move pkvm's 32bit handling into a common helper

  RISC-V:

   - Fix incorrect KVM_MAX_VCPUS value

   - Unmap stage2 mapping when deleting/moving a memslot

  x86:

   - Fix and downgrade BUG_ON due to uninitialized cache

   - Many APICv and MOVE_ENC_CONTEXT_FROM fixes

   - Correctly emulate TLB flushes around nested vmentry/vmexit and
     when the nested hypervisor uses VPID

   - Prevent modifications to CPUID after the VM has run

   - Other smaller bugfixes

  Generic:

   - Memslot handling bugfixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (44 commits)
  KVM: fix avic_set_running for preemptable kernels
  KVM: VMX: clear vmx_x86_ops.sync_pir_to_irr if APICv is disabled
  KVM: SEV: accept signals in sev_lock_two_vms
  KVM: SEV: do not take kvm->lock when destroying
  KVM: SEV: Prohibit migration of a VM that has mirrors
  KVM: SEV: Do COPY_ENC_CONTEXT_FROM with both VMs locked
  selftests: sev_migrate_tests: add tests for KVM_CAP_VM_COPY_ENC_CONTEXT_FROM
  KVM: SEV: move mirror status to destination of KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM
  KVM: SEV: initialize regions_list of a mirror VM
  KVM: SEV: cleanup locking for KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM
  KVM: SEV: do not use list_replace_init on an empty list
  KVM: x86: Use a stable condition around all VT-d PI paths
  KVM: x86: check PIR even for vCPUs with disabled APICv
  KVM: VMX: prepare sync_pir_to_irr for running with APICv disabled
  KVM: selftests: page_table_test: fix calculation of guest_test_phys_mem
  KVM: x86/mmu: Handle "default" period when selectively waking kthread
  KVM: MMU: shadow nested paging does not have PKU
  KVM: x86/mmu: Remove spurious TLB flushes in TDP MMU zap collapsible path
  KVM: x86/mmu: Use yield-safe TDP MMU root iter in MMU notifier unmapping
  KVM: X86: Use vcpu->arch.walk_mmu for kvm_mmu_invlpg()
  ...
2 parents d6e6a27 + 7cfc5c6

File tree

27 files changed: +623 -352 lines changed


arch/arm64/include/asm/kvm_arm.h

Lines changed: 2 additions & 2 deletions
@@ -91,7 +91,7 @@
 #define HCR_HOST_VHE_FLAGS	(HCR_RW | HCR_TGE | HCR_E2H)
 
 /* TCR_EL2 Registers bits */
-#define TCR_EL2_RES1		((1 << 31) | (1 << 23))
+#define TCR_EL2_RES1		((1U << 31) | (1 << 23))
 #define TCR_EL2_TBI		(1 << 20)
 #define TCR_EL2_PS_SHIFT	16
 #define TCR_EL2_PS_MASK		(7 << TCR_EL2_PS_SHIFT)
@@ -276,7 +276,7 @@
 #define CPTR_EL2_TFP_SHIFT	10
 
 /* Hyp Coprocessor Trap Register */
-#define CPTR_EL2_TCPAC		(1 << 31)
+#define CPTR_EL2_TCPAC		(1U << 31)
 #define CPTR_EL2_TAM		(1 << 30)
 #define CPTR_EL2_TTA		(1 << 20)
 #define CPTR_EL2_TFP		(1 << CPTR_EL2_TFP_SHIFT)
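A note on the two hunks above: (1 << 31) shifts into the sign bit of a signed int (strictly undefined behavior in C, a negative value on typical compilers), so when the constant is widened to the 64-bit register image used for TCR_EL2 and CPTR_EL2 it sign-extends and sets bits 63:31 as well, which is the "spurious bits" problem called out in the pull summary. A minimal userspace sketch of the effect, plain C for illustration and not kernel code:

/* Minimal illustration, not kernel code: the signed shift sign-extends
 * when converted to a 64-bit value, the unsigned one does not. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bad  = (1 << 31);   /* typically becomes 0xffffffff80000000 */
	uint64_t good = (1U << 31);  /* stays             0x0000000080000000 */

	printf("bad  = 0x%016llx\n", (unsigned long long)bad);
	printf("good = 0x%016llx\n", (unsigned long long)good);
	return 0;
}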

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 14 additions & 0 deletions
@@ -403,6 +403,8 @@ typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
 
 static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
 
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
+
 /*
  * Allow the hypervisor to handle the exit with an exit handler if it has one.
  *
@@ -429,6 +431,18 @@ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
  */
 static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
+	/*
+	 * Save PSTATE early so that we can evaluate the vcpu mode
+	 * early on.
+	 */
+	vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+
+	/*
+	 * Check whether we want to repaint the state one way or
+	 * another.
+	 */
+	early_exit_filter(vcpu, exit_code);
+
 	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
 		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h

Lines changed: 6 additions & 1 deletion
@@ -70,7 +70,12 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
 {
 	ctxt->regs.pc		= read_sysreg_el2(SYS_ELR);
-	ctxt->regs.pstate	= read_sysreg_el2(SYS_SPSR);
+	/*
+	 * Guest PSTATE gets saved at guest fixup time in all
+	 * cases. We still need to handle the nVHE host side here.
+	 */
+	if (!has_vhe() && ctxt->__hyp_running_vcpu)
+		ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);
 
 	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
 		ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 1 addition & 7 deletions
@@ -233,7 +233,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
  * Returns false if the guest ran in AArch32 when it shouldn't have, and
  * thus should exit to the host, or true if a the guest run loop can continue.
  */
-static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
 
@@ -248,10 +248,7 @@ static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
 		vcpu->arch.target = -1;
 		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
 		*exit_code |= ARM_EXCEPTION_IL;
-		return false;
 	}
-
-	return true;
 }
 
 /* Switch to the guest for legacy non-VHE systems */
@@ -316,9 +313,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		/* Jump in the fire! */
 		exit_code = __guest_enter(vcpu);
 
-		if (unlikely(!handle_aarch32_guest(vcpu, &exit_code)))
-			break;
-
 		/* And we're baaack! */
 	} while (fixup_guest_exit(vcpu, &exit_code));

arch/arm64/kvm/hyp/vhe/switch.c

Lines changed: 4 additions & 0 deletions
@@ -112,6 +112,10 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
 	return hyp_exit_handlers;
 }
 
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+}
+
 /* Switch to the guest for VHE systems running in EL2 */
 static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 {
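Taken together, the four arm64 hunks above make fixup_guest_exit() snapshot SPSR_EL2 into the vcpu context before anything else runs and then call an early_exit_filter() hook: the nVHE version keeps the old protected-KVM AArch32 check, the VHE version is an empty stub, and the generic sysreg save path now only re-reads SPSR for the nVHE host context. The point is that mode-checking helpers consult the saved pstate copy, so it must be fresh on every exit path. A hedged sketch of the kind of helper that depends on this ordering (the helper body here is assumed from the usual arm64 KVM pattern, it is not part of this diff):

/*
 * Sketch only, assuming the usual arm64 KVM layout: helpers like this
 * read the cached pstate, so it must already hold the value sampled
 * from SPSR_EL2 at the top of fixup_guest_exit().
 */
static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.ctxt.regs.pstate & PSR_MODE32_BIT);
}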

arch/riscv/include/asm/kvm_host.h

Lines changed: 3 additions & 5 deletions
@@ -12,14 +12,12 @@
 #include <linux/types.h>
 #include <linux/kvm.h>
 #include <linux/kvm_types.h>
+#include <asm/csr.h>
 #include <asm/kvm_vcpu_fp.h>
 #include <asm/kvm_vcpu_timer.h>
 
-#ifdef CONFIG_64BIT
-#define KVM_MAX_VCPUS			(1U << 16)
-#else
-#define KVM_MAX_VCPUS			(1U << 9)
-#endif
+#define KVM_MAX_VCPUS			\
+	((HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1)
 
 #define KVM_HALT_POLL_NS_DEFAULT	500000
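With this change the vCPU limit follows the width of the VMID field in the hgatp CSR rather than a hard-coded power of two. A rough standalone model of what the macro evaluates to; the rv64 field layout below (VMID in bits 57:44, 14 bits wide) is taken from the RISC-V privileged spec and is an assumption of this sketch, not something shown in the hunk:

/* Rough standalone model, assuming the rv64 hgatp layout (VMID in bits 57:44). */
#include <stdio.h>

#define HGATP64_VMID_SHIFT	44
#define HGATP64_VMID_MASK	(0x3FFFULL << HGATP64_VMID_SHIFT)

int main(void)
{
	unsigned long long max_vcpus =
		(HGATP64_VMID_MASK >> HGATP64_VMID_SHIFT) + 1;

	printf("KVM_MAX_VCPUS on rv64 would be %llu\n", max_vcpus); /* 16384 */
	return 0;
}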

arch/riscv/kvm/mmu.c

Lines changed: 6 additions & 0 deletions
@@ -453,6 +453,12 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 				   struct kvm_memory_slot *slot)
 {
+	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
+	phys_addr_t size = slot->npages << PAGE_SHIFT;
+
+	spin_lock(&kvm->mmu_lock);
+	stage2_unmap_range(kvm, gpa, size, false);
+	spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,
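The new body unmaps the memslot's entire guest-physical range under mmu_lock when the slot is deleted or moved. A standalone model of the address arithmetic in that hunk; the 4 KiB PAGE_SHIFT and the example slot values are assumptions of this sketch:

/* Standalone model of the address math above: a memslot's guest-physical
 * range is derived from its first gfn and its page count. */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	unsigned long long base_gfn = 0x80000ULL;	/* example slot */
	unsigned long long npages   = 256ULL;

	unsigned long long gpa  = base_gfn << PAGE_SHIFT;  /* 0x80000000 */
	unsigned long long size = npages   << PAGE_SHIFT;  /* 1 MiB */

	printf("unmap gpa=0x%llx size=0x%llx\n", gpa, size);
	return 0;
}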

arch/x86/kvm/ioapic.h

Lines changed: 0 additions & 1 deletion
@@ -81,7 +81,6 @@ struct kvm_ioapic {
 	unsigned long irq_states[IOAPIC_NUM_PINS];
 	struct kvm_io_device dev;
 	struct kvm *kvm;
-	void (*ack_notifier)(void *opaque, int irq);
 	spinlock_t lock;
 	struct rtc_status rtc_status;
 	struct delayed_work eoi_inject;

arch/x86/kvm/irq.h

Lines changed: 0 additions & 1 deletion
@@ -56,7 +56,6 @@ struct kvm_pic {
 	struct kvm_io_device dev_master;
 	struct kvm_io_device dev_slave;
 	struct kvm_io_device dev_elcr;
-	void (*ack_notifier)(void *opaque, int irq);
 	unsigned long irq_states[PIC_NUM_PINS];
 };

arch/x86/kvm/lapic.c

Lines changed: 1 addition & 1 deletion
@@ -707,7 +707,7 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 {
 	int highest_irr;
-	if (apic->vcpu->arch.apicv_active)
+	if (kvm_x86_ops.sync_pir_to_irr)
 		highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
 	else
 		highest_irr = apic_find_highest_irr(apic);
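The gate changes from the per-vCPU apicv_active flag to the presence of the sync_pir_to_irr hook itself. Per the commits in this pull ("KVM: x86: check PIR even for vCPUs with disabled APICv" and "KVM: VMX: clear vmx_x86_ops.sync_pir_to_irr if APICv is disabled"), the PIR can still hold pending interrupts while APICv is temporarily inhibited on a vCPU, so the stable condition is whether the vendor hook is populated at all. A hedged sketch of the VMX side implied by the shortlog; the wrapper name is hypothetical and only the NULL assignment reflects the described change:

/*
 * Sketch of the companion VMX change described in the shortlog, not a
 * hunk shown on this page: when APICv is disabled for the module, the
 * hook is cleared, making the NULL check in lapic.c a stable condition.
 */
static __init void vmx_setup_sync_pir_hook(void)	/* hypothetical helper */
{
	if (!enable_apicv)
		vmx_x86_ops.sync_pir_to_irr = NULL;
}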
