Skip to content

Commit 3806094

Browse files
committed
Merge tag 'kvmarm-5.8' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 updates for Linux 5.8: move the arch-specific code into arch/arm64/kvm; start the post-32bit cleanup; cherry-pick a few non-invasive pre-NV patches.
2 parents 09d952c + 8f7f4fe commit 3806094

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

56 files changed

+629
-639
lines changed

Documentation/virt/kvm/api.rst

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -5799,7 +5799,7 @@ will be initialized to 1 when created. This also improves performance because
57995799
dirty logging can be enabled gradually in small chunks on the first call
58005800
to KVM_CLEAR_DIRTY_LOG. KVM_DIRTY_LOG_INITIALLY_SET depends on
58015801
KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (it is also only available on
5802-
x86 for now).
5802+
x86 and arm64 for now).
58035803

58045804
KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 was previously available under the name
58055805
KVM_CAP_MANUAL_DIRTY_LOG_PROTECT, but the implementation had bugs that make

MAINTAINERS

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -9295,7 +9295,6 @@ F: arch/arm64/include/asm/kvm*
92959295
F: arch/arm64/include/uapi/asm/kvm*
92969296
F: arch/arm64/kvm/
92979297
F: include/kvm/arm_*
9298-
F: virt/kvm/arm/
92999298

93009299
KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
93019300

arch/arm64/include/asm/kvm_asm.h

Lines changed: 3 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -64,12 +64,14 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
6464
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
6565
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
6666

67-
extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
67+
extern void __kvm_timer_set_cntvoff(u64 cntvoff);
6868

6969
extern int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu);
7070

7171
extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);
7272

73+
extern void __kvm_enable_ssbs(void);
74+
7375
extern u64 __vgic_v3_get_ich_vtr_el2(void);
7476
extern u64 __vgic_v3_read_vmcr(void);
7577
extern void __vgic_v3_write_vmcr(u32 vmcr);

arch/arm64/include/asm/kvm_host.h

Lines changed: 4 additions & 40 deletions
Original file line number | Diff line number | Diff line change
@@ -46,6 +46,9 @@
4646
#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
4747
#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
4848

49+
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
50+
KVM_DIRTY_LOG_INITIALLY_SET)
51+
4952
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
5053

5154
extern unsigned int kvm_sve_max_vl;
@@ -112,12 +115,8 @@ struct kvm_vcpu_fault_info {
112115
u64 disr_el1; /* Deferred [SError] Status Register */
113116
};
114117

115-
/*
116-
* 0 is reserved as an invalid value.
117-
* Order should be kept in sync with the save/restore code.
118-
*/
119118
enum vcpu_sysreg {
120-
__INVALID_SYSREG__,
119+
__INVALID_SYSREG__, /* 0 is reserved as an invalid value */
121120
MPIDR_EL1, /* MultiProcessor Affinity Register */
122121
CSSELR_EL1, /* Cache Size Selection Register */
123122
SCTLR_EL1, /* System Control Register */
@@ -532,39 +531,6 @@ static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
532531
cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr();
533532
}
534533

535-
void __kvm_enable_ssbs(void);
536-
537-
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
538-
unsigned long hyp_stack_ptr,
539-
unsigned long vector_ptr)
540-
{
541-
/*
542-
* Calculate the raw per-cpu offset without a translation from the
543-
* kernel's mapping to the linear mapping, and store it in tpidr_el2
544-
* so that we can use adr_l to access per-cpu variables in EL2.
545-
*/
546-
u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) -
547-
(u64)kvm_ksym_ref(kvm_host_data));
548-
549-
/*
550-
* Call initialization code, and switch to the full blown HYP code.
551-
* If the cpucaps haven't been finalized yet, something has gone very
552-
* wrong, and hyp will crash and burn when it uses any
553-
* cpus_have_const_cap() wrapper.
554-
*/
555-
BUG_ON(!system_capabilities_finalized());
556-
__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
557-
558-
/*
559-
* Disabling SSBD on a non-VHE system requires us to enable SSBS
560-
* at EL2.
561-
*/
562-
if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
563-
arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
564-
kvm_call_hyp(__kvm_enable_ssbs);
565-
}
566-
}
567-
568534
static inline bool kvm_arch_requires_vhe(void)
569535
{
570536
/*
@@ -600,8 +566,6 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
600566
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
601567
struct kvm_device_attr *attr);
602568

603-
static inline void __cpu_init_stage2(void) {}
604-
605569
/* Guest/host FPSIMD coordination helpers */
606570
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
607571
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);

arch/arm64/include/asm/kvm_hyp.h

Lines changed: 6 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -56,12 +56,12 @@
5656

5757
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
5858

59-
void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
60-
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
61-
void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
62-
void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
63-
void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
64-
void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
59+
void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
60+
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
61+
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
62+
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
63+
void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
64+
void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
6565
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
6666

6767
void __timer_enable_traps(struct kvm_vcpu *vcpu);

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 1 addition & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -363,8 +363,6 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
363363
}
364364
}
365365

366-
#define kvm_virt_to_phys(x) __pa_symbol(x)
367-
368366
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
369367
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
370368

@@ -473,7 +471,7 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
473471
extern void *__kvm_bp_vect_base;
474472
extern int __kvm_harden_el2_vector_slot;
475473

476-
/* This is only called on a VHE system */
474+
/* This is called on both VHE and !VHE systems */
477475
static inline void *kvm_get_hyp_vector(void)
478476
{
479477
struct bp_hardening_data *data = arm64_get_bp_hardening_data();

arch/arm64/include/asm/ptrace.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -35,6 +35,7 @@
3535
#define GIC_PRIO_PSR_I_SET (1 << 4)
3636

3737
/* Additional SPSR bits not exposed in the UABI */
38+
#define PSR_MODE_THREAD_BIT (1 << 0)
3839
#define PSR_IL_BIT (1 << 20)
3940

4041
/* AArch32-specific ptrace requests */

arch/arm64/include/asm/virt.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -85,7 +85,7 @@ static inline bool is_kernel_in_hyp_mode(void)
8585

8686
static __always_inline bool has_vhe(void)
8787
{
88-
if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
88+
if (cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN))
8989
return true;
9090

9191
return false;

arch/arm64/kernel/asm-offsets.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -96,7 +96,7 @@ int main(void)
9696
DEFINE(CPU_BOOT_PTRAUTH_KEY, offsetof(struct secondary_data, ptrauth_key));
9797
#endif
9898
BLANK();
99-
#ifdef CONFIG_KVM_ARM_HOST
99+
#ifdef CONFIG_KVM
100100
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
101101
DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
102102
DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));

arch/arm64/kernel/cpu_errata.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -234,7 +234,7 @@ static int detect_harden_bp_fw(void)
234234
smccc_end = NULL;
235235
break;
236236

237-
#if IS_ENABLED(CONFIG_KVM_ARM_HOST)
237+
#if IS_ENABLED(CONFIG_KVM)
238238
case SMCCC_CONDUIT_SMC:
239239
cb = call_smc_arch_workaround_1;
240240
smccc_start = __smccc_workaround_1_smc;

0 commit comments

Comments (0)