Skip to content

Commit 7134fa0

Browse files
committed
Merge tag 'kvmarm-fixes-5.7-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master
KVM/arm fixes for Linux 5.7, take #2:
- Fix compilation with Clang
- Correctly initialize GICv4.1 in the absence of a virtual ITS
- Move SP_EL0 save/restore to the guest entry/exit code
- Handle PC wrap around on 32bit guests, and narrow all 32bit registers on userspace access
2 parents 9e5e19f + 0225fd5 commit 7134fa0

File tree

7 files changed

+49
-19
lines changed

7 files changed

+49
-19
lines changed

arch/arm64/kvm/guest.c

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -200,6 +200,13 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
200200
}
201201

202202
memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
203+
204+
if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
205+
int i;
206+
207+
for (i = 0; i < 16; i++)
208+
*vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
209+
}
203210
out:
204211
return err;
205212
}

arch/arm64/kvm/hyp/entry.S

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818

1919
#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
2020
#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
21+
#define CPU_SP_EL0_OFFSET (CPU_XREG_OFFSET(30) + 8)
2122

2223
.text
2324
.pushsection .hyp.text, "ax"
@@ -47,6 +48,16 @@
4748
ldp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)]
4849
.endm
4950

51+
/*
 * Save/restore SP_EL0 to/from a kvm_cpu_context.
 *
 * The host kernel uses sp_el0 to point at 'current', so it must be
 * switched on every guest entry/exit; doing it here in the world-switch
 * assembly (rather than in the C sysreg save/restore code) keeps the
 * register valid for the whole of the C hyp code.
 *
 *   \ctxt - base address of the kvm_cpu_context
 *   \tmp  - scratch general-purpose register
 */
.macro save_sp_el0 ctxt, tmp
	mrs	\tmp, sp_el0
	str	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm
60+
5061
/*
5162
* u64 __guest_enter(struct kvm_vcpu *vcpu,
5263
* struct kvm_cpu_context *host_ctxt);
@@ -60,6 +71,9 @@ SYM_FUNC_START(__guest_enter)
6071
// Store the host regs
6172
save_callee_saved_regs x1
6273

74+
// Save the host's sp_el0
75+
save_sp_el0 x1, x2
76+
6377
// Now the host state is stored if we have a pending RAS SError it must
6478
// affect the host. If any asynchronous exception is pending we defer
6579
// the guest entry. The DSB isn't necessary before v8.2 as any SError
@@ -83,6 +97,9 @@ alternative_else_nop_endif
8397
// when this feature is enabled for kernel code.
8498
ptrauth_switch_to_guest x29, x0, x1, x2
8599

100+
// Restore the guest's sp_el0
101+
restore_sp_el0 x29, x0
102+
86103
// Restore guest regs x0-x17
87104
ldp x0, x1, [x29, #CPU_XREG_OFFSET(0)]
88105
ldp x2, x3, [x29, #CPU_XREG_OFFSET(2)]
@@ -130,6 +147,9 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
130147
// Store the guest regs x18-x29, lr
131148
save_callee_saved_regs x1
132149

150+
// Store the guest's sp_el0
151+
save_sp_el0 x1, x2
152+
133153
get_host_ctxt x2, x3
134154

135155
// Macro ptrauth_switch_to_guest format:
@@ -139,6 +159,9 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
139159
// when this feature is enabled for kernel code.
140160
ptrauth_switch_to_host x1, x2, x3, x4, x5
141161

162+
// Restore the host's sp_el0
163+
restore_sp_el0 x2, x3
164+
142165
// Now restore the host regs
143166
restore_callee_saved_regs x2
144167

arch/arm64/kvm/hyp/hyp-entry.S

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -198,7 +198,6 @@ SYM_CODE_END(__hyp_panic)
198198
.macro invalid_vector label, target = __hyp_panic
199199
.align 2
200200
SYM_CODE_START(\label)
201-
\label:
202201
b \target
203202
SYM_CODE_END(\label)
204203
.endm

arch/arm64/kvm/hyp/sysreg-sr.c

Lines changed: 3 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -15,8 +15,9 @@
1515
/*
1616
* Non-VHE: Both host and guest must save everything.
1717
*
18-
* VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and pstate,
19-
* which are handled as part of the el2 return state) on every switch.
18+
* VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and
19+
* pstate, which are handled as part of the el2 return state) on every
20+
* switch (sp_el0 is being dealt with in the assembly code).
2021
* tpidr_el0 and tpidrro_el0 only need to be switched when going
2122
* to host userspace or a different VCPU. EL1 registers only need to be
2223
* switched when potentially going to run a different VCPU. The latter two
@@ -26,12 +27,6 @@
2627
static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
2728
{
2829
ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
29-
30-
/*
31-
* The host arm64 Linux uses sp_el0 to point to 'current' and it must
32-
* therefore be saved/restored on every entry/exit to/from the guest.
33-
*/
34-
ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
3530
}
3631

3732
static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
@@ -99,12 +94,6 @@ NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
9994
static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
10095
{
10196
write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
102-
103-
/*
104-
* The host arm64 Linux uses sp_el0 to point to 'current' and it must
105-
* therefore be saved/restored on every entry/exit to/from the guest.
106-
*/
107-
write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
10897
}
10998

11099
static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)

virt/kvm/arm/hyp/aarch32.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -125,12 +125,16 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
125125
*/
126126
/*
 * kvm_skip_instr32 - advance a 32bit guest's PC past the current instruction.
 *
 * @vcpu:          the vcpu whose PC is being advanced
 * @is_wide_instr: true for a 32bit-wide encoding, false for a 16bit Thumb one
 *
 * The increment is performed on a local u32 so that the PC wraps at the
 * 32bit boundary, as architecturally required for AArch32 guests; doing
 * the addition directly on the 64bit *vcpu_pc() would let the carry leak
 * into bits [63:32].
 */
void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	u32 pc = *vcpu_pc(vcpu);
	bool is_thumb;

	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
	if (is_thumb && !is_wide_instr)
		pc += 2;
	else
		pc += 4;

	*vcpu_pc(vcpu) = pc;

	/* Keep the IT-block state consistent with the new PC. */
	kvm_adjust_itstate(vcpu);
}

virt/kvm/arm/vgic/vgic-init.c

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -294,8 +294,15 @@ int vgic_init(struct kvm *kvm)
294294
}
295295
}
296296

297-
if (vgic_has_its(kvm)) {
297+
if (vgic_has_its(kvm))
298298
vgic_lpi_translation_cache_init(kvm);
299+
300+
/*
301+
* If we have GICv4.1 enabled, unconditionally request enabling the
302+
* v4 support so that we get HW-accelerated vSGIs. Otherwise, only
303+
* enable it if we present a virtual ITS to the guest.
304+
*/
305+
if (vgic_supports_direct_msis(kvm)) {
299306
ret = vgic_v4_init(kvm);
300307
if (ret)
301308
goto out;

virt/kvm/arm/vgic/vgic-mmio-v3.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,8 @@ bool vgic_has_its(struct kvm *kvm)
5050

5151
bool vgic_supports_direct_msis(struct kvm *kvm)
5252
{
53-
return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
53+
return (kvm_vgic_global_state.has_gicv4_1 ||
54+
(kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm)));
5455
}
5556

5657
/*

0 commit comments

Comments
 (0)