Skip to content

Commit acf2ab2

Browse files
author
Marc Zyngier
committed
Merge branch kvm-arm64/vgic-sre-traps into kvmarm-master/next
* kvm-arm64/vgic-sre-traps: : . : Fix the multiple cases where KVM/arm64 doesn't correctly : handle the guest trying to use a GICv3 that isn't advertised. : : From the cover letter: : : "It recently appeared that, when running on a GICv3-equipped platform : (which is what non-ancient arm64 HW has), *not* configuring a GICv3 : for the guest could result in less than desirable outcomes. : : We have multiple issues to fix: : : - for registers that *always* trap (the SGI registers) or that *may* : trap (the SRE register), we need to check whether a GICv3 has been : instantiated before acting upon the trap. : : - for registers that only conditionally trap, we must actively trap : them even in the absence of a GICv3 being instantiated, and handle : those traps accordingly. : : - finally, ID registers must reflect the absence of a GICv3, so that : we are consistent. : : This series goes through all these requirements. The main complexity : here is to apply a GICv3 configuration on the host in the absence of a : GICv3 in the guest. This is pretty hackish, but I don't have a much : better solution so far. : : As part of making wider use of the trap bits, we fully define the : trap routing as per the architecture, something that we eventually : need for NV anyway." : . 
KVM: arm64: selftests: Cope with lack of GICv3 in set_id_regs KVM: arm64: Add selftest checking how the absence of GICv3 is handled KVM: arm64: Unify UNDEF injection helpers KVM: arm64: Make most GICv3 accesses UNDEF if they trap KVM: arm64: Honor guest requested traps in GICv3 emulation KVM: arm64: Add trap routing information for ICH_HCR_EL2 KVM: arm64: Add ICH_HCR_EL2 to the vcpu state KVM: arm64: Zero ID_AA64PFR0_EL1.GIC when no GICv3 is presented to the guest KVM: arm64: Add helper for last ditch idreg adjustments KVM: arm64: Force GICv3 trap activation when no irqchip is configured on VHE KVM: arm64: Force SRE traps when SRE access is not enabled KVM: arm64: Move GICv3 trap configuration to kvm_calculate_traps() Signed-off-by: Marc Zyngier <[email protected]>
2 parents 091258a + 4641c7e commit acf2ab2

File tree

13 files changed

+522
-135
lines changed

13 files changed

+522
-135
lines changed

arch/arm64/include/asm/kvm_host.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -534,6 +534,8 @@ enum vcpu_sysreg {
534534
VNCR(CNTP_CVAL_EL0),
535535
VNCR(CNTP_CTL_EL0),
536536

537+
VNCR(ICH_HCR_EL2),
538+
537539
NR_SYS_REGS /* Nothing after this line! */
538540
};
539541

arch/arm64/kvm/arm.c

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,8 @@
4646
#include <kvm/arm_pmu.h>
4747
#include <kvm/arm_psci.h>
4848

49+
#include "sys_regs.h"
50+
4951
static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
5052

5153
enum kvm_wfx_trap_policy {
@@ -821,15 +823,13 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
821823
return ret;
822824
}
823825

824-
if (vcpu_has_nv(vcpu)) {
825-
ret = kvm_init_nv_sysregs(vcpu->kvm);
826-
if (ret)
827-
return ret;
828-
}
826+
ret = kvm_finalize_sys_regs(vcpu);
827+
if (ret)
828+
return ret;
829829

830830
/*
831-
* This needs to happen after NV has imposed its own restrictions on
832-
* the feature set
831+
* This needs to happen after any restriction has been applied
832+
* to the feature set.
833833
*/
834834
kvm_calculate_traps(vcpu);
835835

arch/arm64/kvm/emulate-nested.c

Lines changed: 66 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -86,12 +86,17 @@ enum cgt_group_id {
8686
CGT_HCRX_EnFPM,
8787
CGT_HCRX_TCR2En,
8888

89+
CGT_ICH_HCR_TC,
90+
CGT_ICH_HCR_TALL0,
91+
CGT_ICH_HCR_TALL1,
92+
CGT_ICH_HCR_TDIR,
93+
8994
/*
9095
* Anything after this point is a combination of coarse trap
9196
* controls, which must all be evaluated to decide what to do.
9297
*/
9398
__MULTIPLE_CONTROL_BITS__,
94-
CGT_HCR_IMO_FMO = __MULTIPLE_CONTROL_BITS__,
99+
CGT_HCR_IMO_FMO_ICH_HCR_TC = __MULTIPLE_CONTROL_BITS__,
95100
CGT_HCR_TID2_TID4,
96101
CGT_HCR_TTLB_TTLBIS,
97102
CGT_HCR_TTLB_TTLBOS,
@@ -106,6 +111,8 @@ enum cgt_group_id {
106111
CGT_MDCR_TDE_TDRA,
107112
CGT_MDCR_TDCC_TDE_TDA,
108113

114+
CGT_ICH_HCR_TC_TDIR,
115+
109116
/*
110117
* Anything after this point requires a callback evaluating a
111118
* complex trap condition. Ugly stuff.
@@ -385,6 +392,30 @@ static const struct trap_bits coarse_trap_bits[] = {
385392
.mask = HCRX_EL2_TCR2En,
386393
.behaviour = BEHAVE_FORWARD_ANY,
387394
},
395+
[CGT_ICH_HCR_TC] = {
396+
.index = ICH_HCR_EL2,
397+
.value = ICH_HCR_TC,
398+
.mask = ICH_HCR_TC,
399+
.behaviour = BEHAVE_FORWARD_ANY,
400+
},
401+
[CGT_ICH_HCR_TALL0] = {
402+
.index = ICH_HCR_EL2,
403+
.value = ICH_HCR_TALL0,
404+
.mask = ICH_HCR_TALL0,
405+
.behaviour = BEHAVE_FORWARD_ANY,
406+
},
407+
[CGT_ICH_HCR_TALL1] = {
408+
.index = ICH_HCR_EL2,
409+
.value = ICH_HCR_TALL1,
410+
.mask = ICH_HCR_TALL1,
411+
.behaviour = BEHAVE_FORWARD_ANY,
412+
},
413+
[CGT_ICH_HCR_TDIR] = {
414+
.index = ICH_HCR_EL2,
415+
.value = ICH_HCR_TDIR,
416+
.mask = ICH_HCR_TDIR,
417+
.behaviour = BEHAVE_FORWARD_ANY,
418+
},
388419
};
389420

390421
#define MCB(id, ...) \
@@ -394,7 +425,6 @@ static const struct trap_bits coarse_trap_bits[] = {
394425
}
395426

396427
static const enum cgt_group_id *coarse_control_combo[] = {
397-
MCB(CGT_HCR_IMO_FMO, CGT_HCR_IMO, CGT_HCR_FMO),
398428
MCB(CGT_HCR_TID2_TID4, CGT_HCR_TID2, CGT_HCR_TID4),
399429
MCB(CGT_HCR_TTLB_TTLBIS, CGT_HCR_TTLB, CGT_HCR_TTLBIS),
400430
MCB(CGT_HCR_TTLB_TTLBOS, CGT_HCR_TTLB, CGT_HCR_TTLBOS),
@@ -409,6 +439,9 @@ static const enum cgt_group_id *coarse_control_combo[] = {
409439
MCB(CGT_MDCR_TDE_TDOSA, CGT_MDCR_TDE, CGT_MDCR_TDOSA),
410440
MCB(CGT_MDCR_TDE_TDRA, CGT_MDCR_TDE, CGT_MDCR_TDRA),
411441
MCB(CGT_MDCR_TDCC_TDE_TDA, CGT_MDCR_TDCC, CGT_MDCR_TDE, CGT_MDCR_TDA),
442+
443+
MCB(CGT_HCR_IMO_FMO_ICH_HCR_TC, CGT_HCR_IMO, CGT_HCR_FMO, CGT_ICH_HCR_TC),
444+
MCB(CGT_ICH_HCR_TC_TDIR, CGT_ICH_HCR_TC, CGT_ICH_HCR_TDIR),
412445
};
413446

414447
typedef enum trap_behaviour (*complex_condition_check)(struct kvm_vcpu *);
@@ -543,9 +576,9 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
543576
SR_TRAP(SYS_CSSELR_EL1, CGT_HCR_TID2_TID4),
544577
SR_RANGE_TRAP(SYS_ID_PFR0_EL1,
545578
sys_reg(3, 0, 0, 7, 7), CGT_HCR_TID3),
546-
SR_TRAP(SYS_ICC_SGI0R_EL1, CGT_HCR_IMO_FMO),
547-
SR_TRAP(SYS_ICC_ASGI1R_EL1, CGT_HCR_IMO_FMO),
548-
SR_TRAP(SYS_ICC_SGI1R_EL1, CGT_HCR_IMO_FMO),
579+
SR_TRAP(SYS_ICC_SGI0R_EL1, CGT_HCR_IMO_FMO_ICH_HCR_TC),
580+
SR_TRAP(SYS_ICC_ASGI1R_EL1, CGT_HCR_IMO_FMO_ICH_HCR_TC),
581+
SR_TRAP(SYS_ICC_SGI1R_EL1, CGT_HCR_IMO_FMO_ICH_HCR_TC),
549582
SR_RANGE_TRAP(sys_reg(3, 0, 11, 0, 0),
550583
sys_reg(3, 0, 11, 15, 7), CGT_HCR_TIDCP),
551584
SR_RANGE_TRAP(sys_reg(3, 1, 11, 0, 0),
@@ -1116,6 +1149,34 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
11161149
SR_TRAP(SYS_CNTPCT_EL0, CGT_CNTHCTL_EL1PCTEN),
11171150
SR_TRAP(SYS_CNTPCTSS_EL0, CGT_CNTHCTL_EL1PCTEN),
11181151
SR_TRAP(SYS_FPMR, CGT_HCRX_EnFPM),
1152+
/*
1153+
* IMPDEF choice:
1154+
* We treat ICC_SRE_EL2.{SRE,Enable} and ICV_SRE_EL1.SRE as
1155+
* RAO/WI. We therefore never consider ICC_SRE_EL2.Enable for
1156+
* ICC_SRE_EL1 access, and always handle it locally.
1157+
*/
1158+
SR_TRAP(SYS_ICC_AP0R0_EL1, CGT_ICH_HCR_TALL0),
1159+
SR_TRAP(SYS_ICC_AP0R1_EL1, CGT_ICH_HCR_TALL0),
1160+
SR_TRAP(SYS_ICC_AP0R2_EL1, CGT_ICH_HCR_TALL0),
1161+
SR_TRAP(SYS_ICC_AP0R3_EL1, CGT_ICH_HCR_TALL0),
1162+
SR_TRAP(SYS_ICC_AP1R0_EL1, CGT_ICH_HCR_TALL1),
1163+
SR_TRAP(SYS_ICC_AP1R1_EL1, CGT_ICH_HCR_TALL1),
1164+
SR_TRAP(SYS_ICC_AP1R2_EL1, CGT_ICH_HCR_TALL1),
1165+
SR_TRAP(SYS_ICC_AP1R3_EL1, CGT_ICH_HCR_TALL1),
1166+
SR_TRAP(SYS_ICC_BPR0_EL1, CGT_ICH_HCR_TALL0),
1167+
SR_TRAP(SYS_ICC_BPR1_EL1, CGT_ICH_HCR_TALL1),
1168+
SR_TRAP(SYS_ICC_CTLR_EL1, CGT_ICH_HCR_TC),
1169+
SR_TRAP(SYS_ICC_DIR_EL1, CGT_ICH_HCR_TC_TDIR),
1170+
SR_TRAP(SYS_ICC_EOIR0_EL1, CGT_ICH_HCR_TALL0),
1171+
SR_TRAP(SYS_ICC_EOIR1_EL1, CGT_ICH_HCR_TALL1),
1172+
SR_TRAP(SYS_ICC_HPPIR0_EL1, CGT_ICH_HCR_TALL0),
1173+
SR_TRAP(SYS_ICC_HPPIR1_EL1, CGT_ICH_HCR_TALL1),
1174+
SR_TRAP(SYS_ICC_IAR0_EL1, CGT_ICH_HCR_TALL0),
1175+
SR_TRAP(SYS_ICC_IAR1_EL1, CGT_ICH_HCR_TALL1),
1176+
SR_TRAP(SYS_ICC_IGRPEN0_EL1, CGT_ICH_HCR_TALL0),
1177+
SR_TRAP(SYS_ICC_IGRPEN1_EL1, CGT_ICH_HCR_TALL1),
1178+
SR_TRAP(SYS_ICC_PMR_EL1, CGT_ICH_HCR_TC),
1179+
SR_TRAP(SYS_ICC_RPR_EL1, CGT_ICH_HCR_TC),
11191180
};
11201181

11211182
static DEFINE_XARRAY(sr_forward_xa);

arch/arm64/kvm/hyp/vgic-v3-sr.c

Lines changed: 91 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -268,8 +268,16 @@ void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
268268
* starting to mess with the rest of the GIC, and VMCR_EL2 in
269269
* particular. This logic must be called before
270270
* __vgic_v3_restore_state().
271+
*
272+
* However, if the vgic is disabled (ICH_HCR_EL2.EN==0), no GIC is
273+
* provisioned at all. In order to prevent illegal accesses to the
274+
* system registers to trap to EL1 (duh), force ICC_SRE_EL1.SRE to 1
275+
* so that the trap bits can take effect. Yes, we *loves* the GIC.
271276
*/
272-
if (!cpu_if->vgic_sre) {
277+
if (!(cpu_if->vgic_hcr & ICH_HCR_EN)) {
278+
write_gicreg(ICC_SRE_EL1_SRE, ICC_SRE_EL1);
279+
isb();
280+
} else if (!cpu_if->vgic_sre) {
273281
write_gicreg(0, ICC_SRE_EL1);
274282
isb();
275283
write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
@@ -288,19 +296,21 @@ void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
288296
}
289297

290298
/*
291-
* Prevent the guest from touching the GIC system registers if
292-
* SRE isn't enabled for GICv3 emulation.
299+
* Prevent the guest from touching the ICC_SRE_EL1 system
300+
* register. Note that this may not have any effect, as
301+
* ICC_SRE_EL2.Enable being RAO/WI is a valid implementation.
293302
*/
294303
write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
295304
ICC_SRE_EL2);
296305

297306
/*
298307
* If we need to trap system registers, we must write
299308
* ICH_HCR_EL2 anyway, even if no interrupts are being
300-
* injected,
309+
* injected. Note that this also applies if we don't expect
310+
* any system register access (no vgic at all).
301311
*/
302312
if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
303-
cpu_if->its_vpe.its_vm)
313+
cpu_if->its_vpe.its_vm || !cpu_if->vgic_sre)
304314
write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
305315
}
306316

@@ -326,7 +336,7 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
326336
* no interrupts were being injected, and we disable it again here.
327337
*/
328338
if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
329-
cpu_if->its_vpe.its_vm)
339+
cpu_if->its_vpe.its_vm || !cpu_if->vgic_sre)
330340
write_gicreg(0, ICH_HCR_EL2);
331341
}
332342

@@ -1032,6 +1042,75 @@ static void __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
10321042
write_gicreg(vmcr, ICH_VMCR_EL2);
10331043
}
10341044

1045+
static bool __vgic_v3_check_trap_forwarding(struct kvm_vcpu *vcpu,
1046+
u32 sysreg, bool is_read)
1047+
{
1048+
u64 ich_hcr;
1049+
1050+
if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
1051+
return false;
1052+
1053+
ich_hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
1054+
1055+
switch (sysreg) {
1056+
case SYS_ICC_IGRPEN0_EL1:
1057+
if (is_read &&
1058+
(__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1))
1059+
return true;
1060+
1061+
if (!is_read &&
1062+
(__vcpu_sys_reg(vcpu, HFGWTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1))
1063+
return true;
1064+
1065+
fallthrough;
1066+
1067+
case SYS_ICC_AP0Rn_EL1(0):
1068+
case SYS_ICC_AP0Rn_EL1(1):
1069+
case SYS_ICC_AP0Rn_EL1(2):
1070+
case SYS_ICC_AP0Rn_EL1(3):
1071+
case SYS_ICC_BPR0_EL1:
1072+
case SYS_ICC_EOIR0_EL1:
1073+
case SYS_ICC_HPPIR0_EL1:
1074+
case SYS_ICC_IAR0_EL1:
1075+
return ich_hcr & ICH_HCR_TALL0;
1076+
1077+
case SYS_ICC_IGRPEN1_EL1:
1078+
if (is_read &&
1079+
(__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1))
1080+
return true;
1081+
1082+
if (!is_read &&
1083+
(__vcpu_sys_reg(vcpu, HFGWTR_EL2) & HFGxTR_EL2_ICC_IGRPENn_EL1))
1084+
return true;
1085+
1086+
fallthrough;
1087+
1088+
case SYS_ICC_AP1Rn_EL1(0):
1089+
case SYS_ICC_AP1Rn_EL1(1):
1090+
case SYS_ICC_AP1Rn_EL1(2):
1091+
case SYS_ICC_AP1Rn_EL1(3):
1092+
case SYS_ICC_BPR1_EL1:
1093+
case SYS_ICC_EOIR1_EL1:
1094+
case SYS_ICC_HPPIR1_EL1:
1095+
case SYS_ICC_IAR1_EL1:
1096+
return ich_hcr & ICH_HCR_TALL1;
1097+
1098+
case SYS_ICC_DIR_EL1:
1099+
if (ich_hcr & ICH_HCR_TDIR)
1100+
return true;
1101+
1102+
fallthrough;
1103+
1104+
case SYS_ICC_RPR_EL1:
1105+
case SYS_ICC_CTLR_EL1:
1106+
case SYS_ICC_PMR_EL1:
1107+
return ich_hcr & ICH_HCR_TC;
1108+
1109+
default:
1110+
return false;
1111+
}
1112+
}
1113+
10351114
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
10361115
{
10371116
int rt;
@@ -1041,6 +1120,9 @@ int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
10411120
bool is_read;
10421121
u32 sysreg;
10431122

1123+
if (kern_hyp_va(vcpu->kvm)->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
1124+
return 0;
1125+
10441126
esr = kvm_vcpu_get_esr(vcpu);
10451127
if (vcpu_mode_is_32bit(vcpu)) {
10461128
if (!kvm_condition_valid(vcpu)) {
@@ -1055,6 +1137,9 @@ int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
10551137

10561138
is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;
10571139

1140+
if (__vgic_v3_check_trap_forwarding(vcpu, sysreg, is_read))
1141+
return 0;
1142+
10581143
switch (sysreg) {
10591144
case SYS_ICC_IAR0_EL1:
10601145
case SYS_ICC_IAR1_EL1:

arch/arm64/kvm/nested.c

Lines changed: 5 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -954,19 +954,16 @@ static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
954954
int kvm_init_nv_sysregs(struct kvm *kvm)
955955
{
956956
u64 res0, res1;
957-
int ret = 0;
958957

959-
mutex_lock(&kvm->arch.config_lock);
958+
lockdep_assert_held(&kvm->arch.config_lock);
960959

961960
if (kvm->arch.sysreg_masks)
962-
goto out;
961+
return 0;
963962

964963
kvm->arch.sysreg_masks = kzalloc(sizeof(*(kvm->arch.sysreg_masks)),
965964
GFP_KERNEL_ACCOUNT);
966-
if (!kvm->arch.sysreg_masks) {
967-
ret = -ENOMEM;
968-
goto out;
969-
}
965+
if (!kvm->arch.sysreg_masks)
966+
return -ENOMEM;
970967

971968
limit_nv_id_regs(kvm);
972969

@@ -1195,8 +1192,6 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
11951192
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
11961193
res0 |= ~(res0 | res1);
11971194
set_sysreg_masks(kvm, HAFGRTR_EL2, res0, res1);
1198-
out:
1199-
mutex_unlock(&kvm->arch.config_lock);
12001195

1201-
return ret;
1196+
return 0;
12021197
}

0 commit comments

Comments
 (0)