
Commit 3318e42

Merge branch 'kvm-arm64/doublefault2' into kvmarm/next
* kvm-arm64/doublefault2: (33 commits)
  : NV Support for FEAT_RAS + DoubleFault2
  :
  : Delegate the vSError context to the guest hypervisor when in a nested
  : state, including registers related to ESR propagation. Additionally,
  : catch up KVM's external abort infrastructure to the architecture,
  : implementing the effects of FEAT_DoubleFault2.
  :
  : This has some impact on non-nested guests, as SErrors deemed unmasked at
  : the time they're made pending are now immediately injected with an
  : emulated exception entry rather than using the VSE bit.
  KVM: arm64: Make RAS registers UNDEF when RAS isn't advertised
  KVM: arm64: Filter out HCR_EL2 bits when running in hypervisor context
  KVM: arm64: Check for SYSREGS_ON_CPU before accessing the CPU state
  KVM: arm64: Commit exceptions from KVM_SET_VCPU_EVENTS immediately
  KVM: arm64: selftests: Test ESR propagation for vSError injection
  KVM: arm64: Populate ESR_ELx.EC for emulated SError injection
  KVM: arm64: selftests: Catch up set_id_regs with the kernel
  KVM: arm64: selftests: Add SCTLR2_EL1 to get-reg-list
  KVM: arm64: selftests: Test SEAs are taken to SError vector when EASE=1
  KVM: arm64: selftests: Add basic SError injection test
  KVM: arm64: Don't retire MMIO instruction w/ pending (emulated) SError
  KVM: arm64: Advertise support for FEAT_DoubleFault2
  KVM: arm64: Advertise support for FEAT_SCTLR2
  KVM: arm64: nv: Enable vSErrors when HCRX_EL2.TMEA is set
  KVM: arm64: nv: Honor SError routing effects of SCTLR2_ELx.NMEA
  KVM: arm64: nv: Take "masked" aborts to EL2 when HCRX_EL2.TMEA is set
  KVM: arm64: Route SEAs to the SError vector when EASE is set
  KVM: arm64: nv: Ensure Address size faults affect correct ESR
  KVM: arm64: Factor out helper for selecting exception target EL
  KVM: arm64: Describe SCTLR2_ELx RESx masks
  ...

Signed-off-by: Oliver Upton <[email protected]>
2 parents c535d13 + d9c5c23

30 files changed: 983 additions & 333 deletions
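The note about unmasked SErrors is the user-visible behavioral change in this merge. Previously, KVM pended a guest SError by setting HCR_EL2.VSE and letting the CPU deliver it whenever the guest unmasked aborts; with this series, an SError that is already unmasked when it is made pending is delivered immediately by emulating the exception entry. A minimal sketch of that decision, using hypothetical helper names rather than code from the series:

static void deliver_serror(struct kvm_vcpu *vcpu, u64 esr)
{
	/*
	 * Sketch only: serror_unmasked() and emulate_serror_entry() are
	 * hypothetical stand-ins for the new logic, not KVM symbols.
	 */
	if (serror_unmasked(vcpu))
		emulate_serror_entry(vcpu, esr);	/* immediate, precise ESR */
	else
		*vcpu_hcr(vcpu) |= HCR_VSE;	/* hardware injects on unmask */
}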

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 48 additions & 3 deletions
@@ -45,16 +45,39 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
 void kvm_skip_instr32(struct kvm_vcpu *vcpu);
 
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
-void kvm_inject_vabt(struct kvm_vcpu *vcpu);
-void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr);
+int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
 void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
 
+static inline int kvm_inject_sea_dabt(struct kvm_vcpu *vcpu, u64 addr)
+{
+	return kvm_inject_sea(vcpu, false, addr);
+}
+
+static inline int kvm_inject_sea_iabt(struct kvm_vcpu *vcpu, u64 addr)
+{
+	return kvm_inject_sea(vcpu, true, addr);
+}
+
+static inline int kvm_inject_serror(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * ESR_ELx.ISV (later renamed to IDS) indicates whether or not
+	 * ESR_ELx.ISS contains IMPLEMENTATION DEFINED syndrome information.
+	 *
+	 * Set the bit when injecting an SError w/o an ESR to indicate ISS
+	 * does not follow the architected format.
+	 */
+	return kvm_inject_serror_esr(vcpu, ESR_ELx_ISV);
+}
+
 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
 
 void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
 int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
 int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
+int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
+int kvm_inject_nested_serror(struct kvm_vcpu *vcpu, u64 esr);
 
 static inline void kvm_inject_nested_sve_trap(struct kvm_vcpu *vcpu)
 {

@@ -195,6 +218,11 @@ static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
 	return ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2) & HCR_TGE;
 }
 
+static inline bool vcpu_el2_amo_is_set(const struct kvm_vcpu *vcpu)
+{
+	return ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2) & HCR_AMO;
+}
+
 static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
 {
 	bool e2h, tge;

@@ -224,6 +252,20 @@ static inline bool vcpu_is_host_el0(const struct kvm_vcpu *vcpu)
 	return is_hyp_ctxt(vcpu) && !vcpu_is_el2(vcpu);
 }
 
+static inline bool is_nested_ctxt(struct kvm_vcpu *vcpu)
+{
+	return vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
+}
+
+static inline bool vserror_state_is_nested(struct kvm_vcpu *vcpu)
+{
+	if (!is_nested_ctxt(vcpu))
+		return false;
+
+	return vcpu_el2_amo_is_set(vcpu) ||
+	       (__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA);
+}
+
 /*
  * The layout of SPSR for an AArch32 state is different when observed from an
  * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32

@@ -627,6 +669,9 @@ static inline void vcpu_set_hcrx(struct kvm_vcpu *vcpu)
 
 		if (kvm_has_fpmr(kvm))
 			vcpu->arch.hcrx_el2 |= HCRX_EL2_EnFPM;
+
+		if (kvm_has_sctlr2(kvm))
+			vcpu->arch.hcrx_el2 |= HCRX_EL2_SCTLR2En;
 	}
 }
 #endif /* __ARM64_KVM_EMULATE_H__ */
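Taken together, these changes replace the old void injection helpers (kvm_inject_vabt/dabt/pabt) with int-returning SEA/SError helpers, so callers can propagate the outcome of a possible nested injection. A hypothetical caller, to show the intended shape (not code from this series):

/* Hypothetical example: reporting a synchronous external abort. */
static int report_external_abort(struct kvm_vcpu *vcpu, u64 addr, bool is_fetch)
{
	if (is_fetch)
		return kvm_inject_sea_iabt(vcpu, addr);	/* instruction abort */

	return kvm_inject_sea_dabt(vcpu, addr);		/* data abort */
}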

arch/arm64/include/asm/kvm_host.h

Lines changed: 33 additions & 3 deletions
@@ -523,6 +523,7 @@ enum vcpu_sysreg {
 	/* Anything from this can be RES0/RES1 sanitised */
 	MARKER(__SANITISED_REG_START__),
 	TCR2_EL2,	/* Extended Translation Control Register (EL2) */
+	SCTLR2_EL2,	/* System Control Register 2 (EL2) */
 	MDCR_EL2,	/* Monitor Debug Configuration Register (EL2) */
 	CNTHCTL_EL2,	/* Counter-timer Hypervisor Control register */
 

@@ -537,6 +538,7 @@ enum vcpu_sysreg {
 	VNCR(TTBR1_EL1),/* Translation Table Base Register 1 */
 	VNCR(TCR_EL1),	/* Translation Control Register */
 	VNCR(TCR2_EL1),	/* Extended Translation Control Register */
+	VNCR(SCTLR2_EL1), /* System Control Register 2 */
 	VNCR(ESR_EL1),	/* Exception Syndrome Register */
 	VNCR(AFSR0_EL1),/* Auxiliary Fault Status Register 0 */
 	VNCR(AFSR1_EL1),/* Auxiliary Fault Status Register 1 */

@@ -565,6 +567,10 @@ enum vcpu_sysreg {
 
 	VNCR(POR_EL1),	/* Permission Overlay Register 1 (EL1) */
 
+	/* FEAT_RAS registers */
+	VNCR(VDISR_EL2),
+	VNCR(VSESR_EL2),
+
 	VNCR(HFGRTR_EL2),
 	VNCR(HFGWTR_EL2),
 	VNCR(HFGITR_EL2),

@@ -817,7 +823,7 @@ struct kvm_vcpu_arch {
 	u8 iflags;
 
 	/* State flags for kernel bookkeeping, unused by the hypervisor code */
-	u8 sflags;
+	u16 sflags;
 
 	/*
 	 * Don't run the guest (internal implementation need).

@@ -953,9 +959,21 @@ struct kvm_vcpu_arch {
 		__vcpu_flags_preempt_enable();			\
 	} while (0)
 
+#define __vcpu_test_and_clear_flag(v, flagset, f, m)		\
+	({							\
+		typeof(v->arch.flagset) set;			\
+								\
+		set = __vcpu_get_flag(v, flagset, f, m);	\
+		__vcpu_clear_flag(v, flagset, f, m);		\
+								\
+		set;						\
+	})
+
 #define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
 #define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
 #define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
+#define vcpu_test_and_clear_flag(v, ...)			\
+	__vcpu_test_and_clear_flag((v), __VA_ARGS__)
 
 /* KVM_ARM_VCPU_INIT completed */
 #define VCPU_INITIALIZED	__vcpu_single_flag(cflags, BIT(0))

@@ -1015,6 +1033,8 @@ struct kvm_vcpu_arch {
 #define IN_WFI			__vcpu_single_flag(sflags, BIT(6))
 /* KVM is currently emulating a nested ERET */
 #define IN_NESTED_ERET		__vcpu_single_flag(sflags, BIT(7))
+/* SError pending for nested guest */
+#define NESTED_SERROR_PENDING	__vcpu_single_flag(sflags, BIT(8))
 
 
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */

@@ -1149,6 +1169,8 @@ static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
 	 * System registers listed in the switch are not saved on every
 	 * exit from the guest but are only saved on vcpu_put.
 	 *
+	 * SYSREGS_ON_CPU *MUST* be checked before using this helper.
+	 *
 	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
 	 * should never be listed below, because the guest cannot modify its
 	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's

@@ -1186,6 +1208,7 @@ static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
 	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
 	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
 	case ZCR_EL1:		*val = read_sysreg_s(SYS_ZCR_EL12);	break;
+	case SCTLR2_EL1:	*val = read_sysreg_s(SYS_SCTLR2_EL12);	break;
 	default:		return false;
 	}
 

@@ -1200,6 +1223,8 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
 	 * System registers listed in the switch are not restored on every
 	 * entry to the guest but are only restored on vcpu_load.
 	 *
+	 * SYSREGS_ON_CPU *MUST* be checked before using this helper.
+	 *
 	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
 	 * should never be listed below, because the MPIDR should only be set
 	 * once, before running the VCPU, and never changed later.

@@ -1236,6 +1261,7 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
 	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
 	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
 	case ZCR_EL1:		write_sysreg_s(val, SYS_ZCR_EL12);	break;
+	case SCTLR2_EL1:	write_sysreg_s(val, SYS_SCTLR2_EL12);	break;
 	default:		return false;
 	}
 

@@ -1387,8 +1413,6 @@ static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
 	return (vcpu_arch->steal.base != INVALID_GPA);
 }
 
-void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
-
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
 DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

@@ -1666,6 +1690,12 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
 #define kvm_has_s1poe(k)				\
 	(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
 
+#define kvm_has_ras(k)					\
+	(kvm_has_feat((k), ID_AA64PFR0_EL1, RAS, IMP))
+
+#define kvm_has_sctlr2(k)				\
+	(kvm_has_feat((k), ID_AA64MMFR3_EL1, SCTLRX, IMP))
+
 static inline bool kvm_arch_has_irq_bypass(void)
 {
 	return true;
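Of these changes, __vcpu_test_and_clear_flag() is the main new primitive: a statement expression that returns the flag's previous value while clearing it, enabling one-shot consumption of state such as the new NESTED_SERROR_PENDING bit (which is also why sflags grows from u8 to u16: BIT(8) no longer fits in 8 bits). A hedged sketch of a consumer:

/* Sketch: consume a pending nested SError exactly once per run.
 * Sourcing the ESR from VSESR_EL2 is an assumption, not this series' code.
 */
static void consume_nested_serror(struct kvm_vcpu *vcpu)
{
	if (vcpu_test_and_clear_flag(vcpu, NESTED_SERROR_PENDING))
		kvm_inject_serror_esr(vcpu, __vcpu_sys_reg(vcpu, VSESR_EL2));
}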

arch/arm64/include/asm/kvm_nested.h

Lines changed: 2 additions & 0 deletions
@@ -80,6 +80,8 @@ extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
 extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);
 
 extern void check_nested_vcpu_requests(struct kvm_vcpu *vcpu);
+extern void kvm_nested_flush_hwstate(struct kvm_vcpu *vcpu);
+extern void kvm_nested_sync_hwstate(struct kvm_vcpu *vcpu);
 
 struct kvm_s2_trans {
 	phys_addr_t output;
phys_addr_t output;

arch/arm64/include/asm/vncr_mapping.h

Lines changed: 2 additions & 0 deletions
@@ -51,6 +51,7 @@
 #define VNCR_SP_EL1		0x240
 #define VNCR_VBAR_EL1		0x250
 #define VNCR_TCR2_EL1		0x270
+#define VNCR_SCTLR2_EL1		0x278
 #define VNCR_PIRE0_EL1		0x290
 #define VNCR_PIR_EL1		0x2A0
 #define VNCR_POR_EL1		0x2A8

@@ -84,6 +85,7 @@
 #define VNCR_ICH_HCR_EL2	0x4C0
 #define VNCR_ICH_VMCR_EL2	0x4C8
 #define VNCR_VDISR_EL2		0x500
+#define VNCR_VSESR_EL2		0x508
 #define VNCR_PMBLIMITR_EL1	0x800
 #define VNCR_PMBPTR_EL1		0x810
 #define VNCR_PMBSR_EL1		0x820

arch/arm64/kernel/cpufeature.c

Lines changed: 9 additions & 0 deletions
@@ -303,6 +303,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_DF2_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_GCS),
 		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_GCS_SHIFT, 4, 0),
 	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_frac_SHIFT, 4, 0),

@@ -500,6 +501,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr3[] = {
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_POE),
 		       FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_S1POE_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_S1PIE_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_SCTLRX_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_TCRX_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };

@@ -3061,6 +3063,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_pmuv3,
 	},
 #endif
+	{
+		.desc = "SCTLR2",
+		.capability = ARM64_HAS_SCTLR2,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_cpuid_feature,
+		ARM64_CPUID_FIELDS(ID_AA64MMFR3_EL1, SCTLRX, IMP)
+	},
 	{},
 };
 
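With ARM64_HAS_SCTLR2 established as a system capability, kernel code can gate SCTLR2_ELx accesses on it through the standard cpufeature API. A sketch; the surrounding function is invented for illustration:

static u64 read_sctlr2_if_present(void)
{
	/* Only touch the register once the capability is finalized. */
	if (!cpus_have_final_cap(ARM64_HAS_SCTLR2))
		return 0;

	return read_sysreg_s(SYS_SCTLR2_EL1);
}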

arch/arm64/kvm/arch_timer.c

Lines changed: 1 addition & 1 deletion
@@ -830,7 +830,7 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
 	 * by the guest (either FEAT_VHE or FEAT_E2H0 is implemented, but
 	 * not both). This simplifies the handling of the EL1NV* bits.
 	 */
-	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+	if (is_nested_ctxt(vcpu)) {
 		u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
 
 		/* Use the VHE format for mental sanity */

arch/arm64/kvm/arm.c

Lines changed: 7 additions & 2 deletions
@@ -528,7 +528,7 @@ static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
 	 * Either we're running an L2 guest, and the API/APK bits come
 	 * from L1's HCR_EL2, or API/APK are both set.
 	 */
-	if (unlikely(vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))) {
+	if (unlikely(is_nested_ctxt(vcpu))) {
 		u64 val;
 
 		val = __vcpu_sys_reg(vcpu, HCR_EL2);

@@ -747,7 +747,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
  */
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
-	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
+	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF | HCR_VSE);
+
 	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
 		&& !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
 }

@@ -1194,6 +1195,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		 */
 		preempt_disable();
 
+		kvm_nested_flush_hwstate(vcpu);
+
 		if (kvm_vcpu_has_pmu(vcpu))
 			kvm_pmu_flush_hwstate(vcpu);
 

@@ -1293,6 +1296,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		/* Exit types that need handling before we can be preempted */
 		handle_exit_early(vcpu, ret);
 
+		kvm_nested_sync_hwstate(vcpu);
+
 		preempt_enable();
 
 		/*
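Two points are easy to miss here. First, adding HCR_VSE to kvm_arch_vcpu_runnable() means a pending virtual SError now counts as a wake event for a halted vCPU, just like pending virtual IRQs and FIQs. Second, the new calls bracket the guest run under preempt_disable(); in outline (a paraphrase of the loop above, not new code):

	preempt_disable();
	kvm_nested_flush_hwstate(vcpu);	/* deliver pending nested SErrors */
	/* ... PMU/vGIC flush, guest entry, exit handling ... */
	handle_exit_early(vcpu, ret);
	kvm_nested_sync_hwstate(vcpu);	/* recapture nested vSError state */
	preempt_enable();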

arch/arm64/kvm/config.c

Lines changed: 28 additions & 0 deletions
@@ -131,6 +131,8 @@ struct reg_bits_to_feat_map {
 #define FEAT_SPMU	ID_AA64DFR1_EL1, SPMU, IMP
 #define FEAT_SPE_nVM	ID_AA64DFR2_EL1, SPE_nVM, IMP
 #define FEAT_STEP2	ID_AA64DFR2_EL1, STEP, IMP
+#define FEAT_SYSREG128	ID_AA64ISAR2_EL1, SYSREG_128, IMP
+#define FEAT_CPA2	ID_AA64ISAR3_EL1, CPA, CPA2
 
 static bool not_feat_aa64el3(struct kvm *kvm)
 {

@@ -832,6 +834,23 @@ static const struct reg_bits_to_feat_map hcr_feat_map[] = {
 	NEEDS_FEAT_FIXED(HCR_EL2_E2H, compute_hcr_e2h),
 };
 
+static const struct reg_bits_to_feat_map sctlr2_feat_map[] = {
+	NEEDS_FEAT(SCTLR2_EL1_NMEA	|
+		   SCTLR2_EL1_EASE,
+		   FEAT_DoubleFault2),
+	NEEDS_FEAT(SCTLR2_EL1_EnADERR, feat_aderr),
+	NEEDS_FEAT(SCTLR2_EL1_EnANERR, feat_anerr),
+	NEEDS_FEAT(SCTLR2_EL1_EnIDCP128, FEAT_SYSREG128),
+	NEEDS_FEAT(SCTLR2_EL1_EnPACM	|
+		   SCTLR2_EL1_EnPACM0,
+		   feat_pauth_lr),
+	NEEDS_FEAT(SCTLR2_EL1_CPTA	|
+		   SCTLR2_EL1_CPTA0	|
+		   SCTLR2_EL1_CPTM	|
+		   SCTLR2_EL1_CPTM0,
+		   FEAT_CPA2),
+};
+
 static void __init check_feat_map(const struct reg_bits_to_feat_map *map,
 				  int map_size, u64 res0, const char *str)
 {

@@ -863,6 +882,8 @@ void __init check_feature_map(void)
 		       __HCRX_EL2_RES0, "HCRX_EL2");
 	check_feat_map(hcr_feat_map, ARRAY_SIZE(hcr_feat_map),
 		       HCR_EL2_RES0, "HCR_EL2");
+	check_feat_map(sctlr2_feat_map, ARRAY_SIZE(sctlr2_feat_map),
+		       SCTLR2_EL1_RES0, "SCTLR2_EL1");
 }
 
 static bool idreg_feat_match(struct kvm *kvm, const struct reg_bits_to_feat_map *map)

@@ -1077,6 +1098,13 @@ void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1)
 		*res0 |= HCR_EL2_RES0 | (mask & ~fixed);
 		*res1 = HCR_EL2_RES1 | (mask & fixed);
 		break;
+	case SCTLR2_EL1:
+	case SCTLR2_EL2:
+		*res0 = compute_res0_bits(kvm, sctlr2_feat_map,
+					  ARRAY_SIZE(sctlr2_feat_map), 0, 0);
+		*res0 |= SCTLR2_EL1_RES0;
+		*res1 = SCTLR2_EL1_RES1;
+		break;
 	default:
 		WARN_ON_ONCE(1);
 		*res0 = *res1 = 0;
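get_reg_fixed_bits() feeds KVM's register sanitisation: any bit in sctlr2_feat_map whose feature the VM lacks becomes RES0. Conceptually, a guest write to SCTLR2_ELx is then masked like this (illustrative only; the wrapper function is invented):

static u64 sanitise_sctlr2_write(struct kvm *kvm, u64 val)
{
	u64 res0, res1;

	get_reg_fixed_bits(kvm, SCTLR2_EL1, &res0, &res1);
	return (val & ~res0) | res1;	/* RES0 bits clear, RES1 bits set */
}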
