Skip to content

Commit 080612b

Browse files
author
Marc Zyngier
committed
Merge branch kvm-arm64/nv-timers into kvmarm-master/next
* kvm-arm64/nv-timers: : . : Nested Virt support for the EL2 timers. From the initial cover letter: : : "Here's another batch of NV-related patches, this time bringing in most : of the timer support for EL2 as well as nested guests. : : The code is pretty convoluted for a bunch of reasons: : : - FEAT_NV2 breaks the timer semantics by redirecting HW controls to : memory, meaning that a guest could setup a timer and never see it : firing until the next exit : : - We go try hard to reflect the timer state in memory, but that's not : great. : : - With FEAT_ECV, we can finally correctly emulate the virtual timer, : but this emulation is pretty costly : : - As a way to make things suck less, we handle timer reads as early as : possible, and only defer writes to the normal trap handling : : - Finally, some implementations are badly broken, and require some : hand-holding, irrespective of NV support. So we try and reuse the NV : infrastructure to make them usable. This could be further optimised, : but I'm running out of patience for this sort of HW. : : [...]" : . KVM: arm64: nv: Fix doc header layout for timers KVM: arm64: nv: Document EL2 timer API KVM: arm64: Work around x1e's CNTVOFF_EL2 bogosity KVM: arm64: nv: Sanitise CNTHCTL_EL2 KVM: arm64: nv: Propagate CNTHCTL_EL2.EL1NV{P,V}CT bits KVM: arm64: nv: Add trap routing for CNTHCTL_EL2.EL1{NVPCT,NVVCT,TVT,TVCT} KVM: arm64: Handle counter access early in non-HYP context KVM: arm64: nv: Accelerate EL0 counter accesses from hypervisor context KVM: arm64: nv: Accelerate EL0 timer read accesses when FEAT_ECV in use KVM: arm64: nv: Use FEAT_ECV to trap access to EL0 timers KVM: arm64: nv: Publish emulated timer interrupt state in the in-memory state KVM: arm64: nv: Sync nested timer state with FEAT_NV2 KVM: arm64: nv: Add handling of EL2-specific timer registers Signed-off-by: Marc Zyngier <[email protected]>
2 parents e880b16 + 5447863 commit 080612b

File tree

17 files changed

+579
-47
lines changed

17 files changed

+579
-47
lines changed

Documentation/virt/kvm/devices/vcpu.rst

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -142,8 +142,8 @@ the cpu field to the processor id.
142142

143143
:Architectures: ARM64
144144

145-
2.1. ATTRIBUTES: KVM_ARM_VCPU_TIMER_IRQ_VTIMER, KVM_ARM_VCPU_TIMER_IRQ_PTIMER
146-
-----------------------------------------------------------------------------
145+
2.1. ATTRIBUTES: KVM_ARM_VCPU_TIMER_IRQ_{VTIMER,PTIMER,HVTIMER,HPTIMER}
146+
-----------------------------------------------------------------------
147147

148148
:Parameters: in kvm_device_attr.addr the address for the timer interrupt is a
149149
pointer to an int
@@ -159,10 +159,12 @@ A value describing the architected timer interrupt number when connected to an
159159
in-kernel virtual GIC. These must be a PPI (16 <= intid < 32). Setting the
160160
attribute overrides the default values (see below).
161161

162-
============================= ==========================================
163-
KVM_ARM_VCPU_TIMER_IRQ_VTIMER The EL1 virtual timer intid (default: 27)
164-
KVM_ARM_VCPU_TIMER_IRQ_PTIMER The EL1 physical timer intid (default: 30)
165-
============================= ==========================================
162+
============================== ==========================================
163+
KVM_ARM_VCPU_TIMER_IRQ_VTIMER The EL1 virtual timer intid (default: 27)
164+
KVM_ARM_VCPU_TIMER_IRQ_PTIMER The EL1 physical timer intid (default: 30)
165+
KVM_ARM_VCPU_TIMER_IRQ_HVTIMER The EL2 virtual timer intid (default: 28)
166+
KVM_ARM_VCPU_TIMER_IRQ_HPTIMER The EL2 physical timer intid (default: 26)
167+
============================== ==========================================
166168

167169
Setting the same PPI for different timers will prevent the VCPUs from running.
168170
Setting the interrupt number on a VCPU configures all VCPUs created at that

arch/arm64/include/asm/cputype.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -122,6 +122,7 @@
122122
#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
123123
#define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804
124124
#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
125+
#define QCOM_CPU_PART_ORYON_X1 0x001
125126

126127
#define NVIDIA_CPU_PART_DENVER 0x003
127128
#define NVIDIA_CPU_PART_CARMEL 0x004
@@ -198,6 +199,7 @@
198199
#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
199200
#define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
200201
#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
202+
#define MIDR_QCOM_ORYON_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_ORYON_X1)
201203
#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
202204
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
203205
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)

arch/arm64/include/asm/kvm_host.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -493,7 +493,6 @@ enum vcpu_sysreg {
493493
VBAR_EL2, /* Vector Base Address Register (EL2) */
494494
RVBAR_EL2, /* Reset Vector Base Address Register */
495495
CONTEXTIDR_EL2, /* Context ID Register (EL2) */
496-
CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */
497496
SP_EL2, /* EL2 Stack Pointer */
498497
CNTHP_CTL_EL2,
499498
CNTHP_CVAL_EL2,
@@ -504,6 +503,7 @@ enum vcpu_sysreg {
504503
MARKER(__SANITISED_REG_START__),
505504
TCR2_EL2, /* Extended Translation Control Register (EL2) */
506505
MDCR_EL2, /* Monitor Debug Configuration Register (EL2) */
506+
CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */
507507

508508
/* Any VNCR-capable reg goes after this point */
509509
MARKER(__VNCR_START__),

arch/arm64/include/asm/sysreg.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -477,21 +477,25 @@
477477
#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
478478

479479
#define SYS_CNTPCT_EL0 sys_reg(3, 3, 14, 0, 1)
480+
#define SYS_CNTVCT_EL0 sys_reg(3, 3, 14, 0, 2)
480481
#define SYS_CNTPCTSS_EL0 sys_reg(3, 3, 14, 0, 5)
481482
#define SYS_CNTVCTSS_EL0 sys_reg(3, 3, 14, 0, 6)
482483

483484
#define SYS_CNTP_TVAL_EL0 sys_reg(3, 3, 14, 2, 0)
484485
#define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1)
485486
#define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2)
486487

488+
#define SYS_CNTV_TVAL_EL0 sys_reg(3, 3, 14, 3, 0)
487489
#define SYS_CNTV_CTL_EL0 sys_reg(3, 3, 14, 3, 1)
488490
#define SYS_CNTV_CVAL_EL0 sys_reg(3, 3, 14, 3, 2)
489491

490492
#define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0)
491493
#define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1)
492494
#define SYS_AARCH32_CNTPCT sys_reg(0, 0, 0, 14, 0)
495+
#define SYS_AARCH32_CNTVCT sys_reg(0, 1, 0, 14, 0)
493496
#define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0)
494497
#define SYS_AARCH32_CNTPCTSS sys_reg(0, 8, 0, 14, 0)
498+
#define SYS_AARCH32_CNTVCTSS sys_reg(0, 9, 0, 14, 0)
495499

496500
#define __PMEV_op2(n) ((n) & 0x7)
497501
#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3))

arch/arm64/kernel/cpu_errata.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -786,6 +786,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
786786
ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
787787
},
788788
#endif
789+
{
790+
.desc = "Broken CNTVOFF_EL2",
791+
.capability = ARM64_WORKAROUND_QCOM_ORYON_CNTVOFF,
792+
ERRATA_MIDR_RANGE_LIST(((const struct midr_range[]) {
793+
MIDR_ALL_VERSIONS(MIDR_QCOM_ORYON_X1),
794+
{}
795+
})),
796+
},
789797
{
790798
}
791799
};

arch/arm64/kernel/image-vars.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -105,6 +105,9 @@ KVM_NVHE_ALIAS(__hyp_stub_vectors);
105105
KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
106106
KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
107107

108+
/* Static key which is set if CNTVOFF_EL2 is unusable */
109+
KVM_NVHE_ALIAS(broken_cntvoff_key);
110+
108111
/* EL2 exception handling */
109112
KVM_NVHE_ALIAS(__start___kvm_ex_table);
110113
KVM_NVHE_ALIAS(__stop___kvm_ex_table);

arch/arm64/kvm/arch_timer.c

Lines changed: 159 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ static u32 host_vtimer_irq_flags;
3030
static u32 host_ptimer_irq_flags;
3131

3232
static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
33+
DEFINE_STATIC_KEY_FALSE(broken_cntvoff_key);
3334

3435
static const u8 default_ppi[] = {
3536
[TIMER_PTIMER] = 30,
@@ -101,21 +102,6 @@ u64 timer_get_cval(struct arch_timer_context *ctxt)
101102
}
102103
}
103104

104-
static u64 timer_get_offset(struct arch_timer_context *ctxt)
105-
{
106-
u64 offset = 0;
107-
108-
if (!ctxt)
109-
return 0;
110-
111-
if (ctxt->offset.vm_offset)
112-
offset += *ctxt->offset.vm_offset;
113-
if (ctxt->offset.vcpu_offset)
114-
offset += *ctxt->offset.vcpu_offset;
115-
116-
return offset;
117-
}
118-
119105
static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
120106
{
121107
struct kvm_vcpu *vcpu = ctxt->vcpu;
@@ -441,11 +427,30 @@ void kvm_timer_update_run(struct kvm_vcpu *vcpu)
441427
regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
442428
}
443429

430+
static void kvm_timer_update_status(struct arch_timer_context *ctx, bool level)
431+
{
432+
/*
433+
* Paper over NV2 brokenness by publishing the interrupt status
434+
* bit. This still results in a poor quality of emulation (guest
435+
* writes will have no effect until the next exit).
436+
*
437+
* But hey, it's fast, right?
438+
*/
439+
if (is_hyp_ctxt(ctx->vcpu) &&
440+
(ctx == vcpu_vtimer(ctx->vcpu) || ctx == vcpu_ptimer(ctx->vcpu))) {
441+
unsigned long val = timer_get_ctl(ctx);
442+
__assign_bit(__ffs(ARCH_TIMER_CTRL_IT_STAT), &val, level);
443+
timer_set_ctl(ctx, val);
444+
}
445+
}
446+
444447
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
445448
struct arch_timer_context *timer_ctx)
446449
{
447450
int ret;
448451

452+
kvm_timer_update_status(timer_ctx, new_level);
453+
449454
timer_ctx->irq.level = new_level;
450455
trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx),
451456
timer_ctx->irq.level);
@@ -471,6 +476,8 @@ static void timer_emulate(struct arch_timer_context *ctx)
471476
return;
472477
}
473478

479+
kvm_timer_update_status(ctx, should_fire);
480+
474481
/*
475482
* If the timer can fire now, we don't need to have a soft timer
476483
* scheduled for the future. If the timer cannot fire at all,
@@ -513,7 +520,12 @@ static void timer_save_state(struct arch_timer_context *ctx)
513520
case TIMER_VTIMER:
514521
case TIMER_HVTIMER:
515522
timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
516-
timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));
523+
cval = read_sysreg_el0(SYS_CNTV_CVAL);
524+
525+
if (has_broken_cntvoff())
526+
cval -= timer_get_offset(ctx);
527+
528+
timer_set_cval(ctx, cval);
517529

518530
/* Disable the timer */
519531
write_sysreg_el0(0, SYS_CNTV_CTL);
@@ -618,8 +630,15 @@ static void timer_restore_state(struct arch_timer_context *ctx)
618630

619631
case TIMER_VTIMER:
620632
case TIMER_HVTIMER:
621-
set_cntvoff(timer_get_offset(ctx));
622-
write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
633+
cval = timer_get_cval(ctx);
634+
offset = timer_get_offset(ctx);
635+
if (has_broken_cntvoff()) {
636+
set_cntvoff(0);
637+
cval += offset;
638+
} else {
639+
set_cntvoff(offset);
640+
}
641+
write_sysreg_el0(cval, SYS_CNTV_CVAL);
623642
isb();
624643
write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
625644
break;
@@ -762,7 +781,7 @@ static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
762781

763782
static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
764783
{
765-
bool tpt, tpc;
784+
bool tvt, tpt, tvc, tpc, tvt02, tpt02;
766785
u64 clr, set;
767786

768787
/*
@@ -777,7 +796,29 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
777796
* within this function, reality kicks in and we start adding
778797
* traps based on emulation requirements.
779798
*/
780-
tpt = tpc = false;
799+
tvt = tpt = tvc = tpc = false;
800+
tvt02 = tpt02 = false;
801+
802+
/*
803+
* NV2 badly breaks the timer semantics by redirecting accesses to
804+
* the EL1 timer state to memory, so let's call ECV to the rescue if
805+
* available: we trap all CNT{P,V}_{CTL,CVAL,TVAL}_EL0 accesses.
806+
*
807+
* The treatment slightly varies depending whether we run a nVHE or
808+
* VHE guest: nVHE will use the _EL0 registers directly, while VHE
809+
* will use the _EL02 accessors. This translates in different trap
810+
* bits.
811+
*
812+
* None of the trapping is required when running in non-HYP context,
813+
* unless required by the L1 hypervisor settings once we advertise
814+
* ECV+NV in the guest, or that we need trapping for other reasons.
815+
*/
816+
if (cpus_have_final_cap(ARM64_HAS_ECV) && is_hyp_ctxt(vcpu)) {
817+
if (vcpu_el2_e2h_is_set(vcpu))
818+
tvt02 = tpt02 = true;
819+
else
820+
tvt = tpt = true;
821+
}
781822

782823
/*
783824
* We have two possibilities to deal with a physical offset:
@@ -792,10 +833,21 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
792833
if (!has_cntpoff() && timer_get_offset(map->direct_ptimer))
793834
tpt = tpc = true;
794835

836+
/*
837+
* For the poor sods that could not correctly subtract one value
838+
* from another, trap the full virtual timer and counter.
839+
*/
840+
if (has_broken_cntvoff() && timer_get_offset(map->direct_vtimer))
841+
tvt = tvc = true;
842+
795843
/*
796844
* Apply the enable bits that the guest hypervisor has requested for
797845
* its own guest. We can only add traps that wouldn't have been set
798846
* above.
847+
* Implementation choices: we do not support NV when E2H=0 in the
848+
* guest, and we don't support configuration where E2H is writable
849+
* by the guest (either FEAT_VHE or FEAT_E2H0 is implemented, but
850+
* not both). This simplifies the handling of the EL1NV* bits.
799851
*/
800852
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
801853
u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
@@ -806,6 +858,9 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
806858

807859
tpt |= !(val & (CNTHCTL_EL1PCEN << 10));
808860
tpc |= !(val & (CNTHCTL_EL1PCTEN << 10));
861+
862+
tpt02 |= (val & CNTHCTL_EL1NVPCT);
863+
tvt02 |= (val & CNTHCTL_EL1NVVCT);
809864
}
810865

811866
/*
@@ -817,6 +872,10 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
817872

818873
assign_clear_set_bit(tpt, CNTHCTL_EL1PCEN << 10, set, clr);
819874
assign_clear_set_bit(tpc, CNTHCTL_EL1PCTEN << 10, set, clr);
875+
assign_clear_set_bit(tvt, CNTHCTL_EL1TVT, clr, set);
876+
assign_clear_set_bit(tvc, CNTHCTL_EL1TVCT, clr, set);
877+
assign_clear_set_bit(tvt02, CNTHCTL_EL1NVVCT, clr, set);
878+
assign_clear_set_bit(tpt02, CNTHCTL_EL1NVPCT, clr, set);
820879

821880
/* This only happens on VHE, so use the CNTHCTL_EL2 accessor. */
822881
sysreg_clear_set(cnthctl_el2, clr, set);
@@ -905,6 +964,54 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
905964
kvm_timer_blocking(vcpu);
906965
}
907966

967+
void kvm_timer_sync_nested(struct kvm_vcpu *vcpu)
968+
{
969+
/*
970+
* When NV2 is on, guest hypervisors have their EL1 timer register
971+
* accesses redirected to the VNCR page. Any guest action taken on
972+
* the timer is postponed until the next exit, leading to a very
973+
* poor quality of emulation.
974+
*
975+
* This is an unmitigated disaster, only papered over by FEAT_ECV,
976+
* which allows trapping of the timer registers even with NV2.
977+
* Still, this is worse than FEAT_NV on its own. Meh.
978+
*/
979+
if (!vcpu_el2_e2h_is_set(vcpu)) {
980+
if (cpus_have_final_cap(ARM64_HAS_ECV))
981+
return;
982+
983+
/*
984+
* A non-VHE guest hypervisor doesn't have any direct access
985+
* to its timers: the EL2 registers trap (and the HW is
986+
* fully emulated), while the EL0 registers access memory
987+
* despite the access being notionally direct. Boo.
988+
*
989+
* We update the hardware timer registers with the
990+
* latest value written by the guest to the VNCR page
991+
* and let the hardware take care of the rest.
992+
*/
993+
write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTV_CTL_EL0), SYS_CNTV_CTL);
994+
write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0), SYS_CNTV_CVAL);
995+
write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTP_CTL_EL0), SYS_CNTP_CTL);
996+
write_sysreg_el0(__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0), SYS_CNTP_CVAL);
997+
} else {
998+
/*
999+
* For a VHE guest hypervisor, the EL2 state is directly
1000+
* stored in the host EL1 timers, while the emulated EL0
1001+
* state is stored in the VNCR page. The latter could have
1002+
* been updated behind our back, and we must reset the
1003+
* emulation of the timers.
1004+
*/
1005+
struct timer_map map;
1006+
get_timer_map(vcpu, &map);
1007+
1008+
soft_timer_cancel(&map.emul_vtimer->hrtimer);
1009+
soft_timer_cancel(&map.emul_ptimer->hrtimer);
1010+
timer_emulate(map.emul_vtimer);
1011+
timer_emulate(map.emul_ptimer);
1012+
}
1013+
}
1014+
9081015
/*
9091016
* With a userspace irqchip we have to check if the guest de-asserted the
9101017
* timer and if so, unmask the timer irq signal on the host interrupt
@@ -1363,6 +1470,37 @@ static int kvm_irq_init(struct arch_timer_kvm_info *info)
13631470
return 0;
13641471
}
13651472

1473+
static void kvm_timer_handle_errata(void)
1474+
{
1475+
u64 mmfr0, mmfr1, mmfr4;
1476+
1477+
/*
1478+
* CNTVOFF_EL2 is broken on some implementations. For those, we trap
1479+
* all virtual timer/counter accesses, requiring FEAT_ECV.
1480+
*
1481+
* However, a hypervisor supporting nesting is likely to mitigate the
1482+
* erratum at L0, and not require other levels to mitigate it (which
1483+
* would otherwise be a terrible performance sink due to trap
1484+
* amplification).
1485+
*
1486+
* Given that the affected HW implements both FEAT_VHE and FEAT_E2H0,
1487+
* and that NV is likely not to (because of limitations of the
1488+
* architecture), only enable the workaround when FEAT_VHE and
1489+
* FEAT_E2H0 are both detected. Time will tell if this actually holds.
1490+
*/
1491+
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
1492+
mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
1493+
mmfr4 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR4_EL1);
1494+
if (SYS_FIELD_GET(ID_AA64MMFR1_EL1, VH, mmfr1) &&
1495+
!SYS_FIELD_GET(ID_AA64MMFR4_EL1, E2H0, mmfr4) &&
1496+
SYS_FIELD_GET(ID_AA64MMFR0_EL1, ECV, mmfr0) &&
1497+
(has_vhe() || has_hvhe()) &&
1498+
cpus_have_final_cap(ARM64_WORKAROUND_QCOM_ORYON_CNTVOFF)) {
1499+
static_branch_enable(&broken_cntvoff_key);
1500+
kvm_info("Broken CNTVOFF_EL2, trapping virtual timer\n");
1501+
}
1502+
}
1503+
13661504
int __init kvm_timer_hyp_init(bool has_gic)
13671505
{
13681506
struct arch_timer_kvm_info *info;
@@ -1431,6 +1569,7 @@ int __init kvm_timer_hyp_init(bool has_gic)
14311569
goto out_free_vtimer_irq;
14321570
}
14331571

1572+
kvm_timer_handle_errata();
14341573
return 0;
14351574

14361575
out_free_ptimer_irq:

0 commit comments

Comments
 (0)