Skip to content

Commit 41ce82f

Browse files
author
Marc Zyngier
committed
KVM: arm64: timers: Move timer registers to the sys_regs file
Move the timer registers to the sysreg file. This will further help when they are directly changed by a nesting hypervisor in the VNCR page. This requires moving the initialisation of the timer struct so that some of the helpers (such as arch_timer_ctx_index) can work correctly at an early stage. Signed-off-by: Marc Zyngier <[email protected]>
1 parent 3c5ff0c commit 41ce82f

File tree

4 files changed

+136
-44
lines changed

4 files changed

+136
-44
lines changed

arch/arm64/include/asm/kvm_host.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -189,6 +189,12 @@ enum vcpu_sysreg {
189189
SP_EL1,
190190
SPSR_EL1,
191191

192+
CNTVOFF_EL2,
193+
CNTV_CVAL_EL0,
194+
CNTV_CTL_EL0,
195+
CNTP_CVAL_EL0,
196+
CNTP_CTL_EL0,
197+
192198
/* 32bit specific registers. Keep them at the end of the range */
193199
DACR32_EL2, /* Domain Access Control Register */
194200
IFSR32_EL2, /* Instruction Fault Status Register */

arch/arm64/kvm/arch_timer.c

Lines changed: 122 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,93 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
5151
struct arch_timer_context *timer,
5252
enum kvm_arch_timer_regs treg);
5353

54+
u32 timer_get_ctl(struct arch_timer_context *ctxt)
55+
{
56+
struct kvm_vcpu *vcpu = ctxt->vcpu;
57+
58+
switch(arch_timer_ctx_index(ctxt)) {
59+
case TIMER_VTIMER:
60+
return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
61+
case TIMER_PTIMER:
62+
return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
63+
default:
64+
WARN_ON(1);
65+
return 0;
66+
}
67+
}
68+
69+
u64 timer_get_cval(struct arch_timer_context *ctxt)
70+
{
71+
struct kvm_vcpu *vcpu = ctxt->vcpu;
72+
73+
switch(arch_timer_ctx_index(ctxt)) {
74+
case TIMER_VTIMER:
75+
return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
76+
case TIMER_PTIMER:
77+
return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
78+
default:
79+
WARN_ON(1);
80+
return 0;
81+
}
82+
}
83+
84+
static u64 timer_get_offset(struct arch_timer_context *ctxt)
85+
{
86+
struct kvm_vcpu *vcpu = ctxt->vcpu;
87+
88+
switch(arch_timer_ctx_index(ctxt)) {
89+
case TIMER_VTIMER:
90+
return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
91+
default:
92+
return 0;
93+
}
94+
}
95+
96+
static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
97+
{
98+
struct kvm_vcpu *vcpu = ctxt->vcpu;
99+
100+
switch(arch_timer_ctx_index(ctxt)) {
101+
case TIMER_VTIMER:
102+
__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
103+
break;
104+
case TIMER_PTIMER:
105+
__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
106+
break;
107+
default:
108+
WARN_ON(1);
109+
}
110+
}
111+
112+
static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
113+
{
114+
struct kvm_vcpu *vcpu = ctxt->vcpu;
115+
116+
switch(arch_timer_ctx_index(ctxt)) {
117+
case TIMER_VTIMER:
118+
__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
119+
break;
120+
case TIMER_PTIMER:
121+
__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
122+
break;
123+
default:
124+
WARN_ON(1);
125+
}
126+
}
127+
128+
static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
129+
{
130+
struct kvm_vcpu *vcpu = ctxt->vcpu;
131+
132+
switch(arch_timer_ctx_index(ctxt)) {
133+
case TIMER_VTIMER:
134+
__vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
135+
break;
136+
default:
137+
WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
138+
}
139+
}
140+
54141
u64 kvm_phys_timer_read(void)
55142
{
56143
return timecounter->cc->read(timecounter->cc);
@@ -124,8 +211,8 @@ static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
124211
{
125212
u64 cval, now;
126213

127-
cval = timer_ctx->cnt_cval;
128-
now = kvm_phys_timer_read() - timer_ctx->cntvoff;
214+
cval = timer_get_cval(timer_ctx);
215+
now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);
129216

130217
if (now < cval) {
131218
u64 ns;
@@ -144,8 +231,8 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
144231
{
145232
WARN_ON(timer_ctx && timer_ctx->loaded);
146233
return timer_ctx &&
147-
!(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
148-
(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
234+
((timer_get_ctl(timer_ctx) &
235+
(ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
149236
}
150237

151238
/*
@@ -256,8 +343,8 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
256343
if (!kvm_timer_irq_can_fire(timer_ctx))
257344
return false;
258345

259-
cval = timer_ctx->cnt_cval;
260-
now = kvm_phys_timer_read() - timer_ctx->cntvoff;
346+
cval = timer_get_cval(timer_ctx);
347+
now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);
261348

262349
return cval <= now;
263350
}
@@ -350,17 +437,17 @@ static void timer_save_state(struct arch_timer_context *ctx)
350437

351438
switch (index) {
352439
case TIMER_VTIMER:
353-
ctx->cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
354-
ctx->cnt_cval = read_sysreg_el0(SYS_CNTV_CVAL);
440+
timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
441+
timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));
355442

356443
/* Disable the timer */
357444
write_sysreg_el0(0, SYS_CNTV_CTL);
358445
isb();
359446

360447
break;
361448
case TIMER_PTIMER:
362-
ctx->cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
363-
ctx->cnt_cval = read_sysreg_el0(SYS_CNTP_CVAL);
449+
timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
450+
timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL));
364451

365452
/* Disable the timer */
366453
write_sysreg_el0(0, SYS_CNTP_CTL);
@@ -429,14 +516,14 @@ static void timer_restore_state(struct arch_timer_context *ctx)
429516

430517
switch (index) {
431518
case TIMER_VTIMER:
432-
write_sysreg_el0(ctx->cnt_cval, SYS_CNTV_CVAL);
519+
write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
433520
isb();
434-
write_sysreg_el0(ctx->cnt_ctl, SYS_CNTV_CTL);
521+
write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
435522
break;
436523
case TIMER_PTIMER:
437-
write_sysreg_el0(ctx->cnt_cval, SYS_CNTP_CVAL);
524+
write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
438525
isb();
439-
write_sysreg_el0(ctx->cnt_ctl, SYS_CNTP_CTL);
526+
write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
440527
break;
441528
case NR_KVM_TIMERS:
442529
BUG();
@@ -528,7 +615,7 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
528615
kvm_timer_vcpu_load_nogic(vcpu);
529616
}
530617

531-
set_cntvoff(map.direct_vtimer->cntvoff);
618+
set_cntvoff(timer_get_offset(map.direct_vtimer));
532619

533620
kvm_timer_unblocking(vcpu);
534621

@@ -639,8 +726,8 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
639726
* resets the timer to be disabled and unmasked and is compliant with
640727
* the ARMv7 architecture.
641728
*/
642-
vcpu_vtimer(vcpu)->cnt_ctl = 0;
643-
vcpu_ptimer(vcpu)->cnt_ctl = 0;
729+
timer_set_ctl(vcpu_vtimer(vcpu), 0);
730+
timer_set_ctl(vcpu_ptimer(vcpu), 0);
644731

645732
if (timer->enabled) {
646733
kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
@@ -668,13 +755,13 @@ static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
668755

669756
mutex_lock(&kvm->lock);
670757
kvm_for_each_vcpu(i, tmp, kvm)
671-
vcpu_vtimer(tmp)->cntvoff = cntvoff;
758+
timer_set_offset(vcpu_vtimer(tmp), cntvoff);
672759

673760
/*
674761
* When called from the vcpu create path, the CPU being created is not
675762
* included in the loop above, so we just set it here as well.
676763
*/
677-
vcpu_vtimer(vcpu)->cntvoff = cntvoff;
764+
timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
678765
mutex_unlock(&kvm->lock);
679766
}
680767

@@ -684,9 +771,12 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
684771
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
685772
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
686773

774+
vtimer->vcpu = vcpu;
775+
ptimer->vcpu = vcpu;
776+
687777
/* Synchronize cntvoff across all vtimers of a VM. */
688778
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
689-
ptimer->cntvoff = 0;
779+
timer_set_offset(ptimer, 0);
690780

691781
hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
692782
timer->bg_timer.function = kvm_bg_timer_expire;
@@ -704,9 +794,6 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
704794

705795
vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
706796
ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
707-
708-
vtimer->vcpu = vcpu;
709-
ptimer->vcpu = vcpu;
710797
}
711798

712799
static void kvm_timer_init_interrupt(void *info)
@@ -756,10 +843,12 @@ static u64 read_timer_ctl(struct arch_timer_context *timer)
756843
* UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
757844
* regardless of ENABLE bit for our implementation convenience.
758845
*/
846+
u32 ctl = timer_get_ctl(timer);
847+
759848
if (!kvm_timer_compute_delta(timer))
760-
return timer->cnt_ctl | ARCH_TIMER_CTRL_IT_STAT;
761-
else
762-
return timer->cnt_ctl;
849+
ctl |= ARCH_TIMER_CTRL_IT_STAT;
850+
851+
return ctl;
763852
}
764853

765854
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
@@ -795,20 +884,20 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
795884

796885
switch (treg) {
797886
case TIMER_REG_TVAL:
798-
val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
799-
val &= lower_32_bits(val);
887+
val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
888+
val = lower_32_bits(val);
800889
break;
801890

802891
case TIMER_REG_CTL:
803892
val = read_timer_ctl(timer);
804893
break;
805894

806895
case TIMER_REG_CVAL:
807-
val = timer->cnt_cval;
896+
val = timer_get_cval(timer);
808897
break;
809898

810899
case TIMER_REG_CNT:
811-
val = kvm_phys_timer_read() - timer->cntvoff;
900+
val = kvm_phys_timer_read() - timer_get_offset(timer);
812901
break;
813902

814903
default:
@@ -842,15 +931,15 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
842931
{
843932
switch (treg) {
844933
case TIMER_REG_TVAL:
845-
timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + (s32)val;
934+
timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
846935
break;
847936

848937
case TIMER_REG_CTL:
849-
timer->cnt_ctl = val & ~ARCH_TIMER_CTRL_IT_STAT;
938+
timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
850939
break;
851940

852941
case TIMER_REG_CVAL:
853-
timer->cnt_cval = val;
942+
timer_set_cval(timer, val);
854943
break;
855944

856945
default:

arch/arm64/kvm/trace_arm.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -301,8 +301,8 @@ TRACE_EVENT(kvm_timer_save_state,
301301
),
302302

303303
TP_fast_assign(
304-
__entry->ctl = ctx->cnt_ctl;
305-
__entry->cval = ctx->cnt_cval;
304+
__entry->ctl = timer_get_ctl(ctx);
305+
__entry->cval = timer_get_cval(ctx);
306306
__entry->timer_idx = arch_timer_ctx_index(ctx);
307307
),
308308

@@ -323,8 +323,8 @@ TRACE_EVENT(kvm_timer_restore_state,
323323
),
324324

325325
TP_fast_assign(
326-
__entry->ctl = ctx->cnt_ctl;
327-
__entry->cval = ctx->cnt_cval;
326+
__entry->ctl = timer_get_ctl(ctx);
327+
__entry->cval = timer_get_cval(ctx);
328328
__entry->timer_idx = arch_timer_ctx_index(ctx);
329329
),
330330

include/kvm/arm_arch_timer.h

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -26,16 +26,9 @@ enum kvm_arch_timer_regs {
2626
struct arch_timer_context {
2727
struct kvm_vcpu *vcpu;
2828

29-
/* Registers: control register, timer value */
30-
u32 cnt_ctl;
31-
u64 cnt_cval;
32-
3329
/* Timer IRQ */
3430
struct kvm_irq_level irq;
3531

36-
/* Virtual offset */
37-
u64 cntvoff;
38-
3932
/* Emulated Timer (may be unused) */
4033
struct hrtimer hrtimer;
4134

@@ -109,4 +102,8 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
109102
enum kvm_arch_timer_regs treg,
110103
u64 val);
111104

105+
/* Needed for tracing */
106+
u32 timer_get_ctl(struct arch_timer_context *ctxt);
107+
u64 timer_get_cval(struct arch_timer_context *ctxt);
108+
112109
#endif

0 commit comments

Comments
 (0)