Skip to content

Commit 6678791

Browse files
author
Marc Zyngier
committed
KVM: arm64: Add assignment-specific sysreg accessor
Assigning a value to a system register doesn't do what it is supposed to be doing if that register is one that has RESx bits. The main problem is that we use __vcpu_sys_reg(), which can be used both as an lvalue and an rvalue. When used as an lvalue, the bit masking occurs *before* the new value is assigned, meaning that we (1) do pointless work on the old value, and (2) potentially assign an invalid value as we fail to apply the masks to it. Fix this by providing a new __vcpu_assign_sys_reg() that does what it says on the tin, and sanitises the *new* value instead of the old one. This comes with a significant amount of churn. Reviewed-by: Miguel Luis <[email protected]> Reviewed-by: Oliver Upton <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Marc Zyngier <[email protected]>
1 parent 4d62121 commit 6678791

File tree

12 files changed

+86
-73
lines changed

12 files changed

+86
-73
lines changed

arch/arm64/include/asm/kvm_host.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1107,6 +1107,17 @@ static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
11071107
#define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r))
11081108

11091109
u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
1110+
1111+
#define __vcpu_assign_sys_reg(v, r, val) \
1112+
do { \
1113+
const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
1114+
u64 __v = (val); \
1115+
if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
1116+
__v = kvm_vcpu_apply_reg_masks((v), (r), __v); \
1117+
\
1118+
ctxt_sys_reg(ctxt, (r)) = __v; \
1119+
} while (0)
1120+
11101121
#define __vcpu_sys_reg(v,r) \
11111122
(*({ \
11121123
const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \

arch/arm64/kvm/arch_timer.c

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -108,16 +108,16 @@ static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
108108

109109
switch(arch_timer_ctx_index(ctxt)) {
110110
case TIMER_VTIMER:
111-
__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
111+
__vcpu_assign_sys_reg(vcpu, CNTV_CTL_EL0, ctl);
112112
break;
113113
case TIMER_PTIMER:
114-
__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
114+
__vcpu_assign_sys_reg(vcpu, CNTP_CTL_EL0, ctl);
115115
break;
116116
case TIMER_HVTIMER:
117-
__vcpu_sys_reg(vcpu, CNTHV_CTL_EL2) = ctl;
117+
__vcpu_assign_sys_reg(vcpu, CNTHV_CTL_EL2, ctl);
118118
break;
119119
case TIMER_HPTIMER:
120-
__vcpu_sys_reg(vcpu, CNTHP_CTL_EL2) = ctl;
120+
__vcpu_assign_sys_reg(vcpu, CNTHP_CTL_EL2, ctl);
121121
break;
122122
default:
123123
WARN_ON(1);
@@ -130,16 +130,16 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
130130

131131
switch(arch_timer_ctx_index(ctxt)) {
132132
case TIMER_VTIMER:
133-
__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
133+
__vcpu_assign_sys_reg(vcpu, CNTV_CVAL_EL0, cval);
134134
break;
135135
case TIMER_PTIMER:
136-
__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
136+
__vcpu_assign_sys_reg(vcpu, CNTP_CVAL_EL0, cval);
137137
break;
138138
case TIMER_HVTIMER:
139-
__vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2) = cval;
139+
__vcpu_assign_sys_reg(vcpu, CNTHV_CVAL_EL2, cval);
140140
break;
141141
case TIMER_HPTIMER:
142-
__vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = cval;
142+
__vcpu_assign_sys_reg(vcpu, CNTHP_CVAL_EL2, cval);
143143
break;
144144
default:
145145
WARN_ON(1);

arch/arm64/kvm/hyp/exception.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
3737
if (unlikely(vcpu_has_nv(vcpu)))
3838
vcpu_write_sys_reg(vcpu, val, reg);
3939
else if (!__vcpu_write_sys_reg_to_cpu(val, reg))
40-
__vcpu_sys_reg(vcpu, reg) = val;
40+
__vcpu_assign_sys_reg(vcpu, reg, val);
4141
}
4242

4343
static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
@@ -51,7 +51,7 @@ static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
5151
} else if (has_vhe()) {
5252
write_sysreg_el1(val, SYS_SPSR);
5353
} else {
54-
__vcpu_sys_reg(vcpu, SPSR_EL1) = val;
54+
__vcpu_assign_sys_reg(vcpu, SPSR_EL1, val);
5555
}
5656
}
5757

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
4545
if (!vcpu_el1_is_32bit(vcpu))
4646
return;
4747

48-
__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
48+
__vcpu_assign_sys_reg(vcpu, FPEXC32_EL2, read_sysreg(fpexc32_el2));
4949
}
5050

5151
static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
@@ -457,7 +457,7 @@ static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
457457
*/
458458
if (vcpu_has_sve(vcpu)) {
459459
zcr_el1 = read_sysreg_el1(SYS_ZCR);
460-
__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1;
460+
__vcpu_assign_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu), zcr_el1);
461461

462462
/*
463463
* The guest's state is always saved using the guest's max VL.

arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -307,11 +307,11 @@ static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
307307
vcpu->arch.ctxt.spsr_irq = read_sysreg(spsr_irq);
308308
vcpu->arch.ctxt.spsr_fiq = read_sysreg(spsr_fiq);
309309

310-
__vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
311-
__vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);
310+
__vcpu_assign_sys_reg(vcpu, DACR32_EL2, read_sysreg(dacr32_el2));
311+
__vcpu_assign_sys_reg(vcpu, IFSR32_EL2, read_sysreg(ifsr32_el2));
312312

313313
if (has_vhe() || kvm_debug_regs_in_use(vcpu))
314-
__vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
314+
__vcpu_assign_sys_reg(vcpu, DBGVCR32_EL2, read_sysreg(dbgvcr32_el2));
315315
}
316316

317317
static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)

arch/arm64/kvm/hyp/nvhe/hyp-main.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
2626

2727
static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
2828
{
29-
__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
29+
__vcpu_assign_sys_reg(vcpu, ZCR_EL1, read_sysreg_el1(SYS_ZCR));
3030
/*
3131
* On saving/restoring guest sve state, always use the maximum VL for
3232
* the guest. The layout of the data when saving the sve state depends
@@ -79,7 +79,7 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
7979

8080
has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm));
8181
if (has_fpmr)
82-
__vcpu_sys_reg(vcpu, FPMR) = read_sysreg_s(SYS_FPMR);
82+
__vcpu_assign_sys_reg(vcpu, FPMR, read_sysreg_s(SYS_FPMR));
8383

8484
if (system_supports_sve())
8585
__hyp_sve_restore_host();

arch/arm64/kvm/hyp/vhe/switch.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -223,9 +223,9 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
223223
*/
224224
val = read_sysreg_el0(SYS_CNTP_CVAL);
225225
if (map.direct_ptimer == vcpu_ptimer(vcpu))
226-
__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = val;
226+
__vcpu_assign_sys_reg(vcpu, CNTP_CVAL_EL0, val);
227227
if (map.direct_ptimer == vcpu_hptimer(vcpu))
228-
__vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = val;
228+
__vcpu_assign_sys_reg(vcpu, CNTHP_CVAL_EL2, val);
229229

230230
offset = read_sysreg_s(SYS_CNTPOFF_EL2);
231231

arch/arm64/kvm/hyp/vhe/sysreg-sr.c

Lines changed: 22 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -18,17 +18,17 @@
1818
static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
1919
{
2020
/* These registers are common with EL1 */
21-
__vcpu_sys_reg(vcpu, PAR_EL1) = read_sysreg(par_el1);
22-
__vcpu_sys_reg(vcpu, TPIDR_EL1) = read_sysreg(tpidr_el1);
23-
24-
__vcpu_sys_reg(vcpu, ESR_EL2) = read_sysreg_el1(SYS_ESR);
25-
__vcpu_sys_reg(vcpu, AFSR0_EL2) = read_sysreg_el1(SYS_AFSR0);
26-
__vcpu_sys_reg(vcpu, AFSR1_EL2) = read_sysreg_el1(SYS_AFSR1);
27-
__vcpu_sys_reg(vcpu, FAR_EL2) = read_sysreg_el1(SYS_FAR);
28-
__vcpu_sys_reg(vcpu, MAIR_EL2) = read_sysreg_el1(SYS_MAIR);
29-
__vcpu_sys_reg(vcpu, VBAR_EL2) = read_sysreg_el1(SYS_VBAR);
30-
__vcpu_sys_reg(vcpu, CONTEXTIDR_EL2) = read_sysreg_el1(SYS_CONTEXTIDR);
31-
__vcpu_sys_reg(vcpu, AMAIR_EL2) = read_sysreg_el1(SYS_AMAIR);
21+
__vcpu_assign_sys_reg(vcpu, PAR_EL1, read_sysreg(par_el1));
22+
__vcpu_assign_sys_reg(vcpu, TPIDR_EL1, read_sysreg(tpidr_el1));
23+
24+
__vcpu_assign_sys_reg(vcpu, ESR_EL2, read_sysreg_el1(SYS_ESR));
25+
__vcpu_assign_sys_reg(vcpu, AFSR0_EL2, read_sysreg_el1(SYS_AFSR0));
26+
__vcpu_assign_sys_reg(vcpu, AFSR1_EL2, read_sysreg_el1(SYS_AFSR1));
27+
__vcpu_assign_sys_reg(vcpu, FAR_EL2, read_sysreg_el1(SYS_FAR));
28+
__vcpu_assign_sys_reg(vcpu, MAIR_EL2, read_sysreg_el1(SYS_MAIR));
29+
__vcpu_assign_sys_reg(vcpu, VBAR_EL2, read_sysreg_el1(SYS_VBAR));
30+
__vcpu_assign_sys_reg(vcpu, CONTEXTIDR_EL2, read_sysreg_el1(SYS_CONTEXTIDR));
31+
__vcpu_assign_sys_reg(vcpu, AMAIR_EL2, read_sysreg_el1(SYS_AMAIR));
3232

3333
/*
3434
* In VHE mode those registers are compatible between EL1 and EL2,
@@ -46,21 +46,21 @@ static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
4646
* are always trapped, ensuring that the in-memory
4747
* copy is always up-to-date. A small blessing...
4848
*/
49-
__vcpu_sys_reg(vcpu, SCTLR_EL2) = read_sysreg_el1(SYS_SCTLR);
50-
__vcpu_sys_reg(vcpu, TTBR0_EL2) = read_sysreg_el1(SYS_TTBR0);
51-
__vcpu_sys_reg(vcpu, TTBR1_EL2) = read_sysreg_el1(SYS_TTBR1);
52-
__vcpu_sys_reg(vcpu, TCR_EL2) = read_sysreg_el1(SYS_TCR);
49+
__vcpu_assign_sys_reg(vcpu, SCTLR_EL2, read_sysreg_el1(SYS_SCTLR));
50+
__vcpu_assign_sys_reg(vcpu, TTBR0_EL2, read_sysreg_el1(SYS_TTBR0));
51+
__vcpu_assign_sys_reg(vcpu, TTBR1_EL2, read_sysreg_el1(SYS_TTBR1));
52+
__vcpu_assign_sys_reg(vcpu, TCR_EL2, read_sysreg_el1(SYS_TCR));
5353

5454
if (ctxt_has_tcrx(&vcpu->arch.ctxt)) {
55-
__vcpu_sys_reg(vcpu, TCR2_EL2) = read_sysreg_el1(SYS_TCR2);
55+
__vcpu_assign_sys_reg(vcpu, TCR2_EL2, read_sysreg_el1(SYS_TCR2));
5656

5757
if (ctxt_has_s1pie(&vcpu->arch.ctxt)) {
58-
__vcpu_sys_reg(vcpu, PIRE0_EL2) = read_sysreg_el1(SYS_PIRE0);
59-
__vcpu_sys_reg(vcpu, PIR_EL2) = read_sysreg_el1(SYS_PIR);
58+
__vcpu_assign_sys_reg(vcpu, PIRE0_EL2, read_sysreg_el1(SYS_PIRE0));
59+
__vcpu_assign_sys_reg(vcpu, PIR_EL2, read_sysreg_el1(SYS_PIR));
6060
}
6161

6262
if (ctxt_has_s1poe(&vcpu->arch.ctxt))
63-
__vcpu_sys_reg(vcpu, POR_EL2) = read_sysreg_el1(SYS_POR);
63+
__vcpu_assign_sys_reg(vcpu, POR_EL2, read_sysreg_el1(SYS_POR));
6464
}
6565

6666
/*
@@ -74,9 +74,9 @@ static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
7474
__vcpu_sys_reg(vcpu, CNTHCTL_EL2) |= val;
7575
}
7676

77-
__vcpu_sys_reg(vcpu, SP_EL2) = read_sysreg(sp_el1);
78-
__vcpu_sys_reg(vcpu, ELR_EL2) = read_sysreg_el1(SYS_ELR);
79-
__vcpu_sys_reg(vcpu, SPSR_EL2) = read_sysreg_el1(SYS_SPSR);
77+
__vcpu_assign_sys_reg(vcpu, SP_EL2, read_sysreg(sp_el1));
78+
__vcpu_assign_sys_reg(vcpu, ELR_EL2, read_sysreg_el1(SYS_ELR));
79+
__vcpu_assign_sys_reg(vcpu, SPSR_EL2, read_sysreg_el1(SYS_SPSR));
8080
}
8181

8282
static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu)

arch/arm64/kvm/pmu-emul.c

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -178,7 +178,7 @@ static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
178178
val |= lower_32_bits(val);
179179
}
180180

181-
__vcpu_sys_reg(vcpu, reg) = val;
181+
__vcpu_assign_sys_reg(vcpu, reg, val);
182182

183183
/* Recreate the perf event to reflect the updated sample_period */
184184
kvm_pmu_create_perf_event(pmc);
@@ -204,7 +204,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
204204
void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
205205
{
206206
kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
207-
__vcpu_sys_reg(vcpu, counter_index_to_reg(select_idx)) = val;
207+
__vcpu_assign_sys_reg(vcpu, counter_index_to_reg(select_idx), val);
208208
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
209209
}
210210

@@ -239,7 +239,7 @@ static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)
239239

240240
reg = counter_index_to_reg(pmc->idx);
241241

242-
__vcpu_sys_reg(vcpu, reg) = val;
242+
__vcpu_assign_sys_reg(vcpu, reg, val);
243243

244244
kvm_pmu_release_perf_event(pmc);
245245
}
@@ -503,7 +503,7 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
503503
reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
504504
if (!kvm_pmc_is_64bit(pmc))
505505
reg = lower_32_bits(reg);
506-
__vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;
506+
__vcpu_assign_sys_reg(vcpu, counter_index_to_reg(i), reg);
507507

508508
/* No overflow? move on */
509509
if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
@@ -602,7 +602,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
602602
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
603603

604604
/* The reset bits don't indicate any state, and shouldn't be saved. */
605-
__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
605+
__vcpu_assign_sys_reg(vcpu, PMCR_EL0, (val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P)));
606606

607607
if (val & ARMV8_PMU_PMCR_C)
608608
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
@@ -779,7 +779,7 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
779779
u64 reg;
780780

781781
reg = counter_index_to_evtreg(pmc->idx);
782-
__vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
782+
__vcpu_assign_sys_reg(vcpu, reg, (data & kvm_pmu_evtyper_mask(vcpu->kvm)));
783783

784784
kvm_pmu_create_perf_event(pmc);
785785
}
@@ -1038,7 +1038,7 @@ static void kvm_arm_set_nr_counters(struct kvm *kvm, unsigned int nr)
10381038
u64 val = __vcpu_sys_reg(vcpu, MDCR_EL2);
10391039
val &= ~MDCR_EL2_HPMN;
10401040
val |= FIELD_PREP(MDCR_EL2_HPMN, kvm->arch.nr_pmu_counters);
1041-
__vcpu_sys_reg(vcpu, MDCR_EL2) = val;
1041+
__vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
10421042
}
10431043
}
10441044
}

0 commit comments

Comments
 (0)