Commit 71071ac
Author: Marc Zyngier

KVM: arm64: hyp: Use ctxt_sys_reg/__vcpu_sys_reg instead of raw sys_regs access

Switch the hypervisor code to using ctxt_sys_reg/__vcpu_sys_reg instead of
raw sys_regs accesses. No intended functional change.

Signed-off-by: Marc Zyngier <[email protected]>

Parent: 1b422dd

File tree: 5 files changed (+62 −65 lines)
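
For readers following the series: the diffs below are a mechanical substitution, and the claim of "no intended functional change" rests on what the accessors (added by the parent commit) expand to. A minimal sketch, assuming the usual definitions in arch/arm64/include/asm/kvm_host.h (not copied verbatim from this tree):

/* Assumed shape of the accessors: thin wrappers around the sys_regs[] array. */
#define ctxt_sys_reg(c, r)      ((c)->sys_regs[(r)])
#define __vcpu_sys_reg(v, r)    (ctxt_sys_reg(&(v)->arch.ctxt, (r)))

/*
 * Under that assumption, the new form
 *         ctxt_sys_reg(ctxt, MDSCR_EL1) = read_sysreg(mdscr_el1);
 * expands to exactly the old form
 *         ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
 * so the generated code should be identical.
 */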

arch/arm64/include/asm/kvm_host.h (1 addition, 1 deletion)

@@ -561,7 +561,7 @@ DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
 static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
 {
         /* The host's MPIDR is immutable, so let's set it up at boot time */
-        cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr();
+        ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
 }
 
 static inline bool kvm_arch_requires_vhe(void)

arch/arm64/kvm/hyp/include/hyp/debug-sr.h (2 additions, 2 deletions)

@@ -104,7 +104,7 @@ static inline void __debug_save_state(struct kvm_vcpu *vcpu,
         save_debug(dbg->dbg_wcr, dbgwcr, wrps);
         save_debug(dbg->dbg_wvr, dbgwvr, wrps);
 
-        ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
+        ctxt_sys_reg(ctxt, MDCCINT_EL1) = read_sysreg(mdccint_el1);
 }
 
 static inline void __debug_restore_state(struct kvm_vcpu *vcpu,
@@ -124,7 +124,7 @@ static inline void __debug_restore_state(struct kvm_vcpu *vcpu,
         restore_debug(dbg->dbg_wcr, dbgwcr, wrps);
         restore_debug(dbg->dbg_wvr, dbgwvr, wrps);
 
-        write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
+        write_sysreg(ctxt_sys_reg(ctxt, MDCCINT_EL1), mdccint_el1);
 }
 
 static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)

arch/arm64/kvm/hyp/include/hyp/switch.h (3 additions, 4 deletions)

@@ -53,7 +53,7 @@ static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
         if (!vcpu_el1_is_32bit(vcpu))
                 return;
 
-        vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
+        __vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
 }
 
 static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
@@ -268,15 +268,14 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
                 sve_load_state(vcpu_sve_pffr(vcpu),
                                &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
                                sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
-                write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
+                write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12);
         } else {
                 __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
         }
 
         /* Skip restoring fpexc32 for AArch64 guests */
         if (!(read_sysreg(hcr_el2) & HCR_RW))
-                write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
-                             fpexc32_el2);
+                write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
 
         vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;
 

arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h (54 additions, 56 deletions)

@@ -17,34 +17,34 @@
 
 static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
 {
-        ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
+        ctxt_sys_reg(ctxt, MDSCR_EL1) = read_sysreg(mdscr_el1);
 }
 
 static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
 {
-        ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
-        ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
+        ctxt_sys_reg(ctxt, TPIDR_EL0) = read_sysreg(tpidr_el0);
+        ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
 }
 
 static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 {
-        ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
-        ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR);
-        ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(SYS_CPACR);
-        ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(SYS_TTBR0);
-        ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(SYS_TTBR1);
-        ctxt->sys_regs[TCR_EL1] = read_sysreg_el1(SYS_TCR);
-        ctxt->sys_regs[ESR_EL1] = read_sysreg_el1(SYS_ESR);
-        ctxt->sys_regs[AFSR0_EL1] = read_sysreg_el1(SYS_AFSR0);
-        ctxt->sys_regs[AFSR1_EL1] = read_sysreg_el1(SYS_AFSR1);
-        ctxt->sys_regs[FAR_EL1] = read_sysreg_el1(SYS_FAR);
-        ctxt->sys_regs[MAIR_EL1] = read_sysreg_el1(SYS_MAIR);
-        ctxt->sys_regs[VBAR_EL1] = read_sysreg_el1(SYS_VBAR);
-        ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg_el1(SYS_CONTEXTIDR);
-        ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(SYS_AMAIR);
-        ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(SYS_CNTKCTL);
-        ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
-        ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
+        ctxt_sys_reg(ctxt, CSSELR_EL1) = read_sysreg(csselr_el1);
+        ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
+        ctxt_sys_reg(ctxt, CPACR_EL1) = read_sysreg_el1(SYS_CPACR);
+        ctxt_sys_reg(ctxt, TTBR0_EL1) = read_sysreg_el1(SYS_TTBR0);
+        ctxt_sys_reg(ctxt, TTBR1_EL1) = read_sysreg_el1(SYS_TTBR1);
+        ctxt_sys_reg(ctxt, TCR_EL1) = read_sysreg_el1(SYS_TCR);
+        ctxt_sys_reg(ctxt, ESR_EL1) = read_sysreg_el1(SYS_ESR);
+        ctxt_sys_reg(ctxt, AFSR0_EL1) = read_sysreg_el1(SYS_AFSR0);
+        ctxt_sys_reg(ctxt, AFSR1_EL1) = read_sysreg_el1(SYS_AFSR1);
+        ctxt_sys_reg(ctxt, FAR_EL1) = read_sysreg_el1(SYS_FAR);
+        ctxt_sys_reg(ctxt, MAIR_EL1) = read_sysreg_el1(SYS_MAIR);
+        ctxt_sys_reg(ctxt, VBAR_EL1) = read_sysreg_el1(SYS_VBAR);
+        ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
+        ctxt_sys_reg(ctxt, AMAIR_EL1) = read_sysreg_el1(SYS_AMAIR);
+        ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL);
+        ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg(par_el1);
+        ctxt_sys_reg(ctxt, TPIDR_EL1) = read_sysreg(tpidr_el1);
 
         ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
         ctxt->gp_regs.elr_el1 = read_sysreg_el1(SYS_ELR);
@@ -57,55 +57,55 @@ static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
         ctxt->gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
 
         if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
-                ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
+                ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
 }
 
 static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
 {
-        write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
+        write_sysreg(ctxt_sys_reg(ctxt, MDSCR_EL1), mdscr_el1);
 }
 
 static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
 {
-        write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
-        write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
+        write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL0), tpidr_el0);
+        write_sysreg(ctxt_sys_reg(ctxt, TPIDRRO_EL0), tpidrro_el0);
 }
 
 static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 {
-        write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
-        write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
+        write_sysreg(ctxt_sys_reg(ctxt, MPIDR_EL1), vmpidr_el2);
+        write_sysreg(ctxt_sys_reg(ctxt, CSSELR_EL1), csselr_el1);
 
         if (has_vhe() ||
             !cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
-                write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
-                write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
+                write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
+                write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
         } else if (!ctxt->__hyp_running_vcpu) {
                 /*
                  * Must only be done for guest registers, hence the context
                  * test. We're coming from the host, so SCTLR.M is already
                  * set. Pairs with nVHE's __activate_traps().
                  */
-                write_sysreg_el1((ctxt->sys_regs[TCR_EL1] |
+                write_sysreg_el1((ctxt_sys_reg(ctxt, TCR_EL1) |
                                   TCR_EPD1_MASK | TCR_EPD0_MASK),
                                  SYS_TCR);
                 isb();
         }
 
-        write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], SYS_CPACR);
-        write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], SYS_TTBR0);
-        write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], SYS_TTBR1);
-        write_sysreg_el1(ctxt->sys_regs[ESR_EL1], SYS_ESR);
-        write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], SYS_AFSR0);
-        write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], SYS_AFSR1);
-        write_sysreg_el1(ctxt->sys_regs[FAR_EL1], SYS_FAR);
-        write_sysreg_el1(ctxt->sys_regs[MAIR_EL1], SYS_MAIR);
-        write_sysreg_el1(ctxt->sys_regs[VBAR_EL1], SYS_VBAR);
-        write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1], SYS_CONTEXTIDR);
-        write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], SYS_AMAIR);
-        write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], SYS_CNTKCTL);
-        write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
-        write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
+        write_sysreg_el1(ctxt_sys_reg(ctxt, CPACR_EL1), SYS_CPACR);
+        write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR0_EL1), SYS_TTBR0);
+        write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR1_EL1), SYS_TTBR1);
+        write_sysreg_el1(ctxt_sys_reg(ctxt, ESR_EL1), SYS_ESR);
+        write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR0_EL1), SYS_AFSR0);
+        write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR1_EL1), SYS_AFSR1);
+        write_sysreg_el1(ctxt_sys_reg(ctxt, FAR_EL1), SYS_FAR);
+        write_sysreg_el1(ctxt_sys_reg(ctxt, MAIR_EL1), SYS_MAIR);
+        write_sysreg_el1(ctxt_sys_reg(ctxt, VBAR_EL1), SYS_VBAR);
+        write_sysreg_el1(ctxt_sys_reg(ctxt, CONTEXTIDR_EL1), SYS_CONTEXTIDR);
+        write_sysreg_el1(ctxt_sys_reg(ctxt, AMAIR_EL1), SYS_AMAIR);
+        write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL);
+        write_sysreg(ctxt_sys_reg(ctxt, PAR_EL1), par_el1);
+        write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL1), tpidr_el1);
 
         if (!has_vhe() &&
             cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
@@ -120,9 +120,9 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
                  * deconfigured and disabled. We can now restore the host's
                  * S1 configuration: SCTLR, and only then TCR.
                  */
-                write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
+                write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
                 isb();
-                write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
+                write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
         }
 
         write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
@@ -153,51 +153,49 @@ static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
         write_sysreg_el2(pstate, SYS_SPSR);
 
         if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
-                write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
+                write_sysreg_s(ctxt_sys_reg(ctxt, DISR_EL1), SYS_VDISR_EL2);
 }
 
 static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
 {
-        u64 *spsr, *sysreg;
+        u64 *spsr;
 
         if (!vcpu_el1_is_32bit(vcpu))
                 return;
 
         spsr = vcpu->arch.ctxt.gp_regs.spsr;
-        sysreg = vcpu->arch.ctxt.sys_regs;
 
         spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
         spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
         spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
         spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);
 
-        sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
-        sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);
+        __vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
+        __vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);
 
         if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
-                sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
+                __vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
 }
 
 static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
 {
-        u64 *spsr, *sysreg;
+        u64 *spsr;
 
         if (!vcpu_el1_is_32bit(vcpu))
                 return;
 
         spsr = vcpu->arch.ctxt.gp_regs.spsr;
-        sysreg = vcpu->arch.ctxt.sys_regs;
 
         write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
         write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
         write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
         write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);
 
-        write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
-        write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);
+        write_sysreg(__vcpu_sys_reg(vcpu, DACR32_EL2), dacr32_el2);
+        write_sysreg(__vcpu_sys_reg(vcpu, IFSR32_EL2), ifsr32_el2);
 
         if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
-                write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
+                write_sysreg(__vcpu_sys_reg(vcpu, DBGVCR32_EL2), dbgvcr32_el2);
 }
 
 #endif /* __ARM64_KVM_HYP_SYSREG_SR_H__ */
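
A note on the dropped locals in __sysreg32_save_state()/__sysreg32_restore_state(): the old code cached a raw pointer ("u64 *sysreg = vcpu->arch.ctxt.sys_regs;"), which bypasses any accessor. With that gone, the hyp code only ever reaches the array through the macros, so the backing storage could later be changed in a single place. A hypothetical illustration of such a change (not part of this commit; the helper name is illustrative only), routing the accessor through a pointer-returning helper:

/* Hypothetical future accessor shape; only the macro definitions would move. */
#define __ctxt_sys_reg(c, r)    (&(c)->sys_regs[(r)])
#define ctxt_sys_reg(c, r)      (*__ctxt_sys_reg((c), (r)))
#define __vcpu_sys_reg(v, r)    (ctxt_sys_reg(&(v)->arch.ctxt, (r)))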

arch/arm64/kvm/hyp/nvhe/switch.c (2 additions, 2 deletions)

@@ -52,9 +52,9 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
                  * configured and enabled. We can now restore the guest's S1
                  * configuration: SCTLR, and only then TCR.
                  */
-                write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
+                write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
                 isb();
-                write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
+                write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
         }
 }
 