
Commit e47c205

Marc Zyngier authored and committed
KVM: arm64: Make struct kvm_regs userspace-only
struct kvm_regs is used by userspace to indicate which register gets
accessed by the {GET,SET}_ONE_REG API. But as we're about to refactor
the layout of the in-kernel register structures, we need the kernel to
move away from it.

Let's make kvm_regs userspace only, and let the kernel map it to its
own internal representation.

Reviewed-by: James Morse <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
1 parent 5b78077 commit e47c205

10 files changed: +96 -48 lines

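For context, struct kvm_regs remains the userspace-facing layout: core-register IDs passed to the {GET,SET}_ONE_REG ioctls are still offsets into struct kvm_regs, and only the kernel's internal storage changes. Below is a minimal userspace sketch (not part of this patch), assuming an already-created vcpu file descriptor (vcpu_fd is a placeholder name) and the arm64 uapi headers:

#include <stdint.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Read the guest PC through KVM_GET_ONE_REG; the register ID is still
 * derived from offsetof(struct kvm_regs, regs.pc) via KVM_REG_ARM_CORE_REG().
 */
static int read_guest_pc(int vcpu_fd, uint64_t *pc)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
			KVM_REG_ARM_CORE_REG(regs.pc),
		.addr = (uint64_t)(uintptr_t)pc,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}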

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 9 additions & 9 deletions
@@ -124,12 +124,12 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
 
 static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
 }
 
 static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
+	return (unsigned long *)&vcpu->arch.ctxt.elr_el1;
 }
 
 static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
@@ -150,7 +150,7 @@ static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long
 
 static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
 }
 
 static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
@@ -179,14 +179,14 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
						  u8 reg_num)
 {
-	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
 }
 
 static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
					 unsigned long val)
 {
 	if (reg_num != 31)
-		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
+		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
 }
 
 static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
@@ -197,7 +197,7 @@ static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
 	if (vcpu->arch.sysregs_loaded_on_cpu)
 		return read_sysreg_el1(SYS_SPSR);
 	else
-		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
+		return vcpu->arch.ctxt.spsr[KVM_SPSR_EL1];
 }
 
 static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
@@ -210,7 +210,7 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
 	if (vcpu->arch.sysregs_loaded_on_cpu)
 		write_sysreg_el1(v, SYS_SPSR);
 	else
-		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
+		vcpu->arch.ctxt.spsr[KVM_SPSR_EL1] = v;
 }
 
 /*
@@ -519,11 +519,11 @@ static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_i
 static __always_inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
-	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
+	vcpu_gp_regs(vcpu)->pstate = read_sysreg_el2(SYS_SPSR);
 
 	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 
-	write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
+	write_sysreg_el2(vcpu_gp_regs(vcpu)->pstate, SYS_SPSR);
 	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
 }
 

arch/arm64/include/asm/kvm_host.h

Lines changed: 10 additions & 2 deletions
@@ -236,7 +236,15 @@ enum vcpu_sysreg {
 #define NR_COPRO_REGS	(NR_SYS_REGS * 2)
 
 struct kvm_cpu_context {
-	struct kvm_regs	gp_regs;
+	struct user_pt_regs regs;	/* sp = sp_el0 */
+
+	u64	sp_el1;
+	u64	elr_el1;
+
+	u64	spsr[KVM_NR_SPSR];
+
+	struct user_fpsimd_state fp_regs;
+
 	union {
 		u64 sys_regs[NR_SYS_REGS];
 		u32 copro[NR_COPRO_REGS];
@@ -402,7 +410,7 @@ struct kvm_vcpu_arch {
					  system_supports_generic_auth()) && \
					 ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))
 
-#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.gp_regs)
+#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)
 
 /*
  * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
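For comparison, the new fields mirror the members of the userspace-facing struct kvm_regs, which this patch leaves untouched. Quoted (from memory of the arm64 uapi header of this era, arch/arm64/include/uapi/asm/kvm.h) for reference only:

struct kvm_regs {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	__u64	sp_el1;
	__u64	elr_el1;

	__u64	spsr[KVM_NR_SPSR];

	struct user_fpsimd_state fp_regs;
};

The kernel thus keeps the same information, but as direct members of struct kvm_cpu_context rather than wrapped in the uapi struct.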

arch/arm64/kernel/asm-offsets.c

Lines changed: 1 addition & 2 deletions
@@ -102,13 +102,12 @@ int main(void)
   DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
   DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
   DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
-  DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
+  DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs));
   DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));
   DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1]));
   DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1]));
   DEFINE(CPU_APDBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1]));
   DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));
-  DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
   DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
   DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));
 #endif

arch/arm64/kvm/fpsimd.c

Lines changed: 1 addition & 1 deletion
@@ -85,7 +85,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
 	WARN_ON_ONCE(!irqs_disabled());
 
 	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
-		fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs,
+		fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
					 vcpu->arch.sve_state,
					 vcpu->arch.sve_max_vl);
 
arch/arm64/kvm/guest.c

Lines changed: 56 additions & 14 deletions
@@ -101,19 +101,60 @@ static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
 	return size;
 }
 
-static int validate_core_offset(const struct kvm_vcpu *vcpu,
-				const struct kvm_one_reg *reg)
+static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 	u64 off = core_reg_offset_from_id(reg->id);
 	int size = core_reg_size_from_offset(vcpu, off);
 
 	if (size < 0)
-		return -EINVAL;
+		return NULL;
 
 	if (KVM_REG_SIZE(reg->id) != size)
-		return -EINVAL;
+		return NULL;
 
-	return 0;
+	switch (off) {
+	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
+		off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
+		off /= 2;
+		return &vcpu->arch.ctxt.regs.regs[off];
+
+	case KVM_REG_ARM_CORE_REG(regs.sp):
+		return &vcpu->arch.ctxt.regs.sp;
+
+	case KVM_REG_ARM_CORE_REG(regs.pc):
+		return &vcpu->arch.ctxt.regs.pc;
+
+	case KVM_REG_ARM_CORE_REG(regs.pstate):
+		return &vcpu->arch.ctxt.regs.pstate;
+
+	case KVM_REG_ARM_CORE_REG(sp_el1):
+		return &vcpu->arch.ctxt.sp_el1;
+
+	case KVM_REG_ARM_CORE_REG(elr_el1):
+		return &vcpu->arch.ctxt.elr_el1;
+
+	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+		off -= KVM_REG_ARM_CORE_REG(spsr[0]);
+		off /= 2;
+		return &vcpu->arch.ctxt.spsr[off];
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+		off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
+		off /= 4;
+		return &vcpu->arch.ctxt.fp_regs.vregs[off];
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+		return &vcpu->arch.ctxt.fp_regs.fpsr;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+		return &vcpu->arch.ctxt.fp_regs.fpcr;
+
+	default:
+		return NULL;
+	}
 }
 
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
@@ -125,8 +166,8 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
	 * off the index in the "array".
	 */
 	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
-	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
-	int nr_regs = sizeof(*regs) / sizeof(__u32);
+	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
+	void *addr;
 	u32 off;
 
 	/* Our ID is an index into the kvm_regs struct. */
@@ -135,10 +176,11 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
-	if (validate_core_offset(vcpu, reg))
+	addr = core_reg_addr(vcpu, reg);
+	if (!addr)
 		return -EINVAL;
 
-	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
+	if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
 		return -EFAULT;
 
 	return 0;
@@ -147,10 +189,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
-	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
-	int nr_regs = sizeof(*regs) / sizeof(__u32);
+	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
 	__uint128_t tmp;
-	void *valp = &tmp;
+	void *valp = &tmp, *addr;
 	u64 off;
 	int err = 0;
 
@@ -160,7 +201,8 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
-	if (validate_core_offset(vcpu, reg))
+	addr = core_reg_addr(vcpu, reg);
+	if (!addr)
 		return -EINVAL;
 
 	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
@@ -198,7 +240,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
		}
	}
 
-	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
+	memcpy(addr, valp, KVM_REG_SIZE(reg->id));
 
 	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
 		int i;

arch/arm64/kvm/hyp/entry.S

Lines changed: 1 addition & 2 deletions
@@ -16,8 +16,7 @@
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_ptrauth.h>
 
-#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
-#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
+#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
 #define CPU_SP_EL0_OFFSET	(CPU_XREG_OFFSET(30) + 8)
 
 	.text

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 2 additions & 2 deletions
@@ -266,11 +266,11 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 
 	if (sve_guest) {
 		sve_load_state(vcpu_sve_pffr(vcpu),
-			       &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
+			       &vcpu->arch.ctxt.fp_regs.fpsr,
			       sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
 		write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12);
 	} else {
-		__fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
+		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
 	}
 
 	/* Skip restoring fpexc32 for AArch64 guests */

arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h

Lines changed: 12 additions & 12 deletions
@@ -46,15 +46,15 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 	ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg(par_el1);
 	ctxt_sys_reg(ctxt, TPIDR_EL1) = read_sysreg(tpidr_el1);
 
-	ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
-	ctxt->gp_regs.elr_el1 = read_sysreg_el1(SYS_ELR);
-	ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(SYS_SPSR);
+	ctxt->sp_el1 = read_sysreg(sp_el1);
+	ctxt->elr_el1 = read_sysreg_el1(SYS_ELR);
+	ctxt->spsr[KVM_SPSR_EL1] = read_sysreg_el1(SYS_SPSR);
 }
 
 static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
 {
-	ctxt->gp_regs.regs.pc = read_sysreg_el2(SYS_ELR);
-	ctxt->gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
+	ctxt->regs.pc = read_sysreg_el2(SYS_ELR);
+	ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);
 
 	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
 		ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
@@ -125,14 +125,14 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
 	}
 
-	write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
-	write_sysreg_el1(ctxt->gp_regs.elr_el1, SYS_ELR);
-	write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],SYS_SPSR);
+	write_sysreg(ctxt->sp_el1, sp_el1);
+	write_sysreg_el1(ctxt->elr_el1, SYS_ELR);
+	write_sysreg_el1(ctxt->spsr[KVM_SPSR_EL1], SYS_SPSR);
 }
 
 static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
 {
-	u64 pstate = ctxt->gp_regs.regs.pstate;
+	u64 pstate = ctxt->regs.pstate;
 	u64 mode = pstate & PSR_AA32_MODE_MASK;
 
 	/*
@@ -149,7 +149,7 @@ static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctx
 	if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
 		pstate = PSR_MODE_EL2h | PSR_IL_BIT;
 
-	write_sysreg_el2(ctxt->gp_regs.regs.pc, SYS_ELR);
+	write_sysreg_el2(ctxt->regs.pc, SYS_ELR);
 	write_sysreg_el2(pstate, SYS_SPSR);
 
 	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
@@ -163,7 +163,7 @@ static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
 	if (!vcpu_el1_is_32bit(vcpu))
 		return;
 
-	spsr = vcpu->arch.ctxt.gp_regs.spsr;
+	spsr = vcpu->arch.ctxt.spsr;
 
 	spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
 	spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
@@ -184,7 +184,7 @@ static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
 	if (!vcpu_el1_is_32bit(vcpu))
 		return;
 
-	spsr = vcpu->arch.ctxt.gp_regs.spsr;
+	spsr = vcpu->arch.ctxt.spsr;
 
 	write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
 	write_sysreg(spsr[KVM_SPSR_UND], spsr_und);

arch/arm64/kvm/regmap.c

Lines changed: 3 additions & 3 deletions
@@ -100,7 +100,7 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
  */
 unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
 {
-	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
+	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.regs;
 	unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
 
 	switch (mode) {
@@ -148,7 +148,7 @@ unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu)
 	int spsr_idx = vcpu_spsr32_mode(vcpu);
 
 	if (!vcpu->arch.sysregs_loaded_on_cpu)
-		return vcpu_gp_regs(vcpu)->spsr[spsr_idx];
+		return vcpu->arch.ctxt.spsr[spsr_idx];
 
 	switch (spsr_idx) {
 	case KVM_SPSR_SVC:
@@ -171,7 +171,7 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
 	int spsr_idx = vcpu_spsr32_mode(vcpu);
 
 	if (!vcpu->arch.sysregs_loaded_on_cpu) {
-		vcpu_gp_regs(vcpu)->spsr[spsr_idx] = v;
+		vcpu->arch.ctxt.spsr[spsr_idx] = v;
 		return;
 	}
 

arch/arm64/kvm/reset.c

Lines changed: 1 addition & 1 deletion
@@ -288,7 +288,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 
 	/* Reset core registers */
 	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
-	vcpu_gp_regs(vcpu)->regs.pstate = pstate;
+	vcpu_gp_regs(vcpu)->pstate = pstate;
 
 	/* Reset system registers */
 	kvm_reset_sys_regs(vcpu);
