Commit f6deff3

zhaotianrui-loongson authored and chenhuacai committed
LoongArch: KVM: Implement basic vcpu ioctl interfaces
Implement basic vcpu ioctl interfaces, including:

1, vcpu KVM_ENABLE_CAP ioctl interface.

2, vcpu get registers and set registers operations, which are called
   when user space uses the ioctl interface to get or set regs.

Reviewed-by: Bibo Mao <[email protected]>
Tested-by: Huacai Chen <[email protected]>
Signed-off-by: Tianrui Zhao <[email protected]>
Signed-off-by: Huacai Chen <[email protected]>
1 parent 2fc3bd8 commit f6deff3
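
For context, the sketch below shows how a user-space VMM might exercise these interfaces through a vcpu file descriptor. It is a minimal illustration, not part of the commit: it assumes a LoongArch host where vcpu_fd was obtained via KVM_CREATE_VM/KVM_CREATE_VCPU, the helper name dump_and_tweak_vcpu is invented, and the choice of gpr[4] ($a0) is just an example. KVM_REG_LOONGARCH_COUNTER comes from the LoongArch KVM UAPI headers.

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* vcpu_fd: file descriptor returned by the KVM_CREATE_VCPU ioctl (assumed) */
static int dump_and_tweak_vcpu(int vcpu_fd)
{
	struct kvm_regs regs;
	struct kvm_one_reg one_reg;
	uint64_t counter;

	/* KVM_GET_REGS is served by kvm_arch_vcpu_ioctl_get_regs(): gpr[0..31] and pc */
	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
		return -1;
	printf("pc = 0x%llx\n", (unsigned long long)regs.pc);

	/* KVM_GET_ONE_REG with a KVM_REG_LOONGARCH_KVM id reads the guest counter */
	one_reg.id = KVM_REG_LOONGARCH_COUNTER;
	one_reg.addr = (uint64_t)(uintptr_t)&counter;
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg) < 0)
		return -1;
	printf("counter = 0x%llx\n", (unsigned long long)counter);

	/* KVM_SET_REGS writes the GPRs back; gpr[0] ($zero) is forced to 0 by the kernel */
	regs.gpr[4] = 0x1234;	/* $a0, example value only */
	return ioctl(vcpu_fd, KVM_SET_REGS, &regs);
}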

1 file changed (+261, -0 lines)


arch/loongarch/kvm/vcpu.c

Lines changed: 261 additions & 0 deletions
@@ -141,6 +141,267 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	return RESUME_GUEST;
 }
 
+static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
+{
+	unsigned long gintc;
+	struct loongarch_csrs *csr = vcpu->arch.csr;
+
+	if (get_gcsr_flag(id) & INVALID_GCSR)
+		return -EINVAL;
+
+	if (id == LOONGARCH_CSR_ESTAT) {
+		/* ESTAT IP0~IP7 get from GINTC */
+		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
+		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
+		return 0;
+	}
+
+	/*
+	 * Get software CSR state since software state is consistent
+	 * with hardware for synchronous ioctl
+	 */
+	*val = kvm_read_sw_gcsr(csr, id);
+
+	return 0;
+}
+
+static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
+{
+	int ret = 0, gintc;
+	struct loongarch_csrs *csr = vcpu->arch.csr;
+
+	if (get_gcsr_flag(id) & INVALID_GCSR)
+		return -EINVAL;
+
+	if (id == LOONGARCH_CSR_ESTAT) {
+		/* ESTAT IP0~IP7 inject through GINTC */
+		gintc = (val >> 2) & 0xff;
+		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);
+
+		gintc = val & ~(0xffUL << 2);
+		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);
+
+		return ret;
+	}
+
+	kvm_write_sw_gcsr(csr, id, val);
+
+	return ret;
+}
+
+static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
+			   const struct kvm_one_reg *reg, u64 *v)
+{
+	int id, ret = 0;
+	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
+
+	switch (type) {
+	case KVM_REG_LOONGARCH_CSR:
+		id = KVM_GET_IOC_CSR_IDX(reg->id);
+		ret = _kvm_getcsr(vcpu, id, v);
+		break;
+	case KVM_REG_LOONGARCH_CPUCFG:
+		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
+		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
+			*v = vcpu->arch.cpucfg[id];
+		else
+			ret = -EINVAL;
+		break;
+	case KVM_REG_LOONGARCH_KVM:
+		switch (reg->id) {
+		case KVM_REG_LOONGARCH_COUNTER:
+			*v = drdtime() + vcpu->kvm->arch.time_offset;
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	int ret = 0;
+	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
+
+	switch (size) {
+	case KVM_REG_SIZE_U64:
+		ret = kvm_get_one_reg(vcpu, reg, &v);
+		if (ret)
+			return ret;
+		ret = put_user(v, (u64 __user *)(long)reg->addr);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
+			   const struct kvm_one_reg *reg, u64 v)
+{
+	int id, ret = 0;
+	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
+
+	switch (type) {
+	case KVM_REG_LOONGARCH_CSR:
+		id = KVM_GET_IOC_CSR_IDX(reg->id);
+		ret = _kvm_setcsr(vcpu, id, v);
+		break;
+	case KVM_REG_LOONGARCH_CPUCFG:
+		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
+		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
+			vcpu->arch.cpucfg[id] = (u32)v;
+		else
+			ret = -EINVAL;
+		break;
+	case KVM_REG_LOONGARCH_KVM:
+		switch (reg->id) {
+		case KVM_REG_LOONGARCH_COUNTER:
+			/*
+			 * gftoffset is relative to the board, not to the
+			 * vcpu; on SMP systems it is only set once, by vcpu 0.
+			 */
+			if (vcpu->vcpu_id == 0)
+				vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
+			break;
+		case KVM_REG_LOONGARCH_VCPU_RESET:
+			kvm_reset_timer(vcpu);
+			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
+			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	int ret = 0;
+	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
+
+	switch (size) {
+	case KVM_REG_SIZE_U64:
+		ret = get_user(v, (u64 __user *)(long)reg->addr);
+		if (ret)
+			return ret;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return kvm_set_one_reg(vcpu, reg, v);
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+	return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+	return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+		regs->gpr[i] = vcpu->arch.gprs[i];
+
+	regs->pc = vcpu->arch.pc;
+
+	return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+	int i;
+
+	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+		vcpu->arch.gprs[i] = regs->gpr[i];
+
+	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
+	vcpu->arch.pc = regs->pc;
+
+	return 0;
+}
+
+static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
+				     struct kvm_enable_cap *cap)
+{
+	/* FPU is enabled by default, will support LSX/LASX later. */
+	return -EINVAL;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+			 unsigned int ioctl, unsigned long arg)
+{
+	long r;
+	void __user *argp = (void __user *)arg;
+	struct kvm_vcpu *vcpu = filp->private_data;
+
+	/*
+	 * Only software CSR state should be modified here.
+	 *
+	 * If any hardware CSR register were modified, a vcpu_load/vcpu_put
+	 * pair would be needed: the CSR registers are owned by this vcpu,
+	 * so after switching to another vcpu the CSR registers would have
+	 * to be reloaded.
+	 *
+	 * When software CSR state is modified, the KVM_LARCH_HWCSR_USABLE
+	 * bit should be cleared in vcpu->arch.aux_inuse; vcpu_load checks
+	 * the aux_inuse flag and reloads the CSR registers from software.
+	 */
+
+	switch (ioctl) {
+	case KVM_SET_ONE_REG:
+	case KVM_GET_ONE_REG: {
+		struct kvm_one_reg reg;
+
+		r = -EFAULT;
+		if (copy_from_user(&reg, argp, sizeof(reg)))
+			break;
+		if (ioctl == KVM_SET_ONE_REG) {
+			r = kvm_set_reg(vcpu, &reg);
+			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
+		} else
+			r = kvm_get_reg(vcpu, &reg);
+		break;
+	}
+	case KVM_ENABLE_CAP: {
+		struct kvm_enable_cap cap;
+
+		r = -EFAULT;
+		if (copy_from_user(&cap, argp, sizeof(cap)))
+			break;
+		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
+		break;
+	}
+	default:
+		r = -ENOIOCTLCMD;
+		break;
+	}
+
+	return r;
+}
+
 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 {
 	return 0;

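A note on the ESTAT handling above: _kvm_getcsr() and _kvm_setcsr() keep the injectable interrupt-pending bits IP0~IP7 (ESTAT bits 2..9) in the low byte of the software GINTC CSR and fold them back in on reads. The stand-alone sketch below mirrors that packing outside the kernel; the helper names and test values are illustrative only.

#include <stdint.h>
#include <assert.h>

/* Split a user-visible ESTAT value the way _kvm_setcsr() does:
 * IP0~IP7 (bits 2..9) go to the low byte of GINTC, the rest stays in ESTAT. */
static void estat_split(uint64_t val, uint64_t *estat, uint64_t *gintc)
{
	*gintc = (val >> 2) & 0xff;
	*estat = val & ~(0xffULL << 2);
}

/* Recombine the two software CSRs the way _kvm_getcsr() does. */
static uint64_t estat_combine(uint64_t estat, uint64_t gintc)
{
	return estat | ((gintc & 0xff) << 2);
}

int main(void)
{
	uint64_t val = (1ULL << 2) | (1ULL << 5) | (1ULL << 12); /* IP0, IP3 plus an unrelated bit */
	uint64_t estat, gintc;

	estat_split(val, &estat, &gintc);
	assert(gintc == 0x09);                      /* IP0 and IP3 -> bits 0 and 3 of GINTC */
	assert(estat == (1ULL << 12));              /* non-IP bits stay in ESTAT */
	assert(estat_combine(estat, gintc) == val); /* the encoding round-trips */
	return 0;
}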