Skip to content

Commit 0f4b825

Browse files
VincentZWC authored and palmer-dabbelt committed
riscv: KVM: Add vector lazy save/restore support
This patch adds vector context save/restore for guest VCPUs. To reduce the impact on KVM performance, the implementation imitates the FP context switch mechanism to lazily store and restore the vector context only when the kernel enters/exits the in-kernel run loop and not during the KVM world switch. Signed-off-by: Vincent Chen <[email protected]> Signed-off-by: Greentime Hu <[email protected]> Signed-off-by: Andy Chiu <[email protected]> Reviewed-by: Anup Patel <[email protected]> Acked-by: Anup Patel <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent bf78f1e commit 0f4b825

File tree

6 files changed

+300
-0
lines changed

6 files changed

+300
-0
lines changed

arch/riscv/include/asm/kvm_host.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
#include <linux/spinlock.h>
1616
#include <asm/hwcap.h>
1717
#include <asm/kvm_aia.h>
18+
#include <asm/ptrace.h>
1819
#include <asm/kvm_vcpu_fp.h>
1920
#include <asm/kvm_vcpu_insn.h>
2021
#include <asm/kvm_vcpu_sbi.h>
@@ -145,6 +146,7 @@ struct kvm_cpu_context {
145146
unsigned long sstatus;
146147
unsigned long hstatus;
147148
union __riscv_fp_state fp;
149+
struct __riscv_v_ext_state vector;
148150
};
149151

150152
struct kvm_vcpu_csr {
Lines changed: 82 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,82 @@
1+
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2022 SiFive
 *
 * Authors:
 *     Vincent Chen <[email protected]>
 *     Greentime Hu <[email protected]>
 */

#ifndef __KVM_VCPU_RISCV_VECTOR_H
#define __KVM_VCPU_RISCV_VECTOR_H

#include <linux/types.h>

#ifdef CONFIG_RISCV_ISA_V
#include <asm/vector.h>
#include <asm/kvm_host.h>

/* Store the live vector unit state into @context's save area. */
static __always_inline void __kvm_riscv_vector_save(struct kvm_cpu_context *context)
{
	__riscv_v_vstate_save(&context->vector, context->vector.datap);
}

/* Load @context's saved vector state back into the vector unit. */
static __always_inline void __kvm_riscv_vector_restore(struct kvm_cpu_context *context)
{
	__riscv_v_vstate_restore(&context->vector, context->vector.datap);
}

void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
				      unsigned long *isa);
void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
					 unsigned long *isa);
void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx);
void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx);
int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
					struct kvm_cpu_context *cntx);
void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu);
#else
/* No-op stubs used when the kernel is built without V-extension support. */

struct kvm_cpu_context;

static inline void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
						    unsigned long *isa)
{
}

static inline void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
						       unsigned long *isa)
{
}

static inline void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
{
}

static inline void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
{
}

static inline int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
						       struct kvm_cpu_context *cntx)
{
	return 0;
}

static inline void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
{
}
#endif

int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg,
				  unsigned long rtype);
int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg,
				  unsigned long rtype);
#endif

arch/riscv/include/uapi/asm/kvm.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -204,6 +204,13 @@ enum KVM_RISCV_SBI_EXT_ID {
204204
#define KVM_REG_RISCV_SBI_MULTI_REG_LAST \
205205
KVM_REG_RISCV_SBI_MULTI_REG(KVM_RISCV_SBI_EXT_MAX - 1)
206206

207+
/* V extension registers are mapped as type 9 */
208+
#define KVM_REG_RISCV_VECTOR (0x09 << KVM_REG_RISCV_TYPE_SHIFT)
209+
#define KVM_REG_RISCV_VECTOR_CSR_REG(name) \
210+
(offsetof(struct __riscv_v_ext_state, name) / sizeof(unsigned long))
211+
#define KVM_REG_RISCV_VECTOR_REG(n) \
212+
((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long))
213+
207214
#endif
208215

209216
#endif /* __LINUX_KVM_RISCV_H */

arch/riscv/kvm/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ kvm-y += mmu.o
1717
kvm-y += vcpu.o
1818
kvm-y += vcpu_exit.o
1919
kvm-y += vcpu_fp.o
20+
kvm-y += vcpu_vector.o
2021
kvm-y += vcpu_insn.o
2122
kvm-y += vcpu_switch.o
2223
kvm-y += vcpu_sbi.o

arch/riscv/kvm/vcpu.c

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,8 @@
2222
#include <asm/cacheflush.h>
2323
#include <asm/hwcap.h>
2424
#include <asm/sbi.h>
25+
#include <asm/vector.h>
26+
#include <asm/kvm_vcpu_vector.h>
2527

2628
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
2729
KVM_GENERIC_VCPU_STATS(),
@@ -139,6 +141,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
139141

140142
kvm_riscv_vcpu_fp_reset(vcpu);
141143

144+
kvm_riscv_vcpu_vector_reset(vcpu);
145+
142146
kvm_riscv_vcpu_timer_reset(vcpu);
143147

144148
kvm_riscv_vcpu_aia_reset(vcpu);
@@ -199,6 +203,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
199203
cntx->hstatus |= HSTATUS_SPVP;
200204
cntx->hstatus |= HSTATUS_SPV;
201205

206+
if (kvm_riscv_vcpu_alloc_vector_context(vcpu, cntx))
207+
return -ENOMEM;
208+
202209
/* By default, make CY, TM, and IR counters accessible in VU mode */
203210
reset_csr->scounteren = 0x7;
204211

@@ -242,6 +249,9 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
242249

243250
/* Free unused pages pre-allocated for G-stage page table mappings */
244251
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
252+
253+
/* Free vector context space for host and guest kernel */
254+
kvm_riscv_vcpu_free_vector_context(vcpu);
245255
}
246256

247257
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
@@ -680,6 +690,9 @@ static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
680690
return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
681691
case KVM_REG_RISCV_SBI_EXT:
682692
return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
693+
case KVM_REG_RISCV_VECTOR:
694+
return kvm_riscv_vcpu_set_reg_vector(vcpu, reg,
695+
KVM_REG_RISCV_VECTOR);
683696
default:
684697
break;
685698
}
@@ -709,6 +722,9 @@ static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
709722
return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
710723
case KVM_REG_RISCV_SBI_EXT:
711724
return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
725+
case KVM_REG_RISCV_VECTOR:
726+
return kvm_riscv_vcpu_get_reg_vector(vcpu, reg,
727+
KVM_REG_RISCV_VECTOR);
712728
default:
713729
break;
714730
}
@@ -1003,6 +1019,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
10031019
kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
10041020
kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
10051021
vcpu->arch.isa);
1022+
kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context);
1023+
kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context,
1024+
vcpu->arch.isa);
10061025

10071026
kvm_riscv_vcpu_aia_load(vcpu, cpu);
10081027

@@ -1022,6 +1041,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
10221041
kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
10231042

10241043
kvm_riscv_vcpu_timer_save(vcpu);
1044+
kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context,
1045+
vcpu->arch.isa);
1046+
kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context);
10251047

10261048
csr->vsstatus = csr_read(CSR_VSSTATUS);
10271049
csr->vsie = csr_read(CSR_VSIE);

arch/riscv/kvm/vcpu_vector.c

Lines changed: 186 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,186 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/*
3+
* Copyright (C) 2022 SiFive
4+
*
5+
* Authors:
6+
* Vincent Chen <[email protected]>
7+
* Greentime Hu <[email protected]>
8+
*/
9+
10+
#include <linux/errno.h>
11+
#include <linux/err.h>
12+
#include <linux/kvm_host.h>
13+
#include <linux/uaccess.h>
14+
#include <asm/hwcap.h>
15+
#include <asm/kvm_vcpu_vector.h>
16+
#include <asm/vector.h>
17+
18+
#ifdef CONFIG_RISCV_ISA_V
19+
void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
20+
{
21+
unsigned long *isa = vcpu->arch.isa;
22+
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
23+
24+
cntx->sstatus &= ~SR_VS;
25+
if (riscv_isa_extension_available(isa, v)) {
26+
cntx->sstatus |= SR_VS_INITIAL;
27+
WARN_ON(!cntx->vector.datap);
28+
memset(cntx->vector.datap, 0, riscv_v_vsize);
29+
} else {
30+
cntx->sstatus |= SR_VS_OFF;
31+
}
32+
}
33+
34+
static void kvm_riscv_vcpu_vector_clean(struct kvm_cpu_context *cntx)
35+
{
36+
cntx->sstatus &= ~SR_VS;
37+
cntx->sstatus |= SR_VS_CLEAN;
38+
}
39+
40+
void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
41+
unsigned long *isa)
42+
{
43+
if ((cntx->sstatus & SR_VS) == SR_VS_DIRTY) {
44+
if (riscv_isa_extension_available(isa, v))
45+
__kvm_riscv_vector_save(cntx);
46+
kvm_riscv_vcpu_vector_clean(cntx);
47+
}
48+
}
49+
50+
void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
51+
unsigned long *isa)
52+
{
53+
if ((cntx->sstatus & SR_VS) != SR_VS_OFF) {
54+
if (riscv_isa_extension_available(isa, v))
55+
__kvm_riscv_vector_restore(cntx);
56+
kvm_riscv_vcpu_vector_clean(cntx);
57+
}
58+
}
59+
60+
void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
61+
{
62+
/* No need to check host sstatus as it can be modified outside */
63+
if (riscv_isa_extension_available(NULL, v))
64+
__kvm_riscv_vector_save(cntx);
65+
}
66+
67+
void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
68+
{
69+
if (riscv_isa_extension_available(NULL, v))
70+
__kvm_riscv_vector_restore(cntx);
71+
}
72+
73+
int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
74+
struct kvm_cpu_context *cntx)
75+
{
76+
cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
77+
if (!cntx->vector.datap)
78+
return -ENOMEM;
79+
80+
vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
81+
if (!vcpu->arch.host_context.vector.datap)
82+
return -ENOMEM;
83+
84+
return 0;
85+
}
86+
87+
void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
88+
{
89+
kfree(vcpu->arch.guest_reset_context.vector.datap);
90+
kfree(vcpu->arch.host_context.vector.datap);
91+
}
92+
#endif
93+
94+
static void *kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu,
95+
unsigned long reg_num,
96+
size_t reg_size)
97+
{
98+
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
99+
void *reg_val;
100+
size_t vlenb = riscv_v_vsize / 32;
101+
102+
if (reg_num < KVM_REG_RISCV_VECTOR_REG(0)) {
103+
if (reg_size != sizeof(unsigned long))
104+
return NULL;
105+
switch (reg_num) {
106+
case KVM_REG_RISCV_VECTOR_CSR_REG(vstart):
107+
reg_val = &cntx->vector.vstart;
108+
break;
109+
case KVM_REG_RISCV_VECTOR_CSR_REG(vl):
110+
reg_val = &cntx->vector.vl;
111+
break;
112+
case KVM_REG_RISCV_VECTOR_CSR_REG(vtype):
113+
reg_val = &cntx->vector.vtype;
114+
break;
115+
case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr):
116+
reg_val = &cntx->vector.vcsr;
117+
break;
118+
case KVM_REG_RISCV_VECTOR_CSR_REG(datap):
119+
default:
120+
return NULL;
121+
}
122+
} else if (reg_num <= KVM_REG_RISCV_VECTOR_REG(31)) {
123+
if (reg_size != vlenb)
124+
return NULL;
125+
reg_val = cntx->vector.datap
126+
+ (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb;
127+
} else {
128+
return NULL;
129+
}
130+
131+
return reg_val;
132+
}
133+
134+
int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
135+
const struct kvm_one_reg *reg,
136+
unsigned long rtype)
137+
{
138+
unsigned long *isa = vcpu->arch.isa;
139+
unsigned long __user *uaddr =
140+
(unsigned long __user *)(unsigned long)reg->addr;
141+
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
142+
KVM_REG_SIZE_MASK |
143+
rtype);
144+
void *reg_val = NULL;
145+
size_t reg_size = KVM_REG_SIZE(reg->id);
146+
147+
if (rtype == KVM_REG_RISCV_VECTOR &&
148+
riscv_isa_extension_available(isa, v)) {
149+
reg_val = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size);
150+
}
151+
152+
if (!reg_val)
153+
return -EINVAL;
154+
155+
if (copy_to_user(uaddr, reg_val, reg_size))
156+
return -EFAULT;
157+
158+
return 0;
159+
}
160+
161+
int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
162+
const struct kvm_one_reg *reg,
163+
unsigned long rtype)
164+
{
165+
unsigned long *isa = vcpu->arch.isa;
166+
unsigned long __user *uaddr =
167+
(unsigned long __user *)(unsigned long)reg->addr;
168+
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
169+
KVM_REG_SIZE_MASK |
170+
rtype);
171+
void *reg_val = NULL;
172+
size_t reg_size = KVM_REG_SIZE(reg->id);
173+
174+
if (rtype == KVM_REG_RISCV_VECTOR &&
175+
riscv_isa_extension_available(isa, v)) {
176+
reg_val = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size);
177+
}
178+
179+
if (!reg_val)
180+
return -EINVAL;
181+
182+
if (copy_from_user(reg_val, uaddr, reg_size))
183+
return -EFAULT;
184+
185+
return 0;
186+
}

0 commit comments

Comments
 (0)