Skip to content

Commit e33bda7

Browse files
bibo-maochenhuacai
authored and committed
LoongArch: KVM: Add PV IPI support on host side
On LoongArch system, IPI hw uses iocsr registers. There are one iocsr register access on IPI sending, and two iocsr access on IPI receiving for the IPI interrupt handler. In VM mode all iocsr accessing will cause VM to trap into hypervisor. So with one IPI hw notification there will be three times of trap. In this patch PV IPI is added for VM, hypercall instruction is used for IPI sender, and hypervisor will inject an SWI to the destination vcpu. During the SWI interrupt handler, only CSR.ESTAT register is written to clear irq. CSR.ESTAT register access will not trap into hypervisor, so with PV IPI supported, there is one trap with IPI sender, and no trap with IPI receiver, there is only one trap with IPI notification. Also this patch adds IPI multicast support, the method is similar with x86. With IPI multicast support, IPI notification can be sent to at most 128 vcpus at one time. It greatly reduces the times of trapping into hypervisor. Signed-off-by: Bibo Mao <[email protected]> Signed-off-by: Huacai Chen <[email protected]>
1 parent 73516e9 commit e33bda7

File tree

6 files changed

+211
-2
lines changed

6 files changed

+211
-2
lines changed

arch/loongarch/include/asm/kvm_host.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,7 @@ struct kvm_vcpu_stat {
4343
u64 idle_exits;
4444
u64 cpucfg_exits;
4545
u64 signal_exits;
46+
u64 hypercall_exits;
4647
};
4748

4849
#define KVM_MEM_HUGEPAGE_CAPABLE (1UL << 0)

arch/loongarch/include/asm/kvm_para.h

Lines changed: 131 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,13 +2,144 @@
22
#ifndef _ASM_LOONGARCH_KVM_PARA_H
33
#define _ASM_LOONGARCH_KVM_PARA_H
44

5+
/*
 * Hypercall code field
 *
 * The hypercall code carried in the hvcl immediate is split into a
 * vendor id (upper bits) and a per-vendor code (lower bits).
 */
#define HYPERVISOR_KVM 1
#define HYPERVISOR_VENDOR_SHIFT 8
/* Arguments fully parenthesized to avoid operator-precedence surprises */
#define HYPERCALL_ENCODE(vendor, code) (((vendor) << HYPERVISOR_VENDOR_SHIFT) + (code))

#define KVM_HCALL_CODE_SERVICE 0

#define KVM_HCALL_SERVICE HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
#define KVM_HCALL_FUNC_IPI 1
16+
517
/*
618
* LoongArch hypercall return code
719
*/
820
#define KVM_HCALL_SUCCESS 0
921
#define KVM_HCALL_INVALID_CODE -1UL
1022
#define KVM_HCALL_INVALID_PARAMETER -2UL
1123

24+
/*
 * Hypercall interface for KVM hypervisor
 *
 * a0: function identifier
 * a1-a6: args
 * Return value will be placed in a0.
 * Up to 6 arguments are passed in a1, a2, a3, a4, a5, a6.
 */
static __always_inline long kvm_hypercall0(u64 fid)
{
	/* fun and ret intentionally share a0: function id in, result out */
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;

	/* "memory" clobber: compiler must not cache memory across the hypercall */
	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun)
		: "memory"
		);

	return ret;
}
46+
47+
/* One-argument hypercall: fid in a0, arg0 in a1; result returned in a0 */
static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1)
		: "memory"
		);

	return ret;
}
62+
63+
/* Two-argument hypercall: fid in a0, args in a1-a2; result returned in a0 */
static __always_inline long kvm_hypercall2(u64 fid,
		unsigned long arg0, unsigned long arg1)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;
	register unsigned long a2 asm("a2") = arg1;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2)
		: "memory"
		);

	return ret;
}
80+
81+
/* Three-argument hypercall: fid in a0, args in a1-a3; result returned in a0 */
static __always_inline long kvm_hypercall3(u64 fid,
		unsigned long arg0, unsigned long arg1, unsigned long arg2)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;
	register unsigned long a2 asm("a2") = arg1;
	register unsigned long a3 asm("a3") = arg2;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3)
		: "memory"
		);

	return ret;
}
99+
100+
/* Four-argument hypercall: fid in a0, args in a1-a4; result returned in a0 */
static __always_inline long kvm_hypercall4(u64 fid,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;
	register unsigned long a2 asm("a2") = arg1;
	register unsigned long a3 asm("a3") = arg2;
	register unsigned long a4 asm("a4") = arg3;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4)
		: "memory"
		);

	return ret;
}
120+
121+
/* Five-argument hypercall: fid in a0, args in a1-a5; result returned in a0 */
static __always_inline long kvm_hypercall5(u64 fid,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3, unsigned long arg4)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;
	register unsigned long a2 asm("a2") = arg1;
	register unsigned long a3 asm("a3") = arg2;
	register unsigned long a4 asm("a4") = arg3;
	register unsigned long a5 asm("a5") = arg4;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5)
		: "memory"
		);

	return ret;
}
142+
12143
static inline unsigned int kvm_arch_para_features(void)
13144
{
14145
return 0;

arch/loongarch/include/asm/kvm_vcpu.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -110,4 +110,14 @@ static inline int kvm_queue_exception(struct kvm_vcpu *vcpu,
110110
return -1;
111111
}
112112

113+
/* Read the value of guest general-purpose register @num */
static inline unsigned long kvm_read_reg(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.gprs[num];
}
117+
118+
/* Write @val into guest general-purpose register @num */
static inline void kvm_write_reg(struct kvm_vcpu *vcpu, int num, unsigned long val)
{
	vcpu->arch.gprs[num] = val;
}
122+
113123
#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */

arch/loongarch/include/asm/loongarch.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -168,6 +168,7 @@
168168
#define CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0)
169169
#define KVM_SIGNATURE "KVM\0"
170170
#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
171+
#define KVM_FEATURE_IPI BIT(1)
171172

172173
#ifndef __ASSEMBLY__
173174

arch/loongarch/kvm/exit.c

Lines changed: 67 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,9 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
4848
/* CPUCFG emulation between 0x40000000 -- 0x400000ff */
4949
vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
5050
break;
51+
case CPUCFG_KVM_FEATURE:
52+
vcpu->arch.gprs[rd] = KVM_FEATURE_IPI;
53+
break;
5154
default:
5255
vcpu->arch.gprs[rd] = 0;
5356
break;
@@ -706,12 +709,74 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
706709
return RESUME_GUEST;
707710
}
708711

712+
static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
713+
{
714+
unsigned int min, cpu, i;
715+
unsigned long ipi_bitmap;
716+
struct kvm_vcpu *dest;
717+
718+
min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
719+
for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
720+
ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
721+
if (!ipi_bitmap)
722+
continue;
723+
724+
cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
725+
while (cpu < BITS_PER_LONG) {
726+
dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
727+
cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
728+
if (!dest)
729+
continue;
730+
731+
/* Send SWI0 to dest vcpu to emulate IPI interrupt */
732+
kvm_queue_irq(dest, INT_SWI0);
733+
kvm_vcpu_kick(dest);
734+
}
735+
}
736+
737+
return 0;
738+
}
739+
740+
/*
741+
* Hypercall emulation always return to guest, Caller should check retval.
742+
*/
743+
static void kvm_handle_service(struct kvm_vcpu *vcpu)
744+
{
745+
unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
746+
long ret;
747+
748+
switch (func) {
749+
case KVM_HCALL_FUNC_IPI:
750+
kvm_send_pv_ipi(vcpu);
751+
ret = KVM_HCALL_SUCCESS;
752+
break;
753+
default:
754+
ret = KVM_HCALL_INVALID_CODE;
755+
break;
756+
};
757+
758+
kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
759+
}
760+
709761
static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
710762
{
763+
larch_inst inst;
764+
unsigned int code;
765+
766+
inst.word = vcpu->arch.badi;
767+
code = inst.reg0i15_format.immediate;
711768
update_pc(&vcpu->arch);
712769

713-
/* Treat it as noop intruction, only set return value */
714-
vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE;
770+
switch (code) {
771+
case KVM_HCALL_SERVICE:
772+
vcpu->stat.hypercall_exits++;
773+
kvm_handle_service(vcpu);
774+
break;
775+
default:
776+
/* Treat it as noop intruction, only set return value */
777+
kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
778+
break;
779+
}
715780

716781
return RESUME_GUEST;
717782
}

arch/loongarch/kvm/vcpu.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
1919
STATS_DESC_COUNTER(VCPU, idle_exits),
2020
STATS_DESC_COUNTER(VCPU, cpucfg_exits),
2121
STATS_DESC_COUNTER(VCPU, signal_exits),
22+
STATS_DESC_COUNTER(VCPU, hypercall_exits)
2223
};
2324

2425
const struct kvm_stats_header kvm_vcpu_stats_header = {

0 commit comments

Comments
 (0)