Skip to content

Commit b4ba157

Browse files
bibo-maochenhuacai
authored and committed
LoongArch: KVM: Add PV steal time support in host side
Add the ParaVirt steal time feature on the host side. A VM can query the features supported by the KVM hypervisor; a feature bit KVM_FEATURE_STEAL_TIME is added here. Like on x86, the steal time structure is saved in guest memory, and one hypercall function KVM_HCALL_FUNC_NOTIFY is added so the guest can notify KVM to enable this feature. One vCPU attr ioctl command KVM_LOONGARCH_VCPU_PVTIME_CTRL is added to save and restore the base address of the steal time structure when a VM is migrated. Signed-off-by: Bibo Mao <[email protected]> Signed-off-by: Huacai Chen <[email protected]>
1 parent d7ad41a commit b4ba157

File tree

8 files changed

+189
-4
lines changed

8 files changed

+189
-4
lines changed

arch/loongarch/include/asm/kvm_host.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@
3131

3232
#define KVM_HALT_POLL_NS_DEFAULT 500000
3333
#define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0)
34+
#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1)
3435

3536
#define KVM_GUESTDBG_SW_BP_MASK \
3637
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
@@ -206,6 +207,13 @@ struct kvm_vcpu_arch {
206207
struct kvm_mp_state mp_state;
207208
/* cpucfg */
208209
u32 cpucfg[KVM_MAX_CPUCFG_REGS];
210+
211+
/* paravirt steal time */
212+
struct {
213+
u64 guest_addr;
214+
u64 last_steal;
215+
struct gfn_to_hva_cache cache;
216+
} st;
209217
};
210218

211219
static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)

arch/loongarch/include/asm/kvm_para.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414

1515
#define KVM_HCALL_SERVICE HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
1616
#define KVM_HCALL_FUNC_IPI 1
17+
#define KVM_HCALL_FUNC_NOTIFY 2
1718

1819
#define KVM_HCALL_SWDBG HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)
1920

@@ -24,6 +25,16 @@
2425
#define KVM_HCALL_INVALID_CODE -1UL
2526
#define KVM_HCALL_INVALID_PARAMETER -2UL
2627

28+
#define KVM_STEAL_PHYS_VALID BIT_ULL(0)
29+
#define KVM_STEAL_PHYS_MASK GENMASK_ULL(63, 6)
30+
31+
/*
 * Steal time record shared with the guest; lives in guest memory at the
 * physical address the guest registers via KVM_HCALL_FUNC_NOTIFY.
 * The layout must match the guest-side definition exactly (64 bytes total).
 */
struct kvm_steal_time {
	__u64 steal;	/* accumulated preempted time; taken from sched_info.run_delay — presumably nanoseconds, TODO confirm against guest side */
	__u32 version;	/* seqcount-style counter: odd while the host is mid-update, bumped twice per update */
	__u32 flags;	/* currently unused by the host-side code visible here */
	__u32 pad[12];	/* pad the structure to 64 bytes for future extension */
};
37+
2738
/*
2839
* Hypercall interface for KVM hypervisor
2940
*

arch/loongarch/include/asm/kvm_vcpu.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -120,4 +120,9 @@ static inline void kvm_write_reg(struct kvm_vcpu *vcpu, int num, unsigned long v
120120
vcpu->arch.gprs[num] = val;
121121
}
122122

123+
static inline bool kvm_pvtime_supported(void)
124+
{
125+
return !!sched_info_on();
126+
}
127+
123128
#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */

arch/loongarch/include/asm/loongarch.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -169,6 +169,7 @@
169169
#define KVM_SIGNATURE "KVM\0"
170170
#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
171171
#define KVM_FEATURE_IPI BIT(1)
172+
#define KVM_FEATURE_STEAL_TIME BIT(2)
172173

173174
#ifndef __ASSEMBLY__
174175

arch/loongarch/include/uapi/asm/kvm.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,11 @@ struct kvm_fpu {
8181
#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
8282
#define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
8383
#define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
84+
85+
/* Device Control API on vcpu fd */
8486
#define KVM_LOONGARCH_VCPU_CPUCFG 0
87+
#define KVM_LOONGARCH_VCPU_PVTIME_CTRL 1
88+
#define KVM_LOONGARCH_VCPU_PVTIME_GPA 0
8589

8690
struct kvm_debug_exit_arch {
8791
};

arch/loongarch/kvm/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@ config KVM
2929
select KVM_MMIO
3030
select HAVE_KVM_READONLY_MEM
3131
select KVM_XFER_TO_GUEST_WORK
32+
select SCHED_INFO
3233
help
3334
Support hosting virtualized guest machines using
3435
hardware virtualization extensions. You will need

arch/loongarch/kvm/exit.c

Lines changed: 36 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
2525
{
2626
int rd, rj;
27-
unsigned int index;
27+
unsigned int index, ret;
2828

2929
if (inst.reg2_format.opcode != cpucfg_op)
3030
return EMULATE_FAIL;
@@ -50,7 +50,10 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
5050
vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
5151
break;
5252
case CPUCFG_KVM_FEATURE:
53-
vcpu->arch.gprs[rd] = KVM_FEATURE_IPI;
53+
ret = KVM_FEATURE_IPI;
54+
if (kvm_pvtime_supported())
55+
ret |= KVM_FEATURE_STEAL_TIME;
56+
vcpu->arch.gprs[rd] = ret;
5457
break;
5558
default:
5659
vcpu->arch.gprs[rd] = 0;
@@ -687,6 +690,34 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
687690
return RESUME_GUEST;
688691
}
689692

693+
static long kvm_save_notify(struct kvm_vcpu *vcpu)
694+
{
695+
unsigned long id, data;
696+
697+
id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
698+
data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
699+
switch (id) {
700+
case KVM_FEATURE_STEAL_TIME:
701+
if (!kvm_pvtime_supported())
702+
return KVM_HCALL_INVALID_CODE;
703+
704+
if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
705+
return KVM_HCALL_INVALID_PARAMETER;
706+
707+
vcpu->arch.st.guest_addr = data;
708+
if (!(data & KVM_STEAL_PHYS_VALID))
709+
break;
710+
711+
vcpu->arch.st.last_steal = current->sched_info.run_delay;
712+
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
713+
break;
714+
default:
715+
break;
716+
};
717+
718+
return 0;
719+
};
720+
690721
/*
691722
* kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
692723
* @vcpu: Virtual CPU context.
@@ -758,6 +789,9 @@ static void kvm_handle_service(struct kvm_vcpu *vcpu)
758789
kvm_send_pv_ipi(vcpu);
759790
ret = KVM_HCALL_SUCCESS;
760791
break;
792+
case KVM_HCALL_FUNC_NOTIFY:
793+
ret = kvm_save_notify(vcpu);
794+
break;
761795
default:
762796
ret = KVM_HCALL_INVALID_CODE;
763797
break;

arch/loongarch/kvm/vcpu.c

Lines changed: 123 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,50 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
3131
sizeof(kvm_vcpu_stats_desc),
3232
};
3333

34+
/*
 * Publish the vCPU's accumulated steal time into the guest's registered
 * kvm_steal_time structure.  Called from the vCPU run loop when
 * KVM_REQ_STEAL_UPDATE is pending.
 *
 * Update protocol (seqcount-like, mirrors the x86 implementation): bump
 * 'version' to an odd value, write 'steal', then bump 'version' to an even
 * value, with smp_wmb() ordering each step so the guest can detect a torn
 * read by re-checking the version.
 *
 * NOTE(review): unsafe_get_user()/unsafe_put_user() are used without a
 * user_access_begin()/user_access_end() pair — presumably acceptable on
 * LoongArch where the fallback accessors need no explicit enable; confirm
 * against the architecture's uaccess implementation.
 */
static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
	u32 version;
	u64 steal;
	gpa_t gpa;
	struct kvm_memslots *slots;
	struct kvm_steal_time __user *st;
	struct gfn_to_hva_cache *ghc;

	ghc = &vcpu->arch.st.cache;
	gpa = vcpu->arch.st.guest_addr;
	/* Nothing to do unless the guest enabled steal time reporting. */
	if (!(gpa & KVM_STEAL_PHYS_VALID))
		return;

	/* Strip the valid/control bits to get the real guest physical address. */
	gpa &= KVM_STEAL_PHYS_MASK;
	slots = kvm_memslots(vcpu->kvm);
	/* Re-translate if the memslots changed or the cached GPA is stale. */
	if (slots->generation != ghc->generation || gpa != ghc->gpa) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
			/* Translation failed; invalidate the cache and bail. */
			ghc->gpa = INVALID_GPA;
			return;
		}
	}

	st = (struct kvm_steal_time __user *)ghc->hva;
	unsafe_get_user(version, &st->version, out);
	if (version & 1)
		version += 1; /* first time write, random junk */

	/* Odd version: signals guest that an update is in progress. */
	version += 1;
	unsafe_put_user(version, &st->version, out);
	smp_wmb();

	/* Accumulate run-delay gained since the last publication. */
	unsafe_get_user(steal, &st->steal, out);
	steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
	vcpu->arch.st.last_steal = current->sched_info.run_delay;
	unsafe_put_user(steal, &st->steal, out);

	/* Even version: update complete. */
	smp_wmb();
	version += 1;
	unsafe_put_user(version, &st->version, out);
out:
	/* Any unsafe_* fault jumps here; dirty-log the page regardless. */
	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}
77+
3478
/*
3579
* kvm_check_requests - check and handle pending vCPU requests
3680
*
@@ -48,6 +92,9 @@ static int kvm_check_requests(struct kvm_vcpu *vcpu)
4892
if (kvm_dirty_ring_check_request(vcpu))
4993
return RESUME_HOST;
5094

95+
if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
96+
kvm_update_stolen_time(vcpu);
97+
5198
return RESUME_GUEST;
5299
}
53100

@@ -690,6 +737,16 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
690737
return -ENXIO;
691738
}
692739

740+
static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
741+
struct kvm_device_attr *attr)
742+
{
743+
if (!kvm_pvtime_supported() ||
744+
attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
745+
return -ENXIO;
746+
747+
return 0;
748+
}
749+
693750
static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
694751
struct kvm_device_attr *attr)
695752
{
@@ -699,14 +756,17 @@ static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
699756
case KVM_LOONGARCH_VCPU_CPUCFG:
700757
ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
701758
break;
759+
case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
760+
ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
761+
break;
702762
default:
703763
break;
704764
}
705765

706766
return ret;
707767
}
708768

709-
static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
769+
static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
710770
struct kvm_device_attr *attr)
711771
{
712772
int ret = 0;
@@ -722,14 +782,34 @@ static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
722782
return ret;
723783
}
724784

785+
static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
786+
struct kvm_device_attr *attr)
787+
{
788+
u64 gpa;
789+
u64 __user *user = (u64 __user *)attr->addr;
790+
791+
if (!kvm_pvtime_supported() ||
792+
attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
793+
return -ENXIO;
794+
795+
gpa = vcpu->arch.st.guest_addr;
796+
if (put_user(gpa, user))
797+
return -EFAULT;
798+
799+
return 0;
800+
}
801+
725802
static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
726803
struct kvm_device_attr *attr)
727804
{
728805
int ret = -ENXIO;
729806

730807
switch (attr->group) {
731808
case KVM_LOONGARCH_VCPU_CPUCFG:
732-
ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr);
809+
ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
810+
break;
811+
case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
812+
ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
733813
break;
734814
default:
735815
break;
@@ -744,6 +824,43 @@ static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
744824
return -ENXIO;
745825
}
746826

827+
static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
828+
struct kvm_device_attr *attr)
829+
{
830+
int idx, ret = 0;
831+
u64 gpa, __user *user = (u64 __user *)attr->addr;
832+
struct kvm *kvm = vcpu->kvm;
833+
834+
if (!kvm_pvtime_supported() ||
835+
attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
836+
return -ENXIO;
837+
838+
if (get_user(gpa, user))
839+
return -EFAULT;
840+
841+
if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
842+
return -EINVAL;
843+
844+
if (!(gpa & KVM_STEAL_PHYS_VALID)) {
845+
vcpu->arch.st.guest_addr = gpa;
846+
return 0;
847+
}
848+
849+
/* Check the address is in a valid memslot */
850+
idx = srcu_read_lock(&kvm->srcu);
851+
if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
852+
ret = -EINVAL;
853+
srcu_read_unlock(&kvm->srcu, idx);
854+
855+
if (!ret) {
856+
vcpu->arch.st.guest_addr = gpa;
857+
vcpu->arch.st.last_steal = current->sched_info.run_delay;
858+
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
859+
}
860+
861+
return ret;
862+
}
863+
747864
static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
748865
struct kvm_device_attr *attr)
749866
{
@@ -753,6 +870,9 @@ static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
753870
case KVM_LOONGARCH_VCPU_CPUCFG:
754871
ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
755872
break;
873+
case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
874+
ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
875+
break;
756876
default:
757877
break;
758878
}
@@ -1113,6 +1233,7 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
11131233

11141234
/* Control guest page CCA attribute */
11151235
change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
1236+
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
11161237

11171238
/* Don't bother restoring registers multiple times unless necessary */
11181239
if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)

0 commit comments

Comments
 (0)