
Commit b67ee19

bibo-mao authored and chenhuacai committed
LoongArch: KVM: Add Binary Translation extension support
Loongson Binary Translation (LBT) is used to accelerate binary translation. It contains four scratch registers (scr0 to scr3), the x86/ARM eflags (eflags) and the x87 FPU stack pointer (ftop). Like the FPU extension, a lazy enabling method is used for LBT: the LBT context is saved/restored on the vcpu context switch path.

Signed-off-by: Bibo Mao <[email protected]>
Signed-off-by: Huacai Chen <[email protected]>
1 parent a53f48b commit b67ee19
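The commit message enumerates the LBT state that has to follow the vCPU: four scratch registers, the emulated eflags, and the x87 top-of-stack pointer. For orientation only, that state maps onto a small per-vCPU context structure along the lines of the sketch below. The real struct loongarch_lbt is defined in the LoongArch arch headers and is not part of this diff, so the structure name and field layout here are illustrative assumptions, not the kernel's.

/*
 * Illustrative sketch only (not from this patch): per-vCPU LBT state as
 * described in the commit message. The authoritative definition lives in
 * the LoongArch headers.
 */
struct lbt_context_sketch {
	unsigned long scr0;	/* binary-translation scratch registers scr0..scr3 */
	unsigned long scr1;
	unsigned long scr2;
	unsigned long scr3;
	unsigned long eflags;	/* emulated x86/ARM eflags */
	unsigned long ftop;	/* x87 FPU stack pointer; handled together with the FPU/fcsr state below */
};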

4 files changed, 84 insertions(+), 3 deletions(-)


arch/loongarch/include/asm/kvm_host.h

Lines changed: 9 additions & 2 deletions
@@ -133,8 +133,9 @@ enum emulation_result {
 #define KVM_LARCH_FPU (0x1 << 0)
 #define KVM_LARCH_LSX (0x1 << 1)
 #define KVM_LARCH_LASX (0x1 << 2)
-#define KVM_LARCH_SWCSR_LATEST (0x1 << 3)
-#define KVM_LARCH_HWCSR_USABLE (0x1 << 4)
+#define KVM_LARCH_LBT (0x1 << 3)
+#define KVM_LARCH_SWCSR_LATEST (0x1 << 4)
+#define KVM_LARCH_HWCSR_USABLE (0x1 << 5)
 
 struct kvm_vcpu_arch {
 	/*
@@ -168,6 +169,7 @@ struct kvm_vcpu_arch {
 
 	/* FPU state */
 	struct loongarch_fpu fpu FPU_ALIGN;
+	struct loongarch_lbt lbt;
 
 	/* CSR state */
 	struct loongarch_csrs *csr;
@@ -239,6 +241,11 @@ static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
 	return arch->cpucfg[2] & CPUCFG2_LASX;
 }
 
+static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
+{
+	return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT);
+}
+
 /* Debug: dump vcpu state */
 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
 
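Since KVM_LARCH_LBT takes bit 3, the two CSR bookkeeping flags each move up by one bit. A quick standalone compile-time check (hypothetical test snippet, not part of the patch; the values are copied from the hunk above) confirms the six aux_inuse flags remain distinct single bits:

/* Hypothetical sanity check mirroring the new flag values. Build with C11 or later. */
#include <assert.h>

#define KVM_LARCH_FPU		(0x1 << 0)
#define KVM_LARCH_LSX		(0x1 << 1)
#define KVM_LARCH_LASX		(0x1 << 2)
#define KVM_LARCH_LBT		(0x1 << 3)
#define KVM_LARCH_SWCSR_LATEST	(0x1 << 4)
#define KVM_LARCH_HWCSR_USABLE	(0x1 << 5)

/* Six distinct bits OR together to 0x3f, so no two flags alias in aux_inuse. */
static_assert((KVM_LARCH_FPU | KVM_LARCH_LSX | KVM_LARCH_LASX | KVM_LARCH_LBT |
	       KVM_LARCH_SWCSR_LATEST | KVM_LARCH_HWCSR_USABLE) == 0x3f,
	      "aux_inuse feature flags must not overlap");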
arch/loongarch/include/asm/kvm_vcpu.h

Lines changed: 6 additions & 0 deletions
@@ -75,6 +75,12 @@ static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
 static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
 #endif
 
+#ifdef CONFIG_CPU_HAS_LBT
+int kvm_own_lbt(struct kvm_vcpu *vcpu);
+#else
+static inline int kvm_own_lbt(struct kvm_vcpu *vcpu) { return -EINVAL; }
+#endif
+
 void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
 void kvm_save_timer(struct kvm_vcpu *vcpu);
 void kvm_restore_timer(struct kvm_vcpu *vcpu);
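The #else stub means callers never need their own CONFIG_CPU_HAS_LBT guards: on kernels built without LBT, kvm_own_lbt() simply reports failure and the caller turns that into guest-visible behaviour (the exit handler below injects an exception). A minimal standalone illustration of this config-stub pattern, with purely hypothetical names:

#include <errno.h>
#include <stdio.h>

/* Stand-in for a Kconfig option such as CONFIG_CPU_HAS_LBT; leave undefined to use the stub. */
/* #define HAVE_FEATURE_X */

#ifdef HAVE_FEATURE_X
int own_feature_x(void);				/* real implementation provided elsewhere */
#else
static inline int own_feature_x(void) { return -EINVAL; }	/* graceful fallback */
#endif

int main(void)
{
	/* The call site is identical either way; only the outcome differs. */
	if (own_feature_x())
		printf("feature unavailable, signal an error to the guest instead\n");
	return 0;
}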

arch/loongarch/kvm/exit.c

Lines changed: 9 additions & 0 deletions
@@ -748,6 +748,14 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
 	return RESUME_GUEST;
 }
 
+static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
+{
+	if (kvm_own_lbt(vcpu))
+		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
+
+	return RESUME_GUEST;
+}
+
 static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
 {
 	unsigned int min, cpu, i;
@@ -865,6 +873,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
 	[EXCCODE_FPDIS] = kvm_handle_fpu_disabled,
 	[EXCCODE_LSXDIS] = kvm_handle_lsx_disabled,
 	[EXCCODE_LASXDIS] = kvm_handle_lasx_disabled,
+	[EXCCODE_BTDIS] = kvm_handle_lbt_disabled,
 	[EXCCODE_GSPR] = kvm_handle_gspr,
 	[EXCCODE_HVC] = kvm_handle_hypercall,
 };
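kvm_fault_tables is an array of handler pointers indexed by exception code, so routing the new EXCCODE_BTDIS exit is a single designated initializer. A self-contained sketch of the same dispatch idiom, using toy codes and handlers rather than the kernel's table:

#include <stdio.h>

enum exc_code { EXC_FPDIS, EXC_BTDIS, EXC_MAX };	/* toy exception codes */

typedef int (*exit_handler_fn)(void);

static int handle_fpu_disabled(void) { printf("lazily enable FPU\n"); return 0; }
static int handle_lbt_disabled(void) { printf("lazily enable LBT\n"); return 0; }
static int handle_unknown(void)      { printf("unhandled exit\n");    return -1; }

/* Designated initializers keep the code-to-handler mapping explicit and sparse. */
static exit_handler_fn fault_table[EXC_MAX] = {
	[EXC_FPDIS] = handle_fpu_disabled,
	[EXC_BTDIS] = handle_lbt_disabled,
};

int main(void)
{
	enum exc_code code = EXC_BTDIS;		/* pretend the guest trapped here */
	exit_handler_fn fn = fault_table[code] ? fault_table[code] : handle_unknown;

	return fn();
}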

arch/loongarch/kvm/vcpu.c

Lines changed: 60 additions & 1 deletion
@@ -6,6 +6,7 @@
 #include <linux/kvm_host.h>
 #include <linux/entry-kvm.h>
 #include <asm/fpu.h>
+#include <asm/lbt.h>
 #include <asm/loongarch.h>
 #include <asm/setup.h>
 #include <asm/time.h>
@@ -983,12 +984,66 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	return 0;
 }
 
+#ifdef CONFIG_CPU_HAS_LBT
+int kvm_own_lbt(struct kvm_vcpu *vcpu)
+{
+	if (!kvm_guest_has_lbt(&vcpu->arch))
+		return -EINVAL;
+
+	preempt_disable();
+	set_csr_euen(CSR_EUEN_LBTEN);
+	_restore_lbt(&vcpu->arch.lbt);
+	vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
+	preempt_enable();
+
+	return 0;
+}
+
+static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
+		_save_lbt(&vcpu->arch.lbt);
+		clear_csr_euen(CSR_EUEN_LBTEN);
+		vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
+	}
+	preempt_enable();
+}
+
+static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
+{
+	/*
+	 * If TM is enabled, top register save/restore will
+	 * cause lbt exception, here enable lbt in advance
+	 */
+	if (fcsr & FPU_CSR_TM)
+		kvm_own_lbt(vcpu);
+}
+
+static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
+		if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
+			return;
+		kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
+	}
+}
+#else
+static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
+static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
+static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
+#endif
+
 /* Enable FPU and restore context */
 void kvm_own_fpu(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
 
-	/* Enable FPU */
+	/*
+	 * Enable FPU for guest
+	 * Set FR and FRE according to guest context
+	 */
+	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
 	set_csr_euen(CSR_EUEN_FPEN);
 
 	kvm_restore_fpu(&vcpu->arch.fpu);
@@ -1008,6 +1063,7 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu)
 	preempt_disable();
 
 	/* Enable LSX for guest */
+	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
 	set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
 	switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
 	case KVM_LARCH_FPU:
@@ -1042,6 +1098,7 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu)
 
 	preempt_disable();
 
+	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
 	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
 	switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
 	case KVM_LARCH_LSX:
@@ -1073,6 +1130,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
 
+	kvm_check_fcsr_alive(vcpu);
 	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
 		kvm_save_lasx(&vcpu->arch.fpu);
 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
@@ -1095,6 +1153,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
 		/* Disable FPU */
 		clear_csr_euen(CSR_EUEN_FPEN);
 	}
+	kvm_lose_lbt(vcpu);
 
 	preempt_enable();
 }
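Taken together, the vcpu.c changes implement the lazy enable/lose lifecycle the commit message describes: LBT stays off until first use (or until an FCSR with TM set forces it on early), and the context is written back and the unit disabled when the FPU state is torn down on a vCPU switch. The standalone sketch below walks that lifecycle with plain flags; every name is a hypothetical stand-in for the kvm_own_lbt()/kvm_lose_lbt()/kvm_check_fcsr() trio above, and the bit values are arbitrary.

#include <stdio.h>

#define AUX_FPU	(1 << 0)		/* mirrors KVM_LARCH_FPU */
#define AUX_LBT	(1 << 3)		/* mirrors KVM_LARCH_LBT */
#define FCSR_TM	(1 << 6)		/* arbitrary stand-in for FPU_CSR_TM */

struct toy_vcpu { unsigned int aux_inuse; unsigned long fcsr; };

static void own_lbt(struct toy_vcpu *v)		/* first use: enable unit, restore context */
{
	if (!(v->aux_inuse & AUX_LBT)) {
		printf("enable LBT, restore scr0-scr3/eflags\n");
		v->aux_inuse |= AUX_LBT;
	}
}

static void lose_lbt(struct toy_vcpu *v)	/* vCPU switch: save context, disable unit */
{
	if (v->aux_inuse & AUX_LBT) {
		printf("save LBT context, disable LBT\n");
		v->aux_inuse &= ~AUX_LBT;
	}
}

int main(void)
{
	struct toy_vcpu v = { .aux_inuse = AUX_FPU, .fcsr = FCSR_TM };

	/* kvm_check_fcsr() analogue: TM set means ftop will be touched, so enable LBT in advance. */
	if (v.fcsr & FCSR_TM)
		own_lbt(&v);

	own_lbt(&v);	/* a later BTDIS-style trap now finds LBT already owned */
	lose_lbt(&v);	/* switching the vCPU out writes the LBT state back */

	return 0;
}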
