
Commit 2fc3bd8

zhaotianrui-loongson authored and chenhuacai committed
LoongArch: KVM: Implement basic vcpu interfaces
Implement basic vcpu interfaces, including:

1. vcpu create and destroy interfaces: save information such as the vcpu exception entry and the vcpu enter-guest pointer into the vcpu arch structure, initialize the vcpu timer, and set the address translation mode when the vcpu is created.

2. vcpu run interface: complete pending MMIO and IOCSR read faults, deliver interrupts, and lose the FPU before the vcpu enters the guest.

3. vcpu handle-exit interface: get the exit code from the ESTAT register and handle it through the kvm exception vector.

Reviewed-by: Bibo Mao <[email protected]>
Tested-by: Huacai Chen <[email protected]>
Signed-off-by: Tianrui Zhao <[email protected]>
Signed-off-by: Huacai Chen <[email protected]>
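As a rough illustration of the run interface described above, here is a minimal userspace sketch (not part of this patch) of how a VMM might drive KVM_RUN and service the MMIO and IOCSR read exits that kvm_arch_vcpu_ioctl_run() completes on re-entry. The handle_mmio()/handle_iocsr() helpers are hypothetical, and the iocsr_io field layout beyond is_write is assumed to mirror the mmio block:

/*
 * Illustrative userspace loop (sketch only, not from this patch):
 * run the vCPU and service MMIO/IOCSR exits via the shared kvm_run area.
 */
#include <linux/kvm.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

/* Hypothetical device-emulation hooks that a real VMM would implement */
static void handle_mmio(unsigned long long addr, unsigned char *data,
                        unsigned int len, int is_write) { /* emulate device */ }
static void handle_iocsr(unsigned long long addr, unsigned char *data,
                         unsigned int len, int is_write) { /* emulate IOCSR */ }

static int run_vcpu(int vcpu_fd, size_t mmap_size)
{
        /* mmap_size comes from ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0) */
        struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, vcpu_fd, 0);
        if (run == MAP_FAILED)
                return -1;

        for (;;) {
                if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                        return -1;

                switch (run->exit_reason) {
                case KVM_EXIT_MMIO:
                        /* On a read, userspace fills run->mmio.data; the kernel
                           completes it via kvm_complete_mmio_read() on re-entry */
                        handle_mmio(run->mmio.phys_addr, run->mmio.data,
                                    run->mmio.len, run->mmio.is_write);
                        break;
                case KVM_EXIT_LOONGARCH_IOCSR:
                        /* Same pattern for IOCSR accesses; completed by
                           kvm_complete_iocsr_read() on re-entry */
                        handle_iocsr(run->iocsr_io.phys_addr, run->iocsr_io.data,
                                     run->iocsr_io.len, run->iocsr_io.is_write);
                        break;
                default:
                        return 0;
                }
        }
}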
1 parent dfe3dc0 commit 2fc3bd8

File tree

1 file changed: +261 -0 lines changed

arch/loongarch/kvm/vcpu.c

Lines changed: 261 additions & 0 deletions
@@ -0,0 +1,261 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <linux/entry-kvm.h>
#include <asm/fpu.h>
#include <asm/loongarch.h>
#include <asm/setup.h>
#include <asm/time.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

/*
 * kvm_check_requests - check and handle pending vCPU requests
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 */
static int kvm_check_requests(struct kvm_vcpu *vcpu)
{
        if (!kvm_request_pending(vcpu))
                return RESUME_GUEST;

        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                vcpu->arch.vpid = 0;  /* Drop vpid for this vCPU */

        if (kvm_dirty_ring_check_request(vcpu))
                return RESUME_HOST;

        return RESUME_GUEST;
}

/*
 * Check and handle pending signal and vCPU requests etc
 * Run with irq enabled and preempt enabled
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 *         < 0 if we should exit to userspace, where the return value
 *         indicates an error
 */
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
{
        int ret;

        /*
         * Check conditions before entering the guest
         */
        ret = xfer_to_guest_mode_handle_work(vcpu);
        if (ret < 0)
                return ret;

        ret = kvm_check_requests(vcpu);

        return ret;
}

/*
 * Called with irq enabled
 *
 * Return: RESUME_GUEST if we should enter the guest, and irq disabled
 *         Others if we should exit to userspace
 */
static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
{
        int ret;

        do {
                ret = kvm_enter_guest_check(vcpu);
                if (ret != RESUME_GUEST)
                        break;

                /*
                 * Handle vcpu timer, interrupts, check requests and
                 * check vmid before vcpu enter guest
                 */
                local_irq_disable();
                kvm_acquire_timer(vcpu);
                kvm_deliver_intr(vcpu);
                kvm_deliver_exception(vcpu);
                /* Make sure the vcpu mode has been written */
                smp_store_mb(vcpu->mode, IN_GUEST_MODE);
                kvm_check_vpid(vcpu);
                vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
                /* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
                vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;

                if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
                        /* make sure the vcpu mode has been written */
                        smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
                        local_irq_enable();
                        ret = -EAGAIN;
                }
        } while (ret != RESUME_GUEST);

        return ret;
}

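/*
 * Exit handler called on return from the guest: the exit code is taken
 * from the saved guest ESTAT CSR and dispatched through kvm_handle_fault(),
 * while pure interrupt exits are only counted before resuming.
 */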
/*
 * Return 1 for resume guest and "<= 0" for resume host.
 */
static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        int ret = RESUME_GUEST;
        unsigned long estat = vcpu->arch.host_estat;
        u32 intr = estat & 0x1fff; /* Ignore NMI */
        u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;

        vcpu->mode = OUTSIDE_GUEST_MODE;

        /* Set a default exit reason */
        run->exit_reason = KVM_EXIT_UNKNOWN;

        guest_timing_exit_irqoff();
        guest_state_exit_irqoff();
        local_irq_enable();

        trace_kvm_exit(vcpu, ecode);
        if (ecode) {
                ret = kvm_handle_fault(vcpu, ecode);
        } else {
                WARN(!intr, "vm exiting with suspicious irq\n");
                ++vcpu->stat.int_exits;
        }

        if (ret == RESUME_GUEST)
                ret = kvm_pre_enter_guest(vcpu);

        if (ret != RESUME_GUEST) {
                local_irq_disable();
                return ret;
        }

        guest_timing_enter_irqoff();
        guest_state_enter_irqoff();
        trace_kvm_reenter(vcpu);

        return RESUME_GUEST;
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
        return 0;
}

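/*
 * Allocate the software CSR image, set up the per-vCPU hrtimer-based
 * guest timer and the exit handler, record the host ECFG.VS setting,
 * and start the guest in direct address translation mode (CRMD.DA).
 */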
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
        unsigned long timer_hz;
        struct loongarch_csrs *csr;

        vcpu->arch.vpid = 0;

        hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
        vcpu->arch.swtimer.function = kvm_swtimer_wakeup;

        vcpu->arch.handle_exit = kvm_handle_exit;
        vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
        vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
        if (!vcpu->arch.csr)
                return -ENOMEM;

        /*
         * All kvm exceptions share one exception entry, and host <-> guest
         * switch also switch ECFG.VS field, keep host ECFG.VS info here.
         */
        vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);

        /* Init */
        vcpu->arch.last_sched_cpu = -1;

        /*
         * Initialize guest register state to valid architectural reset state.
         */
        timer_hz = calc_const_freq();
        kvm_init_timer(vcpu, timer_hz);

        /* Set Initialize mode for guest */
        csr = vcpu->arch.csr;
        kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);

        /* Set cpuid */
        kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);

        /* Start with no pending virtual guest interrupts */
        csr->csrs[LOONGARCH_CSR_GINTC] = 0;

        return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        int cpu;
        struct kvm_context *context;

        hrtimer_cancel(&vcpu->arch.swtimer);
        kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
        kfree(vcpu->arch.csr);

        /*
         * If the vCPU is freed and reused as another vCPU, we don't want the
         * matching pointer wrongly hanging around in last_vcpu.
         */
        for_each_possible_cpu(cpu) {
                context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
                if (context->last_vcpu == vcpu)
                        context->last_vcpu = NULL;
        }
}

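/*
 * KVM_RUN entry point: complete any MMIO or IOCSR read left pending from
 * the previous userspace exit, then enter the guest through
 * kvm_loongarch_ops->enter_guest() until the exit must be handled in
 * userspace or a signal is pending.
 */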
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
        int r = -EINTR;
        struct kvm_run *run = vcpu->run;

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvm_complete_mmio_read(vcpu, run);
                vcpu->mmio_needed = 0;
        }

        if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
                if (!run->iocsr_io.is_write)
                        kvm_complete_iocsr_read(vcpu, run);
        }

        if (run->immediate_exit)
                return r;

        /* Clear exit_reason */
        run->exit_reason = KVM_EXIT_UNKNOWN;
        lose_fpu(1);
        vcpu_load(vcpu);
        kvm_sigset_activate(vcpu);
        r = kvm_pre_enter_guest(vcpu);
        if (r != RESUME_GUEST)
                goto out;

        guest_timing_enter_irqoff();
        guest_state_enter_irqoff();
        trace_kvm_enter(vcpu);
        r = kvm_loongarch_ops->enter_guest(run, vcpu);

        trace_kvm_out(vcpu);
        /*
         * Guest exit is already recorded at kvm_handle_exit()
         * return value must not be RESUME_GUEST
         */
        local_irq_enable();
out:
        kvm_sigset_deactivate(vcpu);
        vcpu_put(vcpu);

        return r;
}
