|
13 | 13 | #define CREATE_TRACE_POINTS
|
14 | 14 | #include "trace.h"
|
15 | 15 |
|
/*
 * Per-vCPU statistics descriptors exposed to userspace through the KVM
 * binary stats interface: the generic KVM vCPU stats plus the
 * LoongArch-specific exit counters.
 */
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, int_exits),
	STATS_DESC_COUNTER(VCPU, idle_exits),
	STATS_DESC_COUNTER(VCPU, cpucfg_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
};
| 23 | + |
/*
 * Header of the vCPU binary stats file. The offsets describe the file
 * layout userspace reads: the id string directly follows the header,
 * then the descriptor table, then the stats data itself.
 */
const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
| 32 | + |
16 | 33 | /*
|
17 | 34 | * kvm_check_requests - check and handle pending vCPU requests
|
18 | 35 | *
|
@@ -141,6 +158,109 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
141 | 158 | return RESUME_GUEST;
|
142 | 159 | }
|
143 | 160 |
|
| 161 | +int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) |
| 162 | +{ |
| 163 | + return !!(vcpu->arch.irq_pending) && |
| 164 | + vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE; |
| 165 | +} |
| 166 | + |
| 167 | +int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) |
| 168 | +{ |
| 169 | + return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; |
| 170 | +} |
| 171 | + |
/*
 * Always reports false: guest privilege level is not tracked here,
 * so the guest is never assumed to be in kernel mode.
 */
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}
| 176 | + |
/*
 * Fault handler for mappings of the vCPU fd: no pages are backed here,
 * so any access raises SIGBUS.
 */
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
| 181 | + |
/* KVM_TRANSLATE ioctl: not supported on this architecture. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}
| 187 | + |
| 188 | +int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) |
| 189 | +{ |
| 190 | + return kvm_pending_timer(vcpu) || |
| 191 | + kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI); |
| 192 | +} |
| 193 | + |
| 194 | +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) |
| 195 | +{ |
| 196 | + int i; |
| 197 | + |
| 198 | + kvm_debug("vCPU Register Dump:\n"); |
| 199 | + kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc); |
| 200 | + kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending); |
| 201 | + |
| 202 | + for (i = 0; i < 32; i += 4) { |
| 203 | + kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i, |
| 204 | + vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1], |
| 205 | + vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); |
| 206 | + } |
| 207 | + |
| 208 | + kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n", |
| 209 | + kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD), |
| 210 | + kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT)); |
| 211 | + |
| 212 | + kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA)); |
| 213 | + |
| 214 | + return 0; |
| 215 | +} |
| 216 | + |
/* KVM_GET_MP_STATE ioctl: copy out the vCPU's current MP state. */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	*mp_state = vcpu->arch.mp_state;

	return 0;
}
| 224 | + |
| 225 | +int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
| 226 | + struct kvm_mp_state *mp_state) |
| 227 | +{ |
| 228 | + int ret = 0; |
| 229 | + |
| 230 | + switch (mp_state->mp_state) { |
| 231 | + case KVM_MP_STATE_RUNNABLE: |
| 232 | + vcpu->arch.mp_state = *mp_state; |
| 233 | + break; |
| 234 | + default: |
| 235 | + ret = -EINVAL; |
| 236 | + } |
| 237 | + |
| 238 | + return ret; |
| 239 | +} |
| 240 | + |
/* KVM_SET_GUEST_DEBUG ioctl: guest debugging is not supported. */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}
| 246 | + |
/**
 * kvm_migrate_count() - Migrate timer.
 * @vcpu: Virtual CPU.
 *
 * Migrate hrtimer to the current CPU by cancelling and restarting it
 * if the hrtimer is active.
 *
 * Must be called when the vCPU is migrated to a different CPU, so that
 * the timer can interrupt the guest at the new CPU, and the timer irq can
 * be delivered to the vCPU.
 */
static void kvm_migrate_count(struct kvm_vcpu *vcpu)
{
	/* hrtimer_cancel() returns nonzero only if the timer was queued/active */
	if (hrtimer_cancel(&vcpu->arch.swtimer))
		hrtimer_restart(&vcpu->arch.swtimer);
}
| 263 | + |
144 | 264 | static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
|
145 | 265 | {
|
146 | 266 | unsigned long gintc;
|
|
0 commit comments