/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_HOST_H__
#define __ASM_LOONGARCH_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/inst.h>
#include <asm/kvm_mmu.h>
#include <asm/loongarch.h>

/* LoongArch KVM register IDs */
#define KVM_GET_IOC_CSR_IDX(id)		((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
#define KVM_GET_IOC_CPUCFG_IDX(id)	((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
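
/*
 * Illustrative use (assumes the KVM_CSR_IDX_MASK/LOONGARCH_REG_SHIFT
 * encoding from the uapi header): given a KVM_{GET,SET}_ONE_REG id that
 * encodes a CSR number, the index is recovered and used roughly as:
 *
 *	idx = KVM_GET_IOC_CSR_IDX(reg->id);
 *	val = readl_sw_gcsr(vcpu->arch.csr, idx);
 */
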
#define KVM_MAX_VCPUS			256
#define KVM_MAX_CPUCFG_REGS		21
/* Memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS		0

#define KVM_HALT_POLL_NS_DEFAULT	500000

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
	u64 pages;
	u64 hugepages;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 int_exits;
	u64 idle_exits;
	u64 cpucfg_exits;
	u64 signal_exits;
};

struct kvm_arch_memory_slot {
};

struct kvm_context {
	unsigned long vpid_cache;
	struct kvm_vcpu *last_vcpu;
};

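/*
 * Entry points into the low-level world-switch code; kvm_loongarch_ops
 * below points at an instance of this. page_order appears to record the
 * allocation order of the pages holding the switch text (an assumption
 * from the kvm_exception_size/kvm_enter_guest_size externs below, not
 * stated in this header).
 */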
struct kvm_world_switch {
	int (*exc_entry)(void);
	int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	unsigned long page_order;
};

#define MAX_PGTABLE_LEVELS	4

struct kvm_arch {
	/* Guest physical mm */
	kvm_pte_t *pgd;
	unsigned long gpa_size;
	unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
	unsigned int pte_shifts[MAX_PGTABLE_LEVELS];
	unsigned int root_level;

	s64 time_offset;
	struct kvm_context __percpu *vmcs;
};

#define CSR_MAX_NUMS		0x800

struct loongarch_csrs {
	unsigned long csrs[CSR_MAX_NUMS];
};

/* Resume Flags */
#define RESUME_HOST		0
#define RESUME_GUEST		1

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_DO_IOCSR,	/* handle IOCSR request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_EXCEPT,		/* a guest exception has been generated */
};
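
/*
 * Sketch of how these values interact (illustrative only;
 * kvm_emu_mmio_read() is a hypothetical helper name, and the real dispatch
 * lives in the exit-handling code, not in this header). EMULATE_DO_MMIO
 * means kvm_run has been filled in and userspace must complete the access,
 * so the handler resumes to the host; otherwise it re-enters the guest:
 *
 *	er = kvm_emu_mmio_read(vcpu, inst);
 *	if (er == EMULATE_DO_MMIO)
 *		return RESUME_HOST;
 *	return RESUME_GUEST;
 */
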
#define KVM_LARCH_FPU		(0x1 << 0)
#define KVM_LARCH_SWCSR_LATEST	(0x1 << 1)
#define KVM_LARCH_HWCSR_USABLE	(0x1 << 2)
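
/*
 * These bits are tracked in vcpu->arch.aux_inuse (declared below). A
 * minimal sketch of the intended lazy-FPU pattern, assuming a
 * kvm_own_fpu() helper that loads the guest FPU context and sets
 * KVM_LARCH_FPU (defined elsewhere in the series, not in this header):
 *
 *	if (!(vcpu->arch.aux_inuse & KVM_LARCH_FPU))
 *		kvm_own_fpu(vcpu);
 */
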
struct kvm_vcpu_arch {
	/*
	 * Kept as unsigned long rather than as pointer-to-function types so
	 * that the values can be loaded into registers directly from assembly.
	 */
	unsigned long host_eentry;
	unsigned long guest_eentry;

	/* Pointers stored here for easy access from assembly code */
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest-mode execution */
	unsigned long host_sp;
	unsigned long host_tp;
	unsigned long host_pgd;

	/* Host CSRs used when handling exits from the guest */
	unsigned long badi;
	unsigned long badv;
	unsigned long host_ecfg;
	unsigned long host_estat;
	unsigned long host_percpu;

	/* GPRs */
	unsigned long gprs[32];
	unsigned long pc;

	/* Which auxiliary state is loaded (KVM_LARCH_*) */
	unsigned int aux_inuse;

	/* FPU state */
	struct loongarch_fpu fpu FPU_ALIGN;

	/* CSR state */
	struct loongarch_csrs *csr;

	/* GPR used as IO source/target */
	u32 io_gpr;

	/* KVM register to control count timer */
	u32 count_ctl;
	struct hrtimer swtimer;

	/* Bitmask of pending interrupts */
	unsigned long irq_pending;
	/* Bitmask of interrupts to be cleared */
	unsigned long irq_clear;

	/* Bitmask of pending exceptions, with the exception subcode */
	unsigned long exception_pending;
	unsigned int esubcode;

	/* Cache for pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vCPU's VPID */
	u64 vpid;

	/* Frequency of the stable timer, in MHz */
	u64 timer_mhz;
	ktime_t expire;

	/* Last CPU the vCPU state was loaded on */
	int last_sched_cpu;
	/* MP state */
	struct kvm_mp_state mp_state;
	/* CPUCFG state */
	u32 cpucfg[KVM_MAX_CPUCFG_REGS];
};

static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
{
	return csr->csrs[reg];
}

static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
{
	csr->csrs[reg] = val;
}
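
/*
 * These accessors touch only the software CSR image. An illustrative,
 * hypothetical wrapper (not part of this patch) that consults
 * get_gcsr_flag(), declared at the bottom of this header, before deciding
 * how to read a guest CSR; hardware-backed reads are omitted here:
 *
 *	static unsigned long kvm_read_gcsr(struct kvm_vcpu *vcpu, int id)
 *	{
 *		if (get_gcsr_flag(id) & SW_GCSR)
 *			return readl_sw_gcsr(vcpu->arch.csr, id);
 *		return 0;
 *	}
 */
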
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

/* MMU handling */
void kvm_flush_tlb_all(void);
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);

#define KVM_ARCH_WANT_MMU_NOTIFIER
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

static inline void update_pc(struct kvm_vcpu_arch *arch)
{
	arch->pc += 4;
}
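
/*
 * All LoongArch instructions are 4 bytes wide, so update_pc() steps the
 * guest PC past the instruction that just trapped. Typical use after a
 * successful emulation (illustrative):
 *
 *	if (er == EMULATE_DONE)
 *		update_pc(&vcpu->arch);
 */
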
/*
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to an ifetch fault.
 * @arch: vCPU architecture state.
 *
 * Returns: Whether the TLBL exception was likely due to an instruction
 * fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
{
	return arch->pc == arch->badv;
}

/* Misc */
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {}
void kvm_check_vpid(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
void kvm_init_vmcs(struct kvm *kvm);
void kvm_exc_entry(void);
int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);

extern unsigned long vpid_mask;
extern const unsigned long kvm_exception_size;
extern const unsigned long kvm_enter_guest_size;
extern struct kvm_world_switch *kvm_loongarch_ops;

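/* How a guest CSR is backed, as reported by get_gcsr_flag() below */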
#define SW_GCSR		(1 << 0)
#define HW_GCSR		(1 << 1)
#define INVALID_GCSR	(1 << 2)

int get_gcsr_flag(int csr);
void set_hw_gcsr(int csr_id, unsigned long val);

#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */