/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/loongarch.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

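/*
 * HGPR_OFFSET(x): offset of host GPR $r<x> within the struct pt_regs frame.
 * GGPR_OFFSET(x): offset of guest GPR $r<x> within struct kvm_vcpu_arch
 * (PT_R0 and KVM_ARCH_GGPR come from asm-offsets).
 */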
#define HGPR_OFFSET(x)		(PT_R0 + 8*x)
#define GGPR_OFFSET(x)		(KVM_ARCH_GGPR + 8*x)

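/*
 * Only ra, tp, sp and the callee-saved registers $r22-$r31 are saved and
 * restored here; caller-saved registers may be clobbered across
 * kvm_enter_guest() as permitted by the C calling convention.
 */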
.macro kvm_save_host_gpr base
	.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
	st.d	$r\n, \base, HGPR_OFFSET(\n)
	.endr
.endm

.macro kvm_restore_host_gpr base
	.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
	ld.d	$r\n, \base, HGPR_OFFSET(\n)
	.endr
.endm

/*
 * Save and restore all guest GPRs except the base register, which
 * defaults to a2 ($r6); the base register itself is saved/restored
 * separately by the callers of these macros.
 */
.macro kvm_save_guest_gprs base
	.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
	st.d	$r\n, \base, GGPR_OFFSET(\n)
	.endr
.endm

.macro kvm_restore_guest_gprs base
	.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
	ld.d	$r\n, \base, GGPR_OFFSET(\n)
	.endr
.endm

/*
 * Prepare the switch to guest mode: save host state and restore guest state.
 * a2: kvm_vcpu_arch pointer, do not touch it until 'ertn'
 * t0, t1: temporary registers
 */
.macro kvm_switch_to_guest
	/* Set host ECFG.VS=0, all exceptions share one exception entry */
	csrrd	t0, LOONGARCH_CSR_ECFG
	bstrins.w	t0, zero, CSR_ECFG_VS_SHIFT_END, CSR_ECFG_VS_SHIFT
	csrwr	t0, LOONGARCH_CSR_ECFG

	/* Load up the new EENTRY */
	ld.d	t0, a2, KVM_ARCH_GEENTRY
	csrwr	t0, LOONGARCH_CSR_EENTRY

	/* Set Guest ERA */
	ld.d	t0, a2, KVM_ARCH_GPC
	csrwr	t0, LOONGARCH_CSR_ERA

	/* Save host PGDL */
	csrrd	t0, LOONGARCH_CSR_PGDL
	st.d	t0, a2, KVM_ARCH_HPGD

	/* Load the struct kvm pointer (vcpu->kvm) */
	ld.d	t1, a2, KVM_VCPU_KVM - KVM_VCPU_ARCH

	/* Load guest PGDL */
	li.w	t0, KVM_GPGD
	ldx.d	t0, t1, t0
	csrwr	t0, LOONGARCH_CSR_PGDL

	/* Mix GID and RID */
	csrrd	t1, LOONGARCH_CSR_GSTAT
	bstrpick.w	t1, t1, CSR_GSTAT_GID_SHIFT_END, CSR_GSTAT_GID_SHIFT
	csrrd	t0, LOONGARCH_CSR_GTLBC
	bstrins.w	t0, t1, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
	csrwr	t0, LOONGARCH_CSR_GTLBC

	/*
	 * Enable interrupts in root mode with the coming ertn so that host
	 * interrupts can be serviced while the VM runs.
	 * The guest CRMD comes from the separate GCSR_CRMD register.
	 */
	ori	t0, zero, CSR_PRMD_PIE
	csrxchg	t0, t0, LOONGARCH_CSR_PRMD

	/* Set the PVM bit so that ertn switches to guest context */
	ori	t0, zero, CSR_GSTAT_PVM
	csrxchg	t0, t0, LOONGARCH_CSR_GSTAT

	/* Load Guest GPRs */
	kvm_restore_guest_gprs	a2
	/* Restore guest a2 last, since a2 held the kvm_vcpu_arch pointer */
	ld.d	a2, a2, (KVM_ARCH_GGPR + 8 * REG_A2)

	ertn	/* Switch to guest: GSTAT.PGM = 1, ERRCTL.ISERR = 0, TLBRPRMD.ISTLBR = 0 */
.endm

	/*
	 * Exception entry for general exceptions from guest mode:
	 *  - IRQs are disabled
	 *  - kernel privilege in root mode
	 *  - page mode kept unchanged from the previous PRMD in root mode
	 *  - FIXME: TLB exceptions cannot happen here, since the TLB-related
	 *    registers (pgd table, vmid registers, etc.) still hold guest
	 *    mode state; this will be fixed once hardware page walk is enabled.
	 * Load kvm_vcpu from the reserved CSR KVM_VCPU_KS, and save a2 to KVM_TEMP_KS.
	 */
	.text
	.cfi_sections	.debug_frame
SYM_CODE_START(kvm_exc_entry)
	csrwr	a2, KVM_TEMP_KS
	csrrd	a2, KVM_VCPU_KS
	addi.d	a2, a2, KVM_VCPU_ARCH

	/* After the guest GPRs are saved, any GPR is free to use */
	kvm_save_guest_gprs	a2
	/* Save guest a2 */
	csrrd	t0, KVM_TEMP_KS
	st.d	t0, a2, (KVM_ARCH_GGPR + 8 * REG_A2)

	/* a2 is kvm_vcpu_arch; the guest GPRs are saved, so s0/s1 are free to use */
	csrrd	s1, KVM_VCPU_KS
	ld.d	s0, s1, KVM_VCPU_RUN

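	/* Record exit information: ESTAT, guest PC (ERA), BADV and BADI */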
	csrrd	t0, LOONGARCH_CSR_ESTAT
	st.d	t0, a2, KVM_ARCH_HESTAT
	csrrd	t0, LOONGARCH_CSR_ERA
	st.d	t0, a2, KVM_ARCH_GPC
	csrrd	t0, LOONGARCH_CSR_BADV
	st.d	t0, a2, KVM_ARCH_HBADV
	csrrd	t0, LOONGARCH_CSR_BADI
	st.d	t0, a2, KVM_ARCH_HBADI

	/* Restore host ECFG.VS */
	csrrd	t0, LOONGARCH_CSR_ECFG
	ld.d	t1, a2, KVM_ARCH_HECFG
	or	t0, t0, t1
	csrwr	t0, LOONGARCH_CSR_ECFG

	/* Restore host EENTRY */
	ld.d	t0, a2, KVM_ARCH_HEENTRY
	csrwr	t0, LOONGARCH_CSR_EENTRY

	/* Restore host pgd table */
	ld.d	t0, a2, KVM_ARCH_HPGD
	csrwr	t0, LOONGARCH_CSR_PGDL

	/*
	 * Clear the PVM bit so that the next ertn returns to root mode by default
	 */
	ori	t0, zero, CSR_GSTAT_PVM
	csrxchg	zero, t0, LOONGARCH_CSR_GSTAT

	/*
	 * Clear the GTLBC.TGID field:
	 * 0: subsequent TLB instructions update the root TLB
	 * non-zero: subsequent TLB instructions update the guest TLB
	 *           (e.g. GPA to HPA translations)
	 */
	csrrd	t0, LOONGARCH_CSR_GTLBC
	bstrins.w	t0, zero, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
	csrwr	t0, LOONGARCH_CSR_GTLBC
	ld.d	tp, a2, KVM_ARCH_HTP
	ld.d	sp, a2, KVM_ARCH_HSP
	/* Restore the per-CPU base register */
	ld.d	u0, a2, KVM_ARCH_HPERCPU
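	/* Move sp down past the saved host GPR frame so the exit handler cannot clobber it */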
	addi.d	sp, sp, -PT_SIZE

	/* Call the exit handler: a0 = kvm_run, a1 = kvm_vcpu */
	or	a0, s0, zero
	or	a1, s1, zero
	ld.d	t8, a2, KVM_ARCH_HANDLE_EXIT
	jirl	ra, t8, 0

	or	a2, s1, zero
	addi.d	a2, a2, KVM_VCPU_ARCH

	/* Resume host when ret <= 0 */
	blez	a0, ret_to_host

	/*
	 * Return to guest
	 * Save the per-CPU base register again, since the vCPU may have been
	 * migrated to another CPU while the exit was handled
	 */
	st.d	u0, a2, KVM_ARCH_HPERCPU

	/* Save kvm_vcpu to kscratch */
	csrwr	s1, KVM_VCPU_KS
	kvm_switch_to_guest

ret_to_host:
	ld.d	a2, a2, KVM_ARCH_HSP
	addi.d	a2, a2, -PT_SIZE
	kvm_restore_host_gpr	a2
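	/* ra was restored above; return to kvm_enter_guest()'s caller with the handler's result still in a0 */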
	jr	ra

SYM_INNER_LABEL(kvm_exc_entry_end, SYM_L_LOCAL)
SYM_CODE_END(kvm_exc_entry)

/*
 * int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu)
 *
 * @register_param:
 *  a0: kvm_run* run
 *  a1: kvm_vcpu* vcpu
 */
SYM_FUNC_START(kvm_enter_guest)
	/* Allocate a pt_regs frame at the bottom of the stack */
	addi.d	a2, sp, -PT_SIZE
	/* Save host GPRs */
	kvm_save_host_gpr	a2

	/* Save host CRMD, PRMD to stack */
	csrrd	a3, LOONGARCH_CSR_CRMD
	st.d	a3, a2, PT_CRMD
	csrrd	a3, LOONGARCH_CSR_PRMD
	st.d	a3, a2, PT_PRMD

	addi.d	a2, a1, KVM_VCPU_ARCH
	st.d	sp, a2, KVM_ARCH_HSP
	st.d	tp, a2, KVM_ARCH_HTP
	/* Save the per-CPU base register */
	st.d	u0, a2, KVM_ARCH_HPERCPU

	/* Save kvm_vcpu to kscratch */
	csrwr	a1, KVM_VCPU_KS
	kvm_switch_to_guest
SYM_INNER_LABEL(kvm_enter_guest_end, SYM_L_LOCAL)
SYM_FUNC_END(kvm_enter_guest)

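/*
 * void kvm_save_fpu(struct loongarch_fpu *fpu)
 * a0: fpu context to save into (FCSR, FPRs, FCC)
 */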
SYM_FUNC_START(kvm_save_fpu)
	fpu_save_csr	a0 t1
	fpu_save_double	a0 t1
	fpu_save_cc	a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_save_fpu)

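/*
 * void kvm_restore_fpu(struct loongarch_fpu *fpu)
 * a0: fpu context to restore from (FPRs, FCSR, FCC)
 */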
SYM_FUNC_START(kvm_restore_fpu)
	fpu_restore_double	a0 t1
	fpu_restore_csr	a0 t1 t2
	fpu_restore_cc	a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_restore_fpu)

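/*
 * Sizes of the routines above, so that C code can copy them as a unit
 * (e.g. into the KVM exception entry area).
 */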
	.section ".rodata"
SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)