
Commit 1423afc

Fuad Tabba authored and Marc Zyngier committed
KVM: arm64: Trap access to pVM restricted features
Trap accesses to restricted features for VMs running in protected mode.

Accesses to feature registers are emulated, and only supported features are exposed to protected VMs.

Accesses to restricted registers, as well as restricted instructions, are trapped, and an undefined exception is injected into the protected guest, i.e., with EC = 0x0 (unknown reason). According to the Arm Architecture Reference Manual, this is the EC used for unallocated or undefined system registers or instructions.

This only affects the functionality of protected VMs; it should not affect non-protected VMs when KVM is running in protected mode.

Signed-off-by: Fuad Tabba <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
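As background on the EC = 0x0 encoding mentioned above: in the ESR_ELx syndrome format the exception class (EC) occupies bits [31:26] and the instruction-length (IL) flag is bit 25, so an "unknown reason" syndrome is simply EC = 0 with IL set and an empty ISS. The standalone sketch below shows how such a value could be composed; make_esr_unknown() is a hypothetical helper for illustration only, not a kernel function, and the kernel's inject_undef64() is expected to build a comparable value internally.

#include <stdint.h>
#include <stdio.h>

#define ESR_EC_SHIFT   26u          /* EC lives in bits [31:26] */
#define ESR_EC_UNKNOWN 0x0u         /* unallocated/undefined sysreg or insn */
#define ESR_IL         (1u << 25)   /* 32-bit instruction length flag */

static uint32_t make_esr_unknown(void)
{
        /* EC = 0x0 (unknown reason), IL set, ISS left empty. */
        return (ESR_EC_UNKNOWN << ESR_EC_SHIFT) | ESR_IL;
}

int main(void)
{
        /* Prints 0x2000000. */
        printf("injected undef ESR: %#x\n", (unsigned)make_esr_unknown());
        return 0;
}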
1 parent 72e1be1 commit 1423afc

File tree

1 file changed: +57 -0 lines changed


arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 57 additions & 0 deletions
@@ -20,6 +20,7 @@
 #include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
+#include <asm/kvm_fixed_config.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/fpsimd.h>
@@ -159,6 +160,49 @@ static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
 	write_sysreg(pmu->events_host, pmcntenset_el0);
 }
 
+/**
+ * Handler for protected VM restricted exceptions.
+ *
+ * Inject an undefined exception into the guest and return true to indicate that
+ * the hypervisor has handled the exit, and control should go back to the guest.
+ */
+static bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	inject_undef64(vcpu);
+	return true;
+}
+
+/**
+ * Handler for protected VM MSR, MRS or System instruction execution in AArch64.
+ *
+ * Returns true if the hypervisor has handled the exit, and control should go
+ * back to the guest, or false if it hasn't.
+ */
+static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	if (kvm_handle_pvm_sysreg(vcpu, exit_code))
+		return true;
+
+	return kvm_hyp_handle_sysreg(vcpu, exit_code);
+}
+
+/**
+ * Handler for protected floating-point and Advanced SIMD accesses.
+ *
+ * Returns true if the hypervisor has handled the exit, and control should go
+ * back to the guest, or false if it hasn't.
+ */
+static bool kvm_handle_pvm_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	/* Linux guests assume support for floating-point and Advanced SIMD. */
+	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP),
+				PVM_ID_AA64PFR0_ALLOW));
+	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD),
+				PVM_ID_AA64PFR0_ALLOW));
+
+	return kvm_hyp_handle_fpsimd(vcpu, exit_code);
+}
+
 static const exit_handler_fn hyp_exit_handlers[] = {
 	[0 ... ESR_ELx_EC_MAX] = NULL,
 	[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
@@ -170,8 +214,21 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
 };
 
+static const exit_handler_fn pvm_exit_handlers[] = {
+	[0 ... ESR_ELx_EC_MAX] = NULL,
+	[ESR_ELx_EC_SYS64] = kvm_handle_pvm_sys64,
+	[ESR_ELx_EC_SVE] = kvm_handle_pvm_restricted,
+	[ESR_ELx_EC_FP_ASIMD] = kvm_handle_pvm_fpsimd,
+	[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
+	[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
+	[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
+};
+
 static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm)
 {
+	if (unlikely(kvm_vm_is_protected(kvm)))
+		return pvm_exit_handlers;
+
 	return hyp_exit_handlers;
 }
 
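The final hunk above selects a separate exit-handler table when the VM is protected. The following standalone C sketch (user space, not kernel code) illustrates that dispatch pattern under simplifying assumptions: EC_SYS64, EC_FP_ASIMD, struct vm_ctx, handle_pvm_sys64() and get_handlers() are hypothetical stand-ins for the kernel's ESR_ELx_EC_* values, vCPU state and kvm_get_exit_handler_array(); only the table-selection shape is taken from the patch.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical exception-class numbers; the kernel uses ESR_ELx_EC_*. */
enum { EC_FP_ASIMD = 0x07, EC_SYS64 = 0x18, EC_MAX = 0x3f };

/* Stand-in for the vCPU/VM state the real handlers receive. */
struct vm_ctx {
        bool protected_vm;
};

typedef bool (*exit_handler_fn)(struct vm_ctx *vm);

static bool handle_sys64(struct vm_ctx *vm)
{
        (void)vm;
        puts("default sysreg handler");
        return true;
}

static bool handle_pvm_sys64(struct vm_ctx *vm)
{
        (void)vm;
        puts("protected-VM sysreg handler");
        return true;
}

/* Default table; unhandled ECs stay NULL, like hyp_exit_handlers[]. */
static const exit_handler_fn default_handlers[EC_MAX + 1] = {
        [EC_SYS64] = handle_sys64,
};

/* Protected-VM table overriding selected ECs, like pvm_exit_handlers[]. */
static const exit_handler_fn pvm_handlers[EC_MAX + 1] = {
        [EC_SYS64] = handle_pvm_sys64,
};

/* Mirrors the shape of kvm_get_exit_handler_array(). */
static const exit_handler_fn *get_handlers(const struct vm_ctx *vm)
{
        if (vm->protected_vm)
                return pvm_handlers;

        return default_handlers;
}

int main(void)
{
        struct vm_ctx pvm = { .protected_vm = true };
        const exit_handler_fn *tbl = get_handlers(&pvm);

        if (tbl[EC_SYS64])
                tbl[EC_SYS64](&pvm);    /* prints "protected-VM sysreg handler" */

        return 0;
}

The non-protected path is left untouched by this selection: the default table is returned unless the VM is flagged as protected, which matches the commit message's statement that non-protected VMs are unaffected.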