#include <linux/kvm_host.h>
#include <linux/mm.h>

#include <asm/kvm_emulate.h>

#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
@@ -289,6 +292,65 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
289292 hyp_spin_unlock (& vm_table_lock );
290293}
291294
295+ static void pkvm_init_features_from_host (struct pkvm_hyp_vm * hyp_vm , const struct kvm * host_kvm )
296+ {
297+ struct kvm * kvm = & hyp_vm -> kvm ;
298+ DECLARE_BITMAP (allowed_features , KVM_VCPU_MAX_FEATURES );
299+
300+ /* No restrictions for non-protected VMs. */
301+ if (!kvm_vm_is_protected (kvm )) {
302+ bitmap_copy (kvm -> arch .vcpu_features ,
303+ host_kvm -> arch .vcpu_features ,
304+ KVM_VCPU_MAX_FEATURES );
305+ return ;
306+ }
307+
308+ bitmap_zero (allowed_features , KVM_VCPU_MAX_FEATURES );
309+
310+ /*
311+ * For protected VMs, always allow:
312+ * - CPU starting in poweroff state
313+ * - PSCI v0.2
314+ */
315+ set_bit (KVM_ARM_VCPU_POWER_OFF , allowed_features );
316+ set_bit (KVM_ARM_VCPU_PSCI_0_2 , allowed_features );
317+
318+ /*
319+ * Check if remaining features are allowed:
320+ * - Performance Monitoring
321+ * - Scalable Vectors
322+ * - Pointer Authentication
323+ */
324+ if (FIELD_GET (ARM64_FEATURE_MASK (ID_AA64DFR0_EL1_PMUVer ), PVM_ID_AA64DFR0_ALLOW ))
325+ set_bit (KVM_ARM_VCPU_PMU_V3 , allowed_features );
326+
327+ if (FIELD_GET (ARM64_FEATURE_MASK (ID_AA64PFR0_EL1_SVE ), PVM_ID_AA64PFR0_ALLOW ))
328+ set_bit (KVM_ARM_VCPU_SVE , allowed_features );
329+
330+ if (FIELD_GET (ARM64_FEATURE_MASK (ID_AA64ISAR1_EL1_API ), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED ) &&
331+ FIELD_GET (ARM64_FEATURE_MASK (ID_AA64ISAR1_EL1_APA ), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED ))
332+ set_bit (KVM_ARM_VCPU_PTRAUTH_ADDRESS , allowed_features );
333+
334+ if (FIELD_GET (ARM64_FEATURE_MASK (ID_AA64ISAR1_EL1_GPI ), PVM_ID_AA64ISAR1_ALLOW ) &&
335+ FIELD_GET (ARM64_FEATURE_MASK (ID_AA64ISAR1_EL1_GPA ), PVM_ID_AA64ISAR1_ALLOW ))
336+ set_bit (KVM_ARM_VCPU_PTRAUTH_GENERIC , allowed_features );
337+
338+ bitmap_and (kvm -> arch .vcpu_features , host_kvm -> arch .vcpu_features ,
339+ allowed_features , KVM_VCPU_MAX_FEATURES );
340+ }
341+
342+ static void pkvm_vcpu_init_ptrauth (struct pkvm_hyp_vcpu * hyp_vcpu )
343+ {
344+ struct kvm_vcpu * vcpu = & hyp_vcpu -> vcpu ;
345+
346+ if (vcpu_has_feature (vcpu , KVM_ARM_VCPU_PTRAUTH_ADDRESS ) ||
347+ vcpu_has_feature (vcpu , KVM_ARM_VCPU_PTRAUTH_GENERIC )) {
348+ kvm_vcpu_enable_ptrauth (vcpu );
349+ } else {
350+ vcpu_clear_flag (& hyp_vcpu -> vcpu , GUEST_HAS_PTRAUTH );
351+ }
352+ }
353+
292354static void unpin_host_vcpu (struct kvm_vcpu * host_vcpu )
293355{
294356 if (host_vcpu )
@@ -310,6 +372,18 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
310372 hyp_vm -> host_kvm = host_kvm ;
311373 hyp_vm -> kvm .created_vcpus = nr_vcpus ;
312374 hyp_vm -> kvm .arch .mmu .vtcr = host_mmu .arch .mmu .vtcr ;
375+ hyp_vm -> kvm .arch .pkvm .enabled = READ_ONCE (host_kvm -> arch .pkvm .enabled );
376+ pkvm_init_features_from_host (hyp_vm , host_kvm );
377+ }
378+
379+ static void pkvm_vcpu_init_sve (struct pkvm_hyp_vcpu * hyp_vcpu , struct kvm_vcpu * host_vcpu )
380+ {
381+ struct kvm_vcpu * vcpu = & hyp_vcpu -> vcpu ;
382+
383+ if (!vcpu_has_feature (vcpu , KVM_ARM_VCPU_SVE )) {
384+ vcpu_clear_flag (vcpu , GUEST_HAS_SVE );
385+ vcpu_clear_flag (vcpu , VCPU_SVE_FINALIZED );
386+ }
313387}
314388
315389static int init_pkvm_hyp_vcpu (struct pkvm_hyp_vcpu * hyp_vcpu ,
@@ -335,7 +409,10 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
335409
336410 hyp_vcpu -> vcpu .arch .hw_mmu = & hyp_vm -> kvm .arch .mmu ;
337411 hyp_vcpu -> vcpu .arch .cflags = READ_ONCE (host_vcpu -> arch .cflags );
412+ hyp_vcpu -> vcpu .arch .mp_state .mp_state = KVM_MP_STATE_STOPPED ;
338413
414+ pkvm_vcpu_init_sve (hyp_vcpu , host_vcpu );
415+ pkvm_vcpu_init_ptrauth (hyp_vcpu );
339416 pkvm_vcpu_init_traps (& hyp_vcpu -> vcpu );
340417done :
341418 if (ret )
0 commit comments