@@ -6,6 +6,9 @@
 
 #include <linux/kvm_host.h>
 #include <linux/mm.h>
+
+#include <asm/kvm_emulate.h>
+
 #include <nvhe/fixed_config.h>
 #include <nvhe/mem_protect.h>
 #include <nvhe/memory.h>
@@ -201,11 +204,46 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
         }
 }
 
+static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+{
+        vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+
+        if (has_hvhe())
+                vcpu->arch.hcr_el2 |= HCR_E2H;
+
+        if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
+                /* route synchronous external abort exceptions to EL2 */
+                vcpu->arch.hcr_el2 |= HCR_TEA;
+                /* trap error record accesses */
+                vcpu->arch.hcr_el2 |= HCR_TERR;
+        }
+
+        if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
+                vcpu->arch.hcr_el2 |= HCR_FWB;
+
+        if (cpus_have_final_cap(ARM64_HAS_EVT) &&
+            !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
+                vcpu->arch.hcr_el2 |= HCR_TID4;
+        else
+                vcpu->arch.hcr_el2 |= HCR_TID2;
+
+        if (vcpu_has_ptrauth(vcpu))
+                vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
+}
+
 /*
  * Initialize trap register values in protected mode.
  */
-void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
+static void pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
 {
+        vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
+        vcpu->arch.mdcr_el2 = 0;
+
+        pkvm_vcpu_reset_hcr(vcpu);
+
+        if (!vcpu_is_protected(vcpu))
+                return;
+
         pvm_init_trap_regs(vcpu);
         pvm_init_traps_aa64pfr0(vcpu);
         pvm_init_traps_aa64pfr1(vcpu);
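
The hunk above builds the vCPU's reset value of HCR_EL2 by OR-ing in trap bits gated on finalized CPU capabilities: with RAS present, synchronous external aborts are routed to EL2 (HCR_TEA) and error-record accesses are trapped (HCR_TERR); stage-2 forced write-back (HCR_FWB) is set when available; and HCR_TID4, which traps the cache-ID registers without trapping CTR_EL0, is preferred over the coarser HCR_TID2 when Enhanced Virtualization Traps are usable and cache types do not differ across CPUs. Below is a minimal standalone sketch of that accumulate-by-capability pattern; the HCR_* bit positions are placeholders rather than the architectural encodings (those live in arch/arm64/include/asm/kvm_arm.h), and cpu_has() stands in for cpus_have_final_cap():

/*
 * Standalone model of the capability-gated flag accumulation in
 * pkvm_vcpu_reset_hcr(). Placeholder bit values, stubbed capabilities.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum cap { CAP_RAS, CAP_FWB, CAP_EVT, CAP_MISMATCHED_CTYPE, NR_CAPS };

#define HCR_TEA   (1ULL << 0)   /* placeholder, not the real encoding */
#define HCR_TERR  (1ULL << 1)
#define HCR_FWB   (1ULL << 2)
#define HCR_TID4  (1ULL << 3)
#define HCR_TID2  (1ULL << 4)

static const bool caps[NR_CAPS] = { [CAP_RAS] = true, [CAP_EVT] = true };

static bool cpu_has(enum cap c) { return caps[c]; }

static uint64_t reset_hcr(void)
{
        uint64_t hcr = 0;

        if (cpu_has(CAP_RAS))
                hcr |= HCR_TEA | HCR_TERR;      /* route/trap RAS accesses */
        if (cpu_has(CAP_FWB))
                hcr |= HCR_FWB;
        /* Trap CTR_EL0 too only when the finer EVT trap can't cover it. */
        if (cpu_has(CAP_EVT) && !cpu_has(CAP_MISMATCHED_CTYPE))
                hcr |= HCR_TID4;
        else
                hcr |= HCR_TID2;
        return hcr;
}

int main(void)
{
        printf("hcr = %#llx\n", (unsigned long long)reset_hcr());
        return 0;
}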
@@ -289,6 +327,65 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
         hyp_spin_unlock(&vm_table_lock);
 }
 
+static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struct kvm *host_kvm)
+{
+        struct kvm *kvm = &hyp_vm->kvm;
+        DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES);
+
+        /* No restrictions for non-protected VMs. */
+        if (!kvm_vm_is_protected(kvm)) {
+                bitmap_copy(kvm->arch.vcpu_features,
+                            host_kvm->arch.vcpu_features,
+                            KVM_VCPU_MAX_FEATURES);
+                return;
+        }
+
+        bitmap_zero(allowed_features, KVM_VCPU_MAX_FEATURES);
+
+        /*
+         * For protected VMs, always allow:
+         * - CPU starting in poweroff state
+         * - PSCI v0.2
+         */
+        set_bit(KVM_ARM_VCPU_POWER_OFF, allowed_features);
+        set_bit(KVM_ARM_VCPU_PSCI_0_2, allowed_features);
+
+        /*
+         * Check if remaining features are allowed:
+         * - Performance Monitoring
+         * - Scalable Vectors
+         * - Pointer Authentication
+         */
+        if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), PVM_ID_AA64DFR0_ALLOW))
+                set_bit(KVM_ARM_VCPU_PMU_V3, allowed_features);
+
+        if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), PVM_ID_AA64PFR0_ALLOW))
+                set_bit(KVM_ARM_VCPU_SVE, allowed_features);
+
+        if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED) &&
+            FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED))
+                set_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, allowed_features);
+
+        if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI), PVM_ID_AA64ISAR1_ALLOW) &&
+            FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA), PVM_ID_AA64ISAR1_ALLOW))
+                set_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, allowed_features);
+
+        bitmap_and(kvm->arch.vcpu_features, host_kvm->arch.vcpu_features,
+                   allowed_features, KVM_VCPU_MAX_FEATURES);
+}
+
+static void pkvm_vcpu_init_ptrauth(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+        struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
+
+        if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) ||
+            vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC)) {
+                kvm_vcpu_enable_ptrauth(vcpu);
+        } else {
+                vcpu_clear_flag(vcpu, GUEST_HAS_PTRAUTH);
+        }
+}
+
 static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
 {
         if (host_vcpu)
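
pkvm_init_features_from_host() copies the host's vCPU feature bitmap verbatim for non-protected VMs, but for protected VMs it intersects the requested bits with an allow-list derived from the PVM_ID_* masks, so a protected guest can only be granted features pKVM knows how to sandbox. The bits being filtered are the same ones userspace requests through the KVM_ARM_VCPU_INIT ioctl. A hedged host-userspace sketch follows; init_vcpu_features() is a hypothetical helper, vm_fd and vcpu_fd are assumed to be open KVM file descriptors, and error handling is trimmed:

/*
 * Illustrative host-side snippet: requesting vCPU features that the
 * hypervisor-side pkvm_init_features_from_host() will later intersect
 * with its allow-list.
 */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int init_vcpu_features(int vm_fd, int vcpu_fd)
{
        struct kvm_vcpu_init init;

        memset(&init, 0, sizeof(init));
        /* Let KVM fill in the preferred target CPU type. */
        if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
                return -1;

        /*
         * PSCI v0.2 is on the pKVM allow-list unconditionally; address-key
         * pointer auth is allowed only if the PVM_ID_AA64ISAR1_* masks
         * permit it.
         */
        init.features[0] |= 1U << KVM_ARM_VCPU_PSCI_0_2;
        init.features[0] |= 1U << KVM_ARM_VCPU_PTRAUTH_ADDRESS;

        return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}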
@@ -310,6 +407,18 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
         hyp_vm->host_kvm = host_kvm;
         hyp_vm->kvm.created_vcpus = nr_vcpus;
         hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
+        hyp_vm->kvm.arch.pkvm.enabled = READ_ONCE(host_kvm->arch.pkvm.enabled);
+        pkvm_init_features_from_host(hyp_vm, host_kvm);
+}
+
+static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu)
+{
+        struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
+
+        if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
+                vcpu_clear_flag(vcpu, GUEST_HAS_SVE);
+                vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
+        }
 }
 
 static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
@@ -335,6 +444,11 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
 
         hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
         hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
+        hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
+
+        pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
+        pkvm_vcpu_init_ptrauth(hyp_vcpu);
+        pkvm_vcpu_init_traps(&hyp_vcpu->vcpu);
 done:
         if (ret)
                 unpin_host_vcpu(host_vcpu);
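
With these hunks applied, init_pkvm_hyp_vcpu() starts each hyp vCPU as KVM_MP_STATE_STOPPED and derives its SVE, pointer-auth and trap configuration from the already-filtered feature bitmap. The filtering itself is just bitmap_and() over the host's request and the allow-list; a toy userspace model of that intersection is sketched below (the bit positions are placeholders chosen for the example, not the uapi values):

/*
 * Userspace model of the bitmap_and() allow-list filtering performed
 * by pkvm_init_features_from_host(). Placeholder bit positions.
 */
#include <stdint.h>
#include <stdio.h>

#define F_POWER_OFF (1u << 0)
#define F_PSCI_0_2  (1u << 1)
#define F_PMU_V3    (1u << 2)
#define F_SVE       (1u << 3)

int main(void)
{
        uint32_t allowed   = F_POWER_OFF | F_PSCI_0_2 | F_PMU_V3;
        uint32_t requested = F_PSCI_0_2 | F_SVE; /* host also asks for SVE */
        uint32_t granted   = requested & allowed; /* SVE is dropped */

        printf("granted = %#x, sve = %s\n", granted,
               (granted & F_SVE) ? "yes" : "no");
        return 0;
}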