@@ -325,8 +325,30 @@ struct kvm_vcpu_arch {
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;
 
-	/* Miscellaneous vcpu state flags */
-	u64 flags;
+	/* Ownership of the FP regs */
+	enum {
+		FP_STATE_FREE,
+		FP_STATE_HOST_OWNED,
+		FP_STATE_GUEST_OWNED,
+	} fp_state;
+
+	/* Configuration flags, set once and for all before the vcpu can run */
+	u8 cflags;
+
+	/* Input flags to the hypervisor code, potentially cleared after use */
+	u8 iflags;
+
+	/* State flags for kernel bookkeeping, unused by the hypervisor code */
+	u8 sflags;
+
+	/*
+	 * Don't run the guest (internal implementation need).
+	 *
+	 * Contrary to the flags above, this is set/cleared outside of
+	 * a vcpu context, and thus cannot be mixed with the flags
+	 * themselves (or the flag accesses need to be made atomic).
+	 */
+	bool pause;
 
 	/*
 	 * We maintain more than a single set of debug registers to support
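The old u64 flags field mixed FP ownership bits (KVM_ARM64_FP_ENABLED/KVM_ARM64_FP_HOST, removed further down) in with everything else; the new layout tracks FP ownership as an explicit tri-state instead. A minimal sketch of the intended life cycle, loosely modelled on kvm_arch_vcpu_load_fp()/kvm_arch_vcpu_put_fp() and the hyp FP trap handler (the exact call sites are assumptions here, not part of this hunk):

/*
 * Sketch only: rough life cycle of vcpu->arch.fp_state across one
 * run of the vcpu. Where exactly each transition happens is an
 * assumption; only the three states come from the patch itself.
 */
static void fp_state_lifecycle(struct kvm_vcpu *vcpu)
{
	/* vcpu_load: the host's FP/SIMD regs are live on the CPU */
	vcpu->arch.fp_state = FP_STATE_HOST_OWNED;

	/* guest traps on an FP access: its regs get loaded instead */
	vcpu->arch.fp_state = FP_STATE_GUEST_OWNED;

	/* vcpu_put: guest regs saved, hardware state may be flushed,
	 * so neither context owns the FP unit any more */
	vcpu->arch.fp_state = FP_STATE_FREE;
}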
@@ -376,9 +398,6 @@ struct kvm_vcpu_arch {
 	/* vcpu power state */
 	struct kvm_mp_state mp_state;
 
-	/* Don't run the guest (internal implementation need) */
-	bool pause;
-
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;
@@ -392,17 +411,131 @@ struct kvm_vcpu_arch {
 	/* Additional reset state */
 	struct vcpu_reset_state reset_state;
 
-	/* True when deferrable sysregs are loaded on the physical CPU,
-	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe. */
-	bool sysregs_loaded_on_cpu;
-
 	/* Guest PV state */
 	struct {
 		u64 last_steal;
 		gpa_t base;
 	} steal;
 };
 
+/*
+ * Each 'flag' is composed of a comma-separated triplet:
+ *
+ * - the flag-set it belongs to in the vcpu->arch structure
+ * - the value for that flag
+ * - the mask for that flag
+ *
+ * __vcpu_single_flag() builds such a triplet for a single-bit flag.
+ * unpack_vcpu_flag() extracts the flag value from the triplet for
+ * direct use outside of the flag accessors.
+ */
+#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)
+
+#define __unpack_flag(_set, _f, _m)	_f
+#define unpack_vcpu_flag(...)		__unpack_flag(__VA_ARGS__)
+
+#define __build_check_flag(v, flagset, f, m)			\
+	do {							\
+		typeof(v->arch.flagset) *_fset;			\
+								\
+		/* Check that the flags fit in the mask */	\
+		BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m)));	\
+		/* Check that the flags fit in the type */	\
+		BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m));	\
+	} while (0)
+
+#define __vcpu_get_flag(v, flagset, f, m)			\
+	({							\
+		__build_check_flag(v, flagset, f, m);		\
+								\
+		v->arch.flagset & (m);				\
+	})
+
+#define __vcpu_set_flag(v, flagset, f, m)			\
+	do {							\
+		typeof(v->arch.flagset) *fset;			\
+								\
+		__build_check_flag(v, flagset, f, m);		\
+								\
+		fset = &v->arch.flagset;			\
+		if (HWEIGHT(m) > 1)				\
+			*fset &= ~(m);				\
+		*fset |= (f);					\
+	} while (0)
+
+#define __vcpu_clear_flag(v, flagset, f, m)			\
+	do {							\
+		typeof(v->arch.flagset) *fset;			\
+								\
+		__build_check_flag(v, flagset, f, m);		\
+								\
+		fset = &v->arch.flagset;			\
+		*fset &= ~(m);					\
+	} while (0)
+
+#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
+#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
+#define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
+
+/* SVE exposed to guest */
+#define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
+/* SVE config completed */
+#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
+/* PTRAUTH exposed to guest */
+#define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))
+
+/* Exception pending */
+#define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
+/*
+ * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
+ * be set together with an exception...
+ */
+#define INCREMENT_PC		__vcpu_single_flag(iflags, BIT(1))
+/* Target EL/MODE (not a single flag, but let's abuse the macro) */
+#define EXCEPT_MASK		__vcpu_single_flag(iflags, GENMASK(3, 1))
+
+/* Helpers to encode exceptions with minimum fuss */
+#define __EXCEPT_MASK_VAL	unpack_vcpu_flag(EXCEPT_MASK)
+#define __EXCEPT_SHIFT		__builtin_ctzl(__EXCEPT_MASK_VAL)
+#define __vcpu_except_flags(_f)	iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL
+
+/*
+ * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
+ * values:
+ *
+ * For AArch32 EL1:
+ */
+#define EXCEPT_AA32_UND		__vcpu_except_flags(0)
+#define EXCEPT_AA32_IABT	__vcpu_except_flags(1)
+#define EXCEPT_AA32_DABT	__vcpu_except_flags(2)
+/* For AArch64: */
+#define EXCEPT_AA64_EL1_SYNC	__vcpu_except_flags(0)
+#define EXCEPT_AA64_EL1_IRQ	__vcpu_except_flags(1)
+#define EXCEPT_AA64_EL1_FIQ	__vcpu_except_flags(2)
+#define EXCEPT_AA64_EL1_SERR	__vcpu_except_flags(3)
+/* For AArch64 with NV (one day): */
+#define EXCEPT_AA64_EL2_SYNC	__vcpu_except_flags(4)
+#define EXCEPT_AA64_EL2_IRQ	__vcpu_except_flags(5)
+#define EXCEPT_AA64_EL2_FIQ	__vcpu_except_flags(6)
+#define EXCEPT_AA64_EL2_SERR	__vcpu_except_flags(7)
+/* Guest debug is live */
+#define DEBUG_DIRTY		__vcpu_single_flag(iflags, BIT(4))
+/* Save SPE context if active */
+#define DEBUG_STATE_SAVE_SPE	__vcpu_single_flag(iflags, BIT(5))
+/* Save TRBE context if active */
+#define DEBUG_STATE_SAVE_TRBE	__vcpu_single_flag(iflags, BIT(6))
+
+/* SVE enabled for host EL0 */
+#define HOST_SVE_ENABLED	__vcpu_single_flag(sflags, BIT(0))
+/* SME enabled for EL0 */
+#define HOST_SME_ENABLED	__vcpu_single_flag(sflags, BIT(1))
+/* Physical CPU not in supported_cpus */
+#define ON_UNSUPPORTED_CPU	__vcpu_single_flag(sflags, BIT(2))
+/* WFIT instruction trapped */
+#define IN_WFIT			__vcpu_single_flag(sflags, BIT(3))
+/* vcpu system registers loaded on physical CPU */
+#define SYSREGS_ON_CPU		__vcpu_single_flag(sflags, BIT(4))
+
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
 #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
 			     sve_ffr_offset((vcpu)->arch.sve_max_vl))
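For readers unfamiliar with the construct: each flag name expands to the full "flagset, value, mask" triplet, and the variadic vcpu_*_flag() wrappers splice that triplet straight into the underlying accessor's argument list. A stripped-down userspace mock of the mechanism (no BUILD_BUG_ON() sanity checks, __builtin_popcount() standing in for HWEIGHT(); all names below are illustrative, not kernel code):

/* Userspace mock of the triplet trick; compile with gcc/clang. */
#include <assert.h>

struct vcpu_arch { unsigned char iflags; };
struct vcpu { struct vcpu_arch arch; };

/* A flag name expands to "flagset, value, mask" */
#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)

#define PENDING_EXCEPTION	__vcpu_single_flag(iflags, 1U << 0)
#define EXCEPT_MASK		__vcpu_single_flag(iflags, 7U << 1)
/* multi-bit encoding: value 2 inside the 3-bit mask, like the real
 * __vcpu_except_flags() helper builds */
#define EXCEPT_AA32_DABT	iflags, (2U << 1), (7U << 1)

#define __vcpu_get_flag(v, flagset, f, m)	((v)->arch.flagset & (m))

#define __vcpu_set_flag(v, flagset, f, m)			\
	do {							\
		/* multi-bit field: replace, don't accumulate */\
		if (__builtin_popcount(m) > 1)			\
			(v)->arch.flagset &= ~(m);		\
		(v)->arch.flagset |= (f);			\
	} while (0)

/* The wrappers splice the triplet into the argument list */
#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)

int main(void)
{
	struct vcpu v = { .arch = { .iflags = 0 } };

	vcpu_set_flag(&v, PENDING_EXCEPTION);
	vcpu_set_flag(&v, EXCEPT_AA32_DABT);

	assert(vcpu_get_flag(&v, PENDING_EXCEPTION));
	assert(vcpu_get_flag(&v, EXCEPT_MASK) == (2U << 1));
	return 0;
}

The HWEIGHT(m) > 1 test in the real __vcpu_set_flag() is what makes multi-bit fields such as EXCEPT_MASK behave as replace-on-write rather than OR-accumulate, mirrored by the popcount branch above.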
@@ -423,70 +556,31 @@ struct kvm_vcpu_arch {
 		__size_ret;					\
 	})
 
-/* vcpu_arch flags field values: */
-#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
-#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
-#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
-#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
-#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
-#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
-#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
-#define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
-/*
- * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
- * set together with an exception...
- */
-#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */
-#define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */
-/*
- * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
- * take the following values:
- *
- * For AArch32 EL1:
- */
-#define KVM_ARM64_EXCEPT_AA32_UND	(0 << 9)
-#define KVM_ARM64_EXCEPT_AA32_IABT	(1 << 9)
-#define KVM_ARM64_EXCEPT_AA32_DABT	(2 << 9)
-/* For AArch64: */
-#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC	(0 << 9)
-#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ	(1 << 9)
-#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ	(2 << 9)
-#define KVM_ARM64_EXCEPT_AA64_ELx_SERR	(3 << 9)
-#define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
-#define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)
-
-#define KVM_ARM64_DEBUG_STATE_SAVE_SPE	(1 << 12) /* Save SPE context if active */
-#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE	(1 << 13) /* Save TRBE context if active */
-#define KVM_ARM64_FP_FOREIGN_FPSTATE	(1 << 14)
-#define KVM_ARM64_ON_UNSUPPORTED_CPU	(1 << 15) /* Physical CPU not in supported_cpus */
-#define KVM_ARM64_HOST_SME_ENABLED	(1 << 16) /* SME enabled for EL0 */
-#define KVM_ARM64_WFIT			(1 << 17) /* WFIT instruction trapped */
-
 #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
 				KVM_GUESTDBG_USE_SW_BP | \
 				KVM_GUESTDBG_USE_HW | \
 				KVM_GUESTDBG_SINGLESTEP)
 
 #define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
-			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
+			    vcpu_get_flag(vcpu, GUEST_HAS_SVE))
 
 #ifdef CONFIG_ARM64_PTR_AUTH
 #define vcpu_has_ptrauth(vcpu)						\
 	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
 	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
-	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
+	 vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
 #else
 #define vcpu_has_ptrauth(vcpu)	false
 #endif
 
 #define vcpu_on_unsupported_cpu(vcpu)					\
-	((vcpu)->arch.flags & KVM_ARM64_ON_UNSUPPORTED_CPU)
+	vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)
 
 #define vcpu_set_on_unsupported_cpu(vcpu)				\
-	((vcpu)->arch.flags |= KVM_ARM64_ON_UNSUPPORTED_CPU)
+	vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)
 
 #define vcpu_clear_on_unsupported_cpu(vcpu)				\
-	((vcpu)->arch.flags &= ~KVM_ARM64_ON_UNSUPPORTED_CPU)
+	vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)
 
 #define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)
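The converted predicates above cover single-flag reads and writes; pending an exception combines PENDING_EXCEPTION with one of the EXCEPT_* encodings from the new iflags set. A sketch of that write side, in the spirit of the kvm_pend_exception() helper that lives in kvm_emulate.h (its exact shape there is an assumption, not shown by this diff):

/*
 * Sketch: mark an exception as pending. Because EXCEPT_* flags carry
 * the full EXCEPT_MASK as their mask, the second vcpu_set_flag()
 * replaces any previously encoded target EL/mode.
 */
#define kvm_pend_exception(v, e)					\
	do {								\
		vcpu_set_flag((v), PENDING_EXCEPTION);			\
		vcpu_set_flag((v), e);					\
	} while (0)

/* usage: kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC); */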
@@ -831,8 +925,7 @@ void kvm_init_protected_traps(struct kvm_vcpu *vcpu);
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 
-#define kvm_arm_vcpu_sve_finalized(vcpu) \
-	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
+#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)
 
 #define kvm_has_mte(kvm)					\
 	(system_supports_mte() &&				\