
Commit dc94f89

Author: Marc Zyngier (committed)
Merge branch kvm-arm64/burn-the-flags into kvmarm-master/next

* kvm-arm64/burn-the-flags:
  : .
  : Rework the per-vcpu flags to make them more manageable,
  : splitting them in different sets that have specific
  : uses:
  :
  : - configuration flags
  : - input to the world-switch
  : - state bookkeeping for the kernel itself
  :
  : The FP tracking is also simplified and tracked outside
  : of the flags as a separate state.
  : .
  KVM: arm64: Move the handling of !FP outside of the fast path
  KVM: arm64: Document why pause cannot be turned into a flag
  KVM: arm64: Reduce the size of the vcpu flag members
  KVM: arm64: Add build-time sanity checks for flags
  KVM: arm64: Warn when PENDING_EXCEPTION and INCREMENT_PC are set together
  KVM: arm64: Convert vcpu sysregs_loaded_on_cpu to a state flag
  KVM: arm64: Kill unused vcpu flags field
  KVM: arm64: Move vcpu WFIT flag to the state flag set
  KVM: arm64: Move vcpu ON_UNSUPPORTED_CPU flag to the state flag set
  KVM: arm64: Move vcpu SVE/SME flags to the state flag set
  KVM: arm64: Move vcpu debug/SPE/TRBE flags to the input flag set
  KVM: arm64: Move vcpu PC/Exception flags to the input flag set
  KVM: arm64: Move vcpu configuration flags into their own set
  KVM: arm64: Add three sets of flags to the vcpu state
  KVM: arm64: Add helpers to manipulate vcpu flags among a set
  KVM: arm64: Move FP state ownership from flag to a tristate
  KVM: arm64: Drop FP_FOREIGN_STATE from the hypervisor code

Signed-off-by: Marc Zyngier <[email protected]>
2 parents a111daf + b4da918 commit dc94f89

File tree

19 files changed: +248 -164 lines changed

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 10 additions & 1 deletion

@@ -473,9 +473,18 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
 
 static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
+	WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
+	vcpu_set_flag(vcpu, INCREMENT_PC);
 }
 
+#define kvm_pend_exception(v, e)					\
+	do {								\
+		WARN_ON(vcpu_get_flag((v), INCREMENT_PC));		\
+		vcpu_set_flag((v), PENDING_EXCEPTION);			\
+		vcpu_set_flag((v), e);					\
+	} while (0)
+
+
 static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
 {
 	return test_bit(feature, vcpu->arch.features);
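
The WARN_ON()s above turn the "exception XOR PC increment" rule into a runtime check on both paths. As a rough sketch of how the new helper is meant to be used (an illustrative call site, not part of this diff; EXCEPT_AA64_EL1_SYNC is one of the iflags encodings defined in kvm_host.h below):

	/* Sketch: queue a synchronous exception targeting AArch64 EL1 */
	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);

	/* On an unrelated path, after emulating an instruction: */
	kvm_incr_pc(vcpu);

Calling one helper while the other's flag is still pending now fires a WARN_ON() instead of silently mixing the two states.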

arch/arm64/include/asm/kvm_host.h

Lines changed: 148 additions & 55 deletions

@@ -325,8 +325,30 @@ struct kvm_vcpu_arch {
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;
 
-	/* Miscellaneous vcpu state flags */
-	u64 flags;
+	/* Ownership of the FP regs */
+	enum {
+		FP_STATE_FREE,
+		FP_STATE_HOST_OWNED,
+		FP_STATE_GUEST_OWNED,
+	} fp_state;
+
+	/* Configuration flags, set once and for all before the vcpu can run */
+	u8 cflags;
+
+	/* Input flags to the hypervisor code, potentially cleared after use */
+	u8 iflags;
+
+	/* State flags for kernel bookkeeping, unused by the hypervisor code */
+	u8 sflags;
+
+	/*
+	 * Don't run the guest (internal implementation need).
+	 *
+	 * Contrary to the flags above, this is set/cleared outside of
+	 * a vcpu context, and thus cannot be mixed with the flags
+	 * themselves (or the flag accesses need to be made atomic).
+	 */
+	bool pause;
 
 	/*
 	 * We maintain more than a single set of debug registers to support
@@ -376,9 +398,6 @@ struct kvm_vcpu_arch {
 	/* vcpu power state */
 	struct kvm_mp_state mp_state;
 
-	/* Don't run the guest (internal implementation need) */
-	bool pause;
-
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
@@ -392,17 +411,131 @@ struct kvm_vcpu_arch {
 	/* Additional reset state */
 	struct vcpu_reset_state reset_state;
 
-	/* True when deferrable sysregs are loaded on the physical CPU,
-	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe. */
-	bool sysregs_loaded_on_cpu;
-
 	/* Guest PV state */
 	struct {
 		u64 last_steal;
 		gpa_t base;
 	} steal;
 };
 
+/*
+ * Each 'flag' is composed of a comma-separated triplet:
+ *
+ * - the flag-set it belongs to in the vcpu->arch structure
+ * - the value for that flag
+ * - the mask for that flag
+ *
+ * __vcpu_single_flag() builds such a triplet for a single-bit flag.
+ * unpack_vcpu_flag() extract the flag value from the triplet for
+ * direct use outside of the flag accessors.
+ */
+#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)
+
+#define __unpack_flag(_set, _f, _m)	_f
+#define unpack_vcpu_flag(...)		__unpack_flag(__VA_ARGS__)
+
+#define __build_check_flag(v, flagset, f, m)			\
+	do {							\
+		typeof(v->arch.flagset) *_fset;			\
+								\
+		/* Check that the flags fit in the mask */	\
+		BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m)));	\
+		/* Check that the flags fit in the type */	\
+		BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m));	\
+	} while (0)
+
+#define __vcpu_get_flag(v, flagset, f, m)			\
+	({							\
+		__build_check_flag(v, flagset, f, m);		\
+								\
+		v->arch.flagset & (m);				\
+	})
+
+#define __vcpu_set_flag(v, flagset, f, m)			\
+	do {							\
+		typeof(v->arch.flagset) *fset;			\
+								\
+		__build_check_flag(v, flagset, f, m);		\
+								\
+		fset = &v->arch.flagset;			\
+		if (HWEIGHT(m) > 1)				\
+			*fset &= ~(m);				\
+		*fset |= (f);					\
+	} while (0)
+
+#define __vcpu_clear_flag(v, flagset, f, m)			\
+	do {							\
+		typeof(v->arch.flagset) *fset;			\
+								\
+		__build_check_flag(v, flagset, f, m);		\
+								\
+		fset = &v->arch.flagset;			\
+		*fset &= ~(m);					\
+	} while (0)
+
+#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
+#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
+#define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
+
+/* SVE exposed to guest */
+#define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
+/* SVE config completed */
+#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
+/* PTRAUTH exposed to guest */
+#define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))
+
+/* Exception pending */
+#define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
+/*
+ * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
+ * be set together with an exception...
+ */
+#define INCREMENT_PC		__vcpu_single_flag(iflags, BIT(1))
+/* Target EL/MODE (not a single flag, but let's abuse the macro) */
+#define EXCEPT_MASK		__vcpu_single_flag(iflags, GENMASK(3, 1))
+
+/* Helpers to encode exceptions with minimum fuss */
+#define __EXCEPT_MASK_VAL	unpack_vcpu_flag(EXCEPT_MASK)
+#define __EXCEPT_SHIFT		__builtin_ctzl(__EXCEPT_MASK_VAL)
+#define __vcpu_except_flags(_f)	iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL
+
+/*
+ * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
+ * values:
+ *
+ * For AArch32 EL1:
+ */
+#define EXCEPT_AA32_UND		__vcpu_except_flags(0)
+#define EXCEPT_AA32_IABT	__vcpu_except_flags(1)
+#define EXCEPT_AA32_DABT	__vcpu_except_flags(2)
+/* For AArch64: */
+#define EXCEPT_AA64_EL1_SYNC	__vcpu_except_flags(0)
+#define EXCEPT_AA64_EL1_IRQ	__vcpu_except_flags(1)
+#define EXCEPT_AA64_EL1_FIQ	__vcpu_except_flags(2)
+#define EXCEPT_AA64_EL1_SERR	__vcpu_except_flags(3)
+/* For AArch64 with NV (one day): */
+#define EXCEPT_AA64_EL2_SYNC	__vcpu_except_flags(4)
+#define EXCEPT_AA64_EL2_IRQ	__vcpu_except_flags(5)
+#define EXCEPT_AA64_EL2_FIQ	__vcpu_except_flags(6)
+#define EXCEPT_AA64_EL2_SERR	__vcpu_except_flags(7)
+/* Guest debug is live */
+#define DEBUG_DIRTY		__vcpu_single_flag(iflags, BIT(4))
+/* Save SPE context if active */
+#define DEBUG_STATE_SAVE_SPE	__vcpu_single_flag(iflags, BIT(5))
+/* Save TRBE context if active */
+#define DEBUG_STATE_SAVE_TRBE	__vcpu_single_flag(iflags, BIT(6))
+
+/* SVE enabled for host EL0 */
+#define HOST_SVE_ENABLED	__vcpu_single_flag(sflags, BIT(0))
+/* SME enabled for EL0 */
+#define HOST_SME_ENABLED	__vcpu_single_flag(sflags, BIT(1))
+/* Physical CPU not in supported_cpus */
+#define ON_UNSUPPORTED_CPU	__vcpu_single_flag(sflags, BIT(2))
+/* WFIT instruction trapped */
+#define IN_WFIT			__vcpu_single_flag(sflags, BIT(3))
+/* vcpu system registers loaded on physical CPU */
+#define SYSREGS_ON_CPU		__vcpu_single_flag(sflags, BIT(4))
+
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
 #define vcpu_sve_pffr(vcpu)	(kern_hyp_va((vcpu)->arch.sve_state) +	\
 				 sve_ffr_offset((vcpu)->arch.sve_max_vl))
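
The comma-triplet trick above is the core of the rework: a flag name expands to three macro arguments (flag set, value, mask), so one accessor can pick the right u8 member and the right bits. A minimal stand-alone sketch of the mechanism (my illustration, compilable with any C compiler; it omits the BUILD_BUG_ON checks and uses plain shifts instead of the kernel's BIT()/HWEIGHT() helpers):

	#include <stdio.h>
	#include <stdint.h>

	struct vcpu { struct { uint8_t cflags, iflags, sflags; } arch; };

	/* A flag name expands to the triplet: set, value, mask */
	#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)

	#define __vcpu_get_flag(v, flagset, f, m)	((v)->arch.flagset & (m))
	#define __vcpu_set_flag(v, flagset, f, m)		\
		do {						\
			(v)->arch.flagset &= (uint8_t)~(m);	\
			(v)->arch.flagset |= (f);		\
		} while (0)

	/* __VA_ARGS__ lets a single "flag" argument carry all three parts */
	#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
	#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)

	#define IN_WFIT		__vcpu_single_flag(sflags, 1U << 3)
	#define GUEST_HAS_SVE	__vcpu_single_flag(cflags, 1U << 0)

	int main(void)
	{
		struct vcpu v = { { 0 } };

		/* Expands to __vcpu_set_flag((&v), sflags, (1U << 3), (1U << 3)) */
		vcpu_set_flag(&v, IN_WFIT);
		printf("IN_WFIT=%d GUEST_HAS_SVE=%d\n",
		       !!vcpu_get_flag(&v, IN_WFIT),
		       !!vcpu_get_flag(&v, GUEST_HAS_SVE));	/* prints 1 0 */
		return 0;
	}

Note how the set name (sflags vs cflags) travels with the flag, so a caller can never apply a flag to the wrong member; the kernel version additionally rejects, at compile time, any bit that does not fit the u8.
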
@@ -423,70 +556,31 @@ struct kvm_vcpu_arch {
 		__size_ret;						\
 	})
 
-/* vcpu_arch flags field values: */
-#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
-#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
-#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
-#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
-#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
-#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
-#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
-#define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
-/*
- * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
- * set together with an exception...
- */
-#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */
-#define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */
-/*
- * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
- * take the following values:
- *
- * For AArch32 EL1:
- */
-#define KVM_ARM64_EXCEPT_AA32_UND	(0 << 9)
-#define KVM_ARM64_EXCEPT_AA32_IABT	(1 << 9)
-#define KVM_ARM64_EXCEPT_AA32_DABT	(2 << 9)
-/* For AArch64: */
-#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC	(0 << 9)
-#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ	(1 << 9)
-#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ	(2 << 9)
-#define KVM_ARM64_EXCEPT_AA64_ELx_SERR	(3 << 9)
-#define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
-#define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)
-
-#define KVM_ARM64_DEBUG_STATE_SAVE_SPE	(1 << 12) /* Save SPE context if active */
-#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE	(1 << 13) /* Save TRBE context if active */
-#define KVM_ARM64_FP_FOREIGN_FPSTATE	(1 << 14)
-#define KVM_ARM64_ON_UNSUPPORTED_CPU	(1 << 15) /* Physical CPU not in supported_cpus */
-#define KVM_ARM64_HOST_SME_ENABLED	(1 << 16) /* SME enabled for EL0 */
-#define KVM_ARM64_WFIT			(1 << 17) /* WFIT instruction trapped */
-
 #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
 				 KVM_GUESTDBG_USE_SW_BP | \
 				 KVM_GUESTDBG_USE_HW | \
 				 KVM_GUESTDBG_SINGLESTEP)
 
 #define vcpu_has_sve(vcpu) (system_supports_sve() &&		\
-			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
+			    vcpu_get_flag(vcpu, GUEST_HAS_SVE))
 
 #ifdef CONFIG_ARM64_PTR_AUTH
 #define vcpu_has_ptrauth(vcpu)						\
 	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
 	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
-	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
+	  vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
 #else
 #define vcpu_has_ptrauth(vcpu)	false
 #endif
 
 #define vcpu_on_unsupported_cpu(vcpu)					\
-	((vcpu)->arch.flags & KVM_ARM64_ON_UNSUPPORTED_CPU)
+	vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)
 
 #define vcpu_set_on_unsupported_cpu(vcpu)				\
-	((vcpu)->arch.flags |= KVM_ARM64_ON_UNSUPPORTED_CPU)
+	vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)
 
 #define vcpu_clear_on_unsupported_cpu(vcpu)				\
-	((vcpu)->arch.flags &= ~KVM_ARM64_ON_UNSUPPORTED_CPU)
+	vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)
 
 #define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)
 
@@ -831,8 +925,7 @@ void kvm_init_protected_traps(struct kvm_vcpu *vcpu);
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 
-#define kvm_arm_vcpu_sve_finalized(vcpu) \
-	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
+#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)
 
 #define kvm_has_mte(kvm)					\
 	(system_supports_mte() &&				\
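
A worked example of the new exception encoding, derived from the definitions above: EXCEPT_MASK occupies iflags bits 3:1, so __EXCEPT_SHIFT evaluates to 1 and __vcpu_except_flags(n) yields the triplet "iflags, n << 1, GENMASK(3, 1)". The old split between the ELx_* values and a separate EL1/EL2 bit collapses into one 3-bit field:

	EXCEPT_AA64_EL1_SYNC  ->  iflags, 0b000 << 1, 0b1110
	EXCEPT_AA64_EL1_SERR  ->  iflags, 0b011 << 1, 0b1110
	EXCEPT_AA64_EL2_SYNC  ->  iflags, 0b100 << 1, 0b1110

Since HWEIGHT(GENMASK(3, 1)) > 1, vcpu_set_flag() clears the whole mask before OR-ing in the new value, so selecting a new target EL/mode erases the previous one; and because INCREMENT_PC is BIT(1), inside EXCEPT_MASK, it cannot be encoded alongside a pending exception, which is exactly what the WARN_ON()s in kvm_emulate.h police.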

arch/arm64/kvm/arch_timer.c

Lines changed: 1 addition & 1 deletion

@@ -242,7 +242,7 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
 static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
 {
 	return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
-		(vcpu->arch.flags & KVM_ARM64_WFIT));
+		vcpu_get_flag(vcpu, IN_WFIT));
 }
 
 static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)

arch/arm64/kvm/arm.c

Lines changed: 9 additions & 3 deletions

@@ -330,6 +330,12 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
 
+	/*
+	 * Default value for the FP state, will be overloaded at load
+	 * time if we support FP (pretty likely)
+	 */
+	vcpu->arch.fp_state = FP_STATE_FREE;
+
 	/* Set up the timer */
 	kvm_timer_vcpu_init(vcpu);
 
@@ -659,7 +665,7 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
 	preempt_enable();
 
 	kvm_vcpu_halt(vcpu);
-	vcpu->arch.flags &= ~KVM_ARM64_WFIT;
+	vcpu_clear_flag(vcpu, IN_WFIT);
 	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 
 	preempt_disable();
@@ -1015,8 +1021,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	 * the vcpu state. Note that this relies on __kvm_adjust_pc()
 	 * being preempt-safe on VHE.
 	 */
-	if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION |
-					 KVM_ARM64_INCREMENT_PC)))
+	if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) ||
+		     vcpu_get_flag(vcpu, INCREMENT_PC)))
 		kvm_call_hyp(__kvm_adjust_pc, vcpu);
 
 	vcpu_put(vcpu);
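
The FP_STATE_FREE default set above is one leg of the new tristate that replaces the old FP_ENABLED/FP_HOST/FP_FOREIGN_FPSTATE flag juggling. Roughly, per the series description (my paraphrase of the intended transitions, not code from this diff):

	/* fp_state, sketched as a state machine:
	 *
	 *   vcpu load        ->  FP_STATE_HOST_OWNED  (host FP regs live)
	 *   guest FP trap    ->  FP_STATE_GUEST_OWNED (guest takes the regs)
	 *   vcpu put         ->  save guest FP if GUEST_OWNED, then release
	 *   FP_STATE_FREE    ->  neither context is resident; nothing to save
	 */

Tracking ownership as a single enum instead of two independent bits removes the impossible state (both "guest loaded" and "host loaded") by construction.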
