Skip to content

Commit f1ff3fc

Browse files
Sebastian Ott
authored and Oliver Upton committed
KVM: arm64: unify code to prepare traps
There are 2 functions to calculate traps via HCR_EL2: * kvm_init_sysreg() called via KVM_RUN (before the 1st run or when the pid changes) * vcpu_reset_hcr() called via KVM_ARM_VCPU_INIT To unify these 2 and to support traps that are dependent on the ID register configuration, move the code from vcpu_reset_hcr() to sys_regs.c and call it via kvm_init_sysreg(). We still have to keep the non-FWB handling stuff in vcpu_reset_hcr(). Also the initialization with HCR_GUEST_FLAGS is kept there but guarded by !vcpu_has_run_once() to ensure that previous calculated values don't get overwritten. While at it rename kvm_init_sysreg() to kvm_calculate_traps() to better reflect what it's doing. Signed-off-by: Sebastian Ott <[email protected]> Reviewed-by: Eric Auger <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Oliver Upton <[email protected]>
1 parent 44241f3 commit f1ff3fc

File tree

4 files changed

+43
-35
lines changed

4 files changed

+43
-35
lines changed

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 9 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -69,39 +69,17 @@ static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
6969

7070
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
7171
{
72-
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
73-
if (has_vhe() || has_hvhe())
74-
vcpu->arch.hcr_el2 |= HCR_E2H;
75-
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
76-
/* route synchronous external abort exceptions to EL2 */
77-
vcpu->arch.hcr_el2 |= HCR_TEA;
78-
/* trap error record accesses */
79-
vcpu->arch.hcr_el2 |= HCR_TERR;
80-
}
72+
if (!vcpu_has_run_once(vcpu))
73+
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
8174

82-
if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) {
83-
vcpu->arch.hcr_el2 |= HCR_FWB;
84-
} else {
85-
/*
86-
* For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
87-
* get set in SCTLR_EL1 such that we can detect when the guest
88-
* MMU gets turned on and do the necessary cache maintenance
89-
* then.
90-
*/
75+
/*
76+
* For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
77+
* get set in SCTLR_EL1 such that we can detect when the guest
78+
* MMU gets turned on and do the necessary cache maintenance
79+
* then.
80+
*/
81+
if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
9182
vcpu->arch.hcr_el2 |= HCR_TVM;
92-
}
93-
94-
if (cpus_have_final_cap(ARM64_HAS_EVT) &&
95-
!cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
96-
vcpu->arch.hcr_el2 |= HCR_TID4;
97-
else
98-
vcpu->arch.hcr_el2 |= HCR_TID2;
99-
100-
if (vcpu_el1_is_32bit(vcpu))
101-
vcpu->arch.hcr_el2 &= ~HCR_RW;
102-
103-
if (kvm_has_mte(vcpu->kvm))
104-
vcpu->arch.hcr_el2 |= HCR_ATA;
10583
}
10684

10785
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)

arch/arm64/include/asm/kvm_host.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1120,7 +1120,7 @@ int __init populate_nv_trap_config(void);
11201120
bool lock_all_vcpus(struct kvm *kvm);
11211121
void unlock_all_vcpus(struct kvm *kvm);
11221122

1123-
void kvm_init_sysreg(struct kvm_vcpu *);
1123+
void kvm_calculate_traps(struct kvm_vcpu *vcpu);
11241124

11251125
/* MMIO helpers */
11261126
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);

arch/arm64/kvm/arm.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -797,7 +797,7 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
797797
* This needs to happen after NV has imposed its own restrictions on
798798
* the feature set
799799
*/
800-
kvm_init_sysreg(vcpu);
800+
kvm_calculate_traps(vcpu);
801801

802802
ret = kvm_timer_enable(vcpu);
803803
if (ret)

arch/arm64/kvm/sys_regs.c

Lines changed: 32 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4069,11 +4069,33 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *
40694069
return 0;
40704070
}
40714071

4072-
void kvm_init_sysreg(struct kvm_vcpu *vcpu)
4072+
static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
40734073
{
40744074
struct kvm *kvm = vcpu->kvm;
40754075

4076-
mutex_lock(&kvm->arch.config_lock);
4076+
if (has_vhe() || has_hvhe())
4077+
vcpu->arch.hcr_el2 |= HCR_E2H;
4078+
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
4079+
/* route synchronous external abort exceptions to EL2 */
4080+
vcpu->arch.hcr_el2 |= HCR_TEA;
4081+
/* trap error record accesses */
4082+
vcpu->arch.hcr_el2 |= HCR_TERR;
4083+
}
4084+
4085+
if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
4086+
vcpu->arch.hcr_el2 |= HCR_FWB;
4087+
4088+
if (cpus_have_final_cap(ARM64_HAS_EVT) &&
4089+
!cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
4090+
vcpu->arch.hcr_el2 |= HCR_TID4;
4091+
else
4092+
vcpu->arch.hcr_el2 |= HCR_TID2;
4093+
4094+
if (vcpu_el1_is_32bit(vcpu))
4095+
vcpu->arch.hcr_el2 &= ~HCR_RW;
4096+
4097+
if (kvm_has_mte(vcpu->kvm))
4098+
vcpu->arch.hcr_el2 |= HCR_ATA;
40774099

40784100
/*
40794101
* In the absence of FGT, we cannot independently trap TLBI
@@ -4082,6 +4104,14 @@ void kvm_init_sysreg(struct kvm_vcpu *vcpu)
40824104
*/
40834105
if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
40844106
vcpu->arch.hcr_el2 |= HCR_TTLBOS;
4107+
}
4108+
4109+
void kvm_calculate_traps(struct kvm_vcpu *vcpu)
4110+
{
4111+
struct kvm *kvm = vcpu->kvm;
4112+
4113+
mutex_lock(&kvm->arch.config_lock);
4114+
vcpu_set_hcr(vcpu);
40854115

40864116
if (cpus_have_final_cap(ARM64_HAS_HCX)) {
40874117
vcpu->arch.hcrx_el2 = HCRX_GUEST_FLAGS;

0 commit comments

Comments (0)