Commit d47dcb6

Merge branch kvm-arm64/feature-flag-refactor into kvmarm/next
* kvm-arm64/feature-flag-refactor:
  : vCPU feature flag cleanup
  :
  : Clean up KVM's handling of vCPU feature flags to get rid of the
  : vCPU-scoped bitmaps and remove failure paths from kvm_reset_vcpu().
  KVM: arm64: Get rid of vCPU-scoped feature bitmap
  KVM: arm64: Remove unused return value from kvm_reset_vcpu()
  KVM: arm64: Hoist NV+SVE check into KVM_ARM_VCPU_INIT ioctl handler
  KVM: arm64: Prevent NV feature flag on systems w/o nested virt
  KVM: arm64: Hoist PAuth checks into KVM_ARM_VCPU_INIT ioctl
  KVM: arm64: Hoist SVE check into KVM_ARM_VCPU_INIT ioctl handler
  KVM: arm64: Hoist PMUv3 check into KVM_ARM_VCPU_INIT ioctl handler
  KVM: arm64: Add generic check for system-supported vCPU features

Signed-off-by: Oliver Upton <[email protected]>
2 parents 054056b + 1de10b7 commit d47dcb6
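As a rough userspace-side illustration of what the "hoist ... into KVM_ARM_VCPU_INIT" patches change, here is a minimal sketch (not code from this series; the helper name and the vm_fd/vcpu_fd file descriptors are assumed) of a vCPU init requesting optional features. With the checks moved into the ioctl handler, feature bits the host cannot support fail the KVM_ARM_VCPU_INIT ioctl itself instead of surfacing later as a kvm_reset_vcpu() failure.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Hypothetical helper; vm_fd and vcpu_fd are assumed to already exist. */
	static int init_vcpu(int vm_fd, int vcpu_fd)
	{
		struct kvm_vcpu_init init;

		memset(&init, 0, sizeof(init));

		/* Let KVM pick the preferred target for this host. */
		if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
			return -1;

		init.features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
		init.features[0] |= 1 << KVM_ARM_VCPU_SVE;	/* rejected here if the host lacks SVE */

		/* Unsupported or inconsistent feature sets now fail at this point. */
		return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
	}

(For SVE the vCPU would still need KVM_ARM_VCPU_FINALIZE before it can run; that flow is unchanged by this series.)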

10 files changed (+72, -79 lines)


arch/arm64/include/asm/kvm_emulate.h

Lines changed: 6 additions & 7 deletions

@@ -54,6 +54,11 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
 int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
 int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
 
+static inline bool vcpu_has_feature(const struct kvm_vcpu *vcpu, int feature)
+{
+	return test_bit(feature, vcpu->kvm->arch.vcpu_features);
+}
+
 #if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
 static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
@@ -62,7 +67,7 @@ static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 #else
 static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
-	return test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features);
+	return vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
 }
 #endif
 
@@ -565,12 +570,6 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
 		vcpu_set_flag((v), e);					\
 	} while (0)
 
-
-static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
-{
-	return test_bit(feature, vcpu->arch.features);
-}
-
 static __always_inline void kvm_write_cptr_el2(u64 val)
 {
 	if (has_vhe() || has_hvhe())

arch/arm64/include/asm/kvm_host.h

Lines changed: 1 addition & 4 deletions

@@ -78,7 +78,7 @@ extern unsigned int __ro_after_init kvm_sve_max_vl;
 int __init kvm_arm_init_sve(void);
 
 u32 __attribute_const__ kvm_target_cpu(void);
-int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+void kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
 
 struct kvm_hyp_memcache {
@@ -574,9 +574,6 @@ struct kvm_vcpu_arch {
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
-	/* feature flags */
-	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
-
 	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
 	u64 vsesr_el2;
 

arch/arm64/include/asm/kvm_nested.h

Lines changed: 2 additions & 1 deletion

@@ -2,13 +2,14 @@
 #ifndef __ARM64_KVM_NESTED_H
 #define __ARM64_KVM_NESTED_H
 
+#include <asm/kvm_emulate.h>
 #include <linux/kvm_host.h>
 
 static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
 {
 	return (!__is_defined(__KVM_NVHE_HYPERVISOR__) &&
 		cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
-		test_bit(KVM_ARM_VCPU_HAS_EL2, vcpu->arch.features));
+		vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2));
 }
 
 extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);

arch/arm64/kvm/arch_timer.c

Lines changed: 1 addition & 3 deletions

@@ -943,7 +943,7 @@ void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
 		unmask_vtimer_irq_user(vcpu);
 }
 
-int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
+void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 	struct timer_map map;
@@ -987,8 +987,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 		soft_timer_cancel(&map.emul_vtimer->hrtimer);
 	if (map.emul_ptimer)
 		soft_timer_cancel(&map.emul_ptimer->hrtimer);
-
-	return 0;
 }
 
 static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)

arch/arm64/kvm/arm.c

Lines changed: 48 additions & 14 deletions

@@ -367,7 +367,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 
 	/* Force users to call KVM_ARM_VCPU_INIT */
 	vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
-	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 
 	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
 
@@ -1190,6 +1189,30 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
 	return -EINVAL;
 }
 
+static unsigned long system_supported_vcpu_features(void)
+{
+	unsigned long features = KVM_VCPU_VALID_FEATURES;
+
+	if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1))
+		clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features);
+
+	if (!kvm_arm_support_pmu_v3())
+		clear_bit(KVM_ARM_VCPU_PMU_V3, &features);
+
+	if (!system_supports_sve())
+		clear_bit(KVM_ARM_VCPU_SVE, &features);
+
+	if (!system_has_full_ptr_auth()) {
+		clear_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features);
+		clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features);
+	}
+
+	if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
+		clear_bit(KVM_ARM_VCPU_HAS_EL2, &features);
+
+	return features;
+}
+
 static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
 					const struct kvm_vcpu_init *init)
 {
@@ -1204,12 +1227,25 @@ static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
 			return -ENOENT;
 	}
 
-	if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
-		return 0;
+	if (features & ~system_supported_vcpu_features())
+		return -EINVAL;
 
-	if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
+	/*
+	 * For now make sure that both address/generic pointer authentication
+	 * features are requested by the userspace together.
+	 */
+	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features) !=
+	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features))
+		return -EINVAL;
+
+	/* Disallow NV+SVE for the time being */
+	if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features) &&
+	    test_bit(KVM_ARM_VCPU_SVE, &features))
 		return -EINVAL;
 
+	if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
+		return 0;
+
 	/* MTE is incompatible with AArch32 */
 	if (kvm_has_mte(vcpu->kvm))
 		return -EINVAL;
@@ -1226,7 +1262,8 @@ static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
 {
 	unsigned long features = init->features[0];
 
-	return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
+	return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features,
+			     KVM_VCPU_MAX_FEATURES);
 }
 
 static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
@@ -1239,21 +1276,17 @@ static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 	mutex_lock(&kvm->arch.config_lock);
 
 	if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) &&
-	    !bitmap_equal(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES))
+	    kvm_vcpu_init_changed(vcpu, init))
 		goto out_unlock;
 
-	bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
+	bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
 
 	/* Now we know what it is, we can reset it. */
-	ret = kvm_reset_vcpu(vcpu);
-	if (ret) {
-		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
-		goto out_unlock;
-	}
+	kvm_reset_vcpu(vcpu);
 
-	bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
 	set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
 	vcpu_set_flag(vcpu, VCPU_INITIALIZED);
+	ret = 0;
 out_unlock:
 	mutex_unlock(&kvm->arch.config_lock);
 	return ret;
@@ -1278,7 +1311,8 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 	if (kvm_vcpu_init_changed(vcpu, init))
 		return -EINVAL;
 
-	return kvm_reset_vcpu(vcpu);
+	kvm_reset_vcpu(vcpu);
+	return 0;
 }
 
 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
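Because the feature bitmap is now tracked per VM rather than per vCPU, the first successful KVM_ARM_VCPU_INIT effectively pins the configuration, and kvm_vcpu_init_changed() compares later requests against kvm->arch.vcpu_features. A hypothetical userspace fragment (not from this series; the helper name and vcpu1_fd are assumptions) to illustrate the resulting behaviour:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/*
	 * Hypothetical illustration: vcpu0 was already initialized with *init,
	 * so flipping any feature bit for vcpu1 mismatches the VM-wide
	 * kvm->arch.vcpu_features and the ioctl is expected to fail (-EINVAL).
	 */
	static int init_mismatched_vcpu(int vcpu1_fd, struct kvm_vcpu_init *init)
	{
		init->features[0] ^= 1 << KVM_ARM_VCPU_PMU_V3;
		return ioctl(vcpu1_fd, KVM_ARM_VCPU_INIT, init);
	}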

arch/arm64/kvm/hypercalls.c

Lines changed: 1 addition & 1 deletion

@@ -554,7 +554,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 	bool wants_02;
 
-	wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
+	wants_02 = vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2);
 
 	switch (val) {
 	case KVM_ARM_PSCI_0_1:

arch/arm64/kvm/reset.c

Lines changed: 10 additions & 46 deletions

@@ -73,11 +73,8 @@ int __init kvm_arm_init_sve(void)
 	return 0;
 }
 
-static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
+static void kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
 {
-	if (!system_supports_sve())
-		return -EINVAL;
-
 	vcpu->arch.sve_max_vl = kvm_sve_max_vl;
 
 	/*
@@ -86,8 +83,6 @@ static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
 	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
 	 */
 	vcpu_set_flag(vcpu, GUEST_HAS_SVE);
-
-	return 0;
 }
 
 /*
@@ -170,20 +165,9 @@ static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
 		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
 }
 
-static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
+static void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
 {
-	/*
-	 * For now make sure that both address/generic pointer authentication
-	 * features are requested by the userspace together and the system
-	 * supports these capabilities.
-	 */
-	if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
-	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) ||
-	    !system_has_full_ptr_auth())
-		return -EINVAL;
-
 	vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
-	return 0;
 }
 
 /**
@@ -204,10 +188,9 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
  * disable preemption around the vcpu reset as we would otherwise race with
  * preempt notifiers which also call put/load.
  */
-int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_reset_state reset_state;
-	int ret;
 	bool loaded;
 	u32 pstate;
 
@@ -224,29 +207,16 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	if (loaded)
 		kvm_arch_vcpu_put(vcpu);
 
-	/* Disallow NV+SVE for the time being */
-	if (vcpu_has_nv(vcpu) && vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
-		if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
-			ret = kvm_vcpu_enable_sve(vcpu);
-			if (ret)
-				goto out;
-		}
+		if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
+			kvm_vcpu_enable_sve(vcpu);
 	} else {
 		kvm_vcpu_reset_sve(vcpu);
 	}
 
-	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
-	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
-		if (kvm_vcpu_enable_ptrauth(vcpu)) {
-			ret = -EINVAL;
-			goto out;
-		}
-	}
+	if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) ||
+	    vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC))
+		kvm_vcpu_enable_ptrauth(vcpu);
 
 	if (vcpu_el1_is_32bit(vcpu))
 		pstate = VCPU_RESET_PSTATE_SVC;
@@ -255,11 +225,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	else
 		pstate = VCPU_RESET_PSTATE_EL1;
 
-	if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	/* Reset core registers */
 	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
 	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
@@ -294,12 +259,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	}
 
 	/* Reset timer */
-	ret = kvm_timer_vcpu_reset(vcpu);
-out:
+	kvm_timer_vcpu_reset(vcpu);
+
 	if (loaded)
 		kvm_arch_vcpu_load(vcpu, smp_processor_id());
 	preempt_enable();
-	return ret;
 }
 
 u32 get_kvm_ipa_limit(void)

include/kvm/arm_arch_timer.h

Lines changed: 1 addition & 1 deletion

@@ -94,7 +94,7 @@ struct arch_timer_cpu {
 
 int __init kvm_timer_hyp_init(bool has_gic);
 int kvm_timer_enable(struct kvm_vcpu *vcpu);
-int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
 bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);

include/kvm/arm_pmu.h

Lines changed: 1 addition & 1 deletion

@@ -77,7 +77,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 void kvm_vcpu_pmu_resync_el0(void);
 
 #define kvm_vcpu_has_pmu(vcpu)					\
-	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
+	(vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))
 
 /*
  * Updates the vcpu's view of the pmu events for this cpu.

include/kvm/arm_psci.h

Lines changed: 1 addition & 1 deletion

@@ -26,7 +26,7 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu)
 	 * revisions. It is thus safe to return the latest, unless
 	 * userspace has instructed us otherwise.
 	 */
-	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
+	if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2)) {
 		if (vcpu->kvm->arch.psci_version)
 			return vcpu->kvm->arch.psci_version;
 