
Commit 6c3f5be

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:
 "ARM:

   - Correctly expose GICv3 support even if no irqchip is created so
     that userspace doesn't observe it changing pointlessly (fixing a
     regression with QEMU)

   - Don't issue a hypercall to set the id-mapped vectors when protected
     mode is enabled (fix for pKVM in combination with CPUs affected by
     Spectre-v3a)

  x86 (five oneliners, of which the most interesting two are):

   - a NULL pointer dereference on INVPCID executed with paging
     disabled, but only if KVM is using shadow paging

   - an incorrect bsearch comparison function which could truncate the
     result and apply PMU event filtering incorrectly. This one comes
     with a selftests update too"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID
  KVM: x86: hyper-v: fix type of valid_bank_mask
  KVM: Free new dirty bitmap if creating a new memslot fails
  KVM: eventfd: Fix false positive RCU usage warning
  selftests: kvm/x86: Verify the pmu event filter matches the correct event
  selftests: kvm/x86: Add the helper function create_pmu_event_filter
  kvm: x86/pmu: Fix the compare function used by the pmu event filter
  KVM: arm64: Don't hypercall before EL2 init
  KVM: arm64: vgic-v3: Consistently populate ID_AA64PFR0_EL1.GIC
  KVM: x86/mmu: Update number of zapped pages even if page list is stable
2 parents: b3454ce + 9f46c18 (commit 6c3f5be)


8 files changed: +56 −19 lines


arch/arm64/kvm/arm.c

Lines changed: 2 additions & 1 deletion
@@ -1436,7 +1436,8 @@ static int kvm_init_vector_slots(void)
 	base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
 	kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
 
-	if (kvm_system_needs_idmapped_vectors() && !has_vhe()) {
+	if (kvm_system_needs_idmapped_vectors() &&
+	    !is_protected_kvm_enabled()) {
 		err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
 					       __BP_HARDEN_HYP_VECS_SZ, &base);
 		if (err)

arch/arm64/kvm/sys_regs.c

Lines changed: 1 addition & 2 deletions
@@ -1123,8 +1123,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
 	val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
 	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
-	if (irqchip_in_kernel(vcpu->kvm) &&
-	    vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+	if (kvm_vgic_global_state.type == VGIC_V3) {
 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
 		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1);
 	}

arch/x86/kvm/hyperv.c

Lines changed: 2 additions & 2 deletions
@@ -1914,7 +1914,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
 	struct hv_send_ipi_ex send_ipi_ex;
 	struct hv_send_ipi send_ipi;
 	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
-	unsigned long valid_bank_mask;
+	u64 valid_bank_mask;
 	u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
 	u32 vector;
 	bool all_cpus;
@@ -1956,7 +1956,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
 	valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
 	all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
 
-	if (hc->var_cnt != bitmap_weight(&valid_bank_mask, 64))
+	if (hc->var_cnt != bitmap_weight((unsigned long *)&valid_bank_mask, 64))
 		return HV_STATUS_INVALID_HYPERCALL_INPUT;
 
 	if (all_cpus)

arch/x86/kvm/mmu/mmu.c

Lines changed: 10 additions & 6 deletions
@@ -5470,14 +5470,16 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
 	uint i;
 
 	if (pcid == kvm_get_active_pcid(vcpu)) {
-		mmu->invlpg(vcpu, gva, mmu->root.hpa);
+		if (mmu->invlpg)
+			mmu->invlpg(vcpu, gva, mmu->root.hpa);
 		tlb_flush = true;
 	}
 
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
 		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
 		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
-			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
+			if (mmu->invlpg)
+				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
 			tlb_flush = true;
 		}
 	}
@@ -5665,6 +5667,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
 	int nr_zapped, batch = 0;
+	bool unstable;
 
 restart:
 	list_for_each_entry_safe_reverse(sp, node,
@@ -5696,11 +5699,12 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
 			goto restart;
 		}
 
-		if (__kvm_mmu_prepare_zap_page(kvm, sp,
-				&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
-			batch += nr_zapped;
+		unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
+				&kvm->arch.zapped_obsolete_pages, &nr_zapped);
+		batch += nr_zapped;
+
+		if (unstable)
 			goto restart;
-		}
 	}
 
 	/*

arch/x86/kvm/pmu.c

Lines changed: 5 additions & 2 deletions
@@ -171,9 +171,12 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
 	return true;
 }
 
-static int cmp_u64(const void *a, const void *b)
+static int cmp_u64(const void *pa, const void *pb)
 {
-	return *(__u64 *)a - *(__u64 *)b;
+	u64 a = *(u64 *)pa;
+	u64 b = *(u64 *)pb;
+
+	return (a > b) - (a < b);
 }
 
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
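
Why the old comparator was broken: sort()/bsearch() comparators return an int, so the 64-bit difference *(__u64 *)a - *(__u64 *)b was narrowed to 32 bits and could even flip sign, giving an inconsistent ordering over the event filter list. A minimal stand-alone sketch of the failure mode (userspace C with made-up values, not the kernel helpers):

/* cmp_u64_demo.c - truncation in a subtracting comparator (illustration only) */
#include <stdint.h>
#include <stdio.h>

/* Old style: the 64-bit difference is silently narrowed to int. */
static int cmp_u64_broken(const void *a, const void *b)
{
	return *(const uint64_t *)a - *(const uint64_t *)b;
}

/* Fixed style, as in the patch above: compare, never subtract. */
static int cmp_u64_fixed(const void *pa, const void *pb)
{
	uint64_t a = *(const uint64_t *)pa;
	uint64_t b = *(const uint64_t *)pb;

	return (a > b) - (a < b);
}

int main(void)
{
	/* The two values differ only above bit 31, so the low 32 bits of
	 * (a - b) are zero and the broken comparator reports them as equal. */
	uint64_t a = 1ULL << 32, b = 0;

	printf("broken: %d (claims a == b)\n", cmp_u64_broken(&a, &b));
	printf("fixed : %d (a > b)\n", cmp_u64_fixed(&a, &b));
	return 0;
}

The new selftest case further down (test_amd_deny_list, event 0x1C2) appears to target exactly this: AMD event selects that encode bits above bit 31, which the truncating comparator mishandled.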

tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c

Lines changed: 33 additions & 4 deletions
@@ -208,7 +208,7 @@ static bool sanity_check_pmu(struct kvm_vm *vm)
 	return success;
 }
 
-static struct kvm_pmu_event_filter *make_pmu_event_filter(uint32_t nevents)
+static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
 {
 	struct kvm_pmu_event_filter *f;
 	int size = sizeof(*f) + nevents * sizeof(f->events[0]);
@@ -220,19 +220,29 @@ static struct kvm_pmu_event_filter *make_pmu_event_filter(uint32_t nevents)
 	return f;
 }
 
-static struct kvm_pmu_event_filter *event_filter(uint32_t action)
+
+static struct kvm_pmu_event_filter *
+create_pmu_event_filter(const uint64_t event_list[],
+			int nevents, uint32_t action)
 {
 	struct kvm_pmu_event_filter *f;
 	int i;
 
-	f = make_pmu_event_filter(ARRAY_SIZE(event_list));
+	f = alloc_pmu_event_filter(nevents);
 	f->action = action;
-	for (i = 0; i < ARRAY_SIZE(event_list); i++)
+	for (i = 0; i < nevents; i++)
 		f->events[i] = event_list[i];
 
 	return f;
 }
 
+static struct kvm_pmu_event_filter *event_filter(uint32_t action)
+{
+	return create_pmu_event_filter(event_list,
+				       ARRAY_SIZE(event_list),
+				       action);
+}
+
 /*
  * Remove the first occurrence of 'event' (if any) from the filter's
  * event list.
@@ -271,6 +281,22 @@ static uint64_t test_with_filter(struct kvm_vm *vm,
 	return run_vm_to_sync(vm);
 }
 
+static void test_amd_deny_list(struct kvm_vm *vm)
+{
+	uint64_t event = EVENT(0x1C2, 0);
+	struct kvm_pmu_event_filter *f;
+	uint64_t count;
+
+	f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY);
+	count = test_with_filter(vm, f);
+
+	free(f);
+	if (count != NUM_BRANCHES)
+		pr_info("%s: Branch instructions retired = %lu (expected %u)\n",
+			__func__, count, NUM_BRANCHES);
+	TEST_ASSERT(count, "Allowed PMU event is not counting");
+}
+
 static void test_member_deny_list(struct kvm_vm *vm)
 {
 	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
@@ -453,6 +479,9 @@ int main(int argc, char *argv[])
 		exit(KSFT_SKIP);
 	}
 
+	if (use_amd_pmu())
+		test_amd_deny_list(vm);
+
 	test_without_filter(vm);
 	test_member_deny_list(vm);
 	test_member_allow_list(vm);

virt/kvm/eventfd.c

Lines changed: 2 additions & 1 deletion
@@ -77,7 +77,8 @@ irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
 
 	idx = srcu_read_lock(&kvm->irq_srcu);
 
-	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
+	list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link,
+	    srcu_read_lock_held(&kvm->irq_srcu))
 		eventfd_signal(irqfd->resamplefd, 1);
 
 	srcu_read_unlock(&kvm->irq_srcu, idx);

virt/kvm/kvm_main.c

Lines changed: 1 addition & 1 deletion
@@ -1560,7 +1560,7 @@ static int kvm_prepare_memory_region(struct kvm *kvm,
 	r = kvm_arch_prepare_memory_region(kvm, old, new, change);
 
 	/* Free the bitmap on failure if it was allocated above. */
-	if (r && new && new->dirty_bitmap && old && !old->dirty_bitmap)
+	if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
 		kvm_destroy_dirty_bitmap(new);
 
 	return r;
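
The predicate change matters for brand-new memslots, where there is no old slot at all: the previous condition required old to be non-NULL and therefore skipped freeing the just-allocated bitmap when kvm_arch_prepare_memory_region() failed. A small stand-alone truth-table sketch of the two predicates (plain C with a toy slot struct, not the kernel types):

/* dirty_bitmap_predicate.c - compare the old and fixed cleanup conditions */
#include <stdbool.h>
#include <stdio.h>

struct toy_slot { bool dirty_bitmap; };

int main(void)
{
	struct toy_slot with = { .dirty_bitmap = true };
	struct toy_slot without = { .dirty_bitmap = false };
	struct toy_slot *news = &with;			/* bitmap just allocated */
	struct toy_slot *olds[] = { NULL, &without, &with };
	const char *names[] = { "new slot (no old)", "old without bitmap",
				"old with bitmap" };
	bool r = true;					/* pretend prepare failed */

	for (int i = 0; i < 3; i++) {
		struct toy_slot *old = olds[i];

		/* Previous predicate: misses the old == NULL case entirely. */
		bool before = r && news && news->dirty_bitmap &&
			      old && !old->dirty_bitmap;
		/* Fixed predicate: also frees when there was no old slot. */
		bool after = r && news && news->dirty_bitmap &&
			     (!old || !old->dirty_bitmap);

		printf("%-20s  freed before: %d  freed after: %d\n",
		       names[i], before, after);
	}
	return 0;
}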
