Commit 5656374

Merge branch 'gpc-fixes' of git://git.infradead.org/users/dwmw2/linux into HEAD
Pull Xen-for-KVM changes from David Woodhouse:

 * add support for 32-bit guests in SCHEDOP_poll

 * the rest of the gfn-to-pfn cache API cleanup

"I still haven't reinstated the last of those patches to make gpc->len
immutable."

Signed-off-by: Paolo Bonzini <[email protected]>
2 parents 74bee0c + 06e155c commit 5656374
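
The call sites changed below imply the following shape for the reworked gfn-to-pfn cache API. This is a sketch inferred from this diff, not the authoritative declarations (those live in include/linux/kvm_host.h, which is not part of this excerpt); return types are guessed from how the callers test the results.

/* Before: per-call kvm/vcpu/usage arguments, and an explicit gpa on check/refresh. */
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc);
int  kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                      struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
                      gpa_t gpa, unsigned long len);
bool kvm_gpc_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                   gpa_t gpa, unsigned long len);
int  kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                     gpa_t gpa, unsigned long len);
void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);

/* After: the invariants (kvm, vcpu, usage) move to kvm_gpc_init(), and the
 * cache remembers its own gpa, so check/refresh only take a length. */
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
                  struct kvm_vcpu *vcpu, enum pfn_cache_usage usage);
int  kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len);
int  kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);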

6 files changed: 158 additions, 161 deletions


arch/x86/kvm/x86.c

Lines changed: 8 additions & 12 deletions

@@ -2311,13 +2311,11 @@ static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
 	kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
 
 	/* we verify if the enable bit is set... */
-	if (system_time & 1) {
-		kvm_gpc_activate(vcpu->kvm, &vcpu->arch.pv_time, vcpu,
-				 KVM_HOST_USES_PFN, system_time & ~1ULL,
+	if (system_time & 1)
+		kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL,
 				 sizeof(struct pvclock_vcpu_time_info));
-	} else {
-		kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time);
-	}
+	else
+		kvm_gpc_deactivate(&vcpu->arch.pv_time);
 
 	return;
 }
@@ -3047,12 +3045,10 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
 	unsigned long flags;
 
 	read_lock_irqsave(&gpc->lock, flags);
-	while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
-			      offset + sizeof(*guest_hv_clock))) {
+	while (!kvm_gpc_check(gpc, offset + sizeof(*guest_hv_clock))) {
 		read_unlock_irqrestore(&gpc->lock, flags);
 
-		if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
-				    offset + sizeof(*guest_hv_clock)))
+		if (kvm_gpc_refresh(gpc, offset + sizeof(*guest_hv_clock)))
 			return;
 
 		read_lock_irqsave(&gpc->lock, flags);
@@ -3401,7 +3397,7 @@ static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
 
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
-	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time);
+	kvm_gpc_deactivate(&vcpu->arch.pv_time);
 	vcpu->arch.time = 0;
 }
 
@@ -11559,7 +11555,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs_avail = ~0;
 	vcpu->arch.regs_dirty = ~0;
 
-	kvm_gpc_init(&vcpu->arch.pv_time);
+	kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm, vcpu, KVM_HOST_USES_PFN);
 
 	if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
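
The kvm_setup_guest_pvclock() hunk above shows the standard consumer pattern for these caches under the new API: take the cache's rwlock for read, check it, and refresh outside the lock when the check fails. A minimal sketch of that pattern follows; the function name and len parameter are illustrative, and the khva field is the cache's kernel mapping as used elsewhere in these files.

static void example_gpc_read(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, len)) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* Re-establish the mapping outside the lock; give up on error. */
		if (kvm_gpc_refresh(gpc, len))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/* ... access the mapping via gpc->khva while holding the read lock ... */

	read_unlock_irqrestore(&gpc->lock, flags);
}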

arch/x86/kvm/xen.c

Lines changed: 70 additions & 54 deletions

@@ -42,13 +42,12 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
 	int idx = srcu_read_lock(&kvm->srcu);
 
 	if (gfn == GPA_INVALID) {
-		kvm_gpc_deactivate(kvm, gpc);
+		kvm_gpc_deactivate(gpc);
 		goto out;
 	}
 
 	do {
-		ret = kvm_gpc_activate(kvm, gpc, NULL, KVM_HOST_USES_PFN, gpa,
-				       PAGE_SIZE);
+		ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE);
 		if (ret)
 			goto out;
 
@@ -273,14 +272,14 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 	 * gfn_to_pfn caches that cover the region.
 	 */
 	read_lock_irqsave(&gpc1->lock, flags);
-	while (!kvm_gpc_check(v->kvm, gpc1, gpc1->gpa, user_len1)) {
+	while (!kvm_gpc_check(gpc1, user_len1)) {
 		read_unlock_irqrestore(&gpc1->lock, flags);
 
 		/* When invoked from kvm_sched_out() we cannot sleep */
 		if (atomic)
 			return;
 
-		if (kvm_gpc_refresh(v->kvm, gpc1, gpc1->gpa, user_len1))
+		if (kvm_gpc_refresh(gpc1, user_len1))
 			return;
 
 		read_lock_irqsave(&gpc1->lock, flags);
@@ -309,7 +308,7 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 	 */
 	read_lock(&gpc2->lock);
 
-	if (!kvm_gpc_check(v->kvm, gpc2, gpc2->gpa, user_len2)) {
+	if (!kvm_gpc_check(gpc2, user_len2)) {
 		read_unlock(&gpc2->lock);
 		read_unlock_irqrestore(&gpc1->lock, flags);
 
@@ -323,8 +322,8 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 		 * to the second page now because the guest changed to
 		 * 64-bit mode, the second GPC won't have been set up.
 		 */
-		if (kvm_gpc_activate(v->kvm, gpc2, NULL, KVM_HOST_USES_PFN,
-				     gpc1->gpa + user_len1, user_len2))
+		if (kvm_gpc_activate(gpc2, gpc1->gpa + user_len1,
+				     user_len2))
 			return;
 
 		/*
@@ -489,12 +488,10 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 	 * little more honest about it.
 	 */
 	read_lock_irqsave(&gpc->lock, flags);
-	while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
-			      sizeof(struct vcpu_info))) {
+	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
 		read_unlock_irqrestore(&gpc->lock, flags);
 
-		if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
-				    sizeof(struct vcpu_info)))
+		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
 			return;
 
 		read_lock_irqsave(&gpc->lock, flags);
@@ -554,8 +551,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
 
 	read_lock_irqsave(&gpc->lock, flags);
-	while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
-			      sizeof(struct vcpu_info))) {
+	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
 		read_unlock_irqrestore(&gpc->lock, flags);
 
 		/*
@@ -569,8 +565,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 		if (in_atomic() || !task_is_running(current))
 			return 1;
 
-		if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
-				    sizeof(struct vcpu_info))) {
+		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info))) {
 			/*
 			 * If this failed, userspace has screwed up the
 			 * vcpu_info mapping. No interrupts for you.
@@ -711,31 +706,27 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 			     offsetof(struct compat_vcpu_info, time));
 
 		if (data->u.gpa == GPA_INVALID) {
-			kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
+			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
 			r = 0;
 			break;
 		}
 
-		r = kvm_gpc_activate(vcpu->kvm,
-				     &vcpu->arch.xen.vcpu_info_cache, NULL,
-				     KVM_HOST_USES_PFN, data->u.gpa,
-				     sizeof(struct vcpu_info));
+		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache,
+				     data->u.gpa, sizeof(struct vcpu_info));
 		if (!r)
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
 		break;
 
 	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
 		if (data->u.gpa == GPA_INVALID) {
-			kvm_gpc_deactivate(vcpu->kvm,
-					   &vcpu->arch.xen.vcpu_time_info_cache);
+			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
 			r = 0;
 			break;
 		}
 
-		r = kvm_gpc_activate(vcpu->kvm,
-				     &vcpu->arch.xen.vcpu_time_info_cache,
-				     NULL, KVM_HOST_USES_PFN, data->u.gpa,
+		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_time_info_cache,
+				     data->u.gpa,
 				     sizeof(struct pvclock_vcpu_time_info));
 		if (!r)
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -751,10 +742,8 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 		if (data->u.gpa == GPA_INVALID) {
 			r = 0;
 deactivate_out:
-			kvm_gpc_deactivate(vcpu->kvm,
-					   &vcpu->arch.xen.runstate_cache);
-			kvm_gpc_deactivate(vcpu->kvm,
-					   &vcpu->arch.xen.runstate2_cache);
+			kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
+			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
 			break;
 		}
 
@@ -770,20 +759,18 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 
 		/* How much fits in the (first) page? */
 		sz1 = PAGE_SIZE - (data->u.gpa & ~PAGE_MASK);
-		r = kvm_gpc_activate(vcpu->kvm, &vcpu->arch.xen.runstate_cache,
-				     NULL, KVM_HOST_USES_PFN, data->u.gpa, sz1);
+		r = kvm_gpc_activate(&vcpu->arch.xen.runstate_cache,
+				     data->u.gpa, sz1);
 		if (r)
 			goto deactivate_out;
 
 		/* Either map the second page, or deactivate the second GPC */
 		if (sz1 >= sz) {
-			kvm_gpc_deactivate(vcpu->kvm,
-					   &vcpu->arch.xen.runstate2_cache);
+			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
 		} else {
 			sz2 = sz - sz1;
 			BUG_ON((data->u.gpa + sz1) & ~PAGE_MASK);
-			r = kvm_gpc_activate(vcpu->kvm, &vcpu->arch.xen.runstate2_cache,
-					     NULL, KVM_HOST_USES_PFN,
+			r = kvm_gpc_activate(&vcpu->arch.xen.runstate2_cache,
					     data->u.gpa + sz1, sz2);
 			if (r)
 				goto deactivate_out;
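
To make the split in the runstate hunk above concrete, a worked example with illustrative numbers: for a runstate area of sz = 0x68 bytes whose GPA has page offset 0xf80, sz1 = PAGE_SIZE - (gpa & ~PAGE_MASK) = 0x1000 - 0xf80 = 0x80. Since sz1 (0x80) >= sz (0x68), the whole area fits in the first page and the second cache is deactivated. Had the page offset been 0xfc0, sz1 would be 0x40 < 0x68, so the second cache would be activated at gpa + sz1 (which is page-aligned, hence the BUG_ON) for the remaining sz2 = sz - sz1 = 0x28 bytes.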
@@ -1167,7 +1154,7 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
 
 	idx = srcu_read_lock(&kvm->srcu);
 	read_lock_irqsave(&gpc->lock, flags);
-	if (!kvm_gpc_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
+	if (!kvm_gpc_check(gpc, PAGE_SIZE))
 		goto out_rcu;
 
 	ret = false;
@@ -1201,20 +1188,45 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
 	evtchn_port_t port, *ports;
 	gpa_t gpa;
 
-	if (!longmode || !lapic_in_kernel(vcpu) ||
+	if (!lapic_in_kernel(vcpu) ||
 	    !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
 		return false;
 
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
-
-	if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &sched_poll,
-					sizeof(sched_poll))) {
+	if (!gpa) {
 		*r = -EFAULT;
 		return true;
 	}
 
+	if (IS_ENABLED(CONFIG_64BIT) && !longmode) {
+		struct compat_sched_poll sp32;
+
+		/* Sanity check that the compat struct definition is correct */
+		BUILD_BUG_ON(sizeof(sp32) != 16);
+
+		if (kvm_vcpu_read_guest(vcpu, gpa, &sp32, sizeof(sp32))) {
+			*r = -EFAULT;
+			return true;
+		}
+
+		/*
+		 * This is a 32-bit pointer to an array of evtchn_port_t which
+		 * are uint32_t, so once it's converted no further compat
+		 * handling is needed.
+		 */
+		sched_poll.ports = (void *)(unsigned long)(sp32.ports);
+		sched_poll.nr_ports = sp32.nr_ports;
+		sched_poll.timeout = sp32.timeout;
+	} else {
+		if (kvm_vcpu_read_guest(vcpu, gpa, &sched_poll,
+					sizeof(sched_poll))) {
+			*r = -EFAULT;
+			return true;
+		}
+	}
+
 	if (unlikely(sched_poll.nr_ports > 1)) {
 		/* Xen (unofficially) limits number of pollers to 128 */
 		if (sched_poll.nr_ports > 128) {
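
For comparison with the compat path added above, the native hypercall argument, roughly as defined by the kernel's Xen interface headers (include/xen/interface/sched.h), is sketched below. On a 64-bit guest the ports handle is a real pointer, which is why it is the only field that needs translation for a 32-bit guest.

/* Native layout (sketch; the handle macro hides an evtchn_port_t pointer). */
struct sched_poll {
	GUEST_HANDLE(evtchn_port_t) ports;
	unsigned int nr_ports;
	uint64_t timeout;
};

nr_ports and timeout have the same width in both ABIs, so once sp32.ports is widened to a kernel-sized pointer the remaining fields copy across unchanged, exactly as the comment in the hunk says.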
@@ -1564,7 +1576,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 	idx = srcu_read_lock(&kvm->srcu);
 
 	read_lock_irqsave(&gpc->lock, flags);
-	if (!kvm_gpc_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
+	if (!kvm_gpc_check(gpc, PAGE_SIZE))
 		goto out_rcu;
 
 	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
@@ -1598,7 +1610,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 	gpc = &vcpu->arch.xen.vcpu_info_cache;
 
 	read_lock_irqsave(&gpc->lock, flags);
-	if (!kvm_gpc_check(kvm, gpc, gpc->gpa, sizeof(struct vcpu_info))) {
+	if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
 		/*
 		 * Could not access the vcpu_info. Set the bit in-kernel
 		 * and prod the vCPU to deliver it for itself.
@@ -1696,7 +1708,7 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 			break;
 
 		idx = srcu_read_lock(&kvm->srcu);
-		rc = kvm_gpc_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
+		rc = kvm_gpc_refresh(gpc, PAGE_SIZE);
 		srcu_read_unlock(&kvm->srcu, idx);
 	} while(!rc);
 
@@ -2026,37 +2038,41 @@ void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
 
 	timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);
 
-	kvm_gpc_init(&vcpu->arch.xen.runstate_cache);
-	kvm_gpc_init(&vcpu->arch.xen.runstate2_cache);
-	kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache);
-	kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache);
+	kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL,
+		     KVM_HOST_USES_PFN);
+	kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL,
+		     KVM_HOST_USES_PFN);
+	kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL,
+		     KVM_HOST_USES_PFN);
+	kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL,
+		     KVM_HOST_USES_PFN);
 }
 
 void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
 {
 	if (kvm_xen_timer_enabled(vcpu))
 		kvm_xen_stop_timer(vcpu);
 
-	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.runstate_cache);
-	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.runstate2_cache);
-	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
-	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_time_info_cache);
+	kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
+	kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
+	kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
+	kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
 
 	del_timer_sync(&vcpu->arch.xen.poll_timer);
 }
 
 void kvm_xen_init_vm(struct kvm *kvm)
 {
 	idr_init(&kvm->arch.xen.evtchn_ports);
-	kvm_gpc_init(&kvm->arch.xen.shinfo_cache);
+	kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
 }
 
 void kvm_xen_destroy_vm(struct kvm *kvm)
 {
 	struct evtchnfd *evtchnfd;
 	int i;
 
-	kvm_gpc_deactivate(kvm, &kvm->arch.xen.shinfo_cache);
+	kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);
 
 	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
 		if (!evtchnfd->deliver.port.port)

arch/x86/kvm/xen.h

Lines changed: 7 additions & 0 deletions

@@ -207,4 +207,11 @@ struct compat_vcpu_runstate_info {
 	uint64_t time[4];
 } __attribute__((packed));
 
+struct compat_sched_poll {
+	/* This is actually a guest virtual address which points to ports. */
+	uint32_t ports;
+	unsigned int nr_ports;
+	uint64_t timeout;
+};
+
 #endif /* __ARCH_X86_KVM_XEN_H__ */
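
The field sizes here are what the BUILD_BUG_ON() in kvm_xen_schedop_poll() pins down: 4 bytes of ports plus 4 bytes of nr_ports leave the 8-byte timeout naturally aligned at offset 8, for 16 bytes total with no padding. A hypothetical stand-alone assertion of the same invariant (not part of the patch):

/* Hypothetical compile-time checks mirroring the BUILD_BUG_ON() above. */
static_assert(offsetof(struct compat_sched_poll, timeout) == 8,
	      "timeout must sit at offset 8 to match the 32-bit guest ABI");
static_assert(sizeof(struct compat_sched_poll) == 16,
	      "compat_sched_poll must be 16 bytes");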
