Skip to content

Commit 78a3a1e

Browse files
committed
amd64/vmm: Factor vcpu_notify_event() into two functions
vcpu_notify_event() previously took a boolean parameter which determined whether the implementation should try to use a posted interrupt. On arm64 and riscv, the implementation of vcpu_notify_event() is otherwise identical to that of amd64. With the aim of deduplicating vcpu state management code, introduce a separate amd64-only function which tries to use posted interrupts. This requires some duplication with vcpu_notify_event_locked(), but only a little bit. Then, fix up callers.

No functional change intended.

Reviewed by: corvink, jhb
MFC after: 2 weeks
Sponsored by: The FreeBSD Foundation
Sponsored by: Klara, Inc.
Differential Revision: https://reviews.freebsd.org/D53419
1 parent c3f41c0 commit 78a3a1e

File tree

4 files changed

+28
-21
lines changed

4 files changed

+28
-21
lines changed

sys/amd64/include/vmm.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -383,7 +383,8 @@ vcpu_should_yield(struct vcpu *vcpu)
383383
#endif
384384

385385
void *vcpu_stats(struct vcpu *vcpu);
386-
void vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr);
386+
void vcpu_notify_event(struct vcpu *vcpu);
387+
void vcpu_notify_lapic(struct vcpu *vcpu);
387388
struct vm_mem *vm_mem(struct vm *vm);
388389
struct vatpic *vm_atpic(struct vm *vm);
389390
struct vatpit *vm_atpit(struct vm *vm);

sys/amd64/vmm/io/vlapic.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -456,7 +456,7 @@ vlapic_fire_lvt(struct vlapic *vlapic, u_int lvt)
456456
return (0);
457457
}
458458
if (vlapic_set_intr_ready(vlapic, vec, false))
459-
vcpu_notify_event(vlapic->vcpu, true);
459+
vcpu_notify_lapic(vlapic->vcpu);
460460
break;
461461
case APIC_LVT_DM_NMI:
462462
vm_inject_nmi(vlapic->vcpu);

sys/amd64/vmm/vmm.c

Lines changed: 24 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -274,7 +274,7 @@ u_int vm_maxcpu;
274274
SYSCTL_UINT(_hw_vmm, OID_AUTO, maxcpu, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
275275
&vm_maxcpu, 0, "Maximum number of vCPUs");
276276

277-
static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
277+
static void vcpu_notify_event_locked(struct vcpu *vcpu);
278278

279279
/* global statistics */
280280
VMM_STAT(VCPU_MIGRATIONS, "vcpu migration across host cpus");
@@ -1028,7 +1028,7 @@ vcpu_wait_idle(struct vcpu *vcpu)
10281028
KASSERT(vcpu->state != VCPU_IDLE, ("vcpu already idle"));
10291029

10301030
vcpu->reqidle = 1;
1031-
vcpu_notify_event_locked(vcpu, false);
1031+
vcpu_notify_event_locked(vcpu);
10321032
VMM_CTR1(vcpu, "vcpu state change from %s to "
10331033
"idle requested", vcpu_state2str(vcpu->state));
10341034
msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
@@ -1509,7 +1509,7 @@ vm_handle_suspend(struct vcpu *vcpu, bool *retu)
15091509
*/
15101510
for (i = 0; i < vm->maxcpus; i++) {
15111511
if (CPU_ISSET(i, &vm->suspended_cpus)) {
1512-
vcpu_notify_event(vm_vcpu(vm, i), false);
1512+
vcpu_notify_event(vm_vcpu(vm, i));
15131513
}
15141514
}
15151515

@@ -1583,7 +1583,7 @@ vm_suspend(struct vm *vm, enum vm_suspend_how how)
15831583
*/
15841584
for (i = 0; i < vm->maxcpus; i++) {
15851585
if (CPU_ISSET(i, &vm->active_cpus))
1586-
vcpu_notify_event(vm_vcpu(vm, i), false);
1586+
vcpu_notify_event(vm_vcpu(vm, i));
15871587
}
15881588

15891589
return (0);
@@ -2063,7 +2063,7 @@ vm_inject_nmi(struct vcpu *vcpu)
20632063
{
20642064

20652065
vcpu->nmi_pending = 1;
2066-
vcpu_notify_event(vcpu, false);
2066+
vcpu_notify_event(vcpu);
20672067
return (0);
20682068
}
20692069

@@ -2090,7 +2090,7 @@ vm_inject_extint(struct vcpu *vcpu)
20902090
{
20912091

20922092
vcpu->extint_pending = 1;
2093-
vcpu_notify_event(vcpu, false);
2093+
vcpu_notify_event(vcpu);
20942094
return (0);
20952095
}
20962096

@@ -2261,14 +2261,14 @@ vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu)
22612261
vm->debug_cpus = vm->active_cpus;
22622262
for (int i = 0; i < vm->maxcpus; i++) {
22632263
if (CPU_ISSET(i, &vm->active_cpus))
2264-
vcpu_notify_event(vm_vcpu(vm, i), false);
2264+
vcpu_notify_event(vm_vcpu(vm, i));
22652265
}
22662266
} else {
22672267
if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
22682268
return (EINVAL);
22692269

22702270
CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
2271-
vcpu_notify_event(vcpu, false);
2271+
vcpu_notify_event(vcpu);
22722272
}
22732273
return (0);
22742274
}
@@ -2376,20 +2376,15 @@ vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
23762376
* to the host_cpu to cause the vcpu to trap into the hypervisor.
23772377
*/
23782378
static void
2379-
vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
2379+
vcpu_notify_event_locked(struct vcpu *vcpu)
23802380
{
23812381
int hostcpu;
23822382

23832383
hostcpu = vcpu->hostcpu;
23842384
if (vcpu->state == VCPU_RUNNING) {
23852385
KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
23862386
if (hostcpu != curcpu) {
2387-
if (lapic_intr) {
2388-
vlapic_post_intr(vcpu->vlapic, hostcpu,
2389-
vmm_ipinum);
2390-
} else {
2391-
ipi_cpu(hostcpu, vmm_ipinum);
2392-
}
2387+
ipi_cpu(hostcpu, vmm_ipinum);
23932388
} else {
23942389
/*
23952390
* If the 'vcpu' is running on 'curcpu' then it must
@@ -2407,10 +2402,21 @@ vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
24072402
}
24082403

24092404
void
2410-
vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr)
2405+
vcpu_notify_event(struct vcpu *vcpu)
2406+
{
2407+
vcpu_lock(vcpu);
2408+
vcpu_notify_event_locked(vcpu);
2409+
vcpu_unlock(vcpu);
2410+
}
2411+
2412+
void
2413+
vcpu_notify_lapic(struct vcpu *vcpu)
24112414
{
24122415
vcpu_lock(vcpu);
2413-
vcpu_notify_event_locked(vcpu, lapic_intr);
2416+
if (vcpu->state == VCPU_RUNNING && vcpu->hostcpu != curcpu)
2417+
vlapic_post_intr(vcpu->vlapic, vcpu->hostcpu, vmm_ipinum);
2418+
else
2419+
vcpu_notify_event_locked(vcpu);
24142420
vcpu_unlock(vcpu);
24152421
}
24162422

@@ -2472,7 +2478,7 @@ vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest,
24722478
*/
24732479
for (i = 0; i < vm->maxcpus; i++) {
24742480
if (CPU_ISSET(i, &dest))
2475-
vcpu_notify_event(vm_vcpu(vm, i), false);
2481+
vcpu_notify_event(vm_vcpu(vm, i));
24762482
}
24772483

24782484
return (vm_handle_rendezvous(vcpu));

sys/amd64/vmm/vmm_lapic.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ lapic_set_intr(struct vcpu *vcpu, int vector, bool level)
6161

6262
vlapic = vm_lapic(vcpu);
6363
if (vlapic_set_intr_ready(vlapic, vector, level))
64-
vcpu_notify_event(vcpu, true);
64+
vcpu_notify_lapic(vcpu);
6565
return (0);
6666
}
6767

0 commit comments

Comments (0)