Skip to content

Commit 8e01d9a

Browse files
author
Marc Zyngier
committed
KVM: arm64: vgic-v4: Move the GICv4 residency flow to be driven by vcpu_load/put
When the VHE code was reworked, a lot of the vgic stuff was moved around, but the GICv4 residency code did stay untouched, meaning that we come in and out of residency on each flush/sync, which is obviously suboptimal. To address this, let's move things around a bit: - Residency entry (flush) moves to vcpu_load - Residency exit (sync) moves to vcpu_put - On blocking (entry to WFI), we "put" - On unblocking (exit from WFI), we "load" Because these can nest (load/block/put/load/unblock/put, for example), we now have per-VPE tracking of the residency state. Additionally, vgic_v4_put gains a "need doorbell" parameter, which only gets set to true when blocking because of a WFI. This allows a finer control of the doorbell, which now also gets disabled as soon as it gets signaled. Signed-off-by: Marc Zyngier <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 5c40130 commit 8e01d9a

File tree

8 files changed

+48
-42
lines changed

8 files changed

+48
-42
lines changed

drivers/irqchip/irq-gic-v4.c

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -141,12 +141,17 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
141141
int its_schedule_vpe(struct its_vpe *vpe, bool on)
142142
{
143143
struct its_cmd_info info;
144+
int ret;
144145

145146
WARN_ON(preemptible());
146147

147148
info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
148149

149-
return its_send_vpe_cmd(vpe, &info);
150+
ret = its_send_vpe_cmd(vpe, &info);
151+
if (!ret)
152+
vpe->resident = on;
153+
154+
return ret;
150155
}
151156

152157
int its_invall_vpe(struct its_vpe *vpe)

include/kvm/arm_vgic.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -396,7 +396,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
396396
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
397397
struct kvm_kernel_irq_routing_entry *irq_entry);
398398

399-
void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu);
400-
void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu);
399+
int vgic_v4_load(struct kvm_vcpu *vcpu);
400+
int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);
401401

402402
#endif /* __KVM_ARM_VGIC_H */

include/linux/irqchip/arm-gic-v4.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,8 @@ struct its_vpe {
3535
/* Doorbell interrupt */
3636
int irq;
3737
irq_hw_number_t vpe_db_lpi;
38+
/* VPE resident */
39+
bool resident;
3840
/* VPE proxy mapping */
3941
int vpe_proxy_event;
4042
/*

virt/kvm/arm/arm.c

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -322,20 +322,24 @@ void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
322322
/*
323323
* If we're about to block (most likely because we've just hit a
324324
* WFI), we need to sync back the state of the GIC CPU interface
325-
* so that we have the lastest PMR and group enables. This ensures
325+
* so that we have the latest PMR and group enables. This ensures
326326
* that kvm_arch_vcpu_runnable has up-to-date data to decide
327327
* whether we have pending interrupts.
328+
*
329+
* For the same reason, we want to tell GICv4 that we need
330+
* doorbells to be signalled, should an interrupt become pending.
328331
*/
329332
preempt_disable();
330333
kvm_vgic_vmcr_sync(vcpu);
334+
vgic_v4_put(vcpu, true);
331335
preempt_enable();
332-
333-
kvm_vgic_v4_enable_doorbell(vcpu);
334336
}
335337

336338
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
337339
{
338-
kvm_vgic_v4_disable_doorbell(vcpu);
340+
preempt_disable();
341+
vgic_v4_load(vcpu);
342+
preempt_enable();
339343
}
340344

341345
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)

virt/kvm/arm/vgic/vgic-v3.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -664,6 +664,8 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
664664

665665
if (has_vhe())
666666
__vgic_v3_activate_traps(vcpu);
667+
668+
WARN_ON(vgic_v4_load(vcpu));
667669
}
668670

669671
void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
@@ -676,6 +678,8 @@ void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
676678

677679
void vgic_v3_put(struct kvm_vcpu *vcpu)
678680
{
681+
WARN_ON(vgic_v4_put(vcpu, false));
682+
679683
vgic_v3_vmcr_sync(vcpu);
680684

681685
kvm_call_hyp(__vgic_v3_save_aprs, vcpu);

virt/kvm/arm/vgic/vgic-v4.c

Lines changed: 26 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,10 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
8585
{
8686
struct kvm_vcpu *vcpu = info;
8787

88+
/* We got the message, no need to fire again */
89+
if (!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
90+
disable_irq_nosync(irq);
91+
8892
vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
8993
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
9094
kvm_vcpu_kick(vcpu);
@@ -192,20 +196,30 @@ void vgic_v4_teardown(struct kvm *kvm)
192196
its_vm->vpes = NULL;
193197
}
194198

195-
int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu)
199+
int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
196200
{
197-
if (!vgic_supports_direct_msis(vcpu->kvm))
201+
struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
202+
struct irq_desc *desc = irq_to_desc(vpe->irq);
203+
204+
if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
198205
return 0;
199206

200-
return its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, false);
207+
/*
208+
* If blocking, a doorbell is required. Undo the nested
209+
* disable_irq() calls...
210+
*/
211+
while (need_db && irqd_irq_disabled(&desc->irq_data))
212+
enable_irq(vpe->irq);
213+
214+
return its_schedule_vpe(vpe, false);
201215
}
202216

203-
int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
217+
int vgic_v4_load(struct kvm_vcpu *vcpu)
204218
{
205-
int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
219+
struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
206220
int err;
207221

208-
if (!vgic_supports_direct_msis(vcpu->kvm))
222+
if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
209223
return 0;
210224

211225
/*
@@ -214,21 +228,22 @@ int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
214228
* doc in drivers/irqchip/irq-gic-v4.c to understand how this
215229
* turns into a VMOVP command at the ITS level.
216230
*/
217-
err = irq_set_affinity(irq, cpumask_of(smp_processor_id()));
231+
err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
218232
if (err)
219233
return err;
220234

221-
err = its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, true);
235+
/* Disable the doorbell, as we're about to enter the guest */
236+
disable_irq_nosync(vpe->irq);
237+
238+
err = its_schedule_vpe(vpe, true);
222239
if (err)
223240
return err;
224241

225242
/*
226243
* Now that the VPE is resident, let's get rid of a potential
227244
* doorbell interrupt that would still be pending.
228245
*/
229-
err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
230-
231-
return err;
246+
return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
232247
}
233248

234249
static struct vgic_its *vgic_get_its(struct kvm *kvm,
@@ -335,21 +350,3 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
335350
mutex_unlock(&its->its_lock);
336351
return ret;
337352
}
338-
339-
void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu)
340-
{
341-
if (vgic_supports_direct_msis(vcpu->kvm)) {
342-
int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
343-
if (irq)
344-
enable_irq(irq);
345-
}
346-
}
347-
348-
void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu)
349-
{
350-
if (vgic_supports_direct_msis(vcpu->kvm)) {
351-
int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
352-
if (irq)
353-
disable_irq(irq);
354-
}
355-
}

virt/kvm/arm/vgic/vgic.c

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -857,8 +857,6 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
857857
{
858858
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
859859

860-
WARN_ON(vgic_v4_sync_hwstate(vcpu));
861-
862860
/* An empty ap_list_head implies used_lrs == 0 */
863861
if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
864862
return;
@@ -882,8 +880,6 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
882880
/* Flush our emulation state into the GIC hardware before entering the guest. */
883881
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
884882
{
885-
WARN_ON(vgic_v4_flush_hwstate(vcpu));
886-
887883
/*
888884
* If there are no virtual interrupts active or pending for this
889885
* VCPU, then there is no work to do and we can bail out without

virt/kvm/arm/vgic/vgic.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -316,7 +316,5 @@ void vgic_its_invalidate_cache(struct kvm *kvm);
316316
bool vgic_supports_direct_msis(struct kvm *kvm);
317317
int vgic_v4_init(struct kvm *kvm);
318318
void vgic_v4_teardown(struct kvm *kvm);
319-
int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu);
320-
int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu);
321319

322320
#endif

0 commit comments

Comments
 (0)