Commit f3f60a5

Marc Zyngier authored and oupton committed
KVM: arm64: vgic-v3: Refactor GICv3 SGI generation
As we're about to change the way SGIs are sent, start by splitting out
some of the basic functionality: instead of intermingling the broadcast
and non-broadcast cases with the actual SGI generation, perform the
following cleanups:

- move the SGI queuing into its own helper
- split the broadcast code from the affinity-driven code
- replace the mask/shift combinations with FIELD_GET()
- fix the confusion between vcpu_id and vcpu when handling the broadcast case

The result is much more readable, and paves the way for further
optimisations.

Tested-by: Joey Gouly <[email protected]>
Tested-by: Shameer Kolothum <[email protected]>
Reviewed-by: Zenghui Yu <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Oliver Upton <[email protected]>
1 parent d455d36 commit f3f60a5
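
For reference, the "mask/shift combinations with FIELD_GET()" item in the log is the usual <linux/bitfield.h> idiom, where the shift is derived from the (compile-time constant) mask instead of being spelled out separately. A minimal sketch of the before/after shape, using the ICC_SGI1R SGI-ID field touched by this patch; the helper names here are purely illustrative and assume the ICC_SGI1R_* definitions from asm/sysreg.h:

    #include <linux/bitfield.h>
    #include <asm/sysreg.h>

    /* Before: extract the SGI ID with an explicit mask and shift. */
    static inline u32 sgi_id_by_shift(u64 reg)
    {
            return (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
    }

    /* After: FIELD_GET() computes the shift from the mask itself. */
    static inline u32 sgi_id_by_field_get(u64 reg)
    {
            return FIELD_GET(ICC_SGI1R_SGI_ID_MASK, reg);
    }

Keeping only the mask as the single source of truth removes the chance of pairing a field's mask with the wrong shift constant.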

File tree: 1 file changed (+59, −51 lines)

arch/arm64/kvm/vgic/vgic-mmio-v3.c

Lines changed: 59 additions & 51 deletions
@@ -1052,6 +1052,38 @@ static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
         ((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
         >> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
 
+static void vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, u32 sgi, bool allow_group1)
+{
+        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, sgi);
+        unsigned long flags;
+
+        raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+        /*
+         * An access targeting Group0 SGIs can only generate
+         * those, while an access targeting Group1 SGIs can
+         * generate interrupts of either group.
+         */
+        if (!irq->group || allow_group1) {
+                if (!irq->hw) {
+                        irq->pending_latch = true;
+                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+                } else {
+                        /* HW SGI? Ask the GIC to inject it */
+                        int err;
+                        err = irq_set_irqchip_state(irq->host_irq,
+                                                    IRQCHIP_STATE_PENDING,
+                                                    true);
+                        WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+                        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+                }
+        } else {
+                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+        }
+
+        vgic_put_irq(vcpu->kvm, irq);
+}
+
 /**
  * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
  * @vcpu: The VCPU requesting a SGI
@@ -1070,19 +1102,30 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
 {
         struct kvm *kvm = vcpu->kvm;
         struct kvm_vcpu *c_vcpu;
-        u16 target_cpus;
+        unsigned long target_cpus;
         u64 mpidr;
-        int sgi;
-        int vcpu_id = vcpu->vcpu_id;
-        bool broadcast;
-        unsigned long c, flags;
-
-        sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
-        broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
-        target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
+        u32 sgi;
+        unsigned long c;
+
+        sgi = FIELD_GET(ICC_SGI1R_SGI_ID_MASK, reg);
+
+        /* Broadcast */
+        if (unlikely(reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT))) {
+                kvm_for_each_vcpu(c, c_vcpu, kvm) {
+                        /* Don't signal the calling VCPU */
+                        if (c_vcpu == vcpu)
+                                continue;
+
+                        vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1);
+                }
+
+                return;
+        }
+
         mpidr = SGI_AFFINITY_LEVEL(reg, 3);
         mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
         mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
+        target_cpus = FIELD_GET(ICC_SGI1R_TARGET_LIST_MASK, reg);
 
         /*
          * We iterate over all VCPUs to find the MPIDRs matching the request.
@@ -1091,54 +1134,19 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
          * VCPUs when most of the times we just signal a single VCPU.
          */
         kvm_for_each_vcpu(c, c_vcpu, kvm) {
-                struct vgic_irq *irq;
+                int level0;
 
                 /* Exit early if we have dealt with all requested CPUs */
-                if (!broadcast && target_cpus == 0)
+                if (target_cpus == 0)
                         break;
-
-                /* Don't signal the calling VCPU */
-                if (broadcast && c == vcpu_id)
+                level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
+                if (level0 == -1)
                         continue;
 
-                if (!broadcast) {
-                        int level0;
-
-                        level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
-                        if (level0 == -1)
-                                continue;
-
-                        /* remove this matching VCPU from the mask */
-                        target_cpus &= ~BIT(level0);
-                }
+                /* remove this matching VCPU from the mask */
+                target_cpus &= ~BIT(level0);
 
-                irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
-
-                raw_spin_lock_irqsave(&irq->irq_lock, flags);
-
-                /*
-                 * An access targeting Group0 SGIs can only generate
-                 * those, while an access targeting Group1 SGIs can
-                 * generate interrupts of either group.
-                 */
-                if (!irq->group || allow_group1) {
-                        if (!irq->hw) {
-                                irq->pending_latch = true;
-                                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
-                        } else {
-                                /* HW SGI? Ask the GIC to inject it */
-                                int err;
-                                err = irq_set_irqchip_state(irq->host_irq,
-                                                            IRQCHIP_STATE_PENDING,
-                                                            true);
-                                WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
-                                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-                        }
-                } else {
-                        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-                }
-
-                vgic_put_irq(vcpu->kvm, irq);
+                vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1);
         }
 }
 
