@@ -1052,6 +1052,38 @@ static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
 	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
 	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
 
+static void vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, u32 sgi, bool allow_group1)
+{
+	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, sgi);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+	/*
+	 * An access targeting Group0 SGIs can only generate
+	 * those, while an access targeting Group1 SGIs can
+	 * generate interrupts of either group.
+	 */
+	if (!irq->group || allow_group1) {
+		if (!irq->hw) {
+			irq->pending_latch = true;
+			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+		} else {
+			/* HW SGI? Ask the GIC to inject it */
+			int err;
+			err = irq_set_irqchip_state(irq->host_irq,
+						    IRQCHIP_STATE_PENDING,
+						    true);
+			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+		}
+	} else {
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+	}
+
+	vgic_put_irq(vcpu->kvm, irq);
+}
+
 /**
  * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
  * @vcpu: The VCPU requesting a SGI
@@ -1070,19 +1102,30 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
 {
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu *c_vcpu;
-	u16 target_cpus;
+	unsigned long target_cpus;
 	u64 mpidr;
-	int sgi;
-	int vcpu_id = vcpu->vcpu_id;
-	bool broadcast;
-	unsigned long c, flags;
-
-	sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
-	broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
-	target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
+	u32 sgi;
+	unsigned long c;
+
+	sgi = FIELD_GET(ICC_SGI1R_SGI_ID_MASK, reg);
+
+	/* Broadcast */
+	if (unlikely(reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT))) {
+		kvm_for_each_vcpu(c, c_vcpu, kvm) {
+			/* Don't signal the calling VCPU */
+			if (c_vcpu == vcpu)
+				continue;
+
+			vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1);
+		}
+
+		return;
+	}
+
 	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
 	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
 	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
+	target_cpus = FIELD_GET(ICC_SGI1R_TARGET_LIST_MASK, reg);
 
 	/*
 	 * We iterate over all VCPUs to find the MPIDRs matching the request.
@@ -1091,54 +1134,19 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
 	 * VCPUs when most of the times we just signal a single VCPU.
 	 */
 	kvm_for_each_vcpu(c, c_vcpu, kvm) {
-		struct vgic_irq *irq;
+		int level0;
 
 		/* Exit early if we have dealt with all requested CPUs */
-		if (!broadcast && target_cpus == 0)
+		if (target_cpus == 0)
 			break;
-
-		/* Don't signal the calling VCPU */
-		if (broadcast && c == vcpu_id)
+		level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
+		if (level0 == -1)
 			continue;
 
-		if (!broadcast) {
-			int level0;
-
-			level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
-			if (level0 == -1)
-				continue;
-
-			/* remove this matching VCPU from the mask */
-			target_cpus &= ~BIT(level0);
-		}
+		/* remove this matching VCPU from the mask */
+		target_cpus &= ~BIT(level0);
 
-		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
-
-		raw_spin_lock_irqsave(&irq->irq_lock, flags);
-
-		/*
-		 * An access targeting Group0 SGIs can only generate
-		 * those, while an access targeting Group1 SGIs can
-		 * generate interrupts of either group.
-		 */
-		if (!irq->group || allow_group1) {
-			if (!irq->hw) {
-				irq->pending_latch = true;
-				vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
-			} else {
-				/* HW SGI? Ask the GIC to inject it */
-				int err;
-				err = irq_set_irqchip_state(irq->host_irq,
-							    IRQCHIP_STATE_PENDING,
-							    true);
-				WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
-				raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-			}
-		} else {
-			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-		}
-
-		vgic_put_irq(vcpu->kvm, irq);
+		vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1);
 	}
 }
 
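As an aside, FIELD_GET() from <linux/bitfield.h> extracts a register field using only its mask, which is why the open-coded "(reg & MASK) >> SHIFT" lines above can be dropped. A minimal sketch of that equivalence, using a made-up EXAMPLE_MASK field rather than the real ICC_SGI1R_* definitions:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical 4-bit field in bits [27:24], for illustration only */
#define EXAMPLE_MASK	GENMASK_ULL(27, 24)

static inline u32 example_get_field(u64 reg)
{
	/*
	 * FIELD_GET() masks the register and shifts the result down by the
	 * field's lowest set bit, i.e. the same as (reg & EXAMPLE_MASK) >> 24.
	 */
	return FIELD_GET(EXAMPLE_MASK, reg);
}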