@@ -64,6 +64,11 @@ static int imsic_irq_retrigger(struct irq_data *d)
64
64
return 0 ;
65
65
}
66
66
67
+ /*
+  * Ack callback for the IMSIC base irqchip. irq_move_irq() carries out
+  * any pending deferred affinity move from interrupt context; deferral
+  * is requested via IRQCHIP_MOVE_DEFERRED (set for PCI MSI/MSI-X
+  * children in imsic_init_dev_msi_info()).
+  */
+ static void imsic_irq_ack (struct irq_data * d )
68
+ {
69
+ irq_move_irq (d );
70
+ }
71
+
67
72
static void imsic_irq_compose_vector_msg (struct imsic_vector * vec , struct msi_msg * msg )
68
73
{
69
74
phys_addr_t msi_addr ;
@@ -97,6 +102,21 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
97
102
bool force )
98
103
{
99
104
struct imsic_vector * old_vec , * new_vec ;
105
+ struct imsic_vector tmp_vec ;
106
+
107
+ /*
108
+ * Requirements for the downstream irqdomains (or devices):
109
+ *
110
+ * 1) Downstream irqdomains (or devices) with atomic MSI update can
111
+ * happily do imsic_irq_set_affinity() in the process-context on
112
+ * any CPU so the irqchip of such irqdomains must not set the
113
+ * IRQCHIP_MOVE_DEFERRED flag.
114
+ *
115
+ * 2) Downstream irqdomains (or devices) with non-atomic MSI update
116
+ * must use imsic_irq_set_affinity() in interrupt-context upon
117
+ * the next device interrupt so the irqchip of such irqdomains
118
+ * must set the IRQCHIP_MOVE_DEFERRED flag.
119
+ */
100
120
101
121
old_vec = irq_data_get_irq_chip_data (d );
102
122
if (WARN_ON (!old_vec ))
@@ -115,6 +135,32 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
115
135
if (!new_vec )
116
136
return - ENOSPC ;
117
137
138
+ /*
139
+ * Device having non-atomic MSI update might see an intermediate
140
+ * state when changing target IMSIC vector from one CPU to another.
141
+ *
142
+ * To avoid losing interrupt to such intermediate state, do the
143
+ * following (just like x86 APIC):
144
+ *
145
+ * 1) First write a temporary IMSIC vector to the device which
146
+ * has MSI address same as the old IMSIC vector but MSI data
147
+ * matches the new IMSIC vector.
148
+ *
149
+ * 2) Next write the new IMSIC vector to the device.
150
+ *
151
+ * Based on the above, __imsic_local_sync() must check pending
152
+ * status of both old MSI data and new MSI data on the old CPU.
153
+ */
154
+ if (!irq_can_move_in_process_context (d ) &&
155
+ new_vec -> local_id != old_vec -> local_id ) {
156
+ /* Setup temporary vector */
157
+ tmp_vec .cpu = old_vec -> cpu ;
158
+ tmp_vec .local_id = new_vec -> local_id ;
159
+
160
+ /* Point device to the temporary vector */
161
+ imsic_msi_update_msg (irq_get_irq_data (d -> irq ), & tmp_vec );
162
+ }
163
+
118
164
/* Point device to the new vector */
119
165
imsic_msi_update_msg (irq_get_irq_data (d -> irq ), new_vec );
120
166
@@ -163,17 +209,17 @@ static void imsic_irq_force_complete_move(struct irq_data *d)
163
209
#endif
164
210
165
211
static struct irq_chip imsic_irq_base_chip = {
166
- .name = "IMSIC" ,
167
- .irq_mask = imsic_irq_mask ,
168
- .irq_unmask = imsic_irq_unmask ,
212
+ .name = "IMSIC" ,
213
+ .irq_mask = imsic_irq_mask ,
214
+ .irq_unmask = imsic_irq_unmask ,
169
215
#ifdef CONFIG_SMP
170
- .irq_set_affinity = imsic_irq_set_affinity ,
171
- .irq_force_complete_move = imsic_irq_force_complete_move ,
216
+ .irq_set_affinity = imsic_irq_set_affinity ,
217
+ .irq_force_complete_move = imsic_irq_force_complete_move ,
172
218
#endif
173
- .irq_retrigger = imsic_irq_retrigger ,
174
- .irq_compose_msi_msg = imsic_irq_compose_msg ,
175
- .flags = IRQCHIP_SKIP_SET_WAKE |
176
- IRQCHIP_MASK_ON_SUSPEND ,
219
+ .irq_retrigger = imsic_irq_retrigger ,
+ /* ack performs the deferred irq migration; see imsic_irq_ack() */
220
+ .irq_ack = imsic_irq_ack ,
221
+ .irq_compose_msi_msg = imsic_irq_compose_msg ,
222
+ . flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND ,
177
223
};
178
224
179
225
static int imsic_irq_domain_alloc (struct irq_domain * domain , unsigned int virq ,
@@ -190,7 +236,7 @@ static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
190
236
return - ENOSPC ;
191
237
192
238
irq_domain_set_info (domain , virq , virq , & imsic_irq_base_chip , vec ,
193
- handle_simple_irq , NULL , NULL );
239
+ handle_edge_irq , NULL , NULL );
194
240
irq_set_noprobe (virq );
195
241
irq_set_affinity (virq , cpu_online_mask );
196
242
irq_data_update_effective_affinity (irq_get_irq_data (virq ), cpumask_of (vec -> cpu ));
@@ -229,15 +275,34 @@ static const struct irq_domain_ops imsic_base_domain_ops = {
229
275
#endif
230
276
};
231
277
278
+ /*
+  * Per-device MSI domain init hook. Runs the generic msi-lib setup
+  * first, then marks PCI MSI/MSI-X child irqchips with
+  * IRQCHIP_MOVE_DEFERRED because their MSI message updates are
+  * non-atomic (see the requirements comment in
+  * imsic_irq_set_affinity()). Returns false if the msi-lib setup
+  * fails, true otherwise.
+  */
+ static bool imsic_init_dev_msi_info (struct device * dev , struct irq_domain * domain ,
279
+ struct irq_domain * real_parent , struct msi_domain_info * info )
280
+ {
281
+ if (!msi_lib_init_dev_msi_info (dev , domain , real_parent , info ))
282
+ return false;
283
+
284
+ switch (info -> bus_token ) {
285
+ case DOMAIN_BUS_PCI_DEVICE_MSI :
286
+ case DOMAIN_BUS_PCI_DEVICE_MSIX :
+ /* PCI devices: defer affinity moves to interrupt context */
287
+ info -> chip -> flags |= IRQCHIP_MOVE_DEFERRED ;
288
+ break ;
289
+ default :
+ /* other buses keep process-context affinity updates */
290
+ break ;
291
+ }
292
+
293
+ return true;
294
+ }
295
+
232
296
static const struct msi_parent_ops imsic_msi_parent_ops = {
233
297
.supported_flags = MSI_GENERIC_FLAGS_MASK |
234
298
MSI_FLAG_PCI_MSIX ,
235
299
.required_flags = MSI_FLAG_USE_DEF_DOM_OPS |
236
300
MSI_FLAG_USE_DEF_CHIP_OPS |
237
301
MSI_FLAG_PCI_MSI_MASK_PARENT ,
302
+ /* NOTE(review): presumably tells msi-lib to install the ack chip op
+  * on child irqchips so imsic_irq_ack() runs — confirm against
+  * MSI_CHIP_FLAG_SET_ACK semantics in include/linux/msi.h.
+  */
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK ,
238
303
.bus_select_token = DOMAIN_BUS_NEXUS ,
239
304
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI ,
240
- .init_dev_msi_info = msi_lib_init_dev_msi_info ,
305
+ /* local wrapper adds IRQCHIP_MOVE_DEFERRED for PCI children */
+ .init_dev_msi_info = imsic_init_dev_msi_info ,
241
306
};
242
307
243
308
int imsic_irqdomain_init (void )
0 commit comments