 #define NR_HW_IRQS              16
 #define NR_MSI_VEC              (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)
 
-struct xgene_msi_group {
-        struct xgene_msi        *msi;
-        int                     gic_irq;
-        u32                     msi_grp;
-};
-
 struct xgene_msi {
         struct irq_domain       *inner_domain;
         u64                     msi_addr;
         void __iomem            *msi_regs;
         unsigned long           *bitmap;
         struct mutex            bitmap_lock;
-        struct xgene_msi_group  *msi_groups;
+        unsigned int            gic_irq[NR_HW_IRQS];
 };
 
 /* Global data */
@@ -261,27 +255,20 @@ static int xgene_msi_init_allocator(struct device *dev)
         mutex_init(&xgene_msi_ctrl->bitmap_lock);
 
-        xgene_msi_ctrl->msi_groups = devm_kcalloc(dev, NR_HW_IRQS,
-                                                  sizeof(struct xgene_msi_group),
-                                                  GFP_KERNEL);
-        if (!xgene_msi_ctrl->msi_groups)
-                return -ENOMEM;
-
         return 0;
 }
 
 static void xgene_msi_isr(struct irq_desc *desc)
 {
+        unsigned int *irqp = irq_desc_get_handler_data(desc);
         struct irq_chip *chip = irq_desc_get_chip(desc);
         struct xgene_msi *xgene_msi = xgene_msi_ctrl;
-        struct xgene_msi_group *msi_groups;
         int msir_index, msir_val, hw_irq, ret;
         u32 intr_index, grp_select, msi_grp;
 
         chained_irq_enter(chip, desc);
 
-        msi_groups = irq_desc_get_handler_data(desc);
-        msi_grp = msi_groups->msi_grp;
+        msi_grp = irqp - xgene_msi->gic_irq;
 
         /*
          * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt
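
Note on the new handler-data scheme in this hunk: instead of a per-group struct, the chained handler now receives a pointer to one element of the driver's gic_irq[] array, and the MSI group number is recovered by subtracting the array base from that pointer. The following standalone userspace sketch (not part of the driver; it only reuses the gic_irq/NR_HW_IRQS names from the patch for illustration) shows the idiom:

#include <stdio.h>

#define NR_HW_IRQS 16

static unsigned int gic_irq[NR_HW_IRQS];

/* Handler data is &gic_irq[grp]; subtracting the array base gives grp back. */
static unsigned int grp_from_handler_data(unsigned int *irqp)
{
        return irqp - gic_irq;
}

int main(void)
{
        printf("%u\n", grp_from_handler_data(&gic_irq[5])); /* prints 5 */
        return 0;
}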
@@ -341,35 +328,31 @@ static void xgene_msi_remove(struct platform_device *pdev)
         cpuhp_remove_state(pci_xgene_online);
         cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD);
 
-        kfree(msi->msi_groups);
-
         xgene_free_domains(msi);
 }
 
 static int xgene_msi_hwirq_alloc(unsigned int cpu)
 {
-        struct xgene_msi *msi = xgene_msi_ctrl;
-        struct xgene_msi_group *msi_group;
         int i;
         int err;
 
         for (i = cpu; i < NR_HW_IRQS; i += num_possible_cpus()) {
-                msi_group = &msi->msi_groups[i];
+                unsigned int irq = xgene_msi_ctrl->gic_irq[i];
 
                 /*
                  * Statically allocate MSI GIC IRQs to each CPU core.
                  * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
                  * to each core.
                  */
-                irq_set_status_flags(msi_group->gic_irq, IRQ_NO_BALANCING);
-                err = irq_set_affinity(msi_group->gic_irq, cpumask_of(cpu));
+                irq_set_status_flags(irq, IRQ_NO_BALANCING);
+                err = irq_set_affinity(irq, cpumask_of(cpu));
                 if (err) {
                         pr_err("failed to set affinity for GIC IRQ");
                         return err;
                 }
 
-                irq_set_chained_handler_and_data(msi_group->gic_irq,
-                                                 xgene_msi_isr, msi_group);
+                irq_set_chained_handler_and_data(irq, xgene_msi_isr,
+                                                 &xgene_msi_ctrl->gic_irq[i]);
         }
 
         return 0;
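
The striding loop above, for (i = cpu; i < NR_HW_IRQS; i += num_possible_cpus()), is what spreads the 16 MSI GIC IRQs across cores; the "2 per core on 8-core X-Gene v1" in the comment follows directly from 16 / 8. A small standalone sketch of the same distribution, with a hard-coded core count purely for illustration:

#include <stdio.h>

#define NR_HW_IRQS 16

int main(void)
{
        unsigned int ncpus = 8; /* illustrative core count (8-core X-Gene v1) */

        for (unsigned int cpu = 0; cpu < ncpus; cpu++) {
                printf("cpu%u:", cpu);
                for (unsigned int i = cpu; i < NR_HW_IRQS; i += ncpus)
                        printf(" grp%u", i); /* each core ends up with 16/8 = 2 groups */
                printf("\n");
        }
        return 0;
}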
@@ -378,14 +361,11 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu)
 static int xgene_msi_hwirq_free(unsigned int cpu)
 {
         struct xgene_msi *msi = xgene_msi_ctrl;
-        struct xgene_msi_group *msi_group;
         int i;
 
-        for (i = cpu; i < NR_HW_IRQS; i += num_possible_cpus()) {
-                msi_group = &msi->msi_groups[i];
-                irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
-                                                 NULL);
-        }
+        for (i = cpu; i < NR_HW_IRQS; i += num_possible_cpus())
+                irq_set_chained_handler_and_data(msi->gic_irq[i], NULL, NULL);
+
         return 0;
 }
@@ -397,10 +377,9 @@ static const struct of_device_id xgene_msi_match_table[] = {
 static int xgene_msi_probe(struct platform_device *pdev)
 {
         struct resource *res;
-        int rc, irq_index;
         struct xgene_msi *xgene_msi;
-        int virt_msir;
         u32 msi_val, msi_idx;
+        int rc;
 
         xgene_msi_ctrl = devm_kzalloc(&pdev->dev, sizeof(*xgene_msi_ctrl),
                                       GFP_KERNEL);
@@ -430,23 +409,20 @@ static int xgene_msi_probe(struct platform_device *pdev)
                 goto error;
         }
 
-        for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
-                virt_msir = platform_get_irq(pdev, irq_index);
-                if (virt_msir < 0) {
-                        rc = virt_msir;
+        for (int irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
+                rc = platform_get_irq(pdev, irq_index);
+                if (rc < 0)
                         goto error;
-                }
-                xgene_msi->msi_groups[irq_index].gic_irq = virt_msir;
-                xgene_msi->msi_groups[irq_index].msi_grp = irq_index;
-                xgene_msi->msi_groups[irq_index].msi = xgene_msi;
+
+                xgene_msi->gic_irq[irq_index] = rc;
         }
 
         /*
          * MSInIRx registers are read-to-clear; before registering
          * interrupt handlers, read all of them to clear spurious
          * interrupts that may occur before the driver is probed.
          */
-        for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
+        for (int irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
                 for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
                         xgene_msi_ir_read(xgene_msi, irq_index, msi_idx);
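
One detail worth noting about the probe loop in this hunk: platform_get_irq() returns either a negative errno or the Linux IRQ number, which is why the single rc variable can serve both as the error code for the goto and as the value stored into gic_irq[]. A minimal standalone sketch of that pattern, with a made-up get_irq_stub() standing in for platform_get_irq():

#include <errno.h>
#include <stdio.h>

#define NR_HW_IRQS 16

static unsigned int gic_irq[NR_HW_IRQS];

/* Hypothetical stand-in: fails for index 3, otherwise returns a fake IRQ number. */
static int get_irq_stub(int index)
{
        return (index == 3) ? -ENXIO : 100 + index;
}

int main(void)
{
        for (int i = 0; i < NR_HW_IRQS; i++) {
                int rc = get_irq_stub(i);

                if (rc < 0) {
                        fprintf(stderr, "index %d: error %d\n", i, rc);
                        return 1;
                }
                gic_irq[i] = rc; /* the same variable carries either the error or the value */
        }
        return 0;
}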