@@ -31,14 +31,12 @@ struct xgene_msi_group {
 };
 
 struct xgene_msi {
-	struct device_node	*node;
 	struct irq_domain	*inner_domain;
 	u64			msi_addr;
 	void __iomem		*msi_regs;
 	unsigned long		*bitmap;
 	struct mutex		bitmap_lock;
 	struct xgene_msi_group	*msi_groups;
-	int			num_cpus;
 };
 
 /* Global data */
@@ -147,7 +145,7 @@ static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
  */
 static int hwirq_to_cpu(unsigned long hwirq)
 {
-	return (hwirq % xgene_msi_ctrl.num_cpus);
+	return (hwirq % num_possible_cpus());
 }
 
 static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
@@ -186,9 +184,9 @@ static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 	mutex_lock(&msi->bitmap_lock);
 
 	msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0,
-					     msi->num_cpus, 0);
+					     num_possible_cpus(), 0);
 	if (msi_irq < NR_MSI_VEC)
-		bitmap_set(msi->bitmap, msi_irq, msi->num_cpus);
+		bitmap_set(msi->bitmap, msi_irq, num_possible_cpus());
 	else
 		msi_irq = -ENOSPC;
@@ -214,7 +212,7 @@ static void xgene_irq_domain_free(struct irq_domain *domain,
 	mutex_lock(&msi->bitmap_lock);
 
 	hwirq = hwirq_to_canonical_hwirq(d->hwirq);
-	bitmap_clear(msi->bitmap, hwirq, msi->num_cpus);
+	bitmap_clear(msi->bitmap, hwirq, num_possible_cpus());
 
 	mutex_unlock(&msi->bitmap_lock);
@@ -235,10 +233,11 @@ static const struct msi_parent_ops xgene_msi_parent_ops = {
 	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
 };
 
-static int xgene_allocate_domains(struct xgene_msi *msi)
+static int xgene_allocate_domains(struct device_node *node,
+				  struct xgene_msi *msi)
 {
 	struct irq_domain_info info = {
-		.fwnode		= of_fwnode_handle(msi->node),
+		.fwnode		= of_fwnode_handle(node),
 		.ops		= &xgene_msi_domain_ops,
 		.size		= NR_MSI_VEC,
 		.host_data	= msi,
@@ -358,7 +357,7 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu)
 	int i;
 	int err;
 
-	for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
+	for (i = cpu; i < NR_HW_IRQS; i += num_possible_cpus()) {
 		msi_group = &msi->msi_groups[i];
 
 		/*
@@ -386,7 +385,7 @@ static int xgene_msi_hwirq_free(unsigned int cpu)
 	struct xgene_msi_group *msi_group;
 	int i;
 
-	for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
+	for (i = cpu; i < NR_HW_IRQS; i += num_possible_cpus()) {
 		msi_group = &msi->msi_groups[i];
 		irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
 						 NULL);
@@ -417,16 +416,14 @@ static int xgene_msi_probe(struct platform_device *pdev)
 		goto error;
 	}
 	xgene_msi->msi_addr = res->start;
-	xgene_msi->node = pdev->dev.of_node;
-	xgene_msi->num_cpus = num_possible_cpus();
 
 	rc = xgene_msi_init_allocator(xgene_msi);
 	if (rc) {
 		dev_err(&pdev->dev, "Error allocating MSI bitmap\n");
 		goto error;
 	}
 
-	rc = xgene_allocate_domains(xgene_msi);
+	rc = xgene_allocate_domains(dev_of_node(&pdev->dev), xgene_msi);
 	if (rc) {
 		dev_err(&pdev->dev, "Failed to allocate MSI domain\n");
 		goto error;