@@ -213,7 +213,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
 
 	/* Fill structure */
 	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
-	mutex_init(&domain->revmap_tree_mutex);
+	mutex_init(&domain->revmap_mutex);
 	domain->ops = ops;
 	domain->host_data = host_data;
 	domain->hwirq_max = hwirq_max;
@@ -504,13 +504,12 @@ static void irq_domain_clear_mapping(struct irq_domain *domain,
 	if (irq_domain_is_nomap(domain))
 		return;
 
-	if (hwirq < domain->revmap_size) {
-		domain->revmap[hwirq] = NULL;
-	} else {
-		mutex_lock(&domain->revmap_tree_mutex);
+	mutex_lock(&domain->revmap_mutex);
+	if (hwirq < domain->revmap_size)
+		rcu_assign_pointer(domain->revmap[hwirq], NULL);
+	else
 		radix_tree_delete(&domain->revmap_tree, hwirq);
-		mutex_unlock(&domain->revmap_tree_mutex);
-	}
+	mutex_unlock(&domain->revmap_mutex);
 }
 
 static void irq_domain_set_mapping(struct irq_domain *domain,
@@ -520,13 +519,12 @@ static void irq_domain_set_mapping(struct irq_domain *domain,
 	if (irq_domain_is_nomap(domain))
 		return;
 
-	if (hwirq < domain->revmap_size) {
-		domain->revmap[hwirq] = irq_data;
-	} else {
-		mutex_lock(&domain->revmap_tree_mutex);
+	mutex_lock(&domain->revmap_mutex);
+	if (hwirq < domain->revmap_size)
+		rcu_assign_pointer(domain->revmap[hwirq], irq_data);
+	else
 		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
-		mutex_unlock(&domain->revmap_tree_mutex);
-	}
+	mutex_unlock(&domain->revmap_mutex);
 }
 
 static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
@@ -911,12 +909,12 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
 		return 0;
 	}
 
+	rcu_read_lock();
 	/* Check if the hwirq is in the linear revmap. */
 	if (hwirq < domain->revmap_size)
-		return domain->revmap[hwirq]->irq;
-
-	rcu_read_lock();
-	data = radix_tree_lookup(&domain->revmap_tree, hwirq);
+		data = rcu_dereference(domain->revmap[hwirq]);
+	else
+		data = radix_tree_lookup(&domain->revmap_tree, hwirq);
 	rcu_read_unlock();
 	return data ? data->irq : 0;
 }
@@ -1499,18 +1497,17 @@ static void irq_domain_fix_revmap(struct irq_data *d)
 	if (irq_domain_is_nomap(d->domain))
 		return;
 
+	/* Fix up the revmap. */
+	mutex_lock(&d->domain->revmap_mutex);
 	if (d->hwirq < d->domain->revmap_size) {
 		/* Not using radix tree */
-		d->domain->revmap[d->hwirq] = d;
-		return;
+		rcu_assign_pointer(d->domain->revmap[d->hwirq], d);
+	} else {
+		slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
+		if (slot)
+			radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
 	}
-
-	/* Fix up the revmap. */
-	mutex_lock(&d->domain->revmap_tree_mutex);
-	slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
-	if (slot)
-		radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
-	mutex_unlock(&d->domain->revmap_tree_mutex);
+	mutex_unlock(&d->domain->revmap_mutex);
 }
 
 /**
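
Note (not part of the patch): the hunks above convert the linear revmap to the usual RCU publish/read pattern — writers serialize on domain->revmap_mutex and publish with rcu_assign_pointer(), while irq_find_mapping() looks up locklessly under rcu_read_lock()/rcu_dereference(). A minimal standalone sketch of that pattern follows; the names (example_map, example_item, example_publish, example_lookup) are hypothetical and only illustrate the idiom, assuming a kernel build environment.

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct example_item {
	int value;
};

static DEFINE_MUTEX(example_mutex);
static struct example_item __rcu *example_map[16];

/* Writer side: serialize updates on a mutex, publish with rcu_assign_pointer(). */
static void example_publish(unsigned int idx, struct example_item *item)
{
	mutex_lock(&example_mutex);
	rcu_assign_pointer(example_map[idx], item);
	mutex_unlock(&example_mutex);
}

/* Reader side: lockless lookup; copy out what is needed before leaving the RCU section. */
static int example_lookup(unsigned int idx)
{
	struct example_item *item;
	int val = 0;

	rcu_read_lock();
	item = rcu_dereference(example_map[idx]);
	if (item)
		val = item->value;
	rcu_read_unlock();

	return val;
}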