@@ -92,11 +92,23 @@ static void desc_smp_init(struct irq_desc *desc, int node,
 #endif
 }
 
+static void free_masks(struct irq_desc *desc)
+{
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	free_cpumask_var(desc->pending_mask);
+#endif
+	free_cpumask_var(desc->irq_common_data.affinity);
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	free_cpumask_var(desc->irq_common_data.effective_affinity);
+#endif
+}
+
 #else
 static inline int
 alloc_masks(struct irq_desc *desc, int node) { return 0; }
 static inline void
 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
+static inline void free_masks(struct irq_desc *desc) { }
 #endif
 
 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
@@ -165,6 +177,39 @@ static void delete_irq_desc(unsigned int irq)
 	mas_erase(&mas);
 }
 
+#ifdef CONFIG_SPARSE_IRQ
+static const struct kobj_type irq_kobj_type;
+#endif
+
+static int init_desc(struct irq_desc *desc, int irq, int node,
+		     unsigned int flags,
+		     const struct cpumask *affinity,
+		     struct module *owner)
+{
+	desc->kstat_irqs = alloc_percpu(unsigned int);
+	if (!desc->kstat_irqs)
+		return -ENOMEM;
+
+	if (alloc_masks(desc, node)) {
+		free_percpu(desc->kstat_irqs);
+		return -ENOMEM;
+	}
+
+	raw_spin_lock_init(&desc->lock);
+	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+	mutex_init(&desc->request_mutex);
+	init_waitqueue_head(&desc->wait_for_threads);
+	desc_set_defaults(irq, desc, node, affinity, owner);
+	irqd_set(&desc->irq_data, flags);
+	irq_resend_init(desc);
+#ifdef CONFIG_SPARSE_IRQ
+	kobject_init(&desc->kobj, &irq_kobj_type);
+	init_rcu_head(&desc->rcu);
+#endif
+
+	return 0;
+}
+
 #ifdef CONFIG_SPARSE_IRQ
 
 static void irq_kobj_release(struct kobject *kobj);
@@ -384,21 +429,6 @@ struct irq_desc *irq_to_desc(unsigned int irq)
 EXPORT_SYMBOL_GPL(irq_to_desc);
 #endif
 
-#ifdef CONFIG_SMP
-static void free_masks(struct irq_desc *desc)
-{
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	free_cpumask_var(desc->pending_mask);
-#endif
-	free_cpumask_var(desc->irq_common_data.affinity);
-#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
-	free_cpumask_var(desc->irq_common_data.effective_affinity);
-#endif
-}
-#else
-static inline void free_masks(struct irq_desc *desc) { }
-#endif
-
 void irq_lock_sparse(void)
 {
 	mutex_lock(&sparse_irq_lock);
@@ -414,36 +444,19 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
 				   struct module *owner)
 {
 	struct irq_desc *desc;
+	int ret;
 
 	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
 	if (!desc)
 		return NULL;
-	/* allocate based on nr_cpu_ids */
-	desc->kstat_irqs = alloc_percpu(unsigned int);
-	if (!desc->kstat_irqs)
-		goto err_desc;
-
-	if (alloc_masks(desc, node))
-		goto err_kstat;
 
-	raw_spin_lock_init(&desc->lock);
-	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-	mutex_init(&desc->request_mutex);
-	init_rcu_head(&desc->rcu);
-	init_waitqueue_head(&desc->wait_for_threads);
-
-	desc_set_defaults(irq, desc, node, affinity, owner);
-	irqd_set(&desc->irq_data, flags);
-	kobject_init(&desc->kobj, &irq_kobj_type);
-	irq_resend_init(desc);
+	ret = init_desc(desc, irq, node, flags, affinity, owner);
+	if (unlikely(ret)) {
+		kfree(desc);
+		return NULL;
+	}
 
 	return desc;
-
-err_kstat:
-	free_percpu(desc->kstat_irqs);
-err_desc:
-	kfree(desc);
-	return NULL;
 }
 
 static void irq_kobj_release(struct kobject *kobj)
@@ -583,26 +596,29 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 int __init early_irq_init(void)
 {
 	int count, i, node = first_online_node;
-	struct irq_desc *desc;
+	int ret;
 
 	init_irq_default_affinity();
 
 	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);
 
-	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);
 
 	for (i = 0; i < count; i++) {
-		desc[i].kstat_irqs = alloc_percpu(unsigned int);
-		alloc_masks(&desc[i], node);
-		raw_spin_lock_init(&desc[i].lock);
-		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-		mutex_init(&desc[i].request_mutex);
-		init_waitqueue_head(&desc[i].wait_for_threads);
-		desc_set_defaults(i, &desc[i], node, NULL, NULL);
-		irq_resend_init(&desc[i]);
+		ret = init_desc(irq_desc + i, i, node, 0, NULL, NULL);
+		if (unlikely(ret))
+			goto __free_desc_res;
 	}
+
 	return arch_early_irq_init();
+
+__free_desc_res:
+	while (--i >= 0) {
+		free_masks(irq_desc + i);
+		free_percpu(irq_desc[i].kstat_irqs);
+	}
+
+	return ret;
 }
 
 struct irq_desc *irq_to_desc(unsigned int irq)