
Commit 2265324

Dawei Li authored and KAGA-KOKO committed
genirq: Deduplicate interrupt descriptor initialization
alloc_desc() and early_irq_init() contain duplicated code to initialize
interrupt descriptors. Replace that with a helper function.

Suggested-by: Marc Zyngier <[email protected]>
Signed-off-by: Dawei Li <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 9676635 commit 2265324
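For reference, the shape of the helper this commit introduces (its full body
appears in the diff below). Both call sites shrink to a single call plus the
allocation and cleanup that is unique to each of them:

	static int init_desc(struct irq_desc *desc, int irq, int node,
			     unsigned int flags,
			     const struct cpumask *affinity,
			     struct module *owner);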

1 file changed, +64 -48 lines changed


kernel/irq/irqdesc.c

Lines changed: 64 additions & 48 deletions
@@ -92,11 +92,23 @@ static void desc_smp_init(struct irq_desc *desc, int node,
 #endif
 }
 
+static void free_masks(struct irq_desc *desc)
+{
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	free_cpumask_var(desc->pending_mask);
+#endif
+	free_cpumask_var(desc->irq_common_data.affinity);
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	free_cpumask_var(desc->irq_common_data.effective_affinity);
+#endif
+}
+
 #else
 static inline int
 alloc_masks(struct irq_desc *desc, int node) { return 0; }
 static inline void
 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
+static inline void free_masks(struct irq_desc *desc) { }
 #endif
 
 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
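Moving free_masks() up here, together with the new !SMP stub, lets later code
call it unconditionally; the rollback path added to early_irq_init() in the
final hunk depends on exactly that. A minimal sketch of the idiom, with a
hypothetical undo_desc() helper standing in for the real error path:

	/* Hypothetical illustration: free_masks() is callable on any config;
	 * on !CONFIG_SMP builds the inline stub compiles to nothing.
	 */
	static void undo_desc(struct irq_desc *desc)
	{
		free_masks(desc);              /* no-op on UP kernels */
		free_percpu(desc->kstat_irqs); /* release per-CPU stats */
	}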
@@ -165,6 +177,39 @@ static void delete_irq_desc(unsigned int irq)
 	mas_erase(&mas);
 }
 
+#ifdef CONFIG_SPARSE_IRQ
+static const struct kobj_type irq_kobj_type;
+#endif
+
+static int init_desc(struct irq_desc *desc, int irq, int node,
+		     unsigned int flags,
+		     const struct cpumask *affinity,
+		     struct module *owner)
+{
+	desc->kstat_irqs = alloc_percpu(unsigned int);
+	if (!desc->kstat_irqs)
+		return -ENOMEM;
+
+	if (alloc_masks(desc, node)) {
+		free_percpu(desc->kstat_irqs);
+		return -ENOMEM;
+	}
+
+	raw_spin_lock_init(&desc->lock);
+	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+	mutex_init(&desc->request_mutex);
+	init_waitqueue_head(&desc->wait_for_threads);
+	desc_set_defaults(irq, desc, node, affinity, owner);
+	irqd_set(&desc->irq_data, flags);
+	irq_resend_init(desc);
+#ifdef CONFIG_SPARSE_IRQ
+	kobject_init(&desc->kobj, &irq_kobj_type);
+	init_rcu_head(&desc->rcu);
+#endif
+
+	return 0;
+}
+
 #ifdef CONFIG_SPARSE_IRQ
 
 static void irq_kobj_release(struct kobject *kobj);
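Two details worth noting in init_desc(). First, kobject_init() references
irq_kobj_type, whose definition sits further down in the CONFIG_SPARSE_IRQ
section, hence the forward declaration above it. Second, init_desc() unwinds
its own allocations on failure, so a caller only releases the memory it
allocated itself; sketched, this is the contract alloc_desc() adopts below:

	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
	if (!desc)
		return NULL;
	/* on error, init_desc() already freed kstat_irqs and the masks */
	if (init_desc(desc, irq, node, flags, affinity, owner)) {
		kfree(desc);
		return NULL;
	}
	return desc;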
@@ -384,21 +429,6 @@ struct irq_desc *irq_to_desc(unsigned int irq)
 EXPORT_SYMBOL_GPL(irq_to_desc);
 #endif
 
-#ifdef CONFIG_SMP
-static void free_masks(struct irq_desc *desc)
-{
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	free_cpumask_var(desc->pending_mask);
-#endif
-	free_cpumask_var(desc->irq_common_data.affinity);
-#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
-	free_cpumask_var(desc->irq_common_data.effective_affinity);
-#endif
-}
-#else
-static inline void free_masks(struct irq_desc *desc) { }
-#endif
-
 void irq_lock_sparse(void)
 {
 	mutex_lock(&sparse_irq_lock);
@@ -414,36 +444,19 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
 				   struct module *owner)
 {
 	struct irq_desc *desc;
+	int ret;
 
 	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
 	if (!desc)
 		return NULL;
-	/* allocate based on nr_cpu_ids */
-	desc->kstat_irqs = alloc_percpu(unsigned int);
-	if (!desc->kstat_irqs)
-		goto err_desc;
-
-	if (alloc_masks(desc, node))
-		goto err_kstat;
 
-	raw_spin_lock_init(&desc->lock);
-	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-	mutex_init(&desc->request_mutex);
-	init_rcu_head(&desc->rcu);
-	init_waitqueue_head(&desc->wait_for_threads);
-
-	desc_set_defaults(irq, desc, node, affinity, owner);
-	irqd_set(&desc->irq_data, flags);
-	kobject_init(&desc->kobj, &irq_kobj_type);
-	irq_resend_init(desc);
+	ret = init_desc(desc, irq, node, flags, affinity, owner);
+	if (unlikely(ret)) {
+		kfree(desc);
+		return NULL;
+	}
 
 	return desc;
-
-err_kstat:
-	free_percpu(desc->kstat_irqs);
-err_desc:
-	kfree(desc);
-	return NULL;
 }
 
 static void irq_kobj_release(struct kobject *kobj)
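The err_kstat/err_desc goto ladder disappears because partial-failure
unwinding now lives inside init_desc(); alloc_desc() keeps a single
all-or-nothing failure point. The general shape of that refactoring, as a
sketch with hypothetical names (step_a(), step_b(), undo_a()):

	/* The callee cleans up everything it acquired before returning an
	 * error, so callers need no per-step unwind labels.
	 */
	static int init_all(struct obj *o)
	{
		if (step_a(o))
			return -ENOMEM;
		if (step_b(o)) {
			undo_a(o);	/* callee unwinds its own work */
			return -ENOMEM;
		}
		return 0;
	}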
@@ -583,26 +596,29 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 int __init early_irq_init(void)
 {
 	int count, i, node = first_online_node;
-	struct irq_desc *desc;
+	int ret;
 
 	init_irq_default_affinity();
 
 	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);
 
-	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);
 
 	for (i = 0; i < count; i++) {
-		desc[i].kstat_irqs = alloc_percpu(unsigned int);
-		alloc_masks(&desc[i], node);
-		raw_spin_lock_init(&desc[i].lock);
-		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-		mutex_init(&desc[i].request_mutex);
-		init_waitqueue_head(&desc[i].wait_for_threads);
-		desc_set_defaults(i, &desc[i], node, NULL, NULL);
-		irq_resend_init(&desc[i]);
+		ret = init_desc(irq_desc + i, i, node, 0, NULL, NULL);
+		if (unlikely(ret))
+			goto __free_desc_res;
 	}
+
 	return arch_early_irq_init();
+
+__free_desc_res:
+	while (--i >= 0) {
+		free_masks(irq_desc + i);
+		free_percpu(irq_desc[i].kstat_irqs);
+	}
+
+	return ret;
 }
 
 struct irq_desc *irq_to_desc(unsigned int irq)
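In the new rollback path, init_desc() has already cleaned up the descriptor
that failed, so only indices 0..i-1 (the fully initialized ones) need
unwinding, in reverse order. The idiom in isolation, with hypothetical
acquire()/release() helpers:

	int i, ret = 0;

	for (i = 0; i < count; i++) {
		ret = acquire(i);	/* cleans up after itself on failure */
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		release(i);		/* undo only the completed iterations */
	return ret;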
