Skip to content

Commit 17c1953

Browse files
committed
genirq/manage: Convert to lock guards
Convert lock/unlock pairs to guards. No functional change.

Signed-off-by: Thomas Gleixner <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lore.kernel.org/all/[email protected]
1 parent 0c169ed commit 17c1953

File tree

1 file changed

+58
-97
lines changed

1 file changed

+58
-97
lines changed

kernel/irq/manage.c

Lines changed: 58 additions & 97 deletions
Original file line number | Diff line number | Diff line change
@@ -43,8 +43,6 @@ static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
4343
bool inprogress;
4444

4545
do {
46-
unsigned long flags;
47-
4846
/*
4947
* Wait until we're out of the critical section. This might
5048
* give the wrong answer due to the lack of memory barriers.
@@ -53,7 +51,7 @@ static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
5351
cpu_relax();
5452

5553
/* Ok, that indicated we're done: double-check carefully. */
56-
raw_spin_lock_irqsave(&desc->lock, flags);
54+
guard(raw_spinlock_irqsave)(&desc->lock);
5755
inprogress = irqd_irq_inprogress(&desc->irq_data);
5856

5957
/*
@@ -69,8 +67,6 @@ static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
6967
__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
7068
&inprogress);
7169
}
72-
raw_spin_unlock_irqrestore(&desc->lock, flags);
73-
7470
/* Oops, that failed? */
7571
} while (inprogress);
7672
}
@@ -458,16 +454,12 @@ static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
458454
bool force)
459455
{
460456
struct irq_desc *desc = irq_to_desc(irq);
461-
unsigned long flags;
462-
int ret;
463457

464458
if (!desc)
465459
return -EINVAL;
466460

467-
raw_spin_lock_irqsave(&desc->lock, flags);
468-
ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
469-
raw_spin_unlock_irqrestore(&desc->lock, flags);
470-
return ret;
461+
guard(raw_spinlock_irqsave)(&desc->lock);
462+
return irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
471463
}
472464

473465
/**
@@ -522,17 +514,16 @@ static void irq_affinity_notify(struct work_struct *work)
522514
container_of(work, struct irq_affinity_notify, work);
523515
struct irq_desc *desc = irq_to_desc(notify->irq);
524516
cpumask_var_t cpumask;
525-
unsigned long flags;
526517

527518
if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
528519
goto out;
529520

530-
raw_spin_lock_irqsave(&desc->lock, flags);
531-
if (irq_move_pending(&desc->irq_data))
532-
irq_get_pending(cpumask, desc);
533-
else
534-
cpumask_copy(cpumask, desc->irq_common_data.affinity);
535-
raw_spin_unlock_irqrestore(&desc->lock, flags);
521+
scoped_guard(raw_spinlock_irqsave, &desc->lock) {
522+
if (irq_move_pending(&desc->irq_data))
523+
irq_get_pending(cpumask, desc);
524+
else
525+
cpumask_copy(cpumask, desc->irq_common_data.affinity);
526+
}
536527

537528
notify->notify(notify, cpumask);
538529

@@ -556,7 +547,6 @@ int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *noti
556547
{
557548
struct irq_desc *desc = irq_to_desc(irq);
558549
struct irq_affinity_notify *old_notify;
559-
unsigned long flags;
560550

561551
/* The release function is promised process context */
562552
might_sleep();
@@ -571,10 +561,10 @@ int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *noti
571561
INIT_WORK(&notify->work, irq_affinity_notify);
572562
}
573563

574-
raw_spin_lock_irqsave(&desc->lock, flags);
575-
old_notify = desc->affinity_notify;
576-
desc->affinity_notify = notify;
577-
raw_spin_unlock_irqrestore(&desc->lock, flags);
564+
scoped_guard(raw_spinlock_irqsave, &desc->lock) {
565+
old_notify = desc->affinity_notify;
566+
desc->affinity_notify = notify;
567+
}
578568

579569
if (old_notify) {
580570
if (cancel_work_sync(&old_notify->work)) {
@@ -595,15 +585,16 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
595585
int irq_setup_affinity(struct irq_desc *desc)
596586
{
597587
struct cpumask *set = irq_default_affinity;
598-
int ret, node = irq_desc_get_node(desc);
588+
int node = irq_desc_get_node(desc);
589+
599590
static DEFINE_RAW_SPINLOCK(mask_lock);
600591
static struct cpumask mask;
601592

602593
/* Excludes PER_CPU and NO_BALANCE interrupts */
603594
if (!__irq_can_set_affinity(desc))
604595
return 0;
605596

606-
raw_spin_lock(&mask_lock);
597+
guard(raw_spinlock)(&mask_lock);
607598
/*
608599
* Preserve the managed affinity setting and a userspace affinity
609600
* setup, but make sure that one of the targets is online.
@@ -628,9 +619,7 @@ int irq_setup_affinity(struct irq_desc *desc)
628619
if (cpumask_intersects(&mask, nodemask))
629620
cpumask_and(&mask, &mask, nodemask);
630621
}
631-
ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
632-
raw_spin_unlock(&mask_lock);
633-
return ret;
622+
return irq_do_set_affinity(&desc->irq_data, &mask, false);
634623
}
635624
#else
636625
/* Wrapper for ALPHA specific affinity selector magic */
@@ -1072,19 +1061,19 @@ static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *a
10721061
return;
10731062
}
10741063

1075-
raw_spin_lock_irq(&desc->lock);
1076-
/*
1077-
* This code is triggered unconditionally. Check the affinity
1078-
* mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
1079-
*/
1080-
if (cpumask_available(desc->irq_common_data.affinity)) {
1081-
const struct cpumask *m;
1064+
scoped_guard(raw_spinlock_irq, &desc->lock) {
1065+
/*
1066+
* This code is triggered unconditionally. Check the affinity
1067+
* mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
1068+
*/
1069+
if (cpumask_available(desc->irq_common_data.affinity)) {
1070+
const struct cpumask *m;
10821071

1083-
m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1084-
cpumask_copy(mask, m);
1085-
valid = true;
1072+
m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1073+
cpumask_copy(mask, m);
1074+
valid = true;
1075+
}
10861076
}
1087-
raw_spin_unlock_irq(&desc->lock);
10881077

10891078
if (valid)
10901079
set_cpus_allowed_ptr(current, mask);
@@ -1252,9 +1241,8 @@ static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
12521241
if (WARN_ON_ONCE(!secondary))
12531242
return;
12541243

1255-
raw_spin_lock_irq(&desc->lock);
1244+
guard(raw_spinlock_irq)(&desc->lock);
12561245
__irq_wake_thread(desc, secondary);
1257-
raw_spin_unlock_irq(&desc->lock);
12581246
}
12591247

12601248
/*
@@ -1335,20 +1323,18 @@ void irq_wake_thread(unsigned int irq, void *dev_id)
13351323
{
13361324
struct irq_desc *desc = irq_to_desc(irq);
13371325
struct irqaction *action;
1338-
unsigned long flags;
13391326

13401327
if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
13411328
return;
13421329

1343-
raw_spin_lock_irqsave(&desc->lock, flags);
1330+
guard(raw_spinlock_irqsave)(&desc->lock);
13441331
for_each_action_of_desc(desc, action) {
13451332
if (action->dev_id == dev_id) {
13461333
if (action->thread)
13471334
__irq_wake_thread(desc, action);
13481335
break;
13491336
}
13501337
}
1351-
raw_spin_unlock_irqrestore(&desc->lock, flags);
13521338
}
13531339
EXPORT_SYMBOL_GPL(irq_wake_thread);
13541340

@@ -1979,9 +1965,8 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
19791965
* There is no interrupt on the fly anymore. Deactivate it
19801966
* completely.
19811967
*/
1982-
raw_spin_lock_irqsave(&desc->lock, flags);
1983-
irq_domain_deactivate_irq(&desc->irq_data);
1984-
raw_spin_unlock_irqrestore(&desc->lock, flags);
1968+
scoped_guard(raw_spinlock_irqsave, &desc->lock)
1969+
irq_domain_deactivate_irq(&desc->irq_data);
19851970

19861971
irq_release_resources(desc);
19871972
chip_bus_sync_unlock(desc);
@@ -2066,8 +2051,6 @@ static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
20662051
const void *free_nmi(unsigned int irq, void *dev_id)
20672052
{
20682053
struct irq_desc *desc = irq_to_desc(irq);
2069-
unsigned long flags;
2070-
const void *devname;
20712054

20722055
if (!desc || WARN_ON(!irq_is_nmi(desc)))
20732056
return NULL;
@@ -2079,14 +2062,9 @@ const void *free_nmi(unsigned int irq, void *dev_id)
20792062
if (WARN_ON(desc->depth == 0))
20802063
disable_nmi_nosync(irq);
20812064

2082-
raw_spin_lock_irqsave(&desc->lock, flags);
2083-
2065+
guard(raw_spinlock_irqsave)(&desc->lock);
20842066
irq_nmi_teardown(desc);
2085-
devname = __cleanup_nmi(irq, desc);
2086-
2087-
raw_spin_unlock_irqrestore(&desc->lock, flags);
2088-
2089-
return devname;
2067+
return __cleanup_nmi(irq, desc);
20902068
}
20912069

20922070
/**
@@ -2290,7 +2268,6 @@ int request_nmi(unsigned int irq, irq_handler_t handler,
22902268
{
22912269
struct irqaction *action;
22922270
struct irq_desc *desc;
2293-
unsigned long flags;
22942271
int retval;
22952272

22962273
if (irq == IRQ_NOTCONNECTED)
@@ -2332,21 +2309,17 @@ int request_nmi(unsigned int irq, irq_handler_t handler,
23322309
if (retval)
23332310
goto err_irq_setup;
23342311

2335-
raw_spin_lock_irqsave(&desc->lock, flags);
2336-
2337-
/* Setup NMI state */
2338-
desc->istate |= IRQS_NMI;
2339-
retval = irq_nmi_setup(desc);
2340-
if (retval) {
2341-
__cleanup_nmi(irq, desc);
2342-
raw_spin_unlock_irqrestore(&desc->lock, flags);
2343-
return -EINVAL;
2312+
scoped_guard(raw_spinlock_irqsave, &desc->lock) {
2313+
/* Setup NMI state */
2314+
desc->istate |= IRQS_NMI;
2315+
retval = irq_nmi_setup(desc);
2316+
if (retval) {
2317+
__cleanup_nmi(irq, desc);
2318+
return -EINVAL;
2319+
}
2320+
return 0;
23442321
}
23452322

2346-
raw_spin_unlock_irqrestore(&desc->lock, flags);
2347-
2348-
return 0;
2349-
23502323
err_irq_setup:
23512324
irq_chip_pm_put(&desc->irq_data);
23522325
err_out:
@@ -2445,43 +2418,34 @@ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_
24452418
{
24462419
struct irq_desc *desc = irq_to_desc(irq);
24472420
struct irqaction *action;
2448-
unsigned long flags;
24492421

24502422
WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
24512423

24522424
if (!desc)
24532425
return NULL;
24542426

2455-
raw_spin_lock_irqsave(&desc->lock, flags);
2427+
scoped_guard(raw_spinlock_irqsave, &desc->lock) {
2428+
action = desc->action;
2429+
if (!action || action->percpu_dev_id != dev_id) {
2430+
WARN(1, "Trying to free already-free IRQ %d\n", irq);
2431+
return NULL;
2432+
}
24562433

2457-
action = desc->action;
2458-
if (!action || action->percpu_dev_id != dev_id) {
2459-
WARN(1, "Trying to free already-free IRQ %d\n", irq);
2460-
goto bad;
2461-
}
2434+
if (!cpumask_empty(desc->percpu_enabled)) {
2435+
WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2436+
irq, cpumask_first(desc->percpu_enabled));
2437+
return NULL;
2438+
}
24622439

2463-
if (!cpumask_empty(desc->percpu_enabled)) {
2464-
WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2465-
irq, cpumask_first(desc->percpu_enabled));
2466-
goto bad;
2440+
/* Found it - now remove it from the list of entries: */
2441+
desc->action = NULL;
2442+
desc->istate &= ~IRQS_NMI;
24672443
}
24682444

2469-
/* Found it - now remove it from the list of entries: */
2470-
desc->action = NULL;
2471-
2472-
desc->istate &= ~IRQS_NMI;
2473-
2474-
raw_spin_unlock_irqrestore(&desc->lock, flags);
2475-
24762445
unregister_handler_proc(irq, action);
2477-
24782446
irq_chip_pm_put(&desc->irq_data);
24792447
module_put(desc->owner);
24802448
return action;
2481-
2482-
bad:
2483-
raw_spin_unlock_irqrestore(&desc->lock, flags);
2484-
return NULL;
24852449
}
24862450

24872451
/**
@@ -2651,7 +2615,6 @@ int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
26512615
{
26522616
struct irqaction *action;
26532617
struct irq_desc *desc;
2654-
unsigned long flags;
26552618
int retval;
26562619

26572620
if (!handler)
@@ -2687,10 +2650,8 @@ int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
26872650
if (retval)
26882651
goto err_irq_setup;
26892652

2690-
raw_spin_lock_irqsave(&desc->lock, flags);
2653+
guard(raw_spinlock_irqsave)(&desc->lock);
26912654
desc->istate |= IRQS_NMI;
2692-
raw_spin_unlock_irqrestore(&desc->lock, flags);
2693-
26942655
return 0;
26952656

26962657
err_irq_setup:

0 commit comments

Comments
 (0)