@@ -40,10 +40,9 @@ void irq_gc_mask_disable_reg(struct irq_data *d)
 	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 	u32 mask = d->mask;
 
-	irq_gc_lock(gc);
+	guard(raw_spinlock)(&gc->lock);
 	irq_reg_writel(gc, mask, ct->regs.disable);
 	*ct->mask_cache &= ~mask;
-	irq_gc_unlock(gc);
 }
 EXPORT_SYMBOL_GPL(irq_gc_mask_disable_reg);
 
@@ -60,10 +59,9 @@ void irq_gc_mask_set_bit(struct irq_data *d)
 	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 	u32 mask = d->mask;
 
-	irq_gc_lock(gc);
+	guard(raw_spinlock)(&gc->lock);
 	*ct->mask_cache |= mask;
 	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
-	irq_gc_unlock(gc);
 }
 EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);
 
@@ -80,10 +78,9 @@ void irq_gc_mask_clr_bit(struct irq_data *d)
 	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 	u32 mask = d->mask;
 
-	irq_gc_lock(gc);
+	guard(raw_spinlock)(&gc->lock);
 	*ct->mask_cache &= ~mask;
 	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
-	irq_gc_unlock(gc);
 }
 EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);
 
@@ -100,10 +97,9 @@ void irq_gc_unmask_enable_reg(struct irq_data *d)
 	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 	u32 mask = d->mask;
 
-	irq_gc_lock(gc);
+	guard(raw_spinlock)(&gc->lock);
 	irq_reg_writel(gc, mask, ct->regs.enable);
 	*ct->mask_cache |= mask;
-	irq_gc_unlock(gc);
 }
 EXPORT_SYMBOL_GPL(irq_gc_unmask_enable_reg);
 
@@ -117,9 +113,8 @@ void irq_gc_ack_set_bit(struct irq_data *d)
 	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 	u32 mask = d->mask;
 
-	irq_gc_lock(gc);
+	guard(raw_spinlock)(&gc->lock);
 	irq_reg_writel(gc, mask, ct->regs.ack);
-	irq_gc_unlock(gc);
 }
 EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);
 
@@ -133,9 +128,8 @@ void irq_gc_ack_clr_bit(struct irq_data *d)
 	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 	u32 mask = ~d->mask;
 
-	irq_gc_lock(gc);
+	guard(raw_spinlock)(&gc->lock);
 	irq_reg_writel(gc, mask, ct->regs.ack);
-	irq_gc_unlock(gc);
 }
 
 /**
@@ -156,11 +150,10 @@ void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
 	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 	u32 mask = d->mask;
 
-	irq_gc_lock(gc);
+	guard(raw_spinlock)(&gc->lock);
 	irq_reg_writel(gc, mask, ct->regs.disable);
 	*ct->mask_cache &= ~mask;
 	irq_reg_writel(gc, mask, ct->regs.ack);
-	irq_gc_unlock(gc);
 }
 EXPORT_SYMBOL_GPL(irq_gc_mask_disable_and_ack_set);
 
@@ -174,9 +167,8 @@ void irq_gc_eoi(struct irq_data *d)
 	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 	u32 mask = d->mask;
 
-	irq_gc_lock(gc);
+	guard(raw_spinlock)(&gc->lock);
 	irq_reg_writel(gc, mask, ct->regs.eoi);
-	irq_gc_unlock(gc);
 }
 
 /**
@@ -196,12 +188,11 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on)
 	if (!(mask & gc->wake_enabled))
 		return -EINVAL;
 
-	irq_gc_lock(gc);
+	guard(raw_spinlock)(&gc->lock);
 	if (on)
 		gc->wake_active |= mask;
 	else
 		gc->wake_active &= ~mask;
-	irq_gc_unlock(gc);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(irq_gc_set_wake);
@@ -288,7 +279,6 @@ int irq_domain_alloc_generic_chips(struct irq_domain *d,
 {
 	struct irq_domain_chip_generic *dgc;
 	struct irq_chip_generic *gc;
-	unsigned long flags;
 	int numchips, i;
 	size_t dgc_sz;
 	size_t gc_sz;
@@ -340,9 +330,8 @@ int irq_domain_alloc_generic_chips(struct irq_domain *d,
 			goto err;
 		}
 
-		raw_spin_lock_irqsave(&gc_lock, flags);
-		list_add_tail(&gc->list, &gc_list);
-		raw_spin_unlock_irqrestore(&gc_lock, flags);
+		scoped_guard (raw_spinlock, &gc_lock)
+			list_add_tail(&gc->list, &gc_list);
 		/* Calc pointer to the next generic chip */
 		tmp += gc_sz;
 	}
@@ -459,7 +448,6 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
 	struct irq_chip_generic *gc;
 	struct irq_chip_type *ct;
 	struct irq_chip *chip;
-	unsigned long flags;
 	int idx;
 
 	gc = __irq_get_domain_generic_chip(d, hw_irq);
@@ -479,9 +467,8 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
 
 	/* We only init the cache for the first mapping of a generic chip */
 	if (!gc->installed) {
-		raw_spin_lock_irqsave(&gc->lock, flags);
+		guard(raw_spinlock_irq)(&gc->lock);
 		irq_gc_init_mask_cache(gc, dgc->gc_flags);
-		raw_spin_unlock_irqrestore(&gc->lock, flags);
 	}
 
 	/* Mark the interrupt as installed */
@@ -548,9 +535,8 @@ void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
 	struct irq_chip *chip = &ct->chip;
 	unsigned int i;
 
-	raw_spin_lock(&gc_lock);
-	list_add_tail(&gc->list, &gc_list);
-	raw_spin_unlock(&gc_lock);
+	scoped_guard (raw_spinlock, &gc_lock)
+		list_add_tail(&gc->list, &gc_list);
 
 	irq_gc_init_mask_cache(gc, flags);
 
@@ -616,9 +602,8 @@ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
 {
 	unsigned int i, virq;
 
-	raw_spin_lock(&gc_lock);
-	list_del(&gc->list);
-	raw_spin_unlock(&gc_lock);
+	scoped_guard (raw_spinlock, &gc_lock)
+		list_del(&gc->list);
 
 	for (i = 0; msk; msk >>= 1, i++) {
 		if (!(msk & 0x01))
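Note on the API used above (not part of the patch itself): guard(raw_spinlock)(&gc->lock), guard(raw_spinlock_irq)(&gc->lock) and scoped_guard (raw_spinlock, &gc_lock) come from the kernel's scope-based cleanup infrastructure (include/linux/cleanup.h and the lock guard definitions in include/linux/spinlock.h). A guard acquires the lock and releases it automatically when the enclosing scope ends, which is why the explicit irq_gc_unlock()/raw_spin_unlock*() calls and the "unsigned long flags" locals disappear; scoped_guard() confines the lock to just the statement or block that follows it, as with the list_add_tail()/list_del() calls above. The snippet below is a minimal, hypothetical userspace sketch of the same idea, built on the __attribute__((cleanup)) compiler feature that the kernel macros rely on; the pthread mutex stand-in, the GUARD macro and the helper names are illustrative only and are not the kernel implementation.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical userspace stand-in for the kernel's guard() idea. */
struct lock_guard {
	pthread_mutex_t *lock;
};

static inline struct lock_guard lock_guard_init(pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	return (struct lock_guard){ .lock = lock };
}

static inline void lock_guard_release(struct lock_guard *g)
{
	pthread_mutex_unlock(g->lock);
}

/* Acquire @l now; the cleanup handler drops it when the scope is left. */
#define GUARD(name, l)							\
	struct lock_guard name						\
		__attribute__((cleanup(lock_guard_release))) = lock_guard_init(l)

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_count;

static void add_item(void)
{
	GUARD(g, &list_lock);	/* analogous to guard(raw_spinlock)(&gc->lock) */
	shared_count++;
	/* no explicit unlock: lock_guard_release() runs on every return path */
}

int main(void)
{
	add_item();
	printf("count = %d\n", shared_count);
	return 0;
}

Built with "cc -pthread", the unlock in lock_guard_release() runs on every path out of add_item(), which is the property the conversion above relies on to drop the manual unlock calls.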