@@ -180,30 +180,28 @@ static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 static void vmd_irq_enable(struct irq_data *data)
 {
 	struct vmd_irq *vmdirq = data->chip_data;
-	unsigned long flags;

-	raw_spin_lock_irqsave(&list_lock, flags);
-	WARN_ON(vmdirq->enabled);
-	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
-	vmdirq->enabled = true;
-	raw_spin_unlock_irqrestore(&list_lock, flags);
+	scoped_guard(raw_spinlock_irqsave, &list_lock) {
+		WARN_ON(vmdirq->enabled);
+		list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
+		vmdirq->enabled = true;
+	}

 	data->chip->irq_unmask(data);
 }

 static void vmd_irq_disable(struct irq_data *data)
 {
 	struct vmd_irq *vmdirq = data->chip_data;
-	unsigned long flags;

 	data->chip->irq_mask(data);

-	raw_spin_lock_irqsave(&list_lock, flags);
-	if (vmdirq->enabled) {
-		list_del_rcu(&vmdirq->node);
-		vmdirq->enabled = false;
+	scoped_guard(raw_spinlock_irqsave, &list_lock) {
+		if (vmdirq->enabled) {
+			list_del_rcu(&vmdirq->node);
+			vmdirq->enabled = false;
+		}
 	}
-	raw_spin_unlock_irqrestore(&list_lock, flags);
 }

 static struct irq_chip vmd_msi_controller = {
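In both functions above, the conversion is mechanical: the manual lock/unlock pair around the critical section becomes a scoped_guard() from include/linux/cleanup.h, which acquires the lock, covers exactly the braced block that follows, and releases the lock on every path out of that block. That is why the local "flags" variable and the explicit unlock disappear. A minimal sketch of the underlying mechanism, assuming only GCC/Clang's cleanup attribute (the names my_guard, my_guard_enter, and my_guard_exit are made up for illustration; the real macros are more general):

struct my_guard {
	raw_spinlock_t	*lock;
	unsigned long	flags;
};

/* Cleanup handler: runs automatically when a my_guard goes out of scope. */
static inline void my_guard_exit(struct my_guard *g)
{
	raw_spin_unlock_irqrestore(g->lock, g->flags);
}

static inline struct my_guard my_guard_enter(raw_spinlock_t *lock)
{
	struct my_guard g = { .lock = lock };

	raw_spin_lock_irqsave(lock, g.flags);
	return g;
}

/* Usage: the unlock runs at the closing brace, or on any early exit. */
{
	struct my_guard g __attribute__((cleanup(my_guard_exit))) =
			my_guard_enter(&list_lock);

	WARN_ON(vmdirq->enabled);
	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
	vmdirq->enabled = true;
}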
@@ -225,7 +223,6 @@ static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
  */
 static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
 {
-	unsigned long flags;
 	int i, best;

 	if (vmd->msix_count == 1 + vmd->first_vec)
@@ -242,13 +239,13 @@ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *d
 		return &vmd->irqs[vmd->first_vec];
 	}

-	raw_spin_lock_irqsave(&list_lock, flags);
-	best = vmd->first_vec + 1;
-	for (i = best; i < vmd->msix_count; i++)
-		if (vmd->irqs[i].count < vmd->irqs[best].count)
-			best = i;
-	vmd->irqs[best].count++;
-	raw_spin_unlock_irqrestore(&list_lock, flags);
+	scoped_guard(raw_spinlock_irq, &list_lock) {
+		best = vmd->first_vec + 1;
+		for (i = best; i < vmd->msix_count; i++)
+			if (vmd->irqs[i].count < vmd->irqs[best].count)
+				best = i;
+		vmd->irqs[best].count++;
+	}

 	return &vmd->irqs[best];
 }
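Two things change in this hunk (together with the small hunk above it, which drops the now-unused "flags" local): the vector-search loop moves into a scoped_guard() block, and the guard class switches from raw_spinlock_irqsave to raw_spinlock_irq. The irq class maps to raw_spin_lock_irq()/raw_spin_unlock_irq(), which disable and re-enable interrupts unconditionally instead of saving and restoring the previous state; that is only correct if this allocation path always runs with interrupts enabled, which the patch evidently relies on. For comparison, the open-coded equivalent of the new block:

/* Open-coded equivalent of the scoped_guard(raw_spinlock_irq, ...) block
 * above; no flags word is needed because the _irq variants re-enable
 * interrupts unconditionally on unlock.
 */
raw_spin_lock_irq(&list_lock);
best = vmd->first_vec + 1;
for (i = best; i < vmd->msix_count; i++)
	if (vmd->irqs[i].count < vmd->irqs[best].count)
		best = i;
vmd->irqs[best].count++;
raw_spin_unlock_irq(&list_lock);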
@@ -277,14 +274,12 @@ static void vmd_msi_free(struct irq_domain *domain,
 			struct msi_domain_info *info, unsigned int virq)
 {
 	struct vmd_irq *vmdirq = irq_get_chip_data(virq);
-	unsigned long flags;

 	synchronize_srcu(&vmdirq->irq->srcu);

 	/* XXX: Potential optimization to rebalance */
-	raw_spin_lock_irqsave(&list_lock, flags);
-	vmdirq->irq->count--;
-	raw_spin_unlock_irqrestore(&list_lock, flags);
+	scoped_guard(raw_spinlock_irq, &list_lock)
+		vmdirq->irq->count--;

 	kfree(vmdirq);
 }
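As with if or for, a scoped_guard() without braces covers exactly the one statement that follows, so the lock here is held only around the count decrement. The guard form also stays correct if the body later grows an early exit: leaving the guarded scope by any route releases the lock. A contrived sketch (not code from this driver; the helper name is invented):

/* Hypothetical helper: the early return inside the guarded block is
 * safe, because the guard's cleanup drops list_lock on that path too.
 */
static bool vmd_irq_list_busy(struct vmd_irq_list *irq)
{
	scoped_guard(raw_spinlock_irq, &list_lock) {
		if (irq->count)
			return true;
	}
	return false;
}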
@@ -387,29 +382,24 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
 {
 	struct vmd_dev *vmd = vmd_from_bus(bus);
 	void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
-	unsigned long flags;
-	int ret = 0;

 	if (!addr)
 		return -EFAULT;

-	raw_spin_lock_irqsave(&vmd->cfg_lock, flags);
+	guard(raw_spinlock_irqsave)(&vmd->cfg_lock);
 	switch (len) {
 	case 1:
 		*value = readb(addr);
-		break;
+		return 0;
 	case 2:
 		*value = readw(addr);
-		break;
+		return 0;
 	case 4:
 		*value = readl(addr);
-		break;
+		return 0;
 	default:
-		ret = -EINVAL;
-		break;
+		return -EINVAL;
 	}
-	raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags);
-	return ret;
 }

 /*
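vmd_pci_read() uses plain guard() rather than scoped_guard(): the lock is taken at the point of declaration and held until the enclosing function scope is left. That is exactly what lets every switch case return its value directly; the old break-out-of-switch, common unlock, and "ret" bookkeeping all become unnecessary. The same shape in miniature (function and parameter names invented for illustration):

/* Sketch of the guard() pattern above: every return after the guard()
 * line implicitly releases the lock first.
 */
static int cfg_read32(struct vmd_dev *vmd, void __iomem *addr, u32 *val)
{
	if (!addr)
		return -EFAULT;	/* before the guard: lock never taken */

	guard(raw_spinlock_irqsave)(&vmd->cfg_lock);

	*val = readl(addr);
	return 0;		/* unlock runs automatically here */
}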
@@ -422,32 +412,27 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
 {
 	struct vmd_dev *vmd = vmd_from_bus(bus);
 	void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
-	unsigned long flags;
-	int ret = 0;

 	if (!addr)
 		return -EFAULT;

-	raw_spin_lock_irqsave(&vmd->cfg_lock, flags);
+	guard(raw_spinlock_irqsave)(&vmd->cfg_lock);
 	switch (len) {
 	case 1:
 		writeb(value, addr);
 		readb(addr);
-		break;
+		return 0;
 	case 2:
 		writew(value, addr);
 		readw(addr);
-		break;
+		return 0;
 	case 4:
 		writel(value, addr);
 		readl(addr);
-		break;
+		return 0;
 	default:
-		ret = -EINVAL;
-		break;
+		return -EINVAL;
 	}
-	raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags);
-	return ret;
 }

 static struct pci_ops vmd_ops = {
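vmd_pci_write() gets the identical treatment, keeping the read-back after each write that flushes the posted write. For reference, the guard classes used throughout this patch are defined in include/linux/spinlock.h, roughly as follows (quoted from memory of recent kernels, so treat as approximate):

DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
		    raw_spin_lock_irq(_T->lock),
		    raw_spin_unlock_irq(_T->lock))

DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
		    raw_spin_lock_irqsave(_T->lock, _T->flags),
		    raw_spin_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)

DEFINE_LOCK_GUARD_1() (include/linux/cleanup.h) generates the constructor/destructor pair that guard() and scoped_guard() instantiate.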