@@ -297,7 +297,6 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
 
         if (!dev->msix_enabled)
                 return;
-        BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
 
         /* route the table */
         pci_intx_for_msi(dev, 0);
@@ -307,7 +306,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
         write_msg = arch_restore_msi_irqs(dev);
 
         msi_lock_descs(&dev->dev);
-        for_each_pci_msi_entry(entry, dev) {
+        msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
                 if (write_msg)
                         __pci_write_msi_msg(entry, &entry->msg);
                 pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
@@ -406,14 +405,14 @@ static int msi_verify_entries(struct pci_dev *dev)
         if (!dev->no_64bit_msi)
                 return 0;
 
-        for_each_pci_msi_entry(entry, dev) {
+        msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
                 if (entry->msg.address_hi) {
                         pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n",
                                 entry->msg.address_hi, entry->msg.address_lo);
-                        return -EIO;
+                        break;
                 }
         }
-        return 0;
+        return !entry ? 0 : -EIO;
 }
 
 /**
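The msi_verify_entries() hunk above switches from returning inside the walk to breaking out and deciding the return value from the loop cursor: the new iterator leaves the cursor NULL after a complete walk and keeps it pointing at the offending descriptor on an early break, which is why "return !entry ? 0 : -EIO;" covers both cases. Below is a minimal userspace sketch of that cursor idiom; the iterator macro, the desc type and the names are illustrative assumptions, not the kernel implementation.

#include <stddef.h>
#include <stdio.h>

struct desc {
        unsigned int address_hi;
};

/*
 * Walk a NULL-terminated array of descriptor pointers. The cursor 'd' is
 * left NULL when the walk completes and stays non-NULL if the body breaks
 * early, mirroring the msi_for_each_desc() usage in msi_verify_entries().
 */
#define desc_for_each(d, table) \
        for (struct desc **_p = (table); ((d) = *_p) != NULL; _p++)

static int verify_entries(struct desc **table)
{
        struct desc *d;

        desc_for_each(d, table) {
                if (d->address_hi)
                        break;          /* offending entry found */
        }
        return d ? -1 : 0;              /* NULL cursor == clean full walk */
}

int main(void)
{
        struct desc ok = { .address_hi = 0 }, bad = { .address_hi = 0x1 };
        struct desc *good_tbl[] = { &ok, &ok, NULL };
        struct desc *bad_tbl[]  = { &ok, &bad, NULL };

        printf("good: %d, bad: %d\n",
               verify_entries(good_tbl), verify_entries(bad_tbl));
        return 0;
}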
@@ -451,7 +450,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
                 goto fail;
 
         /* All MSIs are unmasked by default; mask them all */
-        entry = first_pci_msi_entry(dev);
+        entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
         pci_msi_mask(entry, msi_multi_mask(entry));
 
         /* Configure MSI capability structure */
@@ -541,11 +540,11 @@ static int msix_setup_msi_descs(struct pci_dev *dev, void __iomem *base,
 
 static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
 {
-        struct msi_desc *entry;
+        struct msi_desc *desc;
 
         if (entries) {
-                for_each_pci_msi_entry(entry, dev) {
-                        entries->vector = entry->irq;
+                msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) {
+                        entries->vector = desc->irq;
                         entries++;
                 }
         }
@@ -747,15 +746,14 @@ static void pci_msi_shutdown(struct pci_dev *dev)
         if (!pci_msi_enable || !dev || !dev->msi_enabled)
                 return;
 
-        BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
-        desc = first_pci_msi_entry(dev);
-
         pci_msi_set_enable(dev, 0);
         pci_intx_for_msi(dev, 1);
         dev->msi_enabled = 0;
 
         /* Return the device with MSI unmasked as initial states */
-        pci_msi_unmask(desc, msi_multi_mask(desc));
+        desc = msi_first_desc(&dev->dev, MSI_DESC_ALL);
+        if (!WARN_ON_ONCE(!desc))
+                pci_msi_unmask(desc, msi_multi_mask(desc));
 
         /* Restore dev->irq to its default pin-assertion IRQ */
         dev->irq = desc->pci.msi_attrib.default_irq;
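The pci_msi_shutdown() hunk replaces a hard BUG_ON() on a missing descriptor with a one-time warning and a skipped unmask. A small userspace sketch of that warn-and-skip pattern follows; the warn_on_once() helper, the desc type and the function names are illustrative assumptions, not kernel APIs.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct desc {
        unsigned int multi_mask;
};

/* Warn the first time cond is true; return cond so callers can branch on it */
static bool warn_on_once(bool cond, const char *what)
{
        static bool warned;

        if (cond && !warned) {
                warned = true;
                fprintf(stderr, "warning: %s\n", what);
        }
        return cond;
}

static void unmask(struct desc *d)
{
        printf("unmask 0x%x\n", d->multi_mask);
}

static void shutdown_msi(struct desc *first)
{
        /* Degrade gracefully: warn once and skip, instead of crashing */
        if (!warn_on_once(!first, "no MSI descriptor"))
                unmask(first);
}

int main(void)
{
        struct desc d = { .multi_mask = 0xf };

        shutdown_msi(&d);       /* normal path: unmasks the descriptor */
        shutdown_msi(NULL);     /* missing descriptor: warns once, no crash */
        return 0;
}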
@@ -831,7 +829,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
 
 static void pci_msix_shutdown(struct pci_dev *dev)
 {
-        struct msi_desc *entry;
+        struct msi_desc *desc;
 
         if (!pci_msi_enable || !dev || !dev->msix_enabled)
                 return;
@@ -842,8 +840,8 @@ static void pci_msix_shutdown(struct pci_dev *dev)
         }
 
         /* Return the device with MSI-X masked as initial states */
-        for_each_pci_msi_entry(entry, dev)
-                pci_msix_mask(entry);
+        msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL)
+                pci_msix_mask(desc);
 
         pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
         pci_intx_for_msi(dev, 1);