@@ -336,41 +336,11 @@ static int msi_verify_entries(struct pci_dev *dev)
336
336
return !entry ? 0 : - EIO ;
337
337
}
338
338
339
- /**
340
- * msi_capability_init - configure device's MSI capability structure
341
- * @dev: pointer to the pci_dev data structure of MSI device function
342
- * @nvec: number of interrupts to allocate
343
- * @affd: description of automatic IRQ affinity assignments (may be %NULL)
344
- *
345
- * Setup the MSI capability structure of the device with the requested
346
- * number of interrupts. A return value of zero indicates the successful
347
- * setup of an entry with the new MSI IRQ. A negative return value indicates
348
- * an error, and a positive return value indicates the number of interrupts
349
- * which could have been allocated.
350
- */
351
- static int msi_capability_init (struct pci_dev * dev , int nvec ,
352
- struct irq_affinity * affd )
339
+ static int __msi_capability_init (struct pci_dev * dev , int nvec , struct irq_affinity_desc * masks )
353
340
{
354
- struct irq_affinity_desc * masks = NULL ;
341
+ int ret = msi_setup_msi_desc ( dev , nvec , masks ) ;
355
342
struct msi_desc * entry , desc ;
356
- int ret ;
357
343
358
- /* Reject multi-MSI early on irq domain enabled architectures */
359
- if (nvec > 1 && !pci_msi_domain_supports (dev , MSI_FLAG_MULTI_PCI_MSI , ALLOW_LEGACY ))
360
- return 1 ;
361
-
362
- /*
363
- * Disable MSI during setup in the hardware, but mark it enabled
364
- * so that setup code can evaluate it.
365
- */
366
- pci_msi_set_enable (dev , 0 );
367
- dev -> msi_enabled = 1 ;
368
-
369
- if (affd )
370
- masks = irq_create_affinity_masks (nvec , affd );
371
-
372
- msi_lock_descs (& dev -> dev );
373
- ret = msi_setup_msi_desc (dev , nvec , masks );
374
344
if (ret )
375
345
goto fail ;
376
346
@@ -399,19 +369,48 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
399
369
400
370
pcibios_free_irq (dev );
401
371
dev -> irq = entry -> irq ;
402
- goto unlock ;
403
-
372
+ return 0 ;
404
373
err :
405
374
pci_msi_unmask (& desc , msi_multi_mask (& desc ));
406
375
pci_free_msi_irqs (dev );
407
376
fail :
408
377
dev -> msi_enabled = 0 ;
409
- unlock :
410
- msi_unlock_descs (& dev -> dev );
411
- kfree (masks );
412
378
return ret ;
413
379
}
414
380
381
+ /**
382
+ * msi_capability_init - configure device's MSI capability structure
383
+ * @dev: pointer to the pci_dev data structure of MSI device function
384
+ * @nvec: number of interrupts to allocate
385
+ * @affd: description of automatic IRQ affinity assignments (may be %NULL)
386
+ *
387
+ * Setup the MSI capability structure of the device with the requested
388
+ * number of interrupts. A return value of zero indicates the successful
389
+ * setup of an entry with the new MSI IRQ. A negative return value indicates
390
+ * an error, and a positive return value indicates the number of interrupts
391
+ * which could have been allocated.
392
+ */
393
+ static int msi_capability_init (struct pci_dev * dev , int nvec ,
394
+ struct irq_affinity * affd )
395
+ {
396
+ /* Reject multi-MSI early on irq domain enabled architectures */
397
+ if (nvec > 1 && !pci_msi_domain_supports (dev , MSI_FLAG_MULTI_PCI_MSI , ALLOW_LEGACY ))
398
+ return 1 ;
399
+
400
+ /*
401
+ * Disable MSI during setup in the hardware, but mark it enabled
402
+ * so that setup code can evaluate it.
403
+ */
404
+ pci_msi_set_enable (dev , 0 );
405
+ dev -> msi_enabled = 1 ;
406
+
407
+ struct irq_affinity_desc * masks __free (kfree ) =
408
+ affd ? irq_create_affinity_masks (nvec , affd ) : NULL ;
409
+
410
+ guard (msi_descs_lock )(& dev -> dev );
411
+ return __msi_capability_init (dev , nvec , masks );
412
+ }
413
+
415
414
int __pci_enable_msi_range (struct pci_dev * dev , int minvec , int maxvec ,
416
415
struct irq_affinity * affd )
417
416
{
@@ -666,40 +665,41 @@ static void msix_mask_all(void __iomem *base, int tsize)
666
665
writel (ctrl , base + PCI_MSIX_ENTRY_VECTOR_CTRL );
667
666
}
668
667
669
- static int msix_setup_interrupts (struct pci_dev * dev , struct msix_entry * entries ,
670
- int nvec , struct irq_affinity * affd )
668
+ static int __msix_setup_interrupts (struct pci_dev * dev , struct msix_entry * entries ,
669
+ int nvec , struct irq_affinity_desc * masks )
671
670
{
672
- struct irq_affinity_desc * masks = NULL ;
673
- int ret ;
674
-
675
- if (affd )
676
- masks = irq_create_affinity_masks (nvec , affd );
671
+ int ret = msix_setup_msi_descs (dev , entries , nvec , masks );
677
672
678
- msi_lock_descs (& dev -> dev );
679
- ret = msix_setup_msi_descs (dev , entries , nvec , masks );
680
673
if (ret )
681
- goto out_free ;
674
+ goto fail ;
682
675
683
676
ret = pci_msi_setup_msi_irqs (dev , nvec , PCI_CAP_ID_MSIX );
684
677
if (ret )
685
- goto out_free ;
678
+ goto fail ;
686
679
687
680
/* Check if all MSI entries honor device restrictions */
688
681
ret = msi_verify_entries (dev );
689
682
if (ret )
690
- goto out_free ;
683
+ goto fail ;
691
684
692
685
msix_update_entries (dev , entries );
693
- goto out_unlock ;
686
+ return 0 ;
694
687
695
- out_free :
688
+ fail :
696
689
pci_free_msi_irqs (dev );
697
- out_unlock :
698
- msi_unlock_descs (& dev -> dev );
699
- kfree (masks );
700
690
return ret ;
701
691
}
702
692
693
+ static int msix_setup_interrupts (struct pci_dev * dev , struct msix_entry * entries ,
694
+ int nvec , struct irq_affinity * affd )
695
+ {
696
+ struct irq_affinity_desc * masks __free (kfree ) =
697
+ affd ? irq_create_affinity_masks (nvec , affd ) : NULL ;
698
+
699
+ guard (msi_descs_lock )(& dev -> dev );
700
+ return __msix_setup_interrupts (dev , entries , nvec , masks );
701
+ }
702
+
703
703
/**
704
704
* msix_capability_init - configure device's MSI-X capability
705
705
* @dev: pointer to the pci_dev data structure of MSI-X device function
@@ -871,13 +871,13 @@ void __pci_restore_msix_state(struct pci_dev *dev)
871
871
872
872
write_msg = arch_restore_msi_irqs (dev );
873
873
874
- msi_lock_descs (& dev -> dev );
875
- msi_for_each_desc (entry , & dev -> dev , MSI_DESC_ALL ) {
876
- if (write_msg )
877
- __pci_write_msi_msg (entry , & entry -> msg );
878
- pci_msix_write_vector_ctrl (entry , entry -> pci .msix_ctrl );
874
+ scoped_guard (msi_descs_lock , & dev -> dev ) {
875
+ msi_for_each_desc (entry , & dev -> dev , MSI_DESC_ALL ) {
876
+ if (write_msg )
877
+ __pci_write_msi_msg (entry , & entry -> msg );
878
+ pci_msix_write_vector_ctrl (entry , entry -> pci .msix_ctrl );
879
+ }
879
880
}
880
- msi_unlock_descs (& dev -> dev );
881
881
882
882
pci_msix_clear_and_set_ctrl (dev , PCI_MSIX_FLAGS_MASKALL , 0 );
883
883
}
@@ -916,6 +916,53 @@ void pci_free_msi_irqs(struct pci_dev *dev)
916
916
}
917
917
}
918
918
919
#ifdef CONFIG_PCIE_TPH
/**
 * pci_msix_write_tph_tag - Update the TPH tag for a given MSI-X vector
 * @pdev:	The PCIe device to update
 * @index:	The MSI-X index to update
 * @tag:	The tag to write
 *
 * Returns: 0 on success, error code on failure
 */
int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
{
	struct msi_desc *msi_desc;
	struct irq_desc *irq_desc;
	unsigned int virq;

	if (!pdev->msix_enabled)
		return -ENXIO;

	guard(msi_descs_lock)(&pdev->dev);
	virq = msi_get_virq(&pdev->dev, index);
	if (!virq)
		return -ENXIO;
	/*
	 * This is a horrible hack, but short of implementing a PCI
	 * specific interrupt chip callback and a huge pile of
	 * infrastructure, this is the minor nuisance. It provides the
	 * protection against concurrent operations on this entry and keeps
	 * the control word cache in sync.
	 */
	irq_desc = irq_to_desc(virq);
	if (!irq_desc)
		return -ENXIO;

	guard(raw_spinlock_irq)(&irq_desc->lock);
	msi_desc = irq_data_get_msi_desc(&irq_desc->irq_data);
	if (!msi_desc || msi_desc->pci.msi_attrib.is_virtual)
		return -ENXIO;

	/* Replace only the steering-tag field of the cached control word */
	msi_desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_ST;
	msi_desc->pci.msix_ctrl |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
	pci_msix_write_vector_ctrl(msi_desc, msi_desc->pci.msix_ctrl);
	/* Flush the write */
	readl(pci_msix_desc_addr(msi_desc));
	return 0;
}
#endif
965
+
919
966
/* Misc. infrastructure */
920
967
921
968
struct pci_dev * msi_desc_to_pci_dev (struct msi_desc * desc )
0 commit comments