@@ -143,24 +143,25 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
  * reliably as devices without an INTx disable bit will then generate a
  * level IRQ which will never be cleared.
  */
-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 {
-	u32 mask_bits = desc->masked;
+	raw_spinlock_t *lock = &desc->dev->msi_lock;
+	unsigned long flags;
 
 	if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
-		return 0;
+		return;
 
-	mask_bits &= ~mask;
-	mask_bits |= flag;
+	raw_spin_lock_irqsave(lock, flags);
+	desc->masked &= ~mask;
+	desc->masked |= flag;
 	pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
-			       mask_bits);
-
-	return mask_bits;
+			       desc->masked);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 
 static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 {
-	desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
+	__pci_msi_desc_mask_irq(desc, mask, flag);
 }
 
 static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
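This first hunk closes a lost-update race on msi_desc::masked: with multi-MSI, all vectors of a device share a single 32-bit mask register in config space, and the old helper updated the cached copy with an unlocked read-modify-write, so two CPUs masking or unmasking different vectors of the same device could overwrite each other's bits. The sketch below reproduces that failure mode in plain userspace C; it is an illustration only, not kernel code, and the shared word merely stands in for desc->masked:

/* lost_update.c - why __pci_msi_desc_mask_irq() now takes dev->msi_lock.
 * Two threads do unlocked read-modify-writes on a shared mask word,
 * one bit per thread, like two vectors of one multi-MSI device.
 * Build: cc -O2 -pthread lost_update.c -o lost_update
 */
#include <pthread.h>
#include <stdio.h>

static volatile unsigned int masked;	/* stands in for desc->masked */

static void *toggler(void *arg)
{
	unsigned int bit = 1u << (unsigned long)arg;

	for (long i = 0; i < 10000000; i++) {
		unsigned int v;

		v = masked;		/* read                      */
		masked = v | bit;	/* write back: set own bit   */
		v = masked;
		masked = v & ~bit;	/* write back: clear own bit */
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, toggler, (void *)0UL);
	pthread_create(&b, NULL, toggler, (void *)1UL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Each thread's last store clears its own bit, so with proper
	 * serialization the result is always 0. A non-zero value is a
	 * lost update: in the kernel that is a vector left masked or
	 * unmasked against the driver's intent.
	 */
	printf("final mask: %#x (%s)\n", masked,
	       masked ? "update lost" : "no race seen this run");
	return 0;
}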
@@ -289,13 +290,31 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 		/* Don't touch the hardware now */
 	} else if (entry->msi_attrib.is_msix) {
 		void __iomem *base = pci_msix_desc_addr(entry);
+		bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT);
 
 		if (!base)
 			goto skip;
 
+		/*
+		 * The specification mandates that the entry is masked
+		 * when the message is modified:
+		 *
+		 * "If software changes the Address or Data value of an
+		 * entry while the entry is unmasked, the result is
+		 * undefined."
+		 */
+		if (unmasked)
+			__pci_msix_desc_mask_irq(entry, PCI_MSIX_ENTRY_CTRL_MASKBIT);
+
 		writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
 		writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
 		writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
+
+		if (unmasked)
+			__pci_msix_desc_mask_irq(entry, 0);
+
+		/* Ensure that the writes are visible in the device */
+		readl(base + PCI_MSIX_ENTRY_DATA);
 	} else {
 		int pos = dev->msi_cap;
 		u16 msgctl;
@@ -316,6 +335,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 			pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
 					      msg->data);
 		}
+		/* Ensure that the writes are visible in the device */
+		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
 	}
 
 skip:
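The two hunks above rely on the same hardware property: both MMIO and config-space writes are posted, so a read from the same device is issued afterwards to guarantee the new message has actually reached the device before the caller (for instance an interrupt-migration path) depends on it. For MSI-X the update is additionally bracketed with the per-vector mask bit, since the spec text quoted in the diff leaves modification of an unmasked entry undefined. A condensed sketch of the resulting discipline in kernel-style C; msix_write_msg() is a hypothetical helper name, and unlike the real code it does not keep the cached entry->masked in sync:

/* Sketch: update one MSI-X table entry per the spec's masking rule.
 * 'base' points at this vector's 16-byte entry in the MSI-X table.
 */
static void msix_write_msg(void __iomem *base, struct msi_msg *msg,
			   bool unmasked)
{
	/* Hide the update behind the per-vector mask bit. */
	if (unmasked)
		writel(PCI_MSIX_ENTRY_CTRL_MASKBIT,
		       base + PCI_MSIX_ENTRY_VECTOR_CTRL);

	writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
	writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
	writel(msg->data, base + PCI_MSIX_ENTRY_DATA);

	if (unmasked)
		writel(0, base + PCI_MSIX_ENTRY_VECTOR_CTRL);

	/* Writes are posted; a read from the same device flushes them. */
	readl(base + PCI_MSIX_ENTRY_DATA);
}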
@@ -636,21 +657,21 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
 	/* Configure MSI capability structure */
 	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
 	if (ret) {
-		msi_mask_irq(entry, mask, ~mask);
+		msi_mask_irq(entry, mask, 0);
 		free_msi_irqs(dev);
 		return ret;
 	}
 
 	ret = msi_verify_entries(dev);
 	if (ret) {
-		msi_mask_irq(entry, mask, ~mask);
+		msi_mask_irq(entry, mask, 0);
 		free_msi_irqs(dev);
 		return ret;
 	}
 
 	ret = populate_msi_sysfs(dev);
 	if (ret) {
-		msi_mask_irq(entry, mask, ~mask);
+		msi_mask_irq(entry, mask, 0);
 		free_msi_irqs(dev);
 		return ret;
 	}
@@ -691,6 +712,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 {
 	struct irq_affinity_desc *curmsk, *masks = NULL;
 	struct msi_desc *entry;
+	void __iomem *addr;
 	int ret, i;
 	int vec_count = pci_msix_vec_count(dev);
718
@@ -711,6 +733,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 
 		entry->msi_attrib.is_msix	= 1;
 		entry->msi_attrib.is_64		= 1;
+
 		if (entries)
 			entry->msi_attrib.entry_nr = entries[i].entry;
 		else
@@ -722,6 +745,10 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 		entry->msi_attrib.default_irq	= dev->irq;
 		entry->mask_base		= base;
 
+		addr = pci_msix_desc_addr(entry);
+		if (addr)
+			entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
 		list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
 		if (masks)
 			curmsk++;
@@ -732,26 +759,25 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 	return ret;
 }
 
-static void msix_program_entries(struct pci_dev *dev,
-				 struct msix_entry *entries)
+static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
 {
 	struct msi_desc *entry;
-	int i = 0;
-	void __iomem *desc_addr;
 
 	for_each_pci_msi_entry(entry, dev) {
-		if (entries)
-			entries[i++].vector = entry->irq;
+		if (entries) {
+			entries->vector = entry->irq;
+			entries++;
+		}
+	}
+}
 
-		desc_addr = pci_msix_desc_addr(entry);
-		if (desc_addr)
-			entry->masked = readl(desc_addr +
-					      PCI_MSIX_ENTRY_VECTOR_CTRL);
-		else
-			entry->masked = 0;
+static void msix_mask_all(void __iomem *base, int tsize)
+{
+	u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
+	int i;
 
-		msix_mask_irq(entry, 1);
-	}
+	for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
+		writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
 }
 
 /**
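With this split, msix_update_entries() is pure bookkeeping (it copies the allocated Linux IRQ numbers back into the caller's msix_entry array), while all hardware masking happens in msix_mask_all() before any entry is otherwise touched. The loop leans on the fixed MSI-X table layout, one 16-byte entry per vector. For reference, the entry offsets as the kernel's internal MSI header defines them; restated here as an assumption for readers without the tree at hand:

/* MSI-X table entry layout used by msix_mask_all() (as defined in the
 * kernel's drivers/pci/msi.h at the time of this change):
 */
#define PCI_MSIX_ENTRY_SIZE		16	/* bytes per vector entry */
#define PCI_MSIX_ENTRY_LOWER_ADDR	0	/* message address, low   */
#define PCI_MSIX_ENTRY_UPPER_ADDR	4	/* message address, high  */
#define PCI_MSIX_ENTRY_DATA		8	/* message data           */
#define PCI_MSIX_ENTRY_VECTOR_CTRL	12	/* per-vector control     */
#define PCI_MSIX_ENTRY_CTRL_MASKBIT	1	/* bit 0: vector masked   */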
@@ -768,22 +794,33 @@ static void msix_program_entries(struct pci_dev *dev,
 static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
 				int nvec, struct irq_affinity *affd)
 {
-	int ret;
-	u16 control;
 	void __iomem *base;
+	int ret, tsize;
+	u16 control;
 
-	/* Ensure MSI-X is disabled while it is set up */
-	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+	/*
+	 * Some devices require MSI-X to be enabled before the MSI-X
+	 * registers can be accessed. Mask all the vectors to prevent
+	 * interrupts coming in before they're fully set up.
+	 */
+	pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
+				    PCI_MSIX_FLAGS_ENABLE);
 
 	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
 	/* Request & Map MSI-X table region */
-	base = msix_map_region(dev, msix_table_size(control));
-	if (!base)
-		return -ENOMEM;
+	tsize = msix_table_size(control);
+	base = msix_map_region(dev, tsize);
+	if (!base) {
+		ret = -ENOMEM;
+		goto out_disable;
+	}
+
+	/* Ensure that all table entries are masked. */
+	msix_mask_all(base, tsize);
 
 	ret = msix_setup_entries(dev, base, entries, nvec, affd);
 	if (ret)
-		return ret;
+		goto out_disable;
 
 	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
 	if (ret)
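Note the inversion at the top of msix_capability_init(): instead of disabling MSI-X during setup, it is now enabled with the function-wide MASKALL bit set, because some devices refuse table accesses while MSI-X is disabled; MASKALL keeps every vector quiet until msix_mask_all() has set the per-vector mask bits. The table length is derived from the Message Control word, and the helper used for that is a one-line macro, restated here from drivers/pci/pci.h and include/uapi/linux/pci_regs.h:

/* MSI-X table size is encoded as N - 1 in the low 11 bits of the
 * Message Control register:
 */
#define PCI_MSIX_FLAGS_QSIZE	0x07FF		/* table size - 1 */
#define msix_table_size(flags)	((flags & PCI_MSIX_FLAGS_QSIZE) + 1)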
@@ -794,15 +831,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
 	if (ret)
 		goto out_free;
 
-	/*
-	 * Some devices require MSI-X to be enabled before we can touch the
-	 * MSI-X registers. We need to mask all the vectors to prevent
-	 * interrupts coming in before they're fully set up.
-	 */
-	pci_msix_clear_and_set_ctrl(dev, 0,
-				    PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
-
-	msix_program_entries(dev, entries);
+	msix_update_entries(dev, entries);
 
 	ret = populate_msi_sysfs(dev);
 	if (ret)
@@ -836,6 +865,9 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
 out_free:
 	free_msi_irqs(dev);
 
+out_disable:
+	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+
 	return ret;
 }
 
@@ -930,8 +962,7 @@ static void pci_msi_shutdown(struct pci_dev *dev)
 
 	/* Return the device with MSI unmasked as initial states */
 	mask = msi_mask(desc->msi_attrib.multi_cap);
-	/* Keep cached state to be restored */
-	__pci_msi_desc_mask_irq(desc, mask, ~mask);
+	msi_mask_irq(desc, mask, 0);
 
 	/* Restore dev->irq to its default pin-assertion IRQ */
 	dev->irq = desc->msi_attrib.default_irq;
@@ -1016,10 +1047,8 @@ static void pci_msix_shutdown(struct pci_dev *dev)
 	}
 
 	/* Return the device with MSI-X masked as initial states */
-	for_each_pci_msi_entry(entry, dev) {
-		/* Keep cached states to be restored */
+	for_each_pci_msi_entry(entry, dev)
 		__pci_msix_desc_mask_irq(entry, 1);
-	}
 
 	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
 	pci_intx_for_msi(dev, 1);