@@ -49,11 +49,25 @@ static irqreturn_t octep_vdpa_intr_handler(int irq, void *data)
 	struct octep_hw *oct_hw = data;
 	int i;
 
-	for (i = 0; i < oct_hw->nr_vring; i++) {
-		if (oct_hw->vqs[i].cb.callback && ioread32(oct_hw->vqs[i].cb_notify_addr)) {
-			/* Acknowledge the per queue notification to the device */
-			iowrite32(0, oct_hw->vqs[i].cb_notify_addr);
-			oct_hw->vqs[i].cb.callback(oct_hw->vqs[i].cb.private);
+	/* Each device has multiple interrupts (nb_irqs) shared among rings
+	 * (nr_vring). Device interrupts are mapped to the rings in a
+	 * round-robin fashion.
+	 *
+	 * For example, if nb_irqs = 8 and nr_vring = 64:
+	 * 0 -> 0, 8, 16, 24, 32, 40, 48, 56;
+	 * 1 -> 1, 9, 17, 25, 33, 41, 49, 57;
+	 * ...
+	 * 7 -> 7, 15, 23, 31, 39, 47, 55, 63;
+	 */
+
+	for (i = irq - oct_hw->irqs[0]; i < oct_hw->nr_vring; i += oct_hw->nb_irqs) {
+		if (ioread8(oct_hw->vqs[i].cb_notify_addr)) {
+			/* Acknowledge the per ring notification to the device */
+			iowrite8(0, oct_hw->vqs[i].cb_notify_addr);
+
+			if (likely(oct_hw->vqs[i].cb.callback))
+				oct_hw->vqs[i].cb.callback(oct_hw->vqs[i].cb.private);
+			break;
 		}
 	}
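A quick sanity check of the round-robin mapping described in the comment above, as a stand-alone user-space sketch (not driver code). Here idx plays the role of irq - oct_hw->irqs[0] in the handler, which assumes the MSI-X vector numbers returned by pci_irq_vector() are consecutive:

#include <stdio.h>

int main(void)
{
	const int nb_irqs = 8, nr_vring = 64;	/* values from the example above */
	int idx, i;

	for (idx = 0; idx < nb_irqs; idx++) {
		printf("%d ->", idx);
		/* Same stride the handler uses: start at the IRQ's own
		 * index, then step by the number of IRQs. */
		for (i = idx; i < nr_vring; i += nb_irqs)
			printf(" %d", i);
		printf("\n");
	}
	return 0;
}

The output reproduces the table in the comment. Note also that the new handler services only the first pending ring on its vector and then returns (break), rather than scanning every ring on each interrupt as the old code did.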
@@ -63,44 +77,53 @@ static irqreturn_t octep_vdpa_intr_handler(int irq, void *data)
 static void octep_free_irqs(struct octep_hw *oct_hw)
 {
 	struct pci_dev *pdev = oct_hw->pdev;
+	int irq;
+
+	if (!oct_hw->irqs)
+		return;
 
-	if (oct_hw->irq != -1) {
-		devm_free_irq(&pdev->dev, oct_hw->irq, oct_hw);
-		oct_hw->irq = -1;
+	for (irq = 0; irq < oct_hw->nb_irqs; irq++) {
+		if (!oct_hw->irqs[irq])
+			break;
+
+		devm_free_irq(&pdev->dev, oct_hw->irqs[irq], oct_hw);
 	}
+
 	pci_free_irq_vectors(pdev);
+	devm_kfree(&pdev->dev, oct_hw->irqs);
+	oct_hw->irqs = NULL;
 }
 
 static int octep_request_irqs(struct octep_hw *oct_hw)
 {
 	struct pci_dev *pdev = oct_hw->pdev;
-	int ret, irq;
+	int ret, irq, idx;
 
-	/* Currently HW device provisions one IRQ per VF, hence
-	 * allocate one IRQ for all virtqueues call interface.
-	 */
-	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+	oct_hw->irqs = devm_kcalloc(&pdev->dev, oct_hw->nb_irqs, sizeof(int), GFP_KERNEL);
+	if (!oct_hw->irqs)
+		return -ENOMEM;
+
+	ret = pci_alloc_irq_vectors(pdev, 1, oct_hw->nb_irqs, PCI_IRQ_MSIX);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to alloc msix vector");
 		return ret;
 	}
 
-	snprintf(oct_hw->vqs->msix_name, sizeof(oct_hw->vqs->msix_name),
-		 OCTEP_VDPA_DRIVER_NAME "-vf-%d", pci_iov_vf_id(pdev));
-
-	irq = pci_irq_vector(pdev, 0);
-	ret = devm_request_irq(&pdev->dev, irq, octep_vdpa_intr_handler, 0,
-			       oct_hw->vqs->msix_name, oct_hw);
-	if (ret) {
-		dev_err(&pdev->dev, "Failed to register interrupt handler\n");
-		goto free_irq_vec;
+	for (idx = 0; idx < oct_hw->nb_irqs; idx++) {
+		irq = pci_irq_vector(pdev, idx);
+		ret = devm_request_irq(&pdev->dev, irq, octep_vdpa_intr_handler, 0,
+				       dev_name(&pdev->dev), oct_hw);
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to register interrupt handler\n");
+			goto free_irqs;
+		}
+		oct_hw->irqs[idx] = irq;
 	}
-	oct_hw->irq = irq;
 
 	return 0;
 
-free_irq_vec:
-	pci_free_irq_vectors(pdev);
+free_irqs:
+	octep_free_irqs(oct_hw);
 	return ret;
 }
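These two functions rely on fields that replace the old scalar oct_hw->irq. The header change is not part of this diff, but based on the usage above it presumably looks like the sketch below; the element type of vqs and the exact field types are assumptions, only the names used in the hunks are taken from the code:

/* Sketch only: unrelated members elided, types assumed. */
struct octep_hw {
	struct pci_dev *pdev;
	struct octep_vring_info *vqs;	/* per-ring state, incl. cb_notify_addr */
	u32 nr_vring;			/* number of rings */
	int *irqs;			/* devm_kcalloc()'d vector numbers; 0 = not requested */
	u8 nb_irqs;			/* IRQs provisioned per VF */
};

Note that the teardown depends on devm_kcalloc() zero-initializing irqs[]: octep_free_irqs() stops at the first zero entry, which makes it safe to call from the free_irqs error path after a partially completed request loop.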
@@ -559,6 +582,7 @@ static void octep_vdpa_setup_task(struct work_struct *work)
 	struct device *dev = &pdev->dev;
 	struct octep_hw *oct_hw;
 	unsigned long timeout;
+	u64 val;
 	int ret;
 
 	oct_hw = &mgmt_dev->oct_hw;
@@ -590,6 +614,13 @@ static void octep_vdpa_setup_task(struct work_struct *work)
 	if (ret)
 		return;
 
+	val = readq(oct_hw->base[OCTEP_HW_MBOX_BAR] + OCTEP_VF_IN_CTRL(0));
+	oct_hw->nb_irqs = OCTEP_VF_IN_CTRL_RPVF(val);
+	if (!oct_hw->nb_irqs || oct_hw->nb_irqs > OCTEP_MAX_CB_INTR) {
+		dev_err(dev, "Invalid number of interrupts %d\n", oct_hw->nb_irqs);
+		goto unmap_region;
+	}
+
 	ret = octep_hw_caps_read(oct_hw, pdev);
 	if (ret < 0)
 		goto unmap_region;
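For context, OCTEP_VF_IN_CTRL_RPVF() extracts the rings-per-VF count from the input-control register read above. Its definition lives in the driver header, not in this diff; a plausible shape, with the bit range purely assumed for illustration, would be:

#include <linux/bitfield.h>

/* ASSUMPTION: the field position below is illustrative only; the
 * authoritative mask is defined in the driver header. */
#define OCTEP_VF_IN_CTRL_RPVF(val)	FIELD_GET(GENMASK_ULL(51, 48), (val))

Whatever the exact layout, the added check bounds nb_irqs to (0, OCTEP_MAX_CB_INTR] before any vectors are allocated, so a bogus register value fails setup early instead of over-allocating MSI-X vectors.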
@@ -768,12 +799,6 @@ static int octep_vdpa_pf_setup(struct octep_pf *octpf)
 		return -EINVAL;
 	}
 
-	if (OCTEP_EPF_RINFO_RPVF(val) != BIT_ULL(0)) {
-		val &= ~GENMASK_ULL(35, 32);
-		val |= BIT_ULL(32);
-		writeq(val, addr + OCTEP_EPF_RINFO(0));
-	}
-
 	len = pci_resource_len(pdev, OCTEP_HW_CAPS_BAR);
 
 	octpf->vf_stride = len / totalvfs;
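The removed PF-side clamp is what previously forced each VF down to a single ring interrupt: whenever the provisioned rings-per-VF differed from 1, it rewrote the field to 1 and wrote the register back. The GENMASK_ULL(35, 32) manipulation in the removed block lets us infer the field layout; the accessor macro already exists in the driver, and this is just its implied shape:

#include <linux/bitfield.h>

/* Inferred from the removed clamp: RPVF occupies bits 35:32 of
 * EPF_RINFO, so setting BIT_ULL(32) forced rings-per-VF to 1. */
#define OCTEP_EPF_RINFO_RPVF(val)	FIELD_GET(GENMASK_ULL(35, 32), (val))

With the clamp gone, the firmware-provisioned rings-per-VF value is left intact, and the bounds check added in octep_vdpa_setup_task() becomes the only guard on the per-VF interrupt count.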