@@ -46,12 +46,26 @@ bool vp_notify(struct virtqueue *vq)
 	return true;
 }
 
+/* Notify all slow path virtqueues on an interrupt. */
+static void vp_vring_slow_path_interrupt(int irq,
+					 struct virtio_pci_device *vp_dev)
+{
+	struct virtio_pci_vq_info *info;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vp_dev->lock, flags);
+	list_for_each_entry(info, &vp_dev->slow_virtqueues, node)
+		vring_interrupt(irq, info->vq);
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
+}
+
 /* Handle a configuration change: Tell driver if it wants to know. */
 static irqreturn_t vp_config_changed(int irq, void *opaque)
 {
 	struct virtio_pci_device *vp_dev = opaque;
 
 	virtio_config_changed(&vp_dev->vdev);
+	vp_vring_slow_path_interrupt(irq, vp_dev);
 	return IRQ_HANDLED;
 }
 
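Note: the new handler walks vp_dev->slow_virtqueues under vp_dev->lock. That list head is added to struct virtio_pci_device in virtio_pci_common.h by the header-side hunk of this change, which is not shown here. A minimal sketch of the assumed fields, for orientation only:

/* Sketch (assumed layout, not the full definition): the fields of
 * struct virtio_pci_device that vp_vring_slow_path_interrupt() relies on.
 * The actual definition lives in virtio_pci_common.h; the slow_virtqueues
 * member comes from the header-side hunk of this change.
 */
struct virtio_pci_device {
	struct virtio_device vdev;
	struct pci_dev *pci_dev;
	/* ... */
	/* Serializes access to both virtqueue lists below. */
	spinlock_t lock;
	/* Virtqueues serviced from the vq interrupt path. */
	struct list_head virtqueues;
	/* Slow path virtqueues serviced from the config change vector. */
	struct list_head slow_virtqueues;
	/* ... */
};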
@@ -174,6 +188,11 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 	return err;
 }
 
+static bool vp_is_slow_path_vector(u16 msix_vec)
+{
+	return msix_vec == VP_MSIX_CONFIG_VECTOR;
+}
+
 static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
 				     void (*callback)(struct virtqueue *vq),
 				     const char *name,
@@ -197,7 +216,10 @@ static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int in
 	info->vq = vq;
 	if (callback) {
 		spin_lock_irqsave(&vp_dev->lock, flags);
-		list_add(&info->node, &vp_dev->virtqueues);
+		if (!vp_is_slow_path_vector(msix_vec))
+			list_add(&info->node, &vp_dev->virtqueues);
+		else
+			list_add(&info->node, &vp_dev->slow_virtqueues);
 		spin_unlock_irqrestore(&vp_dev->lock, flags);
 	} else {
 		INIT_LIST_HEAD(&info->node);
@@ -245,7 +267,8 @@ void vp_del_vqs(struct virtio_device *vdev)
 		if (vp_dev->per_vq_vectors) {
 			int v = vp_dev->vqs[vq->index]->msix_vector;
 
-			if (v != VIRTIO_MSI_NO_VECTOR) {
+			if (v != VIRTIO_MSI_NO_VECTOR &&
+			    !vp_is_slow_path_vector(v)) {
 				int irq = pci_irq_vector(vp_dev->pci_dev, v);
 
 				irq_update_affinity_hint(irq, NULL);
@@ -289,13 +312,14 @@ void vp_del_vqs(struct virtio_device *vdev)
 
 enum vp_vq_vector_policy {
 	VP_VQ_VECTOR_POLICY_EACH,
+	VP_VQ_VECTOR_POLICY_SHARED_SLOW,
 	VP_VQ_VECTOR_POLICY_SHARED,
 };
 
 static struct virtqueue *
 vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
 		    vq_callback_t *callback, const char *name, bool ctx,
-		    int *allocated_vectors,
+		    bool slow_path, int *allocated_vectors,
 		    enum vp_vq_vector_policy vector_policy)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -305,16 +329,22 @@ vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
 
 	if (!callback)
 		msix_vec = VIRTIO_MSI_NO_VECTOR;
-	else if (vector_policy == VP_VQ_VECTOR_POLICY_EACH)
+	else if (vector_policy == VP_VQ_VECTOR_POLICY_EACH ||
+		 (vector_policy == VP_VQ_VECTOR_POLICY_SHARED_SLOW &&
+		 !slow_path))
 		msix_vec = (*allocated_vectors)++;
+	else if (vector_policy != VP_VQ_VECTOR_POLICY_EACH &&
+		 slow_path)
+		msix_vec = VP_MSIX_CONFIG_VECTOR;
 	else
 		msix_vec = VP_MSIX_VQ_VECTOR;
 	vq = vp_setup_vq(vdev, queue_idx, callback, name, ctx, msix_vec);
 	if (IS_ERR(vq))
 		return vq;
 
 	if (vector_policy == VP_VQ_VECTOR_POLICY_SHARED ||
-	    msix_vec == VIRTIO_MSI_NO_VECTOR)
+	    msix_vec == VIRTIO_MSI_NO_VECTOR ||
+	    vp_is_slow_path_vector(msix_vec))
 		return vq;
 
 	/* allocate per-vq irq if available and necessary */
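For readability, the vector choice made above can be summarized as a small decision helper. This is an illustration only; the helper name is hypothetical and the logic simply mirrors the if/else chain in the hunk:

/* Illustration only (hypothetical helper): how a vector is picked for one
 * virtqueue under each allocation policy after this change.
 */
static u16 example_pick_msix_vector(enum vp_vq_vector_policy policy,
				    bool has_callback, bool slow_path,
				    int *allocated_vectors)
{
	if (!has_callback)
		return VIRTIO_MSI_NO_VECTOR;	/* no interrupt needed */
	if (policy == VP_VQ_VECTOR_POLICY_EACH ||
	    (policy == VP_VQ_VECTOR_POLICY_SHARED_SLOW && !slow_path))
		return (*allocated_vectors)++;	/* dedicated vector */
	if (slow_path)
		return VP_MSIX_CONFIG_VECTOR;	/* share the config vector */
	return VP_MSIX_VQ_VECTOR;		/* one vector shared by all vqs */
}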
@@ -374,7 +404,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
 			continue;
 		}
 		vqs[i] = vp_find_one_vq_msix(vdev, queue_idx++, vqi->callback,
-					     vqi->name, vqi->ctx,
+					     vqi->name, vqi->ctx, false,
 					     &allocated_vectors, vector_policy);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
@@ -440,6 +470,13 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
 			       VP_VQ_VECTOR_POLICY_EACH, desc);
 	if (!err)
 		return 0;
+	/* Fallback: MSI-X with one shared vector for config and
+	 * slow path queues, one vector per queue for the rest.
+	 */
+	err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
+			       VP_VQ_VECTOR_POLICY_SHARED_SLOW, desc);
+	if (!err)
+		return 0;
 	/* Fallback: MSI-X with one vector for config, one shared for queues. */
 	err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
 			       VP_VQ_VECTOR_POLICY_SHARED, desc);
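The new fallback slots in between the fully per-vq attempt and the fully shared attempt, so a device that cannot supply one vector per queue plus one for config can still keep dedicated vectors for its fast path queues. A hypothetical illustration of the resulting vector counts, assuming one slow path virtqueue and four fast path virtqueues:

/* Hypothetical example: 1 slow path vq + 4 fast path vqs.
 *
 *   VP_VQ_VECTOR_POLICY_EACH:        vec 0 config, vec 1 slow vq,
 *                                    vec 2-5 fast vqs       -> 6 vectors
 *   VP_VQ_VECTOR_POLICY_SHARED_SLOW: vec 0 config + slow vq,
 *                                    vec 1-4 fast vqs       -> 5 vectors
 *   VP_VQ_VECTOR_POLICY_SHARED:      vec 0 config,
 *                                    vec 1 all vqs          -> 2 vectors
 */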
@@ -493,7 +530,8 @@ const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 
 	if (!vp_dev->per_vq_vectors ||
-	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
+	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR ||
+	    vp_is_slow_path_vector(vp_dev->vqs[index]->msix_vector))
 		return NULL;
 
 	return pci_irq_get_affinity(vp_dev->pci_dev,
@@ -601,6 +639,7 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
 	vp_dev->vdev.dev.release = virtio_pci_release_dev;
 	vp_dev->pci_dev = pci_dev;
 	INIT_LIST_HEAD(&vp_dev->virtqueues);
+	INIT_LIST_HEAD(&vp_dev->slow_virtqueues);
 	spin_lock_init(&vp_dev->lock);
 
 	/* enable the device */