@@ -16,6 +16,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/spinlock.h>
 #include <linux/virtio_config.h>
 #include <uapi/linux/virtio_gpio.h>
 #include <uapi/linux/virtio_ids.h>
@@ -28,12 +29,30 @@ struct virtio_gpio_line {
 	unsigned int rxlen;
 };
 
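+/*
+ * Per-line interrupt state. 'disabled' and 'masked' mirror the irqchip
+ * state, 'queued' tracks whether a buffer pair is outstanding on the
+ * eventq, and 'update_pending'/'queue_pending' record work deferred until
+ * irq_bus_sync_unlock(), where sleeping is allowed. The request/response
+ * buffers are cacheline-aligned since they are handed to the device
+ * separately.
+ */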
+struct vgpio_irq_line {
+	u8 type;
+	bool disabled;
+	bool masked;
+	bool queued;
+	bool update_pending;
+	bool queue_pending;
+
+	struct virtio_gpio_irq_request ireq ____cacheline_aligned;
+	struct virtio_gpio_irq_response ires ____cacheline_aligned;
+};
+
 struct virtio_gpio {
 	struct virtio_device *vdev;
 	struct mutex lock; /* Protects virtqueue operation */
 	struct gpio_chip gc;
 	struct virtio_gpio_line *lines;
 	struct virtqueue *request_vq;
+
+	/* irq support */
+	struct virtqueue *event_vq;
+	struct mutex irq_lock; /* Protects irq operation */
+	raw_spinlock_t eventq_lock; /* Protects queuing of the buffer */
+	struct vgpio_irq_line *irq_lines;
 };
 
 static int _virtio_gpio_req(struct virtio_gpio *vgpio, u16 type, u16 gpio,
@@ -186,6 +205,238 @@ static void virtio_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
 	virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value, NULL);
 }
 
+/* Interrupt handling */
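+/*
+ * Queue the per-line request/response buffer pair on the eventq. The device
+ * keeps the pair and completes (returns) it when the interrupt fires.
+ * GFP_ATOMIC is needed as every caller holds eventq_lock.
+ */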
+static void virtio_gpio_irq_prepare(struct virtio_gpio *vgpio, u16 gpio)
+{
+	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[gpio];
+	struct virtio_gpio_irq_request *ireq = &irq_line->ireq;
+	struct virtio_gpio_irq_response *ires = &irq_line->ires;
+	struct scatterlist *sgs[2], req_sg, res_sg;
+	int ret;
+
+	if (WARN_ON(irq_line->queued || irq_line->masked || irq_line->disabled))
+		return;
+
+	ireq->gpio = cpu_to_le16(gpio);
+	sg_init_one(&req_sg, ireq, sizeof(*ireq));
+	sg_init_one(&res_sg, ires, sizeof(*ires));
+	sgs[0] = &req_sg;
+	sgs[1] = &res_sg;
+
+	ret = virtqueue_add_sgs(vgpio->event_vq, sgs, 1, 1, irq_line, GFP_ATOMIC);
+	if (ret) {
+		dev_err(&vgpio->vdev->dev, "failed to add request to eventq\n");
+		return;
+	}
+
+	irq_line->queued = true;
+	virtqueue_kick(vgpio->event_vq);
+}
+
+static void virtio_gpio_irq_enable(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
+	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];
+
+	raw_spin_lock(&vgpio->eventq_lock);
+	irq_line->disabled = false;
+	irq_line->masked = false;
+	irq_line->queue_pending = true;
+	raw_spin_unlock(&vgpio->eventq_lock);
+
+	irq_line->update_pending = true;
+}
+
+static void virtio_gpio_irq_disable(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
+	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];
+
+	raw_spin_lock(&vgpio->eventq_lock);
+	irq_line->disabled = true;
+	irq_line->masked = true;
+	irq_line->queue_pending = false;
+	raw_spin_unlock(&vgpio->eventq_lock);
+
+	irq_line->update_pending = true;
+}
+
+static void virtio_gpio_irq_mask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
+	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];
+
+	raw_spin_lock(&vgpio->eventq_lock);
+	irq_line->masked = true;
+	raw_spin_unlock(&vgpio->eventq_lock);
+}
+
+static void virtio_gpio_irq_unmask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
+	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];
+
+	raw_spin_lock(&vgpio->eventq_lock);
+	irq_line->masked = false;
+
+	/* Queue the buffer unconditionally on unmask */
+	virtio_gpio_irq_prepare(vgpio, d->hwirq);
+	raw_spin_unlock(&vgpio->eventq_lock);
+}
+
+static int virtio_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
+	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];
+
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+		type = VIRTIO_GPIO_IRQ_TYPE_EDGE_RISING;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		type = VIRTIO_GPIO_IRQ_TYPE_EDGE_FALLING;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		type = VIRTIO_GPIO_IRQ_TYPE_EDGE_BOTH;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		type = VIRTIO_GPIO_IRQ_TYPE_LEVEL_LOW;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		type = VIRTIO_GPIO_IRQ_TYPE_LEVEL_HIGH;
+		break;
+	default:
+		dev_err(&vgpio->vdev->dev, "unsupported irq type: %u\n", type);
+		return -EINVAL;
+	}
+
+	irq_line->type = type;
+	irq_line->update_pending = true;
+
+	return 0;
+}
+
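+/*
+ * irq_enable/irq_disable/irq_set_type run in atomic context and cannot issue
+ * the (sleeping) virtio transfer themselves; they only record what changed.
+ * The irq core brackets them with irq_bus_lock()/irq_bus_sync_unlock(), and
+ * the deferred update is sent to the device from the sync_unlock path below.
+ */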
+static void virtio_gpio_irq_bus_lock(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
+
+	mutex_lock(&vgpio->irq_lock);
+}
+
+static void virtio_gpio_irq_bus_sync_unlock(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
+	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];
+	u8 type = irq_line->disabled ? VIRTIO_GPIO_IRQ_TYPE_NONE : irq_line->type;
+	unsigned long flags;
+
+	if (irq_line->update_pending) {
+		irq_line->update_pending = false;
+		virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_IRQ_TYPE, d->hwirq, type,
+				NULL);
+
+		/* Queue the buffer only after interrupt is enabled */
+		raw_spin_lock_irqsave(&vgpio->eventq_lock, flags);
+		if (irq_line->queue_pending) {
+			irq_line->queue_pending = false;
+			virtio_gpio_irq_prepare(vgpio, d->hwirq);
+		}
+		raw_spin_unlock_irqrestore(&vgpio->eventq_lock, flags);
+	}
+
+	mutex_unlock(&vgpio->irq_lock);
+}
+
+static struct irq_chip vgpio_irq_chip = {
+	.name = "virtio-gpio",
+	.irq_enable = virtio_gpio_irq_enable,
+	.irq_disable = virtio_gpio_irq_disable,
+	.irq_mask = virtio_gpio_irq_mask,
+	.irq_unmask = virtio_gpio_irq_unmask,
+	.irq_set_type = virtio_gpio_irq_set_type,
+
+	/* These are required to implement irqchip for slow busses */
+	.irq_bus_lock = virtio_gpio_irq_bus_lock,
+	.irq_bus_sync_unlock = virtio_gpio_irq_bus_sync_unlock,
+};
+
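+/*
+ * Decide whether a returned eventq buffer should be propagated as an
+ * interrupt. The device returns buffers with VIRTIO_GPIO_IRQ_STATUS_INVALID
+ * when the line was disabled after the buffer was queued; if the line has
+ * been re-enabled in the meantime, the buffer is simply requeued.
+ */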
+static bool ignore_irq(struct virtio_gpio *vgpio, int gpio,
+		       struct vgpio_irq_line *irq_line)
+{
+	bool ignore = false;
+
+	raw_spin_lock(&vgpio->eventq_lock);
+	irq_line->queued = false;
+
+	/* Interrupt is disabled currently */
+	if (irq_line->masked || irq_line->disabled) {
+		ignore = true;
+		goto unlock;
+	}
+
+	/*
+	 * Buffer is returned as the interrupt was disabled earlier, but is
+	 * enabled again now. Requeue the buffers.
+	 */
+	if (irq_line->ires.status == VIRTIO_GPIO_IRQ_STATUS_INVALID) {
+		virtio_gpio_irq_prepare(vgpio, gpio);
+		ignore = true;
+		goto unlock;
+	}
+
+	if (WARN_ON(irq_line->ires.status != VIRTIO_GPIO_IRQ_STATUS_VALID))
+		ignore = true;
+
+unlock:
+	raw_spin_unlock(&vgpio->eventq_lock);
+
+	return ignore;
+}
+
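+/*
+ * eventq callback: runs once per returned buffer. The GPIO line is recovered
+ * from the buffer's offset within the irq_lines array rather than from data
+ * written by the device, and the interrupt is then injected into the irq
+ * domain with generic_handle_domain_irq().
+ */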
+static void virtio_gpio_event_vq(struct virtqueue *vq)
+{
+	struct virtio_gpio *vgpio = vq->vdev->priv;
+	struct device *dev = &vgpio->vdev->dev;
+	struct vgpio_irq_line *irq_line;
+	int gpio, ret;
+	unsigned int len;
+
+	while (true) {
+		irq_line = virtqueue_get_buf(vgpio->event_vq, &len);
+		if (!irq_line)
+			break;
+
+		if (len != sizeof(irq_line->ires)) {
+			dev_err(dev, "irq with incorrect length (%u : %u)\n",
+				len, (unsigned int)sizeof(irq_line->ires));
+			continue;
+		}
+
+		/*
+		 * Find GPIO line number from the offset of irq_line within the
+		 * irq_lines block. We can also get GPIO number from
+		 * irq-request, but better not to rely on a buffer returned by
+		 * remote.
+		 */
+		gpio = irq_line - vgpio->irq_lines;
+		WARN_ON(gpio >= vgpio->gc.ngpio);
+
+		if (unlikely(ignore_irq(vgpio, gpio, irq_line)))
+			continue;
+
+		ret = generic_handle_domain_irq(vgpio->gc.irq.domain, gpio);
+		if (ret)
+			dev_err(dev, "failed to handle interrupt: %d\n", ret);
+	}
+}
+
 static void virtio_gpio_request_vq(struct virtqueue *vq)
 {
 	struct virtio_gpio_line *line;
@@ -210,26 +461,39 @@ static void virtio_gpio_free_vqs(struct virtio_device *vdev)
 static int virtio_gpio_alloc_vqs(struct virtio_gpio *vgpio,
 				 struct virtio_device *vdev)
 {
-	const char * const names[] = { "requestq" };
+	const char * const names[] = { "requestq", "eventq" };
 	vq_callback_t *cbs[] = {
 		virtio_gpio_request_vq,
+		virtio_gpio_event_vq,
 	};
-	struct virtqueue *vqs[1] = { NULL };
+	struct virtqueue *vqs[2] = { NULL, NULL };
 	int ret;
 
-	ret = virtio_find_vqs(vdev, 1, vqs, cbs, names, NULL);
+	ret = virtio_find_vqs(vdev, vgpio->irq_lines ? 2 : 1, vqs, cbs, names, NULL);
 	if (ret) {
 		dev_err(&vdev->dev, "failed to find vqs: %d\n", ret);
 		return ret;
 	}
 
 	if (!vqs[0]) {
 		dev_err(&vdev->dev, "failed to find requestq vq\n");
-		return -ENODEV;
+		goto out;
 	}
 	vgpio->request_vq = vqs[0];
 
+	if (vgpio->irq_lines && !vqs[1]) {
+		dev_err(&vdev->dev, "failed to find eventq vq\n");
+		goto out;
+	}
+	vgpio->event_vq = vqs[1];
+
 	return 0;
+
+out:
+	if (vqs[0] || vqs[1])
+		virtio_gpio_free_vqs(vdev);
+
+	return -ENODEV;
 }
 
 static const char **virtio_gpio_get_names(struct virtio_gpio *vgpio,
@@ -325,6 +589,30 @@ static int virtio_gpio_probe(struct virtio_device *vdev)
 	vgpio->gc.owner = THIS_MODULE;
 	vgpio->gc.can_sleep = true;
 
+	/* Interrupt support */
+	if (virtio_has_feature(vdev, VIRTIO_GPIO_F_IRQ)) {
+		vgpio->irq_lines = devm_kcalloc(dev, ngpio, sizeof(*vgpio->irq_lines), GFP_KERNEL);
+		if (!vgpio->irq_lines)
+			return -ENOMEM;
+
+		/* The event comes from the outside so no parent handler */
+		vgpio->gc.irq.parent_handler = NULL;
+		vgpio->gc.irq.num_parents = 0;
+		vgpio->gc.irq.parents = NULL;
+		vgpio->gc.irq.default_type = IRQ_TYPE_NONE;
+		vgpio->gc.irq.handler = handle_level_irq;
+		vgpio->gc.irq.chip = &vgpio_irq_chip;
+
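+		/*
+		 * All lines start out disabled and masked; their trigger type
+		 * is pushed to the device only once a consumer configures the
+		 * interrupt via the irqchip callbacks above.
+		 */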
+		for (i = 0; i < ngpio; i++) {
+			vgpio->irq_lines[i].type = VIRTIO_GPIO_IRQ_TYPE_NONE;
+			vgpio->irq_lines[i].disabled = true;
+			vgpio->irq_lines[i].masked = true;
+		}
+
+		mutex_init(&vgpio->irq_lock);
+		raw_spin_lock_init(&vgpio->eventq_lock);
+	}
+
 	ret = virtio_gpio_alloc_vqs(vgpio, vdev);
 	if (ret)
 		return ret;
@@ -357,7 +645,13 @@ static const struct virtio_device_id id_table[] = {
 };
 MODULE_DEVICE_TABLE(virtio, id_table);
 
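+/*
+ * Advertising VIRTIO_GPIO_F_IRQ here lets the virtio core negotiate the
+ * feature with the device; probe() only wires up the irqchip when
+ * virtio_has_feature() confirms the negotiation succeeded.
+ */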
+static const unsigned int features[] = {
+	VIRTIO_GPIO_F_IRQ,
+};
+
 static struct virtio_driver virtio_gpio_driver = {
+	.feature_table = features,
+	.feature_table_size = ARRAY_SIZE(features),
 	.id_table = id_table,
 	.probe = virtio_gpio_probe,
 	.remove = virtio_gpio_remove,