gpio: virtio: Add IRQ support
This patch adds IRQ support to the virtio GPIO driver. Note that it uses
the irq_bus_lock()/irq_bus_sync_unlock() callbacks, since the underlying
operations go over virtio and may sleep. Notifications on the eventq are
likewise processed from a work item, so that sleepable operations are
allowed there as well.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
vireshk authored and intel-lab-lkp committed Aug 3, 2021
1 parent 8b6c276 commit 8dd9cb7
Showing 3 changed files with 303 additions and 4 deletions.
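For context, and not part of this commit: below is a minimal consumer-side sketch of how a guest driver could use one of these interrupts once VIRTIO_GPIO_F_IRQ has been negotiated. The device, descriptor, handler and label names (my_line_handler, my_setup, "demo-irq") are illustrative assumptions; the only real APIs used are gpiod_to_irq() and devm_request_threaded_irq(). Because this irqchip sleeps via the bus-lock callbacks, a threaded handler is the natural fit.

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

/* Runs in thread context: sleeping (e.g. further virtio requests) is allowed. */
static irqreturn_t my_line_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int my_setup(struct device *dev, struct gpio_desc *desc)
{
	/* Resolved through the irq domain gpiolib creates for this chip. */
	int irq = gpiod_to_irq(desc);

	if (irq < 0)
		return irq;

	/*
	 * A rising-edge trigger ends up as VIRTIO_GPIO_IRQ_TYPE_EDGE_RISING in
	 * virtio_gpio_irq_set_type(); IRQF_ONESHOT is required because no
	 * hard-irq handler is supplied.
	 */
	return devm_request_threaded_irq(dev, irq, NULL, my_line_handler,
					 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					 "demo-irq", NULL);
}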
1 change: 1 addition & 0 deletions drivers/gpio/Kconfig
@@ -1672,6 +1672,7 @@ config GPIO_MOCKUP
config GPIO_VIRTIO
tristate "VirtIO GPIO support"
depends on VIRTIO
select GPIOLIB_IRQCHIP
help
Say Y here to enable guest support for virtio-based GPIO controllers.

281 changes: 277 additions & 4 deletions drivers/gpio/gpio-virtio.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/virtio_config.h>
#include <linux/workqueue.h>
#include <uapi/linux/virtio_gpio.h>
#include <uapi/linux/virtio_ids.h>

@@ -28,13 +29,29 @@ struct virtio_gpio_line {
unsigned int rxlen;
};

struct vgpio_irq_line {
u8 type;
bool masked;
bool update_pending;
bool queued;

struct virtio_gpio_irq_request ireq;
struct virtio_gpio_irq_response ires;
};

struct virtio_gpio {
struct virtio_device *vdev;
struct mutex lock; /* Protects virtqueue operation */
struct gpio_chip gc;
struct virtio_gpio_config config;
struct virtio_gpio_line *lines;
struct virtqueue *request_vq;

/* fields for irq support */
struct virtqueue *event_vq;
struct mutex irq_lock; /* Protects irq operation */
struct work_struct work;
struct vgpio_irq_line *irq_lines;
};

static int _virtio_gpio_req(struct virtio_gpio *vgpio, u16 type, u16 gpio,
@@ -187,6 +204,220 @@ static void virtio_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value, NULL);
}

/* Interrupt handling */
static void virtio_gpio_irq_prepare(struct virtio_gpio *vgpio, u16 gpio)
{
struct vgpio_irq_line *irq_line = &vgpio->irq_lines[gpio];
struct virtio_gpio_irq_request *ireq = &irq_line->ireq;
struct virtio_gpio_irq_response *ires = &irq_line->ires;
struct scatterlist *sgs[2], req_sg, res_sg;
int ret;

ireq->gpio = cpu_to_le16(gpio);
sg_init_one(&req_sg, ireq, sizeof(*ireq));
sg_init_one(&res_sg, ires, sizeof(*ires));
sgs[0] = &req_sg;
sgs[1] = &res_sg;

ret = virtqueue_add_sgs(vgpio->event_vq, sgs, 1, 1, irq_line, GFP_KERNEL);
if (ret) {
dev_err(&vgpio->vdev->dev, "failed to add request to eventq\n");
return;
}

WARN_ON(irq_line->queued);

irq_line->queued = true;
virtqueue_kick(vgpio->event_vq);
}

static void virtio_gpio_irq_eoi(struct irq_data *d)
{
/*
* Queue buffers, by calling virtio_gpio_irq_prepare(), from
* virtio_gpio_event_vq() itself, after taking into consideration the
* masking status of the interrupt.
*/
}

static void virtio_gpio_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct virtio_gpio *vgpio = gpiochip_get_data(gc);
struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];

irq_line->masked = true;
irq_line->update_pending = true;
}

static void virtio_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct virtio_gpio *vgpio = gpiochip_get_data(gc);
struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];

irq_line->masked = false;
irq_line->update_pending = true;
}

static int virtio_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct virtio_gpio *vgpio = gpiochip_get_data(gc);
struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];

switch (type) {
case IRQ_TYPE_NONE:
type = VIRTIO_GPIO_IRQ_TYPE_NONE;
break;
case IRQ_TYPE_EDGE_RISING:
type = VIRTIO_GPIO_IRQ_TYPE_EDGE_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
type = VIRTIO_GPIO_IRQ_TYPE_EDGE_FALLING;
break;
case IRQ_TYPE_EDGE_BOTH:
type = VIRTIO_GPIO_IRQ_TYPE_EDGE_BOTH;
break;
case IRQ_TYPE_LEVEL_LOW:
type = VIRTIO_GPIO_IRQ_TYPE_LEVEL_LOW;
break;
case IRQ_TYPE_LEVEL_HIGH:
type = VIRTIO_GPIO_IRQ_TYPE_LEVEL_HIGH;
break;
default:
dev_err(&vgpio->vdev->dev, "unsupported irq type: %u\n", type);
return -EINVAL;
}

irq_line->type = type;
irq_line->update_pending = true;

return 0;
}

static void update_irq_type(struct virtio_gpio *vgpio, u16 gpio, u8 type)
{
virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_IRQ_TYPE, gpio, type, NULL);
}

static void virtio_gpio_irq_bus_lock(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct virtio_gpio *vgpio = gpiochip_get_data(gc);

mutex_lock(&vgpio->irq_lock);
}

static void virtio_gpio_irq_bus_sync_unlock(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct virtio_gpio *vgpio = gpiochip_get_data(gc);
int gpio = d->hwirq;
struct vgpio_irq_line *irq_line = &vgpio->irq_lines[gpio];

if (unlikely(!irq_line->update_pending))
goto out;

if (irq_line->masked) {
update_irq_type(vgpio, gpio, VIRTIO_GPIO_IRQ_TYPE_NONE);
} else {
update_irq_type(vgpio, gpio, irq_line->type);
virtio_gpio_irq_prepare(vgpio, gpio);
}

irq_line->update_pending = false;

out:
mutex_unlock(&vgpio->irq_lock);
}

static struct irq_chip vgpio_irq_chip = {
.name = "virtio-gpio",
.irq_eoi = virtio_gpio_irq_eoi,
.irq_mask = virtio_gpio_irq_mask,
.irq_unmask = virtio_gpio_irq_unmask,
.irq_set_type = virtio_gpio_irq_set_type,

/* These are required to implement irqchip for slow busses */
.irq_bus_lock = virtio_gpio_irq_bus_lock,
.irq_bus_sync_unlock = virtio_gpio_irq_bus_sync_unlock,
};

static void vgpio_work_handler(struct work_struct *work)
{
struct virtio_gpio *vgpio = container_of(work, struct virtio_gpio,
work);
struct device *dev = &vgpio->vdev->dev;
struct vgpio_irq_line *irq_line;
int irq, gpio, ret;
unsigned int len;

mutex_lock(&vgpio->irq_lock);

while (true) {
irq_line = virtqueue_get_buf(vgpio->event_vq, &len);
if (!irq_line)
break;

if (len != sizeof(irq_line->ires)) {
dev_err(dev, "irq with incorrect length (%u : %lu)\n",
len, sizeof(irq_line->ires));
continue;
}

WARN_ON(!irq_line->queued);
irq_line->queued = false;

/* Buffer is returned after interrupt is masked */
if (irq_line->ires.status == VIRTIO_GPIO_IRQ_STATUS_INVALID)
continue;

if (WARN_ON(irq_line->ires.status != VIRTIO_GPIO_IRQ_STATUS_VALID))
continue;

/*
* Find GPIO line number from the offset of irq_line within the
* irq_lines block. We can also get GPIO number from
* irq-request, but better not rely on a value returned by
* remote.
*/
gpio = irq_line - vgpio->irq_lines;
WARN_ON(gpio >= vgpio->config.ngpio);

irq = irq_find_mapping(vgpio->gc.irq.domain, gpio);
WARN_ON(!irq);

local_irq_disable();
ret = generic_handle_irq(irq);
local_irq_enable();

if (ret)
dev_err(dev, "failed to handle interrupt: %d\n", ret);

/* The interrupt may have been disabled by now */
if (irq_line->update_pending && irq_line->masked)
update_irq_type(vgpio, gpio, VIRTIO_GPIO_IRQ_TYPE_NONE);
else
virtio_gpio_irq_prepare(vgpio, gpio);

irq_line->update_pending = false;
}

mutex_unlock(&vgpio->irq_lock);
}

static void virtio_gpio_event_vq(struct virtqueue *vq)
{
struct virtio_gpio *vgpio = vq->vdev->priv;

/*
* We can't initiate virtio-gpio operations from hard irq context, as
* they need sleep-able context.
*/
schedule_work(&vgpio->work);
}

static void virtio_gpio_request_vq(struct virtqueue *vq)
{
struct virtio_gpio_line *line;
@@ -211,26 +442,39 @@ static void virtio_gpio_free_vqs(struct virtio_device *vdev)
static int virtio_gpio_alloc_vqs(struct virtio_gpio *vgpio,
struct virtio_device *vdev)
{
const char * const names[] = { "requestq" };
const char * const names[] = { "requestq", "eventq" };
vq_callback_t *cbs[] = {
virtio_gpio_request_vq,
virtio_gpio_event_vq,
};
struct virtqueue *vqs[1] = { NULL };
struct virtqueue *vqs[2] = { NULL, NULL };
int ret;

ret = virtio_find_vqs(vdev, 1, vqs, cbs, names, NULL);
ret = virtio_find_vqs(vdev, vgpio->irq_lines ? 2 : 1, vqs, cbs, names, NULL);
if (ret) {
dev_err(&vdev->dev, "failed to find vqs: %d\n", ret);
return ret;
}

if (!vqs[0]) {
dev_err(&vdev->dev, "failed to find requestq vq\n");
return -ENODEV;
goto out;
}
vgpio->request_vq = vqs[0];

if (vgpio->irq_lines && !vqs[1]) {
dev_err(&vdev->dev, "failed to find eventq vq\n");
goto out;
}
vgpio->event_vq = vqs[1];

return 0;

out:
if (vqs[0] || vqs[1])
virtio_gpio_free_vqs(vdev);

return -ENODEV;
}

static const char **virtio_gpio_get_names(struct virtio_gpio *vgpio)
@@ -326,6 +570,29 @@ static int virtio_gpio_probe(struct virtio_device *vdev)
vgpio->gc.owner = THIS_MODULE;
vgpio->gc.can_sleep = true;

/* Interrupt support */
if (virtio_has_feature(vdev, VIRTIO_GPIO_F_IRQ)) {
vgpio->irq_lines = devm_kcalloc(dev, config->ngpio,
sizeof(*vgpio->irq_lines),
GFP_KERNEL);
if (!vgpio->irq_lines)
return -ENOMEM;

/* The event comes from the outside so no parent handler */
vgpio->gc.irq.parent_handler = NULL;
vgpio->gc.irq.num_parents = 0;
vgpio->gc.irq.parents = NULL;
vgpio->gc.irq.default_type = IRQ_TYPE_NONE;
vgpio->gc.irq.handler = handle_fasteoi_irq;
vgpio->gc.irq.chip = &vgpio_irq_chip;

for (i = 0; i < config->ngpio; i++)
vgpio->irq_lines[i].type = VIRTIO_GPIO_IRQ_TYPE_NONE;

mutex_init(&vgpio->irq_lock);
INIT_WORK(&vgpio->work, vgpio_work_handler);
}

ret = virtio_gpio_alloc_vqs(vgpio, vdev);
if (ret)
return ret;
@@ -358,7 +625,13 @@ static const struct virtio_device_id id_table[] = {
};
MODULE_DEVICE_TABLE(virtio, id_table);

static const unsigned int features[] = {
VIRTIO_GPIO_F_IRQ,
};

static struct virtio_driver virtio_gpio_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.id_table = id_table,
.probe = virtio_gpio_probe,
.remove = virtio_gpio_remove,
