Merge tag 'vfio-v4.17-rc1' of git://github.com/awilliam/linux-vfio
Pull VFIO updates from Alex Williamson:

 - Adopt iommu_unmap_fast() interface to type1 backend
   (Suravee Suthikulpanit)

 - mdev sample driver fixup (Shunyong Yang)

 - More efficient PFN mapping handling in type1 backend
   (Jason Cai)

 - VFIO device ioeventfd interface (Alex Williamson)

 - Tag new vfio-platform sub-maintainer (Alex Williamson)

* tag 'vfio-v4.17-rc1' of git://github.com/awilliam/linux-vfio:
  MAINTAINERS: vfio/platform: Update sub-maintainer
  vfio/pci: Add ioeventfd support
  vfio/pci: Use endian neutral helpers
  vfio/pci: Pull BAR mapping setup from read-write path
  vfio/type1: Improve memory pinning process for raw PFN mapping
  vfio-mdev/samples: change RDI interrupt condition
  vfio/type1: Adopt fast IOTLB flush interface when unmap IOVAs
torvalds committed Apr 7, 2018
2 parents 016c6f2 + da91471 commit f605ba9
Showing 7 changed files with 377 additions and 43 deletions.
2 changes: 1 addition & 1 deletion MAINTAINERS
@@ -14797,7 +14797,7 @@ F: include/linux/mdev.h
F: samples/vfio-mdev/

VFIO PLATFORM DRIVER
M: Baptiste Reynal <b.reynal@virtualopensystems.com>
M: Eric Auger <eric.auger@redhat.com>
L: kvm@vger.kernel.org
S: Maintained
F: drivers/vfio/platform/
35 changes: 35 additions & 0 deletions drivers/vfio/pci/vfio_pci.c
@@ -302,6 +302,7 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
struct vfio_pci_dummy_resource *dummy_res, *tmp;
struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp;
int i, bar;

/* Stop the device from further DMA */
@@ -311,6 +312,15 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
VFIO_IRQ_SET_ACTION_TRIGGER,
vdev->irq_type, 0, 0, NULL);

/* Device closed, don't need mutex here */
list_for_each_entry_safe(ioeventfd, ioeventfd_tmp,
&vdev->ioeventfds_list, next) {
vfio_virqfd_disable(&ioeventfd->virqfd);
list_del(&ioeventfd->next);
kfree(ioeventfd);
}
vdev->ioeventfds_nr = 0;

vdev->virq_disabled = false;

for (i = 0; i < vdev->num_regions; i++)
@@ -1009,6 +1019,28 @@ static long vfio_pci_ioctl(void *device_data,

kfree(groups);
return ret;
} else if (cmd == VFIO_DEVICE_IOEVENTFD) {
struct vfio_device_ioeventfd ioeventfd;
int count;

minsz = offsetofend(struct vfio_device_ioeventfd, fd);

if (copy_from_user(&ioeventfd, (void __user *)arg, minsz))
return -EFAULT;

if (ioeventfd.argsz < minsz)
return -EINVAL;

if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
return -EINVAL;

count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;

if (hweight8(count) != 1 || ioeventfd.fd < -1)
return -EINVAL;

return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
ioeventfd.data, count, ioeventfd.fd);
}

return -ENOTTY;
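
For context, here is a hedged userspace sketch of how the new VFIO_DEVICE_IOEVENTFD ioctl handled above might be driven. It is not part of this commit: the helper name register_doorbell, the 0x40 register offset, and the 0x1 payload are illustrative assumptions, and device_fd/bar0_offset are assumed to come from the usual VFIO_GROUP_GET_DEVICE_FD and VFIO_DEVICE_GET_REGION_INFO calls.

#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Ask the kernel to write a 32-bit doorbell value whenever efd is signalled. */
static int register_doorbell(int device_fd, __u64 bar0_offset)
{
	struct vfio_device_ioeventfd ioeventfd;
	int efd;

	efd = eventfd(0, EFD_CLOEXEC);
	if (efd < 0)
		return -1;

	ioeventfd.argsz  = sizeof(ioeventfd);
	ioeventfd.flags  = VFIO_DEVICE_IOEVENTFD_32;	/* exactly one size bit */
	ioeventfd.offset = bar0_offset + 0x40;		/* hypothetical doorbell register */
	ioeventfd.data   = 0x1;				/* value written on each signal */
	ioeventfd.fd     = efd;

	if (ioctl(device_fd, VFIO_DEVICE_IOEVENTFD, &ioeventfd)) {
		close(efd);
		return -1;
	}

	return efd;
}

Signalling the eventfd then lets the kernel-side handler (vfio_pci_ioeventfd_handler in drivers/vfio/pci/vfio_pci_rdwr.c below) perform the MMIO write without trapping back to userspace.
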
@@ -1171,6 +1203,8 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vdev->irq_type = VFIO_PCI_NUM_IRQS;
mutex_init(&vdev->igate);
spin_lock_init(&vdev->irqlock);
mutex_init(&vdev->ioeventfds_lock);
INIT_LIST_HEAD(&vdev->ioeventfds_list);

ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
if (ret) {
@@ -1212,6 +1246,7 @@ static void vfio_pci_remove(struct pci_dev *pdev)

vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
kfree(vdev->region);
mutex_destroy(&vdev->ioeventfds_lock);
kfree(vdev);

if (vfio_pci_is_vga(pdev)) {
19 changes: 19 additions & 0 deletions drivers/vfio/pci/vfio_pci_private.h
@@ -29,6 +29,19 @@
#define PCI_CAP_ID_INVALID 0xFF /* default raw access */
#define PCI_CAP_ID_INVALID_VIRT 0xFE /* default virt access */

/* Cap maximum number of ioeventfds per device (arbitrary) */
#define VFIO_PCI_IOEVENTFD_MAX 1000

struct vfio_pci_ioeventfd {
struct list_head next;
struct virqfd *virqfd;
void __iomem *addr;
uint64_t data;
loff_t pos;
int bar;
int count;
};

struct vfio_pci_irq_ctx {
struct eventfd_ctx *trigger;
struct virqfd *unmask;
@@ -92,9 +105,12 @@ struct vfio_pci_device {
bool nointx;
struct pci_saved_state *pci_saved_state;
int refcnt;
int ioeventfds_nr;
struct eventfd_ctx *err_trigger;
struct eventfd_ctx *req_trigger;
struct list_head dummy_resources_list;
struct mutex ioeventfds_lock;
struct list_head ioeventfds_list;
};

#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
@@ -120,6 +136,9 @@ extern ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
extern ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite);

extern long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
uint64_t data, int count, int fd);

extern int vfio_pci_init_perm_bits(void);
extern void vfio_pci_uninit_perm_bits(void);

184 changes: 164 additions & 20 deletions drivers/vfio/pci/vfio_pci_rdwr.c
@@ -17,10 +17,29 @@
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>

#include "vfio_pci_private.h"

#ifdef __LITTLE_ENDIAN
#define vfio_ioread64 ioread64
#define vfio_iowrite64 iowrite64
#define vfio_ioread32 ioread32
#define vfio_iowrite32 iowrite32
#define vfio_ioread16 ioread16
#define vfio_iowrite16 iowrite16
#else
#define vfio_ioread64 ioread64be
#define vfio_iowrite64 iowrite64be
#define vfio_ioread32 ioread32be
#define vfio_iowrite32 iowrite32be
#define vfio_ioread16 ioread16be
#define vfio_iowrite16 iowrite16be
#endif
#define vfio_ioread8 ioread8
#define vfio_iowrite8 iowrite8

/*
* Read or write from an __iomem region (MMIO or I/O port) with an excluded
* range which is inaccessible. The excluded range drops writes and fills
@@ -44,31 +63,31 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
fillable = 0;

if (fillable >= 4 && !(off % 4)) {
__le32 val;
u32 val;

if (iswrite) {
if (copy_from_user(&val, buf, 4))
return -EFAULT;

iowrite32(le32_to_cpu(val), io + off);
vfio_iowrite32(val, io + off);
} else {
val = cpu_to_le32(ioread32(io + off));
val = vfio_ioread32(io + off);

if (copy_to_user(buf, &val, 4))
return -EFAULT;
}

filled = 4;
} else if (fillable >= 2 && !(off % 2)) {
__le16 val;
u16 val;

if (iswrite) {
if (copy_from_user(&val, buf, 2))
return -EFAULT;

iowrite16(le16_to_cpu(val), io + off);
vfio_iowrite16(val, io + off);
} else {
val = cpu_to_le16(ioread16(io + off));
val = vfio_ioread16(io + off);

if (copy_to_user(buf, &val, 2))
return -EFAULT;
@@ -82,9 +101,9 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
if (copy_from_user(&val, buf, 1))
return -EFAULT;

iowrite8(val, io + off);
vfio_iowrite8(val, io + off);
} else {
val = ioread8(io + off);
val = vfio_ioread8(io + off);

if (copy_to_user(buf, &val, 1))
return -EFAULT;
@@ -113,6 +132,30 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
return done;
}

static int vfio_pci_setup_barmap(struct vfio_pci_device *vdev, int bar)
{
struct pci_dev *pdev = vdev->pdev;
int ret;
void __iomem *io;

if (vdev->barmap[bar])
return 0;

ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
if (ret)
return ret;

io = pci_iomap(pdev, bar, 0);
if (!io) {
pci_release_selected_regions(pdev, 1 << bar);
return -ENOMEM;
}

vdev->barmap[bar] = io;

return 0;
}

ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite)
{
@@ -147,22 +190,13 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
if (!io)
return -ENOMEM;
x_end = end;
} else if (!vdev->barmap[bar]) {
int ret;

ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
} else {
int ret = vfio_pci_setup_barmap(vdev, bar);
if (ret)
return ret;

io = pci_iomap(pdev, bar, 0);
if (!io) {
pci_release_selected_regions(pdev, 1 << bar);
return -ENOMEM;
}

vdev->barmap[bar] = io;
} else
io = vdev->barmap[bar];
}

if (bar == vdev->msix_bar) {
x_start = vdev->msix_offset;
@@ -242,3 +276,113 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,

return done;
}

static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
{
struct vfio_pci_ioeventfd *ioeventfd = opaque;

switch (ioeventfd->count) {
case 1:
vfio_iowrite8(ioeventfd->data, ioeventfd->addr);
break;
case 2:
vfio_iowrite16(ioeventfd->data, ioeventfd->addr);
break;
case 4:
vfio_iowrite32(ioeventfd->data, ioeventfd->addr);
break;
#ifdef iowrite64
case 8:
vfio_iowrite64(ioeventfd->data, ioeventfd->addr);
break;
#endif
}

return 0;
}

long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
uint64_t data, int count, int fd)
{
struct pci_dev *pdev = vdev->pdev;
loff_t pos = offset & VFIO_PCI_OFFSET_MASK;
int ret, bar = VFIO_PCI_OFFSET_TO_INDEX(offset);
struct vfio_pci_ioeventfd *ioeventfd;

/* Only support ioeventfds into BARs */
if (bar > VFIO_PCI_BAR5_REGION_INDEX)
return -EINVAL;

if (pos + count > pci_resource_len(pdev, bar))
return -EINVAL;

/* Disallow ioeventfds working around MSI-X table writes */
if (bar == vdev->msix_bar &&
!(pos + count <= vdev->msix_offset ||
pos >= vdev->msix_offset + vdev->msix_size))
return -EINVAL;

#ifndef iowrite64
if (count == 8)
return -EINVAL;
#endif

ret = vfio_pci_setup_barmap(vdev, bar);
if (ret)
return ret;

mutex_lock(&vdev->ioeventfds_lock);

list_for_each_entry(ioeventfd, &vdev->ioeventfds_list, next) {
if (ioeventfd->pos == pos && ioeventfd->bar == bar &&
ioeventfd->data == data && ioeventfd->count == count) {
if (fd == -1) {
vfio_virqfd_disable(&ioeventfd->virqfd);
list_del(&ioeventfd->next);
vdev->ioeventfds_nr--;
kfree(ioeventfd);
ret = 0;
} else
ret = -EEXIST;

goto out_unlock;
}
}

if (fd < 0) {
ret = -ENODEV;
goto out_unlock;
}

if (vdev->ioeventfds_nr >= VFIO_PCI_IOEVENTFD_MAX) {
ret = -ENOSPC;
goto out_unlock;
}

ioeventfd = kzalloc(sizeof(*ioeventfd), GFP_KERNEL);
if (!ioeventfd) {
ret = -ENOMEM;
goto out_unlock;
}

ioeventfd->addr = vdev->barmap[bar] + pos;
ioeventfd->data = data;
ioeventfd->pos = pos;
ioeventfd->bar = bar;
ioeventfd->count = count;

ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler,
NULL, NULL, &ioeventfd->virqfd, fd);
if (ret) {
kfree(ioeventfd);
goto out_unlock;
}

list_add(&ioeventfd->next, &vdev->ioeventfds_list);
vdev->ioeventfds_nr++;

out_unlock:
mutex_unlock(&vdev->ioeventfds_lock);

return ret;
}
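
A matching teardown sketch, again hypothetical and reusing the assumptions from the registration example earlier: as the lookup above shows, an entry is matched by (pos, bar, data, count), passing fd == -1 with the same parameters removes it, and re-registering a live duplicate returns -EEXIST.

#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Remove the ioeventfd registered earlier; offset, data and size must
 * match the original registration, and fd == -1 requests removal. */
static int unregister_doorbell(int device_fd, __u64 bar0_offset)
{
	struct vfio_device_ioeventfd ioeventfd = {
		.argsz  = sizeof(ioeventfd),
		.flags  = VFIO_DEVICE_IOEVENTFD_32,
		.offset = bar0_offset + 0x40,	/* same hypothetical register as before */
		.data   = 0x1,
		.fd     = -1,
	};

	return ioctl(device_fd, VFIO_DEVICE_IOEVENTFD, &ioeventfd);
}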
