Skip to content

Commit

Permalink
vfio/fsl: Move to the device set infrastructure
Browse files Browse the repository at this point in the history
FSL uses the internal reflck to implement the open_device_set()
functionality; conversion to the core code is straightforward.

The decision on which set to be part of is trivially based on the
is_fsl_mc_bus_dprc() and we use a struct device * pointer as the set_id.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
  • Loading branch information
jgunthorpe committed Jul 1, 2021
1 parent 8965ebd commit 778d5b4
Show file tree
Hide file tree
Showing 3 changed files with 27 additions and 138 deletions.
153 changes: 24 additions & 129 deletions drivers/vfio/fsl-mc/vfio_fsl_mc.c
Expand Up @@ -19,81 +19,10 @@

static struct fsl_mc_driver vfio_fsl_mc_driver;

static DEFINE_MUTEX(reflck_lock);

/* Take an additional reference on a reflck; paired with vfio_fsl_mc_reflck_put(). */
static void vfio_fsl_mc_reflck_get(struct vfio_fsl_mc_reflck *reflck)
{
	kref_get(&reflck->kref);
}

/*
 * kref release callback: destroy and free the reflck once the last
 * reference is dropped.
 *
 * Invoked by kref_put_mutex() with reflck_lock already held, so this
 * function is responsible for releasing reflck_lock after freeing.
 */
static void vfio_fsl_mc_reflck_release(struct kref *kref)
{
	struct vfio_fsl_mc_reflck *reflck = container_of(kref,
							 struct vfio_fsl_mc_reflck,
							 kref);

	mutex_destroy(&reflck->lock);
	kfree(reflck);
	mutex_unlock(&reflck_lock);
}

/*
 * Drop a reference on a reflck.  The final put runs
 * vfio_fsl_mc_reflck_release() under reflck_lock.
 */
static void vfio_fsl_mc_reflck_put(struct vfio_fsl_mc_reflck *reflck)
{
	kref_put_mutex(&reflck->kref, vfio_fsl_mc_reflck_release, &reflck_lock);
}

/*
 * Allocate a fresh reflck with a single reference held by the caller.
 *
 * Returns the new reflck, or ERR_PTR(-ENOMEM) if allocation fails.
 */
static struct vfio_fsl_mc_reflck *vfio_fsl_mc_reflck_alloc(void)
{
	struct vfio_fsl_mc_reflck *new_reflck;

	new_reflck = kzalloc(sizeof(*new_reflck), GFP_KERNEL);
	if (!new_reflck)
		return ERR_PTR(-ENOMEM);

	mutex_init(&new_reflck->lock);
	kref_init(&new_reflck->kref);

	return new_reflck;
}

/*
 * Bind @vdev to a reflck.
 *
 * A DPRC (container) device allocates a new reflck; any other device
 * shares the reflck of its parent container, which must already be
 * bound to this driver (otherwise -ENODEV).  reflck_lock serialises
 * concurrent attach/release so the lookup and the kref_get happen
 * atomically with respect to the final put.
 *
 * Returns 0 on success or a negative errno.
 */
static int vfio_fsl_mc_reflck_attach(struct vfio_fsl_mc_device *vdev)
{
	int ret = 0;

	mutex_lock(&reflck_lock);
	if (is_fsl_mc_bus_dprc(vdev->mc_dev)) {
		/* Container device: create the reflck its children will share. */
		vdev->reflck = vfio_fsl_mc_reflck_alloc();
		ret = PTR_ERR_OR_ZERO(vdev->reflck);
	} else {
		struct device *mc_cont_dev = vdev->mc_dev->dev.parent;
		struct vfio_device *device;
		struct vfio_fsl_mc_device *cont_vdev;

		/* Find the parent container's vfio_device, taking a reference. */
		device = vfio_device_get_from_dev(mc_cont_dev);
		if (!device) {
			ret = -ENODEV;
			goto unlock;
		}

		cont_vdev =
			container_of(device, struct vfio_fsl_mc_device, vdev);
		/*
		 * NOTE(review): cont_vdev cannot be NULL here (container_of
		 * of a non-NULL pointer); only the !cont_vdev->reflck part
		 * of this check is meaningful.
		 */
		if (!cont_vdev || !cont_vdev->reflck) {
			vfio_device_put(device);
			ret = -ENODEV;
			goto unlock;
		}
		vfio_fsl_mc_reflck_get(cont_vdev->reflck);
		vdev->reflck = cont_vdev->reflck;
		vfio_device_put(device);
	}

unlock:
	mutex_unlock(&reflck_lock);
	return ret;
}

static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev)
static int vfio_fsl_mc_open_device_set(struct vfio_device *core_vdev)
{
struct vfio_fsl_mc_device *vdev =
container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
struct fsl_mc_device *mc_dev = vdev->mc_dev;
int count = mc_dev->obj_desc.region_count;
int i;
Expand Down Expand Up @@ -136,58 +65,30 @@ static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
kfree(vdev->regions);
}

/*
 * Open handler: on the first concurrent open of this device, set up the
 * MMIO region table; subsequent opens only bump the reference count.
 *
 * Returns 0 on success or the error from vfio_fsl_mc_regions_init().
 */
static int vfio_fsl_mc_open(struct vfio_device *core_vdev)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	int ret;

	mutex_lock(&vdev->reflck->lock);
	if (vdev->refcnt == 0) {
		ret = vfio_fsl_mc_regions_init(vdev);
		if (ret) {
			mutex_unlock(&vdev->reflck->lock);
			return ret;
		}
	}
	vdev->refcnt++;
	mutex_unlock(&vdev->reflck->lock);

	return 0;
}

static void vfio_fsl_mc_release(struct vfio_device *core_vdev)
static void vfio_fsl_mc_close_device_set(struct vfio_device *core_vdev)
{
struct vfio_fsl_mc_device *vdev =
container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
struct fsl_mc_device *mc_dev = vdev->mc_dev;
struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
int ret;

mutex_lock(&vdev->reflck->lock);
vfio_fsl_mc_regions_cleanup(vdev);

if (!(--vdev->refcnt)) {
struct fsl_mc_device *mc_dev = vdev->mc_dev;
struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
/* reset the device before cleaning up the interrupts */
ret = dprc_reset_container(mc_cont->mc_io, 0, mc_cont->mc_handle,
mc_cont->obj_desc.id,
DPRC_RESET_OPTION_NON_RECURSIVE);

vfio_fsl_mc_regions_cleanup(vdev);
if (WARN_ON(ret))
dev_warn(&mc_cont->dev,
"VFIO_FLS_MC: reset device has failed (%d)\n", ret);

/* reset the device before cleaning up the interrupts */
ret = dprc_reset_container(mc_cont->mc_io, 0,
mc_cont->mc_handle,
mc_cont->obj_desc.id,
DPRC_RESET_OPTION_NON_RECURSIVE);
vfio_fsl_mc_irqs_cleanup(vdev);

if (ret) {
dev_warn(&mc_cont->dev, "VFIO_FLS_MC: reset device has failed (%d)\n",
ret);
WARN_ON(1);
}

vfio_fsl_mc_irqs_cleanup(vdev);

fsl_mc_cleanup_irq_pool(mc_cont);
}

mutex_unlock(&vdev->reflck->lock);
fsl_mc_cleanup_irq_pool(mc_cont);
}

static long vfio_fsl_mc_ioctl(struct vfio_device *core_vdev,
Expand Down Expand Up @@ -504,8 +405,8 @@ static int vfio_fsl_mc_mmap(struct vfio_device *core_vdev,

static const struct vfio_device_ops vfio_fsl_mc_ops = {
.name = "vfio-fsl-mc",
.open = vfio_fsl_mc_open,
.release = vfio_fsl_mc_release,
.open_device_set = vfio_fsl_mc_open_device_set,
.close_device_set = vfio_fsl_mc_close_device_set,
.ioctl = vfio_fsl_mc_ioctl,
.read = vfio_fsl_mc_read,
.write = vfio_fsl_mc_write,
Expand Down Expand Up @@ -625,26 +526,23 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
vdev->mc_dev = mc_dev;
mutex_init(&vdev->igate);

ret = vfio_fsl_mc_reflck_attach(vdev);
ret = vfio_assign_device_set(&vdev->vdev, is_fsl_mc_bus_dprc(mc_dev) ?
&mc_dev->dev :
mc_dev->dev.parent);
if (ret)
goto out_uninit;

ret = vfio_fsl_mc_init_device(vdev);
if (ret)
goto out_reflck;
goto out_kfree;

ret = vfio_register_group_dev(&vdev->vdev);
if (ret) {
dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
goto out_device;
}

/*
* This triggers recursion into vfio_fsl_mc_probe() on another device
* and the vfio_fsl_mc_reflck_attach() must succeed, which relies on the
* vfio_add_group_dev() above. It has no impact on this vdev, so it is
* safe to be after the vfio device is made live.
*/
/* FIXME: this should be moved above, see my earlier patch that did that */
ret = vfio_fsl_mc_scan_container(mc_dev);
if (ret)
goto out_group_dev;
Expand All @@ -655,8 +553,6 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
vfio_unregister_group_dev(&vdev->vdev);
out_device:
vfio_fsl_uninit_device(vdev);
out_reflck:
vfio_fsl_mc_reflck_put(vdev->reflck);
out_uninit:
vfio_uninit_group_dev(&vdev->vdev);
kfree(vdev);
Expand All @@ -676,7 +572,6 @@ static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
dprc_remove_devices(mc_dev, NULL, 0);
vfio_fsl_uninit_device(vdev);
vfio_uninit_group_dev(&vdev->vdev);
vfio_fsl_mc_reflck_put(vdev->reflck);

kfree(vdev);
vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
Expand Down
6 changes: 3 additions & 3 deletions drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
Expand Up @@ -120,7 +120,7 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
if (start != 0 || count != 1)
return -EINVAL;

mutex_lock(&vdev->reflck->lock);
mutex_lock(&vdev->vdev.dev_set->lock);
ret = fsl_mc_populate_irq_pool(mc_cont,
FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
if (ret)
Expand All @@ -129,7 +129,7 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
ret = vfio_fsl_mc_irqs_allocate(vdev);
if (ret)
goto unlock;
mutex_unlock(&vdev->reflck->lock);
mutex_unlock(&vdev->vdev.dev_set->lock);

if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
s32 fd = *(s32 *)data;
Expand All @@ -154,7 +154,7 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
return 0;

unlock:
mutex_unlock(&vdev->reflck->lock);
mutex_unlock(&vdev->vdev.dev_set->lock);
return ret;

}
Expand Down
6 changes: 0 additions & 6 deletions drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
Expand Up @@ -22,11 +22,6 @@ struct vfio_fsl_mc_irq {
char *name;
};

/*
 * Refcounted lock shared between a DPRC container device and all of its
 * child vfio-fsl-mc devices attached via vfio_fsl_mc_reflck_attach().
 */
struct vfio_fsl_mc_reflck {
	struct kref kref;	/* one reference per attached device */
	struct mutex lock;	/* serialises open/release and IRQ trigger setup */
};

struct vfio_fsl_mc_region {
u32 flags;
u32 type;
Expand All @@ -41,7 +36,6 @@ struct vfio_fsl_mc_device {
struct notifier_block nb;
int refcnt;
struct vfio_fsl_mc_region *regions;
struct vfio_fsl_mc_reflck *reflck;
struct mutex igate;
struct vfio_fsl_mc_irq *mc_irqs;
};
Expand Down

0 comments on commit 778d5b4

Please sign in to comment.