vhost: protect vring access done by application
[ upstream commit 4e0de8d ]

Besides the enqueue/dequeue API, other APIs of the builtin net
backend should also be protected.

Fixes: a368804 ("vhost: protect active rings from async ring changes")

Reported-by: Peng He <xnhp0320@icloud.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Tiwei Bie authored and kevintraynor committed Dec 3, 2019
1 parent 9e564f0 commit b0818ba
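
Background: vhost-user messages (for example VHOST_USER_SET_MEM_TABLE or VHOST_USER_GET_VRING_BASE) can remap or tear down the vrings while the application is still calling rte_vhost_avail_entries(), rte_vhost_rx_queue_count() or rte_vhost_enable_guest_notification() from another thread. The enqueue/dequeue paths were already serialized against such changes via vq->access_lock by the commit named in the Fixes tag; this patch extends the same guard to the remaining application-facing APIs: take the lock, re-check the ring state under it, and route every exit through a single unlock. A condensed sketch of that pattern, distilled from the patched rte_vhost_avail_entries() in the diff below (the struct and spinlock helpers are the library's internals, shown for illustration only):

/* Condensed form of the guard this patch adds; distilled from the
 * patched rte_vhost_avail_entries(), not literal library code. */
static uint16_t
guarded_avail_entries(struct vhost_virtqueue *vq)
{
	uint16_t ret = 0;

	rte_spinlock_lock(&vq->access_lock);

	/* The ring may have been disabled or unmapped by a vhost-user
	 * message after the caller looked the device up; checking
	 * vq->enabled and vq->avail is only meaningful under the lock. */
	if (unlikely(!vq->enabled || vq->avail == NULL))
		goto out;

	ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;

out:
	rte_spinlock_unlock(&vq->access_lock);
	return ret;
}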
Showing 1 changed file with 39 additions and 11 deletions.
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -634,22 +634,33 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
 {
 	struct virtio_net *dev;
 	struct vhost_virtqueue *vq;
+	uint16_t ret = 0;
 
 	dev = get_device(vid);
 	if (!dev)
 		return 0;
 
 	vq = dev->virtqueue[queue_id];
-	if (!vq->enabled)
-		return 0;
 
-	return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
+	rte_spinlock_lock(&vq->access_lock);
+
+	if (unlikely(!vq->enabled || vq->avail == NULL))
+		goto out;
+
+	ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
+
+out:
+	rte_spinlock_unlock(&vq->access_lock);
+	return ret;
 }
 
-static inline void
+static inline int
 vhost_enable_notify_split(struct virtio_net *dev,
 		struct vhost_virtqueue *vq, int enable)
 {
+	if (vq->used == NULL)
+		return -1;
+
 	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
 		if (enable)
 			vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
@@ -659,17 +670,21 @@ vhost_enable_notify_split(struct virtio_net *dev,
 		if (enable)
 			vhost_avail_event(vq) = vq->last_avail_idx;
 	}
+	return 0;
 }
 
-static inline void
+static inline int
 vhost_enable_notify_packed(struct virtio_net *dev,
 		struct vhost_virtqueue *vq, int enable)
 {
 	uint16_t flags;
 
+	if (vq->device_event == NULL)
+		return -1;
+
 	if (!enable) {
 		vq->device_event->flags = VRING_EVENT_F_DISABLE;
-		return;
+		return 0;
 	}
 
 	flags = VRING_EVENT_F_ENABLE;
@@ -682,25 +697,31 @@ vhost_enable_notify_packed(struct virtio_net *dev,
 	rte_smp_wmb();
 
 	vq->device_event->flags = flags;
+	return 0;
 }
 
 int
 rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
 {
 	struct virtio_net *dev = get_device(vid);
 	struct vhost_virtqueue *vq;
+	int ret;
 
 	if (!dev)
 		return -1;
 
 	vq = dev->virtqueue[queue_id];
 
+	rte_spinlock_lock(&vq->access_lock);
+
 	if (vq_is_packed(dev))
-		vhost_enable_notify_packed(dev, vq, enable);
+		ret = vhost_enable_notify_packed(dev, vq, enable);
 	else
-		vhost_enable_notify_split(dev, vq, enable);
+		ret = vhost_enable_notify_split(dev, vq, enable);
 
-	return 0;
+	rte_spinlock_unlock(&vq->access_lock);
+
+	return ret;
 }
 
 void
@@ -739,6 +760,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
 {
 	struct virtio_net *dev;
 	struct vhost_virtqueue *vq;
+	uint16_t ret = 0;
 
 	dev = get_device(vid);
 	if (dev == NULL)
@@ -754,10 +776,16 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
 	if (vq == NULL)
 		return 0;
 
+	rte_spinlock_lock(&vq->access_lock);
+
 	if (unlikely(vq->enabled == 0 || vq->avail == NULL))
-		return 0;
+		goto out;
+
+	ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
 
-	return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
+out:
+	rte_spinlock_unlock(&vq->access_lock);
+	return ret;
 }
 
 int rte_vhost_get_vdpa_device_id(int vid)
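
A note on application-visible behavior: rte_vhost_enable_guest_notification() previously returned 0 unconditionally and could dereference an unmapped vq->used or vq->device_event; it now fails cleanly with -1 when the rings are not mapped. A minimal caller sketch under that assumption (rearm_notifications() and the log type are illustrative, not part of the patch):

/* Illustrative caller; rearm_notifications() is a hypothetical helper. */
#include <stdint.h>
#include <rte_log.h>
#include <rte_vhost.h>

static void
rearm_notifications(int vid, uint16_t queue_id)
{
	/* Safe against concurrent vring changes: returns 0 when the
	 * ring is disabled or not (yet) mapped. */
	if (rte_vhost_avail_entries(vid, queue_id) > 0)
		return;	/* still work queued, keep notifications off */

	/* With this patch the call reports -1 if the vring is not
	 * mapped, instead of crashing on a NULL ring pointer. */
	if (rte_vhost_enable_guest_notification(vid, queue_id, 1) < 0)
		RTE_LOG(DEBUG, USER1, "vring %u of vid %d not ready\n",
			queue_id, vid);
}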
