vhost: multiqueue support
This patch lets vhost support multiqueue. The idea is simple: launch
multiple vhost threads and let each thread process a subset of the
device's virtqueues. After this change, each emulated device can have
multiple vhost threads as its backend.

To do this, a virtqueue index (vq_index) is introduced to record the first
virtqueue handled by a given vhost_net device. Based on this index and
nvqs, vhost can translate an absolute virtqueue index into the relative
index it needs when setting up the vhost_net device.
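
For illustration only, a minimal sketch of that translation (the helper name
below is made up; the patch itself open-codes the subtraction and the matching
assertion in each touched function):

    #include <assert.h>
    #include "hw/vhost.h"   /* struct vhost_dev gains vq_index next to nvqs */

    /* Hypothetical helper: map an absolute virtio virtqueue index to the
     * index the vhost kernel backend expects, i.e. relative to the first
     * virtqueue handled by this vhost device. */
    static int vhost_relative_vq_index(struct vhost_dev *dev, int idx)
    {
        assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
        return idx - dev->vq_index;
    }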

Since a virtio-net device may now be backed by several vhost/vhost_net
devices, the setup of guest notifiers is moved out of the starting and
stopping of an individual vhost thread: the old vhost_net_{start|stop}()
are renamed to vhost_net_{start|stop}_one(), and new
vhost_net_{start|stop}() functions are introduced to configure the guest
notifiers and start/stop all vhost/vhost_net devices.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
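
A rough usage sketch of the new entry points follows; the caller names are
hypothetical, the include paths are approximate for this tree, and virtio-net
below still passes a single queue pair:

    #include "hw/virtio.h"      /* VirtIODevice */
    #include "net/net.h"        /* NetClientState */
    #include "hw/vhost_net.h"   /* new vhost_net_start()/vhost_net_stop() */

    /* Hypothetical multiqueue caller: "ncs" is an array of "queues"
     * NetClientState entries whose peers are vhost-capable tap backends.
     * Each queue pair (two virtqueues) gets its own vhost_net device. */
    static int example_vhost_start(VirtIODevice *vdev, NetClientState *ncs,
                                   int queues)
    {
        /* Starts the vhost_net device for pair i at vq_index = 2 * i, then
         * registers guest notifiers for all 2 * queues virtqueues at once. */
        return vhost_net_start(vdev, ncs, queues);
    }

    static void example_vhost_stop(VirtIODevice *vdev, NetClientState *ncs,
                                   int queues)
    {
        /* Unregisters the guest notifiers first, then stops each
         * vhost_net device in turn. */
        vhost_net_stop(vdev, ncs, queues);
    }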
jasowang authored and Anthony Liguori committed Feb 1, 2013
1 parent 264986e commit a9f98bb
Showing 5 changed files with 120 additions and 58 deletions.
82 changes: 36 additions & 46 deletions hw/vhost.c
@@ -616,14 +616,17 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
{
hwaddr s, l, a;
int r;
int vhost_vq_index = idx - dev->vq_index;
struct vhost_vring_file file = {
.index = idx,
.index = vhost_vq_index
};
struct vhost_vring_state state = {
.index = idx,
.index = vhost_vq_index
};
struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

vq->num = state.num = virtio_queue_get_num(vdev, idx);
r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
if (r) {
@@ -666,11 +669,12 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
goto fail_alloc_ring;
}

r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
if (r < 0) {
r = -errno;
goto fail_alloc;
}

file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
if (r) {
@@ -706,9 +710,10 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev,
unsigned idx)
{
struct vhost_vring_state state = {
.index = idx,
.index = idx - dev->vq_index
};
int r;
assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
if (r < 0) {
fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
@@ -864,7 +869,9 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
}

for (i = 0; i < hdev->nvqs; ++i) {
r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, true);
r = vdev->binding->set_host_notifier(vdev->binding_opaque,
hdev->vq_index + i,
true);
if (r < 0) {
fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
goto fail_vq;
@@ -874,7 +881,9 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
return 0;
fail_vq:
while (--i >= 0) {
r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
r = vdev->binding->set_host_notifier(vdev->binding_opaque,
hdev->vq_index + i,
false);
if (r < 0) {
fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
fflush(stderr);
@@ -895,7 +904,9 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
int i, r;

for (i = 0; i < hdev->nvqs; ++i) {
r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
r = vdev->binding->set_host_notifier(vdev->binding_opaque,
hdev->vq_index + i,
false);
if (r < 0) {
fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
fflush(stderr);
@@ -909,8 +920,9 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
*/
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
struct vhost_virtqueue *vq = hdev->vqs + n;
struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
assert(hdev->started);
assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
return event_notifier_test_and_clear(&vq->masked_notifier);
}

@@ -919,15 +931,16 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
bool mask)
{
struct VirtQueue *vvq = virtio_get_queue(vdev, n);
int r;
int r, index = n - hdev->vq_index;

assert(hdev->started);
assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);

struct vhost_vring_file file = {
.index = n,
.index = index
};
if (mask) {
file.fd = event_notifier_get_fd(&hdev->vqs[n].masked_notifier);
file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
} else {
file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
}
@@ -942,20 +955,6 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)

hdev->started = true;

if (!vdev->binding->set_guest_notifiers) {
fprintf(stderr, "binding does not support guest notifiers\n");
r = -ENOSYS;
goto fail;
}

r = vdev->binding->set_guest_notifiers(vdev->binding_opaque,
hdev->nvqs,
true);
if (r < 0) {
fprintf(stderr, "Error binding guest notifier: %d\n", -r);
goto fail_notifiers;
}

r = vhost_dev_set_features(hdev, hdev->log_enabled);
if (r < 0) {
goto fail_features;
@@ -967,9 +966,9 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
}
for (i = 0; i < hdev->nvqs; ++i) {
r = vhost_virtqueue_start(hdev,
vdev,
hdev->vqs + i,
i);
vdev,
hdev->vqs + i,
hdev->vq_index + i);
if (r < 0) {
goto fail_vq;
}
@@ -992,15 +991,13 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
fail_vq:
while (--i >= 0) {
vhost_virtqueue_stop(hdev,
vdev,
hdev->vqs + i,
i);
vdev,
hdev->vqs + i,
hdev->vq_index + i);
}
i = hdev->nvqs;
fail_mem:
fail_features:
vdev->binding->set_guest_notifiers(vdev->binding_opaque, hdev->nvqs, false);
fail_notifiers:
fail:

hdev->started = false;
return r;
@@ -1009,29 +1006,22 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
int i, r;
int i;

for (i = 0; i < hdev->nvqs; ++i) {
vhost_virtqueue_stop(hdev,
vdev,
hdev->vqs + i,
i);
vdev,
hdev->vqs + i,
hdev->vq_index + i);
}
for (i = 0; i < hdev->n_mem_sections; ++i) {
vhost_sync_dirty_bitmap(hdev, &hdev->mem_sections[i],
0, (hwaddr)~0x0ull);
}
r = vdev->binding->set_guest_notifiers(vdev->binding_opaque,
hdev->nvqs,
false);
if (r < 0) {
fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
fflush(stderr);
}
assert (r >= 0);

hdev->started = false;
g_free(hdev->log);
hdev->log = NULL;
hdev->log_size = 0;
}

2 changes: 2 additions & 0 deletions hw/vhost.h
@@ -35,6 +35,8 @@ struct vhost_dev {
MemoryRegionSection *mem_sections;
struct vhost_virtqueue *vqs;
int nvqs;
/* the first virtqueue which would be used by this vhost dev */
int vq_index;
unsigned long long features;
unsigned long long acked_features;
unsigned long long backend_features;
86 changes: 78 additions & 8 deletions hw/vhost_net.c
@@ -140,12 +140,21 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev)
return vhost_dev_query(&net->dev, dev);
}

int vhost_net_start(struct vhost_net *net,
VirtIODevice *dev)
static int vhost_net_start_one(struct vhost_net *net,
VirtIODevice *dev,
int vq_index)
{
struct vhost_vring_file file = { };
int r;

if (net->dev.started) {
return 0;
}

net->dev.nvqs = 2;
net->dev.vqs = net->vqs;
net->dev.vq_index = vq_index;

r = vhost_dev_enable_notifiers(&net->dev, dev);
if (r < 0) {
goto fail_notifiers;
@@ -181,11 +190,15 @@ int vhost_net_start(struct vhost_net *net,
return r;
}

void vhost_net_stop(struct vhost_net *net,
VirtIODevice *dev)
static void vhost_net_stop_one(struct vhost_net *net,
VirtIODevice *dev)
{
struct vhost_vring_file file = { .fd = -1 };

if (!net->dev.started) {
return;
}

for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
int r = ioctl(net->dev.control, VHOST_NET_SET_BACKEND, &file);
assert(r >= 0);
@@ -195,6 +208,61 @@ void vhost_net_stop(struct vhost_net *net,
vhost_dev_disable_notifiers(&net->dev, dev);
}

int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
int total_queues)
{
int r, i = 0;

if (!dev->binding->set_guest_notifiers) {
error_report("binding does not support guest notifiers\n");
r = -ENOSYS;
goto err;
}

for (i = 0; i < total_queues; i++) {
r = vhost_net_start_one(tap_get_vhost_net(ncs[i].peer), dev, i * 2);

if (r < 0) {
goto err;
}
}

r = dev->binding->set_guest_notifiers(dev->binding_opaque,
total_queues * 2,
true);
if (r < 0) {
error_report("Error binding guest notifier: %d\n", -r);
goto err;
}

return 0;

err:
while (--i >= 0) {
vhost_net_stop_one(tap_get_vhost_net(ncs[i].peer), dev);
}
return r;
}

void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
int total_queues)
{
int i, r;

r = dev->binding->set_guest_notifiers(dev->binding_opaque,
total_queues * 2,
false);
if (r < 0) {
fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
fflush(stderr);
}
assert(r >= 0);

for (i = 0; i < total_queues; i++) {
vhost_net_stop_one(tap_get_vhost_net(ncs[i].peer), dev);
}
}

void vhost_net_cleanup(struct vhost_net *net)
{
vhost_dev_cleanup(&net->dev);
@@ -224,13 +292,15 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev)
return false;
}

int vhost_net_start(struct vhost_net *net,
VirtIODevice *dev)
int vhost_net_start(VirtIODevice *dev,
NetClientState *ncs,
int total_queues)
{
return -ENOSYS;
}
void vhost_net_stop(struct vhost_net *net,
VirtIODevice *dev)
void vhost_net_stop(VirtIODevice *dev,
NetClientState *ncs,
int total_queues)
{
}

4 changes: 2 additions & 2 deletions hw/vhost_net.h
@@ -9,8 +9,8 @@ typedef struct vhost_net VHostNetState;
VHostNetState *vhost_net_init(NetClientState *backend, int devfd, bool force);

bool vhost_net_query(VHostNetState *net, VirtIODevice *dev);
int vhost_net_start(VHostNetState *net, VirtIODevice *dev);
void vhost_net_stop(VHostNetState *net, VirtIODevice *dev);
int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues);
void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);

void vhost_net_cleanup(VHostNetState *net);

4 changes: 2 additions & 2 deletions hw/virtio-net.c
@@ -130,14 +130,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
return;
}
n->vhost_started = 1;
r = vhost_net_start(tap_get_vhost_net(nc->peer), &n->vdev);
r = vhost_net_start(&n->vdev, nc, 1);
if (r < 0) {
error_report("unable to start vhost net: %d: "
"falling back on userspace virtio", -r);
n->vhost_started = 0;
}
} else {
vhost_net_stop(tap_get_vhost_net(nc->peer), &n->vdev);
vhost_net_stop(&n->vdev, nc, 1);
n->vhost_started = 0;
}
}
