Skip to content

Commit

Permalink
vdpa: request iova_range only once
Browse files Browse the repository at this point in the history
Currently the iova range is requested once per queue pair in the net
case. Reduce the number of ioctls by asking for it only once at
initialization and reusing that value for each vhost_vdpa.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Message-Id: <20221215113144.322011-7-eperezma@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasonwang@redhat.com>
  • Loading branch information
eugpermar authored and mstsirkin committed Dec 21, 2022
1 parent 36e4647 commit a585fad
Show file tree
Hide file tree
Showing 2 changed files with 14 additions and 28 deletions.
15 changes: 0 additions & 15 deletions hw/virtio/vhost-vdpa.c
Expand Up @@ -365,19 +365,6 @@ static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
return 0;
}

static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
{
int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
&v->iova_range);
if (ret != 0) {
v->iova_range.first = 0;
v->iova_range.last = UINT64_MAX;
}

trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
v->iova_range.last);
}

/*
* The use of this function is for requests that only need to be
* applied once. Typically such request occurs at the beginning
Expand Down Expand Up @@ -465,8 +452,6 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
goto err;
}

vhost_vdpa_get_iova_range(v);

if (!vhost_vdpa_first_dev(dev)) {
return 0;
}
Expand Down
27 changes: 14 additions & 13 deletions net/vhost-vdpa.c
Expand Up @@ -541,14 +541,15 @@ static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
};

static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
const char *device,
const char *name,
int vdpa_device_fd,
int queue_pair_index,
int nvqs,
bool is_datapath,
bool svq,
VhostIOVATree *iova_tree)
const char *device,
const char *name,
int vdpa_device_fd,
int queue_pair_index,
int nvqs,
bool is_datapath,
bool svq,
struct vhost_vdpa_iova_range iova_range,
VhostIOVATree *iova_tree)
{
NetClientState *nc = NULL;
VhostVDPAState *s;
Expand All @@ -567,6 +568,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
s->vhost_vdpa.device_fd = vdpa_device_fd;
s->vhost_vdpa.index = queue_pair_index;
s->vhost_vdpa.shadow_vqs_enabled = svq;
s->vhost_vdpa.iova_range = iova_range;
s->vhost_vdpa.iova_tree = iova_tree;
if (!is_datapath) {
s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
Expand Down Expand Up @@ -646,6 +648,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
int vdpa_device_fd;
g_autofree NetClientState **ncs = NULL;
g_autoptr(VhostIOVATree) iova_tree = NULL;
struct vhost_vdpa_iova_range iova_range;
NetClientState *nc;
int queue_pairs, r, i = 0, has_cvq = 0;

Expand Down Expand Up @@ -689,14 +692,12 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
return queue_pairs;
}

vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
if (opts->x_svq) {
struct vhost_vdpa_iova_range iova_range;

if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
goto err_svq;
}

vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
}

Expand All @@ -705,15 +706,15 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
for (i = 0; i < queue_pairs; i++) {
ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
vdpa_device_fd, i, 2, true, opts->x_svq,
iova_tree);
iova_range, iova_tree);
if (!ncs[i])
goto err;
}

if (has_cvq) {
nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
vdpa_device_fd, i, 1, false,
opts->x_svq, iova_tree);
opts->x_svq, iova_range, iova_tree);
if (!nc)
goto err;
}
Expand Down

0 comments on commit a585fad

Please sign in to comment.