vdpa: move iova_range to vhost_vdpa_shared
Next patches will register the vhost_vdpa memory listener while the VM
is migrating at the destination, so we can map the memory to the device
before stopping the VM at the source.  The main goal is to reduce the
downtime.

However, the destination QEMU is unaware of which vhost_vdpa device will
register its memory_listener.  If the source guest has CVQ enabled, it
will be the CVQ device.  Otherwise, it will be the first one.

Move the iova range to VhostVDPAShared so all vhost_vdpa can use it,
rather than always in the first or last vhost_vdpa.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20231221174322.3130442-4-eperezma@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
eugpermar authored and mstsirkin committed Dec 26, 2023
1 parent 5edb02e commit ae25ff4
Showing 4 changed files with 21 additions and 13 deletions.
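Before the per-file diff, here is a minimal standalone sketch of the sharing pattern the patch introduces. It is not QEMU source: only the VhostVDPAShared / vhost_vdpa_iova_range names follow the patch, while main(), the 48-bit example range, and plain calloc (standing in for GLib's g_new0) are invented for illustration.

/*
 * Sketch: several vhost_vdpa device models point at one heap-allocated
 * VhostVDPAShared, so whichever device registers the memory listener
 * reads the same IOVA range through v->shared->iova_range.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct vhost_vdpa_iova_range {
    uint64_t first;
    uint64_t last;
};

/* Info shared by all vhost_vdpa device models (as in the header hunk below). */
typedef struct vhost_vdpa_shared {
    struct vhost_vdpa_iova_range iova_range;
} VhostVDPAShared;

struct vhost_vdpa {
    VhostVDPAShared *shared;
};

int main(void)
{
    /* The first device model allocates the shared state once... */
    struct vhost_vdpa first = { .shared = calloc(1, sizeof(VhostVDPAShared)) };
    first.shared->iova_range.first = 0;
    first.shared->iova_range.last = (1ULL << 48) - 1; /* example range */

    /* ...and every other vhost_vdpa (e.g. the CVQ one) aliases the same
     * struct, instead of each instance carrying its own iova_range copy. */
    struct vhost_vdpa cvq = { .shared = first.shared };

    printf("iova range: [0x%" PRIx64 ", 0x%" PRIx64 "]\n",
           cvq.shared->iova_range.first, cvq.shared->iova_range.last);
    free(first.shared);
    return 0;
}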
5 changes: 4 additions & 1 deletion hw/virtio/vdpa-dev.c
@@ -114,7 +114,8 @@ static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp)
                    strerror(-ret));
         goto free_vqs;
     }
-    v->vdpa.iova_range = iova_range;
+    v->vdpa.shared = g_new0(VhostVDPAShared, 1);
+    v->vdpa.shared->iova_range = iova_range;
 
     ret = vhost_dev_init(&v->dev, &v->vdpa, VHOST_BACKEND_TYPE_VDPA, 0, NULL);
     if (ret < 0) {
@@ -162,6 +163,7 @@ static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp)
     vhost_dev_cleanup(&v->dev);
 free_vqs:
     g_free(vqs);
+    g_free(v->vdpa.shared);
 out:
     qemu_close(v->vhostfd);
     v->vhostfd = -1;
@@ -184,6 +186,7 @@ static void vhost_vdpa_device_unrealize(DeviceState *dev)
     g_free(s->config);
     g_free(s->dev.vqs);
     vhost_dev_cleanup(&s->dev);
+    g_free(s->vdpa.shared);
     qemu_close(s->vhostfd);
     s->vhostfd = -1;
 }
16 changes: 10 additions & 6 deletions hw/virtio/vhost-vdpa.c
@@ -213,10 +213,10 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     RCU_READ_LOCK_GUARD();
     /* check if RAM section out of device range */
     llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova));
-    if (int128_gt(llend, int128_make64(v->iova_range.last))) {
+    if (int128_gt(llend, int128_make64(v->shared->iova_range.last))) {
         error_report("RAM section out of device range (max=0x%" PRIx64
                      ", end addr=0x%" PRIx64 ")",
-                     v->iova_range.last, int128_get64(llend));
+                     v->shared->iova_range.last, int128_get64(llend));
         return;
     }
 
@@ -316,8 +316,10 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
     int page_size = qemu_target_page_size();
     int page_mask = -page_size;
 
-    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
-                                            v->iova_range.last, page_mask)) {
+    if (vhost_vdpa_listener_skipped_section(section,
+                                            v->shared->iova_range.first,
+                                            v->shared->iova_range.last,
+                                            page_mask)) {
         return;
     }
     if (memory_region_is_iommu(section->mr)) {
@@ -403,8 +405,10 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
     int page_size = qemu_target_page_size();
     int page_mask = -page_size;
 
-    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
-                                            v->iova_range.last, page_mask)) {
+    if (vhost_vdpa_listener_skipped_section(section,
+                                            v->shared->iova_range.first,
+                                            v->shared->iova_range.last,
+                                            page_mask)) {
         return;
     }
     if (memory_region_is_iommu(section->mr)) {
3 changes: 2 additions & 1 deletion include/hw/virtio/vhost-vdpa.h
@@ -32,6 +32,8 @@ typedef struct VhostVDPAHostNotifier {
 
 /* Info shared by all vhost_vdpa device models */
 typedef struct vhost_vdpa_shared {
+    struct vhost_vdpa_iova_range iova_range;
+
     /* IOVA mapping used by the Shadow Virtqueue */
     VhostIOVATree *iova_tree;
 } VhostVDPAShared;
@@ -43,7 +45,6 @@ typedef struct vhost_vdpa {
     bool iotlb_batch_begin_sent;
     uint32_t address_space_id;
     MemoryListener listener;
-    struct vhost_vdpa_iova_range iova_range;
     uint64_t acked_features;
     bool shadow_vqs_enabled;
     /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
10 changes: 5 additions & 5 deletions net/vhost-vdpa.c
@@ -354,8 +354,8 @@ static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
     migration_add_notifier(&s->migration_state,
                            vdpa_net_migration_state_notifier);
     if (v->shadow_vqs_enabled) {
-        v->shared->iova_tree = vhost_iova_tree_new(v->iova_range.first,
-                                                   v->iova_range.last);
+        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
+                                                   v->shared->iova_range.last);
     }
 }
 
@@ -591,8 +591,8 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc)
      * and it is not worth it for the moment.
      */
     if (!v->shared->iova_tree) {
-        v->shared->iova_tree = vhost_iova_tree_new(v->iova_range.first,
-                                                   v->iova_range.last);
+        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
+                                                   v->shared->iova_range.last);
     }
 
     r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
@@ -1688,12 +1688,12 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
     s->always_svq = svq;
     s->migration_state.notify = NULL;
     s->vhost_vdpa.shadow_vqs_enabled = svq;
-    s->vhost_vdpa.iova_range = iova_range;
     s->vhost_vdpa.shadow_data = svq;
     if (queue_pair_index == 0) {
         vhost_vdpa_net_valid_svq_features(features,
                                           &s->vhost_vdpa.migration_blocker);
         s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
+        s->vhost_vdpa.shared->iova_range = iova_range;
     } else if (!is_datapath) {
         s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                      PROT_READ | PROT_WRITE,
