Xen/mmapv2 #160

Merged 5 commits on Jul 5, 2023

Changes from all commits
8 changes: 8 additions & 0 deletions crates/vhost-user-backend/CHANGELOG.md
@@ -9,6 +9,14 @@

### Deprecated

## v0.10.0

### Added
- [[#160]](https://github.com/rust-vmm/vhost/pull/160) vhost-user-backend: Add support for Xen memory mappings

### Fixed
- [[#161]](https://github.com/rust-vmm/vhost/pull/161) get_vring_base should not reset the queue

## v0.9.0

### Added
17 changes: 10 additions & 7 deletions crates/vhost-user-backend/Cargo.toml
@@ -1,24 +1,27 @@
[package]
name = "vhost-user-backend"
version = "0.9.0"
version = "0.10.0"
authors = ["The Cloud Hypervisor Authors"]
keywords = ["vhost-user", "virtio"]
description = "A framework to build vhost-user backend service daemon"
repository = "https://github.com/rust-vmm/vhost"
edition = "2018"
license = "Apache-2.0"

[features]
xen = ["vm-memory/xen", "vhost/xen"]

[dependencies]
libc = "0.2.39"
log = "0.4.17"
vhost = { path = "../vhost", version = "0.7", features = ["vhost-user-slave"] }
virtio-bindings = "0.2.0"
virtio-queue = "0.8.0"
vm-memory = { version = "0.11.0", features = ["backend-mmap", "backend-atomic"] }
vhost = { path = "../vhost", version = "0.8", features = ["vhost-user-slave"] }
virtio-bindings = "0.2.1"
virtio-queue = "0.9.0"
vm-memory = { version = "0.12.0", features = ["backend-mmap", "backend-atomic"] }
vmm-sys-util = "0.11.0"

[dev-dependencies]
nix = "0.26"
vhost = { path = "../vhost", version = "0.7", features = ["test-utils", "vhost-user-master", "vhost-user-slave"] }
vm-memory = { version = "0.11.0", features = ["backend-mmap", "backend-atomic", "backend-bitmap"] }
vhost = { path = "../vhost", version = "0.8", features = ["test-utils", "vhost-user-master", "vhost-user-slave"] }
vm-memory = { version = "0.12.0", features = ["backend-mmap", "backend-atomic", "backend-bitmap"] }
tempfile = "3.2.0"
12 changes: 12 additions & 0 deletions crates/vhost-user-backend/README.md
@@ -98,6 +98,18 @@ impl VhostUserBackendMut for VhostUserService {
}
```

## Xen support

Supporting Xen requires special handling while mapping guest memory. The
`vm-memory` crate implements Xen memory-mapping support behind a dedicated
`xen` feature, and this crate uses the same feature name to enable Xen
support.

Also, for Xen mappings, the memory regions passed by the frontend contain a
few extra fields, as described in the vhost-user protocol documentation.

The `rust-vmm` maintainers decided to keep the interface simple and build the
crate for either standard Unix memory mapping or Xen, but not both.
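
A minimal sketch of what this means for downstream code, assuming a daemon
that wants to compile under both flavors; the struct and field names are
illustrative, only the `xen` feature name and the two extra region fields
come from this crate:

```rust
/// Hypothetical per-region bookkeeping in a backend daemon. The two
/// cfg-gated fields mirror the extended vhost-user memory-region layout
/// that a Xen frontend sends.
pub struct RegionInfo {
    pub guest_phys_addr: u64,
    pub memory_size: u64,
    /// Compiled in only with `--features xen`.
    #[cfg(feature = "xen")]
    pub xen_mmap_flags: u32,
    #[cfg(feature = "xen")]
    pub xen_mmap_data: u32,
}
```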

## License

This project is licensed under
25 changes: 14 additions & 11 deletions crates/vhost-user-backend/src/handler.rs
@@ -22,7 +22,7 @@ use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use virtio_queue::{Error as VirtQueError, QueueT};
use vm_memory::bitmap::Bitmap;
use vm_memory::mmap::NewBitmap;
use vm_memory::{FileOffset, GuestAddress, GuestAddressSpace, GuestMemoryMmap, GuestRegionMmap};
use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemoryMmap, GuestRegionMmap};
use vmm_sys_util::epoll::EventSet;

use super::backend::VhostUserBackend;
@@ -277,23 +277,27 @@
) -> VhostUserResult<()> {
// Convert the list of VhostUserMemoryRegion received from the frontend
// into GuestRegionMmap regions backed by the passed file descriptors.
let mut regions: Vec<(GuestAddress, usize, Option<FileOffset>)> = Vec::new();
let mut regions = Vec::new();
let mut mappings: Vec<AddrMapping> = Vec::new();

for (region, file) in ctx.iter().zip(files) {
let g_addr = GuestAddress(region.guest_phys_addr);
let len = region.memory_size as usize;
let f_off = FileOffset::new(file, region.mmap_offset);

regions.push((g_addr, len, Some(f_off)));
regions.push(
GuestRegionMmap::new(
region.mmap_region(file)?,
GuestAddress(region.guest_phys_addr),
)
.map_err(|e| {
VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
})?,
);
mappings.push(AddrMapping {
vmm_addr: region.user_addr,
size: region.memory_size,
gpa_base: region.guest_phys_addr,
});
}

let mem = GuestMemoryMmap::from_ranges_with_files(regions).map_err(|e| {
let mem = GuestMemoryMmap::from_regions(regions).map_err(|e| {
VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
})?;

@@ -526,10 +530,9 @@
file: File,
) -> VhostUserResult<()> {
let guest_region = Arc::new(
GuestRegionMmap::from_range(
GuestRegionMmap::new(
region.mmap_region(file)?,
GuestAddress(region.guest_phys_addr),
region.memory_size as usize,
Some(FileOffset::new(file, region.mmap_offset)),
)
.map_err(|e| {
VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
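
The shape of the new mapping path, reduced to a standalone sketch. This
assumes `vm-memory` 0.12 with `backend-mmap` on a default Unix build, and
uses anonymous `MmapRegion::new(size)` mappings where the handler above maps
the frontend's file via `region.mmap_region(file)`:

```rust
use vm_memory::{GuestAddress, GuestMemoryMmap, GuestRegionMmap, MmapRegion};

// Build one region per (guest-physical address, size) pair, then collect
// them with from_regions, mirroring the reworked handler code.
fn build_memory(
    ranges: &[(u64, usize)],
) -> Result<GuestMemoryMmap, Box<dyn std::error::Error>> {
    let mut regions = Vec::new();
    for &(gpa, size) in ranges {
        regions.push(GuestRegionMmap::new(
            MmapRegion::new(size)?,
            GuestAddress(gpa),
        )?);
    }
    Ok(GuestMemoryMmap::from_regions(regions)?)
}
```

Compared with the old `from_ranges_with_files` path, region construction,
and with it the Xen-specific mapping logic, now lives inside `vm-memory`
behind `mmap_region()`.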
9 changes: 8 additions & 1 deletion crates/vhost/CHANGELOG.md
@@ -6,10 +6,17 @@
### Changed

### Fixed
- [[#165]](https://github.com/rust-vmm/vhost/pull/165) vhost: vdpa: Provide custom set_vring_addr() implementation

### Deprecated

## [0.8.0]

### Added
- [[#160]](https://github.com/rust-vmm/vhost/pull/160) vhost: Add Xen memory mapping support

### Fixed
- [[#165]](https://github.com/rust-vmm/vhost/pull/165) vhost: vdpa: Provide custom set_vring_addr() implementation

## [0.7.0]

### Added
7 changes: 4 additions & 3 deletions crates/vhost/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "vhost"
version = "0.7.0"
version = "0.8.0"
keywords = ["vhost", "vhost-user", "virtio", "vdpa"]
description = "a pure rust library for vdpa, vhost and vhost-user"
authors = ["Liu Jiang <gerry@linux.alibaba.com>"]
@@ -23,15 +23,16 @@ vhost-net = ["vhost-kern"]
vhost-user = []
vhost-user-master = ["vhost-user"]
vhost-user-slave = ["vhost-user"]
xen = ["vm-memory/xen"]

[dependencies]
bitflags = "1.0"
libc = "0.2.39"

vmm-sys-util = "0.11.0"
vm-memory = "0.11.0"
vm-memory = "0.12.0"

[dev-dependencies]
tempfile = "3.2.0"
vm-memory = { version = "0.11.0", features=["backend-mmap"] }
vm-memory = { version = "0.12.0", features=["backend-mmap"] }
serial_test = "0.5"
12 changes: 12 additions & 0 deletions crates/vhost/README.md
@@ -30,3 +30,15 @@ The protocol defines two sides of the communication, master and slave.
Master is the application that shares its virtqueues, slave is the consumer
of the virtqueues. Master and slave can be either a client (i.e. connecting)
or server (listening) in the socket communication.

## Xen support

Supporting Xen requires special handling while mapping guest memory. The
`vm-memory` crate implements Xen memory-mapping support behind a dedicated
`xen` feature, and this crate uses the same feature name to enable Xen
support.

Also, for Xen mappings, the memory regions passed by the frontend contain a
few extra fields, as described in the vhost-user protocol documentation.

The `rust-vmm` maintainers decided to keep the interface simple and build the
crate for either standard Unix memory mapping or Xen, but not both.
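
For illustration, the either/or choice surfaces as two constructors for the
wire-format region in `vhost_user::message` (the `with_xen` variant is the
one this PR adds); a hedged sketch of a caller that compiles under both
builds:

```rust
use vhost::vhost_user::message::VhostUserMemoryRegion;

// Exactly one of these bodies is compiled in, matching the crate-wide
// either/or decision described above.
#[cfg(not(feature = "xen"))]
fn region_msg(gpa: u64, size: u64, uaddr: u64, off: u64) -> VhostUserMemoryRegion {
    VhostUserMemoryRegion::new(gpa, size, uaddr, off)
}

#[cfg(feature = "xen")]
fn region_msg(gpa: u64, size: u64, uaddr: u64, off: u64) -> VhostUserMemoryRegion {
    // UNIX flags request a plain mmap even on a Xen-enabled build.
    let flags = vm_memory::MmapXenFlags::UNIX.bits();
    VhostUserMemoryRegion::with_xen(gpa, size, uaddr, off, flags, 0)
}
```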
74 changes: 73 additions & 1 deletion crates/vhost/src/backend.rs
@@ -10,12 +10,15 @@
//! Common traits and structs for vhost-kern and vhost-user backend drivers.

use std::cell::RefCell;
use std::os::unix::io::AsRawFd;
use std::os::unix::io::RawFd;
use std::sync::RwLock;

use vm_memory::{bitmap::Bitmap, Address, GuestMemoryRegion, GuestRegionMmap};
use vmm_sys_util::eventfd::EventFd;

use super::Result;
use super::vhost_user::message::{VhostUserMemoryRegion, VhostUserSingleMemoryRegion};
use super::{Error, Result};

/// Maximum number of memory regions supported.
pub const VHOST_MAX_MEMORY_REGIONS: usize = 255;
@@ -72,6 +75,70 @@ pub struct VhostUserMemoryRegionInfo {
pub mmap_offset: u64,
/// Optional file descriptor for mmap.
pub mmap_handle: RawFd,

#[cfg(feature = "xen")]
/// Xen specific flags.
pub xen_mmap_flags: u32,

#[cfg(feature = "xen")]
/// Xen specific data.
pub xen_mmap_data: u32,
}

impl VhostUserMemoryRegionInfo {
/// Creates Self from GuestRegionMmap.
pub fn from_guest_region<B: Bitmap>(region: &GuestRegionMmap<B>) -> Result<Self> {
let file_offset = region
.file_offset()
.ok_or(Error::InvalidGuestMemoryRegion)?;

Ok(Self {
guest_phys_addr: region.start_addr().raw_value(),
memory_size: region.len(),
userspace_addr: region.as_ptr() as u64,
mmap_offset: file_offset.start(),
mmap_handle: file_offset.file().as_raw_fd(),
#[cfg(feature = "xen")]
xen_mmap_flags: region.xen_mmap_flags(),
#[cfg(feature = "xen")]
xen_mmap_data: region.xen_mmap_data(),
})
}

/// Creates VhostUserMemoryRegion from Self.
pub fn to_region(&self) -> VhostUserMemoryRegion {
#[cfg(not(feature = "xen"))]
return VhostUserMemoryRegion::new(
self.guest_phys_addr,
self.memory_size,
self.userspace_addr,
self.mmap_offset,
);

#[cfg(feature = "xen")]
VhostUserMemoryRegion::with_xen(
self.guest_phys_addr,
self.memory_size,
self.userspace_addr,
self.mmap_offset,
self.xen_mmap_flags,
self.xen_mmap_data,
)
}

/// Creates VhostUserSingleMemoryRegion from Self.
pub fn to_single_region(&self) -> VhostUserSingleMemoryRegion {
VhostUserSingleMemoryRegion::new(
self.guest_phys_addr,
self.memory_size,
self.userspace_addr,
self.mmap_offset,
#[cfg(feature = "xen")]
self.xen_mmap_flags,
#[cfg(feature = "xen")]
self.xen_mmap_data,
)
}
}

/// Shared memory region data for logging dirty pages
@@ -460,6 +527,11 @@ impl VhostUserMemoryRegionInfo {
userspace_addr,
mmap_offset,
mmap_handle,

#[cfg(feature = "xen")]
xen_mmap_flags: vm_memory::MmapXenFlags::UNIX.bits(),
#[cfg(feature = "xen")]
xen_mmap_data: 0,
}
}
}
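
A hedged usage sketch of the new helpers, assuming `VhostUserMemoryRegionInfo`
is re-exported at the crate root and the region is file-backed (otherwise
`from_guest_region` returns `InvalidGuestMemoryRegion`):

```rust
use vhost::VhostUserMemoryRegionInfo;
use vm_memory::GuestRegionMmap;

// Derive both message forms from an already-mapped region instead of
// filling the info struct field by field.
fn describe(region: &GuestRegionMmap) -> vhost::Result<()> {
    let info = VhostUserMemoryRegionInfo::from_guest_region(region)?;
    let _table_entry = info.to_region(); // SET_MEM_TABLE entry
    let _single = info.to_single_region(); // ADD_MEM_REG / REM_MEM_REG body
    Ok(())
}
```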
23 changes: 4 additions & 19 deletions crates/vhost/src/vhost_user/master.rs
@@ -193,13 +193,8 @@ impl VhostBackend for Master {
if region.memory_size == 0 || region.mmap_handle < 0 {
return error_code(VhostUserError::InvalidParam);
}
let reg = VhostUserMemoryRegion {
guest_phys_addr: region.guest_phys_addr,
memory_size: region.memory_size,
user_addr: region.userspace_addr,
mmap_offset: region.mmap_offset,
};
ctx.append(&reg, region.mmap_handle);

ctx.append(&region.to_region(), region.mmap_handle);
}

let mut node = self.node();
@@ -501,12 +496,7 @@ impl VhostUserMaster for Master {
return error_code(VhostUserError::InvalidParam);
}

let body = VhostUserSingleMemoryRegion::new(
region.guest_phys_addr,
region.memory_size,
region.userspace_addr,
region.mmap_offset,
);
let body = region.to_single_region();
let fds = [region.mmap_handle];
let hdr = node.send_request_with_body(MasterReq::ADD_MEM_REG, &body, Some(&fds))?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
@@ -519,12 +509,7 @@
return error_code(VhostUserError::InvalidParam);
}

let body = VhostUserSingleMemoryRegion::new(
region.guest_phys_addr,
region.memory_size,
region.userspace_addr,
region.mmap_offset,
);
let body = region.to_single_region();
let hdr = node.send_request_with_body(MasterReq::REM_MEM_REG, &body, None)?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
Expand Down
Loading