Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,8 @@ and this project adheres to
VMGenID support for microVMs running on ARM hosts with 6.1 guest kernels.
Support for VMGenID via DeviceTree bindings exists only on mainline 6.10 Linux
onwards. Users of Firecracker will need to backport the relevant patches on
top of their 6.1 kernels to make use of the feature.
top of their 6.1 kernels to make use of the feature. As a result, the Firecracker
snapshot version is now 3.0.0.
- [#4732](https://github.com/firecracker-microvm/firecracker/pull/4732),
[#4733](https://github.com/firecracker-microvm/firecracker/pull/4733),
[#4741](https://github.com/firecracker-microvm/firecracker/pull/4741),
Expand Down
9 changes: 3 additions & 6 deletions src/vmm/src/builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,6 @@ fn create_vmm_and_vcpus(
event_manager: &mut EventManager,
guest_memory: GuestMemoryMmap,
uffd: Option<Uffd>,
track_dirty_pages: bool,
vcpu_count: u8,
kvm_capabilities: Vec<KvmCapability>,
) -> Result<(Vmm, Vec<Vcpu>), StartMicrovmError> {
Expand All @@ -172,7 +171,7 @@ fn create_vmm_and_vcpus(
kvm.check_memory(&guest_memory)
.map_err(VmmError::Kvm)
.map_err(StartMicrovmError::Internal)?;
vm.memory_init(&guest_memory, track_dirty_pages)
vm.memory_init(&guest_memory)
.map_err(VmmError::Vm)
.map_err(StartMicrovmError::Internal)?;

Expand Down Expand Up @@ -292,7 +291,6 @@ pub fn build_microvm_for_boot(
event_manager,
guest_memory,
None,
vm_resources.machine_config.track_dirty_pages,
vm_resources.machine_config.vcpu_count,
cpu_template.kvm_capabilities.clone(),
)?;
Expand Down Expand Up @@ -482,7 +480,6 @@ pub fn build_microvm_from_snapshot(
event_manager,
guest_memory,
uffd,
vm_resources.machine_config.track_dirty_pages,
vm_resources.machine_config.vcpu_count,
microvm_state.kvm_state.kvm_cap_modifiers.clone(),
)?;
Expand Down Expand Up @@ -1140,7 +1137,7 @@ pub(crate) mod tests {

let kvm = Kvm::new(vec![]).unwrap();
let mut vm = Vm::new(&kvm).unwrap();
vm.memory_init(&guest_memory, false).unwrap();
vm.memory_init(&guest_memory).unwrap();
let mmio_device_manager = MMIODeviceManager::new();
let acpi_device_manager = ACPIDeviceManager::new();
#[cfg(target_arch = "x86_64")]
Expand Down Expand Up @@ -1394,7 +1391,7 @@ pub(crate) mod tests {
let kvm = Kvm::new(vec![]).expect("Cannot create Kvm");
#[allow(unused_mut)]
let mut vm = Vm::new(&kvm).unwrap();
vm.memory_init(&guest_memory, false).unwrap();
vm.memory_init(&guest_memory).unwrap();
let evfd = EventFd::new(libc::EFD_NONBLOCK).unwrap();

#[cfg(target_arch = "x86_64")]
Expand Down
6 changes: 3 additions & 3 deletions src/vmm/src/device_manager/mmio.rs
Original file line number Diff line number Diff line change
Expand Up @@ -666,7 +666,7 @@ mod tests {
let guest_mem = multi_region_mem(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]);
let kvm = Kvm::new(vec![]).expect("Cannot create Kvm");
let mut vm = Vm::new(&kvm).unwrap();
vm.memory_init(&guest_mem, false).unwrap();
vm.memory_init(&guest_mem).unwrap();
let mut device_manager = MMIODeviceManager::new();
let mut resource_allocator = ResourceAllocator::new().unwrap();

Expand Down Expand Up @@ -696,7 +696,7 @@ mod tests {
let guest_mem = multi_region_mem(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]);
let kvm = Kvm::new(vec![]).expect("Cannot create Kvm");
let mut vm = Vm::new(&kvm).unwrap();
vm.memory_init(&guest_mem, false).unwrap();
vm.memory_init(&guest_mem).unwrap();
let mut device_manager = MMIODeviceManager::new();
let mut resource_allocator = ResourceAllocator::new().unwrap();

Expand Down Expand Up @@ -751,7 +751,7 @@ mod tests {
let guest_mem = multi_region_mem(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]);
let kvm = Kvm::new(vec![]).expect("Cannot create Kvm");
let mut vm = Vm::new(&kvm).unwrap();
vm.memory_init(&guest_mem, false).unwrap();
vm.memory_init(&guest_mem).unwrap();

let mem_clone = guest_mem.clone();

Expand Down
12 changes: 5 additions & 7 deletions src/vmm/src/devices/virtio/block/vhost_user/device.rs
Original file line number Diff line number Diff line change
Expand Up @@ -378,7 +378,7 @@ mod tests {
use crate::devices::virtio::block::virtio::device::FileEngineType;
use crate::devices::virtio::mmio::VIRTIO_MMIO_INT_CONFIG;
use crate::test_utils::create_tmp_socket;
use crate::vstate::memory::{FileOffset, GuestAddress, GuestMemoryExtension};
use crate::vstate::memory::{GuestAddress, GuestMemoryExtension};

#[test]
fn test_from_config() {
Expand Down Expand Up @@ -778,12 +778,10 @@ mod tests {
let region_size = 0x10000;
let file = TempFile::new().unwrap().into_file();
file.set_len(region_size as u64).unwrap();
let regions = vec![(
FileOffset::new(file.try_clone().unwrap(), 0x0),
GuestAddress(0x0),
region_size,
)];
let guest_memory = GuestMemoryMmap::from_raw_regions_file(regions, false, false).unwrap();
let regions = vec![(GuestAddress(0x0), region_size)];
let guest_memory =
GuestMemoryMmap::create(regions.into_iter(), libc::MAP_PRIVATE, Some(file), false)
.unwrap();

// During activation of the device features, memory and queues should be set and activated.
vhost_block.activate(guest_memory).unwrap();
Expand Down
8 changes: 6 additions & 2 deletions src/vmm/src/devices/virtio/block/virtio/io/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -230,8 +230,12 @@ pub mod tests {
}

fn create_mem() -> GuestMemoryMmap {
GuestMemoryMmap::from_raw_regions(&[(GuestAddress(0), MEM_LEN)], true, HugePageConfig::None)
.unwrap()
GuestMemoryMmap::anonymous(
[(GuestAddress(0), MEM_LEN)].into_iter(),
true,
HugePageConfig::None,
)
.unwrap()
}

fn check_dirty_mem(mem: &GuestMemoryMmap, addr: GuestAddress, len: u32) {
Expand Down
28 changes: 10 additions & 18 deletions src/vmm/src/devices/virtio/vhost_user.rs
Original file line number Diff line number Diff line change
Expand Up @@ -466,7 +466,7 @@ mod tests {

use super::*;
use crate::test_utils::create_tmp_socket;
use crate::vstate::memory::{FileOffset, GuestAddress, GuestMemoryExtension};
use crate::vstate::memory::{GuestAddress, GuestMemoryExtension};

#[test]
fn test_new() {
Expand Down Expand Up @@ -759,19 +759,13 @@ mod tests {
let file_size = 2 * region_size;
file.set_len(file_size as u64).unwrap();
let regions = vec![
(
FileOffset::new(file.try_clone().unwrap(), 0x0),
GuestAddress(0x0),
region_size,
),
(
FileOffset::new(file.try_clone().unwrap(), 0x10000),
GuestAddress(0x10000),
region_size,
),
(GuestAddress(0x0), region_size),
(GuestAddress(0x10000), region_size),
];

let guest_memory = GuestMemoryMmap::from_raw_regions_file(regions, false, false).unwrap();
let guest_memory =
GuestMemoryMmap::create(regions.into_iter(), libc::MAP_PRIVATE, Some(file), false)
.unwrap();

vuh.update_mem_table(&guest_memory).unwrap();

Expand Down Expand Up @@ -883,13 +877,11 @@ mod tests {
let region_size = 0x10000;
let file = TempFile::new().unwrap().into_file();
file.set_len(region_size as u64).unwrap();
let regions = vec![(
FileOffset::new(file.try_clone().unwrap(), 0x0),
GuestAddress(0x0),
region_size,
)];
let regions = vec![(GuestAddress(0x0), region_size)];

let guest_memory = GuestMemoryMmap::from_raw_regions_file(regions, false, false).unwrap();
let guest_memory =
GuestMemoryMmap::create(regions.into_iter(), libc::MAP_PRIVATE, Some(file), false)
.unwrap();

let mut queue = Queue::new(69);
queue.initialize(&guest_memory).unwrap();
Expand Down
12 changes: 0 additions & 12 deletions src/vmm/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -629,18 +629,6 @@ impl Vmm {
Ok(bitmap)
}

/// Enables or disables KVM dirty page tracking.
pub fn set_dirty_page_tracking(&mut self, enable: bool) -> Result<(), VmmError> {
// This function _always_ results in an ioctl update. The VMM is stateless in the sense
// that it's unaware of the current dirty page tracking setting.
// The VMM's consumer will need to cache the dirty tracking setting internally. For
// example, if this function were to be exposed through the VMM controller, the VMM
// resources should cache the flag.
self.vm
.set_kvm_memory_regions(&self.guest_memory, enable)
.map_err(VmmError::Vm)
}

/// Updates the path of the host file backing the emulated block device with id `drive_id`.
/// We update the disk image on the device and its virtio configuration.
pub fn update_block_device_path(
Expand Down
39 changes: 22 additions & 17 deletions src/vmm/src/persist.rs
Original file line number Diff line number Diff line change
Expand Up @@ -448,16 +448,19 @@
let mem_state = &microvm_state.memory_state;

let (guest_memory, uffd) = match params.mem_backend.backend_type {
MemBackendType::File => (
guest_memory_from_file(
mem_backend_path,
mem_state,
track_dirty_pages,
vm_resources.machine_config.huge_pages,
MemBackendType::File => {
if vm_resources.machine_config.huge_pages.is_hugetlbfs() {
return Err(RestoreFromSnapshotGuestMemoryError::File(
GuestMemoryFromFileError::HugetlbfsSnapshot,
)
.into());

Check warning on line 456 in src/vmm/src/persist.rs — Codecov / codecov/patch: added lines #L453–L456 were not covered by tests. (View check run for this annotation.)
}
(
guest_memory_from_file(mem_backend_path, mem_state, track_dirty_pages)
.map_err(RestoreFromSnapshotGuestMemoryError::File)?,
None,
)
.map_err(RestoreFromSnapshotGuestMemoryError::File)?,
None,
),
}
MemBackendType::Uffd => guest_memory_from_uffd(
mem_backend_path,
mem_state,
Expand Down Expand Up @@ -513,17 +516,17 @@
File(#[from] std::io::Error),
/// Failed to restore guest memory: {0}
Restore(#[from] MemoryError),
/// Cannot restore hugetlbfs backed snapshot by mapping the memory file. Please use uffd.
HugetlbfsSnapshot,
}

fn guest_memory_from_file(
mem_file_path: &Path,
mem_state: &GuestMemoryState,
track_dirty_pages: bool,
huge_pages: HugePageConfig,
) -> Result<GuestMemoryMmap, GuestMemoryFromFileError> {
let mem_file = File::open(mem_file_path)?;
let guest_mem =
GuestMemoryMmap::from_state(Some(&mem_file), mem_state, track_dirty_pages, huge_pages)?;
let guest_mem = GuestMemoryMmap::snapshot_file(mem_file, mem_state, track_dirty_pages)?;
Ok(guest_mem)
}

Expand Down Expand Up @@ -582,15 +585,18 @@
track_dirty_pages: bool,
huge_pages: HugePageConfig,
) -> Result<(GuestMemoryMmap, Vec<GuestRegionUffdMapping>), GuestMemoryFromUffdError> {
let guest_memory = GuestMemoryMmap::from_state(None, mem_state, track_dirty_pages, huge_pages)?;
let guest_memory =
GuestMemoryMmap::anonymous(mem_state.regions(), track_dirty_pages, huge_pages)?;
let mut backend_mappings = Vec::with_capacity(guest_memory.num_regions());
for (mem_region, state_region) in guest_memory.iter().zip(mem_state.regions.iter()) {
let mut offset = 0;
for mem_region in guest_memory.iter() {
backend_mappings.push(GuestRegionUffdMapping {
base_host_virt_addr: mem_region.as_ptr() as u64,
size: mem_region.size(),
offset: state_region.offset,
offset,
page_size_kib: huge_pages.page_size_kib(),
});
offset += mem_region.size() as u64;
}

Ok((guest_memory, backend_mappings))
Expand Down Expand Up @@ -770,7 +776,6 @@
regions: vec![GuestMemoryRegionState {
base_address: 0,
size: 0x20000,
offset: 0x10000,
}],
};

Expand All @@ -779,7 +784,7 @@

assert_eq!(uffd_regions.len(), 1);
assert_eq!(uffd_regions[0].size, 0x20000);
assert_eq!(uffd_regions[0].offset, 0x10000);
assert_eq!(uffd_regions[0].offset, 0);
assert_eq!(
uffd_regions[0].page_size_kib,
HugePageConfig::None.page_size_kib()
Expand Down
4 changes: 2 additions & 2 deletions src/vmm/src/resources.rs
Original file line number Diff line number Diff line change
Expand Up @@ -472,8 +472,8 @@ impl VmResources {
)
} else {
let regions = crate::arch::arch_memory_regions(self.machine_config.mem_size_mib << 20);
GuestMemoryMmap::from_raw_regions(
&regions,
GuestMemoryMmap::anonymous(
regions.into_iter(),
self.machine_config.track_dirty_pages,
self.machine_config.huge_pages,
)
Expand Down
2 changes: 1 addition & 1 deletion src/vmm/src/test_utils/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ pub fn single_region_mem_at(at: u64, size: usize) -> GuestMemoryMmap {

/// Creates a [`GuestMemoryMmap`] with multiple regions and without dirty page tracking.
pub fn multi_region_mem(regions: &[(GuestAddress, usize)]) -> GuestMemoryMmap {
GuestMemoryMmap::from_raw_regions(regions, false, HugePageConfig::None)
GuestMemoryMmap::anonymous(regions.iter().copied(), false, HugePageConfig::None)
.expect("Cannot initialize memory")
}

Expand Down
Loading