
Commit

Fix formatting and add tests
Fix the formatting errors and add a unit test for the backup method

Signed-off-by: Jack Thomson <jackabt@amazon.com>
JackThomson2 authored and roypat committed May 7, 2024
1 parent 5c50b59 commit f497630
Showing 2 changed files with 104 additions and 61 deletions.
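
Both the rewritten dirty-page write loop and the new store_dirty_bitmap helper in this diff decode KVM's per-slot dirty bitmap (a map from memslot index to a Vec<u64>, one bit per guest page) with the same indexing: bit j of word i marks the page at byte offset ((i * 64) + j) * page_size. The standalone sketch below illustrates only that mapping; the dirty_page_offsets helper and the main function are hypothetical and not part of this commit.

/// Illustrative sketch only: byte offsets of pages marked dirty in one
/// KVM-style bitmap slot, mirroring the ((i * 64) + j) * page_size
/// computation used in memory.rs.
fn dirty_page_offsets(bitmap: &[u64], page_size: usize) -> Vec<usize> {
    let mut offsets = Vec::new();
    for (i, &word) in bitmap.iter().enumerate() {
        // Each u64 word covers 64 pages; bit j of word i is page i * 64 + j.
        for j in 0..64usize {
            if (word >> j) & 1 != 0 {
                offsets.push((i * 64 + j) * page_size);
            }
        }
    }
    offsets
}

fn main() {
    // 0b101 marks pages 0 and 2 of the slot as dirty, as in the new unit test.
    let offsets = dirty_page_offsets(&[0b101], 4096);
    assert_eq!(offsets, vec![0, 2 * 4096]);
}

With the 0b101 bitmaps used by the new test_store_dirty_bitmap, this yields pages 0 and 2 of each region, which is exactly what the test asserts.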
157 changes: 98 additions & 59 deletions src/vmm/src/vstate/memory.rs
@@ -107,6 +107,9 @@ where

/// Resets all the memory region bitmaps
fn reset_dirty(&self);

/// Store the dirty bitmap in the internal store
fn store_dirty_bitmap(&self, dirty_bitmap: &DirtyBitmap, page_size: usize);
}

/// State of a guest memory region saved to file/buffer.
@@ -301,72 +304,52 @@ impl GuestMemoryExtension for GuestMemoryMmap
let mut writer_offset = 0;
let page_size = get_page_size().map_err(MemoryError::PageSize)?;

let write_result = self.iter()
.enumerate()
.try_for_each(|(slot, region)| {
let kvm_bitmap = dirty_bitmap.get(&slot).unwrap();
let firecracker_bitmap = region.bitmap();
let mut write_size = 0;
let mut dirty_batch_start: u64 = 0;

for (i, v) in kvm_bitmap.iter().enumerate() {
for j in 0..64 {
let is_kvm_page_dirty = ((v >> j) & 1u64) != 0u64;
let page_offset = ((i * 64) + j) * page_size;
let is_firecracker_page_dirty = firecracker_bitmap.dirty_at(page_offset);

if is_kvm_page_dirty || is_firecracker_page_dirty {
// We are at the start of a new batch of dirty pages.
if write_size == 0 {
// Seek forward over the unmodified pages.
writer
.seek(SeekFrom::Start(writer_offset + page_offset as u64))
.unwrap();
dirty_batch_start = page_offset as u64;
}
write_size += page_size;

} else if write_size > 0 {
// We are at the end of a batch of dirty pages.
writer.write_all_volatile(
&region.get_slice(
MemoryRegionAddress(dirty_batch_start),
write_size,
)?,
)?;

write_size = 0;
let write_result = self.iter().enumerate().try_for_each(|(slot, region)| {
let kvm_bitmap = dirty_bitmap.get(&slot).unwrap();
let firecracker_bitmap = region.bitmap();
let mut write_size = 0;
let mut dirty_batch_start: u64 = 0;

for (i, v) in kvm_bitmap.iter().enumerate() {
for j in 0..64 {
let is_kvm_page_dirty = ((v >> j) & 1u64) != 0u64;
let page_offset = ((i * 64) + j) * page_size;
let is_firecracker_page_dirty = firecracker_bitmap.dirty_at(page_offset);

if is_kvm_page_dirty || is_firecracker_page_dirty {
// We are at the start of a new batch of dirty pages.
if write_size == 0 {
// Seek forward over the unmodified pages.
writer
.seek(SeekFrom::Start(writer_offset + page_offset as u64))
.unwrap();
dirty_batch_start = page_offset as u64;
}
write_size += page_size;
} else if write_size > 0 {
// We are at the end of a batch of dirty pages.
writer.write_all_volatile(
&region
.get_slice(MemoryRegionAddress(dirty_batch_start), write_size)?,
)?;

write_size = 0;
}
}
}

if write_size > 0 {
writer.write_all_volatile(
&region.get_slice(MemoryRegionAddress(dirty_batch_start), write_size)?,
)?;
}
writer_offset += region.len();
if write_size > 0 {
writer.write_all_volatile(
&region.get_slice(MemoryRegionAddress(dirty_batch_start), write_size)?,
)?;

}
writer_offset += region.len();

Ok(())
});
Ok(())
});

if write_result.is_err() {
self.iter().enumerate().for_each(|(slot, region)| {
let kvm_bitmap = dirty_bitmap.get(&slot).unwrap();
let firecracker_bitmap = region.bitmap();

for (i, v) in kvm_bitmap.iter().enumerate() {
for j in 0..64 {
let is_kvm_page_dirty = ((v >> j) & 1u64) != 0u64;

if is_kvm_page_dirty {
let page_offset = ((i * 64) + j) * page_size;

firecracker_bitmap.mark_dirty(page_offset, 1)
}
}
}
});
self.store_dirty_bitmap(dirty_bitmap, page_size);

} else {
self.reset_dirty();
}
@@ -382,6 +365,26 @@ impl GuestMemoryExtension for GuestMemoryMmap {
}
})
}

/// Stores the dirty bitmap in the internal per-region bitmaps
fn store_dirty_bitmap(&self, dirty_bitmap: &DirtyBitmap, page_size: usize) {
self.iter().enumerate().for_each(|(slot, region)| {
let kvm_bitmap = dirty_bitmap.get(&slot).unwrap();
let firecracker_bitmap = region.bitmap();

for (i, v) in kvm_bitmap.iter().enumerate() {
for j in 0..64 {
let is_kvm_page_dirty = ((v >> j) & 1u64) != 0u64;

if is_kvm_page_dirty {
let page_offset = ((i * 64) + j) * page_size;

firecracker_bitmap.mark_dirty(page_offset, 1)
}
}
}
});
}
}

fn create_memfd(
@@ -833,6 +836,42 @@ mod tests {
assert_eq!(expected_first_region, diff_file_content);
}

#[test]
fn test_store_dirty_bitmap() {
let page_size = get_page_size().unwrap();

// Two regions of three pages each, with a one page gap between them.
let region_1_address = GuestAddress(0);
let region_2_address = GuestAddress(page_size as u64 * 4);
let region_size = page_size * 3;
let mem_regions = [
(region_1_address, region_size),
(region_2_address, region_size),
];
let guest_memory =
GuestMemoryMmap::from_raw_regions(&mem_regions, true, HugePageConfig::None).unwrap();

// Check that Firecracker bitmap is clean.
guest_memory.iter().for_each(|r| {
assert!(!r.bitmap().dirty_at(0));
assert!(!r.bitmap().dirty_at(page_size));
assert!(!r.bitmap().dirty_at(page_size * 2));
});

let mut dirty_bitmap: DirtyBitmap = HashMap::new();
dirty_bitmap.insert(0, vec![0b101]);
dirty_bitmap.insert(1, vec![0b101]);

guest_memory.store_dirty_bitmap(&dirty_bitmap, page_size);

// Assert that the bitmap now reports these pages as dirty, matching the dirty bitmap
guest_memory.iter().for_each(|r| {
assert!(r.bitmap().dirty_at(0));
assert!(!r.bitmap().dirty_at(page_size));
assert!(r.bitmap().dirty_at(page_size * 2));
});
}

#[test]
fn test_create_memfd() {
let size = 1;
@@ -46,8 +46,12 @@ def test_diff_snapshot_works_after_error(

# Create a large file dynamically based on available space
fill = chroot / "fill"
free_space = int(subprocess.check_output(f"df {chroot} | tail -1 | awk '{{print $4}}'", shell=True)) # in bytes
target_size = round(((free_space ) / 1024) * 0.9) # Attempt to fill 90% of free space
free_space = int(
subprocess.check_output(
f"df {chroot} | tail -1 | awk '{{print $4}}'", shell=True
)
)
target_size = round((free_space / 1024) * 0.9) # Attempt to fill 90% of free space

subprocess.check_call(f"fallocate -l {target_size}M {fill}", shell=True)

