Skip to content

Commit

Permalink
refactor: centralize GuestMemoryMmap creation
Browse files Browse the repository at this point in the history
In this day and age, Firecracker theoretically supports 4 different ways
of backing guest memory:

1. Normal MAP_ANONYMOUS | MAP_PRIVATE memory
2. memfd backed memory, mapped as shared
3. direct mapping of a snapshot file
4. MAP_ANONYMOUS again, but this time regions are described by snapshot
   file.

We have 3 different functions for creating these different backing
stores, which then call each other and vm_memory's APIs. Clean this up
by consolidating these into just one function that can be called with
generic memory backing options, plus 3 wrappers for the three actually
used ways of backing memory.

For this, hoist up the hugepages/file-based restore incompatibility
check, as with a dedicated function for dealing with the "snapshot
restored by mapping file" case, this function simply will not take a
huge pages argument, so we have to check this somewhere else.

Signed-off-by: Patrick Roy <[email protected]>
  • Loading branch information
roypat committed Jan 30, 2025
1 parent a3c6693 commit 6a14208
Show file tree
Hide file tree
Showing 7 changed files with 134 additions and 261 deletions.
12 changes: 5 additions & 7 deletions src/vmm/src/devices/virtio/block/vhost_user/device.rs
Original file line number Diff line number Diff line change
Expand Up @@ -378,7 +378,7 @@ mod tests {
use crate::devices::virtio::block::virtio::device::FileEngineType;
use crate::devices::virtio::mmio::VIRTIO_MMIO_INT_CONFIG;
use crate::test_utils::create_tmp_socket;
use crate::vstate::memory::{FileOffset, GuestAddress, GuestMemoryExtension};
use crate::vstate::memory::{GuestAddress, GuestMemoryExtension};

#[test]
fn test_from_config() {
Expand Down Expand Up @@ -778,12 +778,10 @@ mod tests {
let region_size = 0x10000;
let file = TempFile::new().unwrap().into_file();
file.set_len(region_size as u64).unwrap();
let regions = vec![(
FileOffset::new(file.try_clone().unwrap(), 0x0),
GuestAddress(0x0),
region_size,
)];
let guest_memory = GuestMemoryMmap::from_raw_regions_file(regions, false, false).unwrap();
let regions = vec![(GuestAddress(0x0), region_size)];
let guest_memory =
GuestMemoryMmap::create(regions.into_iter(), libc::MAP_PRIVATE, Some(file), false)
.unwrap();

// During activation of the device features, memory and queues should be set and activated.
vhost_block.activate(guest_memory).unwrap();
Expand Down
8 changes: 6 additions & 2 deletions src/vmm/src/devices/virtio/block/virtio/io/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -230,8 +230,12 @@ pub mod tests {
}

fn create_mem() -> GuestMemoryMmap {
GuestMemoryMmap::from_raw_regions(&[(GuestAddress(0), MEM_LEN)], true, HugePageConfig::None)
.unwrap()
GuestMemoryMmap::anonymous(
[(GuestAddress(0), MEM_LEN)].into_iter(),
true,
HugePageConfig::None,
)
.unwrap()
}

fn check_dirty_mem(mem: &GuestMemoryMmap, addr: GuestAddress, len: u32) {
Expand Down
28 changes: 10 additions & 18 deletions src/vmm/src/devices/virtio/vhost_user.rs
Original file line number Diff line number Diff line change
Expand Up @@ -466,7 +466,7 @@ mod tests {

use super::*;
use crate::test_utils::create_tmp_socket;
use crate::vstate::memory::{FileOffset, GuestAddress, GuestMemoryExtension};
use crate::vstate::memory::{GuestAddress, GuestMemoryExtension};

#[test]
fn test_new() {
Expand Down Expand Up @@ -759,19 +759,13 @@ mod tests {
let file_size = 2 * region_size;
file.set_len(file_size as u64).unwrap();
let regions = vec![
(
FileOffset::new(file.try_clone().unwrap(), 0x0),
GuestAddress(0x0),
region_size,
),
(
FileOffset::new(file.try_clone().unwrap(), 0x10000),
GuestAddress(0x10000),
region_size,
),
(GuestAddress(0x0), region_size),
(GuestAddress(0x10000), region_size),
];

let guest_memory = GuestMemoryMmap::from_raw_regions_file(regions, false, false).unwrap();
let guest_memory =
GuestMemoryMmap::create(regions.into_iter(), libc::MAP_PRIVATE, Some(file), false)
.unwrap();

vuh.update_mem_table(&guest_memory).unwrap();

Expand Down Expand Up @@ -883,13 +877,11 @@ mod tests {
let region_size = 0x10000;
let file = TempFile::new().unwrap().into_file();
file.set_len(region_size as u64).unwrap();
let regions = vec![(
FileOffset::new(file.try_clone().unwrap(), 0x0),
GuestAddress(0x0),
region_size,
)];
let regions = vec![(GuestAddress(0x0), region_size)];

let guest_memory = GuestMemoryMmap::from_raw_regions_file(regions, false, false).unwrap();
let guest_memory =
GuestMemoryMmap::create(regions.into_iter(), libc::MAP_PRIVATE, Some(file), false)
.unwrap();

let mut queue = Queue::new(69);
queue.initialize(&guest_memory).unwrap();
Expand Down
30 changes: 17 additions & 13 deletions src/vmm/src/persist.rs
Original file line number Diff line number Diff line change
Expand Up @@ -448,16 +448,19 @@ pub fn restore_from_snapshot(
let mem_state = &microvm_state.memory_state;

let (guest_memory, uffd) = match params.mem_backend.backend_type {
MemBackendType::File => (
guest_memory_from_file(
mem_backend_path,
mem_state,
track_dirty_pages,
vm_resources.machine_config.huge_pages,
MemBackendType::File => {
if vm_resources.machine_config.huge_pages.is_hugetlbfs() {
return Err(RestoreFromSnapshotGuestMemoryError::File(
GuestMemoryFromFileError::HugetlbfsSnapshot,
)
.into());

Check warning on line 456 in src/vmm/src/persist.rs

View check run for this annotation

Codecov / codecov/patch

src/vmm/src/persist.rs#L453-L456

Added lines #L453 - L456 were not covered by tests
}
(
guest_memory_from_file(mem_backend_path, mem_state, track_dirty_pages)
.map_err(RestoreFromSnapshotGuestMemoryError::File)?,
None,
)
.map_err(RestoreFromSnapshotGuestMemoryError::File)?,
None,
),
}
MemBackendType::Uffd => guest_memory_from_uffd(
mem_backend_path,
mem_state,
Expand Down Expand Up @@ -513,17 +516,17 @@ pub enum GuestMemoryFromFileError {
File(#[from] std::io::Error),
/// Failed to restore guest memory: {0}
Restore(#[from] MemoryError),
/// Cannot restore hugetlbfs backed snapshot by mapping the memory file. Please use uffd.
HugetlbfsSnapshot,
}

fn guest_memory_from_file(
mem_file_path: &Path,
mem_state: &GuestMemoryState,
track_dirty_pages: bool,
huge_pages: HugePageConfig,
) -> Result<GuestMemoryMmap, GuestMemoryFromFileError> {
let mem_file = File::open(mem_file_path)?;
let guest_mem =
GuestMemoryMmap::from_state(Some(&mem_file), mem_state, track_dirty_pages, huge_pages)?;
let guest_mem = GuestMemoryMmap::snapshot_file(mem_file, mem_state, track_dirty_pages)?;
Ok(guest_mem)
}

Expand Down Expand Up @@ -582,7 +585,8 @@ fn create_guest_memory(
track_dirty_pages: bool,
huge_pages: HugePageConfig,
) -> Result<(GuestMemoryMmap, Vec<GuestRegionUffdMapping>), GuestMemoryFromUffdError> {
let guest_memory = GuestMemoryMmap::from_state(None, mem_state, track_dirty_pages, huge_pages)?;
let guest_memory =
GuestMemoryMmap::anonymous(mem_state.regions(), track_dirty_pages, huge_pages)?;
let mut backend_mappings = Vec::with_capacity(guest_memory.num_regions());
let mut offset = 0;
for mem_region in guest_memory.iter() {
Expand Down
4 changes: 2 additions & 2 deletions src/vmm/src/resources.rs
Original file line number Diff line number Diff line change
Expand Up @@ -472,8 +472,8 @@ impl VmResources {
)
} else {
let regions = crate::arch::arch_memory_regions(self.machine_config.mem_size_mib << 20);
GuestMemoryMmap::from_raw_regions(
&regions,
GuestMemoryMmap::anonymous(
regions.into_iter(),
self.machine_config.track_dirty_pages,
self.machine_config.huge_pages,
)
Expand Down
2 changes: 1 addition & 1 deletion src/vmm/src/test_utils/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ pub fn single_region_mem_at(at: u64, size: usize) -> GuestMemoryMmap {

/// Creates a [`GuestMemoryMmap`] with multiple regions and without dirty page tracking.
pub fn multi_region_mem(regions: &[(GuestAddress, usize)]) -> GuestMemoryMmap {
GuestMemoryMmap::from_raw_regions(regions, false, HugePageConfig::None)
GuestMemoryMmap::anonymous(regions.iter().copied(), false, HugePageConfig::None)
.expect("Cannot initialize memory")
}

Expand Down
Loading

0 comments on commit 6a14208

Please sign in to comment.