diff --git a/src/guest_memory.rs b/src/guest_memory.rs index 1c37f94b..811214df 100644 --- a/src/guest_memory.rs +++ b/src/guest_memory.rs @@ -29,8 +29,10 @@ use std::convert::From; use std::fmt::{self, Display}; +use std::fs::File; use std::io::{self, Read, Write}; use std::ops::{BitAnd, BitOr}; +use std::sync::Arc; use address::{Address, AddressValue}; use bytes::Bytes; @@ -115,6 +117,40 @@ impl_address_ops!(MemoryRegionAddress, u64); /// Type of the raw value stored in a GuestAddress object. pub type GuestUsize = <GuestAddress as AddressValue>::V; +/// Represents the start point within a `File` that backs a `GuestMemoryRegion`. +#[derive(Clone, Debug)] +pub struct FileOffset { + file: Arc<File>, + start: u64, +} + +impl FileOffset { + /// Creates a new `FileOffset` object. + pub fn new(file: File, start: u64) -> Self { + FileOffset::from_arc(Arc::new(file), start) + } + + /// Creates a new `FileOffset` object based on an existing `Arc<File>`. + pub fn from_arc(file: Arc<File>, start: u64) -> Self { + FileOffset { file, start } + } + + /// Returns a reference to the inner `File` object. + pub fn file(&self) -> &File { + self.file.as_ref() + } + + /// Returns a reference to the inner `Arc<File>` object. + pub fn arc(&self) -> &Arc<File> { + &self.file + } + + /// Returns the start offset within the file. + pub fn start(&self) -> u64 { + self.start + } +} + /// Represents a continuous region of guest physical memory. #[allow(clippy::len_without_is_empty)] pub trait GuestMemoryRegion: Bytes<MemoryRegionAddress> { @@ -164,6 +200,9 @@ pub trait GuestMemoryRegion: Bytes<MemoryRegionAddress> { .and_then(|offset| self.check_address(MemoryRegionAddress(offset))) } + /// Returns information regarding the file and offset backing this memory region. + fn file_offset(&self) -> Option<&FileOffset>; + /// Return a slice corresponding to the data in the region; unsafe because of /// possible aliasing. Return None if the region does not support slice-based /// access. @@ -514,4 +553,16 @@ mod tests { assert_eq!(Some(GuestAddress(0x0f)), a.checked_sub(0xf0)); assert!(a.checked_sub(0xffff).is_none()); } + + #[test] + fn test_file_offset() { + let file = tempfile::tempfile().unwrap(); + let start = 1234; + let file_offset = FileOffset::new(file, start); + assert_eq!(file_offset.start(), start); + assert_eq!( + file_offset.file() as *const File, + file_offset.arc().as_ref() as *const File + ); + } } diff --git a/src/lib.rs b/src/lib.rs index f78b8142..073d6d7f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -48,7 +48,7 @@ mod mmap_windows; #[cfg(feature = "backend-mmap")] pub mod mmap; #[cfg(feature = "backend-mmap")] -pub use mmap::{GuestMemoryMmap, GuestRegionMmap, MmapError, MmapRegion}; +pub use mmap::{Error, GuestMemoryMmap, GuestRegionMmap, MmapRegion}; pub mod volatile_memory; pub use volatile_memory::{ diff --git a/src/mmap.rs b/src/mmap.rs index 1a60e57e..a9c02b32 100644 --- a/src/mmap.rs +++ b/src/mmap.rs @@ -20,20 +20,26 @@ //! - [GuestMemoryMmap](struct.GuestMemoryMmap.html): provides methods to access a collection of //! GuestRegionMmap objects.
-use std::io::{self, Read, Write}; +use std::borrow::Borrow; +use std::io::{Read, Write}; use std::ops::Deref; +use std::result; use std::sync::Arc; use address::Address; -use guest_memory::*; +use guest_memory::{ + self, FileOffset, GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize, MemoryRegionAddress, +}; use volatile_memory::VolatileMemory; use Bytes; #[cfg(unix)] -pub use mmap_unix::MmapRegion; +pub use mmap_unix::{Error as MmapRegionError, MmapRegion}; #[cfg(windows)] pub use mmap_windows::MmapRegion; +#[cfg(windows)] +pub use std::io::Error as MmapRegionError; // For MmapRegion pub(crate) trait AsSlice { @@ -45,9 +51,12 @@ /// Errors that can happen when creating a memory map #[derive(Debug)] -pub enum MmapError { - /// Syscall returned the given error. - SystemCallFailed(io::Error), +pub enum Error { + /// Adding the guest base address to the length of the underlying mapping resulted + /// in an overflow. + InvalidGuestRegion, + /// Error creating a `MmapRegion` object. + MmapRegion(MmapRegionError), /// No memory region found. NoMemoryRegion, /// Some of the memory regions intersect with each other. @@ -64,12 +73,14 @@ pub struct GuestRegionMmap { impl GuestRegionMmap { /// Create a new memory-mapped memory region for guest's physical memory. - /// Note: caller needs to ensure that (mapping.len() + guest_base) doesn't wrapping around. - pub fn new(mapping: MmapRegion, guest_base: GuestAddress) -> Self { - GuestRegionMmap { + pub fn new(mapping: MmapRegion, guest_base: GuestAddress) -> result::Result<Self, Error> { + if guest_base.0.checked_add(mapping.len() as u64).is_none() { + return Err(Error::InvalidGuestRegion); + } + Ok(GuestRegionMmap { mapping, guest_base, - } + }) } /// Convert an absolute address into an address space (GuestMemory) @@ -91,7 +102,7 @@ impl Deref for GuestRegionMmap { } impl Bytes<MemoryRegionAddress> for GuestRegionMmap { - type E = Error; + type E = guest_memory::Error; /// # Examples /// * Write a slice at guest address 0x1200.
@@ -103,7 +114,7 @@ impl Bytes<MemoryRegionAddress> for GuestRegionMmap { /// let res = gm.write(&[1,2,3,4,5], GuestAddress(0x1200)).unwrap(); /// assert_eq!(5, res); /// ``` - fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<usize> { + fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize> { let maddr = addr.raw_value() as usize; self.as_volatile_slice() .write(buf, maddr) .map_err(Into::into) @@ -121,21 +132,21 @@ impl Bytes<MemoryRegionAddress> for GuestRegionMmap { /// let res = gm.read(buf, GuestAddress(0x1200)).unwrap(); /// assert_eq!(16, res); /// ``` - fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<usize> { + fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize> { let maddr = addr.raw_value() as usize; self.as_volatile_slice() .read(buf, maddr) .map_err(Into::into) } - fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<()> { + fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> { let maddr = addr.raw_value() as usize; self.as_volatile_slice() .write_slice(buf, maddr) .map_err(Into::into) } - fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<()> { + fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> { let maddr = addr.raw_value() as usize; self.as_volatile_slice() .read_slice(buf, maddr) @@ -162,7 +173,12 @@ impl Bytes<MemoryRegionAddress> for GuestRegionMmap { /// let read_addr = addr.checked_add(8).unwrap(); /// let _: u32 = gm.read_obj(read_addr).unwrap(); /// ``` - fn read_from<F>(&self, addr: MemoryRegionAddress, src: &mut F, count: usize) -> Result<usize> + fn read_from<F>( + &self, + addr: MemoryRegionAddress, + src: &mut F, + count: usize, + ) -> guest_memory::Result<usize> where F: Read, { @@ -194,7 +210,12 @@ impl Bytes<MemoryRegionAddress> for GuestRegionMmap { /// let read_addr = addr.checked_add(8).unwrap(); /// let _: u32 = gm.read_obj(read_addr).unwrap(); /// ``` - fn read_exact_from<F>(&self, addr: MemoryRegionAddress, src: &mut F, count: usize) -> Result<()> + fn read_exact_from<F>( + &self, + addr: MemoryRegionAddress, + src: &mut F, + count: usize, + ) -> guest_memory::Result<()> where F: Read, { @@ -221,7 +242,12 @@ impl Bytes<MemoryRegionAddress> for GuestRegionMmap { /// let mut mem = [0u8; 1024]; /// gm.write_to(start_addr, &mut file, 128).unwrap(); /// ``` - fn write_to<F>(&self, addr: MemoryRegionAddress, dst: &mut F, count: usize) -> Result<usize> + fn write_to<F>( + &self, + addr: MemoryRegionAddress, + dst: &mut F, + count: usize, + ) -> guest_memory::Result<usize> where F: Write, { @@ -248,7 +274,12 @@ impl Bytes<MemoryRegionAddress> for GuestRegionMmap { /// let mut mem = [0u8; 1024]; /// gm.write_all_to(start_addr, &mut file, 128).unwrap(); /// ``` - fn write_all_to<F>(&self, addr: MemoryRegionAddress, dst: &mut F, count: usize) -> Result<()> + fn write_all_to<F>( + &self, + addr: MemoryRegionAddress, + dst: &mut F, + count: usize, + ) -> guest_memory::Result<()> where F: Write, { @@ -268,6 +299,10 @@ impl GuestMemoryRegion for GuestRegionMmap { self.guest_base } + fn file_offset(&self) -> Option<&FileOffset> { + self.mapping.file_offset() + } + unsafe fn as_slice(&self) -> Option<&[u8]> { Some(self.mapping.as_slice()) } @@ -285,56 +320,55 @@ pub struct GuestMemoryMmap { impl GuestMemoryMmap { /// Creates a container and allocates anonymous memory for guest memory regions. - /// Valid memory regions are specified as a Vec of (Address, Size) tuples sorted by Address.
- pub fn new(ranges: &[(GuestAddress, usize)]) -> std::result::Result<Self, MmapError> { - if ranges.is_empty() { - return Err(MmapError::NoMemoryRegion); - } - - let mut regions = Vec::<GuestRegionMmap>::new(); - for range in ranges.iter() { - if let Some(last) = regions.last() { - if last - .guest_base - .checked_add(last.mapping.len() as GuestUsize) - .map_or(true, |a| a > range.0) - { - return Err(MmapError::MemoryRegionOverlap); - } - } - - let mapping = MmapRegion::new(range.1).map_err(MmapError::SystemCallFailed)?; - regions.push(GuestRegionMmap { - mapping, - guest_base: range.0, - }); - } + /// Valid memory regions are specified as a slice of (Address, Size) tuples sorted by Address. + pub fn new(ranges: &[(GuestAddress, usize)]) -> result::Result<Self, Error> { + Self::with_files(ranges.iter().map(|r| (r.0, r.1, None))) + } - Ok(Self { - regions: Arc::new(regions), - }) + /// Creates a container and allocates anonymous memory for guest memory regions. + /// Valid memory regions are specified as a sequence of (Address, Size, Option<FileOffset>) + /// tuples sorted by Address. + pub fn with_files<A, T>(ranges: T) -> result::Result<Self, Error> + where + A: Borrow<(GuestAddress, usize, Option<FileOffset>)>, + T: IntoIterator<Item = A>, + { + Self::from_regions( + ranges + .into_iter() + .map(|x| { + let guest_base = x.borrow().0; + let size = x.borrow().1; + + if let Some(ref f_off) = x.borrow().2 { + MmapRegion::from_file(f_off.clone(), size) + } else { + MmapRegion::new(size) + } + .map_err(Error::MmapRegion) + .and_then(|r| GuestRegionMmap::new(r, guest_base)) + }) + .collect::<result::Result<Vec<_>, Error>>()?, + ) } /// Creates a container and adds an existing set of mappings to it. - pub fn from_regions(ranges: Vec<GuestRegionMmap>) -> std::result::Result<Self, MmapError> { - if ranges.is_empty() { - return Err(MmapError::NoMemoryRegion); + pub fn from_regions(regions: Vec<GuestRegionMmap>) -> result::Result<Self, Error> { + if regions.is_empty() { + return Err(Error::NoMemoryRegion); } - for rangei in 1..ranges.len() { - let range = &ranges[rangei]; - let last = &ranges[rangei - 1]; - if last - .guest_base - .checked_add(last.mapping.len() as GuestUsize) - .map_or(true, |a| a > range.start_addr()) - { - return Err(MmapError::MemoryRegionOverlap); + for i in 1..regions.len() { + let region = &regions[i]; + let last = &regions[i - 1]; + + if last.guest_base.0 + last.mapping.len() as u64 > region.start_addr().0 { + return Err(Error::MemoryRegionOverlap); } } Ok(Self { - regions: Arc::new(ranges), + regions: Arc::new(regions), }) } @@ -362,9 +396,9 @@ impl GuestMemory for GuestMemoryMmap { None } - fn with_regions<F, E>(&self, cb: F) -> std::result::Result<(), E> + fn with_regions<F, E>(&self, cb: F) -> result::Result<(), E> where - F: Fn(usize, &Self::R) -> std::result::Result<(), E>, + F: Fn(usize, &Self::R) -> result::Result<(), E>, { for (index, region) in self.regions.iter().enumerate() { cb(index, region)?; @@ -372,9 +406,9 @@ } Ok(()) } - fn with_regions_mut<F, E>(&self, mut cb: F) -> std::result::Result<(), E> + fn with_regions_mut<F, E>(&self, mut cb: F) -> result::Result<(), E> where - F: FnMut(usize, &Self::R) -> std::result::Result<(), E>, + F: FnMut(usize, &Self::R) -> result::Result<(), E>, { for (index, region) in self.regions.iter().enumerate() { cb(index, region)?; } @@ -395,8 +429,9 @@ mod tests { extern crate tempfile; - use self::tempfile::tempfile; use super::*; + + use self::tempfile::tempfile; use std::fs::File; use std::mem; use std::path::Path; @@ -409,12 +444,6 @@ assert_eq!(1024, m.len()); } - #[test] - fn map_invalid_size() { - let e = MmapRegion::new(0).unwrap_err(); -
assert_eq!(e.raw_os_error(), Some(libc::EINVAL)); - } - #[test] fn slice_addr() { let m = MmapRegion::new(5).unwrap(); @@ -428,7 +457,7 @@ mod tests { let sample_buf = &[1, 2, 3, 4, 5]; assert!(f.write_all(sample_buf).is_ok()); - let mem_map = MmapRegion::from_fd(&f, sample_buf.len(), 0).unwrap(); + let mem_map = MmapRegion::from_file(FileOffset::new(f, 0), sample_buf.len()).unwrap(); let buf = &mut [0u8; 16]; assert_eq!( mem_map.as_volatile_slice().read(buf, 0).unwrap(), @@ -442,96 +471,180 @@ mod tests { // No regions provided should return error. assert_eq!( format!("{:?}", GuestMemoryMmap::new(&[]).err().unwrap()), - format!("{:?}", MmapError::NoMemoryRegion) + format!("{:?}", Error::NoMemoryRegion) ); + assert_eq!( + format!("{:?}", GuestMemoryMmap::with_files(&[]).err().unwrap()), + format!("{:?}", Error::NoMemoryRegion) + ); + + let f1 = tempfile().unwrap(); + f1.set_len(0x400).unwrap(); + let f2 = tempfile().unwrap(); + f2.set_len(0x400).unwrap(); let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x800); let guest_mem = GuestMemoryMmap::new(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); - assert_eq!(guest_mem.num_regions(), 2); - assert_eq!(guest_mem.end_addr(), GuestAddress(0xbff)); - assert!(guest_mem.find_region(GuestAddress(0x200)).is_some()); - assert!(guest_mem.find_region(GuestAddress(0x600)).is_none()); - assert!(guest_mem.find_region(GuestAddress(0xa00)).is_some()); - assert!(guest_mem.find_region(GuestAddress(0xc00)).is_none()); + let guest_mem_backed_by_file = GuestMemoryMmap::with_files(&[ + (start_addr1, 0x400, Some(FileOffset::new(f1, 0))), + (start_addr2, 0x400, Some(FileOffset::new(f2, 0))), + ]) + .unwrap(); + + let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file]; + for guest_mem in guest_mem_list.iter() { + assert_eq!(guest_mem.num_regions(), 2); + assert_eq!(guest_mem.end_addr(), GuestAddress(0xbff)); + assert!(guest_mem.find_region(GuestAddress(0x200)).is_some()); + assert!(guest_mem.find_region(GuestAddress(0x600)).is_none()); + assert!(guest_mem.find_region(GuestAddress(0xa00)).is_some()); + assert!(guest_mem.find_region(GuestAddress(0xc00)).is_none()); + } } #[test] fn test_address_in_range() { + let f1 = tempfile().unwrap(); + f1.set_len(0x400).unwrap(); + let f2 = tempfile().unwrap(); + f2.set_len(0x400).unwrap(); + let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x800); let guest_mem = GuestMemoryMmap::new(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); - assert!(guest_mem.address_in_range(GuestAddress(0x200))); - assert!(!guest_mem.address_in_range(GuestAddress(0x600))); - assert!(guest_mem.address_in_range(GuestAddress(0xa00))); - assert!(!guest_mem.address_in_range(GuestAddress(0xc00))); + let guest_mem_backed_by_file = GuestMemoryMmap::with_files(&[ + (start_addr1, 0x400, Some(FileOffset::new(f1, 0))), + (start_addr2, 0x400, Some(FileOffset::new(f2, 0))), + ]) + .unwrap(); + + let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file]; + for guest_mem in guest_mem_list.iter() { + assert!(guest_mem.address_in_range(GuestAddress(0x200))); + assert!(!guest_mem.address_in_range(GuestAddress(0x600))); + assert!(guest_mem.address_in_range(GuestAddress(0xa00))); + assert!(!guest_mem.address_in_range(GuestAddress(0xc00))); + } } #[test] fn test_check_address() { + let f1 = tempfile().unwrap(); + f1.set_len(0x400).unwrap(); + let f2 = tempfile().unwrap(); + f2.set_len(0x400).unwrap(); + let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x800); let guest_mem = 
GuestMemoryMmap::new(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); - assert_eq!( - guest_mem.check_address(GuestAddress(0x200)), - Some(GuestAddress(0x200)) - ); - assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None); - assert_eq!( - guest_mem.check_address(GuestAddress(0xa00)), - Some(GuestAddress(0xa00)) - ); - assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None); + let guest_mem_backed_by_file = GuestMemoryMmap::with_files(&[ + (start_addr1, 0x400, Some(FileOffset::new(f1, 0))), + (start_addr2, 0x400, Some(FileOffset::new(f2, 0))), + ]) + .unwrap(); + + let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file]; + for guest_mem in guest_mem_list.iter() { + assert_eq!( + guest_mem.check_address(GuestAddress(0x200)), + Some(GuestAddress(0x200)) + ); + assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None); + assert_eq!( + guest_mem.check_address(GuestAddress(0xa00)), + Some(GuestAddress(0xa00)) + ); + assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None); + } } #[test] fn test_to_region_addr() { + let f1 = tempfile().unwrap(); + f1.set_len(0x400).unwrap(); + let f2 = tempfile().unwrap(); + f2.set_len(0x400).unwrap(); + let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x800); let guest_mem = GuestMemoryMmap::new(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); - assert!(guest_mem.to_region_addr(GuestAddress(0x600)).is_none()); - let (r0, addr0) = guest_mem.to_region_addr(GuestAddress(0x800)).unwrap(); - let (r1, addr1) = guest_mem.to_region_addr(GuestAddress(0xa00)).unwrap(); - assert!(r0.as_ptr() == r1.as_ptr()); - assert_eq!(addr0, MemoryRegionAddress(0)); - assert_eq!(addr1, MemoryRegionAddress(0x200)); + let guest_mem_backed_by_file = GuestMemoryMmap::with_files(&[ + (start_addr1, 0x400, Some(FileOffset::new(f1, 0))), + (start_addr2, 0x400, Some(FileOffset::new(f2, 0))), + ]) + .unwrap(); + + let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file]; + for guest_mem in guest_mem_list.iter() { + assert!(guest_mem.to_region_addr(GuestAddress(0x600)).is_none()); + let (r0, addr0) = guest_mem.to_region_addr(GuestAddress(0x800)).unwrap(); + let (r1, addr1) = guest_mem.to_region_addr(GuestAddress(0xa00)).unwrap(); + assert!(r0.as_ptr() == r1.as_ptr()); + assert_eq!(addr0, MemoryRegionAddress(0)); + assert_eq!(addr1, MemoryRegionAddress(0x200)); + } } #[test] fn test_get_host_address() { + let f1 = tempfile().unwrap(); + f1.set_len(0x400).unwrap(); + let f2 = tempfile().unwrap(); + f2.set_len(0x400).unwrap(); + let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x800); let guest_mem = GuestMemoryMmap::new(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); - assert!(guest_mem.get_host_address(GuestAddress(0x600)).is_none()); - let ptr0 = guest_mem.get_host_address(GuestAddress(0x800)).unwrap(); - let ptr1 = guest_mem.get_host_address(GuestAddress(0xa00)).unwrap(); - assert_eq!( - ptr0, - guest_mem.find_region(GuestAddress(0x800)).unwrap().as_ptr() - ); - assert_eq!(unsafe { ptr0.offset(0x200) }, ptr1); + let guest_mem_backed_by_file = GuestMemoryMmap::with_files(&[ + (start_addr1, 0x400, Some(FileOffset::new(f1, 0))), + (start_addr2, 0x400, Some(FileOffset::new(f2, 0))), + ]) + .unwrap(); + + let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file]; + for guest_mem in guest_mem_list.iter() { + assert!(guest_mem.get_host_address(GuestAddress(0x600)).is_none()); + let ptr0 = guest_mem.get_host_address(GuestAddress(0x800)).unwrap(); + let ptr1 = 
guest_mem.get_host_address(GuestAddress(0xa00)).unwrap(); + assert_eq!( + ptr0, + guest_mem.find_region(GuestAddress(0x800)).unwrap().as_ptr() + ); + assert_eq!(unsafe { ptr0.offset(0x200) }, ptr1); + } } #[test] fn test_deref() { + let f = tempfile().unwrap(); + f.set_len(0x400).unwrap(); + let start_addr = GuestAddress(0x0); let guest_mem = GuestMemoryMmap::new(&[(start_addr, 0x400)]).unwrap(); - let sample_buf = &[1, 2, 3, 4, 5]; - - assert_eq!(guest_mem.write(sample_buf, start_addr).unwrap(), 5); - let slice = guest_mem - .find_region(GuestAddress(0)) - .unwrap() - .as_volatile_slice(); - - let buf = &mut [0, 0, 0, 0, 0]; - assert_eq!(slice.read(buf, 0).unwrap(), 5); - assert_eq!(buf, sample_buf); + let guest_mem_backed_by_file = + GuestMemoryMmap::with_files(&[(start_addr, 0x400, Some(FileOffset::new(f, 0)))]) + .unwrap(); + + let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file]; + for guest_mem in guest_mem_list.iter() { + let sample_buf = &[1, 2, 3, 4, 5]; + + assert_eq!(guest_mem.write(sample_buf, start_addr).unwrap(), 5); + let slice = guest_mem + .find_region(GuestAddress(0)) + .unwrap() + .as_volatile_slice(); + + let buf = &mut [0, 0, 0, 0, 0]; + assert_eq!(slice.read(buf, 0).unwrap(), 5); + assert_eq!(buf, sample_buf); + } } #[test] @@ -540,8 +653,8 @@ mod tests { let empty_buf = &[0; 16384]; assert!(f.write_all(empty_buf).is_ok()); - let mem_map = MmapRegion::from_fd(&f, empty_buf.len(), 0).unwrap(); - let guest_reg = GuestRegionMmap::new(mem_map, GuestAddress(0x8000)); + let mem_map = MmapRegion::from_file(FileOffset::new(f, 0), empty_buf.len()).unwrap(); + let guest_reg = GuestRegionMmap::new(mem_map, GuestAddress(0x8000)).unwrap(); let mut region_vec = Vec::new(); region_vec.push(guest_reg); let guest_mem = GuestMemoryMmap::from_regions(region_vec).unwrap(); @@ -552,17 +665,35 @@ mod tests { #[test] fn overlap_memory() { + let f1 = tempfile().unwrap(); + f1.set_len(0x2000).unwrap(); + let f2 = tempfile().unwrap(); + f2.set_len(0x2000).unwrap(); + let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x1000); - let res = GuestMemoryMmap::new(&[(start_addr1, 0x2000), (start_addr2, 0x2000)]); + let guest_mem = GuestMemoryMmap::new(&[(start_addr1, 0x2000), (start_addr2, 0x2000)]); + let guest_mem_backed_by_file = GuestMemoryMmap::with_files(&[ + (start_addr1, 0x2000, Some(FileOffset::new(f1, 0))), + (start_addr2, 0x2000, Some(FileOffset::new(f2, 0))), + ]); + assert_eq!( + format!("{:?}", guest_mem.err().unwrap()), + format!("{:?}", Error::MemoryRegionOverlap) + ); assert_eq!( - format!("{:?}", res.err().unwrap()), - format!("{:?}", MmapError::MemoryRegionOverlap) + format!("{:?}", guest_mem_backed_by_file.err().unwrap()), + format!("{:?}", Error::MemoryRegionOverlap) ); } #[test] fn test_read_u64() { + let f1 = tempfile().unwrap(); + f1.set_len(0x1000).unwrap(); + let f2 = tempfile().unwrap(); + f2.set_len(0x1000).unwrap(); + let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x1000); let bad_addr = GuestAddress(0x2001); @@ -570,75 +701,107 @@ mod tests { let max_addr = GuestAddress(0x2000); let gm = GuestMemoryMmap::new(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap(); - - let val1: u64 = 0xaa55_aa55_aa55_aa55; - let val2: u64 = 0x55aa_55aa_55aa_55aa; - assert_eq!( - format!("{:?}", gm.write_obj(val1, bad_addr).err().unwrap()), - format!("InvalidGuestAddress({:?})", bad_addr,) - ); - assert_eq!( - format!("{:?}", gm.write_obj(val1, bad_addr2).err().unwrap()), - format!( - "PartialBuffer {{ expected: {:?}, completed: {:?} }}", 
- mem::size_of::(), - max_addr.checked_offset_from(bad_addr2).unwrap() - ) - ); - - gm.write_obj(val1, GuestAddress(0x500)).unwrap(); - gm.write_obj(val2, GuestAddress(0x1000 + 32)).unwrap(); - let num1: u64 = gm.read_obj(GuestAddress(0x500)).unwrap(); - let num2: u64 = gm.read_obj(GuestAddress(0x1000 + 32)).unwrap(); - assert_eq!(val1, num1); - assert_eq!(val2, num2); + let gm_backed_by_file = GuestMemoryMmap::with_files(&[ + (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))), + (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))), + ]) + .unwrap(); + + let gm_list = vec![gm, gm_backed_by_file]; + for gm in gm_list.iter() { + let val1: u64 = 0xaa55_aa55_aa55_aa55; + let val2: u64 = 0x55aa_55aa_55aa_55aa; + assert_eq!( + format!("{:?}", gm.write_obj(val1, bad_addr).err().unwrap()), + format!("InvalidGuestAddress({:?})", bad_addr,) + ); + assert_eq!( + format!("{:?}", gm.write_obj(val1, bad_addr2).err().unwrap()), + format!( + "PartialBuffer {{ expected: {:?}, completed: {:?} }}", + mem::size_of::(), + max_addr.checked_offset_from(bad_addr2).unwrap() + ) + ); + + gm.write_obj(val1, GuestAddress(0x500)).unwrap(); + gm.write_obj(val2, GuestAddress(0x1000 + 32)).unwrap(); + let num1: u64 = gm.read_obj(GuestAddress(0x500)).unwrap(); + let num2: u64 = gm.read_obj(GuestAddress(0x1000 + 32)).unwrap(); + assert_eq!(val1, num1); + assert_eq!(val2, num2); + } } #[test] fn write_and_read() { + let f = tempfile().unwrap(); + f.set_len(0x400).unwrap(); + let mut start_addr = GuestAddress(0x1000); let gm = GuestMemoryMmap::new(&[(start_addr, 0x400)]).unwrap(); - let sample_buf = &[1, 2, 3, 4, 5]; + let gm_backed_by_file = + GuestMemoryMmap::with_files(&[(start_addr, 0x400, Some(FileOffset::new(f, 0)))]) + .unwrap(); + + let gm_list = vec![gm, gm_backed_by_file]; + for gm in gm_list.iter() { + let sample_buf = &[1, 2, 3, 4, 5]; - assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 5); + assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 5); - let buf = &mut [0u8; 5]; - assert_eq!(gm.read(buf, start_addr).unwrap(), 5); - assert_eq!(buf, sample_buf); + let buf = &mut [0u8; 5]; + assert_eq!(gm.read(buf, start_addr).unwrap(), 5); + assert_eq!(buf, sample_buf); - start_addr = GuestAddress(0x13ff); - assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 1); - assert_eq!(gm.read(buf, start_addr).unwrap(), 1); - assert_eq!(buf[0], sample_buf[0]); + start_addr = GuestAddress(0x13ff); + assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 1); + assert_eq!(gm.read(buf, start_addr).unwrap(), 1); + assert_eq!(buf[0], sample_buf[0]); + start_addr = GuestAddress(0x1000); + } } #[test] fn read_to_and_write_from_mem() { + let f = tempfile().unwrap(); + f.set_len(0x400).unwrap(); + let gm = GuestMemoryMmap::new(&[(GuestAddress(0x1000), 0x400)]).unwrap(); - let addr = GuestAddress(0x1010); - let mut file = if cfg!(unix) { - File::open(Path::new("/dev/zero")).unwrap() - } else { - File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap() - }; - gm.write_obj(!0u32, addr).unwrap(); - gm.read_exact_from(addr, &mut file, mem::size_of::()) - .unwrap(); - let value: u32 = gm.read_obj(addr).unwrap(); - if cfg!(unix) { - assert_eq!(value, 0); - } else { - assert_eq!(value, 0x0090_5a4d); - } + let gm_backed_by_file = GuestMemoryMmap::with_files(&[( + GuestAddress(0x1000), + 0x400, + Some(FileOffset::new(f, 0)), + )]) + .unwrap(); + + let gm_list = vec![gm, gm_backed_by_file]; + for gm in gm_list.iter() { + let addr = GuestAddress(0x1010); + let mut file = if cfg!(unix) { + File::open(Path::new("/dev/zero")).unwrap() 
+ } else { + File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap() + }; + gm.write_obj(!0u32, addr).unwrap(); + gm.read_exact_from(addr, &mut file, mem::size_of::()) + .unwrap(); + let value: u32 = gm.read_obj(addr).unwrap(); + if cfg!(unix) { + assert_eq!(value, 0); + } else { + assert_eq!(value, 0x0090_5a4d); + } - let mut sink = Vec::new(); - gm.write_all_to(addr, &mut sink, mem::size_of::()) - .unwrap(); - if cfg!(unix) { - assert_eq!(sink, vec![0; mem::size_of::()]); - } else { - assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]); - }; + let mut sink = Vec::new(); + gm.write_all_to(addr, &mut sink, mem::size_of::()) + .unwrap(); + if cfg!(unix) { + assert_eq!(sink, vec![0; mem::size_of::()]); + } else { + assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]); + }; + } } #[test] @@ -650,32 +813,95 @@ mod tests { ]; let mut iterated_regions = Vec::new(); let gm = GuestMemoryMmap::new(®ions).unwrap(); - - let res: Result<()> = gm.with_regions(|_, region| { + let res: guest_memory::Result<()> = gm.with_regions(|_, region| { assert_eq!(region.len(), region_size as GuestUsize); Ok(()) }); assert!(res.is_ok()); - - let res: Result<()> = gm.with_regions_mut(|_, region| { + let res: guest_memory::Result<()> = gm.with_regions_mut(|_, region| { iterated_regions.push((region.start_addr(), region.len() as usize)); Ok(()) }); assert!(res.is_ok()); assert_eq!(regions, iterated_regions); + + assert!(regions + .iter() + .map(|x| (x.0, x.1)) + .eq(iterated_regions.iter().map(|x| *x))); + assert_eq!(gm.clone().regions[0].guest_base, regions[0].0); assert_eq!(gm.clone().regions[1].guest_base, regions[1].0); } #[test] fn test_access_cross_boundary() { + let f1 = tempfile().unwrap(); + f1.set_len(0x1000).unwrap(); + let f2 = tempfile().unwrap(); + f2.set_len(0x1000).unwrap(); + let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x1000); let gm = GuestMemoryMmap::new(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap(); - let sample_buf = &[1, 2, 3, 4, 5]; - assert_eq!(gm.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5); - let buf = &mut [0u8; 5]; - assert_eq!(gm.read(buf, GuestAddress(0xffc)).unwrap(), 5); - assert_eq!(buf, sample_buf); + let gm_backed_by_file = GuestMemoryMmap::with_files(&[ + (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))), + (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))), + ]) + .unwrap(); + + let gm_list = vec![gm, gm_backed_by_file]; + for gm in gm_list.iter() { + let sample_buf = &[1, 2, 3, 4, 5]; + assert_eq!(gm.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5); + let buf = &mut [0u8; 5]; + assert_eq!(gm.read(buf, GuestAddress(0xffc)).unwrap(), 5); + assert_eq!(buf, sample_buf); + } + } + + #[test] + fn test_retrieve_fd_backing_memory_region() { + let f = tempfile().unwrap(); + f.set_len(0x400).unwrap(); + + let start_addr = GuestAddress(0x0); + let gm = GuestMemoryMmap::new(&[(start_addr, 0x400)]).unwrap(); + assert!(gm.find_region(start_addr).is_some()); + let region = gm.find_region(start_addr).unwrap(); + assert!(region.file_offset().is_none()); + + let gm = GuestMemoryMmap::with_files(&[(start_addr, 0x400, Some(FileOffset::new(f, 0)))]) + .unwrap(); + assert!(gm.find_region(start_addr).is_some()); + let region = gm.find_region(start_addr).unwrap(); + assert!(region.file_offset().is_some()); + } + + // Windows needs a dedicated test where it will retrieve the allocation + // granularity to determine a proper offset (other than 0) that can be + // used for the backing file. 
Refer to Microsoft docs here: + // https://docs.microsoft.com/en-us/windows/desktop/api/memoryapi/nf-memoryapi-mapviewoffile + #[test] + #[cfg(unix)] + fn test_retrieve_offset_from_fd_backing_memory_region() { + let f = tempfile().unwrap(); + f.set_len(0x1400).unwrap(); + // Needs to be aligned on 4k, otherwise mmap will fail. + let offset = 0x1000; + + let start_addr = GuestAddress(0x0); + let gm = GuestMemoryMmap::new(&[(start_addr, 0x400)]).unwrap(); + assert!(gm.find_region(start_addr).is_some()); + let region = gm.find_region(start_addr).unwrap(); + assert!(region.file_offset().is_none()); + + let gm = + GuestMemoryMmap::with_files(&[(start_addr, 0x400, Some(FileOffset::new(f, offset)))]) + .unwrap(); + assert!(gm.find_region(start_addr).is_some()); + let region = gm.find_region(start_addr).unwrap(); + assert!(region.file_offset().is_some()); + assert_eq!(region.file_offset().unwrap().start(), offset); } } diff --git a/src/mmap_unix.rs b/src/mmap_unix.rs index 4ebf8f41..1130876d 100644 --- a/src/mmap_unix.rs +++ b/src/mmap_unix.rs @@ -20,14 +20,33 @@ //! - [GuestMemoryMmap](struct.GuestMemoryMmap.html): provides methods to access a collection of //! GuestRegionMmap objects. -use libc; use std::io; +use std::os::unix::io::AsRawFd; use std::ptr::null_mut; +use std::result; +use libc; + +use guest_memory::FileOffset; use mmap::AsSlice; use volatile_memory::{self, compute_offset, VolatileMemory, VolatileSlice}; -use std::os::unix::io::AsRawFd; +/// Error conditions that may arise when creating a new `MmapRegion` object. +#[derive(Debug)] +pub enum Error { + /// The specified file offset and length cause overflow when added. + InvalidOffsetLength, + /// The forbidden `MAP_FIXED` flag was specified. + MapFixed, + /// Mappings using the same fd overlap in terms of file offset and length. + MappingOverlap, + /// A mapping with offset + length > EOF was attempted. + MappingPastEof, + /// The `mmap` call returned an error. + Mmap(io::Error), +} + +pub type Result<T> = result::Result<T, Error>; /// A backend driver to access guest's physical memory by mmapping guest's memory into the current /// process. @@ -38,6 +57,9 @@ use std::os::unix::io::AsRawFd; pub struct MmapRegion { addr: *mut u8, size: usize, + file_offset: Option<FileOffset>, + prot: i32, + flags: i32, } // Send and Sync aren't automatically inherited for the raw address pointer. @@ -52,53 +74,75 @@ impl MmapRegion { /// /// # Arguments /// * `size` - Size of memory region in bytes. - pub fn new(size: usize) -> io::Result<Self> { - // This is safe because we are creating an anonymous mapping in a place not already used by - // any other area in this process. - let addr = unsafe { - libc::mmap( - null_mut(), - size, - libc::PROT_READ | libc::PROT_WRITE, - libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE, - -1, - 0, - ) - }; - if addr == libc::MAP_FAILED { - return Err(io::Error::last_os_error()); - } - Ok(Self { - addr: addr as *mut u8, + pub fn new(size: usize) -> Result<Self> { + Self::build( + None, size, - }) + libc::PROT_READ | libc::PROT_WRITE, + libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE, + ) } /// Maps the `size` bytes starting at `offset` bytes of the given `fd`. /// /// # Arguments - /// * `fd` - File descriptor to mmap from. + /// * `file_offset` - File object and offset to mmap from. /// * `size` - Size of memory region in bytes. - /// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
- pub fn from_fd(fd: &AsRawFd, size: usize, offset: libc::off_t) -> io::Result<Self> { - // This is safe because we are creating a mapping in a place not already used by any other - // area in this process. - let addr = unsafe { - libc::mmap( - null_mut(), - size, - libc::PROT_READ | libc::PROT_WRITE, - libc::MAP_SHARED, - fd.as_raw_fd(), - offset, - ) + pub fn from_file(file_offset: FileOffset, size: usize) -> Result<Self> { + Self::build( + Some(file_offset), + size, + libc::PROT_READ | libc::PROT_WRITE, + libc::MAP_NORESERVE | libc::MAP_SHARED, + ) + } + + /// Creates a new mapping based on the provided arguments. + pub fn build( + file_offset: Option<FileOffset>, + size: usize, + prot: i32, + flags: i32, + ) -> Result<Self> { + // Forbid MAP_FIXED, as it doesn't make sense in this context, and is pretty dangerous + // in general. + if flags & libc::MAP_FIXED != 0 { + return Err(Error::MapFixed); + } + + let (fd, offset) = if let Some(ref f_off) = file_offset { + let file = f_off.file(); + let start = f_off.start(); + + if let Some(end) = start.checked_add(size as u64) { + if let Ok(metadata) = file.metadata() { + if metadata.len() < end { + return Err(Error::MappingPastEof); + } + } + } else { + return Err(Error::InvalidOffsetLength); + } + + (file.as_raw_fd(), start) + } else { + (-1, 0) }; + + // This is safe because we're not allowing MAP_FIXED, and invalid parameters cannot break + // Rust safety guarantees (things may change if we're mapping /dev/mem or some wacky file). + let addr = unsafe { libc::mmap(null_mut(), size, prot, flags, fd, offset as libc::off_t) }; + if addr == libc::MAP_FAILED { - return Err(io::Error::last_os_error()); + return Err(Error::Mmap(io::Error::last_os_error())); } + Ok(Self { addr: addr as *mut u8, size, + file_offset, + prot, + flags, }) } @@ -107,6 +151,49 @@ impl MmapRegion { pub fn as_ptr(&self) -> *mut u8 { self.addr } + + /// Returns the size of this region. + pub fn size(&self) -> usize { + self.size + } + + /// Returns information regarding the offset into the file backing this region (if any). + pub fn file_offset(&self) -> Option<&FileOffset> { + self.file_offset.as_ref() + } + + /// Returns the value of the `prot` parameter passed to `mmap` when mapping this region. + pub fn prot(&self) -> i32 { + self.prot + } + + /// Returns the value of the `flags` parameter passed to `mmap` when mapping this region. + pub fn flags(&self) -> i32 { + self.flags + } + + /// Returns true if `self` and `other` map the same file descriptor, and the `(offset, size)` + /// pairs overlap. This is mostly a sanity check available for convenience, as different file + /// descriptors can alias the same file. + pub fn fds_overlap(&self, other: &MmapRegion) -> bool { + if let Some(f_off1) = self.file_offset() { + if let Some(f_off2) = other.file_offset() { + if f_off1.file().as_raw_fd() == f_off2.file().as_raw_fd() { + let s1 = f_off1.start(); + let s2 = f_off2.start(); + let l1 = self.len() as u64; + let l2 = other.len() as u64; + + if s1 < s2 { + return s1 + l1 > s2; + } else { + return s2 + l2 > s1; + } + } + } + } + false + } } impl AsSlice for MmapRegion { @@ -156,13 +243,147 @@ impl Drop for MmapRegion { #[cfg(test)] mod tests { - use mmap_unix::MmapRegion; - use std::os::unix::io::FromRawFd; + use super::*; + + use std::io::Write; + use std::slice; + use std::sync::Arc; + + // Adding a helper method to extract the errno within an Error::Mmap(e), or return a + // distinctive value when the error is represented by another variant.
+ impl Error { + pub fn raw_os_error(&self) -> i32 { + return match self { + Error::Mmap(e) => e.raw_os_error().unwrap(), + _ => std::i32::MIN, + }; + } + } + + #[test] + fn test_mmap_region_new() { + assert!(MmapRegion::new(0).is_err()); + + let size = 4096; + + let r = MmapRegion::new(4096).unwrap(); + assert_eq!(r.size(), size); + assert!(r.file_offset().is_none()); + assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE); + assert_eq!( + r.flags(), + libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE + ); + } + + #[test] + fn test_mmap_region_from_file() { + let mut f = tempfile::tempfile().unwrap(); + let offset: usize = 0; + let buf1 = [1u8, 2, 3, 4, 5]; + + f.write_all(buf1.as_ref()).unwrap(); + let r = MmapRegion::from_file(FileOffset::new(f, offset as u64), buf1.len()).unwrap(); + + assert_eq!(r.size(), buf1.len() - offset); + assert_eq!(r.file_offset().unwrap().start(), offset as u64); + assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE); + assert_eq!(r.flags(), libc::MAP_NORESERVE | libc::MAP_SHARED); + + let buf2 = unsafe { slice::from_raw_parts(r.as_ptr(), buf1.len() - offset) }; + assert_eq!(&buf1[offset..], buf2); + } + + #[test] + fn test_mmap_region_build() { + let a = Arc::new(tempfile::tempfile().unwrap()); + + let prot = libc::PROT_READ | libc::PROT_WRITE; + let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE; + let offset = 4096; + let size = 1000; + + // Offset + size will overflow. + let r = MmapRegion::build( + Some(FileOffset::from_arc(a.clone(), std::u64::MAX)), + size, + prot, + flags, + ); + assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidOffsetLength"); + + // Offset + size is greater than the size of the file (which is 0 at this point). + let r = MmapRegion::build( + Some(FileOffset::from_arc(a.clone(), offset)), + size, + prot, + flags, + ); + assert_eq!(format!("{:?}", r.unwrap_err()), "MappingPastEof"); + + // MAP_FIXED was specified among the flags. + let r = MmapRegion::build( + Some(FileOffset::from_arc(a.clone(), offset)), + size, + prot, + flags | libc::MAP_FIXED, + ); + assert_eq!(format!("{:?}", r.unwrap_err()), "MapFixed"); + + // Let's resize the file. + assert_eq!(unsafe { libc::ftruncate(a.as_raw_fd(), 1024 * 10) }, 0); + + // The offset is not properly aligned. + let r = MmapRegion::build( + Some(FileOffset::from_arc(a.clone(), offset - 1)), + size, + prot, + flags, + ); + assert_eq!(r.unwrap_err().raw_os_error(), libc::EINVAL); + + // The build should be successful now. 
+ let r = MmapRegion::build( + Some(FileOffset::from_arc(a.clone(), offset)), + size, + prot, + flags, + ) + .unwrap(); + + assert_eq!(r.size(), size); + assert_eq!(r.file_offset().unwrap().start(), offset as u64); + assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE); + assert_eq!(r.flags(), libc::MAP_NORESERVE | libc::MAP_PRIVATE); + } #[test] - fn map_invalid_fd() { - let fd = unsafe { std::fs::File::from_raw_fd(-1) }; - let e = MmapRegion::from_fd(&fd, 1024, 0).unwrap_err(); - assert_eq!(e.raw_os_error(), Some(libc::EBADF)); + fn test_mmap_region_fds_overlap() { + let a = Arc::new(tempfile::tempfile().unwrap()); + assert_eq!(unsafe { libc::ftruncate(a.as_raw_fd(), 1024 * 10) }, 0); + + let r1 = MmapRegion::from_file(FileOffset::from_arc(a.clone(), 0), 4096).unwrap(); + let r2 = MmapRegion::from_file(FileOffset::from_arc(a.clone(), 4096), 4096).unwrap(); + assert!(!r1.fds_overlap(&r2)); + + let r1 = MmapRegion::from_file(FileOffset::from_arc(a.clone(), 0), 5000).unwrap(); + assert!(r1.fds_overlap(&r2)); + + let r2 = MmapRegion::from_file(FileOffset::from_arc(a.clone(), 0), 1000).unwrap(); + assert!(r1.fds_overlap(&r2)); + + // Different files, so there's not overlap. + let new_file = tempfile::tempfile().unwrap(); + // Resize before mapping. + assert_eq!( + unsafe { libc::ftruncate(new_file.as_raw_fd(), 1024 * 10) }, + 0 + ); + let r2 = MmapRegion::from_file(FileOffset::new(new_file, 0), 5000).unwrap(); + assert!(!r1.fds_overlap(&r2)); + + // R2 is not file backed, so no overlap. + let r2 = MmapRegion::new(5000).unwrap(); + assert!(!r1.fds_overlap(&r2)); } } diff --git a/src/mmap_windows.rs b/src/mmap_windows.rs index 70233582..d24fc77b 100644 --- a/src/mmap_windows.rs +++ b/src/mmap_windows.rs @@ -17,6 +17,7 @@ use libc; use std::io; use std::ptr::null_mut; +use guest_memory::FileOffset; use mmap::AsSlice; use volatile_memory::{self, compute_offset, VolatileMemory, VolatileSlice}; @@ -78,6 +79,7 @@ pub const ERROR_INVALID_PARAMETER: i32 = 87; pub struct MmapRegion { addr: *mut u8, size: usize, + file_offset: Option, } // Send and Sync aren't automatically inherited for the raw address pointer. @@ -105,6 +107,7 @@ impl MmapRegion { Ok(Self { addr: addr as *mut u8, size, + file_offset: None, }) } @@ -114,8 +117,8 @@ impl MmapRegion { /// * `file` - Raw handle to a file to map into the address space. /// * `size` - Size of memory region in bytes. /// * `offset` - Offset in bytes from the beginning of `file` to start the mapping. - pub fn from_fd(file: &AsRawHandle, size: usize, offset: libc::off_t) -> io::Result { - let handle = file.as_raw_handle(); + pub fn from_file(file_offset: FileOffset, size: usize) -> io::Result { + let handle = file_offset.file().as_raw_handle(); if handle == INVALID_HANDLE_VALUE { return Err(io::Error::from_raw_os_error(libc::EBADF)); } @@ -134,13 +137,15 @@ impl MmapRegion { return Err(io::Error::last_os_error()); } + let offset = file_offset.start(); + // This is safe because we are creating a mapping in a place not already used by any other // area in this process. let addr = unsafe { MapViewOfFile( mapping, FILE_MAP_ALL_ACCESS, - (offset as u64 >> 32) as u32, + (offset >> 32) as u32, offset as u32, size, ) @@ -156,6 +161,7 @@ impl MmapRegion { Ok(Self { addr: addr as *mut u8, size, + file_offset: Some(file_offset), }) } @@ -164,6 +170,16 @@ impl MmapRegion { pub fn as_ptr(&self) -> *mut u8 { self.addr } + + /// Returns the size of this region. 
+ pub fn size(&self) -> usize { + self.size + } + + /// Returns information regarding the offset into the file backing this region (if any). + pub fn file_offset(&self) -> Option<&FileOffset> { + self.file_offset.as_ref() + } } impl AsSlice for MmapRegion { @@ -213,13 +229,15 @@ impl Drop for MmapRegion { #[cfg(test)] mod tests { + use guest_memory::FileOffset; use mmap_windows::{MmapRegion, INVALID_HANDLE_VALUE}; use std::os::windows::io::FromRawHandle; #[test] fn map_invalid_handle() { - let fd = unsafe { std::fs::File::from_raw_handle(INVALID_HANDLE_VALUE) }; - let e = MmapRegion::from_fd(&fd, 1024, 0).unwrap_err(); + let file = unsafe { std::fs::File::from_raw_handle(INVALID_HANDLE_VALUE) }; + let file_offset = FileOffset::new(file, 0); + let e = MmapRegion::from_file(file_offset, 1024).unwrap_err(); assert_eq!(e.raw_os_error(), Some(libc::EBADF)); } }
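A minimal usage sketch of the file-backed API introduced by this patch; the backing-file path, the region sizes, and the exact re-export paths under `vm_memory` used below are illustrative assumptions rather than something the patch pins down:

extern crate vm_memory;

use std::fs::OpenOptions;

use vm_memory::{Bytes, FileOffset, GuestAddress, GuestMemory, GuestMemoryMmap, GuestMemoryRegion};

fn main() {
    // Hypothetical backing file; any file opened read+write and large enough works.
    let file = OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .open("guest_mem.bin")
        .unwrap();
    file.set_len(0x1000).unwrap();

    // One anonymous region at 0x0 and one file-backed region at 0x1000, using the
    // (GuestAddress, size, Option<FileOffset>) tuples accepted by with_files().
    let gm = GuestMemoryMmap::with_files(&[
        (GuestAddress(0x0), 0x1000, None),
        (GuestAddress(0x1000), 0x1000, Some(FileOffset::new(file, 0))),
    ])
    .unwrap();

    // Accesses to the second region go through the shared mapping of the backing file.
    gm.write_obj(0xdead_beef_u32, GuestAddress(0x1000)).unwrap();
    let val: u32 = gm.read_obj(GuestAddress(0x1000)).unwrap();
    assert_eq!(val, 0xdead_beef);

    // The backing file and offset can be retrieved from the region afterwards.
    let region = gm.find_region(GuestAddress(0x1000)).unwrap();
    assert_eq!(region.file_offset().unwrap().start(), 0);
}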