// Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//! Module temporarily containing vendored in-review vm-memory features
//!
//! TODO: To be removed once https://github.com/rust-vmm/vm-memory/pull/312 is merged

#![allow(clippy::cast_possible_truncation)] // vm-memory has different clippy configuration

use std::io::{Read, Write};
use std::sync::Arc;
use std::sync::atomic::Ordering;

use vm_memory::guest_memory::Result;
use vm_memory::{
    Address, AtomicAccess, Bytes, Error, GuestAddress, GuestMemory, GuestMemoryError,
    GuestMemoryRegion, MemoryRegionAddress,
};

use crate::vstate::memory::GuestRegionMmapExt;

/// [`GuestMemory`] implementation based on a homogeneous collection of
/// [`GuestMemoryRegion`] implementations.
///
/// Represents a sorted set of non-overlapping physical guest memory regions.
#[derive(Debug)]
pub struct GuestRegionCollection<R> {
    regions: Vec<Arc<R>>,
}

impl<R> Default for GuestRegionCollection<R> {
    fn default() -> Self {
        Self {
            regions: Vec::new(),
        }
    }
}

impl<R> Clone for GuestRegionCollection<R> {
    fn clone(&self) -> Self {
        GuestRegionCollection {
            regions: self.regions.iter().map(Arc::clone).collect(),
        }
    }
}

impl<R: GuestMemoryRegion> GuestRegionCollection<R> {
    /// Creates a new [`GuestRegionCollection`] from a vector of regions.
    ///
    /// # Arguments
    ///
    /// * `regions` - The vector of regions. The regions shouldn't overlap, and they should be
    ///   sorted by the starting address.
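    ///
    /// # Example
    ///
    /// A minimal construction sketch. Marked `ignore` because it assumes the
    /// `GuestRegionMmap`/`MmapRegion` types from vm-memory's `backend-mmap` feature as the
    /// concrete region type:
    ///
    /// ```ignore
    /// use vm_memory::{GuestAddress, GuestMemory, GuestRegionMmap, MmapRegion};
    ///
    /// // Two non-overlapping regions, already sorted by start address.
    /// let regions = vec![
    ///     GuestRegionMmap::new(MmapRegion::new(0x400).unwrap(), GuestAddress(0x1000)).unwrap(),
    ///     GuestRegionMmap::new(MmapRegion::new(0x400).unwrap(), GuestAddress(0x2000)).unwrap(),
    /// ];
    /// let collection = GuestRegionCollection::from_regions(regions).unwrap();
    /// assert_eq!(collection.num_regions(), 2);
    /// ```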
    pub fn from_regions(mut regions: Vec<R>) -> std::result::Result<Self, Error> {
        Self::from_arc_regions(regions.drain(..).map(Arc::new).collect())
    }

    /// Creates a new [`GuestRegionCollection`] from a vector of `Arc` regions.
    ///
    /// Similar to the constructor `from_regions()`, in that it returns a
    /// [`GuestRegionCollection`]. This constructor exists to let consumers of this API build
    /// a new [`GuestRegionCollection`] from regions shared with an existing
    /// [`GuestRegionCollection`] instance.
    ///
    /// # Arguments
    ///
    /// * `regions` - The vector of `Arc` regions. The regions shouldn't overlap, and they
    ///   should be sorted by the starting address.
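    ///
    /// # Example
    ///
    /// A sketch of sharing one region between two collections (marked `ignore`;
    /// `some_region` is a hypothetical value of a concrete [`GuestMemoryRegion`] type):
    ///
    /// ```ignore
    /// use std::sync::Arc;
    ///
    /// let region = Arc::new(some_region);
    /// let a = GuestRegionCollection::from_arc_regions(vec![Arc::clone(&region)]).unwrap();
    /// let b = GuestRegionCollection::from_arc_regions(vec![region]).unwrap();
    /// // Both collections now reference the same underlying region allocation.
    /// ```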
    pub fn from_arc_regions(regions: Vec<Arc<R>>) -> std::result::Result<Self, Error> {
        if regions.is_empty() {
            return Err(Error::NoMemoryRegion);
        }

        for window in regions.windows(2) {
            let prev = &window[0];
            let next = &window[1];

            if prev.start_addr() > next.start_addr() {
                return Err(Error::UnsortedMemoryRegions);
            }

            if prev.last_addr() >= next.start_addr() {
                return Err(Error::MemoryRegionOverlap);
            }
        }

        Ok(Self { regions })
    }

    /// Inserts a region into this [`GuestRegionCollection`] and returns a new
    /// [`GuestRegionCollection`] containing it.
    ///
    /// # Arguments
    /// * `region`: the memory region to insert into the guest memory object.
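    ///
    /// # Example
    ///
    /// A sketch of non-destructive insertion (marked `ignore`; `collection` and `new_region`
    /// are hypothetical, pre-constructed values):
    ///
    /// ```ignore
    /// use vm_memory::GuestMemory;
    ///
    /// // The original collection is untouched; the returned collection contains the region.
    /// let grown = collection.insert_region(new_region).unwrap();
    /// assert_eq!(grown.num_regions(), collection.num_regions() + 1);
    /// ```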
    pub fn insert_region(
        &self,
        region: Arc<R>,
    ) -> std::result::Result<GuestRegionCollection<R>, Error> {
        let mut regions = self.regions.clone();
        regions.push(region);
        regions.sort_by_key(|x| x.start_addr());

        Self::from_arc_regions(regions)
    }
}

impl<R: GuestMemoryRegion> GuestMemory for GuestRegionCollection<R> {
    type R = R;

    fn num_regions(&self) -> usize {
        self.regions.len()
    }

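    /// Returns the region containing `addr`, if any.
    ///
    /// # Example
    ///
    /// A sketch of the lookup semantics (marked `ignore`; `collection` is a hypothetical
    /// value holding a single region spanning guest addresses `[0x1000, 0x1400)`):
    ///
    /// ```ignore
    /// use vm_memory::{GuestAddress, GuestMemory};
    ///
    /// assert!(collection.find_region(GuestAddress(0x1000)).is_some()); // first byte
    /// assert!(collection.find_region(GuestAddress(0x13ff)).is_some()); // last byte
    /// assert!(collection.find_region(GuestAddress(0x1400)).is_none()); // one past the end
    /// ```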
    fn find_region(&self, addr: GuestAddress) -> Option<&R> {
        let index = match self.regions.binary_search_by_key(&addr, |x| x.start_addr()) {
            Ok(x) => Some(x),
            // `addr` is not the start address of any region, but `Err(x)` is the insertion
            // point, so check whether `addr` falls within the region that starts before it.
            Err(x) if (x > 0 && addr <= self.regions[x - 1].last_addr()) => Some(x - 1),
            _ => None,
        };
        index.map(|x| self.regions[x].as_ref())
    }

    fn iter(&self) -> impl Iterator<Item = &Self::R> {
        self.regions.iter().map(AsRef::as_ref)
    }
}

// This impl will be subsumed by the default impl in vm-memory#312
impl Bytes<MemoryRegionAddress> for GuestRegionMmapExt {
    type E = GuestMemoryError;

    /// # Examples
    /// * Write a slice at guest address 0x1200.
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
    /// #
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # let start_addr = GuestAddress(0x1000);
    /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
    /// #     .expect("Could not create guest memory");
    /// #
    /// let res = gm
    ///     .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200))
    ///     .expect("Could not write to guest memory");
    /// assert_eq!(5, res);
    /// # }
    /// ```
    fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<usize> {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()?
            .write(buf, maddr)
            .map_err(Into::into)
    }

    /// # Examples
    /// * Read a slice of length 16 at guest address 0x1200.
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
    /// #
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # let start_addr = GuestAddress(0x1000);
    /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
    /// #     .expect("Could not create guest memory");
    /// #
    /// let buf = &mut [0u8; 16];
    /// let res = gm
    ///     .read(buf, GuestAddress(0x1200))
    ///     .expect("Could not read from guest memory");
    /// assert_eq!(16, res);
    /// # }
    /// ```
    fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<usize> {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()?
            .read(buf, maddr)
            .map_err(Into::into)
    }

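    /// # Examples
    /// * Write a slice of exactly 5 bytes at guest address 0x1200 (a sketch in the style of
    ///   the examples above; the `backend-mmap` gating and addresses are illustrative):
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
    /// #
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # let start_addr = GuestAddress(0x1000);
    /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
    /// #     .expect("Could not create guest memory");
    /// #
    /// // Unlike `write`, `write_slice` either writes the whole buffer or fails.
    /// gm.write_slice(&[1, 2, 3, 4, 5], GuestAddress(0x1200))
    ///     .expect("Could not write to guest memory");
    /// # }
    /// ```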
    fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<()> {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()?
            .write_slice(buf, maddr)
            .map_err(Into::into)
    }

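    /// # Examples
    /// * Read a slice of exactly 16 bytes at guest address 0x1200 (a sketch in the style of
    ///   the examples above; the `backend-mmap` gating and addresses are illustrative):
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
    /// #
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # let start_addr = GuestAddress(0x1000);
    /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
    /// #     .expect("Could not create guest memory");
    /// #
    /// let buf = &mut [0u8; 16];
    /// // Unlike `read`, `read_slice` either fills the whole buffer or fails.
    /// gm.read_slice(buf, GuestAddress(0x1200))
    ///     .expect("Could not read from guest memory");
    /// # }
    /// ```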
    fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<()> {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()?
            .read_slice(buf, maddr)
            .map_err(Into::into)
    }

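    /// # Examples
    /// * Store a u16 at guest address 0x1200 (a sketch in the style of the examples above;
    ///   the `backend-mmap` gating, value, and ordering are illustrative):
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # use std::sync::atomic::Ordering;
    /// # #[cfg(feature = "backend-mmap")]
    /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
    /// #
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # let start_addr = GuestAddress(0x1000);
    /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
    /// #     .expect("Could not create guest memory");
    /// #
    /// gm.store(0x12u16, GuestAddress(0x1200), Ordering::SeqCst)
    ///     .expect("Could not store to guest memory");
    /// # }
    /// ```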
    fn store<T: AtomicAccess>(
        &self,
        val: T,
        addr: MemoryRegionAddress,
        order: Ordering,
    ) -> Result<()> {
        self.as_volatile_slice().and_then(|s| {
            s.store(val, addr.raw_value() as usize, order)
                .map_err(Into::into)
        })
    }

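    /// # Examples
    /// * Load a u16 from guest address 0x1200 (a sketch in the style of the examples above;
    ///   the `backend-mmap` gating and ordering are illustrative, and freshly mapped guest
    ///   memory reads back as zero):
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # use std::sync::atomic::Ordering;
    /// # #[cfg(feature = "backend-mmap")]
    /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
    /// #
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # let start_addr = GuestAddress(0x1000);
    /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
    /// #     .expect("Could not create guest memory");
    /// #
    /// let val: u16 = gm
    ///     .load(GuestAddress(0x1200), Ordering::SeqCst)
    ///     .expect("Could not load from guest memory");
    /// assert_eq!(0, val);
    /// # }
    /// ```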
    fn load<T: AtomicAccess>(&self, addr: MemoryRegionAddress, order: Ordering) -> Result<T> {
        self.as_volatile_slice()
            .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into))
    }

    // All remaining functions are deprecated and have been removed in vm-memory/main.
    // Firecracker does not use them, so they are left unimplemented here.
    fn read_from<F>(
        &self,
        _addr: MemoryRegionAddress,
        _src: &mut F,
        _count: usize,
    ) -> std::result::Result<usize, Self::E>
    where
        F: Read,
    {
        unimplemented!()
    }

    fn read_exact_from<F>(
        &self,
        _addr: MemoryRegionAddress,
        _src: &mut F,
        _count: usize,
    ) -> std::result::Result<(), Self::E>
    where
        F: Read,
    {
        unimplemented!()
    }

    fn write_to<F>(
        &self,
        _addr: MemoryRegionAddress,
        _dst: &mut F,
        _count: usize,
    ) -> std::result::Result<usize, Self::E>
    where
        F: Write,
    {
        unimplemented!()
    }

    fn write_all_to<F>(
        &self,
        _addr: MemoryRegionAddress,
        _dst: &mut F,
        _count: usize,
    ) -> std::result::Result<(), Self::E>
    where
        F: Write,
    {
        unimplemented!()
    }
}