|
 //! IOTLB misses require sending a notification to the front-end and awaiting a reply that supplies
 //! the desired mapping.
 
-use crate::{GuestAddress, Permissions};
+use crate::guest_memory::{Error as GuestMemoryError, Result as GuestMemoryResult};
+use crate::{
+    bitmap, GuestAddress, GuestMemory, IoMemory, MemoryRegionAddress, Permissions, VolatileSlice,
+};
 use rangemap::RangeMap;
 use std::cmp;
 use std::fmt::Debug;
 use std::num::Wrapping;
 use std::ops::{Deref, Range};
+use std::sync::Arc;
 
 /// Errors associated with IOMMU address translation.
 #[derive(Debug, thiserror::Error)]
@@ -172,6 +176,22 @@ pub struct IotlbFails {
     pub access_fails: Vec<IovaRange>,
 }
 
+/// [`IoMemory`] type that consists of an underlying [`GuestMemory`] object plus an [`Iommu`].
+///
+/// The underlying [`GuestMemory`] represents the physical memory, and the [`Iommu`] translates
+/// the I/O virtual address space that `IommuMemory` provides into that underlying physical address
+/// space.
+#[derive(Debug, Default)]
+pub struct IommuMemory<M: GuestMemory, I: Iommu> {
+    /// Physical memory
+    inner: M,
+    /// IOMMU to translate IOVAs into physical addresses
+    iommu: Arc<I>,
+    /// Whether the IOMMU is to be used at all; disabling it makes this a pass-through to
+    /// `inner`.
+    use_iommu: bool,
+}
+
 impl IommuMapping {
     /// Create a new mapping.
     fn new(source_base: u64, target_base: u64, permissions: Permissions) -> Self {
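Before the trait implementations in the next hunk, here is a minimal construction sketch for the `IommuMemory` type added above. It is illustrative only and not part of the change: it assumes the new `Iommu` trait and `IommuMemory` type end up exported from the crate root, and it uses the crate's existing `GuestMemoryMmap` backend as the physical memory.

```rust
use std::sync::Arc;
use vm_memory::{GuestAddress, GuestMemoryMmap, Iommu, IommuMemory};

/// Wrap "physical" guest RAM in an IOMMU-translated view. Building a concrete
/// `Iommu` implementation is outside the scope of this diff, so one is taken
/// as a parameter here.
fn build_io_memory<I: Iommu>(iommu: Arc<I>) -> IommuMemory<GuestMemoryMmap, I> {
    // 1 MiB of backing guest memory at guest physical address 0.
    let phys = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 1 << 20)]).unwrap();

    // Start in pass-through mode; the front-end can later enable translation
    // with `set_iommu_enabled(true)`.
    IommuMemory::new(phys, iommu, false)
}
```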
|
@@ -330,3 +350,166 @@ impl TryFrom<Range<u64>> for IovaRange {
         })
     }
 }
+
+impl<M: GuestMemory, I: Iommu> IommuMemory<M, I> {
+    /// Create a new `IommuMemory` instance.
+    pub fn new(inner: M, iommu: Arc<I>, use_iommu: bool) -> Self {
+        IommuMemory {
+            inner,
+            iommu,
+            use_iommu,
+        }
+    }
+
+    /// Create a new version of `self` with the underlying physical memory replaced.
+    ///
+    /// Note that the inner `Arc` reference to the IOMMU is cloned, i.e. both the existing and the
+    /// new `IommuMemory` object will share an IOMMU instance. (The `use_iommu` flag, however, is
+    /// copied, so it is independent between the two instances.)
+    pub fn inner_replaced(&self, inner: M) -> Self {
+        IommuMemory {
+            inner,
+            iommu: Arc::clone(&self.iommu),
+            use_iommu: self.use_iommu,
+        }
+    }
+
+    /// Enable or disable the IOMMU.
+    ///
+    /// Disabling the IOMMU switches to pass-through mode, where every access is done directly on
+    /// the underlying physical memory.
+    pub fn set_iommu_enabled(&mut self, enabled: bool) {
+        self.use_iommu = enabled;
+    }
+
+    /// Return a reference to the IOMMU.
+    pub fn iommu(&self) -> &Arc<I> {
+        &self.iommu
+    }
+
+    /// Return a reference to the inner physical memory object.
+    pub fn inner(&self) -> &M {
+        &self.inner
+    }
+}
+
+impl<M: GuestMemory + Clone, I: Iommu> Clone for IommuMemory<M, I> {
+    fn clone(&self) -> Self {
+        IommuMemory {
+            inner: self.inner.clone(),
+            iommu: Arc::clone(&self.iommu),
+            use_iommu: self.use_iommu,
+        }
+    }
+}
+
+impl<M: GuestMemory, I: Iommu> IoMemory for IommuMemory<M, I> {
+    type PhysicalMemory = M;
+
+    fn range_accessible(&self, addr: GuestAddress, count: usize, access: Permissions) -> bool {
+        if !self.use_iommu {
+            return self.inner.range_accessible(addr, count, access);
+        }
+
+        let Ok(mut translated_iter) = self.iommu.translate(addr, count, access) else {
+            return false;
+        };
+
+        translated_iter.all(|translated| {
+            self.inner
+                .range_accessible(translated.base, translated.length, access)
+        })
+    }
+
+    fn try_access<'a, F>(
+        &'a self,
+        count: usize,
+        addr: GuestAddress,
+        access: Permissions,
+        mut f: F,
+    ) -> GuestMemoryResult<usize>
+    where
+        F: FnMut(
+            usize,
+            usize,
+            MemoryRegionAddress,
+            &'a <Self::PhysicalMemory as GuestMemory>::R,
+        ) -> GuestMemoryResult<usize>,
+    {
+        if !self.use_iommu {
+            return self.inner.try_access(count, addr, f);
+        }
+
+        let translated = self
+            .iommu
+            .translate(addr, count, access)
+            .map_err(GuestMemoryError::IommuError)?;
+
+        let mut total = 0;
+        for mapping in translated {
+            let handled = self.inner.try_access(
+                mapping.length,
+                mapping.base,
+                |inner_offset, count, in_region_addr, region| {
+                    f(total + inner_offset, count, in_region_addr, region)
+                },
+            )?;
+
+            if handled == 0 {
+                break;
+            } else if handled > count {
+                return Err(GuestMemoryError::CallbackOutOfRange);
+            }
+
+            total += handled;
+            // `GuestMemory::try_access()` only returns a short count when no more data needs to be
+            // processed, so we can stop here
+            if handled < mapping.length {
+                break;
+            }
+        }
+
+        Ok(total)
+    }
+
+    fn get_slice(
+        &self,
+        addr: GuestAddress,
+        count: usize,
+        access: Permissions,
+    ) -> GuestMemoryResult<VolatileSlice<bitmap::MS<M>>> {
+        if !self.use_iommu {
+            return self.inner.get_slice(addr, count);
+        }
+
+        // Ensure `count` is at least 1 so we can translate something
+        let adj_count = cmp::max(count, 1);
+
+        let mut translated = self
+            .iommu
+            .translate(addr, adj_count, access)
+            .map_err(GuestMemoryError::IommuError)?;
+
+        let mapping = translated.next().unwrap();
+        if translated.next().is_some() {
+            return Err(GuestMemoryError::IommuError(Error::Fragmented {
+                iova_range: IovaRange {
+                    base: addr,
+                    length: count,
+                },
+                continuous_length: mapping.length,
+            }));
+        }
+
+        assert!(mapping.length == count || (count == 0 && mapping.length == 1));
+        self.inner.get_slice(mapping.base, count)
+    }
+
+    fn physical_memory(&self) -> Option<&Self::PhysicalMemory> {
+        if self.use_iommu {
+            None
+        } else {
+            Some(&self.inner)
+        }
+    }
+}
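Finally, a rough consumer-side sketch of how the pass-through versus translated split plays out for a device backend. This is not part of the change: `dma_read` is a hypothetical helper, and the sketch assumes `IoMemory` and `Permissions` are exported from the crate root as used in the imports above.

```rust
use vm_memory::{GuestAddress, GuestMemory, IoMemory, Permissions};

/// Hypothetical device-side helper: read a 512-byte block that the guest
/// placed at `iova`. `mem` would typically be the `IommuMemory` built earlier;
/// the same code works whether or not the IOMMU is enabled.
fn dma_read<M: IoMemory>(
    mem: &M,
    iova: GuestAddress,
) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
    let mut buf = vec![0u8; 512];

    if let Some(phys) = mem.physical_memory() {
        // Pass-through mode (`use_iommu == false`): `iova` already is a
        // physical address, so the plain `GuestMemory` accessors suffice.
        phys.get_slice(iova, buf.len())?.copy_to(&mut buf);
    } else {
        // Translated mode: `get_slice()` checks permissions and maps the IOVA;
        // per the implementation above it fails with `Error::Fragmented` if
        // the range does not map to one physically contiguous area.
        mem.get_slice(iova, buf.len(), Permissions::Read)?
            .copy_to(&mut buf);
    }

    Ok(buf)
}
```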