|
 //! IOTLB misses require sending a notification to the front-end and awaiting a reply that supplies
 //! the desired mapping.

-use crate::{GuestAddress, Permissions};
+use crate::guest_memory::{
+    Error as GuestMemoryError, GuestMemorySliceIterator, Result as GuestMemoryResult,
+};
+use crate::{
+    bitmap, GuestAddress, GuestMemory, IoMemory, MemoryRegionAddress, Permissions, VolatileSlice,
+};
 use rangemap::RangeMap;
 use std::cmp;
 use std::fmt::Debug;
 use std::num::Wrapping;
 use std::ops::{Deref, Range};
+use std::sync::Arc;

 /// Errors associated with IOMMU address translation.
 #[derive(Debug, thiserror::Error)]
@@ -172,6 +178,22 @@ pub struct IotlbFails {
     pub access_fails: Vec<IovaRange>,
 }

+/// [`IoMemory`] type that consists of an underlying [`GuestMemory`] object plus an [`Iommu`].
+///
+/// The underlying [`GuestMemory`] serves as the physical memory, and the [`Iommu`] translates
+/// the I/O virtual address space that `IommuMemory` provides into that underlying physical
+/// address space.
+#[derive(Debug, Default)]
+pub struct IommuMemory<M: GuestMemory, I: Iommu> {
+    /// Physical memory
+    inner: M,
+    /// IOMMU to translate IOVAs into physical addresses
+    iommu: Arc<I>,
+    /// Whether the IOMMU is to be used; disabling it makes this a pass-through to `inner`.
+    use_iommu: bool,
+}
+
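As a usage sketch (not part of this patch): given a hypothetical `MyIommu` type implementing `Iommu` and some `GuestMemory` backend `phys_mem` (both placeholders), an `IommuMemory` could be set up with the methods this patch adds, roughly like this:

    // Sketch only; `phys_mem` and `MyIommu` are assumed placeholders.
    let mut mem = IommuMemory::new(phys_mem, MyIommu::default(), false);

    // With the IOMMU disabled, accesses pass straight through to `phys_mem`,
    // so the physical memory is directly reachable:
    assert!(mem.physical_memory().is_some());

    // Enabling the IOMMU makes every access go through IOVA translation first:
    mem.set_iommu_enabled(true);
    assert!(mem.physical_memory().is_none());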
 impl IommuMapping {
     /// Create a new mapping.
     fn new(source_base: u64, target_base: u64, permissions: Permissions) -> Self {
@@ -330,3 +352,249 @@ impl TryFrom<Range<u64>> for IovaRange {
         })
     }
 }
+
+impl<M: GuestMemory, I: Iommu> IommuMemory<M, I> {
+    /// Create a new `IommuMemory` instance.
+    pub fn new(inner: M, iommu: I, use_iommu: bool) -> Self {
+        IommuMemory {
+            inner,
+            iommu: Arc::new(iommu),
+            use_iommu,
+        }
+    }
+
+    /// Create a new version of `self` with the underlying physical memory replaced.
+    ///
+    /// Note that the inner `Arc` reference to the IOMMU is cloned, i.e. both the existing and the
+    /// new `IommuMemory` object will share an IOMMU instance. (The `use_iommu` flag, however, is
+    /// copied, so it is independent between the two instances.)
+    pub fn inner_replaced(&self, inner: M) -> Self {
+        IommuMemory {
+            inner,
+            iommu: Arc::clone(&self.iommu),
+            use_iommu: self.use_iommu,
+        }
+    }
+
+    /// Enable or disable the IOMMU.
+    ///
+    /// Disabling the IOMMU switches to pass-through mode, where every access is done directly on
+    /// the underlying physical memory.
+    pub fn set_iommu_enabled(&mut self, enabled: bool) {
+        self.use_iommu = enabled;
+    }
+
+    /// Return a reference to the IOMMU.
+    pub fn iommu(&self) -> &Arc<I> {
+        &self.iommu
+    }
+
+    /// Return a reference to the inner physical memory object.
+    pub fn inner(&self) -> &M {
+        &self.inner
+    }
+}
+
+impl<M: GuestMemory + Clone, I: Iommu> Clone for IommuMemory<M, I> {
+    fn clone(&self) -> Self {
+        IommuMemory {
+            inner: self.inner.clone(),
+            iommu: Arc::clone(&self.iommu),
+            use_iommu: self.use_iommu,
+        }
+    }
+}
+
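A brief sketch of the sharing semantics described above (again with placeholder `phys_mem`, `other_phys_mem`, and `MyIommu`): both `clone()` and `inner_replaced()` keep the two instances pointing at the same `Iommu` through the shared `Arc`, while `use_iommu` is tracked per instance.

    let a = IommuMemory::new(phys_mem, MyIommu::default(), true);
    let mut b = a.inner_replaced(other_phys_mem);

    // Both instances share one IOMMU object ...
    assert!(Arc::ptr_eq(a.iommu(), b.iommu()));

    // ... but the enable flag is independent: disabling it on `b` does not affect `a`.
    b.set_iommu_enabled(false);
    assert!(a.physical_memory().is_none());
    assert!(b.physical_memory().is_some());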
+impl<M: GuestMemory, I: Iommu> IoMemory for IommuMemory<M, I> {
+    type PhysicalMemory = M;
+
+    fn range_accessible(&self, addr: GuestAddress, count: usize, access: Permissions) -> bool {
+        if !self.use_iommu {
+            return self.inner.range_accessible(addr, count, access);
+        }
+
+        let Ok(mut translated_iter) = self.iommu.translate(addr, count, access) else {
+            return false;
+        };
+
+        translated_iter.all(|translated| {
+            self.inner
+                .range_accessible(translated.base, translated.length, access)
+        })
+    }
+
+    fn try_access<F>(
+        &self,
+        count: usize,
+        addr: GuestAddress,
+        access: Permissions,
+        mut f: F,
+    ) -> GuestMemoryResult<usize>
+    where
+        F: FnMut(
+            usize,
+            usize,
+            MemoryRegionAddress,
+            &<Self::PhysicalMemory as GuestMemory>::R,
+        ) -> GuestMemoryResult<usize>,
+    {
+        if !self.use_iommu {
+            return self.inner.try_access(count, addr, f);
+        }
+
+        let translated = self
+            .iommu
+            .translate(addr, count, access)
+            .map_err(GuestMemoryError::IommuError)?;
+
+        let mut total = 0;
+        for mapping in translated {
+            let handled = self.inner.try_access(
+                mapping.length,
+                mapping.base,
+                |inner_offset, count, in_region_addr, region| {
+                    f(total + inner_offset, count, in_region_addr, region)
+                },
+            )?;
+
+            if handled == 0 {
+                break;
+            } else if handled > count {
+                return Err(GuestMemoryError::CallbackOutOfRange);
+            }
+
+            total += handled;
+            // `GuestMemory::try_access()` only returns a short count when no more data needs to
+            // be processed, so we can stop here.
+            if handled < mapping.length {
+                break;
+            }
+        }
+
+        Ok(total)
+    }
+
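To illustrate how the callback sees a translated access (a sketch with assumed placeholders: the `mem` object, the `iova` address, the 4096-byte length, and a `Permissions::Read` variant): the closure runs once per contiguous physical chunk, and its first argument is that chunk's offset within the overall I/O virtual range, so a caller can, for example, probe how many bytes of an IOVA range are backed by simply accepting each chunk.

    let iova = GuestAddress(0x1000); // placeholder IOVA
    let backed = mem.try_access(4096, iova, Permissions::Read, |offset, len, _region_addr, _region| {
        // `offset` is relative to `iova`; `len` is the size of this contiguous chunk.
        Ok(len)
    })?;
    assert!(backed <= 4096);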
+    fn get_slices<'a>(
+        &'a self,
+        addr: GuestAddress,
+        count: usize,
+        access: Permissions,
+    ) -> GuestMemoryResult<
+        impl Iterator<Item = GuestMemoryResult<VolatileSlice<'a, bitmap::MS<'a, M>>>>,
+    > {
+        if self.use_iommu {
+            IommuMemorySliceIterator::virt(self, addr, count, access)
+                .map_err(GuestMemoryError::IommuError)
+        } else {
+            Ok(IommuMemorySliceIterator::phys(self, addr, count))
+        }
+    }
+
+    fn physical_memory(&self) -> Option<&Self::PhysicalMemory> {
+        if self.use_iommu {
+            None
+        } else {
+            Some(&self.inner)
+        }
+    }
+}
+
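Likewise for `get_slices()` (sketch only; `mem`, `iova`, the length, and `Permissions::Read` are assumed placeholders): the returned iterator yields one `VolatileSlice` per contiguous piece of physical memory backing the I/O virtual range, and each item is itself a `Result` because fetching a later piece can still fail.

    let mut total = 0;
    for slice in mem.get_slices(iova, 4096, Permissions::Read)? {
        let slice = slice?;
        // Each slice covers one contiguous physical chunk of the IOVA range.
        total += slice.len();
    }
    // When the whole range is mapped and backed, the slices add up to the requested length.
    assert_eq!(total, 4096);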
+/// Iterates over [`VolatileSlice`]s that together form an area in an `IommuMemory`.
+///
+/// Returned by [`IommuMemory::get_slices()`].
+#[derive(Debug)]
+pub struct IommuMemorySliceIterator<'a, M: GuestMemory, I: Iommu + 'a> {
+    /// Underlying physical memory (i.e. not the `IommuMemory`)
+    phys_mem: &'a M,
+    /// IOMMU translation result (i.e. remaining physical regions to visit)
+    translation: Option<IotlbIterator<I::IotlbGuard<'a>>>,
+    /// Iterator in the currently visited physical region
+    current_translated_iter: Option<GuestMemorySliceIterator<'a, M>>,
+}
+
+impl<'a, M: GuestMemory, I: Iommu> IommuMemorySliceIterator<'a, M, I> {
+    /// Create an iterator over the physical region `[addr, addr + count)`.
+    ///
+    /// “Physical” means that the IOMMU is not used to translate this address range. The resulting
+    /// iterator is effectively the same as would be returned by [`GuestMemory::get_slices()`] on
+    /// the underlying physical memory for the given address range.
+    fn phys(mem: &'a IommuMemory<M, I>, addr: GuestAddress, count: usize) -> Self {
+        IommuMemorySliceIterator {
+            phys_mem: &mem.inner,
+            translation: None,
+            current_translated_iter: Some(mem.inner.get_slices(addr, count)),
+        }
+    }
+
+    /// Create an iterator over the IOVA region `[addr, addr + count)`.
+    ///
+    /// This address range is translated using the IOMMU, and the resulting mappings are then
+    /// visited separately via [`GuestMemory::get_slices()`].
+    fn virt(
+        mem: &'a IommuMemory<M, I>,
+        addr: GuestAddress,
+        count: usize,
+        access: Permissions,
+    ) -> Result<Self, Error> {
+        let translation = mem.iommu.translate(addr, count, access)?;
+        Ok(IommuMemorySliceIterator {
+            phys_mem: &mem.inner,
+            translation: Some(translation),
+            current_translated_iter: None,
+        })
+    }
+
+    /// Helper function for [`<Self as Iterator>::next()`].
+    ///
+    /// Get the next slice and update the internal state: if there is an element left in
+    /// `self.current_translated_iter`, return it; otherwise, move on to the next mapping in
+    /// `self.translation` until no mappings are left.
+    ///
+    /// If both fields are `None`, always return `None`.
+    ///
+    /// # Safety
+    ///
+    /// This function never resets `self.current_translated_iter` or `self.translation` to `None`,
+    /// particularly not in case of error; calling this function again without resetting these
+    /// fields after an error is ill-defined. The caller must therefore check the return value
+    /// and, in case of an error, reset both fields to `None`.
+    ///
+    /// (This is why this function exists: so that this reset can happen in a single central
+    /// location.)
+    unsafe fn do_next(
+        &mut self,
+    ) -> Option<GuestMemoryResult<VolatileSlice<'a, bitmap::MS<'a, M>>>> {
+        loop {
+            if let Some(item) = self
+                .current_translated_iter
+                .as_mut()
+                .and_then(|iter| iter.next())
+            {
+                return Some(item);
+            }
+
+            let next_mapping = self.translation.as_mut()?.next()?;
+            self.current_translated_iter = Some(
+                self.phys_mem
+                    .get_slices(next_mapping.base, next_mapping.length),
+            );
+        }
+    }
+}
+
+impl<'a, M: GuestMemory, I: Iommu> Iterator for IommuMemorySliceIterator<'a, M, I> {
+    type Item = GuestMemoryResult<VolatileSlice<'a, bitmap::MS<'a, M>>>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        // SAFETY:
+        // We reset `current_translated_iter` and `translation` to `None` in case of error.
+        match unsafe { self.do_next() } {
+            Some(Ok(slice)) => Some(Ok(slice)),
+            other => {
+                // On error (or at the end), clear both fields so iteration remains stopped.
+                self.current_translated_iter.take();
+                self.translation.take();
+                other
+            }
+        }
+    }
+}