@@ -52,7 +52,7 @@ use std::sync::atomic::Ordering;
52
52
use std:: sync:: Arc ;
53
53
54
54
use crate :: address:: { Address , AddressValue } ;
55
- use crate :: bitmap:: MS ;
55
+ use crate :: bitmap:: { BitmapSlice , MS } ;
56
56
use crate :: bytes:: { AtomicAccess , Bytes } ;
57
57
use crate :: io:: { ReadVolatile , WriteVolatile } ;
58
58
use crate :: volatile_memory:: { self , VolatileSlice } ;
    /// when it encounters an error after successfully producing at least one slice.
    /// Return an error if requesting the first slice returns an error.
    pub fn stop_on_error(self) -> Result<impl Iterator<Item = VolatileSlice<'a, MS<'a, M>>>> {
        // Forward to the `IoMemorySliceIterator` default implementation so the
        // error-handling logic lives in exactly one place.  The fully qualified
        // call makes it explicit that we invoke the trait method, not ourselves.
        <Self as IoMemorySliceIterator<'a, MS<'a, M>>>::stop_on_error(self)
    }
}
551
547
@@ -566,6 +562,11 @@ impl<'a, M: GuestMemory + ?Sized> Iterator for GuestMemorySliceIterator<'a, M> {
566
562
}
567
563
}
568
564
565
// Marker impl: `GuestMemorySliceIterator` already satisfies the `Iterator` supertrait
// bound, so the trait's default `stop_on_error()` is all that is needed — there are no
// items to override.
impl<'a, M: GuestMemory + ?Sized> IoMemorySliceIterator<'a, MS<'a, M>>
    for GuestMemorySliceIterator<'a, M>
{
}
569
570
/// This iterator continues to return `None` when exhausted.
570
571
///
571
572
/// [`<Self as Iterator>::next()`](GuestMemorySliceIterator::next) sets `self.count` to 0 when
@@ -723,6 +724,157 @@ impl<T: GuestMemory + ?Sized> Bytes<GuestAddress> for T {
723
724
}
724
725
}
725
726
727
/// Permissions for accessing virtual memory.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[repr(u8)]
pub enum Permissions {
    /// No permissions
    No = 0b00,
    /// Read-only
    Read = 0b01,
    /// Write-only
    Write = 0b10,
    /// Allow both reading and writing
    ReadWrite = 0b11,
}

impl Permissions {
    /// Convert the numerical representation into the enum.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not a valid representation of any `Permissions` variant.
    fn from_repr(raw: u8) -> Self {
        // The discriminants are declared explicitly on the enum, so matching the
        // literal values covers every valid encoding.
        match raw {
            0b00 => Permissions::No,
            0b01 => Permissions::Read,
            0b10 => Permissions::Write,
            0b11 => Permissions::ReadWrite,
            _ => panic!("{raw:x} is not a valid raw Permissions value"),
        }
    }

    /// Check whether the permissions `self` allow the given `access`.
    pub fn allow(&self, access: Self) -> bool {
        // `access` is allowed iff every bit it requests is also set in `self`.
        (*self as u8) & (access as u8) == access as u8
    }
}

impl std::ops::BitOr for Permissions {
    type Output = Permissions;

    /// Return the union of `self` and `rhs`.
    fn bitor(self, rhs: Permissions) -> Self::Output {
        // Both operands are valid two-bit encodings, so their union is one as well
        // and `from_repr()` cannot panic here.
        Self::from_repr(self as u8 | rhs as u8)
    }
}

impl std::ops::BitAnd for Permissions {
    type Output = Permissions;

    /// Return the intersection of `self` and `rhs`.
    fn bitand(self, rhs: Permissions) -> Self::Output {
        // Both operands are valid two-bit encodings, so their intersection is one
        // as well and `from_repr()` cannot panic here.
        Self::from_repr(self as u8 & rhs as u8)
    }
}
782
+
783
/// Represents virtual I/O memory.
///
/// `IoMemory` is generally backed by some “physical” `GuestMemory`, which then consists of
/// `GuestMemoryRegion` objects. However, the mapping from I/O virtual addresses (IOVAs) to
/// physical addresses may be arbitrarily fragmented. Translation is done via an IOMMU.
///
/// Note in contrast to `GuestMemory`:
/// - Any IOVA range may consist of arbitrarily many underlying ranges in physical memory.
/// - Accessing an IOVA requires passing the intended access mode, and the IOMMU will check whether
///   the given access mode is permitted for the given IOVA.
/// - The translation result for a given IOVA may change over time (i.e. the physical address
///   associated with an IOVA may change).
pub trait IoMemory {
    /// Underlying `GuestMemory` type.
    type PhysicalMemory: GuestMemory + ?Sized;

    /// Return `true` if `addr..(addr + count)` is accessible with `access`.
    fn check_range(&self, addr: GuestAddress, count: usize, access: Permissions) -> bool;

    /// Returns an iterator over [`VolatileSlice`](struct.VolatileSlice.html)s that together
    /// cover `count` bytes starting at `addr`.
    ///
    /// Note that because of the fragmented nature of virtual memory, it can easily happen that the
    /// range `[addr, addr + count)` is not backed by a continuous region in our own virtual
    /// memory, which will make generating the slice impossible.
    ///
    /// The iterator’s items are wrapped in [`Result`], i.e. there may be errors reported on
    /// individual items. If there is no such error, the cumulative length of all items will be
    /// equal to `count`. Any error will end iteration immediately, i.e. there are no items past
    /// the first error.
    ///
    /// If `count` is 0, an empty iterator will be returned.
    fn get_slices<'a>(
        &'a self,
        addr: GuestAddress,
        count: usize,
        access: Permissions,
    ) -> Result<impl IoMemorySliceIterator<'a, MS<'a, Self::PhysicalMemory>>>;

    /// If this virtual memory is just a plain `GuestMemory` object underneath without an IOMMU
    /// translation layer in between, return that `GuestMemory` object.
    ///
    /// The default implementation assumes a translation layer is present and returns `None`.
    fn physical_memory(&self) -> Option<&Self::PhysicalMemory> {
        None
    }
}
828
+
829
+ /// Iterates over [`VolatileSlice`]s that together form an I/O memory area.
830
+ ///
831
+ /// Returned by [`IoMemory::get_slices()`].
832
+ pub trait IoMemorySliceIterator < ' a , B : BitmapSlice > :
833
+ Iterator < Item = Result < VolatileSlice < ' a , B > > > + Sized
834
+ {
835
+ /// Adapts this [`IoMemorySliceIterator`] to return `None` (e.g. gracefully terminate) when it
836
+ /// encounters an error after successfully producing at least one slice.
837
+ /// Return an error if requesting the first slice returns an error.
838
+ fn stop_on_error ( self ) -> Result < impl Iterator < Item = VolatileSlice < ' a , B > > > {
839
+ let mut peek = self . peekable ( ) ;
840
+ if let Some ( err) = peek. next_if ( Result :: is_err) {
841
+ return Err ( err. unwrap_err ( ) ) ;
842
+ }
843
+ Ok ( peek. filter_map ( Result :: ok) )
844
+ }
845
+ }
846
+
847
/// Allow accessing every [`GuestMemory`] via [`IoMemory`].
///
/// [`IoMemory`] is a generalization of [`GuestMemory`]: Every object implementing the former is a
/// subset of an object implementing the latter (there always is an underlying [`GuestMemory`]),
/// with an opaque internal mapping on top, e.g. provided by an IOMMU.
///
/// Every [`GuestMemory`] is therefore trivially also an [`IoMemory`], assuming a complete identity
/// mapping (which we must assume, so that accessing such objects via either trait will yield the
/// same result): Basically, all [`IoMemory`] methods are implemented as trivial wrappers around
/// the same [`GuestMemory`] methods (if available), discarding the `access` parameter.
impl<M: GuestMemory + ?Sized> IoMemory for M {
    // Identity mapping: the "physical" memory is the object itself.
    type PhysicalMemory = M;

    fn check_range(&self, addr: GuestAddress, count: usize, _access: Permissions) -> bool {
        // All permissions are granted under the identity mapping, so `_access` is
        // ignored.  The fully qualified call is required to avoid ambiguity with
        // `IoMemory::check_range` itself.
        <M as GuestMemory>::check_range(self, addr, count)
    }

    fn get_slices<'a>(
        &'a self,
        addr: GuestAddress,
        count: usize,
        _access: Permissions,
    ) -> Result<impl IoMemorySliceIterator<'a, MS<'a, Self::PhysicalMemory>>> {
        // `GuestMemory::get_slices()` hands back the iterator directly, so obtaining
        // it cannot fail here; per-slice errors still surface as iterator items.
        Ok(<M as GuestMemory>::get_slices(self, addr, count))
    }

    fn physical_memory(&self) -> Option<&Self::PhysicalMemory> {
        // No translation layer in between: expose the backing memory directly.
        Some(self)
    }
}
877
+
726
878
#[ cfg( test) ]
727
879
mod tests {
728
880
#![ allow( clippy:: undocumented_unsafe_blocks) ]
0 commit comments