@@ -795,6 +795,7 @@ mod tests {
 }
 
 #[cfg(kani)]
+#[allow(dead_code)] // Avoid warning when using stubs
 mod verification {
     use std::mem::ManuallyDrop;
 
@@ -803,7 +804,9 @@ mod verification {
     use vm_memory::VolatileSlice;
 
     use super::{IoVecBuffer, IoVecBufferMut, IoVecVec};
+    use crate::arch::PAGE_SIZE;
     use crate::devices::virtio::iov_deque::IovDeque;
+    use crate::devices::virtio::queue::FIRECRACKER_MAX_QUEUE_SIZE;
 
     // Maximum memory size to use for our buffers. For the time being 1KB.
     const GUEST_MEMORY_SIZE: usize = 1 << 10;
@@ -815,6 +818,50 @@ mod verification {
     // >= 1.
     const MAX_DESC_LENGTH: usize = 4;
 
+    mod stubs {
+        use super::*;
+
+        /// This is a stub for the `IovDeque::push_back` method.
+        ///
+        /// `IovDeque` relies on a special allocation of two pages of virtual memory, where both
+        /// of these point to the same underlying physical page. This way, the contents of the
+        /// first page of virtual memory are automatically mirrored in the second virtual page.
+        /// We do that in order to always have the elements that are currently in the ring
+        /// buffer in consecutive (virtual) memory.
+        ///
+        /// To build this particular memory layout we create a new `memfd` object, allocate
+        /// memory with `mmap` and call `mmap` again to make sure both pages point to the page
+        /// allocated via the `memfd` object. These FFI calls make Kani complain, so here we
+        /// mock the `IovDeque` object memory with a normal memory allocation of two pages
+        /// worth of data.
+        ///
+        /// This stub imitates the effect of mirroring without the elaborate memory allocation
+        /// trick.
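+        ///
+        /// For illustration (hypothetical numbers, assuming a queue size of 256): a slot and
+        /// its mirror are exactly one queue size apart, so a window that wraps past the end of
+        /// the first page stays contiguous in the second:
+        ///
+        /// ```ignore
+        /// // start = 200, len = 100: the last element sits at virtual slot 299,
+        /// // which mirrors physical slot 299 - 256 = 43 in the first page.
+        /// let last = 200 + 100 - 1;
+        /// assert_eq!(last - 256, 43);
+        /// ```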
+        pub fn push_back(deque: &mut IovDeque, iov: iovec) {
+            // This should NEVER happen, since our ring buffer is as big as the maximum queue
+            // size. We also check the sanity of the VirtIO queues in queue.rs, which means
+            // that if we ever try to add something to a full ring buffer, there is an internal
+            // bug in the device emulation logic. Panic here because the device is hopelessly
+            // broken.
+            assert!(
+                !deque.is_full(),
+                "The number of `iovec` objects is bigger than the available space"
+            );
+
+            let offset = (deque.start + deque.len) as usize;
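+            // The mirror slot lives exactly one queue size away in the other virtual page;
+            // writing both slots imitates the kernel keeping the two mappings in sync.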
+            let mirror = if offset >= FIRECRACKER_MAX_QUEUE_SIZE as usize {
+                offset - FIRECRACKER_MAX_QUEUE_SIZE as usize
+            } else {
+                offset + FIRECRACKER_MAX_QUEUE_SIZE as usize
+            };
+
+            // SAFETY: `deque.iov` is a valid pointer and `deque.start + deque.len` is within
+            // range (we asserted above that the buffer is not full).
+            unsafe { deque.iov.add(offset).write_volatile(iov) };
+            unsafe { deque.iov.add(mirror).write_volatile(iov) };
+            deque.len += 1;
+        }
+    }
+
     fn create_iovecs(mem: *mut u8, size: usize, nr_descs: usize) -> (IoVecVec, u32) {
         let mut vecs: Vec<iovec> = Vec::with_capacity(nr_descs);
         let mut len = 0u32;
@@ -845,8 +892,37 @@ mod verification {
         }
     }
 
+    fn create_iov_deque() -> IovDeque {
+        // SAFETY: safe because the layout has non-zero size
+        let mem = unsafe {
+            std::alloc::alloc(std::alloc::Layout::from_size_align_unchecked(
+                2 * PAGE_SIZE,
+                PAGE_SIZE,
+            ))
+        };
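+        // Let Kani pick an arbitrary in-bounds starting index, so the proof covers every
+        // initial position of the ring buffer.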
+        IovDeque {
+            iov: mem.cast(),
+            start: kani::any_where(|&start| start < FIRECRACKER_MAX_QUEUE_SIZE),
+            len: 0,
+        }
+    }
+
+    /// During `IovDeque::drop` we call `munmap` to deallocate the backing memory of the
+    /// `IovDeque`. For the purpose of the proof we substitute that allocation with a normal
+    /// one, so deallocate the memory here.
+    pub fn drop_iov_deque(deque: &mut IovDeque) {
+        // We previously allocated this memory with `std::alloc::alloc`, so we must release it
+        // through the same allocator.
+        unsafe {
+            std::alloc::dealloc(
+                deque.iov.cast(),
+                std::alloc::Layout::from_size_align_unchecked(2 * PAGE_SIZE, PAGE_SIZE),
+            )
+        }
+    }
+
     fn create_iovecs_mut(mem: *mut u8, size: usize, nr_descs: usize) -> (IovDeque, u32) {
-        let mut vecs = IovDeque::new().unwrap();
+        let mut vecs = create_iov_deque();
         let mut len = 0u32;
         for _ in 0..nr_descs {
             // The `IoVecBufferMut` constructors ensure that the memory region described by every
@@ -955,6 +1031,7 @@ mod verification {
     #[kani::proof]
     #[kani::unwind(5)]
     #[kani::solver(cadical)]
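+    // Swap the mmap-backed `IovDeque::push_back` for the plain-allocation stub above, since
+    // Kani cannot model the `memfd`/`mmap` calls it relies on.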
+    #[kani::stub(IovDeque::push_back, stubs::push_back)]
     fn verify_write_to_iovec() {
         for nr_descs in 0..MAX_DESC_LENGTH {
             let mut iov_mut = IoVecBufferMut::any_of_length(nr_descs);
@@ -979,6 +1056,8 @@ mod verification {
                     .unwrap(),
                 buf.len().min(iov_mut.len().saturating_sub(offset) as usize)
             );
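+            // The stub-backed deque was allocated with `std::alloc::alloc`, so free it
+            // manually and `forget` it to keep the real `Drop` (which would `munmap` the
+            // memory) from running.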
+            drop_iov_deque(&mut iov_mut.vecs);
+            std::mem::forget(iov_mut.vecs);
         }
     }
 }