Commit b2fbade

priyasiddharth authored and stefano-garzarella committed
virtio-queue: add stub memory region for kani proofs
Kani proofs do not finish in practical time if we use the production memory region, so we implement a stub region backed by a simple vector. This keeps subsequent proofs tractable and also enables stateful proofs.

Signed-off-by: Siddharth Priya <[email protected]>
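For orientation, here is a minimal, self-contained sketch of the idea described in the commit message: a memory region stub backed by an ordinary vector, with explicit bounds checks on every access. It is illustrative only and not part of this commit (the `VecRegion` name and its methods are invented for the example); the actual patch below instead implements the vm-memory `GuestMemoryRegion` and `Bytes` traits over a raw pointer so the stub can drop into the existing queue proofs.

// Illustrative sketch only (not from this commit): a guest memory "region"
// backed by a plain Vec<u8>, with bounds-checked reads and writes.
struct VecRegion {
    buffer: Vec<u8>,
    start: u64, // guest address at which the region begins
}

impl VecRegion {
    // Translate a guest address plus access length into a buffer offset,
    // returning None if the access would fall outside the region.
    fn offset_of(&self, addr: u64, len: usize) -> Option<usize> {
        let offset = addr.checked_sub(self.start)? as usize;
        let end = offset.checked_add(len)?;
        (end <= self.buffer.len()).then_some(offset)
    }

    fn write(&mut self, addr: u64, data: &[u8]) -> Option<()> {
        let offset = self.offset_of(addr, data.len())?;
        self.buffer[offset..offset + data.len()].copy_from_slice(data);
        Some(())
    }

    fn read(&self, addr: u64, out: &mut [u8]) -> Option<()> {
        let offset = self.offset_of(addr, out.len())?;
        out.copy_from_slice(&self.buffer[offset..offset + out.len()]);
        Some(())
    }
}

fn main() {
    let mut region = VecRegion { buffer: vec![0u8; 16], start: 0x1000 };
    region.write(0x1004, &[1, 2, 3, 4]).unwrap();
    let mut back = [0u8; 4];
    region.read(0x1004, &mut back).unwrap();
    assert_eq!(back, [1, 2, 3, 4]);
}

The patch itself keeps a raw pointer rather than owning the vector: as its comments note, the backing allocation is leaked (via ManuallyDrop) so the buffer's address stays stable while Kani explores the proof.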
1 parent 59c092f commit b2fbade

File tree

1 file changed: +296 -58 lines


virtio-queue/src/queue/verification.rs

Lines changed: 296 additions & 58 deletions
@@ -2,23 +2,307 @@
 use std::mem::ManuallyDrop;
 use std::num::Wrapping;
 
-use vm_memory::{FileOffset, GuestMemoryRegion, MemoryRegionAddress, MmapRegion};
+use vm_memory::{AtomicAccess, GuestMemoryError, GuestMemoryRegion, MemoryRegionAddress};
+
+use std::io::{Read, Write};
+use std::mem::MaybeUninit;
+use vm_memory::ByteValued;
 
 use super::*;
 
+struct StubRegion {
+    buffer: *mut u8,
+    region_len: usize,
+    region_start: GuestAddress,
+}
+
+// A stub region that implements the GuestMemoryRegion and Bytes traits. This is
+// used to create a single memory region for testing purposes in Kani. It allows
+// us to simulate a memory region without needing a full memory management
+// implementation. The region is backed by a raw pointer to a buffer, which is
+// allocated and leaked to ensure a stable address. The region supports reading
+// and writing bytes, objects, and slices. It also provides methods to convert
+// between guest addresses and memory region addresses, and to check offsets
+// within the region.
+impl StubRegion {
+    fn new(buf_ptr: *mut u8, buf_len: usize, start_offset: u64) -> Self {
+        Self {
+            buffer: buf_ptr,
+            region_len: buf_len,
+            region_start: GuestAddress(start_offset),
+        }
+    }
+
+    fn to_region_addr(&self, addr: GuestAddress) -> Option<MemoryRegionAddress> {
+        let offset = addr
+            .raw_value()
+            .checked_sub(self.region_start.raw_value())?;
+        if offset < self.region_len as u64 {
+            Some(MemoryRegionAddress(offset))
+        } else {
+            None
+        }
+    }
+
+    fn checked_offset(
+        &self,
+        addr: MemoryRegionAddress,
+        count: usize,
+    ) -> Option<MemoryRegionAddress> {
+        let end = addr.0.checked_add(count as u64)?;
+        if end <= self.region_len as u64 {
+            Some(MemoryRegionAddress(end))
+        } else {
+            None
+        }
+    }
+}
+
+impl GuestMemoryRegion for StubRegion {
+    type B = ();
+
+    fn len(&self) -> <GuestAddress as vm_memory::AddressValue>::V {
+        self.region_len.try_into().unwrap()
+    }
+
+    fn start_addr(&self) -> GuestAddress {
+        self.region_start
+    }
+
+    fn bitmap(&self) -> &Self::B {
+        // For Kani, we do not need a bitmap, so we return an empty tuple.
+        &()
+    }
+}
+
+// Implements the Bytes trait for StubRegion, allowing it to read and write
+// bytes and objects. This is used to simulate memory operations in Kani proofs.
+impl Bytes<MemoryRegionAddress> for StubRegion {
+    type E = GuestMemoryError;
+
+    fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<usize, Self::E> {
+        let offset = addr.0 as usize;
+        let end = offset
+            .checked_add(buf.len())
+            .ok_or(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)))?;
+        if end > self.region_len as usize {
+            return Err(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)));
+        }
+        unsafe {
+            std::ptr::copy_nonoverlapping(buf.as_ptr(), self.buffer.add(offset), buf.len());
+        }
+        Ok(buf.len())
+    }
+
+    fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<usize, Self::E> {
+        let offset = addr.0 as usize;
+        let end = offset
+            .checked_add(buf.len())
+            .ok_or(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)))?;
+        if end > self.region_len as usize {
+            return Err(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)));
+        }
+        unsafe {
+            std::ptr::copy_nonoverlapping(self.buffer.add(offset), buf.as_mut_ptr(), buf.len());
+        }
+        Ok(buf.len())
+    }
+
+    fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<(), Self::E> {
+        self.write(buf, addr)?;
+        Ok(())
+    }
+
+    fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<(), Self::E> {
+        self.read(buf, addr)?;
+        Ok(())
+    }
+
+    fn read_from<F: Read>(
+        &self,
+        addr: MemoryRegionAddress,
+        src: &mut F,
+        count: usize,
+    ) -> Result<usize, Self::E> {
+        let mut temp = vec![0u8; count];
+        src.read_exact(&mut temp)
+            .map_err(|_| GuestMemoryError::PartialBuffer {
+                expected: count,
+                completed: 0,
+            })?;
+        self.write(&temp, addr)
+    }
+
+    fn read_exact_from<F: Read>(
+        &self,
+        addr: MemoryRegionAddress,
+        src: &mut F,
+        count: usize,
+    ) -> Result<(), Self::E> {
+        let mut temp = vec![0u8; count];
+        src.read_exact(&mut temp)
+            .map_err(|_| GuestMemoryError::PartialBuffer {
+                expected: count,
+                completed: 0,
+            })?;
+        self.write_slice(&temp, addr)
+    }
+
+    fn read_obj<T: ByteValued>(&self, addr: MemoryRegionAddress) -> Result<T, Self::E> {
+        let size = std::mem::size_of::<T>();
+        let offset = addr.0 as usize;
+        let end = offset
+            .checked_add(size)
+            .ok_or(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)))?;
+        if end > self.region_len as usize {
+            return Err(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)));
+        }
+        let mut result: T = unsafe { MaybeUninit::<T>::zeroed().assume_init() };
+        unsafe {
+            std::ptr::copy_nonoverlapping(
+                self.buffer.add(offset),
+                result.as_mut_slice().as_mut_ptr(),
+                size,
+            );
+        }
+        Ok(result)
+    }
+
+    fn write_to<F: Write>(
+        &self,
+        addr: MemoryRegionAddress,
+        dst: &mut F,
+        count: usize,
+    ) -> Result<usize, Self::E> {
+        let offset = addr.0 as usize;
+        let end = offset
+            .checked_add(count)
+            .ok_or(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)))?;
+        if end > self.region_len as usize {
+            return Err(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)));
+        }
+        unsafe {
+            let slice = std::slice::from_raw_parts(self.buffer.add(offset), count);
+            dst.write_all(slice)
+                .map_err(|_| GuestMemoryError::PartialBuffer {
+                    expected: count,
+                    completed: 0,
+                })?;
+        }
+        Ok(count)
+    }
+
+    fn write_obj<T: ByteValued>(&self, val: T, addr: MemoryRegionAddress) -> Result<(), Self::E> {
+        let size = std::mem::size_of::<T>();
+        let offset = addr.0 as usize;
+        let end = offset
+            .checked_add(size)
+            .ok_or(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)))?;
+        if end > self.region_len as usize {
+            return Err(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)));
+        }
+        let bytes = val.as_slice();
+        unsafe {
+            std::ptr::copy_nonoverlapping(bytes.as_ptr(), self.buffer.add(offset), size);
+        }
+        Ok(())
+    }
+
+    fn write_all_to<F: Write>(
+        &self,
+        addr: MemoryRegionAddress,
+        dst: &mut F,
+        count: usize,
+    ) -> Result<(), Self::E> {
+        self.write_to(addr, dst, count)?;
+        Ok(())
+    }
+
+    fn store<T: AtomicAccess>(
+        &self,
+        val: T,
+        addr: MemoryRegionAddress,
+        _order: Ordering,
+    ) -> Result<(), Self::E> {
+        let size = std::mem::size_of::<T>();
+        let offset = addr.0 as usize;
+        let end = offset
+            .checked_add(size)
+            .ok_or(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)))?;
+        if end > self.region_len as usize {
+            return Err(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)));
+        }
+        let bytes = val.as_slice();
+        unsafe {
+            std::ptr::copy_nonoverlapping(bytes.as_ptr(), self.buffer.add(offset), size);
+        }
+        Ok(())
+    }
+
+    fn load<T: AtomicAccess>(
+        &self,
+        addr: MemoryRegionAddress,
+        _order: Ordering,
+    ) -> Result<T, Self::E> {
+        let size = std::mem::size_of::<T>();
+        let offset = addr.0 as usize;
+        let end = offset
+            .checked_add(size)
+            .ok_or(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)))?;
+        if end > self.region_len as usize {
+            return Err(GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)));
+        }
+        unsafe {
+            let slice = std::slice::from_raw_parts(self.buffer.add(offset), size);
+            T::from_slice(slice)
+                .ok_or_else(|| GuestMemoryError::InvalidGuestAddress(GuestAddress(addr.0)))
+                .copied()
+        }
+    }
+}
+
+// A proof that write to StubRegion and read back gives the same bytes.
+#[kani::proof]
+#[kani::unwind(0)]
+fn verify_stubregion_write_read() {
+    // Prepare a buffer and a StubRegion
+    let mut buffer = kani::vec::exact_vec::<u8, 16>();
+    let region = StubRegion::new(buffer.as_mut_ptr(), buffer.len(), 0);
+
+    // Arbitrary bytes to write
+    let bytes: [u8; 16] = kani::any();
+    // Write bytes to region at offset 0
+    let write_offset: usize = kani::any();
+    kani::assume(write_offset <= buffer.len() - 16);
+    assert!(region
+        .write(&bytes, MemoryRegionAddress(write_offset as u64))
+        .is_ok());
+
+    // Read back into a new buffer
+    let mut readback = kani::vec::exact_vec::<u8, 16>();
+    assert!(region
+        .read(&mut readback, MemoryRegionAddress(write_offset as u64))
+        .is_ok());
+
+    // Choose a nondet index and check both match
+    let idx: usize = kani::any();
+    kani::assume(idx < 16);
+    assert_eq!(bytes[idx], readback[idx]);
+}
+
 /// A made-for-kani version of `vm_memory::GuestMemoryMmap`. Unlike the real
 /// `GuestMemoryMmap`, which manages a list of regions and then does a binary
 /// search to determine which region a specific read or write request goes to,
 /// this only uses a single region. Eliminating this binary search significantly
 /// speeds up all queue proofs, because it eliminates the only loop contained herein,
 /// meaning we can use `kani::unwind(0)` instead of `kani::unwind(2)`. Functionally,
 /// it works identically to `GuestMemoryMmap` with only a single contained region.
-pub struct SingleRegionGuestMemory {
-    the_region: vm_memory::GuestRegionMmap,
+struct SingleRegionGuestMemory {
+    the_region: StubRegion,
 }
 
 impl GuestMemory for SingleRegionGuestMemory {
-    type R = vm_memory::GuestRegionMmap;
+    type R = StubRegion;
 
     fn num_regions(&self) -> usize {
         1
@@ -64,25 +348,16 @@ impl GuestMemory for SingleRegionGuestMemory
 
 impl kani::Arbitrary for SingleRegionGuestMemory {
     fn any() -> Self {
-        guest_memory(
-            ManuallyDrop::new(kani::vec::exact_vec::<u8, GUEST_MEMORY_SIZE>()).as_mut_ptr(),
-        )
+        let memory =
+            ManuallyDrop::new(kani::vec::exact_vec::<u8, GUEST_MEMORY_SIZE>()).as_mut_ptr();
+        Self {
+            the_region: StubRegion::new(memory, GUEST_MEMORY_SIZE, GUEST_MEMORY_BASE),
+        }
     }
 }
-pub struct ProofContext {
-    pub queue: Queue,
-    pub memory: SingleRegionGuestMemory,
-}
-
-pub struct MmapRegionStub {
-    _addr: *mut u8,
-    _size: usize,
-    _bitmap: (),
-    _file_offset: Option<FileOffset>,
-    _prot: i32,
-    _flags: i32,
-    _owned: bool,
-    _hugetlbfs: Option<bool>,
+struct ProofContext {
+    queue: Queue,
+    memory: SingleRegionGuestMemory,
 }
 
 /// We start the first guest memory region at an offset so that harnesses using
@@ -97,43 +372,6 @@ const GUEST_MEMORY_BASE: u64 = 0;
 // able to change its address, as it is 16-byte aligned.
 const GUEST_MEMORY_SIZE: usize = QUEUE_END as usize + 30;
 
-fn guest_memory(memory: *mut u8) -> SingleRegionGuestMemory {
-    // Ideally, we'd want to do
-    // let region = unsafe {MmapRegionBuilder::new(GUEST_MEMORY_SIZE)
-    //     .with_raw_mmap_pointer(bytes.as_mut_ptr())
-    //     .build()
-    //     .unwrap()};
-    // However, .build() calls to .build_raw(), which contains a call to libc::sysconf.
-    // Since kani 0.34.0, stubbing out foreign functions is supported, but due to the rust
-    // standard library using a special version of the libc crate, it runs into some problems
-    // [1] Even if we work around those problems, we run into performance problems [2].
-    // Therefore, for now we stick to this ugly transmute hack (which only works because
-    // the kani compiler will never re-order fields, so we can treat repr(Rust) as repr(C)).
-    //
-    // [1]: https://github.com/model-checking/kani/issues/2673
-    // [2]: https://github.com/model-checking/kani/issues/2538
-    let region_stub = MmapRegionStub {
-        _addr: memory,
-        _size: GUEST_MEMORY_SIZE,
-        _bitmap: Default::default(),
-        _file_offset: None,
-        _prot: 0,
-        _flags: libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
-        _owned: false,
-        _hugetlbfs: None,
-    };
-
-    let region: MmapRegion<()> = unsafe { std::mem::transmute(region_stub) };
-
-    let guest_region =
-        vm_memory::GuestRegionMmap::new(region, GuestAddress(GUEST_MEMORY_BASE)).unwrap();
-
-    // Use a single memory region for guests of size < 2GB
-    SingleRegionGuestMemory {
-        the_region: guest_region,
-    }
-}
-
 const MAX_QUEUE_SIZE: u16 = 4;
 
 // Constants describing the in-memory layout of a queue of size MAX_QUEUE_SIZE starting
