
Commit 7f83b55

MatiasVara and priyasiddharth authored and committed
feat(kani): add proofs for verify_add_used and notifications
The virtIO queue has two rings: Avail and Used.

Avail is written by the driver and read by the device.
* It has a `used_event` field that the driver writes to tell the device how
  many buffers to process before notifying it.

Used is written by the device and read by the driver.
* It has an `avail_event` field that the device writes to tell the driver how
  many buffers it can push for processing before notifying the device.

The `enable_notification` method is called by the device and tells the driver
to send a notification once the `avail_ring.next_avail` slot is filled by the
driver. The `needs_notification` method is called by the device and tells it
to notify the driver if the condition `c` for triggering a notification is
true:

```
c = old <= used_event < next_used

where,
old        - the previous entry on which the driver was notified by the
             device (i.e., the last time `needs_notification` fired)
used_event - the driver-written index for which it wants to be notified
next_used  - the next entry that will be put in the used ring
```

Note that the ordering logic has to account for wrapping. See
`pred_in_wrapping_range` for details.

Signed-off-by: Siddharth Priya <[email protected]>
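To make the wrapping caveat concrete, here is a minimal standalone sketch of that condition (the helper name and the test are illustrative assumptions, not this commit's code; `pred_in_wrapping_range` in the proofs is the authoritative version):

```rust
/// Sketch of the notification condition `old <= used_event < next_used`
/// under u16 wrapping arithmetic (cf. `vring_need_event` in the virtio
/// spec): `used_event` must lie in the half-open wrapping interval
/// [old, next_used).
fn in_wrapping_range(old: u16, used_event: u16, next_used: u16) -> bool {
    // Measure both indices as wrapping distances from `old`; the interval
    // is non-empty only if next_used has moved past old.
    used_event.wrapping_sub(old) < next_used.wrapping_sub(old)
}

#[test]
fn wraps_around_index_space() {
    // Straightforward case: 5 <= 7 < 10.
    assert!(in_wrapping_range(5, 7, 10));
    // Wrapping case: next_used wrapped past u16::MAX; used_event = 0 is inside.
    assert!(in_wrapping_range(65534, 0, 1));
    // used_event == next_used falls outside the half-open range: no notification.
    assert!(!in_wrapping_range(5, 10, 10));
}
```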
1 parent f730282 commit 7f83b55

File tree: 3 files changed, +517 −311 lines


virtio-queue/Cargo.toml

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@ edition = "2021"
 test-utils = []
 
 [dependencies]
-vm-memory = { workspace = true }
+vm-memory = { workspace = true, features = ["backend-mmap", "backend-bitmap"] }
 vmm-sys-util = { workspace = true }
 log = "0.4.17"
 virtio-bindings = { path="../virtio-bindings", version = "0.2.6" }

virtio-queue/src/queue.rs

Lines changed: 6 additions & 253 deletions
@@ -272,250 +272,6 @@ impl Queue {
     }
 }
 
-#[cfg(kani)]
-#[allow(dead_code)]
-mod verification {
-    use std::mem::ManuallyDrop;
-    use std::num::Wrapping;
-
-    use vm_memory::{FileOffset, GuestMemoryRegion, MemoryRegionAddress, MmapRegion};
-
-    use super::*;
-
-    /// A made-for-kani version of `vm_memory::GuestMemoryMmap`. Unlike the real
-    /// `GuestMemoryMmap`, which manages a list of regions and then does a binary
-    /// search to determine which region a specific read or write request goes to,
-    /// this only uses a single region. Eliminating this binary search significantly
-    /// speeds up all queue proofs, because it eliminates the only loop contained herein,
-    /// meaning we can use `kani::unwind(0)` instead of `kani::unwind(2)`. Functionally,
-    /// it works identically to `GuestMemoryMmap` with only a single contained region.
-    pub struct ProofGuestMemory {
-        the_region: vm_memory::GuestRegionMmap,
-    }
-
-    impl GuestMemory for ProofGuestMemory {
-        type R = vm_memory::GuestRegionMmap;
-
-        fn num_regions(&self) -> usize {
-            1
-        }
-
-        fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> {
-            self.the_region
-                .to_region_addr(addr)
-                .map(|_| &self.the_region)
-        }
-
-        fn iter(&self) -> impl Iterator<Item = &Self::R> {
-            std::iter::once(&self.the_region)
-        }
-
-        fn try_access<F>(
-            &self,
-            count: usize,
-            addr: GuestAddress,
-            mut f: F,
-        ) -> vm_memory::guest_memory::Result<usize>
-        where
-            F: FnMut(
-                usize,
-                usize,
-                MemoryRegionAddress,
-                &Self::R,
-            ) -> vm_memory::guest_memory::Result<usize>,
-        {
-            // We only have a single region, meaning a lot of the complications of the default
-            // try_access implementation for dealing with reads/writes across multiple
-            // regions do not apply.
-            let region_addr = self
-                .the_region
-                .to_region_addr(addr)
-                .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
-            self.the_region
-                .checked_offset(region_addr, count)
-                .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
-            f(0, count, region_addr, &self.the_region)
-        }
-    }
-
-    pub struct ProofContext(pub Queue, pub ProofGuestMemory);
-
-    pub struct MmapRegionStub {
-        addr: *mut u8,
-        size: usize,
-        bitmap: (),
-        file_offset: Option<FileOffset>,
-        prot: i32,
-        flags: i32,
-        owned: bool,
-        hugetlbfs: Option<bool>,
-    }
-
-    /// We start the first guest memory region at an offset so that harnesses using
-    /// Queue::any() will be exposed to queue segments both before and after valid guest memory.
-    /// This conforms to MockSplitQueue::new(), which uses `0` as the starting address of the
-    /// virtqueue. Also, QUEUE_END is the size only if GUEST_MEMORY_BASE is `0`.
-    const GUEST_MEMORY_BASE: u64 = 0;
-
-    // We size our guest memory to fit a properly aligned queue, plus some wiggle bytes
-    // to make sure we not only test queues where all segments are consecutively aligned.
-    // We need to give at least 16 bytes of buffer space for the descriptor table to be
-    // able to change its address, as it is 16-byte aligned.
-    const GUEST_MEMORY_SIZE: usize = QUEUE_END as usize + 30;
-
-    fn guest_memory(memory: *mut u8) -> ProofGuestMemory {
-        // Ideally, we'd want to do
-        // let region = unsafe {MmapRegionBuilder::new(GUEST_MEMORY_SIZE)
-        //    .with_raw_mmap_pointer(bytes.as_mut_ptr())
-        //    .build()
-        //    .unwrap()};
-        // However, .build() calls to .build_raw(), which contains a call to libc::sysconf.
-        // Since kani 0.34.0, stubbing out foreign functions is supported, but due to the rust
-        // standard library using a special version of the libc crate, it runs into some
-        // problems [1]. Even if we work around those problems, we run into performance
-        // problems [2]. Therefore, for now we stick to this ugly transmute hack (which only
-        // works because the kani compiler will never re-order fields, so we can treat
-        // repr(Rust) as repr(C)).
-        //
-        // [1]: https://github.com/model-checking/kani/issues/2673
-        // [2]: https://github.com/model-checking/kani/issues/2538
-        let region_stub = MmapRegionStub {
-            addr: memory,
-            size: GUEST_MEMORY_SIZE,
-            bitmap: Default::default(),
-            file_offset: None,
-            prot: 0,
-            flags: libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
-            owned: false,
-            hugetlbfs: None,
-        };
-
-        let region: MmapRegion<()> = unsafe { std::mem::transmute(region_stub) };
-
-        let guest_region =
-            vm_memory::GuestRegionMmap::new(region, GuestAddress(GUEST_MEMORY_BASE)).unwrap();
-
-        // Use a single memory region, just as firecracker does for guests of size < 2GB.
-        // For larger guests, firecracker uses two regions (due to the MMIO gap being
-        // at the top of the 32-bit address space).
-        ProofGuestMemory {
-            the_region: guest_region,
-        }
-    }
-
-    // can't implement kani::Arbitrary for the relevant types due to orphan rules
-    fn setup_kani_guest_memory() -> ProofGuestMemory {
-        // Non-deterministic Vec that will be used as the guest memory. We use `exact_vec` for
-        // now, as `any_vec` will likely result in worse performance. We do not lose much from
-        // `exact_vec`, as our proofs do not make any assumptions about "filling" guest
-        // memory: Since everything is placed at non-deterministic addresses with
-        // non-deterministic lengths, we still closely cover all scenarios that would be
-        // covered by smaller guest memory. We leak the memory allocated here, so that it
-        // doesn't get deallocated at the end of this function. We do not explicitly
-        // de-allocate, but since this is a kani proof, that does not matter.
-        guest_memory(
-            ManuallyDrop::new(kani::vec::exact_vec::<u8, GUEST_MEMORY_SIZE>()).as_mut_ptr(),
-        )
-    }
-
-    const MAX_QUEUE_SIZE: u16 = 256;
-
-    // Constants describing the in-memory layout of a queue of size MAX_QUEUE_SIZE starting
-    // at the beginning of guest memory. These are based on Section 2.7 of the VirtIO 1.2
-    // specification.
-    const QUEUE_BASE_ADDRESS: u64 = GUEST_MEMORY_BASE;
-
-    /// The descriptor table has 16 bytes per entry; the avail ring starts right after.
-    const AVAIL_RING_BASE_ADDRESS: u64 = QUEUE_BASE_ADDRESS + MAX_QUEUE_SIZE as u64 * 16;
-
-    /// The used ring starts after the avail ring (which has size 6 + 2 * MAX_QUEUE_SIZE)
-    /// and needs 2 bytes of padding.
-    const USED_RING_BASE_ADDRESS: u64 =
-        AVAIL_RING_BASE_ADDRESS + 6 + 2 * MAX_QUEUE_SIZE as u64 + 2;
-
-    /// The address of the first byte after the queue. Since our queue starts at guest physical
-    /// address 0, this is also the size of the memory area occupied by the queue.
-    /// Note that the used ring structure has size 6 + 8 * MAX_QUEUE_SIZE.
-    const QUEUE_END: u64 = USED_RING_BASE_ADDRESS + 6 + 8 * MAX_QUEUE_SIZE as u64;
-
-    impl kani::Arbitrary for ProofContext {
-        fn any() -> Self {
-            let mem = setup_kani_guest_memory();
-
-            let mut queue = Queue::new(MAX_QUEUE_SIZE).unwrap();
-
-            queue.ready = true;
-
-            queue.set_desc_table_address(
-                Some(QUEUE_BASE_ADDRESS as u32),
-                Some((QUEUE_BASE_ADDRESS >> 32) as u32),
-            );
-
-            queue.set_avail_ring_address(
-                Some(AVAIL_RING_BASE_ADDRESS as u32),
-                Some((AVAIL_RING_BASE_ADDRESS >> 32) as u32),
-            );
-
-            queue.set_used_ring_address(
-                Some(USED_RING_BASE_ADDRESS as u32),
-                Some((USED_RING_BASE_ADDRESS >> 32) as u32),
-            );
-
-            queue.set_next_avail(kani::any());
-            queue.set_next_used(kani::any());
-            queue.set_event_idx(kani::any());
-            queue.num_added = Wrapping(kani::any());
-
-            kani::assume(queue.is_valid(&mem));
-
-            ProofContext(queue, mem)
-        }
-    }
-
-    #[kani::proof]
-    // There are no loops anywhere, but kani really enjoys getting stuck in
-    // std::ptr::drop_in_place. This is a compiler intrinsic that has a "dummy"
-    // implementation in stdlib that just recursively calls itself. Kani will
-    // generally unwind this recursion infinitely.
-    #[kani::unwind(0)]
-    fn verify_spec_2_7_7_2() {
-        // Section 2.7.7.2 deals with device-to-driver notification suppression.
-        // It describes a mechanism by which the driver can tell the device that it does not
-        // want notifications (IRQs) about the device finishing processing individual buffers
-        // (descriptor chain heads) from the avail ring until a specific number of descriptors
-        // has been processed. This is done by the driver defining a "used_event" index, which
-        // tells the device "please do not notify me until used.ring[used_event] has been
-        // written to by you".
-        let ProofContext(mut queue, mem) = kani::any();
-
-        let num_added_old = queue.num_added.0;
-        let needs_notification = queue.needs_notification(&mem);
-
-        // event_idx_enabled is equivalent to VIRTIO_F_EVENT_IDX having been negotiated
-        if !queue.event_idx_enabled {
-            // The specification here says:
-            // After the device writes a descriptor index into the used ring:
-            // – If flags is 1, the device SHOULD NOT send a notification.
-            // – If flags is 0, the device MUST send a notification.
-            // flags is the first field in the avail_ring_address, which we completely ignore.
-            // We always send a notification, and as there only is a SHOULD NOT, that is okay.
-            assert!(needs_notification.unwrap());
-        } else {
-            // next_used - 1 is where the previous descriptor was placed
-            if Wrapping(queue.used_event(&mem, Ordering::Relaxed).unwrap())
-                == queue.next_used - Wrapping(1)
-                && num_added_old > 0
-            {
-                // If the idx field in the used ring (which determined where that descriptor
-                // index was placed) was equal to used_event, the device MUST send a
-                // notification.
-                assert!(needs_notification.unwrap());
-
-                kani::cover!();
-            }
-
-            // The other case is handled by a "SHOULD NOT send a notification" in the spec.
-            // So we do not care.
-        }
-    }
-}
-
 impl<'a> QueueGuard<'a> for Queue {
     type G = &'a mut Self;
 }
@@ -715,18 +471,15 @@ impl QueueT for Queue {
             .ok_or(Error::AddressOverflow)?;
         mem.write_obj(VirtqUsedElem::new(head_index.into(), len), addr)
             .map_err(Error::GuestMemory)?;
-
         self.next_used += Wrapping(1);
         self.num_added += Wrapping(1);
 
-        mem.store(
-            u16::to_le(self.next_used.0),
-            self.used_ring
-                .checked_add(2)
-                .ok_or(Error::AddressOverflow)?,
-            Ordering::Release,
-        )
-        .map_err(Error::GuestMemory)
+        let addr: GuestAddress = self
+            .used_ring
+            .checked_add(2)
+            .ok_or(Error::AddressOverflow)?;
+        mem.store(u16::to_le(self.next_used.0), addr, Ordering::Release)
+            .map_err(Error::GuestMemory)
     }
 
     fn enable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
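For orientation, here is a hedged sketch of the device-side path these proofs exercise, written against the public `QueueT` trait (the `complete_chain` wrapper is hypothetical and not part of this commit):

```rust
use virtio_queue::{Error, Queue, QueueT};
use vm_memory::GuestMemory;

/// Hypothetical device-side completion path: write a used element via
/// `add_used` (the method refactored above), then ask `needs_notification`
/// whether the driver's `used_event` requires a device-to-driver notification.
fn complete_chain<M: GuestMemory>(
    queue: &mut Queue,
    mem: &M,
    head_index: u16,
    len: u32,
) -> Result<bool, Error> {
    // Writes used.ring[used.idx % size] and publishes the new used.idx
    // with Release ordering (the `mem.store` in the hunk above).
    queue.add_used(mem, head_index, len)?;
    // Ok(true) => the device should send the notification (e.g. an IRQ).
    queue.needs_notification(mem)
}
```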
