Skip to content

Commit 72a0ae7

Browse files
committed
refactor(queue): update calculations in the avail ring
Replace numbers with more descriptive `size_of` methods. Signed-off-by: Egor Lazarchuk <[email protected]>
1 parent f85cd72 commit 72a0ae7

File tree

1 file changed

+17
-22
lines changed

1 file changed

+17
-22
lines changed

src/vmm/src/devices/virtio/queue.rs

Lines changed: 17 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -402,33 +402,28 @@ impl Queue {
402402
// In a naive notation, that would be:
403403
// `descriptor_table[avail_ring[next_avail]]`.
404404
//
405-
// First, we compute the byte-offset (into `self.avail_ring`) of the index of the next
406-
// available descriptor. `self.avail_ring` stores the address of a `struct
407-
// virtq_avail`, as defined by the VirtIO spec:
408-
//
409-
// ```C
410-
// struct virtq_avail {
411-
// le16 flags;
412-
// le16 idx;
413-
// le16 ring[QUEUE_SIZE];
414-
// le16 used_event
405+
// Avail ring has layout:
406+
// struct AvailRing {
407+
// flags: u16,
408+
// idx: u16,
409+
// ring: [u16; <queue size>],
410+
// used_event: u16,
415411
// }
416-
// ```
417-
//
418-
// We use `self.next_avail` to store the position, in `ring`, of the next available
419-
// descriptor index, with a twist: we always only increment `self.next_avail`, so the
420-
// actual position will be `self.next_avail % self.actual_size()`.
421-
// We are now looking for the offset of `ring[self.next_avail % self.actual_size()]`.
422-
// `ring` starts after `flags` and `idx` (4 bytes into `struct virtq_avail`), and holds
423-
// 2-byte items, so the offset will be:
424-
let index_offset = 4 + 2 * (self.next_avail.0 % self.actual_size());
412+
// We calculate the offset into the `ring` field.
413+
// We use `self.next_avail` to store the position, of the next available descriptor
414+
// index in the `ring` field. Because `self.next_avail` is only incremented, the actual
415+
// index into `ring` is `self.next_avail % self.actual_size()`.
416+
let desc_index_offset = std::mem::size_of::<u16>()
417+
+ std::mem::size_of::<u16>()
418+
+ std::mem::size_of::<u16>() * usize::from(self.next_avail.0 % self.actual_size());
419+
let desc_index_address = self
420+
.avail_ring
421+
.unchecked_add(usize_to_u64(desc_index_offset));
425422

426423
// `self.is_valid()` already performed all the bound checks on the descriptor table
427424
// and virtq rings, so it's safe to unwrap guest memory reads and to use unchecked
428425
// offsets.
429-
let desc_index: u16 = mem
430-
.read_obj(self.avail_ring.unchecked_add(u64::from(index_offset)))
431-
.unwrap();
426+
let desc_index: u16 = mem.read_obj(desc_index_address).unwrap();
432427

433428
DescriptorChain::checked_new(mem, self.desc_table, self.actual_size(), desc_index).map(
434429
|dc| {

0 commit comments

Comments
 (0)