@@ -402,33 +402,28 @@ impl Queue {
402
402
// In a naive notation, that would be:
403
403
// `descriptor_table[avail_ring[next_avail]]`.
404
404
//
405
- // First, we compute the byte-offset (into `self.avail_ring`) of the index of the next
406
- // available descriptor. `self.avail_ring` stores the address of a `struct
407
- // virtq_avail`, as defined by the VirtIO spec:
408
- //
409
- // ```C
410
- // struct virtq_avail {
411
- // le16 flags;
412
- // le16 idx;
413
- // le16 ring[QUEUE_SIZE];
414
- // le16 used_event
405
+ // Avail ring has layout:
406
+ // struct AvailRing {
407
+ // flags: u16,
408
+ // idx: u16,
409
+ // ring: [u16; <queue size>],
410
+ // used_event: u16,
415
411
// }
416
- // ```
417
- //
418
- // We use `self.next_avail` to store the position, in `ring`, of the next available
419
- // descriptor index, with a twist: we always only increment `self.next_avail`, so the
420
- // actual position will be `self.next_avail % self.actual_size()`.
421
- // We are now looking for the offset of `ring[self.next_avail % self.actual_size()]`.
422
- // `ring` starts after `flags` and `idx` (4 bytes into `struct virtq_avail`), and holds
423
- // 2-byte items, so the offset will be:
424
- let index_offset = 4 + 2 * ( self . next_avail . 0 % self . actual_size ( ) ) ;
412
+ // We calculate the offset into the `ring` field.
413
+ // We use `self.next_avail` to store the position of the next available descriptor
414
+ // index in the `ring` field. Because `self.next_avail` is only incremented, the actual
415
+ // index into `AvailRing` is `self.next_avail % self.actual_size()`.
416
+ let desc_index_offset = std:: mem:: size_of :: < u16 > ( )
417
+ + std:: mem:: size_of :: < u16 > ( )
418
+ + std:: mem:: size_of :: < u16 > ( ) * usize:: from ( self . next_avail . 0 % self . actual_size ( ) ) ;
419
+ let desc_index_address = self
420
+ . avail_ring
421
+ . unchecked_add ( usize_to_u64 ( desc_index_offset) ) ;
425
422
426
423
// `self.is_valid()` already performed all the bound checks on the descriptor table
427
424
// and virtq rings, so it's safe to unwrap guest memory reads and to use unchecked
428
425
// offsets.
429
- let desc_index: u16 = mem
430
- . read_obj ( self . avail_ring . unchecked_add ( u64:: from ( index_offset) ) )
431
- . unwrap ( ) ;
426
+ let desc_index: u16 = mem. read_obj ( desc_index_address) . unwrap ( ) ;
432
427
433
428
DescriptorChain :: checked_new ( mem, self . desc_table , self . actual_size ( ) , desc_index) . map (
434
429
|dc| {
0 commit comments