@@ -584,28 +584,13 @@ impl Queue {
         // In a naive notation, that would be:
         // `descriptor_table[avail_ring[next_avail]]`.
         //
-        // Avail ring has layout:
-        // struct AvailRing {
-        //     flags: u16,
-        //     idx: u16,
-        //     ring: [u16; <queue size>],
-        //     used_event: u16,
-        // }
-        // We calculate offset into `ring` field.
-        // We use `self.next_avail` to store the position, of the next available descriptor
-        // index in the `ring` field. Because `self.next_avail` is only incremented, the actual
-        // index into `AvailRing` is `self.next_avail % self.actual_size()`.
-        let desc_index_offset = std::mem::size_of::<u16>()
-            + std::mem::size_of::<u16>()
-            + std::mem::size_of::<u16>() * usize::from(self.next_avail.0 % self.actual_size());
-        let desc_index_address = self
-            .avail_ring_address
-            .unchecked_add(usize_to_u64(desc_index_offset));
-
-        // `self.is_valid()` already performed all the bound checks on the descriptor table
-        // and virtq rings, so it's safe to unwrap guest memory reads and to use unchecked
-        // offsets.
-        let desc_index: u16 = mem.read_obj(desc_index_address).unwrap();
+        // We use `self.next_avail` to store the position, in `ring`, of the next available
+        // descriptor index, with a twist: we always only increment `self.next_avail`, so the
+        // actual position will be `self.next_avail % self.actual_size()`.
+        let idx = self.next_avail.0 % self.actual_size();
+        // SAFETY:
+        // index is bound by the queue size
+        let desc_index = unsafe { self.avail_ring_ring_get(usize::from(idx)) };

         DescriptorChain::checked_new(mem, self.desc_table_address, self.actual_size(), desc_index)
             .map(|dc| {
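The `avail_ring_ring_get` helper that the new code calls is defined outside this hunk. For readers following along, here is a minimal sketch of what such an accessor could look like, based on the avail-ring layout described in the removed comment; the `avail_ring_ptr` field, the `debug_assert!`, and the volatile-read strategy are illustrative assumptions, not the PR's actual implementation:

```rust
use std::mem::size_of;

/// Simplified stand-in for the real `Queue` type; `avail_ring_ptr` is a
/// hypothetical raw pointer to the start of the avail ring in guest memory.
struct Queue {
    avail_ring_ptr: *const u8,
    size: u16,
}

impl Queue {
    /// Reads `ring[index]` from the guest's avail ring, whose layout is:
    ///
    ///     struct AvailRing {
    ///         flags: u16,
    ///         idx: u16,
    ///         ring: [u16; <queue size>],
    ///         used_event: u16,
    ///     }
    ///
    /// # Safety
    /// `index` must be smaller than the queue size, and the ring must already
    /// have been validated to lie entirely within guest memory.
    unsafe fn avail_ring_ring_get(&self, index: usize) -> u16 {
        debug_assert!(index < usize::from(self.size));
        // `ring` starts after the `flags` and `idx` fields, i.e. at offset
        // 2 * size_of::<u16>(); each entry is itself a `u16`.
        let offset = (2 + index) * size_of::<u16>();
        // Volatile read, since the guest may update the ring concurrently.
        // The avail ring is 2-byte aligned per the virtio spec, so the cast
        // to `*const u16` is sound.
        self.avail_ring_ptr.add(offset).cast::<u16>().read_volatile()
    }
}
```

Whatever the real definition looks like, factoring the offset arithmetic into a single accessor keeps the `unsafe` surface in one place instead of repeating the layout math at every call site.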