@@ -156,68 +156,74 @@ impl AvailableDescriptors {
 
     /// Read new descriptor chains from the queue.
     pub fn read_new_desc_chains(&mut self, queue: &mut Queue, mem: &GuestMemoryMmap) {
-        #[repr(C)]
-        struct AvailRing {
-            flags: u16,
-            idx: u16,
-            ring: [u16; 256],
-            used_event: u16,
-        }
-        #[repr(C)]
-        #[derive(Default, Clone, Copy)]
-        struct Descriptor {
-            addr: u64,
-            len: u32,
-            flags: u16,
-            next: u16,
-        }
-
-        // SAFETY:
-        // avail_ring in the queue is a valid guest address
-        let avail_ring: &AvailRing =
-            unsafe { std::mem::transmute(mem.get_host_address(queue.avail_ring).unwrap()) };
-
-        // SAFETY:
-        // desc_table in the queue is a valid guest address
-        let desc_table: &[Descriptor; 256] =
-            unsafe { std::mem::transmute(mem.get_host_address(queue.desc_table).unwrap()) };
-
-        let avail_idx = queue.avail_idx(mem);
-        let actual_size = queue.actual_size();
-
-        while queue.next_avail.0 != avail_idx.0 {
-            let Some(next_iovec_buf) = self.iov_ring.next_available() else {
-                break;
-            };
-
-            let avail_index = queue.next_avail.0 % actual_size;
-            queue.next_avail += Wrapping(1);
-
-            let desc_index = avail_ring.ring[avail_index as usize];
-            let mut desc = &desc_table[desc_index as usize];
-
-            next_iovec_buf.clear();
-            next_iovec_buf.head_index = desc_index;
-
-            let iov = libc::iovec {
-                iov_base: mem.get_host_address(GuestAddress(desc.addr)).unwrap().cast(),
-                iov_len: desc.len as usize,
-            };
-            next_iovec_buf.vecs.push(iov);
-            next_iovec_buf.len += desc.len;
-            self.valid_ring.push(true);
-
-            while desc.flags & crate::devices::virtio::queue::VIRTQ_DESC_F_NEXT != 0 {
-                desc = &desc_table[desc.next as usize];
-                let iov = libc::iovec {
-                    iov_base: mem.get_host_address(GuestAddress(desc.addr)).unwrap().cast(),
-                    iov_len: desc.len as usize,
-                };
-                next_iovec_buf.vecs.push(iov);
-                next_iovec_buf.len += desc.len;
-                self.valid_ring.push(true);
-            }
-        }
+        // #[repr(C)]
+        // struct AvailRing {
+        //     flags: u16,
+        //     idx: u16,
+        //     ring: [u16; 256],
+        //     used_event: u16,
+        // }
+        // #[repr(C)]
+        // #[derive(Default, Clone, Copy)]
+        // struct Descriptor {
+        //     addr: u64,
+        //     len: u32,
+        //     flags: u16,
+        //     next: u16,
+        // }
+        //
+        // // SAFETY:
+        // // avail_ring in the queue is a valid guest address
+        // let avail_ring: &AvailRing =
+        //     unsafe { std::mem::transmute(mem.get_host_address(queue.avail_ring).unwrap()) };
+        //
+        // // SAFETY:
+        // // desc_table in the queue is a valid guest address
+        // let desc_table: &[Descriptor; 256] =
+        //     unsafe { std::mem::transmute(mem.get_host_address(queue.desc_table).unwrap()) };
+        //
+        // let avail_idx = queue.avail_idx(mem);
+        // let actual_size = queue.actual_size();
+        //
+        // while queue.next_avail.0 != avail_idx.0 {
+        //     let Some(next_iovec_buf) = self.iov_ring.next_available() else {
+        //         break;
+        //     };
+        //
+        //     let avail_index = queue.next_avail.0 % actual_size;
+        //     queue.next_avail += Wrapping(1);
+        //
+        //     let desc_index = avail_ring.ring[avail_index as usize];
+        //     let mut desc = &desc_table[desc_index as usize];
+        //
+        //     next_iovec_buf.clear();
+        //     next_iovec_buf.head_index = desc_index;
+        //
+        //     let iov = libc::iovec {
+        //         iov_base: mem
+        //             .get_host_address(GuestAddress(desc.addr))
+        //             .unwrap()
+        //             .cast(),
+        //         iov_len: desc.len as usize,
+        //     };
+        //     next_iovec_buf.vecs.push(iov);
+        //     next_iovec_buf.len += desc.len;
+        //     self.valid_ring.push(true);
+        //
+        //     while desc.flags & crate::devices::virtio::queue::VIRTQ_DESC_F_NEXT != 0 {
+        //         desc = &desc_table[desc.next as usize];
+        //         let iov = libc::iovec {
+        //             iov_base: mem
+        //                 .get_host_address(GuestAddress(desc.addr))
+        //                 .unwrap()
+        //                 .cast(),
+        //             iov_len: desc.len as usize,
+        //         };
+        //         next_iovec_buf.vecs.push(iov);
+        //         next_iovec_buf.len += desc.len;
+        //         self.valid_ring.push(true);
+        //     }
+        // }
 
         // for _ in 0..queue.len(mem) {
         //     let Some(next_iovec_buf) = self.iov_ring.next_available() else {
@@ -236,6 +242,28 @@ impl AvailableDescriptors {
         // }
         // }
         // }
+
+        for _ in 0..queue.len_opt() {
+            let Some(next_iovec_buf) = self.iov_ring.next_available() else {
+                break;
+            };
+
+            match queue.do_pop_unchecked_opt() {
+                Some(desc_chain) => {
+                    // SAFETY:
+                    // This descriptor chain is only processed once.
+                    let valid = unsafe {
+                        next_iovec_buf
+                            .load_descriptor_chain_opt(desc_chain, mem)
+                            .is_ok()
+                    };
+                    self.valid_ring.push(valid);
+                }
+                None => {
+                    self.valid_ring.push(false);
+                }
+            }
+        }
     }
 
     /// Pop first descriptor chain.
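
The hunk above swaps the hand-rolled ring walk for `queue.do_pop_unchecked_opt()` plus `load_descriptor_chain_opt()`, neither of which is shown in this diff. Below is a minimal sketch of the chain-to-iovec walk such a loader would have to perform, modeled on the removed block above; the `Descriptor` layout comes from that block, while the free function and its `translate` callback are purely illustrative assumptions.

```rust
// Hypothetical sketch: walk one descriptor chain into a scatter-gather list.
// `Descriptor` mirrors the struct from the removed code; `translate` stands in
// for guest-to-host address translation (e.g. mem.get_host_address).
const VIRTQ_DESC_F_NEXT: u16 = 0x1;

#[repr(C)]
#[derive(Clone, Copy)]
struct Descriptor {
    addr: u64,
    len: u32,
    flags: u16,
    next: u16,
}

fn chain_to_iovecs(
    desc_table: &[Descriptor],
    head: u16,
    translate: impl Fn(u64) -> *mut libc::c_void,
) -> Vec<libc::iovec> {
    let mut iovecs = Vec::new();
    let mut desc = &desc_table[head as usize];
    loop {
        // One iovec per descriptor in the chain.
        iovecs.push(libc::iovec {
            iov_base: translate(desc.addr),
            iov_len: desc.len as usize,
        });
        // Follow the NEXT flag until the chain ends.
        if (desc.flags & VIRTQ_DESC_F_NEXT) == 0 {
            break;
        }
        desc = &desc_table[desc.next as usize];
    }
    iovecs
}
```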
@@ -503,7 +531,8 @@ impl Net {
         // This is safe since we checked in the event handler that the device is activated.
         let mem = self.device_state.mem().unwrap();
 
-        if self.rx_avail_desc.is_empty() && self.queues[RX_INDEX].is_empty(mem) {
+        // if self.rx_avail_desc.is_empty() && self.queues[RX_INDEX].is_empty(mem) {
+        if self.rx_avail_desc.is_empty() && self.queues[RX_INDEX].is_empty_opt() {
             self.metrics.no_rx_avail_buffer.inc();
             return Err(FrontendError::EmptyQueue);
         }
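
`is_empty_opt()` no longer takes `mem`, which suggests the avail-ring index is now read through a host pointer cached at activation instead of being translated on every call. A sketch of what such a check could look like, assuming a hypothetical `QueueView` with a cached `avail_idx_ptr` (neither name appears in this diff):

```rust
use std::num::Wrapping;

// Hypothetical queue state: the host address of the avail ring's `idx` field
// is resolved once at activation rather than on every emptiness check.
struct QueueView {
    avail_idx_ptr: *const u16, // cached host pointer to the avail ring's idx
    next_avail: Wrapping<u16>, // next avail entry the device will process
}

impl QueueView {
    fn is_empty_opt(&self) -> bool {
        // SAFETY: the pointer was validated and cached when the device was activated.
        let avail_idx = unsafe { std::ptr::read_volatile(self.avail_idx_ptr) };
        // The queue is empty when the device has caught up with the driver.
        self.next_avail.0 == avail_idx
    }
}
```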
@@ -582,8 +611,12 @@ impl Net {
             // SAFETY:
             // This should never panic as we provide index in
             // correct bounds.
+            // self.queues[RX_INDEX]
+            //     .write_used_ring(mem, next_used_index.0, used_element)
+            //     .unwrap();
+
             self.queues[RX_INDEX]
-                .write_used_ring(mem, next_used_index.0, used_element)
+                .write_used_ring_opt(next_used_index.0, used_element)
                 .unwrap();
 
             used_heads += 1;
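
`write_used_ring_opt()` likewise drops the `mem` parameter. A sketch of publishing one used element through a cached used-ring pointer follows; `UsedElement` matches the virtio split-ring layout, while `UsedRingView` and its fields are assumptions for illustration.

```rust
// Hypothetical sketch: write one used element into a slot of the used ring.
#[repr(C)]
#[derive(Clone, Copy)]
struct UsedElement {
    id: u32,  // head index of the consumed descriptor chain
    len: u32, // number of bytes the device wrote into the chain
}

struct UsedRingView {
    ring: *mut UsedElement, // cached host pointer to used_ring.ring[]
    size: u16,              // queue size
}

impl UsedRingView {
    fn write_used_ring_opt(&mut self, index: u16, elem: UsedElement) {
        let slot = (index % self.size) as usize;
        // SAFETY: `ring` points at `size` valid elements, cached at activation.
        unsafe { self.ring.add(slot).write_volatile(elem) };
    }
}
```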
@@ -604,9 +637,13 @@ impl Net {
             // about descriptor heads used for partialy written packets.
             // Otherwise guest will see that we used those descriptors and
             // will try to process them.
-            self.queues[RX_INDEX].advance_used_ring(mem, used_heads);
+            // self.queues[RX_INDEX].advance_used_ring(mem, used_heads);
+            // let next_avail = self.queues[RX_INDEX].next_avail.0;
+            // self.queues[RX_INDEX].set_used_ring_avail_event(next_avail, mem);
+
+            self.queues[RX_INDEX].advance_used_ring_opt(used_heads);
             let next_avail = self.queues[RX_INDEX].next_avail.0;
-            self.queues[RX_INDEX].set_used_ring_avail_event(next_avail, mem);
+            *self.queues[RX_INDEX].ur.avail_event() = next_avail;
 
             // Clear partial write info if there was one
             self.rx_partial_write = None;
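
`*self.queues[RX_INDEX].ur.avail_event() = next_avail` writes the used ring's `avail_event` field: with VIRTIO_RING_F_EVENT_IDX negotiated, the driver only needs to notify the device again once its avail index moves past that value. Per the split-ring spec, the field sits immediately after the used-ring entries; a sketch of locating it (the helper and its arguments are illustrative only):

```rust
// Hypothetical sketch: locate the EVENT_IDX `avail_event` field of a split
// virtqueue used ring. Layout per the virtio spec:
//   flags: u16, idx: u16, ring: [UsedElement; size], avail_event: u16
fn avail_event_ptr(used_ring_base: *mut u8, queue_size: u16) -> *mut u16 {
    const HEADER: usize = 4; // flags + idx
    const ELEM: usize = 8;   // sizeof(UsedElement { id: u32, len: u32 })
    // SAFETY relies on `used_ring_base` covering the whole used ring; callers
    // are expected to have validated and cached this mapping at activation.
    unsafe {
        used_ring_base
            .add(HEADER + ELEM * queue_size as usize)
            .cast::<u16>()
    }
}
```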
@@ -620,7 +657,9 @@ impl Net {
 
             // We used `used_heads` descriptors to process the packet.
             // Because this is an error case, we discard those descriptors.
-            self.queues[RX_INDEX].discard_used(mem, used_heads);
+            // self.queues[RX_INDEX].discard_used(mem, used_heads);
+
+            self.queues[RX_INDEX].discard_used_opt(used_heads);
 
             Err(err)
         } else if slice.is_empty() {
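
`discard_used_opt()` has to take back the used-ring slots claimed for the failed packet without exposing them to the guest. A sketch of the rollback idea, assuming the queue keeps a device-side `next_used` counter and that the guest-visible used `idx` has not yet been advanced for these entries (both are assumptions; the real implementation is not part of this diff):

```rust
use std::num::Wrapping;

// Hypothetical sketch: roll back used-ring bookkeeping for descriptor heads
// that were claimed for a packet that ultimately failed to be written.
struct QueueCounters {
    next_used: Wrapping<u16>, // next used-ring slot the device would publish to
}

impl QueueCounters {
    fn discard_used_opt(&mut self, used_heads: u16) {
        // The guest-visible used `idx` was never advanced for these entries,
        // so rewinding the device-side counter makes the slots reusable.
        self.next_used -= Wrapping(used_heads);
    }
}
```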
@@ -1149,6 +1188,8 @@ impl VirtioDevice for Net {
     }
 
     fn activate(&mut self, mem: GuestMemoryMmap) -> Result<(), ActivateError> {
+        self.queues[RX_INDEX].set_pointers(&mem);
+
         let event_idx = self.has_feature(u64::from(VIRTIO_RING_F_EVENT_IDX));
         if event_idx {
             for queue in &mut self.queues {
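
`set_pointers(&mem)` at activation is presumably what lets the `_opt` methods above drop their `mem` argument: the guest addresses of the descriptor table, avail ring, and used ring are translated to host pointers once, while the mapping is known to be valid. A sketch of that one-time translation using the same `get_host_address` call as the removed code (the `RingPointers` struct and the function signature are assumptions):

```rust
// Hypothetical sketch of caching ring host addresses at activation time.
// `get_host_address` is the vm-memory call already used by the removed code;
// crate/feature details and the `RingPointers` type are assumptions.
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};

struct RingPointers {
    desc_table: *mut u8,
    avail_ring: *mut u8,
    used_ring: *mut u8,
}

fn set_pointers(
    mem: &GuestMemoryMmap,
    desc_table: GuestAddress,
    avail_ring: GuestAddress,
    used_ring: GuestAddress,
) -> RingPointers {
    // Translate each ring's guest physical address to a host virtual address
    // once; the pointers stay valid while the guest memory mapping is alive.
    RingPointers {
        desc_table: mem.get_host_address(desc_table).unwrap(),
        avail_ring: mem.get_host_address(avail_ring).unwrap(),
        used_ring: mem.get_host_address(used_ring).unwrap(),
    }
}
```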