@@ -7,6 +7,7 @@
 
 use std::mem;
 use std::net::Ipv4Addr;
+use std::num::NonZeroUsize;
 use std::sync::{Arc, Mutex};
 
 use libc::EAGAIN;
@@ -107,9 +108,9 @@ pub struct Net {
     pub(crate) rx_rate_limiter: RateLimiter,
     pub(crate) tx_rate_limiter: RateLimiter,
 
-    pub(crate) rx_deferred_frame: bool,
-
-    rx_bytes_read: usize,
+    /// Used to store the last RX packet size and to
+    /// rate limit the RX queue.
+    deferred_rx_bytes: Option<NonZeroUsize>,
     rx_frame_buf: [u8; MAX_BUFFER_SIZE],
 
     tx_frame_headers: [u8; frame_hdr_len()],
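
The two fields removed above, `rx_deferred_frame: bool` and `rx_bytes_read: usize`, only ever made sense together: the byte count was meaningful while the flag was set. Folding them into a single `Option<NonZeroUsize>` makes the mismatched state (flag set, size zero) unrepresentable, and since `Option<NonZeroUsize>` has the same size as `usize`, the struct does not grow. A minimal standalone sketch of the encoding (illustration only, not part of the diff):

    use std::num::NonZeroUsize;

    fn main() {
        // Old representation: two fields that had to be kept in sync by hand.
        let rx_deferred_frame = false;
        let rx_bytes_read: usize = 0;
        assert!(!rx_deferred_frame && rx_bytes_read == 0);

        // New representation: one field, impossible to desynchronize.
        // A size of zero maps to `None`, i.e. "nothing deferred".
        let nothing_deferred: Option<NonZeroUsize> = NonZeroUsize::new(0);
        assert!(nothing_deferred.is_none());

        // A 1500-byte packet that could not be delivered yet.
        let deferred_rx_bytes = NonZeroUsize::new(1500);
        assert_eq!(deferred_rx_bytes.map(NonZeroUsize::get), Some(1500));
    }
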
@@ -175,8 +176,7 @@ impl Net {
             queue_evts,
             rx_rate_limiter,
             tx_rate_limiter,
-            rx_deferred_frame: false,
-            rx_bytes_read: 0,
+            deferred_rx_bytes: None,
             rx_frame_buf: [0u8; MAX_BUFFER_SIZE],
             tx_frame_headers: [0u8; frame_hdr_len()],
             irq_trigger: IrqTrigger::new().map_err(NetError::EventFd)?,
@@ -298,16 +298,22 @@ impl Net {
     // Attempts to copy a single frame into the guest if there is enough
     // rate limiting budget.
     // Returns true on successful frame delivery.
-    fn rate_limited_rx_single_frame(&mut self) -> bool {
-        let rx_queue = &mut self.queues[RX_INDEX];
-        if !Self::rate_limiter_consume_op(&mut self.rx_rate_limiter, self.rx_bytes_read as u64) {
-            self.metrics.rx_rate_limiter_throttled.inc();
-            return false;
+    fn send_deferred_rx_bytes(&mut self) -> bool {
+        match self.deferred_rx_bytes {
+            Some(bytes) => {
+                if Self::rate_limiter_consume_op(&mut self.rx_rate_limiter, bytes.get() as u64) {
+                    // The packet is good to go; reset `deferred_rx_bytes`.
+                    self.deferred_rx_bytes = None;
+                    // Attempt frame delivery.
+                    self.rx_buffer.notify_queue(&mut self.queues[RX_INDEX]);
+                    true
+                } else {
+                    self.metrics.rx_rate_limiter_throttled.inc();
+                    false
+                }
+            }
+            None => true,
         }
-
-        // Attempt frame delivery.
-        self.rx_buffer.notify_queue(rx_queue);
-        true
     }
 
     /// Parse available RX `DescriptorChains` from the queue and
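
The rewritten helper answers "may the deferred packet go out yet?" purely from `deferred_rx_bytes`: `None` means nothing is pending, so the caller may keep reading from the tap, while `Some(bytes)` charges the rate limiter for exactly that many bytes and clears the field only once the budget is granted. A standalone sketch of this gating logic with a stub byte-budget limiter (`ByteBudget`, `Rx` and `delivered` are stand-ins, not Firecracker types):

    use std::num::NonZeroUsize;

    /// Stub limiter: a plain byte budget instead of a token bucket.
    struct ByteBudget {
        budget: u64,
    }

    impl ByteBudget {
        fn consume(&mut self, bytes: u64) -> bool {
            if bytes <= self.budget {
                self.budget -= bytes;
                true
            } else {
                false
            }
        }
    }

    struct Rx {
        deferred_rx_bytes: Option<NonZeroUsize>,
        limiter: ByteBudget,
        delivered: Vec<usize>,
    }

    impl Rx {
        /// Mirrors the shape of `send_deferred_rx_bytes`: returns true when
        /// nothing is pending or the pending packet just got delivered.
        fn send_deferred_rx_bytes(&mut self) -> bool {
            match self.deferred_rx_bytes {
                Some(bytes) => {
                    if self.limiter.consume(bytes.get() as u64) {
                        self.deferred_rx_bytes = None;
                        self.delivered.push(bytes.get()); // stand-in for notify_queue()
                        true
                    } else {
                        false // stays deferred; retried on the next event
                    }
                }
                None => true,
            }
        }
    }

    fn main() {
        let mut rx = Rx {
            deferred_rx_bytes: NonZeroUsize::new(1500),
            limiter: ByteBudget { budget: 1000 },
            delivered: Vec::new(),
        };
        assert!(!rx.send_deferred_rx_bytes()); // throttled, packet stays deferred
        rx.limiter.budget += 1000; // budget refilled, e.g. a timer fired
        assert!(rx.send_deferred_rx_bytes()); // delivered this time
        assert!(rx.deferred_rx_bytes.is_none());
        assert_eq!(rx.delivered, vec![1500]);
    }
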
@@ -456,6 +462,10 @@ impl Net {
 
     /// Read as many frames as possible.
     fn process_rx(&mut self) -> Result<(), DeviceError> {
+        if !self.send_deferred_rx_bytes() {
+            return Ok(());
+        }
+
         self.parse_rx_descriptors();
 
         loop {
@@ -465,11 +475,10 @@
                     break;
                 }
                 Ok(count) => {
-                    self.rx_bytes_read = count;
+                    self.deferred_rx_bytes = NonZeroUsize::new(count);
                     self.metrics.rx_count.inc();
                     self.metrics.rx_packets_count.inc();
-                    if !self.rate_limited_rx_single_frame() {
-                        self.rx_deferred_frame = true;
+                    if !self.send_deferred_rx_bytes() {
                         break;
                     }
                 }
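
Each successful tap read is now recorded in `deferred_rx_bytes` before the delivery attempt; if `send_deferred_rx_bytes` is throttled, the loop breaks and the size survives until the next `process_rx` call, which retries the pending packet before reading from the tap again. One edge case worth noting: `NonZeroUsize::new(count)` turns a zero-length read into `None`, so such a read can be neither deferred nor charged against the limiter. A tiny illustration (the `record_read` helper is hypothetical, not in the diff):

    use std::num::NonZeroUsize;

    // Simplified shape of the Ok(count) arm above.
    fn record_read(count: usize) -> Option<NonZeroUsize> {
        // Zero-length reads map to None: nothing to defer, nothing to rate limit.
        NonZeroUsize::new(count)
    }

    fn main() {
        assert!(record_read(0).is_none());
        assert_eq!(record_read(1514).map(NonZeroUsize::get), Some(1514));
    }
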
@@ -495,26 +504,6 @@ impl Net {
         self.try_signal_queue(NetQueue::Rx)
     }
 
-    // Process the deferred frame first, then continue reading from tap.
-    fn handle_deferred_frame(&mut self) -> Result<(), DeviceError> {
-        if self.rate_limited_rx_single_frame() {
-            self.rx_deferred_frame = false;
-            // process_rx() was interrupted possibly before consuming all
-            // packets in the tap; try continuing now.
-            return self.process_rx();
-        }
-
-        self.try_signal_queue(NetQueue::Rx)
-    }
-
-    fn resume_rx(&mut self) -> Result<(), DeviceError> {
-        if self.rx_deferred_frame {
-            self.handle_deferred_frame()
-        } else {
-            self.process_rx()
-        }
-    }
-
     fn process_tx(&mut self) -> Result<(), DeviceError> {
         // This is safe since we checked in the event handler that the device is activated.
         let mem = self.device_state.mem().unwrap();
@@ -573,7 +562,7 @@ impl Net {
                         &self.metrics,
                     )
                     .unwrap_or(false);
-                    if frame_consumed_by_mmds && !self.rx_deferred_frame {
+                    if frame_consumed_by_mmds {
                         // MMDS consumed this frame/request, let's also try to process the response.
                         process_rx_for_mmds = true;
                     }
@@ -680,7 +669,7 @@ impl Net {
             self.metrics.rx_rate_limiter_throttled.inc();
         } else {
             // If the limiter is not blocked, resume the receiving of bytes.
-            self.resume_rx()
+            self.process_rx()
                 .unwrap_or_else(|err| report_net_event_fail(&self.metrics, err));
         }
     }
@@ -689,31 +678,14 @@ impl Net {
         // This is safe since we checked in the event handler that the device is activated.
         self.metrics.rx_tap_event_count.inc();
 
-        // While there are no available RX queue buffers and there's a deferred_frame
-        // don't process any more incoming. Otherwise start processing a frame. In the
-        // process the deferred_frame flag will be set in order to avoid freezing the
-        // RX queue.
-        if self.queues[RX_INDEX].is_empty() && self.rx_deferred_frame {
-            self.metrics.no_rx_avail_buffer.inc();
-            return;
-        }
-
         // While limiter is blocked, don't process any more incoming.
         if self.rx_rate_limiter.is_blocked() {
            self.metrics.rx_rate_limiter_throttled.inc();
            return;
        }
 
-        if self.rx_deferred_frame
-        // Process a deferred frame first if available. Don't read from tap again
-        // until we manage to receive this deferred frame.
-        {
-            self.handle_deferred_frame()
-                .unwrap_or_else(|err| report_net_event_fail(&self.metrics, err));
-        } else {
-            self.process_rx()
-                .unwrap_or_else(|err| report_net_event_fail(&self.metrics, err));
-        }
+        self.process_rx()
+            .unwrap_or_else(|err| report_net_event_fail(&self.metrics, err));
     }
 
     /// Process a single TX queue event.
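
With the deferred state handled inside `process_rx` itself, the tap, RX-queue and rate-limiter event handlers all funnel into the same entry point, and there is no separate `resume_rx`/`handle_deferred_frame` path left to keep in sync. A rough sketch of the resulting dispatch (the `NetEvent`/`Device` names and the `handle` method are illustrative, not Firecracker's event API):

    // Every RX-related event now funnels into the same process_rx()-style entry point.
    enum NetEvent {
        Tap,
        RxQueue,
        RxRateLimiter,
    }

    struct Device {
        rx_limiter_blocked: bool,
        rx_runs: u32,
    }

    impl Device {
        fn process_rx(&mut self) {
            // 1. retry any deferred packet (send_deferred_rx_bytes)
            // 2. parse available descriptors and keep reading from the tap
            self.rx_runs += 1;
        }

        fn handle(&mut self, event: NetEvent) {
            match event {
                // Tap and queue events are skipped while the limiter is blocked;
                // the rate-limiter event is what unblocks processing again.
                NetEvent::Tap | NetEvent::RxQueue if self.rx_limiter_blocked => {}
                NetEvent::Tap | NetEvent::RxQueue | NetEvent::RxRateLimiter => self.process_rx(),
            }
        }
    }

    fn main() {
        let mut dev = Device { rx_limiter_blocked: true, rx_runs: 0 };
        dev.handle(NetEvent::Tap); // throttled: nothing happens
        dev.rx_limiter_blocked = false;
        dev.handle(NetEvent::RxRateLimiter); // budget refilled: RX processing resumes
        assert_eq!(dev.rx_runs, 1);
    }
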
@@ -743,7 +715,7 @@ impl Net {
         match self.rx_rate_limiter.event_handler() {
             Ok(_) => {
                 // There might be enough budget now to receive the frame.
-                self.resume_rx()
+                self.process_rx()
                     .unwrap_or_else(|err| report_net_event_fail(&self.metrics, err));
             }
             Err(err) => {
@@ -772,7 +744,7 @@ impl Net {
 
     /// Process device virtio queue(s).
     pub fn process_virtio_queues(&mut self) {
-        let _ = self.resume_rx();
+        let _ = self.process_rx();
         let _ = self.process_tx();
     }
 }
@@ -1164,7 +1136,7 @@ pub mod tests {
         th.rxq.check_used_elem(0, 0, 0);
         th.rxq.check_used_elem(1, 3, 0);
         // Check that the frame wasn't deferred.
-        assert!(!th.net().rx_deferred_frame);
+        assert!(th.net().deferred_rx_bytes.is_none());
         // Check that the frame has been written successfully to the valid Rx descriptor chain.
         th.rxq
             .check_used_elem(2, 4, frame.len().try_into().unwrap());
@@ -1198,7 +1170,7 @@ pub mod tests {
         );
 
         // Check that the frame wasn't deferred.
-        assert!(!th.net().rx_deferred_frame);
+        assert!(th.net().deferred_rx_bytes.is_none());
        // Check that the used queue has advanced.
         assert_eq!(th.rxq.used.idx.get(), 1);
         assert!(&th.net().irq_trigger.has_pending_irq(IrqType::Vring));
@@ -1238,7 +1210,7 @@ pub mod tests {
         );
 
         // Check that the frames weren't deferred.
-        assert!(!th.net().rx_deferred_frame);
+        assert!(th.net().deferred_rx_bytes.is_none());
         // Check that the used queue has advanced.
         assert_eq!(th.rxq.used.idx.get(), 2);
         assert!(&th.net().irq_trigger.has_pending_irq(IrqType::Vring));
@@ -1666,20 +1638,7 @@ pub mod tests {
         // SAFETY: its a valid fd
         unsafe { libc::close(th.net.lock().unwrap().tap.as_raw_fd()) };
 
-        // The RX queue is empty and rx_deffered_frame is set.
-        th.net().rx_deferred_frame = true;
-        check_metric_after_block!(
-            th.net().metrics.no_rx_avail_buffer,
-            1,
-            th.simulate_event(NetEvent::Tap)
-        );
-
-        // We need to set this here to false, otherwise the device will try to
-        // handle a deferred frame, it will fail and will never try to read from
-        // the tap.
-        th.net().rx_deferred_frame = false;
-
-        // Fake an avail buffer; this time, tap reading should error out.
+        // Fake an avail buffer; tap reading should error out.
         th.add_desc_chain(NetQueue::Rx, 0, &[(0, 4096, VIRTQ_DESC_F_WRITE)]);
         check_metric_after_block!(
             th.net().metrics.tap_read_fails,
@@ -1688,59 +1647,6 @@ pub mod tests {
         );
     }
 
-    #[test]
-    fn test_deferred_frame() {
-        let mem = single_region_mem(2 * MAX_BUFFER_SIZE);
-        let mut th = TestHelper::get_default(&mem);
-        th.activate_net();
-
-        let rx_packets_count = th.net().metrics.rx_packets_count.count();
-        let _ = inject_tap_tx_frame(&th.net(), 1000);
-        // Trigger a Tap event that. This should fail since there
-        // are not any available descriptors in the queue
-        check_metric_after_block!(
-            th.net().metrics.no_rx_avail_buffer,
-            1,
-            th.simulate_event(NetEvent::Tap)
-        );
-        // The frame we read from the tap should be deferred now and
-        // no frames should have been transmitted
-        assert!(th.net().rx_deferred_frame);
-        assert_eq!(th.net().metrics.rx_packets_count.count(), rx_packets_count);
-
-        // Let's add a second frame, which should really have the same
-        // fate.
-        let _ = inject_tap_tx_frame(&th.net(), 1000);
-
-        // Adding a descriptor in the queue. This should handle the first deferred
-        // frame. However, this should try to handle the second tap as well and fail
-        // since there's only one Descriptor Chain in the queue.
-        th.add_desc_chain(NetQueue::Rx, 0, &[(0, 4096, VIRTQ_DESC_F_WRITE)]);
-        check_metric_after_block!(
-            th.net().metrics.no_rx_avail_buffer,
-            1,
-            th.simulate_event(NetEvent::Tap)
-        );
-        // We should still have a deferred frame
-        assert!(th.net().rx_deferred_frame);
-        // However, we should have delivered the first frame
-        assert_eq!(
-            th.net().metrics.rx_packets_count.count(),
-            rx_packets_count + 1
-        );
-
-        // Let's add one more descriptor and try to handle the last frame as well.
-        th.add_desc_chain(NetQueue::Rx, 0, &[(0, 4096, VIRTQ_DESC_F_WRITE)]);
-        check_metric_after_block!(
-            th.net().metrics.rx_packets_count,
-            1,
-            th.simulate_event(NetEvent::RxQueue)
-        );
-
-        // We should be done with any deferred frame
-        assert!(!th.net().rx_deferred_frame);
-    }
-
     #[test]
     fn test_rx_rate_limiter_handling() {
         let mem = single_region_mem(2 * MAX_BUFFER_SIZE);
@@ -1853,7 +1759,7 @@ pub mod tests {
         let mut rl = RateLimiter::new(1000, 0, 500, 0, 0, 0).unwrap();
 
         // set up RX
-        assert!(!th.net().rx_deferred_frame);
+        assert!(th.net().deferred_rx_bytes.is_none());
         th.add_desc_chain(NetQueue::Rx, 0, &[(0, 4096, VIRTQ_DESC_F_WRITE)]);
 
         let frame = inject_tap_tx_frame(&th.net(), 1000);
@@ -1873,7 +1779,7 @@ pub mod tests {
             // assert that limiter is blocked
             assert!(th.net().rx_rate_limiter.is_blocked());
             assert_eq!(th.net().metrics.rx_rate_limiter_throttled.count(), 1);
-            assert!(th.net().rx_deferred_frame);
+            assert!(th.net().deferred_rx_bytes.is_some());
             // assert that no operation actually completed (limiter blocked it)
             assert!(&th.net().irq_trigger.has_pending_irq(IrqType::Vring));
             // make sure the data is still queued for processing
@@ -1971,7 +1877,7 @@ pub mod tests {
         let mut rl = RateLimiter::new(0, 0, 0, 1, 0, 500).unwrap();
 
         // set up RX
-        assert!(!th.net().rx_deferred_frame);
+        assert!(th.net().deferred_rx_bytes.is_none());
         th.add_desc_chain(NetQueue::Rx, 0, &[(0, 4096, VIRTQ_DESC_F_WRITE)]);
         let frame = inject_tap_tx_frame(&th.net(), 1234);
 
@@ -1993,7 +1899,7 @@
             // assert that limiter is blocked
             assert!(th.net().rx_rate_limiter.is_blocked());
             assert!(th.net().metrics.rx_rate_limiter_throttled.count() >= 1);
-            assert!(th.net().rx_deferred_frame);
+            assert!(th.net().deferred_rx_bytes.is_some());
             // assert that no operation actually completed (limiter blocked it)
             assert!(&th.net().irq_trigger.has_pending_irq(IrqType::Vring));
             // make sure the data is still queued for processing