diff --git a/src/vmm/benches/block_request.rs b/src/vmm/benches/block_request.rs
index 1ccf3e7c3b6..d93e915ba9a 100644
--- a/src/vmm/benches/block_request.rs
+++ b/src/vmm/benches/block_request.rs
@@ -24,7 +24,7 @@ pub fn block_request_benchmark(c: &mut Criterion) {
     chain.set_header(request_header);
 
     let mut queue = virt_queue.create_queue();
-    let desc = queue.pop().unwrap();
+    let desc = queue.pop().unwrap().unwrap();
 
     c.bench_function("request_parse", |b| {
         b.iter(|| {
diff --git a/src/vmm/benches/queue.rs b/src/vmm/benches/queue.rs
index b5536fa7ef1..d757abeded6 100644
--- a/src/vmm/benches/queue.rs
+++ b/src/vmm/benches/queue.rs
@@ -61,7 +61,7 @@ pub fn queue_benchmark(c: &mut Criterion) {
     set_dtable_one_chain(&rxq, 16);
     queue.next_avail = Wrapping(0);
-    let desc = queue.pop().unwrap();
+    let desc = queue.pop().unwrap().unwrap();
     c.bench_function("next_descriptor_16", |b| {
         b.iter(|| {
             let mut head = Some(desc);
@@ -75,7 +75,7 @@ pub fn queue_benchmark(c: &mut Criterion) {
     c.bench_function("queue_pop_16", |b| {
         b.iter(|| {
             queue.next_avail = Wrapping(0);
-            while let Some(desc) = queue.pop() {
+            while let Some(desc) = queue.pop().unwrap() {
                 std::hint::black_box(desc);
             }
         })
diff --git a/src/vmm/src/device_manager/mmio.rs b/src/vmm/src/device_manager/mmio.rs
index 99bde6e2e78..394935fe5c1 100644
--- a/src/vmm/src/device_manager/mmio.rs
+++ b/src/vmm/src/device_manager/mmio.rs
@@ -461,7 +461,7 @@ impl MMIODeviceManager {
                     // Stats queue doesn't need kicking as it is notified via a `timer_fd`.
                     if balloon.is_activated() {
                         info!("kick balloon {}.", id);
-                        balloon.process_virtio_queues();
+                        balloon.process_virtio_queues().unwrap();
                     }
                 }
                 TYPE_BLOCK => {
@@ -475,7 +475,7 @@
                     // any inflight `timer_fd` events can be safely discarded.
                     if block.is_activated() {
                         info!("kick block {}.", id);
-                        block.process_virtio_queues();
+                        block.process_virtio_queues().unwrap()
                     }
                 }
             }
@@ -487,7 +487,7 @@
                     // any inflight `timer_fd` events can be safely discarded.
                     if net.is_activated() {
                         info!("kick net {}.", id);
-                        net.process_virtio_queues();
+                        net.process_virtio_queues().unwrap();
                     }
                 }
                 TYPE_VSOCK => {
@@ -510,7 +510,7 @@
                     let entropy = virtio.as_mut_any().downcast_mut::<Entropy>().unwrap();
                     if entropy.is_activated() {
                         info!("kick entropy {id}.");
-                        entropy.process_virtio_queues();
+                        entropy.process_virtio_queues().unwrap();
                     }
                 }
                 _ => (),
diff --git a/src/vmm/src/devices/mod.rs b/src/vmm/src/devices/mod.rs
index 0ca445b6f82..495e1507edd 100644
--- a/src/vmm/src/devices/mod.rs
+++ b/src/vmm/src/devices/mod.rs
@@ -19,7 +19,7 @@ pub use bus::{Bus, BusDevice, BusError};
 use log::error;
 
 use crate::devices::virtio::net::metrics::NetDeviceMetrics;
-use crate::devices::virtio::queue::QueueError;
+use crate::devices::virtio::queue::{InvalidAvailIdx, QueueError};
 use crate::devices::virtio::vsock::VsockError;
 use crate::logger::IncMetric;
 
@@ -28,6 +28,9 @@ use crate::logger::IncMetric;
 // network metrics are reported per device, so we need a handle to each net device's
 // metrics `net_iface_metrics` to report metrics for that device.
 pub(crate) fn report_net_event_fail(net_iface_metrics: &NetDeviceMetrics, err: DeviceError) {
+    if let DeviceError::InvalidAvailIdx(err) = err {
+        panic!("{}", err);
+    }
     error!("{:?}", err);
     net_iface_metrics.event_fails.inc();
 }
@@ -45,7 +48,9 @@ pub enum DeviceError {
     /// Device received malformed descriptor.
     MalformedDescriptor,
     /// Error during queue processing: {0}
-    QueueError(QueueError),
+    QueueError(#[from] QueueError),
+    /// {0}
+    InvalidAvailIdx(#[from] InvalidAvailIdx),
     /// Vsock device error: {0}
-    VsockError(VsockError),
+    VsockError(#[from] VsockError),
 }
diff --git a/src/vmm/src/devices/virtio/balloon/device.rs b/src/vmm/src/devices/virtio/balloon/device.rs
index 186f09275bc..bd1a0bafa09 100644
--- a/src/vmm/src/devices/virtio/balloon/device.rs
+++ b/src/vmm/src/devices/virtio/balloon/device.rs
@@ -26,6 +26,7 @@ use super::{
 use crate::devices::virtio::balloon::BalloonError;
 use crate::devices::virtio::device::{IrqTrigger, IrqType};
 use crate::devices::virtio::generated::virtio_config::VIRTIO_F_VERSION_1;
+use crate::devices::virtio::queue::InvalidAvailIdx;
 use crate::logger::IncMetric;
 use crate::utils::u64_to_usize;
 use crate::vstate::memory::{Address, ByteValued, Bytes, GuestAddress, GuestMemoryMmap};
@@ -297,7 +298,7 @@ impl Balloon {
         // Internal loop processes descriptors and accumulates the pfns in `pfn_buffer`.
         // Breaks out when there is not enough space in `pfn_buffer` to completely process
         // the next descriptor.
-        while let Some(head) = queue.pop() {
+        while let Some(head) = queue.pop()? {
             let len = head.len as usize;
             let max_len = MAX_PAGES_IN_DESC * SIZE_OF_U32;
             valid_descs_found = true;
@@ -339,7 +340,7 @@ impl Balloon {
 
             // Acknowledge the receipt of the descriptor.
             // 0 is number of bytes the device has written to memory.
-            queue.add_used(head.index, 0).map_err(BalloonError::Queue)?;
+            queue.add_used(head.index, 0)?;
             needs_interrupt = true;
         }
 
@@ -375,8 +376,8 @@ impl Balloon {
         let queue = &mut self.queues[DEFLATE_INDEX];
         let mut needs_interrupt = false;
 
-        while let Some(head) = queue.pop() {
-            queue.add_used(head.index, 0).map_err(BalloonError::Queue)?;
+        while let Some(head) = queue.pop()? {
+            queue.add_used(head.index, 0)?;
             needs_interrupt = true;
         }
 
@@ -392,14 +393,12 @@ impl Balloon {
         let mem = self.device_state.mem().unwrap();
         METRICS.stats_updates_count.inc();
 
-        while let Some(head) = self.queues[STATS_INDEX].pop() {
+        while let Some(head) = self.queues[STATS_INDEX].pop()? {
             if let Some(prev_stats_desc) = self.stats_desc_index {
                 // We shouldn't ever have an extra buffer if the driver follows
                 // the protocol, but return it if we find one.
                 error!("balloon: driver is not compliant, more than one stats buffer received");
-                self.queues[STATS_INDEX]
-                    .add_used(prev_stats_desc, 0)
-                    .map_err(BalloonError::Queue)?;
+                self.queues[STATS_INDEX].add_used(prev_stats_desc, 0)?;
             }
             for index in (0..head.len).step_by(SIZE_OF_STAT) {
                 // Read the address at position `index`. The only case
@@ -433,9 +432,15 @@
     }
 
     /// Process device virtio queue(s).
-    pub fn process_virtio_queues(&mut self) {
-        let _ = self.process_inflate();
-        let _ = self.process_deflate_queue();
+    pub fn process_virtio_queues(&mut self) -> Result<(), InvalidAvailIdx> {
+        if let Err(BalloonError::InvalidAvailIdx(err)) = self.process_inflate() {
+            return Err(err);
+        }
+        if let Err(BalloonError::InvalidAvailIdx(err)) = self.process_deflate_queue() {
+            return Err(err);
+        }
+
+        Ok(())
     }
 
     /// Provides the ID of this balloon device.
@@ -447,9 +452,7 @@ impl Balloon {
         // The communication is driven by the device by using the buffer
         // and sending a used buffer notification
         if let Some(index) = self.stats_desc_index.take() {
-            self.queues[STATS_INDEX]
-                .add_used(index, 0)
-                .map_err(BalloonError::Queue)?;
+            self.queues[STATS_INDEX].add_used(index, 0)?;
             self.signal_used_queue()
         } else {
             error!("Failed to update balloon stats, missing descriptor.");
@@ -1084,7 +1087,7 @@ pub(crate) mod tests {
         balloon.set_queue(DEFLATE_INDEX, defq.create_queue());
         balloon.activate(mem).unwrap();
 
-        balloon.process_virtio_queues()
+        balloon.process_virtio_queues().unwrap();
     }
 
     #[test]
diff --git a/src/vmm/src/devices/virtio/balloon/mod.rs b/src/vmm/src/devices/virtio/balloon/mod.rs
index 21f96d3ba56..5af1e17288a 100644
--- a/src/vmm/src/devices/virtio/balloon/mod.rs
+++ b/src/vmm/src/devices/virtio/balloon/mod.rs
@@ -11,10 +11,9 @@ pub mod test_utils;
 mod util;
 
 use log::error;
-use vm_memory::GuestMemoryError;
 
 pub use self::device::{Balloon, BalloonConfig, BalloonStats};
-use super::queue::QueueError;
+use super::queue::{InvalidAvailIdx, QueueError};
 use crate::devices::virtio::balloon::metrics::METRICS;
 use crate::devices::virtio::queue::FIRECRACKER_MAX_QUEUE_SIZE;
 use crate::logger::IncMetric;
@@ -68,16 +67,12 @@ const VIRTIO_BALLOON_S_HTLB_PGFAIL: u16 = 9;
 /// Balloon device related errors.
 #[derive(Debug, thiserror::Error, displaydoc::Display)]
 pub enum BalloonError {
-    /// Activation error: {0}
-    Activate(super::ActivateError),
     /// No balloon device found.
     DeviceNotFound,
     /// Device not activated yet.
     DeviceNotActive,
     /// EventFd error: {0}
     EventFd(std::io::Error),
-    /// Guest gave us bad memory addresses: {0}
-    GuestMemory(GuestMemoryError),
     /// Received error while sending an interrupt: {0}
     InterruptError(std::io::Error),
     /// Guest gave us a malformed descriptor.
@@ -93,9 +88,9 @@ pub enum BalloonError {
     /// Amount of pages requested cannot fit in `u32`.
     TooManyPagesRequested,
     /// Error while processing the virt queues: {0}
-    Queue(QueueError),
-    /// Error removing a memory region at inflate time: {0}
-    RemoveMemoryRegion(RemoveRegionError),
+    Queue(#[from] QueueError),
+    /// {0}
+    InvalidAvailIdx(#[from] InvalidAvailIdx),
     /// Error creating the statistics timer: {0}
     Timer(std::io::Error),
 }
@@ -115,6 +110,9 @@ pub enum RemoveRegionError {
 }
 
 pub(super) fn report_balloon_event_fail(err: BalloonError) {
+    if let BalloonError::InvalidAvailIdx(err) = err {
+        panic!("{}", err);
+    }
     error!("{:?}", err);
     METRICS.event_fails.inc();
 }
diff --git a/src/vmm/src/devices/virtio/block/device.rs b/src/vmm/src/devices/virtio/block/device.rs
index bf3043bcdd4..5d41eb04078 100644
--- a/src/vmm/src/devices/virtio/block/device.rs
+++ b/src/vmm/src/devices/virtio/block/device.rs
@@ -9,7 +9,7 @@ use super::persist::{BlockConstructorArgs, BlockState};
 use super::vhost_user::device::{VhostUserBlock, VhostUserBlockConfig};
 use super::virtio::device::{VirtioBlock, VirtioBlockConfig};
 use crate::devices::virtio::device::{IrqTrigger, VirtioDevice};
-use crate::devices::virtio::queue::Queue;
+use crate::devices::virtio::queue::{InvalidAvailIdx, Queue};
 use crate::devices::virtio::{ActivateError, TYPE_BLOCK};
 use crate::rate_limiter::BucketUpdate;
 use crate::snapshot::Persist;
@@ -83,10 +83,10 @@ impl Block {
         }
     }
 
-    pub fn process_virtio_queues(&mut self) {
+    pub fn process_virtio_queues(&mut self) -> Result<(), InvalidAvailIdx> {
         match self {
             Self::Virtio(b) => b.process_virtio_queues(),
-            Self::VhostUser(_) => {}
+            Self::VhostUser(_) => Ok(()),
         }
     }
diff --git a/src/vmm/src/devices/virtio/block/virtio/device.rs b/src/vmm/src/devices/virtio/block/virtio/device.rs
index b11c757d43c..bdd169ff171 100644
--- a/src/vmm/src/devices/virtio/block/virtio/device.rs
+++ b/src/vmm/src/devices/virtio/block/virtio/device.rs
@@ -29,7 +29,7 @@ use crate::devices::virtio::generated::virtio_blk::{
 };
 use crate::devices::virtio::generated::virtio_config::VIRTIO_F_VERSION_1;
 use crate::devices::virtio::generated::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
-use crate::devices::virtio::queue::Queue;
+use crate::devices::virtio::queue::{InvalidAvailIdx, Queue};
 use crate::devices::virtio::{ActivateError, TYPE_BLOCK};
 use crate::logger::{IncMetric, error, warn};
 use crate::rate_limiter::{BucketUpdate, RateLimiter};
@@ -366,13 +366,13 @@ impl VirtioBlock {
         } else if self.is_io_engine_throttled {
             self.metrics.io_engine_throttled_events.inc();
         } else {
-            self.process_virtio_queues();
+            self.process_virtio_queues().unwrap()
         }
     }
 
     /// Process device virtio queue(s).
-    pub fn process_virtio_queues(&mut self) {
-        self.process_queue(0);
+    pub fn process_virtio_queues(&mut self) -> Result<(), InvalidAvailIdx> {
+        self.process_queue(0)
     }
 
     pub(crate) fn process_rate_limiter_event(&mut self) {
@@ -380,7 +380,7 @@ impl VirtioBlock {
         // Upon rate limiter event, call the rate limiter handler
         // and restart processing the queue.
         if self.rate_limiter.event_handler().is_ok() {
-            self.process_queue(0);
+            self.process_queue(0).unwrap()
         }
     }
 
@@ -403,14 +403,14 @@ impl VirtioBlock {
     }
 
     /// Device specific function for peeking inside a queue and processing descriptors.
-    pub fn process_queue(&mut self, queue_index: usize) {
+    pub fn process_queue(&mut self, queue_index: usize) -> Result<(), InvalidAvailIdx> {
         // This is safe since we checked in the event handler that the device is activated.
         let mem = self.device_state.mem().unwrap();
 
         let queue = &mut self.queues[queue_index];
         let mut used_any = false;
 
-        while let Some(head) = queue.pop_or_enable_notification() {
+        while let Some(head) = queue.pop_or_enable_notification()? {
             self.metrics.remaining_reqs_count.add(queue.len().into());
             let processing_result = match Request::parse(&head, mem, self.disk.nsectors) {
                 Ok(request) => {
@@ -463,6 +463,8 @@
         if !used_any {
             self.metrics.no_avail_buffer.inc();
         }
+
+        Ok(())
     }
 
     fn process_async_completion_queue(&mut self) {
@@ -516,7 +518,7 @@
 
         if self.is_io_engine_throttled {
             self.is_io_engine_throttled = false;
-            self.process_queue(0);
+            self.process_queue(0).unwrap()
         }
     }
 }
diff --git a/src/vmm/src/devices/virtio/block/virtio/request.rs b/src/vmm/src/devices/virtio/block/virtio/request.rs
index 00aba034943..8fc83cf43da 100644
--- a/src/vmm/src/devices/virtio/block/virtio/request.rs
+++ b/src/vmm/src/devices/virtio/block/virtio/request.rs
@@ -484,7 +484,7 @@ mod tests {
         let memory = self.driver_queue.memory();
 
         assert!(matches!(
-            Request::parse(&q.pop().unwrap(), memory, NUM_DISK_SECTORS),
+            Request::parse(&q.pop().unwrap().unwrap(), memory, NUM_DISK_SECTORS),
             Err(_e)
         ));
     }
@@ -492,7 +492,8 @@
     fn check_parse(&self, check_data: bool) {
         let mut q = self.driver_queue.create_queue();
         let memory = self.driver_queue.memory();
-        let request = Request::parse(&q.pop().unwrap(), memory, NUM_DISK_SECTORS).unwrap();
+        let request =
+            Request::parse(&q.pop().unwrap().unwrap(), memory, NUM_DISK_SECTORS).unwrap();
 
         let expected_header = self.header();
         assert_eq!(
@@ -959,7 +960,7 @@
     fn parse_random_requests() {
         let cfg = ProptestConfig::with_cases(1000);
         proptest!(cfg, |(mut request in random_request_parse())| {
-            let result = Request::parse(&request.2.pop().unwrap(), &request.1, NUM_DISK_SECTORS);
+            let result = Request::parse(&request.2.pop().unwrap().unwrap(), &request.1, NUM_DISK_SECTORS);
             match result {
                 Ok(r) => prop_assert!(r == request.0.unwrap()),
                 Err(err) => {
diff --git a/src/vmm/src/devices/virtio/iovec.rs b/src/vmm/src/devices/virtio/iovec.rs
index 51352699660..80c5071fbe6 100644
--- a/src/vmm/src/devices/virtio/iovec.rs
+++ b/src/vmm/src/devices/virtio/iovec.rs
@@ -611,22 +611,22 @@ mod tests {
     fn test_access_mode() {
         let mem = default_mem();
         let (mut q, _) = read_only_chain(&mem);
-        let head = q.pop().unwrap();
+        let head = q.pop().unwrap().unwrap();
         // SAFETY: This descriptor chain is only loaded into one buffer
         unsafe { IoVecBuffer::from_descriptor_chain(&mem, head).unwrap() };
 
         let (mut q, _) = write_only_chain(&mem);
-        let head = q.pop().unwrap();
+        let head = q.pop().unwrap().unwrap();
         // SAFETY: This descriptor chain is only loaded into one buffer
         unsafe { IoVecBuffer::from_descriptor_chain(&mem, head).unwrap_err() };
 
         let (mut q, _) = read_only_chain(&mem);
-        let head = q.pop().unwrap();
+        let head = q.pop().unwrap().unwrap();
         // SAFETY: This descriptor chain is only loaded into one buffer
         unsafe { IoVecBufferMutDefault::from_descriptor_chain(&mem, head).unwrap_err() };
 
         let (mut q, _) = write_only_chain(&mem);
-        let head = q.pop().unwrap();
+        let head = q.pop().unwrap().unwrap();
         // SAFETY: This descriptor chain is only loaded into one buffer
         unsafe { IoVecBufferMutDefault::from_descriptor_chain(&mem, head).unwrap() };
     }
@@ -635,7 +635,7 @@
     fn test_iovec_length() {
         let mem = default_mem();
         let (mut q, _) = read_only_chain(&mem);
-        let head = q.pop().unwrap();
+        let head = q.pop().unwrap().unwrap();
 
         // SAFETY: This descriptor chain is only loaded once in this test
         let iovec = unsafe { IoVecBuffer::from_descriptor_chain(&mem, head).unwrap() };
@@ -646,7 +646,7 @@
     fn test_iovec_mut_length() {
         let mem = default_mem();
         let (mut q, _) = write_only_chain(&mem);
-        let head = q.pop().unwrap();
+        let head = q.pop().unwrap().unwrap();
 
         // SAFETY: This descriptor chain is only loaded once in this test
         let mut iovec =
@@ -658,7 +658,7 @@
         // (conceptually) associated with a single `Queue`. We just do this here to be able to test
         // the appending logic.
         let (mut q, _) = write_only_chain(&mem);
-        let head = q.pop().unwrap();
+        let head = q.pop().unwrap().unwrap();
         // SAFETY: it is actually unsafe, but we just want to check the length of the
         // `IoVecBufferMut` after appending.
         let _ = unsafe { iovec.append_descriptor_chain(&mem, head).unwrap() };
@@ -669,7 +669,7 @@
     fn test_iovec_read_at() {
         let mem = default_mem();
         let (mut q, _) = read_only_chain(&mem);
-        let head = q.pop().unwrap();
+        let head = q.pop().unwrap().unwrap();
 
         // SAFETY: This descriptor chain is only loaded once in this test
         let iovec = unsafe { IoVecBuffer::from_descriptor_chain(&mem, head).unwrap() };
@@ -724,7 +724,7 @@
         let (mut q, vq) = write_only_chain(&mem);
 
         // This is a descriptor chain with 4 elements 64 bytes long each.
-        let head = q.pop().unwrap();
+        let head = q.pop().unwrap().unwrap();
 
         // SAFETY: This descriptor chain is only loaded into one buffer
         let mut iovec =
diff --git a/src/vmm/src/devices/virtio/net/device.rs b/src/vmm/src/devices/virtio/net/device.rs
index fff04d1da1a..47e1d3a4042 100755
--- a/src/vmm/src/devices/virtio/net/device.rs
+++ b/src/vmm/src/devices/virtio/net/device.rs
@@ -31,7 +31,7 @@ use crate::devices::virtio::net::tap::Tap;
 use crate::devices::virtio::net::{
     MAX_BUFFER_SIZE, NET_QUEUE_SIZES, NetError, NetQueue, RX_INDEX, TX_INDEX, generated,
 };
-use crate::devices::virtio::queue::{DescriptorChain, Queue};
+use crate::devices::virtio::queue::{DescriptorChain, InvalidAvailIdx, Queue};
 use crate::devices::virtio::{ActivateError, TYPE_NET};
 use crate::devices::{DeviceError, report_net_event_fail};
 use crate::dumbo::pdu::arp::ETH_IPV4_FRAME_LEN;
@@ -461,11 +461,11 @@ impl Net {
     }
 
     /// Parse available RX `DescriptorChains` from the queue
-    pub fn parse_rx_descriptors(&mut self) {
+    pub fn parse_rx_descriptors(&mut self) -> Result<(), InvalidAvailIdx> {
         // This is safe since we checked in the event handler that the device is activated.
         let mem = self.device_state.mem().unwrap();
         let queue = &mut self.queues[RX_INDEX];
-        while let Some(head) = queue.pop_or_enable_notification() {
+        while let Some(head) = queue.pop_or_enable_notification()? {
             let index = head.index;
             // SAFETY: we are only using this `DescriptorChain` here.
             if let Err(err) = unsafe { self.rx_buffer.add_buffer(mem, head) } {
@@ -491,6 +491,8 @@ impl Net {
                 self.rx_buffer.used_descriptors += 1;
             }
         }
+
+        Ok(())
     }
 
     // Tries to detour the frame to MMDS and if MMDS doesn't accept it, sends it on the host TAP.
@@ -574,7 +576,7 @@ impl Net {
         // * MAX_BUFFER_SIZE is constant and fits into u32
         #[allow(clippy::cast_possible_truncation)]
         if self.rx_buffer.capacity() < MAX_BUFFER_SIZE as u32 {
-            self.parse_rx_descriptors();
+            self.parse_rx_descriptors()?;
 
             // If after parsing the RX queue we still don't have enough capacity, stop processing RX
            // frames.
@@ -656,6 +658,9 @@ impl Net {
                 };
                 break;
             }
+            Err(NetError::InvalidAvailIdx(err)) => {
+                return Err(DeviceError::InvalidAvailIdx(err));
+            }
             Err(err) => {
                 error!("Spurious error in network RX: {:?}", err);
             }
@@ -690,7 +695,7 @@ impl Net {
         let mut used_any = false;
         let tx_queue = &mut self.queues[TX_INDEX];
 
-        while let Some(head) = tx_queue.pop_or_enable_notification() {
+        while let Some(head) = tx_queue.pop_or_enable_notification()? {
             self.metrics
                 .tx_remaining_reqs_count
                 .add(tx_queue.len().into());
@@ -701,9 +706,7 @@ impl Net {
             // are live at the same time, meaning this has exclusive ownership over the memory
             if unsafe { self.tx_buffer.load_descriptor_chain(mem, head).is_err() } {
                 self.metrics.tx_fails.inc();
-                tx_queue
-                    .add_used(head_index, 0)
-                    .map_err(DeviceError::QueueError)?;
+                tx_queue.add_used(head_index, 0)?;
                 continue;
             };
 
@@ -711,9 +714,7 @@ impl Net {
             if self.tx_buffer.len() as usize > MAX_BUFFER_SIZE {
                 error!("net: received too big frame from driver");
                 self.metrics.tx_malformed_frames.inc();
-                tx_queue
-                    .add_used(head_index, 0)
-                    .map_err(DeviceError::QueueError)?;
+                tx_queue.add_used(head_index, 0)?;
                 continue;
             }
 
@@ -741,9 +742,7 @@ impl Net {
                 process_rx_for_mmds = true;
             }
 
-            tx_queue
-                .add_used(head_index, 0)
-                .map_err(DeviceError::QueueError)?;
+            tx_queue.add_used(head_index, 0)?;
             used_any = true;
         }
 
@@ -846,7 +845,7 @@ impl Net {
             self.metrics.event_fails.inc();
             return;
         } else {
-            self.parse_rx_descriptors();
+            self.parse_rx_descriptors().unwrap();
        }
 
         if self.rx_rate_limiter.is_blocked() {
@@ -927,9 +926,15 @@ impl Net {
     }
 
     /// Process device virtio queue(s).
-    pub fn process_virtio_queues(&mut self) {
-        let _ = self.resume_rx();
-        let _ = self.process_tx();
+    pub fn process_virtio_queues(&mut self) -> Result<(), InvalidAvailIdx> {
+        if let Err(DeviceError::InvalidAvailIdx(err)) = self.resume_rx() {
+            return Err(err);
+        }
+        if let Err(DeviceError::InvalidAvailIdx(err)) = self.process_tx() {
+            return Err(err);
+        }
+
+        Ok(())
     }
 }
diff --git a/src/vmm/src/devices/virtio/net/mod.rs b/src/vmm/src/devices/virtio/net/mod.rs
index 31b0f8a178c..a13b3dadf0f 100644
--- a/src/vmm/src/devices/virtio/net/mod.rs
+++ b/src/vmm/src/devices/virtio/net/mod.rs
@@ -31,6 +31,7 @@ use vm_memory::VolatileMemoryError;
 
 pub use self::device::Net;
 use super::iovec::IoVecError;
+use crate::devices::virtio::queue::{InvalidAvailIdx, QueueError};
 
 /// Enum representing the Net device queue types
 #[derive(Debug)]
@@ -58,4 +59,8 @@ pub enum NetError {
     VnetHeaderMissing,
     /// IoVecBuffer(Mut) error: {0}
     IoVecError(#[from] IoVecError),
+    /// virtio queue error: {0}
+    QueueError(#[from] QueueError),
+    /// {0}
+    InvalidAvailIdx(#[from] InvalidAvailIdx),
 }
diff --git a/src/vmm/src/devices/virtio/net/persist.rs b/src/vmm/src/devices/virtio/net/persist.rs
index 5f2d6f560b4..50e761273db 100644
--- a/src/vmm/src/devices/virtio/net/persist.rs
+++ b/src/vmm/src/devices/virtio/net/persist.rs
@@ -163,7 +163,8 @@ impl Persist<'_> for Net {
             // Recreate `Net::rx_buffer`. We do it by re-parsing the RX queue. We're temporarily
             // rolling back `next_avail` in the RX queue and calling `parse_rx_descriptors`.
             net.queues[RX_INDEX].next_avail -= state.rx_buffers_state.parsed_descriptor_chains_nr;
-            net.parse_rx_descriptors();
+            net.parse_rx_descriptors()
+                .map_err(|e| NetPersistError::VirtioState(VirtioStateError::InvalidAvailIdx(e)))?;
             net.rx_buffer.used_descriptors = state.rx_buffers_state.used_descriptors;
             net.rx_buffer.used_bytes = state.rx_buffers_state.used_bytes;
         }
diff --git a/src/vmm/src/devices/virtio/net/test_utils.rs b/src/vmm/src/devices/virtio/net/test_utils.rs
index 5762123be68..2df7891e034 100644
--- a/src/vmm/src/devices/virtio/net/test_utils.rs
+++ b/src/vmm/src/devices/virtio/net/test_utils.rs
@@ -12,12 +12,11 @@ use std::str::FromStr;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};
 
-use crate::devices::DeviceError;
 use crate::devices::virtio::net::Net;
 #[cfg(test)]
 use crate::devices::virtio::net::device::vnet_hdr_len;
 use crate::devices::virtio::net::tap::{IfReqBuilder, Tap};
-use crate::devices::virtio::queue::{Queue, QueueError};
+use crate::devices::virtio::queue::Queue;
 use crate::devices::virtio::test_utils::VirtQueue;
 use crate::mmds::data_store::Mmds;
 use crate::mmds::ns::MmdsNetworkStack;
@@ -265,25 +264,6 @@ pub(crate) fn inject_tap_tx_frame(net: &Net, len: usize) -> Vec<u8> {
     frame
 }
 
-pub fn write_element_in_queue(net: &Net, idx: u16, val: u64) -> Result<(), DeviceError> {
-    if idx as usize > net.queue_evts.len() {
-        return Err(DeviceError::QueueError(QueueError::DescIndexOutOfBounds(
-            idx,
-        )));
-    }
-    net.queue_evts[idx as usize].write(val).unwrap();
-    Ok(())
-}
-
-pub fn get_element_from_queue(net: &Net, idx: u16) -> Result<u64, DeviceError> {
-    if idx as usize > net.queue_evts.len() {
-        return Err(DeviceError::QueueError(QueueError::DescIndexOutOfBounds(
-            idx,
-        )));
-    }
-    Ok(u64::try_from(net.queue_evts[idx as usize].as_raw_fd()).unwrap())
-}
-
 pub fn default_guest_mac() -> MacAddr {
     MacAddr::from_str("11:22:33:44:55:66").unwrap()
 }
diff --git a/src/vmm/src/devices/virtio/persist.rs b/src/vmm/src/devices/virtio/persist.rs
index 7c861352317..38dd50e7c7f 100644
--- a/src/vmm/src/devices/virtio/persist.rs
+++ b/src/vmm/src/devices/virtio/persist.rs
@@ -9,7 +9,7 @@ use std::sync::{Arc, Mutex};
 
 use serde::{Deserialize, Serialize};
 
-use super::queue::QueueError;
+use super::queue::{InvalidAvailIdx, QueueError};
 use crate::devices::virtio::device::VirtioDevice;
 use crate::devices::virtio::generated::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
 use crate::devices::virtio::mmio::MmioTransport;
@@ -24,6 +24,8 @@ pub enum PersistError {
     InvalidInput,
     /// Could not restore queue: {0}
     QueueConstruction(QueueError),
+    /// {0}
+    InvalidAvailIdx(#[from] InvalidAvailIdx),
 }
 
 /// Queue information saved in snapshot.
diff --git a/src/vmm/src/devices/virtio/queue.rs b/src/vmm/src/devices/virtio/queue.rs
index efe42bfc3dc..1d316ac21da 100644
--- a/src/vmm/src/devices/virtio/queue.rs
+++ b/src/vmm/src/devices/virtio/queue.rs
@@ -28,8 +28,6 @@ pub(super) const FIRECRACKER_MAX_QUEUE_SIZE: u16 = 256;
 
 #[derive(Debug, thiserror::Error, displaydoc::Display)]
 pub enum QueueError {
-    /// Virtio queue number of available descriptors {0} is greater than queue size {1}.
-    InvalidQueueSize(u16, u16),
     /// Descriptor index out of bounds: {0}.
     DescIndexOutOfBounds(u16),
     /// Failed to write value into the virtio queue used ring: {0}
@@ -38,6 +36,22 @@
     PointerNotAligned(usize, u8),
 }
 
+/// Error type indicating the guest configured a virtio queue such that the avail_idx field would
+/// indicate there are more descriptors to process than the queue actually has space for.
+///
+/// Should this error bubble up to the event loop, we exit Firecracker, since this could be a
+/// potential malicious driver scenario. This way we also eliminate the risk of repeatedly
+/// logging and potentially clogging the microVM through the log system.
+#[derive(Debug, thiserror::Error, PartialEq, Eq)]
+#[error(
+    "The number of available virtio descriptors {reported_len} is greater than queue size: \
+     {queue_size}!"
+)]
+pub struct InvalidAvailIdx {
+    queue_size: u16,
+    reported_len: u16,
+}
+
 /// A virtio descriptor constraints with C representative.
 /// Taken from Virtio spec:
 /// https://docs.oasis-open.org/virtio/virtio/v1.1/csprd01/virtio-v1.1-csprd01.html#x1-430008
@@ -522,7 +536,16 @@ impl Queue {
     }
 
     /// Pop the first available descriptor chain from the avail ring.
-    pub fn pop(&mut self) -> Option<DescriptorChain> {
+    ///
+    /// If this function returns an error at runtime, then the guest has requested Firecracker
+    /// to process more virtio descriptors than there can possibly be given the queue's size.
+    /// This can be a malicious guest driver scenario, and hence a DoS attempt. If encountered
+    /// at runtime, the correct handling is to panic!
+    ///
+    /// However, this function is also called on paths that can (and should) just report
+    /// the error to the user (e.g. loading a corrupt snapshot file), and hence cannot panic on
+    /// its own.
+    pub fn pop(&mut self) -> Result<Option<DescriptorChain>, InvalidAvailIdx> {
         let len = self.len();
         // The number of descriptor chain heads to process should always
         // be smaller or equal to the queue size, as the driver should
@@ -531,34 +554,42 @@
         // can prevent potential hanging and Denial-of-Service from
         // happening on the VMM side.
         if len > self.actual_size() {
-            // We are choosing to interrupt execution since this could be a potential malicious
-            // driver scenario. This way we also eliminate the risk of repeatedly
-            // logging and potentially clogging the microVM through the log system.
-            panic!(
-                "The number of available virtio descriptors {len} is greater than queue size: {}!",
-                self.actual_size()
-            );
+            return Err(InvalidAvailIdx {
+                queue_size: self.actual_size(),
+                reported_len: len,
+            });
         }
 
         if len == 0 {
-            return None;
+            return Ok(None);
         }
 
-        self.pop_unchecked()
+        Ok(self.pop_unchecked())
     }
 
     /// Try to pop the first available descriptor chain from the avail ring.
     /// If no descriptor is available, enable notifications.
-    pub fn pop_or_enable_notification(&mut self) -> Option<DescriptorChain> {
+    ///
+    /// If this function returns an error at runtime, then the guest has requested Firecracker
+    /// to process more virtio descriptors than there can possibly be given the queue's size.
+    /// This can be a malicious guest driver scenario, and hence a DoS attempt. If encountered
+    /// at runtime, the correct handling is to panic!
+    ///
+    /// However, this function is also called on paths that can (and should) just report
+    /// the error to the user (e.g. loading a corrupt snapshot file), and hence cannot panic on
+    /// its own.
+    pub fn pop_or_enable_notification(
+        &mut self,
+    ) -> Result<Option<DescriptorChain>, InvalidAvailIdx> {
         if !self.uses_notif_suppression {
             return self.pop();
         }
 
-        if self.try_enable_notification() {
-            return None;
+        if self.try_enable_notification()? {
+            return Ok(None);
         }
 
-        self.pop_unchecked()
+        Ok(self.pop_unchecked())
     }
 
     /// Pop the first available descriptor chain from the avail ring.
@@ -647,11 +678,11 @@
     /// successfully enabled. Otherwise it means that one or more descriptors can still be consumed
     /// from the available ring and we can't guarantee that there will be a notification. In this
     /// case the caller might want to consume the mentioned descriptors and call this method again.
-    pub fn try_enable_notification(&mut self) -> bool {
+    fn try_enable_notification(&mut self) -> Result<bool, InvalidAvailIdx> {
         // If the device doesn't use notification suppression, we'll continue to get notifications
         // no matter what.
         if !self.uses_notif_suppression {
-            return true;
+            return Ok(true);
         }
 
         let len = self.len();
         if len != 0 {
             // The number of descriptor chain heads to process should always
             // be smaller or equal to the queue size.
             if len > self.actual_size() {
-                // We are choosing to interrupt execution since this could be a potential malicious
-                // driver scenario. This way we also eliminate the risk of
-                // repeatedly logging and potentially clogging the microVM through
-                // the log system.
-                panic!(
-                    "The number of available virtio descriptors {len} is greater than queue size: \
-                     {}!",
-                    self.actual_size()
-                );
+                return Err(InvalidAvailIdx {
+                    queue_size: self.actual_size(),
+                    reported_len: len,
+                });
             }
-            return false;
+            return Ok(false);
         }
 
         // Set the next expected avail_idx as avail_event.
@@ -680,7 +706,7 @@
 
         // If the actual avail_idx is different than next_avail one or more descriptors can still
         // be consumed from the available ring.
-        self.next_avail.0 == self.avail_ring_idx_get()
+        Ok(self.next_avail.0 == self.avail_ring_idx_get())
     }
 
     /// Enable notification suppression.
@@ -1175,7 +1201,7 @@ mod verification {
 
         let next_avail = queue.next_avail;
 
-        if let Some(_) = queue.pop() {
+        if let Some(_) = queue.pop().unwrap() {
            // Can't get anything out of an empty queue, assert queue_len != 0
            assert_ne!(queue_len, 0);
            assert_eq!(queue.next_avail, next_avail + Wrapping(1));
@@ -1192,7 +1218,7 @@
         kani::assume(queue.len() <= queue.actual_size());
 
         let queue_clone = queue.clone();
-        if let Some(_) = queue.pop() {
+        if let Some(_) = queue.pop().unwrap() {
             queue.undo_pop();
 
             assert_eq!(queue, queue_clone);
@@ -1207,7 +1233,7 @@
 
         kani::assume(queue.len() <= queue.actual_size());
 
-        if queue.try_enable_notification() && queue.uses_notif_suppression {
+        if queue.try_enable_notification().unwrap() && queue.uses_notif_suppression {
             // We only require new notifications if the queue is empty (e.g. we've processed
             // everything we've been notified about), or if suppression is disabled.
             assert!(queue.is_empty());
@@ -1387,7 +1413,7 @@ mod tests {
         assert_eq!(q.len(), 2);
 
         // The first chain should hold exactly two descriptors.
-        let d = q.pop().unwrap().next_descriptor().unwrap();
+        let d = q.pop().unwrap().unwrap().next_descriptor().unwrap();
         assert!(!d.has_next());
         assert!(d.next_descriptor().is_none());
 
@@ -1398,6 +1424,7 @@
         let d = q
             .pop()
             .unwrap()
+            .unwrap()
             .next_descriptor()
             .unwrap()
             .next_descriptor()
@@ -1407,7 +1434,7 @@
 
         // We've popped both chains, so the queue should be empty.
         assert!(q.is_empty());
-        assert!(q.pop().is_none());
+        assert!(q.pop().unwrap().is_none());
 
         // Undoing the last pop should let us walk the last chain again.
         q.undo_pop();
@@ -1417,6 +1444,7 @@
         let d = q
             .pop()
             .unwrap()
+            .unwrap()
             .next_descriptor()
             .unwrap()
             .next_descriptor()
@@ -1432,6 +1460,7 @@
         let d = q
             .pop_or_enable_notification()
             .unwrap()
+            .unwrap()
             .next_descriptor()
             .unwrap()
             .next_descriptor()
@@ -1442,20 +1471,17 @@
         // There are no more descriptors, but notification suppression is disabled.
         // Calling pop_or_enable_notification() should simply return None.
         assert_eq!(q.used_ring_avail_event_get(), 0);
-        assert!(q.pop_or_enable_notification().is_none());
+        assert!(q.pop_or_enable_notification().unwrap().is_none());
         assert_eq!(q.used_ring_avail_event_get(), 0);
 
         // There are no more descriptors and notification suppression is enabled. Calling
         // pop_or_enable_notification() should enable notifications.
         q.enable_notif_suppression();
-        assert!(q.pop_or_enable_notification().is_none());
+        assert!(q.pop_or_enable_notification().unwrap().is_none());
         assert_eq!(q.used_ring_avail_event_get(), 2);
     }
 
     #[test]
-    #[should_panic(
-        expected = "The number of available virtio descriptors 5 is greater than queue size: 4!"
-    )]
     fn test_invalid_avail_idx_no_notification() {
         // This test ensures constructing a descriptor chain succeeds
         // with valid available ring indexes while it produces an error with invalid
         // indexes.
@@ -1483,7 +1509,7 @@
         assert_eq!(q.len(), 2);
 
         // We process the first descriptor.
-        let d = q.pop().unwrap().next_descriptor();
+        let d = q.pop().unwrap().unwrap().next_descriptor();
         assert!(matches!(d, Some(x) if !x.has_next()));
         // We confuse the device and set the available index as being 6.
         vq.avail.idx.set(6);
@@ -1494,13 +1520,16 @@
         // However, since the apparent length set by the driver is more than the queue size,
         // we would be running the risk of going through some descriptors more than once.
-        // As such, we expect to panic.
-        q.pop();
+        // As such, we expect an error.
+        assert_eq!(
+            q.pop().unwrap_err(),
+            InvalidAvailIdx {
+                reported_len: 5,
+                queue_size: 4
+            }
+        );
     }
 
     #[test]
-    #[should_panic(
-        expected = "The number of available virtio descriptors 6 is greater than queue size: 4!"
-    )]
     fn test_invalid_avail_idx_with_notification() {
         // This test ensures constructing a descriptor chain succeeds
         // with valid available ring indexes while it produces an error with invalid
         // indexes.
@@ -1525,7 +1554,13 @@
         // driver sets available index to suspicious value.
         vq.avail.idx.set(6);
 
-        q.pop_or_enable_notification();
+        assert_eq!(
+            q.pop_or_enable_notification().unwrap_err(),
+            InvalidAvailIdx {
+                queue_size: 4,
+                reported_len: 6
+            }
+        );
     }
 
     #[test]
@@ -1647,18 +1682,18 @@
         assert_eq!(q.len(), 1);
 
         // Notification suppression is disabled. try_enable_notification shouldn't do anything.
-        assert!(q.try_enable_notification());
+        assert!(q.try_enable_notification().unwrap());
         assert_eq!(q.used_ring_avail_event_get(), 0);
 
         // Enable notification suppression and check again. There is 1 available descriptor chain.
         // Again nothing should happen.
         q.enable_notif_suppression();
-        assert!(!q.try_enable_notification());
+        assert!(!q.try_enable_notification().unwrap());
         assert_eq!(q.used_ring_avail_event_get(), 0);
 
         // Consume the descriptor. avail_event should be modified
-        assert!(q.pop().is_some());
-        assert!(q.try_enable_notification());
+        assert!(q.pop().unwrap().is_some());
+        assert!(q.try_enable_notification().unwrap());
         assert_eq!(q.used_ring_avail_event_get(), 1);
     }
diff --git a/src/vmm/src/devices/virtio/rng/device.rs b/src/vmm/src/devices/virtio/rng/device.rs
index 97ac8676e0a..fae6b925619 100644
--- a/src/vmm/src/devices/virtio/rng/device.rs
+++ b/src/vmm/src/devices/virtio/rng/device.rs
@@ -16,7 +16,7 @@ use crate::devices::virtio::device::{DeviceState, IrqTrigger, IrqType, VirtioDev
 use crate::devices::virtio::generated::virtio_config::VIRTIO_F_VERSION_1;
 use crate::devices::virtio::iov_deque::IovDequeError;
 use crate::devices::virtio::iovec::IoVecBufferMut;
-use crate::devices::virtio::queue::{FIRECRACKER_MAX_QUEUE_SIZE, Queue};
+use crate::devices::virtio::queue::{FIRECRACKER_MAX_QUEUE_SIZE, InvalidAvailIdx, Queue};
 use crate::devices::virtio::{ActivateError, TYPE_RNG};
 use crate::logger::{IncMetric, debug, error};
 use crate::rate_limiter::{RateLimiter, TokenType};
@@ -128,9 +128,9 @@ impl Entropy {
         Ok(self.buffer.len())
     }
 
-    fn process_entropy_queue(&mut self) {
+    fn process_entropy_queue(&mut self) -> Result<(), InvalidAvailIdx> {
         let mut used_any = false;
 
-        while let Some(desc) = self.queues[RNG_QUEUE].pop() {
+        while let Some(desc) = self.queues[RNG_QUEUE].pop()? {
             // This is safe since we checked in the event handler that the device is activated.
             let mem = self.device_state.mem().unwrap();
             let index = desc.index;
@@ -192,6 +192,8 @@
                 METRICS.entropy_event_fails.inc()
             });
         }
+
+        Ok(())
     }
 
     pub(crate) fn process_entropy_queue_event(&mut self) {
@@ -200,7 +202,7 @@
             METRICS.entropy_event_fails.inc();
         } else if !self.rate_limiter.is_blocked() {
             // We are not throttled, handle the entropy queue
-            self.process_entropy_queue();
+            self.process_entropy_queue().unwrap()
         } else {
             METRICS.rate_limiter_event_count.inc();
         }
@@ -211,7 +213,7 @@
         match self.rate_limiter.event_handler() {
             Ok(_) => {
                 // There might be enough budget now to process entropy requests.
-                self.process_entropy_queue();
+                self.process_entropy_queue().unwrap()
             }
             Err(err) => {
                 error!("entropy: Failed to handle rate-limiter event: {err:?}");
@@ -220,8 +222,8 @@
         }
     }
 
-    pub fn process_virtio_queues(&mut self) {
-        self.process_entropy_queue();
+    pub fn process_virtio_queues(&mut self) -> Result<(), InvalidAvailIdx> {
+        self.process_entropy_queue()
     }
 
     pub fn rate_limiter(&self) -> &RateLimiter {
@@ -438,7 +440,7 @@ mod tests {
         let mut entropy_dev = th.device();
 
         // This should succeed, we just added two descriptors
-        let desc = entropy_dev.queues_mut()[RNG_QUEUE].pop().unwrap();
+        let desc = entropy_dev.queues_mut()[RNG_QUEUE].pop().unwrap().unwrap();
         assert!(matches!(
             // SAFETY: This descriptor chain is only loaded into one buffer
             unsafe { IoVecBufferMut::<256>::from_descriptor_chain(&mem, desc) },
@@ -446,7 +448,7 @@
         ));
 
         // This should succeed, we should have one more descriptor
-        let desc = entropy_dev.queues_mut()[RNG_QUEUE].pop().unwrap();
+        let desc = entropy_dev.queues_mut()[RNG_QUEUE].pop().unwrap().unwrap();
         // SAFETY: This descriptor chain is only loaded into one buffer
         entropy_dev.buffer = unsafe { IoVecBufferMut::from_descriptor_chain(&mem, desc).unwrap() };
         entropy_dev.handle_one().unwrap();
diff --git a/src/vmm/src/devices/virtio/vsock/csm/connection.rs b/src/vmm/src/devices/virtio/vsock/csm/connection.rs
index c9bd5b2c0f7..a5a2f4aec5b 100644
--- a/src/vmm/src/devices/virtio/vsock/csm/connection.rs
+++ b/src/vmm/src/devices/virtio/vsock/csm/connection.rs
@@ -864,14 +864,14 @@ mod tests {
             rx_pkt
                 .parse(
                     &vsock_test_ctx.mem,
-                    handler_ctx.device.queues[RXQ_INDEX].pop().unwrap(),
+                    handler_ctx.device.queues[RXQ_INDEX].pop().unwrap().unwrap(),
                 )
                 .unwrap();
             let mut tx_pkt = VsockPacketTx::default();
             tx_pkt
                 .parse(
                     &vsock_test_ctx.mem,
-                    handler_ctx.device.queues[TXQ_INDEX].pop().unwrap(),
+                    handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
                 )
                 .unwrap();
             let conn = match conn_state {
diff --git a/src/vmm/src/devices/virtio/vsock/device.rs b/src/vmm/src/devices/virtio/vsock/device.rs
index aa114f6cccb..0f00e7c6adc 100644
--- a/src/vmm/src/devices/virtio/vsock/device.rs
+++ b/src/vmm/src/devices/virtio/vsock/device.rs
@@ -32,7 +32,7 @@ use super::{VsockBackend, defs};
 use crate::devices::virtio::ActivateError;
 use crate::devices::virtio::device::{DeviceState, IrqTrigger, IrqType, VirtioDevice};
 use crate::devices::virtio::generated::virtio_config::{VIRTIO_F_IN_ORDER, VIRTIO_F_VERSION_1};
-use crate::devices::virtio::queue::Queue as VirtQueue;
+use crate::devices::virtio::queue::{InvalidAvailIdx, Queue as VirtQueue};
 use crate::devices::virtio::vsock::VsockError;
 use crate::devices::virtio::vsock::metrics::METRICS;
 use crate::logger::IncMetric;
@@ -145,13 +145,13 @@ where
     /// Walk the driver-provided RX queue buffers and attempt to fill them up with any data that we
     /// have pending. Return `true` if descriptors have been added to the used ring, and `false`
     /// otherwise.
-    pub fn process_rx(&mut self) -> bool {
+    pub fn process_rx(&mut self) -> Result<bool, DeviceError> {
         // This is safe since we checked in the event handler that the device is activated.
         let mem = self.device_state.mem().unwrap();
 
         let mut have_used = false;
 
-        while let Some(head) = self.queues[RXQ_INDEX].pop() {
+        while let Some(head) = self.queues[RXQ_INDEX].pop()? {
             let index = head.index;
             let used_len = match self.rx_packet.parse(mem, head) {
                 Ok(()) => {
@@ -192,19 +192,19 @@
             });
         }
 
-        have_used
+        Ok(have_used)
     }
 
     /// Walk the driver-provided TX queue buffers, package them up as vsock packets, and send them
     /// to the backend for processing. Return `true` if descriptors have been added to the used
     /// ring, and `false` otherwise.
-    pub fn process_tx(&mut self) -> bool {
+    pub fn process_tx(&mut self) -> Result<bool, DeviceError> {
         // This is safe since we checked in the event handler that the device is activated.
         let mem = self.device_state.mem().unwrap();
 
         let mut have_used = false;
 
-        while let Some(head) = self.queues[TXQ_INDEX].pop() {
+        while let Some(head) = self.queues[TXQ_INDEX].pop()? {
             let index = head.index;
             // let pkt = match VsockPacket::from_tx_virtq_head(mem, head) {
             match self.tx_packet.parse(mem, head) {
@@ -234,7 +234,7 @@
             });
         }
 
-        have_used
+        Ok(have_used)
     }
 
     // Send TRANSPORT_RESET_EVENT to driver. According to specs, the driver shuts down established
@@ -244,7 +244,7 @@
         // This is safe since we checked in the caller function that the device is activated.
         let mem = self.device_state.mem().unwrap();
 
-        let head = self.queues[EVQ_INDEX].pop().ok_or_else(|| {
+        let head = self.queues[EVQ_INDEX].pop()?.ok_or_else(|| {
             METRICS.ev_queue_event_fails.inc();
             DeviceError::VsockError(VsockError::EmptyQueue)
         })?;
diff --git a/src/vmm/src/devices/virtio/vsock/event_handler.rs b/src/vmm/src/devices/virtio/vsock/event_handler.rs
index 632148546e5..7d3b9faf1c9 100755
--- a/src/vmm/src/devices/virtio/vsock/event_handler.rs
+++ b/src/vmm/src/devices/virtio/vsock/event_handler.rs
@@ -58,7 +58,9 @@ where
             error!("Failed to get vsock rx queue event: {:?}", err);
             METRICS.rx_queue_event_fails.inc();
         } else if self.backend.has_pending_rx() {
-            raise_irq |= self.process_rx();
+            // OK to unwrap: only DeviceError::InvalidAvailIdx is returned, and we explicitly
+            // want to panic on that one.
+            raise_irq |= self.process_rx().unwrap();
             METRICS.rx_queue_event_count.inc();
         }
         raise_irq
@@ -76,13 +78,15 @@ where
             error!("Failed to get vsock tx queue event: {:?}", err);
             METRICS.tx_queue_event_fails.inc();
         } else {
-            raise_irq |= self.process_tx();
+            // OK to unwrap: only DeviceError::InvalidAvailIdx is returned, and we explicitly
+            // want to panic on that one.
+            raise_irq |= self.process_tx().unwrap();
             METRICS.tx_queue_event_count.inc();
             // The backend may have queued up responses to the packets we sent during
             // TX queue processing. If that happened, we need to fetch those responses
             // and place them into RX buffers.
             if self.backend.has_pending_rx() {
-                raise_irq |= self.process_rx();
+                raise_irq |= self.process_rx().unwrap();
             }
         }
         raise_irq
@@ -110,9 +114,11 @@ where
         // In particular, if `self.backend.send_pkt()` halted the TX queue processing (by
        // returning an error) at some point in the past, now is the time to try walking the
        // TX queue again.
-        let mut raise_irq = self.process_tx();
+        // OK to unwrap: only DeviceError::InvalidAvailIdx is returned, and we explicitly
+        // want to panic on that one.
+        let mut raise_irq = self.process_tx().unwrap();
         if self.backend.has_pending_rx() {
-            raise_irq |= self.process_rx();
+            raise_irq |= self.process_rx().unwrap();
         }
         raise_irq
     }
@@ -351,7 +357,7 @@ mod tests {
         ctx.guest_rxvq.dtable[1].len.set(0);
 
         // The chain should've been processed, without employing the backend.
-        assert!(ctx.device.process_rx());
+        assert!(ctx.device.process_rx().unwrap());
         assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
         assert_eq!(ctx.device.backend.rx_ok_cnt, 0);
     }
@@ -436,7 +442,7 @@
             ctx.guest_rxvq.dtable[desc_idx].len.set(len);
             // If the descriptor chain is already declared invalid, there's no reason to assemble
             // a packet.
-            if let Some(rx_desc) = ctx.device.queues[RXQ_INDEX].pop() {
+            if let Some(rx_desc) = ctx.device.queues[RXQ_INDEX].pop().unwrap() {
                 VsockPacketRx::new()
                     .unwrap()
                     .parse(&test_ctx.mem, rx_desc)
@@ -461,7 +467,7 @@
             ctx.guest_txvq.dtable[desc_idx].addr.set(addr);
             ctx.guest_txvq.dtable[desc_idx].len.set(len);
 
-            if let Some(tx_desc) = ctx.device.queues[TXQ_INDEX].pop() {
+            if let Some(tx_desc) = ctx.device.queues[TXQ_INDEX].pop().unwrap() {
                 VsockPacketTx::default()
                     .parse(&test_ctx.mem, tx_desc)
                     .unwrap_err();
@@ -492,7 +498,7 @@
         // The default configured descriptor chains are valid.
         {
             let mut ctx = test_ctx.create_event_handler_context();
-            let rx_desc = ctx.device.queues[RXQ_INDEX].pop().unwrap();
+            let rx_desc = ctx.device.queues[RXQ_INDEX].pop().unwrap().unwrap();
             VsockPacketRx::new()
                 .unwrap()
                 .parse(&test_ctx.mem, rx_desc)
@@ -501,7 +507,7 @@
         {
             let mut ctx = test_ctx.create_event_handler_context();
-            let tx_desc = ctx.device.queues[TXQ_INDEX].pop().unwrap();
+            let tx_desc = ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap();
             VsockPacketTx::default()
                 .parse(&test_ctx.mem, tx_desc)
                 .unwrap();
diff --git a/src/vmm/src/devices/virtio/vsock/packet.rs b/src/vmm/src/devices/virtio/vsock/packet.rs
index 78130af4b12..7253f41ce76 100644
--- a/src/vmm/src/devices/virtio/vsock/packet.rs
+++ b/src/vmm/src/devices/virtio/vsock/packet.rs
@@ -417,7 +417,7 @@ mod tests {
         let mut pkt = VsockPacketTx::default();
         pkt.parse(
             &test_ctx.mem,
-            handler_ctx.device.queues[TXQ_INDEX].pop().unwrap(),
+            handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
         )
         .unwrap();
 
@@ -436,7 +436,7 @@
         assert!(matches!(
             VsockPacketTx::default().parse(
                 &test_ctx.mem,
-                handler_ctx.device.queues[TXQ_INDEX].pop().unwrap(),
+                handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
             ),
             Err(VsockError::UnreadableDescriptor)
         ))
@@ -452,7 +452,7 @@
         assert!(matches!(
             VsockPacketTx::default().parse(
                 &test_ctx.mem,
-                handler_ctx.device.queues[TXQ_INDEX].pop().unwrap(),
+                handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
             ),
             Err(VsockError::DescChainTooShortForHeader(_))
         ))
@@ -465,7 +465,7 @@
         VsockPacketTx::default()
             .parse(
                 &test_ctx.mem,
-                handler_ctx.device.queues[TXQ_INDEX].pop().unwrap(),
+                handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
             )
             .unwrap();
     }
@@ -481,7 +481,7 @@
         assert!(matches!(
             VsockPacketTx::default().parse(
                 &test_ctx.mem,
-                handler_ctx.device.queues[TXQ_INDEX].pop().unwrap(),
+                handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
             ),
             Err(VsockError::InvalidPktLen(_))
         ))
@@ -497,7 +497,7 @@
         assert!(matches!(
             VsockPacketTx::default().parse(
                 &test_ctx.mem,
-                handler_ctx.device.queues[TXQ_INDEX].pop().unwrap(),
+                handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
             ),
             Err(VsockError::DescChainTooShortForPacket(44, 1024))
         ))
@@ -512,7 +512,7 @@
         assert!(matches!(
             VsockPacketTx::default().parse(
                 &test_ctx.mem,
-                handler_ctx.device.queues[TXQ_INDEX].pop().unwrap(),
+                handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
             ),
             Err(VsockError::UnreadableDescriptor)
        ))
@@ -527,7 +527,7 @@
         assert!(matches!(
             VsockPacketTx::default().parse(
                 &test_ctx.mem,
-                handler_ctx.device.queues[TXQ_INDEX].pop().unwrap(),
+                handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
             ),
             Err(VsockError::DescChainTooShortForPacket(4140, 8192))
         ))
@@ -542,7 +542,7 @@
         let mut pkt = VsockPacketRx::new().unwrap();
         pkt.parse(
             &test_ctx.mem,
-            handler_ctx.device.queues[RXQ_INDEX].pop().unwrap(),
+            handler_ctx.device.queues[RXQ_INDEX].pop().unwrap().unwrap(),
         )
         .unwrap();
         assert_eq!(pkt.buf_size(), handler_ctx.guest_rxvq.dtable[1].len.get());
@@ -555,7 +555,7 @@
         assert!(matches!(
             VsockPacketRx::new().unwrap().parse(
                 &test_ctx.mem,
-                handler_ctx.device.queues[RXQ_INDEX].pop().unwrap(),
+                handler_ctx.device.queues[RXQ_INDEX].pop().unwrap().unwrap(),
             ),
             Err(VsockError::UnwritableDescriptor)
         ))
@@ -571,7 +571,7 @@
         assert!(matches!(
             VsockPacketRx::new().unwrap().parse(
                 &test_ctx.mem,
-                handler_ctx.device.queues[RXQ_INDEX].pop().unwrap(),
+                handler_ctx.device.queues[RXQ_INDEX].pop().unwrap().unwrap(),
             ),
             Err(VsockError::DescChainTooShortForHeader(_))
         ))
@@ -642,13 +642,13 @@
         let mut pkt = VsockPacketRx::new().unwrap();
         pkt.parse(
             &test_ctx.mem,
-            handler_ctx.device.queues[RXQ_INDEX].pop().unwrap(),
+            handler_ctx.device.queues[RXQ_INDEX].pop().unwrap().unwrap(),
         )
         .unwrap();
         let mut pkt2 = VsockPacketTx::default();
         pkt2.parse(
             &test_ctx.mem,
-            handler_ctx.device.queues[TXQ_INDEX].pop().unwrap(),
+            handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
         )
         .unwrap();
diff --git a/src/vmm/src/devices/virtio/vsock/unix/muxer.rs b/src/vmm/src/devices/virtio/vsock/unix/muxer.rs
index 478d5c7318d..ad979b4bdeb 100644
--- a/src/vmm/src/devices/virtio/vsock/unix/muxer.rs
+++ b/src/vmm/src/devices/virtio/vsock/unix/muxer.rs
@@ -838,14 +838,14 @@ mod tests {
             rx_pkt
                 .parse(
                     &vsock_test_ctx.mem,
-                    handler_ctx.device.queues[RXQ_INDEX].pop().unwrap(),
+                    handler_ctx.device.queues[RXQ_INDEX].pop().unwrap().unwrap(),
                 )
                 .unwrap();
             let mut tx_pkt = VsockPacketTx::default();
             tx_pkt
                 .parse(
                     &vsock_test_ctx.mem,
-                    handler_ctx.device.queues[TXQ_INDEX].pop().unwrap(),
+                    handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
                )
                .unwrap();
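
To summarize the calling convention this patch establishes, here is a minimal, self-contained Rust sketch (not part of the patch itself; the types below are simplified stand-ins for the real `vmm` ones, mirroring only the new `pop` signature). `pop` now returns `Result<Option<_>, InvalidAvailIdx>`: `Ok(None)` means the queue is empty, `Ok(Some(_))` yields a descriptor chain, and `Err(_)` flags an avail index larger than the queue itself.

// Toy stand-in for the real `InvalidAvailIdx`: the driver-reported number of
// available descriptors exceeds what the queue can actually hold.
#[derive(Debug, PartialEq, Eq)]
struct InvalidAvailIdx {
    queue_size: u16,
    reported_len: u16,
}

impl std::fmt::Display for InvalidAvailIdx {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "The number of available virtio descriptors {} is greater than queue size: {}!",
            self.reported_len, self.queue_size
        )
    }
}

// Placeholder for a parsed descriptor chain head.
struct DescriptorChain;

// Minimal queue model: `len` is what the driver's avail ring claims is pending.
struct Queue {
    size: u16,
    len: u16,
}

impl Queue {
    // Mirrors the patched `Queue::pop`: validate the driver-reported length
    // before handing out descriptor chains, and report instead of panicking.
    fn pop(&mut self) -> Result<Option<DescriptorChain>, InvalidAvailIdx> {
        if self.len > self.size {
            return Err(InvalidAvailIdx {
                queue_size: self.size,
                reported_len: self.len,
            });
        }
        if self.len == 0 {
            return Ok(None);
        }
        self.len -= 1;
        Ok(Some(DescriptorChain))
    }
}

fn main() {
    // Event-loop style call site: a bogus avail index is a potential DoS by a
    // malicious driver, so the handler unwraps and deliberately panics on it.
    let mut queue = Queue { size: 4, len: 2 };
    while let Some(_chain) = queue.pop().unwrap() {
        // ... process the descriptor chain and add it to the used ring ...
    }

    // Snapshot-restore style call site: a corrupt snapshot should surface as
    // an ordinary error to the user, so the error is propagated, not unwrapped.
    let mut corrupt = Queue { size: 4, len: 6 };
    if let Err(err) = corrupt.pop() {
        eprintln!("refusing to restore: {err}");
    }
}

Design note: moving the panic out of `Queue::pop` and into the call sites preserves the existing "exit on malicious driver" policy on runtime paths (the `.unwrap()` calls in the device event handlers above), while letting restore paths such as `Net::parse_rx_descriptors` during snapshot load report `InvalidAvailIdx` as a plain error.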