Skip to content

Commit 96d6729

Browse files
committed
TEST: update queue impl to do less memory checks
Signed-off-by: Egor Lazarchuk <[email protected]>
1 parent b61f394 commit 96d6729

File tree

4 files changed

+461
-68
lines changed

4 files changed

+461
-68
lines changed

src/vmm/src/devices/virtio/iovec.rs

Lines changed: 43 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,9 @@ use vm_memory::{
1010
};
1111

1212
use crate::devices::virtio::queue::DescriptorChain;
13-
use crate::vstate::memory::{Bitmap, GuestMemory};
13+
use crate::vstate::memory::{Bitmap, GuestMemory, GuestMemoryMmap};
14+
15+
use super::queue::DescriptorChainOpt;
1416

1517
#[derive(Debug, thiserror::Error, displaydoc::Display)]
1618
pub enum IoVecError {
@@ -270,6 +272,46 @@ impl IoVecBufferMut {
270272
Ok(())
271273
}
272274

275+
pub unsafe fn load_descriptor_chain_opt(
276+
&mut self,
277+
head: DescriptorChainOpt,
278+
mem: &GuestMemoryMmap,
279+
) -> Result<(), IoVecError> {
280+
self.clear();
281+
self.head_index = head.index;
282+
283+
let mut next_descriptor = Some(head);
284+
while let Some(desc) = next_descriptor {
285+
if !desc.is_write_only() {
286+
return Err(IoVecError::ReadOnlyDescriptor);
287+
}
288+
289+
// We use get_slice instead of `get_host_address` here in order to have the whole
290+
// range of the descriptor chain checked, i.e. [addr, addr + len) is a valid memory
291+
// region in the GuestMemoryMmap.
292+
let slice = mem.get_slice(desc.addr, desc.len as usize)?;
293+
294+
// We need to mark the area of guest memory that will be mutated through this
295+
// IoVecBufferMut as dirty ahead of time, as we lose access to all
296+
// vm-memory related information after converting down to iovecs.
297+
slice.bitmap().mark_dirty(0, desc.len as usize);
298+
299+
let iov_base = slice.ptr_guard_mut().as_ptr().cast::<c_void>();
300+
self.vecs.push(iovec {
301+
iov_base,
302+
iov_len: desc.len as size_t,
303+
});
304+
self.len = self
305+
.len
306+
.checked_add(desc.len)
307+
.ok_or(IoVecError::OverflowedDescriptor)?;
308+
309+
next_descriptor = desc.next_descriptor();
310+
}
311+
312+
Ok(())
313+
}
314+
273315
/// Create an `IoVecBuffer` from a `DescriptorChain`
274316
///
275317
/// # Safety

src/vmm/src/devices/virtio/net/device.rs

Lines changed: 108 additions & 67 deletions
Original file line numberDiff line numberDiff line change
@@ -156,68 +156,74 @@ impl AvailableDescriptors {
156156

157157
/// Read new descriptor chains from the queue.
158158
pub fn read_new_desc_chains(&mut self, queue: &mut Queue, mem: &GuestMemoryMmap) {
159-
#[repr(C)]
160-
struct AvailRing {
161-
flags: u16,
162-
idx: u16,
163-
ring: [u16; 256],
164-
used_event: u16,
165-
}
166-
#[repr(C)]
167-
#[derive(Default, Clone, Copy)]
168-
struct Descriptor {
169-
addr: u64,
170-
len: u32,
171-
flags: u16,
172-
next: u16,
173-
}
174-
175-
// SAFETY:
176-
// avail_ring in the queue is a valid guest address
177-
let avail_ring: &AvailRing =
178-
unsafe { std::mem::transmute(mem.get_host_address(queue.avail_ring).unwrap()) };
179-
180-
// SAFETY:
181-
// desc_table in the queue is a valid guest address
182-
let desc_table: &[Descriptor; 256] =
183-
unsafe { std::mem::transmute(mem.get_host_address(queue.desc_table).unwrap()) };
184-
185-
let avail_idx = queue.avail_idx(mem);
186-
let actual_size = queue.actual_size();
187-
188-
while queue.next_avail.0 != avail_idx.0 {
189-
let Some(next_iovec_buf) = self.iov_ring.next_available() else {
190-
break;
191-
};
192-
193-
let avail_index = queue.next_avail.0 % actual_size;
194-
queue.next_avail += Wrapping(1);
195-
196-
let desc_index = avail_ring.ring[avail_index as usize];
197-
let mut desc = &desc_table[desc_index as usize];
198-
199-
next_iovec_buf.clear();
200-
next_iovec_buf.head_index = desc_index;
201-
202-
let iov = libc::iovec {
203-
iov_base: mem.get_host_address(GuestAddress(desc.addr)).unwrap().cast(),
204-
iov_len: desc.len as usize,
205-
};
206-
next_iovec_buf.vecs.push(iov);
207-
next_iovec_buf.len += desc.len;
208-
self.valid_ring.push(true);
209-
210-
while desc.flags & crate::devices::virtio::queue::VIRTQ_DESC_F_NEXT != 0 {
211-
desc = &desc_table[desc.next as usize];
212-
let iov = libc::iovec {
213-
iov_base: mem.get_host_address(GuestAddress(desc.addr)).unwrap().cast(),
214-
iov_len: desc.len as usize,
215-
};
216-
next_iovec_buf.vecs.push(iov);
217-
next_iovec_buf.len += desc.len;
218-
self.valid_ring.push(true);
219-
}
220-
}
159+
// #[repr(C)]
160+
// struct AvailRing {
161+
// flags: u16,
162+
// idx: u16,
163+
// ring: [u16; 256],
164+
// used_event: u16,
165+
// }
166+
// #[repr(C)]
167+
// #[derive(Default, Clone, Copy)]
168+
// struct Descriptor {
169+
// addr: u64,
170+
// len: u32,
171+
// flags: u16,
172+
// next: u16,
173+
// }
174+
//
175+
// // SAFETY:
176+
// // avail_ring in the queue is a valid guest address
177+
// let avail_ring: &AvailRing =
178+
// unsafe { std::mem::transmute(mem.get_host_address(queue.avail_ring).unwrap()) };
179+
//
180+
// // SAFETY:
181+
// // desc_table in the queue is a valid guest address
182+
// let desc_table: &[Descriptor; 256] =
183+
// unsafe { std::mem::transmute(mem.get_host_address(queue.desc_table).unwrap()) };
184+
//
185+
// let avail_idx = queue.avail_idx(mem);
186+
// let actual_size = queue.actual_size();
187+
//
188+
// while queue.next_avail.0 != avail_idx.0 {
189+
// let Some(next_iovec_buf) = self.iov_ring.next_available() else {
190+
// break;
191+
// };
192+
//
193+
// let avail_index = queue.next_avail.0 % actual_size;
194+
// queue.next_avail += Wrapping(1);
195+
//
196+
// let desc_index = avail_ring.ring[avail_index as usize];
197+
// let mut desc = &desc_table[desc_index as usize];
198+
//
199+
// next_iovec_buf.clear();
200+
// next_iovec_buf.head_index = desc_index;
201+
//
202+
// let iov = libc::iovec {
203+
// iov_base: mem
204+
// .get_host_address(GuestAddress(desc.addr))
205+
// .unwrap()
206+
// .cast(),
207+
// iov_len: desc.len as usize,
208+
// };
209+
// next_iovec_buf.vecs.push(iov);
210+
// next_iovec_buf.len += desc.len;
211+
// self.valid_ring.push(true);
212+
//
213+
// while desc.flags & crate::devices::virtio::queue::VIRTQ_DESC_F_NEXT != 0 {
214+
// desc = &desc_table[desc.next as usize];
215+
// let iov = libc::iovec {
216+
// iov_base: mem
217+
// .get_host_address(GuestAddress(desc.addr))
218+
// .unwrap()
219+
// .cast(),
220+
// iov_len: desc.len as usize,
221+
// };
222+
// next_iovec_buf.vecs.push(iov);
223+
// next_iovec_buf.len += desc.len;
224+
// self.valid_ring.push(true);
225+
// }
226+
// }
221227

222228
// for _ in 0..queue.len(mem) {
223229
// let Some(next_iovec_buf) = self.iov_ring.next_available() else {
@@ -236,6 +242,28 @@ impl AvailableDescriptors {
236242
// }
237243
// }
238244
// }
245+
246+
for _ in 0..queue.len_opt() {
247+
let Some(next_iovec_buf) = self.iov_ring.next_available() else {
248+
break;
249+
};
250+
251+
match queue.do_pop_unchecked_opt() {
252+
Some(desc_chain) => {
253+
// SAFETY:
254+
// This descriptor chain is only processed once.
255+
let valid = unsafe {
256+
next_iovec_buf
257+
.load_descriptor_chain_opt(desc_chain, mem)
258+
.is_ok()
259+
};
260+
self.valid_ring.push(valid);
261+
}
262+
None => {
263+
self.valid_ring.push(false);
264+
}
265+
}
266+
}
239267
}
240268

241269
/// Pop first descriptor chain.
@@ -503,7 +531,8 @@ impl Net {
503531
// This is safe since we checked in the event handler that the device is activated.
504532
let mem = self.device_state.mem().unwrap();
505533

506-
if self.rx_avail_desc.is_empty() && self.queues[RX_INDEX].is_empty(mem) {
534+
// if self.rx_avail_desc.is_empty() && self.queues[RX_INDEX].is_empty(mem) {
535+
if self.rx_avail_desc.is_empty() && self.queues[RX_INDEX].is_empty_opt() {
507536
self.metrics.no_rx_avail_buffer.inc();
508537
return Err(FrontendError::EmptyQueue);
509538
}
@@ -582,8 +611,12 @@ impl Net {
582611
// SAFETY:
583612
// This should never panic as we provide index in
584613
// correct bounds.
614+
// self.queues[RX_INDEX]
615+
// .write_used_ring(mem, next_used_index.0, used_element)
616+
// .unwrap();
617+
585618
self.queues[RX_INDEX]
586-
.write_used_ring(mem, next_used_index.0, used_element)
619+
.write_used_ring_opt(next_used_index.0, used_element)
587620
.unwrap();
588621

589622
used_heads += 1;
@@ -604,9 +637,13 @@ impl Net {
604637
// about descriptor heads used for partially written packets.
605638
// Otherwise guest will see that we used those descriptors and
606639
// will try to process them.
607-
self.queues[RX_INDEX].advance_used_ring(mem, used_heads);
640+
// self.queues[RX_INDEX].advance_used_ring(mem, used_heads);
641+
// let next_avail = self.queues[RX_INDEX].next_avail.0;
642+
// self.queues[RX_INDEX].set_used_ring_avail_event(next_avail, mem);
643+
644+
self.queues[RX_INDEX].advance_used_ring_opt(used_heads);
608645
let next_avail = self.queues[RX_INDEX].next_avail.0;
609-
self.queues[RX_INDEX].set_used_ring_avail_event(next_avail, mem);
646+
*self.queues[RX_INDEX].ur.avail_event() = next_avail;
610647

611648
// Clear partial write info if there was one
612649
self.rx_partial_write = None;
@@ -620,7 +657,9 @@ impl Net {
620657

621658
// We used `used_heads` descriptors to process the packet.
622659
// Because this is an error case, we discard those descriptors.
623-
self.queues[RX_INDEX].discard_used(mem, used_heads);
660+
// self.queues[RX_INDEX].discard_used(mem, used_heads);
661+
662+
self.queues[RX_INDEX].discard_used_opt(used_heads);
624663

625664
Err(err)
626665
} else if slice.is_empty() {
@@ -1149,6 +1188,8 @@ impl VirtioDevice for Net {
11491188
}
11501189

11511190
fn activate(&mut self, mem: GuestMemoryMmap) -> Result<(), ActivateError> {
1191+
self.queues[RX_INDEX].set_pointers(&mem);
1192+
11521193
let event_idx = self.has_feature(u64::from(VIRTIO_RING_F_EVENT_IDX));
11531194
if event_idx {
11541195
for queue in &mut self.queues {

src/vmm/src/devices/virtio/persist.rs

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,8 @@ use crate::devices::virtio::queue::Queue;
1616
use crate::snapshot::Persist;
1717
use crate::vstate::memory::{GuestAddress, GuestMemoryMmap};
1818

19+
use super::queue::DescTable;
20+
1921
/// Errors thrown during restoring virtio state.
2022
#[derive(Debug, thiserror::Error, displaydoc::Display)]
2123
pub enum PersistError {
@@ -78,6 +80,11 @@ impl Persist<'_> for Queue {
7880
desc_table: GuestAddress(state.desc_table),
7981
avail_ring: GuestAddress(state.avail_ring),
8082
used_ring: GuestAddress(state.used_ring),
83+
84+
dt: Default::default(),
85+
ar: Default::default(),
86+
ur: Default::default(),
87+
8188
next_avail: state.next_avail,
8289
next_used: state.next_used,
8390
uses_notif_suppression: false,

0 commit comments

Comments
 (0)