Skip to content

Commit 4e48855

Browse files
committed
refactor: more descriptive used ring usage calculations
Replace hard-coded offset calculations with more descriptive `std::mem::size_of` expressions, with better comments explaining why each offset is calculated this way. Additionally, replace setting used-element fields one by one with a single call to `write_obj`. Signed-off-by: Egor Lazarchuk <[email protected]>
1 parent e375238 commit 4e48855

File tree

1 file changed

+80
-19
lines changed

1 file changed

+80
-19
lines changed

src/vmm/src/devices/virtio/queue.rs

Lines changed: 80 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,8 @@ use std::cmp::min;
99
use std::num::Wrapping;
1010
use std::sync::atomic::{fence, Ordering};
1111

12+
use utils::usize_to_u64;
13+
1214
use crate::logger::error;
1315
use crate::vstate::memory::{
1416
Address, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap,
@@ -49,6 +51,17 @@ struct Descriptor {
4951
// SAFETY: `Descriptor` is a POD and contains no padding.
5052
unsafe impl ByteValued for Descriptor {}
5153

54+
/// A virtio used element in the used ring.
55+
#[repr(C)]
56+
#[derive(Default, Clone, Copy)]
57+
struct UsedElement {
58+
id: u32,
59+
len: u32,
60+
}
61+
62+
// SAFETY: `UsedElement` is a POD and contains no padding.
63+
unsafe impl ByteValued for UsedElement {}
64+
5265
/// A virtio descriptor chain.
5366
#[derive(Debug)]
5467
pub struct DescriptorChain<'a, M: GuestMemory = GuestMemoryMmap> {
@@ -430,31 +443,52 @@ impl Queue {
430443
) -> Result<(), QueueError> {
431444
debug_assert!(self.is_layout_valid(mem));
432445

433-
if desc_index >= self.actual_size() {
434-
error!(
435-
"attempted to add out of bounds descriptor to used ring: {}",
436-
desc_index
437-
);
438-
return Err(QueueError::DescIndexOutOfBounds(desc_index));
439-
}
440-
441-
let used_ring = self.used_ring;
442-
let next_used = u64::from(self.next_used.0 % self.actual_size());
443-
let used_elem = used_ring.unchecked_add(4 + next_used * 8);
444-
445-
mem.write_obj(u32::from(desc_index), used_elem)?;
446-
447-
let len_addr = used_elem.unchecked_add(4);
448-
mem.write_obj(len, len_addr)?;
446+
let next_used = self.next_used.0 % self.actual_size();
447+
self.write_used_ring(mem, next_used, desc_index, len)?;
449448

450449
self.num_added += Wrapping(1);
451450
self.next_used += Wrapping(1);
452451

453452
// This fence ensures all descriptor writes are visible before the index update is.
454453
fence(Ordering::Release);
455454

456-
let next_used_addr = used_ring.unchecked_add(2);
457-
mem.write_obj(self.next_used.0, next_used_addr)
455+
self.set_next_used(self.next_used.0, mem);
456+
Ok(())
457+
}
458+
459+
fn write_used_ring<M: GuestMemory>(
460+
&self,
461+
mem: &M,
462+
index: u16,
463+
id: u16,
464+
len: u32,
465+
) -> Result<(), QueueError> {
466+
if id >= self.actual_size() {
467+
error!(
468+
"attempted to add out of bounds descriptor to used ring: {}",
469+
id
470+
);
471+
return Err(QueueError::DescIndexOutOfBounds(id));
472+
}
473+
474+
// Used ring has layout:
475+
// struct UsedRing {
476+
// flags: u16,
477+
// idx: u16,
478+
// ring: [UsedElement; <queue size>],
479+
// avail_event: u16,
480+
// }
481+
// We calculate offset into `ring` field.
482+
let used_ring_offset = std::mem::size_of::<u16>()
483+
+ std::mem::size_of::<u16>()
484+
+ std::mem::size_of::<UsedElement>() * usize::from(index);
485+
let used_element_address = self.used_ring.unchecked_add(usize_to_u64(used_ring_offset));
486+
let used_element = UsedElement {
487+
id: u32::from(id),
488+
len,
489+
};
490+
491+
mem.write_obj(used_element, used_element_address)
458492
.map_err(QueueError::UsedRing)
459493
}
460494

@@ -488,13 +522,40 @@ impl Queue {
488522
fn set_avail_event<M: GuestMemory>(&mut self, val: u16, mem: &M) {
489523
debug_assert!(self.is_layout_valid(mem));
490524

525+
// Used ring has layout:
526+
// struct UsedRing {
527+
// flags: u16,
528+
// idx: u16,
529+
// ring: [UsedElement; <queue size>],
530+
// avail_event: u16,
531+
// }
532+
// We calculate offset into `avail_event` field.
533+
let avail_event_offset = std::mem::size_of::<u16>()
534+
+ std::mem::size_of::<u16>()
535+
+ std::mem::size_of::<UsedElement>() * usize::from(self.actual_size());
491536
let avail_event_addr = self
492537
.used_ring
493-
.unchecked_add(u64::from(4 + 8 * self.actual_size()));
538+
.unchecked_add(usize_to_u64(avail_event_offset));
494539

495540
mem.write_obj(val, avail_event_addr).unwrap();
496541
}
497542

543+
fn set_next_used<M: GuestMemory>(&mut self, val: u16, mem: &M) {
544+
debug_assert!(self.is_layout_valid(mem));
545+
546+
// Used ring has layout:
547+
// struct UsedRing {
548+
// flags: u16,
549+
// idx: u16,
550+
// ring: [UsedElement; <queue size>],
551+
// avail_event: u16,
552+
// }
553+
// We calculate offset into `idx` field.
554+
let idx_offset = std::mem::size_of::<u16>();
555+
let next_used_addr = self.used_ring.unchecked_add(usize_to_u64(idx_offset));
556+
mem.write_obj(val, next_used_addr).unwrap();
557+
}
558+
498559
/// Try to enable notification events from the guest driver. Returns true if notifications were
499560
/// successfully enabled. Otherwise it means that one or more descriptors can still be consumed
500561
/// from the available ring and we can't guarantee that there will be a notification. In this

0 commit comments

Comments
 (0)