Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions src/vmm/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -61,5 +61,13 @@ tracing = ["log-instrument"]
name = "cpu_templates"
harness = false

[[bench]]
name = "queue"
harness = false

[[bench]]
name = "block_request"
harness = false

[lints]
workspace = true
45 changes: 45 additions & 0 deletions src/vmm/benches/block_request.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
// Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Benchmarking cases:
// * `Request::parse`

use criterion::{criterion_group, criterion_main, Criterion};
use vm_memory::GuestAddress;
use vmm::devices::virtio::block::virtio::test_utils::RequestDescriptorChain;
use vmm::devices::virtio::block::virtio::{Request, RequestHeader, VIRTIO_BLK_T_IN};
use vmm::devices::virtio::test_utils::VirtQueue;
use vmm::utilities::test_utils::single_region_mem;

/// Benchmark parsing of a virtio block request from a descriptor chain.
pub fn block_request_benchmark(c: &mut Criterion) {
    // Guest memory large enough for the queue objects plus descriptor buffers.
    let mem = single_region_mem(2 * 65562);
    let virt_queue = VirtQueue::new(GuestAddress(0), &mem, 16);

    // We don't really care about what the request is. We just
    // need it to be valid; 99 is an arbitrary sector value.
    let chain = RequestDescriptorChain::new(&virt_queue);
    let request_header = RequestHeader::new(VIRTIO_BLK_T_IN, 99);
    chain.set_header(request_header);

    let mut queue = virt_queue.create_queue();
    let desc = queue.pop(&mem).unwrap();

    c.bench_function("request_parse", |b| {
        b.iter(|| {
            // black_box the input so the parse can't be const-folded,
            // and the output so the parse itself can't be optimized away.
            let desc = std::hint::black_box(&desc);
            std::hint::black_box(Request::parse(desc, &mem, 1024));
        })
    });
}

// Larger sample size and a 5% noise threshold keep results stable
// across runs for this very short benchmark.
criterion_group! {
    name = block_request_benches;
    config = Criterion::default().sample_size(200).noise_threshold(0.05);
    targets = block_request_benchmark
}

criterion_main! {
    block_request_benches
}
187 changes: 187 additions & 0 deletions src/vmm/benches/queue.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,187 @@
// Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Benchmarking cases:
// * `Queue.pop`
// * `Queue.add_used`
// * `DescriptorChain.next_descriptor`

use std::num::Wrapping;

use criterion::{criterion_group, criterion_main, Criterion};
use vm_memory::GuestAddress;
use vmm::devices::virtio::queue::{VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE};
use vmm::devices::virtio::test_utils::VirtQueue;
use vmm::utilities::test_utils::single_region_mem;

/// Build a single descriptor chain of `n` descriptors in `rxq`.
/// Descriptor buffers live at an offset of 2048 bytes to leave some
/// room for the queue objects. The buffer sizes are irrelevant to
/// the benchmark, so an arbitrary 1024 bytes is used.
fn set_dtable_one_chain(rxq: &VirtQueue, n: usize) {
    const DESC_SIZE: usize = 1024;
    const BUF_OFFSET: usize = 2048;
    for i in 0..n {
        let flags = VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_NEXT;
        rxq.dtable[i].set(
            (BUF_OFFSET + DESC_SIZE * i) as u64,
            DESC_SIZE as u32,
            flags,
            (i + 1) as u16,
        );
    }
    // Terminate the chain: the last descriptor carries no NEXT flag
    // and its `next` index is cleared.
    let last = n - 1;
    rxq.dtable[last].flags.set(VIRTQ_DESC_F_WRITE);
    rxq.dtable[last].next.set(0);
    // Publish exactly one available chain starting at descriptor 0.
    rxq.avail.ring[0].set(0);
    rxq.avail.idx.set(n as u16);
}

/// Populate `rxq` with `n` independent chains of one descriptor each.
/// Descriptor buffers live at an offset of 2048 bytes to leave some
/// room for the queue objects. The buffer sizes are irrelevant to
/// the benchmark, so an arbitrary 1024 bytes is used.
fn set_dtable_many_chains(rxq: &VirtQueue, n: usize) {
    const DESC_SIZE: usize = 1024;
    const BUF_OFFSET: usize = 2048;
    for i in 0..n {
        // Each chain is a lone write-only descriptor (no NEXT flag),
        // made available at ring slot `i`.
        rxq.dtable[i].set(
            (BUF_OFFSET + DESC_SIZE * i) as u64,
            DESC_SIZE as u32,
            VIRTQ_DESC_F_WRITE,
            0,
        );
        rxq.avail.ring[i].set(i as u16);
    }
    rxq.avail.idx.set(n as u16);
}

/// Benchmark `DescriptorChain::next_descriptor`, `Queue::pop` and
/// `Queue::add_used` over a range of chain/queue sizes.
///
/// The nine original copy-pasted stanzas are collapsed into three
/// loops; the generated benchmark names are identical to before
/// (`next_descriptor_1`, `queue_pop_4`, `queue_add_used_256`, ...).
pub fn queue_benchmark(c: &mut Criterion) {
    let mem = single_region_mem(65562);
    let rxq = VirtQueue::new(GuestAddress(0), &mem, 256);
    let mut queue = rxq.create_queue();

    // DescriptorChain::next_descriptor: walk one chain of `n` descriptors.
    for n in [1_usize, 2, 4, 16] {
        set_dtable_one_chain(&rxq, n);
        queue.next_avail = Wrapping(0);
        let desc = queue.pop(&mem).unwrap();
        c.bench_function(&format!("next_descriptor_{n}"), |b| {
            b.iter(|| {
                let mut head = Some(desc.clone());
                while let Some(d) = head {
                    head = std::hint::black_box(d.next_descriptor());
                }
            })
        });
    }

    // Queue::pop: drain `n` single-descriptor chains from the avail ring.
    for n in [1_usize, 4, 16] {
        set_dtable_many_chains(&rxq, n);
        c.bench_function(&format!("queue_pop_{n}"), |b| {
            b.iter(|| {
                // Rewind so every iteration pops the same chains.
                queue.next_avail = Wrapping(0);
                while let Some(desc) = queue.pop(&mem) {
                    std::hint::black_box(desc);
                }
            })
        });
    }

    // Queue::add_used: publish `n` used elements to the used ring.
    for n in [1_u16, 16, 256] {
        c.bench_function(&format!("queue_add_used_{n}"), |b| {
            b.iter(|| {
                // Rewind the used ring so each iteration does identical work.
                queue.num_added = Wrapping(0);
                queue.next_used = Wrapping(0);
                for i in 0..n {
                    let index = std::hint::black_box(i);
                    let len = std::hint::black_box(i + 1);
                    _ = queue.add_used(&mem, index, u32::from(len));
                }
            })
        });
    }
}

// Larger sample size and a 5% noise threshold keep results stable
// across runs for these very short benchmarks.
criterion_group! {
    name = queue_benches;
    config = Criterion::default().sample_size(200).noise_threshold(0.05);
    targets = queue_benchmark
}

criterion_main! {
    queue_benches
}
16 changes: 8 additions & 8 deletions src/vmm/src/devices/virtio/queue.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,8 @@ use crate::vstate::memory::{
Address, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap,
};

pub(super) const VIRTQ_DESC_F_NEXT: u16 = 0x1;
pub(super) const VIRTQ_DESC_F_WRITE: u16 = 0x2;
pub const VIRTQ_DESC_F_NEXT: u16 = 0x1;
pub const VIRTQ_DESC_F_WRITE: u16 = 0x2;

/// Max size of virtio queues offered by firecracker's virtio devices.
pub(super) const FIRECRACKER_MAX_QUEUE_SIZE: u16 = 256;
Expand Down Expand Up @@ -69,7 +69,7 @@ struct UsedElement {
unsafe impl ByteValued for UsedElement {}

/// A virtio descriptor chain.
#[derive(Debug)]
#[derive(Debug, Copy, Clone)]
pub struct DescriptorChain<'a, M: GuestMemory = GuestMemoryMmap> {
desc_table: GuestAddress,
queue_size: u16,
Expand Down Expand Up @@ -205,7 +205,7 @@ impl<'a> Iterator for DescriptorIterator<'a> {
/// A virtio queue's parameters.
pub struct Queue {
/// The maximal size in elements offered by the device
pub(crate) max_size: u16,
pub max_size: u16,

/// The queue size in elements the driver selected
pub size: u16,
Expand All @@ -222,13 +222,13 @@ pub struct Queue {
/// Guest physical address of the used ring
pub used_ring: GuestAddress,

pub(crate) next_avail: Wrapping<u16>,
pub(crate) next_used: Wrapping<u16>,
pub next_avail: Wrapping<u16>,
pub next_used: Wrapping<u16>,

/// VIRTIO_F_RING_EVENT_IDX negotiated (notification suppression enabled)
pub(crate) uses_notif_suppression: bool,
pub uses_notif_suppression: bool,
/// The number of added used buffers since last guest kick
pub(crate) num_added: Wrapping<u16>,
pub num_added: Wrapping<u16>,
}

#[allow(clippy::len_without_is_empty)]
Expand Down
Loading