1 change: 1 addition & 0 deletions Cargo.toml
@@ -22,6 +22,7 @@ include = [
backtrace = "0.3"
log = "0.4"
thiserror = "1.0"
presser = { version = "0.3" }
# Only needed for vulkan. Disable all default features as good practice,
# such as the ability to link/load a Vulkan library.
ash = { version = ">=0.34, <=0.37", optional = true, default-features = false, features = ["debug"] }
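The `presser` dependency added above is what the rest of this diff builds on. As a quick orientation, here is a minimal, self-contained sketch (not part of the PR) of the two calls used later, `presser::copy_from_slice_to_offset` and `presser::copy_to_offset`. The `DemoSlab` and `Vertex` types are illustrative stand-ins for the CPU-mapped `Allocation` that gains a `presser::Slab` implementation in `src/vulkan/mod.rs` below.

```rust
// Illustrative only: a heap-backed buffer standing in for a CPU-mapped GPU allocation.
struct DemoSlab {
    backing: Vec<u8>,
}

// SAFETY (assumed contract, mirroring the `unsafe impl presser::Slab for Allocation`
// added in this PR): `backing` is owned by the slab and never reallocated while it is
// borrowed as a `Slab`, so the returned pointers stay valid for `size()` bytes.
unsafe impl presser::Slab for DemoSlab {
    fn base_ptr(&self) -> *const u8 {
        self.backing.as_ptr()
    }

    fn base_ptr_mut(&mut self) -> *mut u8 {
        self.backing.as_mut_ptr()
    }

    fn size(&self) -> usize {
        self.backing.len()
    }
}

// 12 bytes of field data but align(16): a `&[Vertex]` cannot simply be cast to `&[u8]`,
// which is the situation `presser` is designed to handle.
#[repr(C, align(16))]
#[derive(Clone, Copy)]
struct Vertex {
    x: f32,
    y: f32,
    z: f32,
}

fn main() {
    let mut slab = DemoSlab { backing: vec![0u8; 1024] };

    // Copy a slice; the record reports where the copy actually landed.
    let vertices = [Vertex { x: 0.0, y: 1.0, z: 2.0 }; 4];
    let record = presser::copy_from_slice_to_offset(&vertices[..], &mut slab, 0).unwrap();
    println!(
        "slice copied at byte {}, next free (padded) byte {}",
        record.copy_start_offset, record.copy_end_offset_padded
    );

    // Copy a single value (e.g. a constant-buffer struct), starting after the slice.
    let single = Vertex { x: 3.0, y: 4.0, z: 5.0 };
    let record =
        presser::copy_to_offset(&single, &mut slab, record.copy_end_offset_padded).unwrap();
    println!("single value copied at byte {}", record.copy_start_offset);
}
```

The returned `CopyRecord` is the key piece: `copy_start_offset` is where the data actually landed after any alignment padding, and `copy_end_offset_padded` is where a subsequent copy can safely start, which is exactly how the imgui renderer changes below chain the vertex and index copies.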
86 changes: 35 additions & 51 deletions examples/vulkan-visualization/imgui_renderer.rs
@@ -309,7 +309,7 @@ impl ImGuiRenderer {
let image_view = unsafe { device.create_image_view(&view_create_info, None) }?;

// Create upload buffer
let (upload_buffer, upload_buffer_memory) = {
let (upload_buffer, mut upload_buffer_memory) = {
let create_info = vk::BufferCreateInfo::builder()
.size((font_atlas.width * font_atlas.height * 4) as u64)
.usage(vk::BufferUsageFlags::TRANSFER_SRC);
@@ -338,14 +338,10 @@ impl ImGuiRenderer {
};

// Copy font data to upload buffer
let dst = upload_buffer_memory.mapped_ptr().unwrap().cast().as_ptr();
unsafe {
std::ptr::copy_nonoverlapping(
font_atlas.data.as_ptr(),
dst,
(font_atlas.width * font_atlas.height * 4) as usize,
);
}
let copy_record =
presser::copy_from_slice_to_offset(font_atlas.data, &mut upload_buffer_memory, 0)
.unwrap();
assert_eq!(copy_record.copy_start_offset, 0);

// Copy upload buffer to image
record_and_submit_command_buffer(
@@ -633,13 +629,9 @@ impl ImGuiRenderer {
],
};

unsafe {
std::ptr::copy_nonoverlapping(
&cbuffer_data,
self.cb_allocation.mapped_ptr().unwrap().cast().as_ptr(),
1,
)
};
let copy_record =
presser::copy_to_offset(&cbuffer_data, &mut self.cb_allocation, 0).unwrap();
assert_eq!(copy_record.copy_start_offset, 0);
}

let render_pass_begin_info = vk::RenderPassBeginInfo::builder()
@@ -716,49 +708,41 @@ impl ImGuiRenderer {
let mut ib_offset = 0;

for draw_list in imgui_draw_data.draw_lists() {
unsafe {
device.cmd_bind_vertex_buffers(
cmd,
0,
&[self.vertex_buffer],
&[vb_offset as u64 * std::mem::size_of::<imgui::DrawVert>() as u64],
)
};
unsafe {
device.cmd_bind_index_buffer(
cmd,
self.index_buffer,
ib_offset as u64 * std::mem::size_of::<imgui::DrawIdx>() as u64,
vk::IndexType::UINT16,
)
};

{
let vertices = draw_list.vtx_buffer();
let dst_ptr = self
.vb_allocation
.mapped_ptr()
.unwrap()
.cast::<imgui::DrawVert>()
.as_ptr();
let dst_ptr = unsafe { dst_ptr.offset(vb_offset) };
let copy_record = presser::copy_from_slice_to_offset(
vertices,
&mut self.vb_allocation,
vb_offset,
)
.unwrap();
vb_offset = copy_record.copy_end_offset_padded;

unsafe {
std::ptr::copy_nonoverlapping(vertices.as_ptr(), dst_ptr, vertices.len())
device.cmd_bind_vertex_buffers(
cmd,
0,
&[self.vertex_buffer],
&[copy_record.copy_start_offset as _],
)
};
vb_offset += vertices.len() as isize;
}

{
let indices = draw_list.idx_buffer();
let dst_ptr = self
.ib_allocation
.mapped_ptr()
.unwrap()
.cast::<imgui::DrawIdx>()
.as_ptr();
let dst_ptr = unsafe { dst_ptr.offset(ib_offset) };
unsafe { std::ptr::copy_nonoverlapping(indices.as_ptr(), dst_ptr, indices.len()) };
ib_offset += indices.len() as isize;
let copy_record =
presser::copy_from_slice_to_offset(indices, &mut self.ib_allocation, ib_offset)
.unwrap();
ib_offset = copy_record.copy_end_offset_padded;

unsafe {
device.cmd_bind_index_buffer(
cmd,
self.index_buffer,
copy_record.copy_start_offset as _,
vk::IndexType::UINT16,
)
};
}

for command in draw_list.commands() {
166 changes: 166 additions & 0 deletions src/vulkan/mod.rs
@@ -35,6 +35,90 @@ pub struct AllocatorCreateDesc {
pub buffer_device_address: bool,
}

/// A piece of allocated memory.
///
/// Could be contained in its own individual underlying memory object or as a sub-region
/// of a larger allocation.
///
/// # Copying data into a CPU-mapped [`Allocation`]
///
/// You'll very likely want to copy data into CPU-mapped [`Allocation`]s in order to send that data to the GPU.
/// Doing this data transfer correctly without invoking undefined behavior can be quite fraught and non-obvious<sup>[\[1\]]</sup>.
///
/// To help you do this correctly, [`Allocation`] implements [`presser::Slab`], which means you can directly
/// pass it in to many of `presser`'s [helper functions] (for example, [`copy_from_slice_to_offset`]).
///
/// In most cases, this will work perfectly. However, note that if you try to use an [`Allocation`] as a
/// [`Slab`] and it is not valid to do so (if it is not CPU-mapped or if its `size > isize::MAX`),
/// you will cause a panic. If you aren't sure about these conditions, you may use [`Allocation::try_as_mapped_slab`].
///
/// ## Example
///
/// Say we've created an [`Allocation`] called `my_allocation`, which is CPU-mapped.
/// ```ignore
/// let mut my_allocation: Allocation = my_allocator.allocate(...)?;
/// ```
///
/// And we want to fill it with some data in the form of a `my_gpu_data: Vec<MyGpuVector>`, defined as such:
///
/// ```ignore
/// // note: 12 bytes of field data but align(16), so the type has 4 trailing padding bytes.
/// // this would mean a `&[MyGpuVector]` is invalid to cast as a `&[u8]`, but
/// // we can still use `presser` to copy it directly in a valid manner.
/// #[repr(C, align(16))]
/// #[derive(Clone, Copy)]
/// struct MyGpuVector {
/// x: f32,
/// y: f32,
/// z: f32,
/// }
///
/// let my_gpu_data: Vec<MyGpuVector> = make_vertex_data();
/// ```
///
/// Depending on how the data we're copying will be used, the vulkan device may have a minimum
/// alignment requirement for that data:
///
/// ```ignore
/// let min_gpu_align = my_vulkan_device_specifications.min_alignment_thing;
/// ```
///
/// Finally, we can use [`presser::copy_from_slice_to_offset_with_align`] to perform the copy,
/// simply passing `&mut my_allocation` since [`Allocation`] implements [`Slab`].
///
/// ```ignore
/// let copy_record = presser::copy_from_slice_to_offset_with_align(
/// &my_gpu_data[..], // a slice containing all elements of my_gpu_data
/// &mut my_allocation, // our Allocation
/// 0, // start as close to the beginning of the allocation as possible
/// min_gpu_align, // the minimum alignment we queried previously
/// )?;
/// ```
///
/// It's important to note that the data may not have actually been copied starting at the requested
/// `start_offset` (0 in the example above) depending on the alignment of the underlying allocation
/// as well as the alignment requirements of `MyGpuVector` and the `min_gpu_align` we passed in. Thus,
/// we can query the `copy_record` for the actual starting offset:
///
/// ```ignore
/// let actual_data_start_offset = copy_record.copy_start_offset;
/// ```
///
/// ## Safety
///
/// It is technically not fully safe to use an [`Allocation`] as a [`presser::Slab`] because we can't validate that the
/// GPU is not using the data in the buffer while `self` is borrowed. However, trying
/// to validate this statically is really hard and the community has basically decided that
/// requiring `unsafe` for functions like this creates too much "unsafe-noise", ultimately making it
/// harder to debug more insidious unsafety that is unrelated to GPU-CPU sync issues.
///
/// So, as is always the case when sharing memory with the GPU, you must ensure the GPU is not using
/// the data in `self` for the duration that you borrow it as a [`Slab`] or hold a [`MappedAllocationSlab`] created from it.
///
/// [`Slab`]: presser::Slab
/// [`copy_from_slice_to_offset`]: presser::copy_from_slice_to_offset
/// [helper functions]: presser#functions
/// [\[1\]]: presser#motivation
#[derive(Debug)]
pub struct Allocation {
chunk_id: Option<std::num::NonZeroU64>,
@@ -57,6 +141,39 @@ unsafe impl Send for Allocation {}
unsafe impl Sync for Allocation {}

impl Allocation {
/// Tries to borrow the CPU-mapped memory that backs this allocation as a [`presser::Slab`], which you can then
/// use to safely copy data into the raw, potentially-uninitialized buffer.
/// See [the documentation of Allocation][Allocation#example] for an example of this.
///
/// Returns [`None`] if `self.mapped_ptr()` is `None`, or if `self.size()` is greater than `isize::MAX` because
/// this could lead to undefined behavior.
///
/// Note that [`Allocation`] implements [`Slab`] natively, so you can actually pass this allocation as a [`Slab`]
/// directly. However, if `self` is not actually a valid [`Slab`] (this function would return `None` as described above),
/// then trying to use it as a [`Slab`] will panic.
///
/// # Safety
///
/// See the note about safety in [the documentation of Allocation][Allocation#safety].
///
/// [`Slab`]: presser::Slab
pub fn try_as_mapped_slab(&mut self) -> Option<MappedAllocationSlab<'_>> {
let mapped_ptr = self.mapped_ptr()?.cast().as_ptr();

if self.size > isize::MAX as _ {
return None;
}

// this will always succeed since size is <= isize::MAX which is < usize::MAX
let size = self.size as usize;

Some(MappedAllocationSlab {
_borrowed_alloc: self,
mapped_ptr,
size,
})
}

pub fn chunk_id(&self) -> Option<std::num::NonZeroU64> {
self.chunk_id
}
@@ -126,6 +243,55 @@ impl Default for Allocation {
}
}

/// A wrapper struct over a borrowed [`Allocation`] that infallibly implements [`presser::Slab`].
///
/// This type should be acquired by calling [`Allocation::try_as_mapped_slab`].
pub struct MappedAllocationSlab<'a> {
_borrowed_alloc: &'a mut Allocation,

Review comment: If we don't actually use `_borrowed_alloc`, we can hold a `PhantomData<&'a mut Allocation>` instead; it gives the same ownership semantics without actually using any memory (see the sketch after this struct).

mapped_ptr: *mut u8,
size: usize,
}
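A sketch of the reviewer's suggestion above; this is hypothetical and not part of the diff, but it shows how a `PhantomData` marker can carry the exclusive borrow without storing the reference itself:

```rust
use std::marker::PhantomData;

pub struct MappedAllocationSlab<'a> {
    // Ties the slab to the mutable borrow of the Allocation for 'a without
    // taking up any space, since the reference itself is never read.
    _borrowed_alloc: PhantomData<&'a mut Allocation>,
    mapped_ptr: *mut u8,
    size: usize,
}
```

`try_as_mapped_slab` would presumably then construct the struct with `_borrowed_alloc: PhantomData`; since its signature still takes `&mut self` and returns a type parameterized by that lifetime, the borrow-checking behavior is unchanged.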

// SAFETY: See the safety comment of Allocation::try_as_mapped_slab above.
unsafe impl<'a> presser::Slab for MappedAllocationSlab<'a> {
fn base_ptr(&self) -> *const u8 {
self.mapped_ptr
}

fn base_ptr_mut(&mut self) -> *mut u8 {
self.mapped_ptr
}

fn size(&self) -> usize {
self.size
}
}

// SAFETY: See the safety comment of Allocation::try_as_mapped_slab above.
unsafe impl presser::Slab for Allocation {
fn base_ptr(&self) -> *const u8 {
self.mapped_ptr
.expect("tried to use a non-mapped Allocation as a Slab")
.as_ptr()
.cast()
}

fn base_ptr_mut(&mut self) -> *mut u8 {
self.mapped_ptr
.expect("tried to use a non-mapped Allocation as a Slab")
.as_ptr()
.cast()
}

fn size(&self) -> usize {
if self.size > isize::MAX as _ {
panic!("tried to use an Allocation with size > isize::MAX as a Slab")
}
// this will always work if the above passed
self.size as usize
}
}

#[derive(Debug)]
pub(crate) struct MemoryBlock {
pub(crate) device_memory: vk::DeviceMemory,
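Finally, a hedged usage sketch of the new `try_as_mapped_slab` entry point added above, written in the same `ignore` style as the doc examples in this file; `my_allocation`, `vertex_data`, and the fallback branch are illustrative and not part of this PR.

```ignore
// Assumed setup: `my_allocation` is an Allocation from a gpu-allocator Allocator
// backed by host-visible memory, and `vertex_data` is a `&[MyGpuVector]`.
if let Some(mut slab) = my_allocation.try_as_mapped_slab() {
    // The slab mutably borrows the allocation, so it cannot be freed or
    // aliased on the CPU side while we hold it.
    let copy_record = presser::copy_from_slice_to_offset(vertex_data, &mut slab, 0)?;
    // Record GPU commands using the offset the copy actually landed at.
    let vertex_buffer_offset = copy_record.copy_start_offset as u64;
    // ... bind the buffer / build descriptors with `vertex_buffer_offset` ...
} else {
    // Not CPU-mapped (or size > isize::MAX): fall back to a staging buffer.
}
```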