diff --git a/CHANGELOG.md b/CHANGELOG.md
index be9c1ba..56a8325 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -18,6 +18,9 @@
 - removed the Completed Work section from `INVENTORY.md` and documented its use
 - added `Bytes::try_unwrap_owner` to reclaim the owner when uniquely held
 - simplified `Bytes::try_unwrap_owner` implementation
+- added `ByteBuffer` for owning aligned byte allocations
+- compile-time assertion that `ALIGN` is a power of two
+- added `reserve_total` to `ByteBuffer` for reserving absolute capacity
 - fixed potential UB in `Bytes::try_unwrap_owner` for custom owners
 - prevent dangling `data` by dropping references before unwrapping the owner
 - refined `Bytes::try_unwrap_owner` to cast the data slice to a pointer only
diff --git a/src/buffer.rs b/src/buffer.rs
new file mode 100644
index 0000000..936c9a8
--- /dev/null
+++ b/src/buffer.rs
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * Copyright (c) Jan-Paul Bultmann
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+*/
+
+//! Owned byte buffer with fixed alignment.
+
+use core::alloc::Layout;
+use core::ops::{Deref, DerefMut};
+use core::ptr::{self, NonNull};
+
+/// A raw byte buffer with a fixed alignment.
+///
+/// `ByteBuffer` owns its allocation and guarantees that the backing
+/// memory is aligned to `ALIGN` bytes.
+#[derive(Debug)]
+pub struct ByteBuffer<const ALIGN: usize> {
+    ptr: NonNull<u8>,
+    len: usize,
+    cap: usize,
+}
+
+// SAFETY: the buffer exclusively owns its allocation and only hands out
+// references tied to `&self`/`&mut self`, so transferring or sharing it
+// across threads is sound.
+unsafe impl<const ALIGN: usize> Send for ByteBuffer<ALIGN> {}
+unsafe impl<const ALIGN: usize> Sync for ByteBuffer<ALIGN> {}
+
+impl<const ALIGN: usize> ByteBuffer<ALIGN> {
+    // NOTE: an associated const in a generic impl is evaluated only when
+    // it is *referenced* for a concrete `ALIGN`, so the constructors read
+    // it explicitly below. Without those references the assert would
+    // never fire and a non-power-of-two `ALIGN` would reach `Layout`
+    // construction unchecked.
+    const _ASSERT_POWER_OF_TWO: () =
+        assert!(ALIGN.is_power_of_two(), "ALIGN must be power-of-two");
+
+    /// Create an empty buffer.
+    ///
+    /// Does not allocate; `ptr` stays dangling until the first growth.
+    pub const fn new() -> Self {
+        // Force the compile-time power-of-two check for this `ALIGN`.
+        let _: () = Self::_ASSERT_POWER_OF_TWO;
+        Self {
+            ptr: NonNull::dangling(),
+            len: 0,
+            cap: 0,
+        }
+    }
+
+    /// Create a buffer with the given capacity.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `cap`, rounded up to a multiple of `ALIGN`, overflows
+    /// `isize::MAX` (see [`Layout::from_size_align`]).
+    pub fn with_capacity(cap: usize) -> Self {
+        // `new()` also forces the compile-time power-of-two check.
+        let mut buf = Self::new();
+        if cap > 0 {
+            buf.grow_to(cap);
+        }
+        buf
+    }
+
+    /// Current length of the buffer.
+    pub fn len(&self) -> usize {
+        self.len
+    }
+
+    /// Returns `true` if the buffer contains no bytes.
+    pub fn is_empty(&self) -> bool {
+        self.len == 0
+    }
+
+    /// Capacity of the buffer.
+    pub fn capacity(&self) -> usize {
+        self.cap
+    }
+
+    /// Grow the allocation to exactly `new_cap` bytes.
+    ///
+    /// Callers guarantee `new_cap > self.cap`. Aborts via the global
+    /// allocation-error handler on OOM.
+    fn grow_to(&mut self, new_cap: usize) {
+        debug_assert!(new_cap > self.cap);
+        // Checked construction panics (instead of invoking UB) when
+        // `new_cap` rounded up to `ALIGN` would overflow `isize::MAX`.
+        let new_layout = Layout::from_size_align(new_cap, ALIGN).expect("invalid layout");
+        let new_ptr = if self.cap == 0 {
+            // SAFETY: `new_layout` has non-zero size (`new_cap > 0`).
+            unsafe { std::alloc::alloc(new_layout) }
+        } else {
+            // SAFETY: `ptr` was allocated with exactly this layout, and
+            // `new_layout` above already validated the new size.
+            unsafe {
+                let old = Layout::from_size_align_unchecked(self.cap, ALIGN);
+                std::alloc::realloc(self.ptr.as_ptr(), old, new_cap)
+            }
+        };
+        // On allocation failure defer to the global handler rather than
+        // panicking with a bare `expect`.
+        self.ptr =
+            NonNull::new(new_ptr).unwrap_or_else(|| std::alloc::handle_alloc_error(new_layout));
+        self.cap = new_cap;
+    }
+
+    /// Ensure that the buffer can hold at least `total` bytes.
+    ///
+    /// Does nothing if the current capacity is already sufficient.
+    pub fn reserve_total(&mut self, total: usize) {
+        if total > self.cap {
+            self.grow_to(total);
+        }
+    }
+
+    /// Ensure room for at least `additional` more bytes, doubling the
+    /// capacity so that `push` stays amortized O(1).
+    #[inline]
+    fn reserve_more(&mut self, additional: usize) {
+        let needed = self.len.checked_add(additional).expect("overflow");
+        if needed > self.cap {
+            // `saturating_mul` avoids overflow on huge capacities; the
+            // layout check in `grow_to` still bounds the final size.
+            self.grow_to(core::cmp::max(self.cap.saturating_mul(2), needed));
+        }
+    }
+
+    /// Push a byte to the end of the buffer.
+    pub fn push(&mut self, byte: u8) {
+        self.reserve_more(1);
+        // SAFETY: `reserve_more` guarantees `len < cap`, so the write is
+        // inside the allocation.
+        unsafe {
+            ptr::write(self.ptr.as_ptr().add(self.len), byte);
+        }
+        self.len += 1;
+    }
+
+    /// Returns a raw pointer to the buffer's memory.
+    ///
+    /// NOTE: for a buffer that has never allocated (`capacity() == 0`)
+    /// this is a dangling pointer and is not guaranteed to be
+    /// `ALIGN`-aligned; it must not be dereferenced.
+    pub fn as_ptr(&self) -> *const u8 {
+        self.ptr.as_ptr()
+    }
+}
+
+impl<const ALIGN: usize> Default for ByteBuffer<ALIGN> {
+    /// Equivalent to [`ByteBuffer::new`].
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<const ALIGN: usize> Drop for ByteBuffer<ALIGN> {
+    fn drop(&mut self) {
+        // `cap == 0` means `ptr` is dangling and nothing was allocated.
+        if self.cap != 0 {
+            // SAFETY: `ptr` was allocated with exactly this layout; the
+            // size/alignment were validated when the allocation was made.
+            unsafe {
+                let layout = Layout::from_size_align_unchecked(self.cap, ALIGN);
+                std::alloc::dealloc(self.ptr.as_ptr(), layout);
+            }
+        }
+    }
+}
+
+impl<const ALIGN: usize> Deref for ByteBuffer<ALIGN> {
+    type Target = [u8];
+    fn deref(&self) -> &[u8] {
+        // SAFETY: the first `len` bytes are always initialized by `push`.
+        unsafe { core::slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
+    }
+}
+
+impl<const ALIGN: usize> DerefMut for ByteBuffer<ALIGN> {
+    fn deref_mut(&mut self) -> &mut [u8] {
+        // SAFETY: as in `deref`; `&mut self` guarantees exclusive access.
+        unsafe { core::slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
+    }
+}
+
+impl<const ALIGN: usize> AsRef<[u8]> for ByteBuffer<ALIGN> {
+    fn as_ref(&self) -> &[u8] {
+        self
+    }
+}
+
+impl<const ALIGN: usize> AsMut<[u8]> for ByteBuffer<ALIGN> {
+    fn as_mut(&mut self) -> &mut [u8] {
+        self
+    }
+}
diff --git a/src/lib.rs b/src/lib.rs
index 4fc8111..2acd048 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -9,6 +9,7 @@
 #![doc = include_str!("../README.md")]
 #![warn(missing_docs)]
 
+pub mod buffer;
 /// Core byte container types and traits.
pub mod bytes; mod sources; @@ -28,6 +29,7 @@ pub mod winnow; #[cfg(test)] mod tests; +pub use crate::buffer::ByteBuffer; pub use crate::bytes::ByteOwner; pub use crate::bytes::ByteSource; pub use crate::bytes::Bytes; diff --git a/src/sources.rs b/src/sources.rs index 68a15f2..eb1c04d 100644 --- a/src/sources.rs +++ b/src/sources.rs @@ -42,7 +42,7 @@ use zerocopy::Immutable; use zerocopy::IntoBytes; #[allow(unused_imports)] -use crate::{bytes::ByteOwner, ByteSource}; +use crate::{buffer::ByteBuffer, bytes::ByteOwner, ByteSource}; #[cfg(feature = "zerocopy")] unsafe impl ByteSource for &'static [T] @@ -133,6 +133,18 @@ unsafe impl ByteSource for Vec { } } +unsafe impl ByteSource for ByteBuffer { + type Owner = Self; + + fn as_bytes(&self) -> &[u8] { + self.as_ref() + } + + fn get_owner(self) -> Self::Owner { + self + } +} + unsafe impl ByteSource for String { type Owner = Self; diff --git a/src/tests.rs b/src/tests.rs index eebf021..f4d8903 100644 --- a/src/tests.rs +++ b/src/tests.rs @@ -302,3 +302,40 @@ fn test_cow_zerocopy_borrowed_source() { zerocopy::IntoBytes::as_bytes(borrowed.as_ref()) ); } + +#[test] +fn test_bytebuffer_push_and_bytes() { + use crate::ByteBuffer; + + let mut buf: ByteBuffer<8> = ByteBuffer::with_capacity(2); + buf.push(1); + buf.push(2); + buf.push(3); + assert_eq!(buf.as_ref(), &[1, 2, 3]); + + let bytes: Bytes = buf.into(); + assert_eq!(bytes.as_ref(), &[1, 2, 3]); +} + +#[test] +fn test_bytebuffer_alignment() { + use crate::ByteBuffer; + + let mut buf: ByteBuffer<64> = ByteBuffer::with_capacity(1); + buf.push(1); + assert_eq!((buf.as_ptr() as usize) % 64, 0); +} + +#[test] +fn test_bytebuffer_reserve_total() { + use crate::ByteBuffer; + + let mut buf: ByteBuffer<8> = ByteBuffer::new(); + buf.reserve_total(10); + assert!(buf.capacity() >= 10); + for _ in 0..10 { + buf.push(1); + } + assert_eq!(buf.len(), 10); + assert!(buf.capacity() >= 10); +}