Commit 25b9b06

CAS Pool x86_64: lazily initialized ANCHOR

This reduces the chances of hitting the 32-bit address space issue on x86_64.

Instead of always using a static ANCHOR variable located in .bss, we lazily initialize the ANCHOR variable with the value passed to the first `Ptr::new` invocation. In practice, this means the very first `Pool::grow` call (on x86_64) is guaranteed to work, i.e. to use the given memory. Follow-up `grow` invocations are *more likely* to work (but not guaranteed) *if* all the given memory comes from the heap.

We still need an ANCHOR in .bss as a fallback because it is possible to allocate ZSTs on a pool without ever calling `Pool::grow` (i.e. the lazily initialized ANCHOR is never written *but* it can still be read).
1 parent 9563b35 commit 25b9b06
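
For context, a minimal standalone sketch (not heapless code; the helper name is made up) of the constraint the message refers to: `Ptr` encodes a pointer as a signed 32-bit offset from the anchor, so only addresses within roughly ±2 GiB of the anchor are representable.

fn fits_in_32_bit_offset(anchor: *mut u8, p: *mut u8) -> bool {
    use core::convert::TryFrom;

    // A pointer is encodable only if its distance from the anchor fits in an i32.
    i32::try_from((p as isize).wrapping_sub(anchor as isize)).is_ok()
}

With a fixed anchor in .bss, heap memory handed to `Pool::grow` can easily be farther away than that; seeding the anchor with the first pointer makes that first offset zero by construction.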

File tree

2 files changed (+25, -5 lines)


Cargo.toml

Lines changed: 3 additions & 0 deletions
@@ -35,6 +35,9 @@ atomic-polyfill = { version = "0.1.2", optional = true }
 [dependencies]
 hash32 = "0.2.1"
 
+[target.'cfg(target_arch = "x86_64")'.dependencies]
+spin = "0.9.2"
+
 [dependencies.serde]
 version = "1"
 optional = true
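
The new x86_64-only dependency is used for `spin::Once`, the one-time cell that backs the lazy anchor. A minimal standalone sketch of the semantics relied on here (not heapless code):

use spin::Once;

static CELL: Once<usize> = Once::new();

fn demo() {
    // Only the first caller's closure runs; its result is stored and shared.
    let first = *CELL.call_once(|| 0xdead_beef);
    // Later readers can check whether the cell has been initialized.
    let later = CELL.get().copied();
    assert_eq!(later, Some(first));
}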

src/pool/cas.rs

Lines changed: 22 additions & 5 deletions
@@ -87,9 +87,26 @@ impl<T> Stack<T> {
 }
 
 #[cfg(target_arch = "x86_64")]
-fn anchor<T>() -> *mut T {
-    static mut ANCHOR: u8 = 0;
-    (unsafe { &mut ANCHOR } as *mut u8 as usize & !(core::mem::align_of::<T>() - 1)) as *mut T
+fn anchor<T>(init: Option<*mut T>) -> *mut T {
+    use core::sync::atomic::AtomicU8;
+
+    use spin::Once;
+
+    static LAZY_ANCHOR: Once<usize> = Once::new();
+
+    let likely_unaligned_address = if let Some(init) = init {
+        *LAZY_ANCHOR.call_once(|| init as usize)
+    } else {
+        LAZY_ANCHOR.get().copied().unwrap_or_else(|| {
+            // we may hit this branch with Pool of ZSTs where `grow` does not need to be called
+            static BSS_ANCHOR: AtomicU8 = AtomicU8::new(0);
+            &BSS_ANCHOR as *const _ as usize
+        })
+    };
+
+    let alignment_mask = !(core::mem::align_of::<T>() - 1);
+    let well_aligned_address = likely_unaligned_address & alignment_mask;
+    well_aligned_address as *mut T
 }
 
 /// On x86_64, anchored pointer. This is a (signed) 32-bit offset from `anchor` plus a 32-bit tag
@@ -116,7 +133,7 @@ impl<T> Ptr<T> {
     pub fn new(p: *mut T) -> Option<Self> {
         use core::convert::TryFrom;
 
-        i32::try_from((p as isize).wrapping_sub(anchor::<T>() as isize))
+        i32::try_from((p as isize).wrapping_sub(anchor::<T>(Some(p)) as isize))
             .ok()
             .map(|offset| unsafe { Ptr::from_parts(initial_tag_value(), offset) })
     }
@@ -166,7 +183,7 @@ impl<T> Ptr<T> {
     fn as_raw(&self) -> NonNull<T> {
        unsafe {
            NonNull::new_unchecked(
-               (anchor::<T>() as *mut u8).offset(self.offset() as isize) as *mut T
+               (anchor::<T>(None) as *mut u8).offset(self.offset() as isize) as *mut T,
            )
        }
    }
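
As a sanity check of the guarantee described in the commit message, a small standalone sketch (not heapless code) of the seeding step: when the anchor is initialized from the very first pointer, that pointer's offset is exactly zero, so the first `grow` cannot fail the range check.

use core::convert::TryFrom;

fn main() {
    // Stand-in for the memory block handed to the first `Pool::grow`.
    let mut block = [0u8; 64];
    let p = block.as_mut_ptr();

    // Seeding the anchor with `p` itself makes the stored offset exactly 0.
    let anchor = p;
    let offset = (p as isize).wrapping_sub(anchor as isize);
    assert_eq!(offset, 0);
    assert!(i32::try_from(offset).is_ok());

    // Reconstructing the pointer from (anchor, offset) round-trips to `p`.
    let roundtrip = unsafe { anchor.offset(offset) };
    assert_eq!(roundtrip, p);
}

A pool of ZSTs can hand out pointers without `grow` ever being called, which is why `anchor(None)` still needs the `.bss` fallback shown in the diff above.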
