Skip to content

Commit 9c69c18

Browse files
committed
adjust cache-aligned alloc to detect the cache line size at runtime
1 parent c811bf9 commit 9c69c18

File tree

1 file changed

+39
-6
lines changed

1 file changed

+39
-6
lines changed

src/queue_alloc_helpers.rs

Lines changed: 39 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ cfg_if::cfg_if! {
1313
}
1414

1515
use crate::{YCQueue, YCQueueError, YCQueueSharedMeta};
16+
use std::mem::size_of;
1617
use std::sync::atomic::{AtomicU16, AtomicU64};
1718
use yep_cache_line_size::{CacheLevel, CacheType, get_cache_line_size};
1819

@@ -27,26 +28,48 @@ fn cache_line_size() -> usize {
2728

2829
/// Cache-line padded atomic wrapper to keep heavily contended values isolated for owned metadata.
#[derive(Debug)]
pub struct CachePaddedAtomicU64 {
    // Over-allocated backing slice; guarantees at least one element whose
    // address falls on a runtime-detected cache-line boundary.
    storage: Box<[AtomicU64]>,
    // Index into `storage` of the cache-line-aligned element that `Deref`
    // and `DerefMut` hand out.
    aligned_index: usize,
}
3235

3336
impl CachePaddedAtomicU64 {
34-
pub(crate) const fn new(value: u64) -> Self {
35-
CachePaddedAtomicU64(AtomicU64::new(value))
37+
pub(crate) fn new(value: u64) -> Self {
38+
let line_size = cache_line_size();
39+
let atoms_per_line = (line_size / size_of::<u64>()).max(1);
40+
41+
let mut storage = Vec::with_capacity(atoms_per_line);
42+
for _ in 0..atoms_per_line {
43+
storage.push(AtomicU64::new(value));
44+
}
45+
let storage = storage.into_boxed_slice();
46+
47+
let aligned_index = (0..storage.len())
48+
.find(|i| (unsafe { storage.as_ptr().add(*i) } as usize).is_multiple_of(line_size))
49+
.unwrap_or(0);
50+
51+
debug_assert!(
52+
(unsafe { storage.as_ptr().add(aligned_index) } as usize).is_multiple_of(line_size)
53+
);
54+
55+
CachePaddedAtomicU64 {
56+
storage,
57+
aligned_index,
58+
}
3659
}
3760
}
3861

3962
impl std::ops::Deref for CachePaddedAtomicU64 {
4063
type Target = AtomicU64;
4164

4265
fn deref(&self) -> &Self::Target {
43-
&self.0
66+
&self.storage[self.aligned_index]
4467
}
4568
}
4669

4770
impl std::ops::DerefMut for CachePaddedAtomicU64 {
4871
fn deref_mut(&mut self) -> &mut Self::Target {
49-
&mut self.0
72+
&mut self.storage[self.aligned_index]
5073
}
5174
}
5275

@@ -336,6 +359,16 @@ mod queue_alloc_helpers_tests {
336359
use super::*;
337360
use std::sync::atomic::Ordering;
338361

362+
#[test]
fn cache_padded_atomic_is_aligned() {
    let padded = CachePaddedAtomicU64::new(7);
    let line_size = cache_line_size();

    // The Deref target must land exactly on a runtime-detected line boundary.
    let addr = (&*padded as *const AtomicU64) as usize;
    assert_eq!(addr % line_size, 0);

    // The initial value must round-trip through the wrapper.
    assert_eq!(padded.load(Ordering::Relaxed), 7);
}
371+
339372
#[test]
340373
fn test_shared_meta() {
341374
let slot_count: u16 = 128;

0 commit comments

Comments
 (0)