24 changes: 24 additions & 0 deletions .github/workflows/nostd.yml
@@ -0,0 +1,24 @@
on:
push:
branches: [main]
pull_request:
name: no-std
jobs:
nostd:
runs-on: ubuntu-latest
name: ${{ matrix.target }}
strategy:
matrix:
target: [thumbv7m-none-eabi, aarch64-unknown-none]
steps:
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ matrix.target }}
- uses: actions/checkout@v2
- name: cargo check
uses: actions-rs/cargo@v1
with:
command: check
args: --target ${{ matrix.target }} --no-default-features
4 changes: 4 additions & 0 deletions Cargo.toml
@@ -14,5 +14,9 @@ categories = ["concurrency"]
[dependencies]
slab = "0.4"

[features]
default = ["std"]
std = []

[target.'cfg(loom)'.dependencies]
loom = "0.4.0"
29 changes: 16 additions & 13 deletions src/aliasing.rs
@@ -128,9 +128,10 @@
//!
//! But this warrants repeating: **your `D` types for `Aliased` _must_ be private**.

use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::ops::Deref;
use alloc::{boxed::Box, string::String, vec::Vec};
use core::marker::PhantomData;
use core::mem::MaybeUninit;
use core::ops::Deref;

// Just to make the doc comment linking work.
#[allow(unused_imports)]
@@ -183,7 +184,7 @@ where
// a) the T is behind a MaybeUninit, and so cannot be accessed safely; and
// b) we only expose _either_ &T while aliased, or &mut after the aliasing ends.
Aliased {
aliased: std::ptr::read(&self.aliased),
aliased: core::ptr::read(&self.aliased),
drop_behavior: PhantomData,
_no_auto_send: PhantomData,
}
@@ -211,7 +212,7 @@
pub unsafe fn change_drop<D2: DropBehavior>(self) -> Aliased<T, D2> {
Aliased {
// safety:
aliased: std::ptr::read(&self.aliased),
aliased: core::ptr::read(&self.aliased),
drop_behavior: PhantomData,
_no_auto_send: PhantomData,
}
@@ -247,7 +248,7 @@ where
// That T has not been dropped (getting an Aliased<T, DoDrop> is unsafe).
// T is no longer aliased (by the safety assumption of getting an Aliased<T, DoDrop>),
// so we are allowed to re-take ownership of the T.
unsafe { std::ptr::drop_in_place(self.aliased.as_mut_ptr()) }
unsafe { core::ptr::drop_in_place(self.aliased.as_mut_ptr()) }
}
}
}
@@ -276,7 +277,7 @@ where
}
}

use std::hash::{Hash, Hasher};
use core::hash::{Hash, Hasher};
impl<T, D> Hash for Aliased<T, D>
where
D: DropBehavior,
@@ -290,7 +291,7 @@ where
}
}

use std::fmt;
use core::fmt;
impl<T, D> fmt::Debug for Aliased<T, D>
where
D: DropBehavior,
@@ -323,7 +324,7 @@ where
D: DropBehavior,
T: PartialOrd,
{
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
self.as_ref().partial_cmp(other.as_ref())
}

@@ -349,12 +350,12 @@
D: DropBehavior,
T: Ord,
{
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
self.as_ref().cmp(other.as_ref())
}
}

use std::borrow::Borrow;
use core::borrow::Borrow;
impl<T, D> Borrow<T> for Aliased<T, D>
where
D: DropBehavior,
@@ -385,6 +386,8 @@ where
self.as_ref()
}
}

#[cfg(feature = "std")]
impl<D> Borrow<std::path::Path> for Aliased<std::path::PathBuf, D>
where
D: DropBehavior,
@@ -410,7 +413,7 @@ where
self.as_ref()
}
}
impl<T, D> Borrow<T> for Aliased<std::sync::Arc<T>, D>
impl<T, D> Borrow<T> for Aliased<alloc::sync::Arc<T>, D>
where
T: ?Sized,
D: DropBehavior,
Expand All @@ -419,7 +422,7 @@ where
self.as_ref()
}
}
impl<T, D> Borrow<T> for Aliased<std::rc::Rc<T>, D>
impl<T, D> Borrow<T> for Aliased<alloc::rc::Rc<T>, D>
where
T: ?Sized,
D: DropBehavior,
227 changes: 227 additions & 0 deletions src/handle_list.rs
@@ -0,0 +1,227 @@
use core::fmt::{Debug, Formatter};

use crate::sync::{Arc, AtomicPtr, AtomicUsize, Ordering};
use alloc::boxed::Box;

// TODO
// * For now I'm just using Ordering::SeqCst, because I haven't really looked into what exactly we
// need for the Ordering, so this should probably be made more accurate in the future

/// A Lock-Free List of Handles
pub struct HandleList {
inner: Arc<InnerList>,
}

struct InnerList {
// The Head of the List
head: AtomicPtr<ListEntry>,
}

/// A Snapshot of the HandleList
///
/// Iterating over this Snapshot only yields the Entries that were present when this Snapshot was taken
pub struct ListSnapshot {
// The Head-Ptr at the time of creation
head: *const ListEntry,

// This entry exists to make sure that we keep the inner List alive and it won't be freed out from under us
_list: Arc<InnerList>,
}

/// An Iterator over the Entries in a Snapshot
pub struct SnapshotIter {
// A Pointer to the next Entry that will be yielded
current: *const ListEntry,
}

struct ListEntry {
data: Arc<AtomicUsize>,
// We can use a normal Ptr here because we never append or remove Entries and only add new Entries
// by changing the Head, so we never modify this Ptr and therefore don't need an AtomicPtr
next: *const Self,
}

impl HandleList {
/// Creates a new empty HandleList
pub fn new() -> Self {
Self {
inner: Arc::new(InnerList {
head: AtomicPtr::new(core::ptr::null_mut()),
}),
}
}

/// Adds a new Entry to the List and returns the Counter for the Entry
pub fn new_entry(&self) -> Arc<AtomicUsize> {
let count = Arc::new(AtomicUsize::new(0));

self.add_counter(count.clone());
count
}
fn add_counter(&self, count: Arc<AtomicUsize>) {
let n_node = Box::new(ListEntry {
data: count,
next: core::ptr::null(),
});
let n_node_ptr = Box::into_raw(n_node);

let mut current_head = self.inner.head.load(Ordering::SeqCst);
loop {
// Safety
// This is safe, because we have not stored the Ptr elsewhere, so we have exclusive
// access.
// The Ptr is also still valid, because we never free Entries on the List
unsafe { (*n_node_ptr).next = current_head };

// Attempt to add the Entry to the List by setting it as the new Head
match self.inner.head.compare_exchange(
current_head,
n_node_ptr,
Ordering::SeqCst,
Ordering::SeqCst,
) {
Ok(_) => return,
Err(n_head) => {
// Store the found Head-Ptr to avoid an extra load at the start of every loop
current_head = n_head;
}
}
}
}

/// Creates a new Snapshot of the List at this Point in Time
pub fn snapshot(&self) -> ListSnapshot {
ListSnapshot {
head: self.inner.head.load(Ordering::SeqCst),
_list: self.inner.clone(),
}
}

/// Inserts the Items of the Iterator, but in reverse order
#[cfg(test)]
pub fn extend<I>(&self, iter: I)
where
I: IntoIterator<Item = Arc<AtomicUsize>>,
{
for item in iter.into_iter() {
self.add_counter(item);
}
}
}

impl Default for HandleList {
fn default() -> Self {
Self::new()
}
}
impl Debug for HandleList {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
// TODO
// Figure out how exactly we want the Debug output to look
write!(f, "HandleList")
}
}
impl Clone for HandleList {
fn clone(&self) -> Self {
Self {
inner: Arc::clone(&self.inner),
}
}
}

impl ListSnapshot {
/// Obtain an iterator over the Entries in this Snapshot
pub fn iter(&self) -> SnapshotIter {
SnapshotIter { current: self.head }
}
}

impl Iterator for SnapshotIter {
// TODO
// Maybe don't return an owned Value here
type Item = Arc<AtomicUsize>;

fn next(&mut self) -> Option<Self::Item> {
if self.current.is_null() {
return None;
}

// Safety
// The Ptr is not null, because of the previous if-statement.
// The Data is also not freed, because we never free Entries on the List.
// We also have no one mutating Entries on the List and therefore we can access this without
// any extra synchronization needed.
Owner: This will no longer be true once we have removal of individual elements.

let entry = unsafe { &*self.current };

self.current = entry.next;

Some(entry.data.clone())
}
}

impl Drop for InnerList {
fn drop(&mut self) {
// We iterate over all the Entries of the List and free every Entry of the List
let mut current = self.head.load(Ordering::SeqCst);
while !current.is_null() {
// # Safety
// This is safe, because we only enter the loop body if the Pointer is not null and we
// also know that the Entry is not yet freed because we only free them once we are dropped
// and because we are now in Drop, no one before us has freed any Entry on the List
let current_r = unsafe { &*current };

let next = current_r.next as *mut ListEntry;

// # Safety
// This is safe, because of the same guarantees detailed above for `current_r`
let entry = unsafe { Box::from_raw(current) };
drop(entry);

current = next;
}
}
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn create_list() {
let list = HandleList::new();
drop(list);
}

#[test]
fn empty_snapshot() {
let list = HandleList::new();

let snapshot = list.snapshot();

// Assert that the Iterator over the Snapshot is empty
assert_eq!(0, snapshot.iter().count());
}

#[test]
fn snapshots_and_entries() {
let list = HandleList::new();

let empty_snapshot = list.snapshot();
assert_eq!(0, empty_snapshot.iter().count());

let entry = list.new_entry();
entry.store(1, Ordering::SeqCst);

// Make sure that the Snapshot we got before adding a new Entry is still empty
assert_eq!(0, empty_snapshot.iter().count());

let second_snapshot = list.snapshot();
assert_eq!(1, second_snapshot.iter().count());

let snapshot_entry = second_snapshot.iter().next().unwrap();
assert_eq!(
entry.load(Ordering::SeqCst),
snapshot_entry.load(Ordering::SeqCst)
);
}
}
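A minimal usage sketch based only on the API above (the import path is hypothetical; the module may not be exported): each reader registers a counter via `new_entry()`, and a writer walks a `snapshot()` to see every counter that existed when the snapshot was taken, unaffected by later insertions.

// Hypothetical import path; `handle_list` may remain crate-private.
use left_right::handle_list::HandleList;
use std::sync::atomic::Ordering;

fn sketch() {
    let list = HandleList::new();

    // A reader registers itself and publishes its state through the shared counter.
    let reader_epoch = list.new_entry();
    reader_epoch.store(1, Ordering::SeqCst);

    // A writer snapshots the list; entries added after this point are not visible in it.
    let snapshot = list.snapshot();
    for counter in snapshot.iter() {
        let _epoch = counter.load(Ordering::SeqCst);
    }
}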