|
| 1 | +//! The implementation of [::zerogc::CollectorContext] that is |
| 2 | +//! shared among both thread-safe and thread-unsafe code. |
| 3 | +
|
| 4 | +#[cfg(feature = "sync")] |
| 5 | +mod sync; |
| 6 | +#[cfg(not(feature = "sync"))] |
| 7 | +mod simple; |
| 8 | +#[cfg(feature = "sync")] |
| 9 | +pub use self::sync::*; |
| 10 | +#[cfg(not(feature = "sync"))] |
| 11 | +pub use self::simple::*; |
| 12 | + |
| 13 | +use zerogc::prelude::*; |
| 14 | +use super::{SimpleCollector, RawSimpleCollector, DynTrace}; |
| 15 | +use std::mem::ManuallyDrop; |
| 16 | +use std::ptr::NonNull; |
| 17 | + |
| 18 | + |
/// The current state of a collector context.
///
/// Tracks whether the mutator thread is running freely,
/// blocked at a safepoint, or explicitly frozen.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ContextState {
    /// The context is active.
    ///
    /// Its contents are potentially being mutated,
    /// so the `shadow_stack` doesn't necessarily
    /// reflect the actual set of thread roots.
    ///
    /// New objects could be allocated that are not
    /// actually being tracked in the `shadow_stack`.
    Active,
    /// The context is waiting at a safepoint
    /// for a collection to complete.
    ///
    /// The mutating thread is blocked for the
    /// duration of the safepoint (until collection completes).
    ///
    /// Therefore, its `shadow_stack` is guaranteed to reflect
    /// the actual set of thread roots.
    SafePoint {
        /// The id of the collection we are waiting for
        collection_id: u64
    },
    /// The context is frozen.
    /// Allocation or mutation can't happen
    /// but the mutator thread isn't actually blocked.
    ///
    /// Unlike a safepoint, this is explicitly unfrozen at the
    /// user's discretion.
    ///
    /// Because no allocation or mutation can happen,
    /// its `shadow_stack` is guaranteed to
    /// accurately reflect the roots of the context.
    #[cfg_attr(not(feature = "sync"), allow(unused))] // TODO: Implement frozen for simple contexts?
    Frozen,
}
impl ContextState {
    /// Check whether this context is currently in the
    /// [ContextState::Frozen] state.
    #[cfg_attr(not(feature = "sync"), allow(unused))] // TODO: Implement frozen for simple contexts?
    fn is_frozen(&self) -> bool {
        match *self {
            ContextState::Frozen => true,
            _ => false,
        }
    }
}
| 61 | + |
/*
 * These form a stack of contexts,
 * which all share ownership of a pointer to the RawContext.
 * The raw context is implicitly bound to a single thread
 * and manages the state of all the contexts.
 *
 * https://llvm.org/docs/GarbageCollection.html#the-shadow-stack-gc
 * Essentially these objects maintain a shadow stack
 *
 * The pointer to the RawContext must be Arc, since the
 * collector maintains a weak reference to it.
 * I use double indirection with a `Rc` because I want
 * `recurse_context` to avoid the cost of atomic operations.
 *
 * SimpleCollectorContexts mirror the application stack.
 * They can be stack allocated inside `recurse_context`.
 * All we would need to do is internally track ownership of the original
 * context. The sub-collector in `recurse_context` is very clearly
 * restricted to the lifetime of the closure
 * which is a subset of the parent's lifetime.
 *
 * We still couldn't be Send, since we use interior mutability
 * inside of RawContext that is not thread-safe.
 */
/// A stack-like context for interacting with the collector.
///
/// Contexts created via `recurse_context` share the same
/// `raw` pointer as their parent; only the root context
/// actually owns (and eventually frees) the allocation.
pub struct SimpleCollectorContext {
    // Shared with any sub-contexts created by `recurse_context`;
    // freed in `Drop` only when `root` is true.
    raw: *mut RawContext,
    /// Whether we are the root context
    ///
    /// Only the root actually owns the `Arc`
    /// and is responsible for dropping it
    root: bool
}
impl SimpleCollectorContext {
    /// Create the root context for the specified (thread-unsafe) collector.
    ///
    /// The returned context exclusively owns the underlying `RawContext`
    /// allocation (`root: true`), so its `Drop` impl is responsible for
    /// freeing it via `free_context`.
    #[cfg(not(feature = "sync"))]
    pub(crate) unsafe fn from_collector(collector: &SimpleCollector) -> Self {
        SimpleCollectorContext {
            raw: Box::into_raw(ManuallyDrop::into_inner(
                RawContext::from_collector(collector.0.clone())
            )),
            root: true // We are the exclusive owner
        }
    }
    /// Register a new root context with the (thread-safe) collector.
    ///
    /// The returned context is the root (`root: true`), so its `Drop`
    /// impl is responsible for unregistering it from the collector.
    #[cfg(feature = "sync")]
    pub(crate) unsafe fn register_root(collector: &SimpleCollector) -> Self {
        SimpleCollectorContext {
            raw: Box::into_raw(ManuallyDrop::into_inner(
                RawContext::register_new(&collector.0)
            )),
            root: true, // We are responsible for unregistering
        }
    }
    /// Borrow the raw collector this context belongs to.
    #[inline]
    pub(crate) fn collector(&self) -> &RawSimpleCollector {
        unsafe { &(*self.raw).collector }
    }
    /// Push `value` onto this context's shadow stack for the duration
    /// of `func`, popping it again before returning.
    ///
    /// The link node (`new_link`) lives on the Rust call stack, so the
    /// shadow-stack entry is only valid for this frame — which is why
    /// it must be unlinked before we return.
    ///
    /// NOTE(review): if `func` panics, the link is *not* popped before
    /// unwinding, leaving a dangling entry — presumably a collection
    /// can't run while unwinding; verify, or add a drop guard.
    #[inline(always)]
    unsafe fn with_shadow_stack<R, T: Trace>(
        &self, value: *mut &mut T, func: impl FnOnce() -> R
    ) -> R {
        // Remember the current head of the stack so we can restore it.
        let old_link = (*(*self.raw).shadow_stack.get()).last;
        let new_link = ShadowStackLink {
            // Erase the (stack-bound) lifetime of `value` so it can be
            // stored as a `'static` trait-object pointer.
            element: NonNull::new_unchecked(
                std::mem::transmute::<
                    *mut dyn DynTrace,
                    *mut (dyn DynTrace + 'static)
                >(value as *mut dyn DynTrace)
            ),
            prev: old_link
        };
        (*(*self.raw).shadow_stack.get()).last = &new_link;
        let result = func();
        // Sanity check: `func` must leave the stack balanced,
        // with our link still on top.
        debug_assert_eq!(
            (*(*self.raw).shadow_stack.get()).last,
            &new_link
        );
        // Pop our link, restoring the previous head.
        (*(*self.raw).shadow_stack.get()).last = new_link.prev;
        result
    }
    /// Root `element` on the shadow stack and trigger a safepoint.
    ///
    /// Marked `#[cold]` because `basic_safepoint` only calls this on
    /// the (rare) path where the collector actually wants to collect.
    #[cold]
    unsafe fn trigger_basic_safepoint<T: Trace>(&self, element: &mut &mut T) {
        self.with_shadow_stack(element, || {
            (*self.raw).trigger_safepoint();
        })
    }
}
| 147 | +impl Drop for SimpleCollectorContext { |
| 148 | + #[inline] |
| 149 | + fn drop(&mut self) { |
| 150 | + if self.root { |
| 151 | + unsafe { |
| 152 | + self.collector().free_context(self.raw); |
| 153 | + } |
| 154 | + } |
| 155 | + } |
| 156 | +} |
unsafe impl GcContext for SimpleCollectorContext {
    type System = SimpleCollector;

    /// Check whether a collection is needed and, if so, block this
    /// thread at a safepoint (with `value` rooted) until it completes.
    #[inline]
    unsafe fn basic_safepoint<T: Trace>(&mut self, value: &mut &mut T) {
        debug_assert_eq!((*self.raw).state.get(), ContextState::Active);
        // Fast path: only take the #[cold] safepoint when the
        // collector actually wants to collect.
        if (*self.raw).collector.should_collect() {
            self.trigger_basic_safepoint(value);
        }
        debug_assert_eq!((*self.raw).state.get(), ContextState::Active);
    }

    /// Freeze this context, delegating to the collector's
    /// context manager.
    unsafe fn freeze(&mut self) {
        (*self.raw).collector.manager.freeze_context(&*self.raw);
    }

    /// Unfreeze this context, delegating to the collector's
    /// context manager.
    unsafe fn unfreeze(&mut self) {
        (*self.raw).collector.manager.unfreeze_context(&*self.raw);
    }

    /// Run `func` with a sub-context, rooting `value` on the shadow
    /// stack for its duration.
    ///
    /// The sub-context shares this context's `raw` pointer: it is
    /// stack-allocated, wrapped in `ManuallyDrop`, and explicitly
    /// forgotten (`root: false`), so it can never free the shared state.
    #[inline]
    unsafe fn recurse_context<T, F, R>(&self, value: &mut &mut T, func: F) -> R
        where T: Trace, F: for<'gc> FnOnce(&'gc mut Self, &'gc mut T) -> R {
        debug_assert_eq!((*self.raw).state.get(), ContextState::Active);
        self.with_shadow_stack(value, || {
            let mut sub_context = ManuallyDrop::new(SimpleCollectorContext {
                /*
                 * safe to copy because we wont drop it
                 * Lifetime is guaranteed to be restricted to
                 * the closure.
                 */
                raw: self.raw,
                root: false /* don't drop our pointer!!! */
            });
            let result = func(&mut *sub_context, value);
            debug_assert!(!sub_context.root);
            // No need to run drop code on context.....
            std::mem::forget(sub_context);
            debug_assert_eq!((*self.raw).state.get(), ContextState::Active);
            result
        })
    }
}
| 200 | + |
/// It's not safe for a context to be sent across threads.
///
/// We use (thread-unsafe) interior mutability to maintain the
/// shadow stack. Since we could potentially be cloned via `safepoint_recurse!`,
/// implementing `Send` would allow another thread to obtain a
/// reference to our internal mutable state
/// (presumably a `RefCell`/`Cell` inside `RawContext` — confirm).
/// Further mutation/access from that thread would be undefined behavior.
impl !Send for SimpleCollectorContext {}
| 209 | + |
| 210 | +// |
| 211 | +// Root tracking |
| 212 | +// |
| 213 | + |
/// A single node in the shadow stack's intrusive linked list of roots.
///
/// `#[repr(C)]` pins the field layout, since these links are
/// created on the call stack and traversed through raw pointers.
#[repr(C)]
#[derive(Debug)]
pub(crate) struct ShadowStackLink {
    /// The root object tracked by this link
    pub element: NonNull<dyn DynTrace>,
    /// The previous link in the chain,
    /// or NULL if there isn't any
    pub prev: *const ShadowStackLink
}
| 222 | + |
/// A thread's shadow stack: a linked list of the GC roots
/// currently live on the mutator's call stack
/// (see the LLVM shadow-stack link in the comment above
/// `SimpleCollectorContext`).
#[derive(Clone, Debug)]
pub struct ShadowStack {
    /// The last element in the shadow stack,
    /// or NULL if it's empty
    pub(crate) last: *const ShadowStackLink
}
| 229 | +impl ShadowStack { |
| 230 | + unsafe fn as_vec(&self) -> Vec<*mut dyn DynTrace> { |
| 231 | + let mut result: Vec<_> = self.reverse_iter().collect(); |
| 232 | + result.reverse(); |
| 233 | + result |
| 234 | + } |
| 235 | + #[inline] |
| 236 | + pub(crate) unsafe fn reverse_iter(&self) -> impl Iterator<Item=*mut dyn DynTrace> + '_ { |
| 237 | + std::iter::successors( |
| 238 | + self.last.as_ref(), |
| 239 | + |link| link.prev.as_ref() |
| 240 | + ).map(|link| link.element.as_ptr()) |
| 241 | + } |
| 242 | +} |
0 commit comments