Commits (45 total; this diff shows changes from 20 commits)
fb12863
WIP
tianleq Jul 25, 2025
9ac22bd
minor
tianleq Jul 25, 2025
04fee38
Merge branch 'master' of github.com:tianleq/mmtk-core into concurrent…
tianleq Jul 28, 2025
100c049
minor
tianleq Jul 28, 2025
2bbb200
Add ref/finalizer packets for final pause. Use log instead of println.
qinsoon Jul 31, 2025
90f4518
schedule_concurrent_packets before resuming mutators
qinsoon Jul 31, 2025
521adea
eBPF tracing tools for concurrent Immix
wks Jul 31, 2025
4f14b2f
Wake up workers immediately for concurrent work
wks Jul 31, 2025
865e24c
Fix clippy warnings and formatting
wks Jul 31, 2025
7b13272
Move concurrent_marking_active to the plan. Add a flag allocate_as_live
qinsoon Aug 1, 2025
1db4743
Rename load_reference to load_weak_reference
wks Aug 1, 2025
5c38704
Replace `swap` with `replace` and `take`
wks Aug 1, 2025
a95e94a
Remove schedule_concurrent_collection
qinsoon Aug 5, 2025
eacc959
Rename gc_pause_start. Merge gc_pause_end with end_of_gc
qinsoon Aug 5, 2025
028bd0e
Merge branch 'master' into concurrent-immix
qinsoon Aug 5, 2025
fe29529
Disallow new weak reference before ref enqueue
qinsoon Aug 6, 2025
26c2a54
Introduce ConcurrentPlan. Make ConcurrentTraceObjects trace objects in
qinsoon Aug 13, 2025
80024dd
Merge branch 'master' into concurrent-immix
qinsoon Aug 13, 2025
7487c1b
Fix rayon-core version for MSRV
qinsoon Aug 13, 2025
6bb03c0
More assertions and minor fix.
qinsoon Aug 14, 2025
ad41d7e
Wrong assertions
qinsoon Aug 14, 2025
a938110
Fix style check
qinsoon Aug 25, 2025
321c6e2
Refactor log bits
qinsoon Aug 8, 2025
c9315b9
Merge branch 'master' into concurrent-immix
qinsoon Aug 25, 2025
12112d2
Fix style check
qinsoon Aug 25, 2025
77330fe
Use Concurrent bucket for concurrent work
qinsoon Aug 22, 2025
74572eb
Merge branch 'master' into concurrent-immix
qinsoon Aug 26, 2025
bdcf723
Re-enable weak ref buckets. Calculate allocated pages using used pages.
qinsoon Aug 26, 2025
1368ac5
Remove NUM_CONCURRENT_TRACING_PACKETS. Fix issues about enabling
qinsoon Aug 26, 2025
f906db3
Dont use TRACE_KIND_FAST in ConcurrentTraceObjects
qinsoon Aug 26, 2025
4e5c772
Fix docs
qinsoon Aug 26, 2025
47708b7
Allow defrag for STW full heap collection
qinsoon Aug 26, 2025
35cf25a
Put generated concurrent work to the concurrent bucket. Don't need pr…
qinsoon Aug 26, 2025
2653716
Properly call post_scan_object in ConcurrentTraceObject
qinsoon Aug 26, 2025
9cdcb7a
Remove the use of Pause in StopMutators
qinsoon Aug 26, 2025
d43c9e5
Use normal StopMutators for initial marking
qinsoon Aug 27, 2025
e3163d8
Remove Collection::set_concurrent_marking_state. Introduce active for
qinsoon Aug 27, 2025
4966131
Remove the GCCause type
qinsoon Aug 27, 2025
c70c03f
Cleanup
qinsoon Aug 27, 2025
c79fe8e
Fix style check
qinsoon Aug 27, 2025
a23bfbb
Fix style check
qinsoon Aug 27, 2025
3e076a9
Fix style check
qinsoon Aug 28, 2025
553227a
Remove object_reference_clone_pre
wks Aug 28, 2025
8ad9ed8
Remove the gc_cause field.
wks Aug 28, 2025
acea83a
Postpone full GC after FinalMark
wks Aug 28, 2025
1 change: 1 addition & 0 deletions Cargo.toml
@@ -47,6 +47,7 @@ portable-atomic = "1.4.3"
probe = "0.5"
regex = "1.7.0"
rustversion = "1.0"
rayon-core = "=1.12.1" # We can remove this dependency when we use MSRV 1.80+
spin = "0.9.5"
static_assertions = "1.1.0"
strum = "0.27.1"
2 changes: 2 additions & 0 deletions src/global_state.rs
@@ -49,6 +49,7 @@ pub struct GlobalState {
pub(crate) malloc_bytes: AtomicUsize,
/// This stores the live bytes and the used bytes (by pages) for each space in last GC. This counter is only updated in the GC release phase.
pub(crate) live_bytes_in_last_gc: AtomicRefCell<HashMap<&'static str, LiveBytesStats>>,
pub(crate) concurrent_marking_threshold: AtomicUsize,
}

impl GlobalState {
@@ -206,6 +207,7 @@ impl Default for GlobalState {
#[cfg(feature = "malloc_counted_size")]
malloc_bytes: AtomicUsize::new(0),
live_bytes_in_last_gc: AtomicRefCell::new(HashMap::new()),
concurrent_marking_threshold: AtomicUsize::new(0),
}
}
}
8 changes: 8 additions & 0 deletions src/lib.rs
@@ -32,6 +32,8 @@ extern crate static_assertions;
extern crate probe;

mod mmtk;
use std::sync::atomic::AtomicUsize;

pub use mmtk::MMTKBuilder;
pub(crate) use mmtk::MMAPPER;
pub use mmtk::MMTK;
@@ -51,3 +53,9 @@ pub mod vm;
pub use crate::plan::{
AllocationSemantics, BarrierSelector, Mutator, MutatorContext, ObjectQueue, Plan,
};

static NUM_CONCURRENT_TRACING_PACKETS: AtomicUsize = AtomicUsize::new(0);

fn concurrent_marking_packets_drained() -> bool {
crate::NUM_CONCURRENT_TRACING_PACKETS.load(std::sync::atomic::Ordering::SeqCst) == 0
}
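
For context, this counter gates the final pause on the draining of concurrent tracing work: each concurrent tracing packet is expected to bump the counter when scheduled and drop it when finished, so the collector can tell when marking has quiesced. A minimal sketch of that protocol follows; the packet type and hook points are assumptions for illustration, not code from this diff:

use std::sync::atomic::{AtomicUsize, Ordering};

static NUM_CONCURRENT_TRACING_PACKETS: AtomicUsize = AtomicUsize::new(0);

// Hypothetical packet lifecycle, for illustration only.
struct ConcurrentTracePacket;

impl ConcurrentTracePacket {
    fn schedule(self) {
        // One more concurrent tracing packet is in flight.
        NUM_CONCURRENT_TRACING_PACKETS.fetch_add(1, Ordering::SeqCst);
        // ... hand the packet to the scheduler ...
    }

    fn run(self) {
        // ... trace objects concurrently with mutators ...
        // Packet finished; FinalMark may proceed once the count drains to zero.
        NUM_CONCURRENT_TRACING_PACKETS.fetch_sub(1, Ordering::SeqCst);
    }
}

fn concurrent_marking_packets_drained() -> bool {
    NUM_CONCURRENT_TRACING_PACKETS.load(Ordering::SeqCst) == 0
}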
99 changes: 99 additions & 0 deletions src/plan/barriers.rs
@@ -21,6 +21,7 @@ pub enum BarrierSelector {
NoBarrier,
/// Object remembering barrier is used.
ObjectBarrier,
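/// Snapshot-at-the-beginning (SATB) barrier is used.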
SATBBarrier,
}

impl BarrierSelector {
@@ -45,6 +46,19 @@ impl BarrierSelector {
pub trait Barrier<VM: VMBinding>: 'static + Send + Downcast {
fn flush(&mut self) {}

/// Weak reference loading barrier. A mutator should call this when loading from a weak
/// reference field, for example, when executing `java.lang.ref.Reference.get()` in JVM, or
/// loading from a global weak table in CRuby.
///
/// Note: Merely loading from a field holding a weak reference into a local variable will create a
/// strong reference from the stack to the referent, changing its reachability from weakly
/// reachable to strongly reachable. Concurrent garbage collectors may need to handle such
/// events specially. See [SATBBarrier::load_weak_reference] for a concrete example.
///
/// Arguments:
/// * `referent`: The referent object which the weak reference is pointing to.
fn load_weak_reference(&mut self, _referent: ObjectReference) {}

/// Subsuming barrier for object reference write
fn object_reference_write(
&mut self,
@@ -92,6 +106,8 @@ pub trait Barrier<VM: VMBinding>: 'static + Send + Downcast {
self.memory_region_copy_post(src, dst);
}

fn object_reference_clone_pre(&mut self, _obj: ObjectReference) {}

/// Full pre-barrier for array copy
fn memory_region_copy_pre(&mut self, _src: VM::VMMemorySlice, _dst: VM::VMMemorySlice) {}

@@ -159,6 +175,10 @@ pub trait BarrierSemantics: 'static + Send {

/// Object will probably be modified
fn object_probable_write_slow(&mut self, _obj: ObjectReference) {}

fn load_weak_reference(&mut self, _o: ObjectReference) {}

fn object_reference_clone_pre(&mut self, _obj: ObjectReference) {}
Collaborator commented:
This method only has a blank implementation in BarrierSemantics::object_reference_clone_pre, and SATBBarrier::object_reference_clone_pre calls that. What is this method intended for?

}

/// Generic object barrier with a type argument defining its slow-path behaviour.
@@ -250,3 +270,82 @@ impl<S: BarrierSemantics> Barrier<S::VM> for ObjectBarrier<S> {
}
}
}

pub struct SATBBarrier<S: BarrierSemantics> {
Member commented:

This seems to be a pre-write ObjectBarrier. The current ObjectBarrier only implements the post-write functions, and this implementation implements the pre-write functions.

Member replied:

I don't plan to do this refactoring in this PR.

semantics: S,
}

impl<S: BarrierSemantics> SATBBarrier<S> {
pub fn new(semantics: S) -> Self {
Self { semantics }
}
fn object_is_unlogged(&self, object: ObjectReference) -> bool {
S::UNLOG_BIT_SPEC.load_atomic::<S::VM, u8>(object, None, Ordering::SeqCst) != 0
}
}

impl<S: BarrierSemantics> Barrier<S::VM> for SATBBarrier<S> {
fn flush(&mut self) {
self.semantics.flush();
}

fn load_weak_reference(&mut self, o: ObjectReference) {
self.semantics.load_weak_reference(o)
}

fn object_reference_clone_pre(&mut self, obj: ObjectReference) {
self.semantics.object_reference_clone_pre(obj);
}

fn object_probable_write(&mut self, obj: ObjectReference) {
self.semantics.object_probable_write_slow(obj);
}

fn object_reference_write_pre(
&mut self,
src: ObjectReference,
slot: <S::VM as VMBinding>::VMSlot,
target: Option<ObjectReference>,
) {
if self.object_is_unlogged(src) {
self.semantics
.object_reference_write_slow(src, slot, target);
}
}

fn object_reference_write_post(
&mut self,
_src: ObjectReference,
_slot: <S::VM as VMBinding>::VMSlot,
_target: Option<ObjectReference>,
) {
unimplemented!()
}

fn object_reference_write_slow(
&mut self,
src: ObjectReference,
slot: <S::VM as VMBinding>::VMSlot,
target: Option<ObjectReference>,
) {
self.semantics
.object_reference_write_slow(src, slot, target);
}

fn memory_region_copy_pre(
&mut self,
src: <S::VM as VMBinding>::VMMemorySlice,
dst: <S::VM as VMBinding>::VMMemorySlice,
) {
self.semantics.memory_region_copy_slow(src, dst);
}

fn memory_region_copy_post(
&mut self,
_src: <S::VM as VMBinding>::VMMemorySlice,
_dst: <S::VM as VMBinding>::VMMemorySlice,
) {
unimplemented!()
}
}
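
The load_weak_reference documentation above implies a call site in the VM binding wherever a weak referent is handed back to a mutator, e.g. when implementing java.lang.ref.Reference.get(). A hypothetical sketch of such a call site; the weak_ref_get helper is invented for illustration, and only the barrier call itself is the API added in this diff:

use mmtk::util::ObjectReference;
use mmtk::vm::VMBinding;
use mmtk::{Mutator, MutatorContext};

// Hypothetical binding-side helper, called when the VM loads a referent
// from a weak reference (e.g. Reference.get() in a JVM).
fn weak_ref_get<VM: VMBinding>(
    mutator: &mut Mutator<VM>,
    referent: Option<ObjectReference>,
) -> Option<ObjectReference> {
    if let Some(obj) = referent {
        // The referent is about to become strongly reachable from the
        // mutator's stack, so let the barrier observe the load.
        mutator.barrier().load_weak_reference(obj);
    }
    referent
}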
152 changes: 152 additions & 0 deletions src/plan/concurrent/barrier.rs
@@ -0,0 +1,152 @@
use std::sync::atomic::Ordering;

use super::{concurrent_marking_work::ProcessModBufSATB, Pause};
use crate::plan::global::PlanTraceObject;
use crate::{
plan::{barriers::BarrierSemantics, concurrent::global::ConcurrentPlan, VectorQueue},
scheduler::WorkBucketStage,
util::ObjectReference,
vm::{
slot::{MemorySlice, Slot},
VMBinding,
},
MMTK,
};

pub struct SATBBarrierSemantics<VM: VMBinding, P: ConcurrentPlan<VM = VM> + PlanTraceObject<VM>> {
mmtk: &'static MMTK<VM>,
satb: VectorQueue<ObjectReference>,
refs: VectorQueue<ObjectReference>,
plan: &'static P,
}

impl<VM: VMBinding, P: ConcurrentPlan<VM = VM> + PlanTraceObject<VM>> SATBBarrierSemantics<VM, P> {
pub fn new(mmtk: &'static MMTK<VM>) -> Self {
Self {
mmtk,
satb: VectorQueue::default(),
refs: VectorQueue::default(),
plan: mmtk.get_plan().downcast_ref::<P>().unwrap(),
}
}

fn slow(&mut self, _src: Option<ObjectReference>, _slot: VM::VMSlot, old: ObjectReference) {
self.satb.push(old);
if self.satb.is_full() {
self.flush_satb();
}
}

fn enqueue_node(
&mut self,
src: Option<ObjectReference>,
slot: VM::VMSlot,
_new: Option<ObjectReference>,
) -> bool {
if let Some(old) = slot.load() {
self.slow(src, slot, old);
}
true
}

/// Log an object by clearing its unlog bit.
/// Note: this stores the bit unconditionally and always returns true.
fn log_object(&self, object: ObjectReference) -> bool {
Self::UNLOG_BIT_SPEC.store_atomic::<VM, u8>(object, 0, None, Ordering::SeqCst);
true
}

fn flush_satb(&mut self) {
if !self.satb.is_empty() {
if self.should_create_satb_packets() {
let satb = self.satb.take();
let bucket = if self.plan.concurrent_work_in_progress() {
WorkBucketStage::Unconstrained
} else {
debug_assert_eq!(self.plan.current_pause(), Some(Pause::FinalMark));
WorkBucketStage::Closure
};
self.mmtk.scheduler.work_buckets[bucket].add(ProcessModBufSATB::<VM, P>::new(satb));
} else {
let _ = self.satb.take();
};
}
}

#[cold]
fn flush_weak_refs(&mut self) {
if !self.refs.is_empty() {
// debug_assert!(self.should_create_satb_packets());
let nodes = self.refs.take();
let bucket = if self.plan.concurrent_work_in_progress() {
WorkBucketStage::Unconstrained
} else {
debug_assert_eq!(self.plan.current_pause(), Some(Pause::FinalMark));
WorkBucketStage::Closure
};
self.mmtk.scheduler.work_buckets[bucket].add(ProcessModBufSATB::<VM, P>::new(nodes));
}
}

fn should_create_satb_packets(&self) -> bool {
self.plan.concurrent_work_in_progress()
|| self.plan.current_pause() == Some(Pause::FinalMark)
}
}

impl<VM: VMBinding, P: ConcurrentPlan<VM = VM> + PlanTraceObject<VM>> BarrierSemantics
for SATBBarrierSemantics<VM, P>
{
type VM = VM;

#[cold]
fn flush(&mut self) {
self.flush_satb();
self.flush_weak_refs();
}

fn object_reference_write_slow(
&mut self,
src: ObjectReference,
_slot: <Self::VM as VMBinding>::VMSlot,
_target: Option<ObjectReference>,
) {
self.object_probable_write_slow(src);
self.log_object(src);
}

fn memory_region_copy_slow(
&mut self,
_src: <Self::VM as VMBinding>::VMMemorySlice,
dst: <Self::VM as VMBinding>::VMMemorySlice,
) {
for s in dst.iter_slots() {
self.enqueue_node(None, s, None);
}
}

/// Enqueue the referent during concurrent marking.
///
/// Note: During concurrent marking, a collector based on snapshot-at-the-beginning (SATB) will
/// not reach objects that were weakly reachable at the time of `InitialMark`. But if a mutator
/// loads from a weak reference field during concurrent marking, it will make the referent
/// strongly reachable, yet the referent is still not part of the SATB. We must conservatively
/// enqueue the referent even though its reachability has not yet been established, otherwise it
/// (and its children) may be treated as garbage if it happened to be weakly reachable at the
/// time of `InitialMark`.
fn load_weak_reference(&mut self, o: ObjectReference) {
if !self.plan.concurrent_work_in_progress() {
return;
}
self.refs.push(o);
if self.refs.is_full() {
self.flush_weak_refs();
}
}

fn object_probable_write_slow(&mut self, obj: ObjectReference) {
obj.iterate_fields::<VM, _>(|s| {
self.enqueue_node(Some(obj), s, None);
});
}
}
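
Taken together, these semantics form a deletion (SATB) barrier: before a reference field is overwritten, the old target is enqueued, so the concurrent tracer still reaches every object that was live in the snapshot taken at InitialMark even if mutators disconnect it during marking. A toy model of the invariant, for illustration only and not the PR's implementation:

// `mark_stack` stands in for the SATB packets; `Slot` is a toy heap slot.
type ObjectRef = u32;
struct Slot(Option<ObjectRef>);

fn satb_write(slot: &mut Slot, new: Option<ObjectRef>, mark_stack: &mut Vec<ObjectRef>) {
    // Pre-write barrier: record the value being overwritten so tracing
    // still covers the snapshot-at-the-beginning.
    if let Some(old) = slot.0 {
        mark_stack.push(old);
    }
    // Only after recording the old edge may the mutator install the new one.
    slot.0 = new;
}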