diff --git a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs index 51f9f9361b..aefd86cebe 100644 --- a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs +++ b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs @@ -181,9 +181,9 @@ impl MyGC { let res = MyGC { hi: AtomicBool::new(false), // ANCHOR: copyspace_new - copyspace0: CopySpace::new(plan_args.get_space_args("copyspace0", true, false, VMRequest::discontiguous()), false), + copyspace0: CopySpace::new(plan_args.get_normal_space_args("copyspace0", true, false, VMRequest::discontiguous()), false), // ANCHOR_END: copyspace_new - copyspace1: CopySpace::new(plan_args.get_space_args("copyspace1", true, false, VMRequest::discontiguous()), true), + copyspace1: CopySpace::new(plan_args.get_normal_space_args("copyspace1", true, false, VMRequest::discontiguous()), true), common: CommonPlan::new(plan_args), }; diff --git a/src/plan/compressor/global.rs b/src/plan/compressor/global.rs index 882ca51418..62a2a84657 100644 --- a/src/plan/compressor/global.rs +++ b/src/plan/compressor/global.rs @@ -183,7 +183,7 @@ impl Compressor { }; let res = Compressor { - compressor_space: CompressorSpace::new(plan_args.get_space_args( + compressor_space: CompressorSpace::new(plan_args.get_normal_space_args( "compressor_space", true, false, diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs index b23a81c998..66540ffb97 100644 --- a/src/plan/generational/copying/global.rs +++ b/src/plan/generational/copying/global.rs @@ -110,6 +110,9 @@ impl Plan for GenCopy { let full_heap = !self.gen.is_current_gc_nursery(); self.gen.release(tls); if full_heap { + if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() { + self.fromspace().clear_side_log_bits(); + } self.fromspace().release(); } } @@ -209,11 +212,11 @@ impl GenCopy { }; let copyspace0 = CopySpace::new( - plan_args.get_space_args("copyspace0", true, false, 
VMRequest::discontiguous()), + plan_args.get_mature_space_args("copyspace0", true, false, VMRequest::discontiguous()), false, ); let copyspace1 = CopySpace::new( - plan_args.get_space_args("copyspace1", true, false, VMRequest::discontiguous()), + plan_args.get_mature_space_args("copyspace1", true, false, VMRequest::discontiguous()), true, ); diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs index 0404fc93bb..54e94a6334 100644 --- a/src/plan/generational/global.rs +++ b/src/plan/generational/global.rs @@ -41,7 +41,7 @@ pub struct CommonGenPlan { impl CommonGenPlan { pub fn new(mut args: CreateSpecificPlanArgs) -> Self { let nursery = CopySpace::new( - args.get_space_args("nursery", true, false, VMRequest::discontiguous()), + args.get_nursery_space_args("nursery", true, false, VMRequest::discontiguous()), true, ); let full_heap_gc_count = args diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index ff76d5ac55..41e7a70768 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -129,6 +129,9 @@ impl Plan for GenImmix { let full_heap = !self.gen.is_current_gc_nursery(); self.gen.prepare(tls); if full_heap { + if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() { + self.immix_space.clear_side_log_bits(); + } self.immix_space.prepare( full_heap, Some(crate::policy::immix::defrag::StatsForDefrag::new(self)), @@ -247,12 +250,14 @@ impl GenImmix { crate::plan::generational::new_generational_global_metadata_specs::(), }; let immix_space = ImmixSpace::new( - plan_args.get_space_args("immix_mature", true, false, VMRequest::discontiguous()), + plan_args.get_mature_space_args( + "immix_mature", + true, + false, + VMRequest::discontiguous(), + ), ImmixSpaceArgs { - // We need to unlog objects at tracing time since we currently clear all log bits during a major GC - unlog_object_when_traced: true, // In GenImmix, young objects are not allocated in ImmixSpace directly. 
- #[cfg(feature = "vo_bit")] mixed_age: false, never_move_objects: false, }, diff --git a/src/plan/generational/mod.rs b/src/plan/generational/mod.rs index 15b63abb33..dc5c747063 100644 --- a/src/plan/generational/mod.rs +++ b/src/plan/generational/mod.rs @@ -45,6 +45,7 @@ pub const FULL_NURSERY_GC: bool = false; pub const GEN_CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: true, needs_log_bit: ACTIVE_BARRIER.equals(BarrierSelector::ObjectBarrier), + generational: true, barrier: ACTIVE_BARRIER, // We may trace duplicate edges in sticky immix (or any plan that uses object remembering barrier). See https://github.com/mmtk/mmtk-core/issues/743. may_trace_duplicate_edges: ACTIVE_BARRIER.equals(BarrierSelector::ObjectBarrier), diff --git a/src/plan/global.rs b/src/plan/global.rs index 8b6865f42c..481a4063a1 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -404,11 +404,13 @@ pub struct CreateSpecificPlanArgs<'a, VM: VMBinding> { impl CreateSpecificPlanArgs<'_, VM> { /// Get a PlanCreateSpaceArgs that can be used to create a space - pub fn get_space_args( + pub fn _get_space_args( &mut self, name: &'static str, zeroed: bool, permission_exec: bool, + unlog_allocated_object: bool, + unlog_traced_object: bool, vmrequest: VMRequest, ) -> PlanCreateSpaceArgs<'_, VM> { PlanCreateSpaceArgs { @@ -416,6 +418,8 @@ impl CreateSpecificPlanArgs<'_, VM> { zeroed, permission_exec, vmrequest, + unlog_allocated_object, + unlog_traced_object, global_side_metadata_specs: self.global_side_metadata_specs.clone(), vm_map: self.global_args.vm_map, mmapper: self.global_args.mmapper, @@ -427,39 +431,121 @@ impl CreateSpecificPlanArgs<'_, VM> { global_state: self.global_args.state.clone(), } } + + // The following are some convenience methods for common presets. + // These are not an exhaustive list -- it is just common presets that are used by most plans. + + /// Get a preset for a nursery space (where young objects are located). 
+ pub fn get_nursery_space_args( + &mut self, + name: &'static str, + zeroed: bool, + permission_exec: bool, + vmrequest: VMRequest, + ) -> PlanCreateSpaceArgs<'_, VM> { + // Objects are allocated as young, and when traced, they stay young. If they are copied out of the nursery space, they will be moved to a mature space, + // and log bits will be set in that case by the mature space. + self._get_space_args(name, zeroed, permission_exec, false, false, vmrequest) + } + + /// Get a preset for a mature space (where mature objects are located). + pub fn get_mature_space_args( + &mut self, + name: &'static str, + zeroed: bool, + permission_exec: bool, + vmrequest: VMRequest, + ) -> PlanCreateSpaceArgs<'_, VM> { + // Objects are allocated as mature (pre-tenured), and when traced, they stay mature. + // If an object gets copied into a mature space, the object is also mature. + self._get_space_args(name, zeroed, permission_exec, true, true, vmrequest) + } + + /// Get a preset for a mixed age space (where both young and mature objects are located). + pub fn get_mixed_age_space_args( + &mut self, + name: &'static str, + zeroed: bool, + permission_exec: bool, + vmrequest: VMRequest, + ) -> PlanCreateSpaceArgs<'_, VM> { + // Objects are allocated as young, and when traced, they become mature objects. + self._get_space_args(name, zeroed, permission_exec, false, true, vmrequest) + } + + /// Get a preset for spaces in a non-generational plan. + pub fn get_normal_space_args( + &mut self, + name: &'static str, + zeroed: bool, + permission_exec: bool, + vmrequest: VMRequest, + ) -> PlanCreateSpaceArgs<'_, VM> { + // Non-generational plan: we do not use any of the flags about log bits. + self._get_space_args(name, zeroed, permission_exec, false, false, vmrequest) + } + + /// Get a preset for spaces in [`crate::plan::global::CommonPlan`]. + /// Spaces like LOS which may include both young and mature objects should not use this method.
+ pub fn get_common_space_args( + &mut self, + generational: bool, + name: &'static str, + ) -> PlanCreateSpaceArgs<'_, VM> { + self.get_base_space_args( + generational, + name, + false, // Common spaces are not executable. + ) + } + + /// Get a preset for spaces in [`crate::plan::global::BasePlan`]. + pub fn get_base_space_args( + &mut self, + generational: bool, + name: &'static str, + permission_exec: bool, + ) -> PlanCreateSpaceArgs<'_, VM> { + if generational { + // In generational plans, common/base spaces behave like a mature space: + // * the objects in these spaces are not traced in a nursery GC + // * the log bits for the objects are maintained exactly the same as a mature space. + // Thus we consider them as mature spaces. + self.get_mature_space_args(name, true, permission_exec, VMRequest::discontiguous()) + } else { + self.get_normal_space_args(name, true, permission_exec, VMRequest::discontiguous()) + } + } } impl BasePlan { #[allow(unused_mut)] // 'args' only needs to be mutable for certain features pub fn new(mut args: CreateSpecificPlanArgs) -> BasePlan { + let _generational = args.constraints.generational; BasePlan { #[cfg(feature = "code_space")] - code_space: ImmortalSpace::new(args.get_space_args( + code_space: ImmortalSpace::new(args.get_base_space_args( + _generational, "code_space", true, - true, - VMRequest::discontiguous(), )), #[cfg(feature = "code_space")] - code_lo_space: ImmortalSpace::new(args.get_space_args( + code_lo_space: ImmortalSpace::new(args.get_base_space_args( + _generational, "code_lo_space", true, - true, - VMRequest::discontiguous(), )), #[cfg(feature = "ro_space")] - ro_space: ImmortalSpace::new(args.get_space_args( + ro_space: ImmortalSpace::new(args.get_base_space_args( + _generational, "ro_space", - true, false, - VMRequest::discontiguous(), )), #[cfg(feature = "vm_space")] - vm_space: VMSpace::new(args.get_space_args( + vm_space: VMSpace::new(args.get_base_space_args( + _generational, "vm_space", - false, false, // 
it doesn't matter -- we are not mmapping for VM space. - VMRequest::discontiguous(), )), global_state: args.global_args.state.clone(), @@ -517,6 +603,28 @@ impl BasePlan { self.vm_space.release(); } + pub fn clear_side_log_bits(&self) { + #[cfg(feature = "code_space")] + self.code_space.clear_side_log_bits(); + #[cfg(feature = "code_space")] + self.code_lo_space.clear_side_log_bits(); + #[cfg(feature = "ro_space")] + self.ro_space.clear_side_log_bits(); + #[cfg(feature = "vm_space")] + self.vm_space.clear_side_log_bits(); + } + + pub fn set_side_log_bits(&self) { + #[cfg(feature = "code_space")] + self.code_space.set_side_log_bits(); + #[cfg(feature = "code_space")] + self.code_lo_space.set_side_log_bits(); + #[cfg(feature = "ro_space")] + self.ro_space.set_side_log_bits(); + #[cfg(feature = "vm_space")] + self.vm_space.set_side_log_bits(); + } + pub fn end_of_gc(&mut self, _tls: VMWorkerThread) { // Do nothing here. None of the spaces needs end_of_gc. } @@ -584,16 +692,19 @@ pub struct CommonPlan { impl CommonPlan { pub fn new(mut args: CreateSpecificPlanArgs) -> CommonPlan { + let needs_log_bit = args.constraints.needs_log_bit; + let generational = args.constraints.generational; CommonPlan { - immortal: ImmortalSpace::new(args.get_space_args( - "immortal", - true, - false, - VMRequest::discontiguous(), - )), + immortal: ImmortalSpace::new(args.get_common_space_args(generational, "immortal")), los: LargeObjectSpace::new( - args.get_space_args("los", true, false, VMRequest::discontiguous()), + // LOS is a bit special, as it is a mixed age space. It has a logical nursery. 
+ if generational { + args.get_mixed_age_space_args("los", true, false, VMRequest::discontiguous()) + } else { + args.get_normal_space_args("los", true, false, VMRequest::discontiguous()) + }, false, + needs_log_bit, ), nonmoving: Self::new_nonmoving_space(&mut args), base: BasePlan::new(args), @@ -621,6 +732,18 @@ impl CommonPlan { self.base.release(tls, full_heap) } + pub fn clear_side_log_bits(&self) { + self.immortal.clear_side_log_bits(); + self.los.clear_side_log_bits(); + self.base.clear_side_log_bits(); + } + + pub fn set_side_log_bits(&self) { + self.immortal.set_side_log_bits(); + self.los.set_side_log_bits(); + self.base.set_side_log_bits(); + } + pub fn end_of_gc(&mut self, tls: VMWorkerThread) { self.end_of_gc_nonmoving_space(); self.base.end_of_gc(tls); @@ -639,7 +762,7 @@ impl CommonPlan { } fn new_nonmoving_space(args: &mut CreateSpecificPlanArgs) -> NonMovingSpace { - let space_args = args.get_space_args("nonmoving", true, false, VMRequest::discontiguous()); + let space_args = args.get_common_space_args(args.constraints.generational, "nonmoving"); cfg_if::cfg_if! 
{ if #[cfg(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving"))] { NonMovingSpace::new(space_args) @@ -648,8 +771,6 @@ impl CommonPlan { NonMovingSpace::new( space_args, crate::policy::immix::ImmixSpaceArgs { - unlog_object_when_traced: false, - #[cfg(feature = "vo_bit")] mixed_age: false, never_move_objects: true, }, diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index d8b5f40935..136db58c43 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -138,8 +138,6 @@ impl Immix { Self::new_with_args( plan_args, ImmixSpaceArgs { - unlog_object_when_traced: false, - #[cfg(feature = "vo_bit")] mixed_age: false, never_move_objects: false, }, @@ -152,7 +150,21 @@ impl Immix { ) -> Self { let immix = Immix { immix_space: ImmixSpace::new( - plan_args.get_space_args("immix", true, false, VMRequest::discontiguous()), + if space_args.mixed_age { + plan_args.get_mixed_age_space_args( + "immix", + true, + false, + VMRequest::discontiguous(), + ) + } else { + plan_args.get_normal_space_args( + "immix", + true, + false, + VMRequest::discontiguous(), + ) + }, space_args, ), common: CommonPlan::new(plan_args), diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index f776840bfd..3be0903117 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -198,7 +198,7 @@ impl MarkCompact { global_side_metadata_specs, }; - let mc_space = MarkCompactSpace::new(plan_args.get_space_args( + let mc_space = MarkCompactSpace::new(plan_args.get_normal_space_args( "mc", true, false, diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index f6d4b9449b..239c1a5102 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -112,7 +112,7 @@ impl MarkSweep { }; let res = MarkSweep { - ms: MarkSweepSpace::new(plan_args.get_space_args( + ms: MarkSweepSpace::new(plan_args.get_normal_space_args( "ms", true, false, diff --git a/src/plan/nogc/global.rs 
b/src/plan/nogc/global.rs index d6e6306fef..199b9c6185 100644 --- a/src/plan/nogc/global.rs +++ b/src/plan/nogc/global.rs @@ -99,19 +99,19 @@ impl NoGC { }; let res = NoGC { - nogc_space: NoGCImmortalSpace::new(plan_args.get_space_args( + nogc_space: NoGCImmortalSpace::new(plan_args.get_normal_space_args( "nogc_space", cfg!(not(feature = "nogc_no_zeroing")), false, VMRequest::discontiguous(), )), - immortal: ImmortalSpace::new(plan_args.get_space_args( + immortal: ImmortalSpace::new(plan_args.get_normal_space_args( "immortal", true, false, VMRequest::discontiguous(), )), - los: ImmortalSpace::new(plan_args.get_space_args( + los: ImmortalSpace::new(plan_args.get_normal_space_args( "los", true, false, diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs index 5d0d868f95..94a909c9e4 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -108,8 +108,14 @@ impl PageProtect { let ret = PageProtect { space: LargeObjectSpace::new( - plan_args.get_space_args("pageprotect", true, false, VMRequest::discontiguous()), + plan_args.get_normal_space_args( + "pageprotect", + true, + false, + VMRequest::discontiguous(), + ), true, + false, // PageProtect does not use log bit ), common: CommonPlan::new(plan_args), }; diff --git a/src/plan/plan_constraints.rs b/src/plan/plan_constraints.rs index 3110eb7538..f86de5e05f 100644 --- a/src/plan/plan_constraints.rs +++ b/src/plan/plan_constraints.rs @@ -45,6 +45,8 @@ pub struct PlanConstraints { /// `MutatorConfig::prepare_func`). Those plans can set this to `false` so that the /// `PrepareMutator` work packets will not be created at all. pub needs_prepare_mutator: bool, + /// Is this plan generational? + pub generational: bool, } impl PlanConstraints { @@ -67,6 +69,7 @@ impl PlanConstraints { barrier: BarrierSelector::NoBarrier, // If we use mark sweep as non moving space, we need to prepare mutator. See [`common_prepare_func`]. 
needs_prepare_mutator: cfg!(feature = "marksweep_as_nonmoving"), + generational: false, } } } diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs index 0205c7a77f..98fed5fe46 100644 --- a/src/plan/semispace/global.rs +++ b/src/plan/semispace/global.rs @@ -146,11 +146,21 @@ impl SemiSpace { let res = SemiSpace { hi: AtomicBool::new(false), copyspace0: CopySpace::new( - plan_args.get_space_args("copyspace0", true, false, VMRequest::discontiguous()), + plan_args.get_normal_space_args( + "copyspace0", + true, + false, + VMRequest::discontiguous(), + ), false, ), copyspace1: CopySpace::new( - plan_args.get_space_args("copyspace1", true, false, VMRequest::discontiguous()), + plan_args.get_normal_space_args( + "copyspace1", + true, + false, + VMRequest::discontiguous(), + ), true, ), common: CommonPlan::new(plan_args), diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs index a06dee9816..7dcd83ad3b 100644 --- a/src/plan/sticky/immix/global.rs +++ b/src/plan/sticky/immix/global.rs @@ -46,6 +46,7 @@ pub const STICKY_IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { barrier: crate::plan::BarrierSelector::ObjectBarrier, // We may trace duplicate edges in sticky immix (or any plan that uses object remembering barrier). See https://github.com/mmtk/mmtk-core/issues/743. may_trace_duplicate_edges: true, + generational: true, ..immix::IMMIX_CONSTRAINTS }; @@ -122,6 +123,9 @@ impl Plan for StickyImmix { self.immix.common.los.prepare(false); } else { self.full_heap_gc_count.lock().unwrap().inc(); + if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() { + self.immix.immix_space.clear_side_log_bits(); + } self.immix.prepare(tls); } } @@ -325,12 +329,7 @@ impl StickyImmix { let immix = immix::Immix::new_with_args( plan_args, crate::policy::immix::ImmixSpaceArgs { - // Every object we trace in nursery GC becomes a mature object. - // Every object we trace in full heap GC is a mature object. 
Thus in both cases, - // they should be unlogged. - unlog_object_when_traced: true, // In StickyImmix, both young and old objects are allocated in the ImmixSpace. - #[cfg(feature = "vo_bit")] mixed_age: true, never_move_objects: false, }, diff --git a/src/policy/compressor/compressorspace.rs b/src/policy/compressor/compressorspace.rs index 4b7be4fdd1..0a2226c243 100644 --- a/src/policy/compressor/compressorspace.rs +++ b/src/policy/compressor/compressorspace.rs @@ -146,6 +146,14 @@ impl Space for CompressorSpace { fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) { object_enum::enumerate_blocks_from_monotonic_page_resource(enumerator, &self.pr); } + + fn clear_side_log_bits(&self) { + unimplemented!() + } + + fn set_side_log_bits(&self) { + unimplemented!() + } } impl crate::policy::gc_work::PolicyTraceObject for CompressorSpace { diff --git a/src/policy/copy_context.rs b/src/policy/copy_context.rs index 0954834874..d25a524b9e 100644 --- a/src/policy/copy_context.rs +++ b/src/policy/copy_context.rs @@ -22,5 +22,5 @@ pub trait PolicyCopyContext: 'static + Send { align: usize, offset: usize, ) -> Address; - fn post_copy(&mut self, _obj: ObjectReference, _bytes: usize) {} + fn post_copy(&mut self, _obj: ObjectReference, _bytes: usize); } diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs index 7067f65d43..e34c799c7e 100644 --- a/src/policy/copyspace.rs +++ b/src/policy/copyspace.rs @@ -143,6 +143,20 @@ impl Space for CopySpace { fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) { object_enum::enumerate_blocks_from_monotonic_page_resource(enumerator, &self.pr); } + + fn clear_side_log_bits(&self) { + let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); + for (start, size) in self.pr.iterate_allocated_regions() { + log_bit.bzero_metadata(start, size); + } + } + + fn set_side_log_bits(&self) { + let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); + for (start, size) in 
self.pr.iterate_allocated_regions() { + log_bit.bset_metadata(start, size); + } + } } impl crate::policy::gc_work::PolicyTraceObject for CopySpace { @@ -201,12 +215,6 @@ impl CopySpace { side_forwarding_status_table.bzero_metadata(start, size); } - if self.common.needs_log_bit { - if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC { - side.bzero_metadata(start, size); - } - } - // Clear VO bits because all objects in the space are dead. #[cfg(feature = "vo_bit")] crate::util::metadata::vo_bit::bzero_vo_bit(start, size); @@ -336,6 +344,13 @@ impl PolicyCopyContext for CopySpaceCopyContext { ) -> Address { self.copy_allocator.alloc(bytes, align, offset) } + + fn post_copy(&mut self, obj: ObjectReference, _bytes: usize) { + if self.copy_allocator.get_space().common().unlog_traced_object { + VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC + .mark_byte_as_unlogged::(obj, Ordering::Relaxed); + } + } } impl CopySpaceCopyContext { @@ -348,9 +363,7 @@ impl CopySpaceCopyContext { copy_allocator: BumpAllocator::new(tls.0, tospace, context), } } -} -impl CopySpaceCopyContext { pub fn rebind(&mut self, space: &CopySpace) { self.copy_allocator .rebind(unsafe { &*{ space as *const _ } }); diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index a8bcd54fda..bc3a585eff 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -60,19 +60,11 @@ pub struct ImmixSpace { /// Some arguments for Immix Space. pub struct ImmixSpaceArgs { - /// Mark an object as unlogged when we trace an object. - /// Normally we set the log bit when we copy an object with [`crate::util::copy::CopySemantics::PromoteToMature`]. - /// In sticky immix, we 'promote' an object to mature when we trace the object - /// (no matter we copy an object or not). So we have to use `PromoteToMature`, and instead - /// just set the log bit in the space when an object is traced. 
- pub unlog_object_when_traced: bool, /// Whether this ImmixSpace instance contains both young and old objects. /// This affects the updating of valid-object bits. If some lines or blocks of this ImmixSpace /// instance contain young objects, their VO bits need to be updated during this GC. Currently /// only StickyImmix is affected. GenImmix allocates young objects in a separete CopySpace /// nursery and its VO bits can be cleared in bulk. - // Currently only used when "vo_bit" is enabled. Using #[cfg(...)] to eliminate dead code warning. - #[cfg(feature = "vo_bit")] pub mixed_age: bool, /// Disable copying for this Immix space. pub never_move_objects: bool, @@ -203,6 +195,20 @@ impl Space for ImmixSpace { fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) { object_enum::enumerate_blocks_from_chunk_map::(enumerator, &self.chunk_map); } + + fn clear_side_log_bits(&self) { + let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); + for chunk in self.chunk_map.all_chunks() { + log_bit.bzero_metadata(chunk.start(), Chunk::BYTES); + } + } + + fn set_side_log_bits(&self) { + let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); + for chunk in self.chunk_map.all_chunks() { + log_bit.bset_metadata(chunk.start(), Chunk::BYTES); + } + } } impl crate::policy::gc_work::PolicyTraceObject for ImmixSpace { @@ -301,7 +307,7 @@ impl ImmixSpace { args: crate::policy::space::PlanCreateSpaceArgs, mut space_args: ImmixSpaceArgs, ) -> Self { - if space_args.unlog_object_when_traced { + if args.unlog_traced_object { assert!( args.constraints.needs_log_bit, "Invalid args when the plan does not use log bit" @@ -421,14 +427,6 @@ impl ImmixSpace { unimplemented!("cyclic mark bits is not supported at the moment"); } - if self.common.needs_log_bit { - if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC { - for chunk in self.chunk_map.all_chunks() { - side.bzero_metadata(chunk.start(), Chunk::BYTES); - } - } - } - // 
Prepare defrag info if self.is_defrag_enabled() { self.defrag.prepare(self, plan_stats.unwrap()); @@ -700,6 +698,8 @@ impl ImmixSpace { self.mark_lines(object); } + self.unlog_object_if_needed(object); + object } else { // We are forwarding objects. When the copy allocator allocates the block, it should @@ -709,9 +709,16 @@ impl ImmixSpace { object, semantics, copy_context, - |_new_object| { + |new_object| { + // post_copy should have set the unlog bit + // if `unlog_traced_object` is true. + debug_assert!( + !self.common.unlog_traced_object + || VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC + .is_unlogged::(new_object, Ordering::Relaxed) + ); #[cfg(feature = "vo_bit")] - vo_bit::helper::on_object_forwarded::(_new_object); + vo_bit::helper::on_object_forwarded::(new_object); }, ) }; @@ -722,13 +729,12 @@ impl ImmixSpace { queue.enqueue(new_object); debug_assert!(new_object.is_live()); - self.unlog_object_if_needed(new_object); new_object } } fn unlog_object_if_needed(&self, object: ObjectReference) { - if self.space_args.unlog_object_when_traced { + if self.common.unlog_traced_object { // Make sure the side metadata for the line can fit into one byte. For smaller line size, we should // use `mark_as_unlogged` instead to mark the bit. 
const_assert!( @@ -877,6 +883,10 @@ impl ImmixSpace { if !super::MARK_LINE_AT_SCAN_TIME { self.mark_lines(object); } + if self.common.unlog_traced_object { + VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC + .mark_byte_as_unlogged::(object, Ordering::Relaxed); + } } pub(crate) fn prefer_copy_on_nursery_gc(&self) -> bool { diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs index c9eedd365f..325ddc1c81 100644 --- a/src/policy/immortalspace.rs +++ b/src/policy/immortalspace.rs @@ -56,7 +56,7 @@ impl SFT for ImmortalSpace { fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) { self.mark_state .on_object_metadata_initialization::(object); - if self.common.needs_log_bit { + if self.common.unlog_allocated_object { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); } #[cfg(feature = "vo_bit")] @@ -115,6 +115,20 @@ impl Space for ImmortalSpace { fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) { object_enum::enumerate_blocks_from_monotonic_page_resource(enumerator, &self.pr); } + + fn clear_side_log_bits(&self) { + let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); + for (start, size) in self.pr.iterate_allocated_regions() { + log_bit.bzero_metadata(start, size); + } + } + + fn set_side_log_bits(&self) { + let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); + for (start, size) in self.pr.iterate_allocated_regions() { + log_bit.bset_metadata(start, size); + } + } } use crate::scheduler::GCWorker; @@ -186,7 +200,7 @@ impl ImmortalSpace { ); if self.mark_state.test_and_mark::(object) { // Set the unlog bit if required - if self.common.needs_log_bit { + if self.common.unlog_traced_object { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.store_atomic::( object, 1, diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index abe7976082..e7d3107e45 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -9,6 
+9,7 @@ use crate::util::alloc::allocator::AllocationOptions; use crate::util::constants::BYTES_IN_PAGE; use crate::util::heap::{FreeListPageResource, PageResource}; use crate::util::metadata; +use crate::util::object_enum::ClosureObjectEnumerator; use crate::util::object_enum::ObjectEnumerator; use crate::util::opaque_pointer::*; use crate::util::treadmill::TreadMill; @@ -30,6 +31,7 @@ pub struct LargeObjectSpace { mark_state: u8, in_nursery_gc: bool, treadmill: TreadMill, + clear_log_bit_on_sweep: bool, } impl SFT for LargeObjectSpace { @@ -76,7 +78,7 @@ impl SFT for LargeObjectSpace { ); // If this object is freshly allocated, we do not set it as unlogged - if !alloc && self.common.needs_log_bit { + if !alloc && self.common.unlog_allocated_object { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); } @@ -192,6 +194,22 @@ impl Space for LargeObjectSpace { fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) { self.treadmill.enumerate_objects(enumerator); } + + fn clear_side_log_bits(&self) { + let mut enumator = ClosureObjectEnumerator::<_, VM>::new(|object| { + VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.clear::(object, Ordering::SeqCst); + }); + self.treadmill.enumerate_objects(&mut enumator); + } + + fn set_side_log_bits(&self) { + debug_assert!(self.treadmill.is_from_space_empty()); + debug_assert!(self.treadmill.is_nursery_empty()); + let mut enumator = ClosureObjectEnumerator::<_, VM>::new(|object| { + VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); + }); + self.treadmill.enumerate_objects(&mut enumator); + } } use crate::scheduler::GCWorker; @@ -216,6 +234,7 @@ impl LargeObjectSpace { pub fn new( args: crate::policy::space::PlanCreateSpaceArgs, protect_memory_on_release: bool, + clear_log_bit_on_sweep: bool, ) -> Self { let is_discontiguous = args.vmrequest.is_discontiguous(); let vm_map = args.vm_map; @@ -240,6 +259,7 @@ impl LargeObjectSpace { mark_state: 0, in_nursery_gc: 
false, treadmill: TreadMill::new(), + clear_log_bit_on_sweep, } } @@ -288,7 +308,7 @@ impl LargeObjectSpace { // We just moved the object out of the logical nursery, mark it as unlogged. // We also unlog mature objects as their unlog bit may have been unset before the // full-heap GC - if self.common.needs_log_bit { + if self.common.unlog_traced_object { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC .mark_as_unlogged::(object, Ordering::SeqCst); } @@ -308,7 +328,7 @@ impl LargeObjectSpace { #[cfg(feature = "vo_bit")] crate::util::metadata::vo_bit::unset_vo_bit(object); // Clear log bits for dead objects to prevent a new nursery object having the unlog bit set - if self.common.needs_log_bit { + if self.clear_log_bit_on_sweep { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.clear::(object, Ordering::SeqCst); } self.pr diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs index 76e44c55ee..3a3264b05b 100644 --- a/src/policy/lockfreeimmortalspace.rs +++ b/src/policy/lockfreeimmortalspace.rs @@ -181,6 +181,14 @@ impl Space for LockFreeImmortalSpace { fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) { enumerator.visit_address_range(self.start, self.start + self.total_bytes); } + + fn clear_side_log_bits(&self) { + unimplemented!() + } + + fn set_side_log_bits(&self) { + unimplemented!() + } } use crate::plan::{ObjectQueue, VectorObjectQueue}; diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs index c709ab499a..d7fa7d8a9c 100644 --- a/src/policy/markcompactspace.rs +++ b/src/policy/markcompactspace.rs @@ -143,6 +143,14 @@ impl Space for MarkCompactSpace { fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) { object_enum::enumerate_blocks_from_monotonic_page_resource(enumerator, &self.pr); } + + fn clear_side_log_bits(&self) { + unimplemented!() + } + + fn set_side_log_bits(&self) { + unimplemented!() + } } impl crate::policy::gc_work::PolicyTraceObject for MarkCompactSpace { diff --git 
a/src/policy/marksweepspace/malloc_ms/global.rs b/src/policy/marksweepspace/malloc_ms/global.rs index fd473dd0ee..acf3369554 100644 --- a/src/policy/marksweepspace/malloc_ms/global.rs +++ b/src/policy/marksweepspace/malloc_ms/global.rs @@ -241,6 +241,14 @@ impl Space for MallocSpace { fn enumerate_objects(&self, _enumerator: &mut dyn ObjectEnumerator) { unimplemented!() } + + fn clear_side_log_bits(&self) { + unimplemented!() + } + + fn set_side_log_bits(&self) { + unimplemented!() + } } use crate::scheduler::GCWorker; diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index 1dc87cd04c..93697e8b04 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -254,6 +254,20 @@ impl Space for MarkSweepSpace { fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) { object_enum::enumerate_blocks_from_chunk_map::(enumerator, &self.chunk_map); } + + fn clear_side_log_bits(&self) { + let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); + for chunk in self.chunk_map.all_chunks() { + log_bit.bzero_metadata(chunk.start(), Chunk::BYTES); + } + } + + fn set_side_log_bits(&self) { + let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); + for chunk in self.chunk_map.all_chunks() { + log_bit.bset_metadata(chunk.start(), Chunk::BYTES); + } + } } impl crate::policy::gc_work::PolicyTraceObject for MarkSweepSpace { @@ -406,15 +420,7 @@ impl MarkSweepSpace { self.chunk_map.set_allocated(block.chunk(), true); } - pub fn prepare(&mut self, full_heap: bool) { - if self.common.needs_log_bit && full_heap { - if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC { - for chunk in self.chunk_map.all_chunks() { - side.bzero_metadata(chunk.start(), Chunk::BYTES); - } - } - } - + pub fn prepare(&mut self, _full_heap: bool) { #[cfg(debug_assertions)] self.abandoned_in_gc.lock().unwrap().assert_empty(); diff --git 
a/src/policy/space.rs b/src/policy/space.rs index e44874fe5b..654bcb22a6 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -433,6 +433,14 @@ pub trait Space: 'static + SFT + Sync + Downcast { /// the execution time. For LOS, it will be cheaper to enumerate individual objects than /// scanning VO bits because it is sparse. fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator); + + /// Clear the side log bits for allocated regions in this space. + /// This method is only called if the plan knows the log bits are side metadata. + fn clear_side_log_bits(&self); + + /// Set the side log bits for allocated regions in this space. + /// This method is only called if the plan knows the log bits are side metadata. + fn set_side_log_bits(&self); } /// Print the VM map for a space. @@ -524,6 +532,8 @@ pub struct CommonSpace { /// This field equals to needs_log_bit in the plan constraints. // TODO: This should be a constant for performance. pub needs_log_bit: bool, + pub unlog_allocated_object: bool, + pub unlog_traced_object: bool, /// A lock used during acquire() to make sure only one thread can allocate. 
pub acquire_lock: Mutex<()>, @@ -548,6 +558,8 @@ pub struct PlanCreateSpaceArgs<'a, VM: VMBinding> { pub name: &'static str, pub zeroed: bool, pub permission_exec: bool, + pub unlog_allocated_object: bool, + pub unlog_traced_object: bool, pub vmrequest: VMRequest, pub global_side_metadata_specs: Vec, pub vm_map: &'static dyn VMMap, @@ -594,6 +606,8 @@ impl CommonSpace { vm_map: args.plan_args.vm_map, mmapper: args.plan_args.mmapper, needs_log_bit: args.plan_args.constraints.needs_log_bit, + unlog_allocated_object: args.plan_args.unlog_allocated_object, + unlog_traced_object: args.plan_args.unlog_traced_object, gc_trigger: args.plan_args.gc_trigger, metadata: SideMetadataContext { global: args.plan_args.global_side_metadata_specs, diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs index ab1101aaf5..9a97aea033 100644 --- a/src/policy/vmspace.rs +++ b/src/policy/vmspace.rs @@ -61,7 +61,7 @@ impl SFT for VMSpace { fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) { self.mark_state .on_object_metadata_initialization::(object); - if self.common.needs_log_bit { + if self.common.unlog_allocated_object { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); } #[cfg(feature = "vo_bit")] @@ -154,6 +154,22 @@ impl Space for VMSpace { enumerator.visit_address_range(ep.start, ep.end); } } + + fn clear_side_log_bits(&self) { + let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); + let external_pages = self.pr.get_external_pages(); + for ep in external_pages.iter() { + log_bit.bzero_metadata(ep.start, ep.end - ep.start); + } + } + + fn set_side_log_bits(&self) { + let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); + let external_pages = self.pr.get_external_pages(); + for ep in external_pages.iter() { + log_bit.bset_metadata(ep.start, ep.end - ep.start); + } + } } use crate::scheduler::GCWorker; @@ -287,7 +303,7 @@ impl VMSpace { // Flip the per-object unlogged bits to 
"unlogged" state for objects inside the // bootimage #[cfg(feature = "set_unlog_bits_vm_space")] - if self.common.needs_log_bit { + if self.common.unlog_traced_object { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.store_atomic::( object, 1, diff --git a/src/util/copy/mod.rs b/src/util/copy/mod.rs index aaea3b425a..9b070cc01c 100644 --- a/src/util/copy/mod.rs +++ b/src/util/copy/mod.rs @@ -14,7 +14,6 @@ use crate::util::{Address, ObjectReference}; use crate::vm::ObjectModel; use crate::vm::VMBinding; use crate::MMTK; -use std::sync::atomic::Ordering; use enum_map::Enum; use enum_map::EnumMap; @@ -119,12 +118,6 @@ impl GCWorkerCopyContext { object )); } - // If we are copying objects in mature space, we would need to mark the object as mature. - if semantics.is_mature() && self.config.constraints.needs_log_bit { - // If the plan uses unlogged bit, we set the unlogged bit (the object is unlogged/mature) - VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC - .mark_byte_as_unlogged::(object, Ordering::Relaxed); - } // Policy specific post copy. match self.config.copy_mapping[semantics] { CopySelector::CopySpace(index) => {