use super::gc_work::CompressorWorkContext;
use super::gc_work::{
    CalculateForwardingAddress, Compact, ForwardingProcessEdges, MarkingProcessEdges,
    UpdateReferences,
};
use crate::plan::compressor::mutator::ALLOCATOR_MAPPING;
use crate::plan::global::CreateGeneralPlanArgs;
use crate::plan::global::CreateSpecificPlanArgs;
use crate::plan::global::{BasePlan, CommonPlan};
use crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN;
use crate::plan::AllocationSemantics;
use crate::plan::Plan;
use crate::plan::PlanConstraints;
use crate::policy::compressor::CompressorSpace;
use crate::policy::space::Space;
use crate::scheduler::gc_work::*;
use crate::scheduler::GCWorkScheduler;
use crate::scheduler::WorkBucketStage;
use crate::util::alloc::allocators::AllocatorSelector;
use crate::util::heap::gc_trigger::SpaceStats;
#[allow(unused_imports)]
use crate::util::heap::VMRequest;
use crate::util::metadata::side_metadata::SideMetadataContext;
use crate::util::opaque_pointer::*;
use crate::vm::VMBinding;
use enum_map::EnumMap;
use mmtk_macros::{HasSpaces, PlanTraceObject};

/// Compressor is a stop-the-world, serial implementation of the Compressor
/// algorithm described in Kermany and Petrank,
/// [The Compressor: concurrent, incremental, and parallel compaction](https://dl.acm.org/doi/10.1145/1133255.1134023).
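///
/// A collection stops the mutators, performs a marking trace
/// (`MarkingProcessEdges`), calculates forwarding addresses for live objects,
/// runs a second trace to update references (`ForwardingProcessEdges`), and
/// finally compacts the `CompressorSpace`.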
#[derive(HasSpaces, PlanTraceObject)]
pub struct Compressor<VM: VMBinding> {
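    /// The common plan, which includes the base plan and shared spaces such as
    /// the large object space.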
    #[parent]
    pub common: CommonPlan<VM>,
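    /// The space in which objects are allocated and compacted by the
    /// Compressor algorithm.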
    #[space]
    pub compressor_space: CompressorSpace<VM>,
}

/// The plan constraints for the Compressor plan.
pub const COMPRESSOR_CONSTRAINTS: PlanConstraints = PlanConstraints {
    max_non_los_default_alloc_bytes: MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN,
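    // The Compressor moves objects during compaction.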
    moves_objects: true,
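    // References can only be forwarded after the liveness (marking) trace and
    // forwarding-address calculation have completed.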
    needs_forward_after_liveness: true,
    ..PlanConstraints::default()
};

impl<VM: VMBinding> Plan for Compressor<VM> {
    fn constraints(&self) -> &'static PlanConstraints {
        &COMPRESSOR_CONSTRAINTS
    }

    fn collection_required(&self, space_full: bool, _space: Option<SpaceStats<Self::VM>>) -> bool {
        self.base().collection_required(self, space_full)
    }

    fn common(&self) -> &CommonPlan<VM> {
        &self.common
    }

    fn base(&self) -> &BasePlan<VM> {
        &self.common.base
    }

    fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
        &mut self.common.base
    }

    fn prepare(&mut self, tls: VMWorkerThread) {
        self.common.prepare(tls, true);
        self.compressor_space.prepare();
    }

    fn release(&mut self, tls: VMWorkerThread) {
        self.common.release(tls, true);
        self.compressor_space.release();
    }

    fn end_of_gc(&mut self, tls: VMWorkerThread) {
        self.common.end_of_gc(tls);
    }

    fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector> {
        &ALLOCATOR_MAPPING
    }

    fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
        // TODO: use schedule_common once it can work with the Compressor.
        // The main issue is that we need to use ForwardingProcessEdges
        // in the FinalizableForwarding bucket.

        // Stop & scan mutators (mutator scanning can happen before STW)
        scheduler.work_buckets[WorkBucketStage::Unconstrained]
            .add(StopMutators::<CompressorWorkContext<VM>>::new());

        // Prepare global/collectors/mutators
        scheduler.work_buckets[WorkBucketStage::Prepare]
            .add(Prepare::<CompressorWorkContext<VM>>::new(self));

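        // Calculate forwarding addresses for live objects in the compressor space.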
        scheduler.work_buckets[WorkBucketStage::CalculateForwarding].add(
            CalculateForwardingAddress::<VM>::new(&self.compressor_space),
        );
        // do another trace to update references
        scheduler.work_buckets[WorkBucketStage::SecondRoots].add(UpdateReferences::<VM>::new());
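        // Compact: move objects in the compressor space to their forwarding addresses.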
        scheduler.work_buckets[WorkBucketStage::Compact]
            .add(Compact::<VM>::new(&self.compressor_space, &self.common.los));

        // Release global/collectors/mutators
        scheduler.work_buckets[WorkBucketStage::Release]
            .add(Release::<CompressorWorkContext<VM>>::new(self));

        // Reference processing
        if !*self.base().options.no_reference_types {
            use crate::util::reference_processor::{
                PhantomRefProcessing, SoftRefProcessing, WeakRefProcessing,
            };
            scheduler.work_buckets[WorkBucketStage::SoftRefClosure]
                .add(SoftRefProcessing::<MarkingProcessEdges<VM>>::new());
            scheduler.work_buckets[WorkBucketStage::WeakRefClosure]
                .add(WeakRefProcessing::<VM>::new());
            scheduler.work_buckets[WorkBucketStage::PhantomRefClosure]
                .add(PhantomRefProcessing::<VM>::new());

            use crate::util::reference_processor::RefForwarding;
            scheduler.work_buckets[WorkBucketStage::RefForwarding]
                .add(RefForwarding::<ForwardingProcessEdges<VM>>::new());

            use crate::util::reference_processor::RefEnqueue;
            scheduler.work_buckets[WorkBucketStage::Release].add(RefEnqueue::<VM>::new());
        }

        // Finalization
        if !*self.base().options.no_finalizer {
            use crate::util::finalizable_processor::{Finalization, ForwardFinalization};
            // Treat finalizable objects as roots and perform a closure (marking).
            // This must be done before calculating forwarding addresses.
            scheduler.work_buckets[WorkBucketStage::FinalRefClosure]
                .add(Finalization::<MarkingProcessEdges<VM>>::new());
            // Update finalizable object references.
            // This must be done before compacting.
            scheduler.work_buckets[WorkBucketStage::FinalizableForwarding]
                .add(ForwardFinalization::<ForwardingProcessEdges<VM>>::new());
        }

        // VM-specific weak ref processing
        scheduler.work_buckets[WorkBucketStage::VMRefClosure]
            .set_sentinel(Box::new(VMProcessWeakRefs::<MarkingProcessEdges<VM>>::new()));

        // VM-specific weak ref forwarding
        scheduler.work_buckets[WorkBucketStage::VMRefForwarding]
            .add(VMForwardWeakRefs::<ForwardingProcessEdges<VM>>::new());

        // VM-specific work after forwarding, e.g. to implement reference enqueuing.
        scheduler.work_buckets[WorkBucketStage::Release].add(VMPostForwarding::<VM>::default());

        // Analysis GC work
        #[cfg(feature = "analysis")]
        {
            use crate::util::analysis::GcHookWork;
            scheduler.work_buckets[WorkBucketStage::Unconstrained].add(GcHookWork);
        }
        #[cfg(feature = "sanity")]
        scheduler.work_buckets[WorkBucketStage::Final]
            .add(crate::util::sanity::sanity_checker::ScheduleSanityGC::<Self>::new(self));
    }

    fn current_gc_may_move_object(&self) -> bool {
        true
    }

    fn get_used_pages(&self) -> usize {
        self.compressor_space.reserved_pages() + self.common.get_used_pages()
    }
}

impl<VM: VMBinding> Compressor<VM> {
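    /// Create a new Compressor plan, consisting of a compressor space plus the
    /// spaces of the common plan.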
    pub fn new(args: CreateGeneralPlanArgs<VM>) -> Self {
        let mut plan_args = CreateSpecificPlanArgs {
            global_args: args,
            constraints: &COMPRESSOR_CONSTRAINTS,
            global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]),
        };

        let res = Compressor {
            compressor_space: CompressorSpace::new(plan_args.get_space_args(
                "compressor_space",
                true,
                false,
                VMRequest::discontiguous(),
            )),
            common: CommonPlan::new(plan_args),
        };

        res.verify_side_metadata_sanity();

        res
    }
}