diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs
index e9a2ec037d..71e756de3f 100644
--- a/src/plan/markcompact/global.rs
+++ b/src/plan/markcompact/global.rs
@@ -112,9 +112,9 @@ impl<VM: VMBinding> Plan for MarkCompact<VM> {
         scheduler.work_buckets[WorkBucketStage::SoftRefClosure]
             .add(SoftRefProcessing::<MarkingProcessEdges<VM>>::new());
         scheduler.work_buckets[WorkBucketStage::WeakRefClosure]
-            .add(WeakRefProcessing::<MarkingProcessEdges<VM>>::new());
+            .add(WeakRefProcessing::<VM>::new());
         scheduler.work_buckets[WorkBucketStage::PhantomRefClosure]
-            .add(PhantomRefProcessing::<MarkingProcessEdges<VM>>::new());
+            .add(PhantomRefProcessing::<VM>::new());
 
         use crate::util::reference_processor::RefForwarding;
         scheduler.work_buckets[WorkBucketStage::RefForwarding]
diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs
index e6d332919e..ce125e8288 100644
--- a/src/policy/markcompactspace.rs
+++ b/src/policy/markcompactspace.rs
@@ -251,7 +251,12 @@ impl<VM: VMBinding> MarkCompactSpace<VM> {
             queue.enqueue(object);
         }
 
-        Self::get_header_forwarding_pointer(object)
+        let result = Self::get_header_forwarding_pointer(object);
+        debug_assert!(
+            !result.is_null(),
+            "Object {object} does not have a forwarding pointer"
+        );
+        result
     }
 
     pub fn test_and_mark(object: ObjectReference) -> bool {
diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs
index 8c754d6696..1c91609895 100644
--- a/src/scheduler/scheduler.rs
+++ b/src/scheduler/scheduler.rs
@@ -172,10 +172,9 @@ impl<VM: VMBinding> GCWorkScheduler<VM> {
             };
             self.work_buckets[WorkBucketStage::SoftRefClosure]
                 .add(SoftRefProcessing::<C::ProcessEdgesWorkType>::new());
-            self.work_buckets[WorkBucketStage::WeakRefClosure]
-                .add(WeakRefProcessing::<C::ProcessEdgesWorkType>::new());
+            self.work_buckets[WorkBucketStage::WeakRefClosure].add(WeakRefProcessing::<VM>::new());
             self.work_buckets[WorkBucketStage::PhantomRefClosure]
-                .add(PhantomRefProcessing::<C::ProcessEdgesWorkType>::new());
+                .add(PhantomRefProcessing::<VM>::new());
 
             use crate::util::reference_processor::RefForwarding;
             if plan.constraints().needs_forward_after_liveness {
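Note on the MarkCompactSpace change above: forwarding pointers are written into object headers during the calculate-forwarding phase, so a null result here means the object was never forwarded, and the new debug_assert catches that early instead of silently returning a null reference. A minimal standalone sketch of the invariant being asserted (the `Heap` and `Obj` types below are illustrative stand-ins, not MMTk's API):

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Obj(u64);

impl Obj {
    const NULL: Obj = Obj(0);
    fn is_null(self) -> bool {
        self.0 == 0
    }
}

struct Heap {
    // Maps an object to the forwarding pointer written into its header during the
    // "calculate forwarding pointers" phase of mark-compact.
    forwarding: HashMap<Obj, Obj>,
}

impl Heap {
    fn get_header_forwarding_pointer(&self, object: Obj) -> Obj {
        self.forwarding.get(&object).copied().unwrap_or(Obj::NULL)
    }

    fn get_forwarded_object(&self, object: Obj) -> Obj {
        let result = self.get_header_forwarding_pointer(object);
        // Mirrors the new debug_assert above: without it, a missing forwarding
        // pointer would silently propagate a null reference.
        debug_assert!(
            !result.is_null(),
            "Object {object:?} does not have a forwarding pointer"
        );
        result
    }
}

fn main() {
    let mut heap = Heap { forwarding: HashMap::new() };
    heap.forwarding.insert(Obj(0x1000), Obj(0x800));
    assert_eq!(heap.get_forwarded_object(Obj(0x1000)), Obj(0x800));
}
```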
diff --git a/src/util/finalizable_processor.rs b/src/util/finalizable_processor.rs
index 77ab7edbfc..698aad06fa 100644
--- a/src/util/finalizable_processor.rs
+++ b/src/util/finalizable_processor.rs
@@ -1,6 +1,7 @@
 use crate::plan::is_nursery_gc;
 use crate::scheduler::gc_work::ProcessEdgesWork;
 use crate::scheduler::{GCWork, GCWorker, WorkBucketStage};
+use crate::util::reference_processor::RescanReferences;
 use crate::util::ObjectReference;
 use crate::util::VMWorkerThread;
 use crate::vm::Finalizable;
@@ -140,6 +141,17 @@ pub struct Finalization<E: ProcessEdgesWork>(PhantomData<E>);
 
 impl<E: ProcessEdgesWork> GCWork<E::VM> for Finalization<E> {
     fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
+        if !*mmtk.options.no_reference_types {
+            // Rescan soft and weak references at the end of the transitive closure from
+            // resurrected objects. New soft and weak references may be discovered during this.
+            let rescan = Box::new(RescanReferences {
+                soft: true,
+                weak: true,
+                phantom_data: PhantomData,
+            });
+            worker.scheduler().work_buckets[WorkBucketStage::FinalRefClosure].set_sentinel(rescan);
+        }
+
         let mut finalizable_processor = mmtk.finalizable_processor.lock().unwrap();
         debug!(
             "Finalization, {} objects in candidates, {} objects ready to finalize",
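The `set_sentinel` call above relies on the scheduler's sentinel mechanism: a sentinel packet runs only once its bucket has drained, i.e. when the transitive closure computed by that bucket's packets has reached a fixed point. A toy model of the idea (not the MMTk scheduler API; `Bucket` and `Packet` below are simplified stand-ins):

```rust
enum Packet {
    // A real packet could push more work into the bucket; omitted for brevity.
    Work(&'static str),
}

struct Bucket {
    queue: Vec<Packet>,
    sentinel: Option<Box<dyn FnOnce()>>,
}

impl Bucket {
    fn set_sentinel(&mut self, s: Box<dyn FnOnce()>) {
        self.sentinel = Some(s);
    }

    fn drain(&mut self) {
        while let Some(p) = self.queue.pop() {
            match p {
                Packet::Work(name) => println!("executing {name}"),
            }
        }
        // Only after the bucket is empty does the sentinel run. Finalization uses
        // this to rescan soft/weak references after the closure from resurrected
        // objects has been fully computed.
        if let Some(s) = self.sentinel.take() {
            s();
        }
    }
}

fn main() {
    let mut bucket = Bucket {
        queue: vec![
            Packet::Work("Finalization"),
            Packet::Work("closure from resurrected objects"),
        ],
        sentinel: None,
    };
    bucket.set_sentinel(Box::new(|| println!("RescanReferences: soft + weak")));
    bucket.drain();
}
```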
diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs
index a0ce86cbd1..f7a4a17241 100644
--- a/src/util/reference_processor.rs
+++ b/src/util/reference_processor.rs
@@ -81,32 +81,24 @@ impl ReferenceProcessors {
     // Methods for scanning weak references. It needs to be called in a decreasing order of reference strengths, i.e. soft > weak > phantom
 
+    pub fn retain_soft_refs<E: ProcessEdgesWork>(&self, trace: &mut E, mmtk: &'static MMTK<E::VM>) {
+        self.soft.retain::<E>(trace, is_nursery_gc(mmtk.get_plan()));
+    }
+
     /// Scan soft references.
-    pub fn scan_soft_refs<E: ProcessEdgesWork>(&self, trace: &mut E, mmtk: &'static MMTK<E::VM>) {
-        // For soft refs, it is up to the VM to decide when to reclaim this.
-        // If this is not an emergency collection, we have no heap stress. We simply retain soft refs.
-        if !mmtk.state.is_emergency_collection() {
-            // This step only retains the referents (keep the referents alive), it does not update its addresses.
-            // We will call soft.scan() again with retain=false to update its addresses based on liveness.
-            self.soft.retain::<E>(trace, is_nursery_gc(mmtk.get_plan()));
-        }
+    pub fn scan_soft_refs<VM: VMBinding>(&self, mmtk: &'static MMTK<VM>) {
         // This will update the references (and the referents).
-        self.soft.scan::<E>(trace, is_nursery_gc(mmtk.get_plan()));
+        self.soft.scan::<VM>(is_nursery_gc(mmtk.get_plan()));
     }
 
     /// Scan weak references.
-    pub fn scan_weak_refs<E: ProcessEdgesWork>(&self, trace: &mut E, mmtk: &'static MMTK<E::VM>) {
-        self.weak.scan::<E>(trace, is_nursery_gc(mmtk.get_plan()));
+    pub fn scan_weak_refs<VM: VMBinding>(&self, mmtk: &'static MMTK<VM>) {
+        self.weak.scan::<VM>(is_nursery_gc(mmtk.get_plan()));
     }
 
     /// Scan phantom references.
-    pub fn scan_phantom_refs<E: ProcessEdgesWork>(
-        &self,
-        trace: &mut E,
-        mmtk: &'static MMTK<E::VM>,
-    ) {
-        self.phantom
-            .scan::<E>(trace, is_nursery_gc(mmtk.get_plan()));
+    pub fn scan_phantom_refs<VM: VMBinding>(&self, mmtk: &'static MMTK<VM>) {
+        self.phantom.scan::<VM>(is_nursery_gc(mmtk.get_plan()));
     }
 }
@@ -208,26 +200,40 @@ impl ReferenceProcessor {
         self.allow_new_candidate.store(true, Ordering::SeqCst);
     }
 
-    // These funcions simply call `trace_object()`, which does two things: 1. to make sure the object is kept alive
-    // and 2. to get the new object reference if the object is copied. The functions are intended to make the code
-    // easier to understand.
+    // These functions call `ObjectReference::get_forwarded_object`, not `trace_object()`.
+    // They are used by steps that do not expand the transitive closure. Processing weak and
+    // phantom references never expands the transitive closure. Soft references, when not
+    // retained, do not expand the transitive closure, either.
+    // These functions are intended to make the code easier to understand.
 
-    fn get_forwarded_referent<E: ProcessEdgesWork>(
-        e: &mut E,
-        referent: ObjectReference,
-    ) -> ObjectReference {
+    /// Return the new `ObjectReference` of a referent if it is already moved, or its current
+    /// `ObjectReference` otherwise. The referent must be live when calling this function.
+    fn get_forwarded_referent<VM: VMBinding>(referent: ObjectReference) -> ObjectReference {
+        debug_assert!(referent.is_live::<VM>());
         debug_assert!(!referent.is_null());
-        e.trace_object(referent)
+        referent.get_forwarded_object::<VM>().unwrap_or(referent)
     }
 
-    fn get_forwarded_reference<E: ProcessEdgesWork>(
-        e: &mut E,
-        object: ObjectReference,
-    ) -> ObjectReference {
+    /// Return the new `ObjectReference` of a reference object if it is already moved, or its
+    /// current `ObjectReference` otherwise. The reference object must be live when calling this
+    /// function.
+    fn get_forwarded_reference<VM: VMBinding>(object: ObjectReference) -> ObjectReference {
+        debug_assert!(object.is_live::<VM>());
         debug_assert!(!object.is_null());
-        e.trace_object(object)
+        object.get_forwarded_object::<VM>().unwrap_or(object)
     }
 
+    // These functions call `trace_object()`, which will ensure the object and its descendants
+    // are traced. They are only called in steps that expand the transitive closure. That
+    // includes retaining soft references, and (for MarkCompact) tracing objects for forwarding.
+    // Note that finalizers also expand the transitive closure.
+    // These functions are intended to make the code easier to understand.
+
+    /// This function is called when retaining soft references. It
+    /// -   keeps the referent alive, and
+    /// -   adds the referent to the tracing queue if not yet reached, so that its children will
+    ///     be kept alive, too, and
+    /// -   gets the new object reference of the referent if it is moved.
     fn keep_referent_alive<E: ProcessEdgesWork>(
         e: &mut E,
         referent: ObjectReference,
@@ -236,6 +242,18 @@ impl ReferenceProcessor {
         e.trace_object(referent)
     }
 
+    /// This function is called when forwarding the references and referents (for MarkCompact). It
+    /// -   adds the reference or the referent to the tracing queue if not yet reached, so that
+    ///     the children of the reference or referent will be kept alive, too, and
+    /// -   gets the forwarded object reference of the object.
+    fn trace_forward_object<E: ProcessEdgesWork>(
+        e: &mut E,
+        referent: ObjectReference,
+    ) -> ObjectReference {
+        debug_assert!(!referent.is_null());
+        e.trace_object(referent)
+    }
+
     /// Inform the binding to enqueue the weak references whose referents were cleared in this GC.
     pub fn enqueue(&self, tls: VMWorkerThread) {
         let mut sync = self.sync.lock().unwrap();
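The split between the two families of helpers above can be illustrated with a standalone sketch (toy types, not MMTk's API): a read-only forwarding lookup leaves the trace state untouched, while `trace_object` may enqueue the object so its children get scanned, expanding the closure. The real `trace_object` only enqueues objects not yet reached; this sketch enqueues unconditionally for brevity.

```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Obj(u32);

struct Tracer {
    queue: Vec<Obj>,            // objects whose children still need scanning
    forwarded: Vec<(Obj, Obj)>, // (old, new) pairs for moved objects
}

impl Tracer {
    // Read-only: the analogue of `get_forwarded_object`, used by weak/phantom
    // processing and by soft references that are not being retained.
    fn get_forwarded_object(&self, o: Obj) -> Option<Obj> {
        self.forwarded.iter().find(|(from, _)| *from == o).map(|(_, to)| *to)
    }

    // Expands the transitive closure: the analogue of `trace_object`, used when
    // retaining soft referents and when MarkCompact forwards references/referents.
    fn trace_object(&mut self, o: Obj) -> Obj {
        let new = self.get_forwarded_object(o).unwrap_or(o);
        self.queue.push(new); // children will be scanned later
        new
    }
}

fn main() {
    let mut t = Tracer { queue: vec![], forwarded: vec![(Obj(1), Obj(7))] };
    assert_eq!(t.get_forwarded_object(Obj(1)), Some(Obj(7)));
    assert!(t.queue.is_empty()); // reading does not grow the closure
    assert_eq!(t.trace_object(Obj(1)), Obj(7));
    assert_eq!(t.queue, vec![Obj(7)]); // tracing does
}
```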
@@ -298,7 +316,7 @@ impl ReferenceProcessor {
         }
 
         if !<E::VM as VMBinding>::VMReferenceGlue::is_referent_cleared(old_referent) {
-            let new_referent = ReferenceProcessor::get_forwarded_referent(trace, old_referent);
+            let new_referent = ReferenceProcessor::trace_forward_object(trace, old_referent);
             <E::VM as VMBinding>::VMReferenceGlue::set_referent(reference, new_referent);
 
             trace!(
@@ -308,7 +326,7 @@ impl ReferenceProcessor {
             );
         }
 
-        let new_reference = ReferenceProcessor::get_forwarded_reference(trace, reference);
+        let new_reference = ReferenceProcessor::trace_forward_object(trace, reference);
         trace!(" reference: forwarded to {}", new_reference);
 
         debug_assert!(
@@ -338,10 +356,11 @@
     }
 
     /// Scan the reference table, and update each reference/referent.
+    /// It doesn't keep the reference or the referent alive.
     // TODO: nursery is currently ignored. We used to use Vec for the reference table, and use an int
     // to point to the reference that we last scanned. However, when we use HashSet for reference table,
     // we can no longer do that.
-    fn scan<E: ProcessEdgesWork>(&self, trace: &mut E, _nursery: bool) {
+    fn scan<VM: VMBinding>(&self, _nursery: bool) {
         let mut sync = self.sync.lock().unwrap();
 
         debug!("Starting ReferenceProcessor.scan({:?})", self.semantics);
@@ -352,7 +371,7 @@
             sync.references
         );
 
-        debug_assert!(sync.enqueued_references.is_empty());
+        //debug_assert!(sync.enqueued_references.is_empty());
 
         // Put enqueued reference in this vec
         let mut enqueued_references = vec![];
@@ -360,7 +379,7 @@
         let new_set: HashSet<ObjectReference> = sync
             .references
             .iter()
-            .filter_map(|reff| self.process_reference(trace, *reff, &mut enqueued_references))
+            .filter_map(|reff| self.process_reference::<VM>(*reff, &mut enqueued_references))
             .collect();
 
         debug!(
@@ -371,7 +390,7 @@
             enqueued_references.len()
         );
         sync.references = new_set;
-        sync.enqueued_references = enqueued_references;
+        sync.enqueued_references.extend(enqueued_references);
 
         debug!("Ending ReferenceProcessor.scan({:?})", self.semantics);
     }
@@ -421,9 +440,8 @@
     ///
     /// If a None value is returned, the reference can be removed from the reference table. Otherwise, the updated reference should be kept
     /// in the reference table.
-    fn process_reference<E: ProcessEdgesWork>(
+    fn process_reference<VM: VMBinding>(
         &self,
-        trace: &mut E,
         reference: ObjectReference,
         enqueued_references: &mut Vec<ObjectReference>,
     ) -> Option<ObjectReference> {
@@ -433,34 +451,34 @@ impl ReferenceProcessor {
 
         // If the reference is dead, we're done with it. Let it (and
        // possibly its referent) be garbage-collected.
-        if !reference.is_live::<E::VM>() {
-            <E::VM as VMBinding>::VMReferenceGlue::clear_referent(reference);
+        if !reference.is_live::<VM>() {
+            VM::VMReferenceGlue::clear_referent(reference);
             trace!(" UNREACHABLE reference: {}", reference);
             trace!(" (unreachable)");
             return None;
         }
 
         // The reference object is live
-        let new_reference = Self::get_forwarded_reference(trace, reference);
-        let old_referent = <E::VM as VMBinding>::VMReferenceGlue::get_referent(reference);
+        let new_reference = Self::get_forwarded_reference::<VM>(reference);
+        let old_referent = VM::VMReferenceGlue::get_referent(reference);
 
         trace!(" ~> {}", old_referent);
 
         // If the application has cleared the referent the Java spec says
         // this does not cause the Reference object to be enqueued. We
         // simply allow the Reference object to fall out of our
         // waiting list.
-        if <E::VM as VMBinding>::VMReferenceGlue::is_referent_cleared(old_referent) {
+        if VM::VMReferenceGlue::is_referent_cleared(old_referent) {
             trace!(" (cleared referent) ");
             return None;
         }
 
         trace!(" => {}", new_reference);
 
-        if old_referent.is_live::<E::VM>() {
+        if old_referent.is_live::<VM>() {
             // Referent is still reachable in a way that is as strong as
             // or stronger than the current reference level.
-            let new_referent = Self::get_forwarded_referent(trace, old_referent);
-            debug_assert!(new_referent.is_live::<E::VM>());
+            let new_referent = Self::get_forwarded_referent::<VM>(old_referent);
+            debug_assert!(new_referent.is_live::<VM>());
             trace!(" ~> {}", new_referent);
 
             // The reference object stays on the waiting list, and the
@@ -470,13 +488,13 @@ impl ReferenceProcessor {
             // copying collector.
 
             // Update the referent
-            <E::VM as VMBinding>::VMReferenceGlue::set_referent(new_reference, new_referent);
+            VM::VMReferenceGlue::set_referent(new_reference, new_referent);
             Some(new_reference)
         } else {
             // Referent is unreachable. Clear the referent and enqueue the reference object.
             trace!(" UNREACHABLE referent: {}", old_referent);
-            <E::VM as VMBinding>::VMReferenceGlue::clear_referent(new_reference);
+            VM::VMReferenceGlue::clear_referent(new_reference);
             enqueued_references.push(new_reference);
             None
         }
     }
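For reference, the control flow of `process_reference` reduces to four outcomes. A self-contained sketch of that decision table (a hypothetical helper, not MMTk code; the three booleans stand for the queries made above through `is_live` and `VMReferenceGlue`):

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
enum Outcome {
    DropDeadReference,    // the reference object itself is dead
    DropClearedReference, // the mutator already cleared the referent
    Keep,                 // referent still live: update it and keep the reference in the table
    Enqueue,              // referent dead: clear it and enqueue the reference for the binding
}

fn process_reference(reference_live: bool, referent_cleared: bool, referent_live: bool) -> Outcome {
    if !reference_live {
        return Outcome::DropDeadReference;
    }
    if referent_cleared {
        return Outcome::DropClearedReference;
    }
    if referent_live {
        Outcome::Keep
    } else {
        Outcome::Enqueue
    }
}

fn main() {
    assert_eq!(process_reference(false, false, true), Outcome::DropDeadReference);
    assert_eq!(process_reference(true, true, false), Outcome::DropClearedReference);
    assert_eq!(process_reference(true, false, true), Outcome::Keep);
    assert_eq!(process_reference(true, false, false), Outcome::Enqueue);
}
```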
@@ -488,14 +506,48 @@
 use crate::scheduler::GCWorker;
 use crate::MMTK;
 use std::marker::PhantomData;
 
+#[derive(Default)]
+pub(crate) struct RescanReferences<VM: VMBinding> {
+    pub soft: bool,
+    pub weak: bool,
+    pub phantom_data: PhantomData<VM>,
+}
+
+impl<VM: VMBinding> GCWork<VM> for RescanReferences<VM> {
+    fn do_work(&mut self, _worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
+        if self.soft {
+            mmtk.reference_processors.scan_soft_refs(mmtk);
+        }
+        if self.weak {
+            mmtk.reference_processors.scan_weak_refs(mmtk);
+        }
+    }
+}
+
 #[derive(Default)]
 pub(crate) struct SoftRefProcessing<E: ProcessEdgesWork>(PhantomData<E>);
 impl<E: ProcessEdgesWork> GCWork<E::VM> for SoftRefProcessing<E> {
     fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
-        let mut w = E::new(vec![], false, mmtk, WorkBucketStage::SoftRefClosure);
-        w.set_worker(worker);
-        mmtk.reference_processors.scan_soft_refs(&mut w, mmtk);
-        w.flush();
+        if !mmtk.state.is_emergency_collection() {
+            // Postpone the scanning to the end of the transitive closure from strongly reachable
+            // soft references.
+            let rescan = Box::new(RescanReferences {
+                soft: true,
+                weak: false,
+                phantom_data: PhantomData,
+            });
+            worker.scheduler().work_buckets[WorkBucketStage::SoftRefClosure].set_sentinel(rescan);
+
+            // Retain soft references. This will expand the transitive closure. We create an
+            // instance of `E` for this.
+            let mut w = E::new(vec![], false, mmtk, WorkBucketStage::SoftRefClosure);
+            w.set_worker(worker);
+            mmtk.reference_processors.retain_soft_refs(&mut w, mmtk);
+            w.flush();
+        } else {
+            // Scan soft references immediately without retaining.
+            mmtk.reference_processors.scan_soft_refs(mmtk);
+        }
     }
 }
 impl<E: ProcessEdgesWork> SoftRefProcessing<E> {
@@ -505,32 +557,26 @@ impl<E: ProcessEdgesWork> SoftRefProcessing<E> {
 }
 
 #[derive(Default)]
-pub(crate) struct WeakRefProcessing<E: ProcessEdgesWork>(PhantomData<E>);
-impl<E: ProcessEdgesWork> GCWork<E::VM> for WeakRefProcessing<E> {
-    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
-        let mut w = E::new(vec![], false, mmtk, WorkBucketStage::WeakRefClosure);
-        w.set_worker(worker);
-        mmtk.reference_processors.scan_weak_refs(&mut w, mmtk);
-        w.flush();
+pub(crate) struct WeakRefProcessing<VM: VMBinding>(PhantomData<VM>);
+impl<VM: VMBinding> GCWork<VM> for WeakRefProcessing<VM> {
+    fn do_work(&mut self, _worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
+        mmtk.reference_processors.scan_weak_refs(mmtk);
     }
 }
-impl<E: ProcessEdgesWork> WeakRefProcessing<E> {
+impl<VM: VMBinding> WeakRefProcessing<VM> {
     pub fn new() -> Self {
         Self(PhantomData)
     }
 }
 
 #[derive(Default)]
-pub(crate) struct PhantomRefProcessing<E: ProcessEdgesWork>(PhantomData<E>);
-impl<E: ProcessEdgesWork> GCWork<E::VM> for PhantomRefProcessing<E> {
-    fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
-        let mut w = E::new(vec![], false, mmtk, WorkBucketStage::PhantomRefClosure);
-        w.set_worker(worker);
-        mmtk.reference_processors.scan_phantom_refs(&mut w, mmtk);
-        w.flush();
+pub(crate) struct PhantomRefProcessing<VM: VMBinding>(PhantomData<VM>);
+impl<VM: VMBinding> GCWork<VM> for PhantomRefProcessing<VM> {
+    fn do_work(&mut self, _worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
+        mmtk.reference_processors.scan_phantom_refs(mmtk);
     }
 }
-impl<E: ProcessEdgesWork> PhantomRefProcessing<E> {
+impl<VM: VMBinding> PhantomRefProcessing<VM> {
     pub fn new() -> Self {
         Self(PhantomData)
     }
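Putting the pieces together, one GC cycle under this patch roughly proceeds as printed below. This is a simplified summary sketch, not MMTk code: the bucket names are real `WorkBucketStage` variants, but the annotations compress the behavior, and plans without forwarding skip the last stage.

```rust
fn main() {
    // Approximate order of the weak-reference-related buckets in one GC.
    let stages = [
        ("Closure", "strong transitive closure"),
        ("SoftRefClosure", "retain_soft_refs (traces) -> sentinel RescanReferences{soft}"),
        ("WeakRefClosure", "scan_weak_refs (no tracing)"),
        ("FinalRefClosure", "Finalization (traces) -> sentinel RescanReferences{soft, weak}"),
        ("PhantomRefClosure", "scan_phantom_refs (no tracing)"),
        ("RefForwarding", "MarkCompact only: trace_forward_object"),
    ];
    for (bucket, what) in stages {
        println!("{bucket:>17}: {what}");
    }
}
```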