From 230fcdf5caf0621bb6609823926332191eb09f0c Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 16 Mar 2022 09:04:13 +1100 Subject: [PATCH 01/25] Get ReferenceProcessor work --- src/plan/markcompact/global.rs | 6 +- src/scheduler/scheduler.rs | 100 ++++++++++----- src/scheduler/work_bucket.rs | 8 +- src/util/address.rs | 2 + src/util/reference_processor.rs | 208 +++++++++++++++++++++++++++----- src/vm/reference_glue.rs | 14 +-- 6 files changed, 262 insertions(+), 76 deletions(-) diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index bbc45a4223..d72600ea6e 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -98,7 +98,7 @@ impl Plan for MarkCompact { .add(Prepare::>::new(self)); // VM-specific weak ref processing - scheduler.work_buckets[WorkBucketStage::RefClosure] + scheduler.work_buckets[WorkBucketStage::WeakRefClosure] .add(ProcessWeakRefs::>::new()); scheduler.work_buckets[WorkBucketStage::CalculateForwarding] @@ -117,11 +117,11 @@ impl Plan for MarkCompact { // finalization // treat finalizable objects as roots and perform a closure (marking) // must be done before calculating forwarding pointers - scheduler.work_buckets[WorkBucketStage::RefClosure] + scheduler.work_buckets[WorkBucketStage::FinalRefClosure] .add(Finalization::>::new()); // update finalizable object references // must be done before compacting - scheduler.work_buckets[WorkBucketStage::RefForwarding] + scheduler.work_buckets[WorkBucketStage::FinalizableForwarding] .add(ForwardFinalization::>::new()); } diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index 1ddefbe6cd..a80abb38ae 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -57,9 +57,13 @@ impl GCWorkScheduler { WorkBucketStage::Unconstrained => WorkBucket::new(true, worker_monitor.clone()), WorkBucketStage::Prepare => WorkBucket::new(false, worker_monitor.clone()), WorkBucketStage::Closure => WorkBucket::new(false, worker_monitor.clone()), - WorkBucketStage::RefClosure => WorkBucket::new(false, worker_monitor.clone()), + WorkBucketStage::SoftRefClosure => WorkBucket::new(false, worker_monitor.clone()), + WorkBucketStage::WeakRefClosure => WorkBucket::new(false, worker_monitor.clone()), + WorkBucketStage::FinalRefClosure => WorkBucket::new(false, worker_monitor.clone()), + WorkBucketStage::PhantomRefClosure => WorkBucket::new(false, worker_monitor.clone()), WorkBucketStage::CalculateForwarding => WorkBucket::new(false, worker_monitor.clone()), WorkBucketStage::RefForwarding => WorkBucket::new(false, worker_monitor.clone()), + WorkBucketStage::FinalizableForwarding => WorkBucket::new(false, worker_monitor.clone()), WorkBucketStage::Compact => WorkBucket::new(false, worker_monitor.clone()), WorkBucketStage::Release => WorkBucket::new(false, worker_monitor.clone()), WorkBucketStage::Final => WorkBucket::new(false, worker_monitor.clone()), @@ -77,7 +81,7 @@ impl GCWorkScheduler { let should_open = scheduler.are_buckets_drained(&cur_stages) && scheduler.all_workers_parked(); // Additional check before the `RefClosure` bucket opens. - if should_open && s == WorkBucketStage::RefClosure { + if should_open && s == crate::scheduler::work_bucket::LAST_CLOSURE_BUCKET { if let Some(closure_end) = scheduler.closure_end.lock().unwrap().as_ref() { if closure_end() { // Don't open `RefClosure` if `closure_end` added more works to `Closure`. 
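A note on the hunk above: the single RefClosure stage is split into SoftRefClosure, WeakRefClosure, FinalRefClosure and PhantomRefClosure later in this patch, so the scheduler can no longer hard-code WorkBucketStage::RefClosure here. It instead compares against a named constant whose definition appears in the work_bucket.rs hunk below:

    pub const LAST_CLOSURE_BUCKET: WorkBucketStage = WorkBucketStage::PhantomRefClosure;

This keeps the closure_end check tied to the last of the closure-style buckets even if that set of buckets changes again.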
@@ -91,9 +95,13 @@ impl GCWorkScheduler { }; open_next(Closure); - open_next(RefClosure); + open_next(SoftRefClosure); + open_next(WeakRefClosure); + open_next(FinalRefClosure); + open_next(PhantomRefClosure); open_next(CalculateForwarding); open_next(RefForwarding); + open_next(FinalizableForwarding); open_next(Compact); open_next(Release); open_next(Final); @@ -182,7 +190,7 @@ impl GCWorkScheduler { self.work_buckets[WorkBucketStage::Prepare].add(Prepare::::new(plan)); // VM-specific weak ref processing - self.work_buckets[WorkBucketStage::RefClosure] + self.work_buckets[WorkBucketStage::WeakRefClosure] .add(ProcessWeakRefs::::new()); // Release global/collectors/mutators @@ -203,15 +211,26 @@ impl GCWorkScheduler { .add(ScheduleSanityGC::::new(plan)); } + // Reference processing + if !*plan.base().options.no_reference_types { + use crate::util::reference_processor::{SoftRefProcessing, WeakRefProcessing, PhantomRefProcessing}; + self.work_buckets[WorkBucketStage::SoftRefClosure].add(SoftRefProcessing::::new()); + self.work_buckets[WorkBucketStage::WeakRefClosure].add(WeakRefProcessing::::new()); + self.work_buckets[WorkBucketStage::PhantomRefClosure].add(PhantomRefProcessing::::new()); + + use crate::util::reference_processor::RefForwarding; + self.work_buckets[WorkBucketStage::RefForwarding].add(RefForwarding::::new()); + } + // Finalization if !*plan.base().options.no_finalizer { use crate::util::finalizable_processor::{Finalization, ForwardFinalization}; // finalization - self.work_buckets[WorkBucketStage::RefClosure] + self.work_buckets[WorkBucketStage::FinalRefClosure] .add(Finalization::::new()); // forward refs if plan.constraints().needs_forward_after_liveness { - self.work_buckets[WorkBucketStage::RefForwarding] + self.work_buckets[WorkBucketStage::FinalizableForwarding] .add(ForwardFinalization::::new()); } } @@ -246,36 +265,57 @@ impl GCWorkScheduler { } pub fn deactivate_all(&self) { - self.work_buckets[WorkBucketStage::Prepare].deactivate(); - self.work_buckets[WorkBucketStage::Closure].deactivate(); - self.work_buckets[WorkBucketStage::RefClosure].deactivate(); - self.work_buckets[WorkBucketStage::CalculateForwarding].deactivate(); - self.work_buckets[WorkBucketStage::RefForwarding].deactivate(); - self.work_buckets[WorkBucketStage::Compact].deactivate(); - self.work_buckets[WorkBucketStage::Release].deactivate(); - self.work_buckets[WorkBucketStage::Final].deactivate(); + for (stage, bucket) in self.work_buckets.iter() { + if stage == WorkBucketStage::Unconstrained { + continue; + } + + bucket.deactivate(); + } + // self.work_buckets[WorkBucketStage::Prepare].deactivate(); + // self.work_buckets[WorkBucketStage::Closure].deactivate(); + // self.work_buckets[WorkBucketStage::RefClosure].deactivate(); + // self.work_buckets[WorkBucketStage::CalculateForwarding].deactivate(); + // self.work_buckets[WorkBucketStage::RefForwarding].deactivate(); + // self.work_buckets[WorkBucketStage::Compact].deactivate(); + // self.work_buckets[WorkBucketStage::Release].deactivate(); + // self.work_buckets[WorkBucketStage::Final].deactivate(); } pub fn reset_state(&self) { - // self.work_buckets[WorkBucketStage::Prepare].deactivate(); - self.work_buckets[WorkBucketStage::Closure].deactivate(); - self.work_buckets[WorkBucketStage::RefClosure].deactivate(); - self.work_buckets[WorkBucketStage::CalculateForwarding].deactivate(); - self.work_buckets[WorkBucketStage::RefForwarding].deactivate(); - self.work_buckets[WorkBucketStage::Compact].deactivate(); - 
self.work_buckets[WorkBucketStage::Release].deactivate(); - self.work_buckets[WorkBucketStage::Final].deactivate(); + for (stage, bucket) in self.work_buckets.iter() { + if stage == WorkBucketStage::Unconstrained || stage == WorkBucketStage::Prepare { + continue; + } + + bucket.deactivate(); + } + // // self.work_buckets[WorkBucketStage::Prepare].deactivate(); + // self.work_buckets[WorkBucketStage::Closure].deactivate(); + // self.work_buckets[WorkBucketStage::RefClosure].deactivate(); + // self.work_buckets[WorkBucketStage::CalculateForwarding].deactivate(); + // self.work_buckets[WorkBucketStage::RefForwarding].deactivate(); + // self.work_buckets[WorkBucketStage::Compact].deactivate(); + // self.work_buckets[WorkBucketStage::Release].deactivate(); + // self.work_buckets[WorkBucketStage::Final].deactivate(); } pub fn debug_assert_all_buckets_deactivated(&self) { - debug_assert!(!self.work_buckets[WorkBucketStage::Prepare].is_activated()); - debug_assert!(!self.work_buckets[WorkBucketStage::Closure].is_activated()); - debug_assert!(!self.work_buckets[WorkBucketStage::RefClosure].is_activated()); - debug_assert!(!self.work_buckets[WorkBucketStage::CalculateForwarding].is_activated()); - debug_assert!(!self.work_buckets[WorkBucketStage::RefForwarding].is_activated()); - debug_assert!(!self.work_buckets[WorkBucketStage::Compact].is_activated()); - debug_assert!(!self.work_buckets[WorkBucketStage::Release].is_activated()); - debug_assert!(!self.work_buckets[WorkBucketStage::Final].is_activated()); + for (stage, bucket) in self.work_buckets.iter() { + if stage == WorkBucketStage::Unconstrained { + return; + } + + debug_assert!(!bucket.is_activated()); + } + // debug_assert!(!self.work_buckets[WorkBucketStage::Prepare].is_activated()); + // debug_assert!(!self.work_buckets[WorkBucketStage::Closure].is_activated()); + // debug_assert!(!self.work_buckets[WorkBucketStage::RefClosure].is_activated()); + // debug_assert!(!self.work_buckets[WorkBucketStage::CalculateForwarding].is_activated()); + // debug_assert!(!self.work_buckets[WorkBucketStage::RefForwarding].is_activated()); + // debug_assert!(!self.work_buckets[WorkBucketStage::Compact].is_activated()); + // debug_assert!(!self.work_buckets[WorkBucketStage::Release].is_activated()); + // debug_assert!(!self.work_buckets[WorkBucketStage::Final].is_activated()); } pub fn add_coordinator_work(&self, work: impl CoordinatorWork, worker: &GCWorker) { diff --git a/src/scheduler/work_bucket.rs b/src/scheduler/work_bucket.rs index cc6badd08b..e27399faed 100644 --- a/src/scheduler/work_bucket.rs +++ b/src/scheduler/work_bucket.rs @@ -156,10 +156,16 @@ pub enum WorkBucketStage { Closure, // TODO: We only support final reference at the moment. If we have references of multiple strengths, // we may need more than one buckets for each reference strength. 
- RefClosure, + SoftRefClosure, + WeakRefClosure, + FinalRefClosure, + PhantomRefClosure, CalculateForwarding, RefForwarding, + FinalizableForwarding, Compact, Release, Final, } + +pub const LAST_CLOSURE_BUCKET: WorkBucketStage = WorkBucketStage::PhantomRefClosure; diff --git a/src/util/address.rs b/src/util/address.rs index 4e44d757eb..02689fd566 100644 --- a/src/util/address.rs +++ b/src/util/address.rs @@ -453,6 +453,8 @@ mod tests { pub struct ObjectReference(usize); impl ObjectReference { + pub const NULL: ObjectReference = ObjectReference(0); + /// converts the ObjectReference to an Address #[inline(always)] pub fn to_address(self) -> Address { diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index 17d279e15e..9cbe7f26e0 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -2,11 +2,12 @@ use std::cell::UnsafeCell; use std::sync::Mutex; use std::vec::Vec; -use crate::plan::TraceLocal; +// use crate::plan::TraceLocal; use crate::util::opaque_pointer::*; use crate::util::{Address, ObjectReference}; use crate::vm::ReferenceGlue; use crate::vm::VMBinding; +use crate::scheduler::ProcessEdgesWork; pub struct ReferenceProcessors { soft: ReferenceProcessor, @@ -42,6 +43,7 @@ impl ReferenceProcessors { reff: ObjectReference, referent: ObjectReference, ) { + trace!("Add soft candidate: {} -> {}", reff, referent); self.soft.add_candidate::(reff, referent); } @@ -50,6 +52,7 @@ impl ReferenceProcessors { reff: ObjectReference, referent: ObjectReference, ) { + trace!("Add weak candidate: {} -> {}", reff, referent); self.weak.add_candidate::(reff, referent); } @@ -58,30 +61,31 @@ impl ReferenceProcessors { reff: ObjectReference, referent: ObjectReference, ) { + trace!("Add phantom candidate: {} -> {}", reff, referent); self.phantom.add_candidate::(reff, referent); } - pub fn forward_refs(&self, trace: &mut T) { - self.soft.forward::(trace, false); - self.weak.forward::(trace, false); - self.phantom.forward::(trace, false); + pub fn forward_refs(&self, trace: &mut E) { + self.soft.forward::(trace, false); + self.weak.forward::(trace, false); + self.phantom.forward::(trace, false); } - pub fn scan_weak_refs(&self, trace: &mut T, tls: VMWorkerThread) { - self.soft.scan::(trace, false, false, tls); - self.weak.scan::(trace, false, false, tls); + pub fn scan_weak_refs(&self, trace: &mut E, tls: VMWorkerThread) { + self.soft.scan::(trace, false, false, tls); + self.weak.scan::(trace, false, false, tls); } - pub fn scan_soft_refs(&self, trace: &mut T, tls: VMWorkerThread) { - self.soft.scan::(trace, false, false, tls); + pub fn scan_soft_refs(&self, trace: &mut E, tls: VMWorkerThread) { + self.soft.scan::(trace, false, false, tls); } - pub fn scan_phantom_refs( + pub fn scan_phantom_refs( &self, - trace: &mut T, + trace: &mut E, tls: VMWorkerThread, ) { - self.phantom.scan::(trace, false, false, tls); + self.phantom.scan::(trace, false, false, tls); } } @@ -92,10 +96,10 @@ impl Default for ReferenceProcessors { } // Debug flags -pub const TRACE: bool = false; -pub const TRACE_UNREACHABLE: bool = false; -pub const TRACE_DETAIL: bool = false; -pub const TRACE_FORWARD: bool = false; +pub const TRACE: bool = true; +pub const TRACE_UNREACHABLE: bool = true; +pub const TRACE_DETAIL: bool = true; +pub const TRACE_FORWARD: bool = true; // XXX: We differ from the original implementation // by ignoring "stress," i.e. 
where the array @@ -188,7 +192,19 @@ impl ReferenceProcessor { sync.references.push(reff.to_address()); } - pub fn forward(&self, trace: &mut T, _nursery: bool) { + fn get_forwarded_referent(e: &mut E, object: ObjectReference) -> ObjectReference { + e.trace_object(object) + } + + fn get_forwarded_reference(e: &mut E, object: ObjectReference) -> ObjectReference { + e.trace_object(object) + } + + fn retain_referent(e: &mut E, object: ObjectReference) -> ObjectReference { + e.trace_object(object) + } + + pub fn forward(&self, trace: &mut E, _nursery: bool) { let mut sync = unsafe { self.sync_mut() }; let references: &mut Vec
= &mut sync.references; // XXX: Copies `unforwarded_references` out. Should be fine since it's not accessed @@ -215,11 +231,11 @@ impl ReferenceProcessor { if TRACE_DETAIL { trace!("slot {:?}: forwarding {:?}", i, reference); } - VM::VMReferenceGlue::set_referent( + ::VMReferenceGlue::set_referent( reference, - trace.get_forwarded_referent(VM::VMReferenceGlue::get_referent(reference)), + Self::get_forwarded_referent(trace, ::VMReferenceGlue::get_referent(reference)), ); - let new_reference = trace.get_forwarded_reference(reference); + let new_reference = Self::get_forwarded_reference(trace, reference); *unforwarded_ref = new_reference.to_address(); } @@ -229,9 +245,9 @@ impl ReferenceProcessor { sync.unforwarded_references = None; } - fn scan( + fn scan( &self, - trace: &mut T, + trace: &mut E, nursery: bool, retain: bool, tls: VMWorkerThread, @@ -252,14 +268,14 @@ impl ReferenceProcessor { if retain { for addr in references.iter().skip(from_index) { let reference = unsafe { addr.to_object_reference() }; - self.retain_referent::(trace, reference); + self.scan_retain_referent::(trace, reference); } } else { for i in from_index..references.len() { let reference = unsafe { references[i].to_object_reference() }; /* Determine liveness (and forward if necessary) the reference */ - let new_reference = VM::VMReferenceGlue::process_reference(trace, reference, tls); + let new_reference = self.process_reference(trace, reference, tls); if !new_reference.is_null() { references[to_index] = new_reference.to_address(); to_index += 1; @@ -287,7 +303,7 @@ impl ReferenceProcessor { /* flush out any remset entries generated during the above activities */ // FIXME: We are calling mutator() for a worker thread - panic!("We are calling mutator() for a worker tls. We need to fix this."); + // panic!("We are calling mutator() for a worker tls. We need to fix this."); // unsafe { VM::VMActivePlan::mutator(tls)) }.flush_remembered_sets(); // if TRACE { // trace!("Ending ReferenceProcessor.scan({:?})", self.semantics); @@ -301,9 +317,9 @@ impl ReferenceProcessor { * be the address of a heap object, depending on the VM. * @param trace the thread local trace element. */ - fn retain_referent( + fn scan_retain_referent( &self, - trace: &mut T, + trace: &mut E, reference: ObjectReference, ) { debug_assert!(!reference.is_null()); @@ -324,12 +340,144 @@ impl ReferenceProcessor { /* * Reference is definitely reachable. Retain the referent. */ - let referent = VM::VMReferenceGlue::get_referent(reference); + let referent = ::VMReferenceGlue::get_referent(reference); if !referent.is_null() { - trace.retain_referent(referent); + Self::retain_referent(trace, referent); } if TRACE_DETAIL { trace!(" ~> {:?} (retained)", referent.to_address()); } } + + /// Process a reference with the current semantics and return an updated reference (e.g. with a new address) + /// if the reference is still alive, otherwise return a null object reference. + /// + /// Arguments: + /// * `trace`: A reference to a `TraceLocal` object for this reference. + /// * `reference`: The address of the reference. This may or may not be the address of a heap object, depending on the VM. + /// * `tls`: The GC thread that is processing this reference. + fn process_reference( + &self, + trace: &mut E, + reference: ObjectReference, + tls: VMWorkerThread, + ) -> ObjectReference { + debug_assert!(!reference.is_null()); + + if TRACE_DETAIL { trace!("Process reference: {}", reference); } + + // If the reference is dead, we're done with it. 
Let it (and + // possibly its referent) be garbage-collected. + if !reference.is_live() { + ::VMReferenceGlue::clear_referent(reference); + if TRACE_UNREACHABLE { trace!(" UNREACHABLE reference: {}", reference); } + if TRACE_DETAIL { trace!(" (unreachable)"); } + return ObjectReference::NULL; + } + + // The reference object is live + let new_reference = Self::get_forwarded_reference(trace, reference); + let old_referent = ::VMReferenceGlue::get_referent(reference); + + if TRACE_DETAIL { trace!(" ~> {}", old_referent); } + + // If the application has cleared the referent the Java spec says + // this does not cause the Reference object to be enqueued. We + // simply allow the Reference object to fall out of our + // waiting list. + if old_referent.is_null() { + if TRACE_DETAIL { trace!(" (null referent) "); } + return ObjectReference::NULL; + } + + if TRACE_DETAIL { trace!(" => {}", new_reference); } + + if old_referent.is_live() { + // Referent is still reachable in a way that is as strong as + // or stronger than the current reference level. + let new_referent = Self::get_forwarded_referent(trace, old_referent); + + if TRACE_DETAIL { trace!(" ~> {}", new_referent); } + debug_assert!(new_referent.is_live()); + + // The reference object stays on the waiting list, and the + // referent is untouched. The only thing we must do is + // ensure that the former addresses are updated with the + // new forwarding addresses in case the collector is a + // copying collector. + + // Update the referent + ::VMReferenceGlue::set_referent(new_reference, new_referent); + return new_reference; + } else { + // Referent is unreachable. Clear the referent and enqueue the reference object. + if TRACE_DETAIL { trace!(" UNREACHABLE"); } + else if TRACE_UNREACHABLE { trace!(" UNREACHABLE referent: {}", old_referent); } + + ::VMReferenceGlue::clear_referent(new_reference); + ::VMReferenceGlue::enqueue_reference(new_reference); + return ObjectReference::NULL; + } + } +} + +use crate::scheduler::GCWork; +use crate::scheduler::GCWorker; +use crate::MMTK; +use std::marker::PhantomData; + +pub struct SoftRefProcessing(PhantomData); +impl GCWork for SoftRefProcessing { + fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { + let mut w = E::new(vec![], false, mmtk); + w.set_worker(worker); + mmtk.reference_processors.scan_soft_refs(&mut w, worker.tls); + } +} +impl SoftRefProcessing { + pub fn new() -> Self { + Self(PhantomData) + } +} + +pub struct WeakRefProcessing(PhantomData); +impl GCWork for WeakRefProcessing { + fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { + let mut w = E::new(vec![], false, mmtk); + w.set_worker(worker); + mmtk.reference_processors.scan_weak_refs(&mut w, worker.tls); + } +} +impl WeakRefProcessing { + pub fn new() -> Self { + Self(PhantomData) + } +} + +pub struct PhantomRefProcessing(PhantomData); +impl GCWork for PhantomRefProcessing { + fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { + let mut w = E::new(vec![], false, mmtk); + w.set_worker(worker); + mmtk.reference_processors.scan_phantom_refs(&mut w, worker.tls); + } +} +impl PhantomRefProcessing { + pub fn new() -> Self { + Self(PhantomData) + } +} + +pub struct RefForwarding(PhantomData); +impl GCWork for RefForwarding { + fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { + let mut w = E::new(vec![], false, mmtk); + w.set_worker(worker); + mmtk.reference_processors.forward_refs(&mut w); + } +} +impl RefForwarding { + pub fn new() -> Self { + Self(PhantomData) + } } 
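The four work packets above (SoftRefProcessing, WeakRefProcessing, PhantomRefProcessing and RefForwarding) follow the same pattern: build the plan-specific ProcessEdgesWork with an empty root list, attach the current worker, and hand it to the reference processors, whose get_forwarded_referent / get_forwarded_reference / retain_referent helpers simply call trace_object on it. That is what allows the old TraceLocal-based hooks to be dropped. The next file diff trims ReferenceGlue accordingly: process_reference moves into ReferenceProcessor above, and the binding is only asked to enqueue references once mmtk-core has cleared them. Pieced together from the call sites in reference_processor.rs, the trait should end up roughly as follows (a sketch, not the verbatim trait; doc comments and any default methods are omitted):

    // Sketch of ReferenceGlue after this patch, reconstructed from call sites.
    pub trait ReferenceGlue<VM: VMBinding> {
        /// Clear the referent field of a reference object.
        fn clear_referent(reff: ObjectReference);
        /// Read the referent field of a reference object.
        fn get_referent(object: ObjectReference) -> ObjectReference;
        /// Write a (possibly forwarded) referent back into the reference object.
        fn set_referent(reff: ObjectReference, referent: ObjectReference);
        /// Hand a cleared reference back to the VM, e.g. to be put on a
        /// java.lang.ref.ReferenceQueue-style queue for the application.
        fn enqueue_reference(object: ObjectReference);
    }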
diff --git a/src/vm/reference_glue.rs b/src/vm/reference_glue.rs index e3520f77eb..f7caa5d9d0 100644 --- a/src/vm/reference_glue.rs +++ b/src/vm/reference_glue.rs @@ -3,6 +3,7 @@ use crate::util::opaque_pointer::*; use crate::util::Address; use crate::util::ObjectReference; use crate::vm::VMBinding; +use crate::scheduler::ProcessEdgesWork; /// VM-specific methods for reference processing. pub trait ReferenceGlue { @@ -30,16 +31,5 @@ pub trait ReferenceGlue { /// * `referent`: The referent object reference. fn set_referent(reff: ObjectReference, referent: ObjectReference); - /// Process a reference with the current semantics and return an updated reference (e.g. with a new address) - /// if the reference is still alive, otherwise return a null object reference. - /// - /// Arguments: - /// * `trace`: A reference to a `TraceLocal` object for this reference. - /// * `reference`: The address of the reference. This may or may not be the address of a heap object, depending on the VM. - /// * `tls`: The GC thread that is processing this reference. - fn process_reference( - trace: &mut T, - reference: ObjectReference, - tls: VMWorkerThread, - ) -> ObjectReference; + fn enqueue_reference(object: ObjectReference); } From 63e1fdace31caf8494c612f56e0aced7af7eb644 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 16 Mar 2022 09:20:34 +1100 Subject: [PATCH 02/25] No need to set referent when a reference is added --- src/util/reference_processor.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index 9cbe7f26e0..f43e08d49c 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -188,7 +188,7 @@ impl ReferenceProcessor { pub fn add_candidate(&self, reff: ObjectReference, referent: ObjectReference) { let mut sync = self.sync().lock().unwrap(); - VM::VMReferenceGlue::set_referent(reff, referent); + // VM::VMReferenceGlue::set_referent(reff, referent); sync.references.push(reff.to_address()); } From 3589cfe9153df5d9136e5bc5af4ac1362634347b Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 16 Mar 2022 10:55:49 +1100 Subject: [PATCH 03/25] Fix Immix post copy --- src/plan/immix/global.rs | 2 ++ src/policy/immix/immixspace.rs | 28 +++++++++++++++++++++++----- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 71643c0065..d70b93a46b 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -88,8 +88,10 @@ impl Plan for Immix { // The blocks are not identical, clippy is wrong. Probably it does not recognize the constant type parameter. 
#[allow(clippy::if_same_then_else)] if in_defrag { + info!("Defrag GC"); scheduler.schedule_common_work::>(self); } else { + info!("Fast GC"); scheduler.schedule_common_work::>(self); } } diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 0566050aae..f4edd541e8 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -14,7 +14,7 @@ use crate::util::heap::HeapMeta; use crate::util::heap::PageResource; use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::{self, *}; -use crate::util::metadata::{self, compare_exchange_metadata, load_metadata, MetadataSpec}; +use crate::util::metadata::{self, compare_exchange_metadata, load_metadata, MetadataSpec, store_metadata}; use crate::util::object_forwarding as ForwardingWord; use crate::util::{Address, ObjectReference}; use crate::vm::*; @@ -407,14 +407,12 @@ impl ImmixSpace { crate::util::alloc_bit::unset_alloc_bit(object); ForwardingWord::forward_object::(object, semantics, copy_context) }; - if !super::MARK_LINE_AT_SCAN_TIME { - self.mark_lines(new_object); - } debug_assert_eq!( Block::containing::(new_object).get_state(), BlockState::Marked ); trace.process_node(new_object); + debug_assert!(new_object.is_live()); new_object } } @@ -643,12 +641,26 @@ impl PolicyCopyContext for ImmixCopyContext { align: usize, offset: isize, ) -> Address { - if self.defrag_allocator.immix_space().in_defrag() { + if self.get_space().in_defrag() { self.defrag_allocator.alloc(bytes, align, offset) } else { self.copy_allocator.alloc(bytes, align, offset) } } + #[inline(always)] + fn post_copy(&mut self, obj: ObjectReference, _bytes: usize) { + // mark the object + store_metadata::( + &VM::VMObjectModel::LOCAL_MARK_BIT_SPEC, + obj, + self.get_space().mark_state as usize, + None, + Some(Ordering::SeqCst), + ); + if !super::MARK_LINE_AT_SCAN_TIME { + self.get_space().mark_lines(obj); + } + } } impl ImmixCopyContext { @@ -662,4 +674,10 @@ impl ImmixCopyContext { defrag_allocator: ImmixAllocator::new(tls.0, Some(space), plan, true), } } + + fn get_space(&self) -> &ImmixSpace { + // Same space + debug_assert_eq!(self.defrag_allocator.immix_space().common().descriptor, self.copy_allocator.immix_space().common().descriptor); + self.defrag_allocator.immix_space() + } } From 9373a71bc9b7a3f5c390ddb9f364300f2499e0ea Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 16 Mar 2022 11:00:22 +1100 Subject: [PATCH 04/25] Add post_copy for ImmixCopyContext: mark object and line --- src/policy/immix/immixspace.rs | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 0566050aae..92000fc444 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -14,7 +14,7 @@ use crate::util::heap::HeapMeta; use crate::util::heap::PageResource; use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::{self, *}; -use crate::util::metadata::{self, compare_exchange_metadata, load_metadata, MetadataSpec}; +use crate::util::metadata::{self, compare_exchange_metadata, load_metadata, MetadataSpec, store_metadata}; use crate::util::object_forwarding as ForwardingWord; use crate::util::{Address, ObjectReference}; use crate::vm::*; @@ -407,14 +407,12 @@ impl ImmixSpace { crate::util::alloc_bit::unset_alloc_bit(object); ForwardingWord::forward_object::(object, semantics, copy_context) }; - if !super::MARK_LINE_AT_SCAN_TIME { - self.mark_lines(new_object); - } debug_assert_eq!( 
Block::containing::(new_object).get_state(), BlockState::Marked ); trace.process_node(new_object); + debug_assert!(new_object.is_live()); new_object } } @@ -643,12 +641,27 @@ impl PolicyCopyContext for ImmixCopyContext { align: usize, offset: isize, ) -> Address { - if self.defrag_allocator.immix_space().in_defrag() { + if self.get_space().in_defrag() { self.defrag_allocator.alloc(bytes, align, offset) } else { self.copy_allocator.alloc(bytes, align, offset) } } + #[inline(always)] + fn post_copy(&mut self, obj: ObjectReference, _bytes: usize) { + // Mark the object + store_metadata::( + &VM::VMObjectModel::LOCAL_MARK_BIT_SPEC, + obj, + self.get_space().mark_state as usize, + None, + Some(Ordering::SeqCst), + ); + // Mark the line + if !super::MARK_LINE_AT_SCAN_TIME { + self.get_space().mark_lines(obj); + } + } } impl ImmixCopyContext { @@ -662,4 +675,12 @@ impl ImmixCopyContext { defrag_allocator: ImmixAllocator::new(tls.0, Some(space), plan, true), } } + + #[inline(always)] + fn get_space(&self) -> &ImmixSpace { + // Both copy allocators should point to the same space. + debug_assert_eq!(self.defrag_allocator.immix_space().common().descriptor, self.copy_allocator.immix_space().common().descriptor); + // Just get the space from either allocator + self.defrag_allocator.immix_space() + } } From ea23295608e85aa2e8df87e0a7ec8f26e0ebf805 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 16 Mar 2022 11:08:10 +1100 Subject: [PATCH 05/25] Cargo fmt --- src/policy/immix/immixspace.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 92000fc444..655159528f 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -14,7 +14,9 @@ use crate::util::heap::HeapMeta; use crate::util::heap::PageResource; use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::{self, *}; -use crate::util::metadata::{self, compare_exchange_metadata, load_metadata, MetadataSpec, store_metadata}; +use crate::util::metadata::{ + self, compare_exchange_metadata, load_metadata, store_metadata, MetadataSpec, +}; use crate::util::object_forwarding as ForwardingWord; use crate::util::{Address, ObjectReference}; use crate::vm::*; @@ -679,7 +681,10 @@ impl ImmixCopyContext { #[inline(always)] fn get_space(&self) -> &ImmixSpace { // Both copy allocators should point to the same space. - debug_assert_eq!(self.defrag_allocator.immix_space().common().descriptor, self.copy_allocator.immix_space().common().descriptor); + debug_assert_eq!( + self.defrag_allocator.immix_space().common().descriptor, + self.copy_allocator.immix_space().common().descriptor + ); // Just get the space from either allocator self.defrag_allocator.immix_space() } From e0484d0109d008db6a4f36955e177b2ed5a73372 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Mon, 21 Mar 2022 09:50:04 +1100 Subject: [PATCH 06/25] Add reference processing work for MarkCompact. Add SecondRoots bucket. 
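MarkCompact runs a second transitive closure (UpdateReferences) after forwarding addresses have been calculated, and that second trace has to re-scan the stack and VM-specific roots. Until now both UpdateReferences and the root re-scan were pushed into the RefForwarding bucket; this commit gives them a dedicated SecondRoots stage between CalculateForwarding and RefForwarding, leaving RefForwarding (and FinalizableForwarding) free for the reference-processor and finalizer forwarding work added in this series. The resulting stage order is:

    Closure -> Soft/Weak/Final/PhantomRefClosure -> CalculateForwarding
      -> SecondRoots (UpdateReferences + second root scan)
      -> RefForwarding -> FinalizableForwarding -> Compact -> Release -> Final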
--- src/plan/markcompact/gc_work.rs | 4 ++-- src/plan/markcompact/global.rs | 13 ++++++++++++- src/scheduler/scheduler.rs | 2 ++ src/scheduler/work_bucket.rs | 1 + 4 files changed, 17 insertions(+), 3 deletions(-) diff --git a/src/plan/markcompact/gc_work.rs b/src/plan/markcompact/gc_work.rs index 143d48911b..848fdd2d8e 100644 --- a/src/plan/markcompact/gc_work.rs +++ b/src/plan/markcompact/gc_work.rs @@ -50,11 +50,11 @@ impl GCWork for UpdateReferences { // scheduler.work_buckets[WorkBucketStage::RefForwarding] // .add(ScanStackRoots::>::new()); for mutator in VM::VMActivePlan::mutators() { - mmtk.scheduler.work_buckets[WorkBucketStage::RefForwarding] + mmtk.scheduler.work_buckets[WorkBucketStage::SecondRoots] .add(ScanStackRoot::>(mutator)); } - mmtk.scheduler.work_buckets[WorkBucketStage::RefForwarding] + mmtk.scheduler.work_buckets[WorkBucketStage::SecondRoots] .add(ScanVMSpecificRoots::>::new()); } } diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index d72600ea6e..19f554ebba 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -104,13 +104,24 @@ impl Plan for MarkCompact { scheduler.work_buckets[WorkBucketStage::CalculateForwarding] .add(CalculateForwardingAddress::::new(&self.mc_space)); // do another trace to update references - scheduler.work_buckets[WorkBucketStage::RefForwarding].add(UpdateReferences::::new()); + scheduler.work_buckets[WorkBucketStage::SecondRoots].add(UpdateReferences::::new()); scheduler.work_buckets[WorkBucketStage::Compact].add(Compact::::new(&self.mc_space)); // Release global/collectors/mutators scheduler.work_buckets[WorkBucketStage::Release] .add(Release::>::new(self)); + // Reference processing + if !*self.base().options.no_reference_types { + use crate::util::reference_processor::{SoftRefProcessing, WeakRefProcessing, PhantomRefProcessing}; + scheduler.work_buckets[WorkBucketStage::SoftRefClosure].add(SoftRefProcessing::>::new()); + scheduler.work_buckets[WorkBucketStage::WeakRefClosure].add(WeakRefProcessing::>::new()); + scheduler.work_buckets[WorkBucketStage::PhantomRefClosure].add(PhantomRefProcessing::>::new()); + + use crate::util::reference_processor::RefForwarding; + scheduler.work_buckets[WorkBucketStage::RefForwarding].add(RefForwarding::>::new()); + } + // Finalization if !*self.base().options.no_finalizer { use crate::util::finalizable_processor::{Finalization, ForwardFinalization}; diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index a80abb38ae..72440692dd 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -62,6 +62,7 @@ impl GCWorkScheduler { WorkBucketStage::FinalRefClosure => WorkBucket::new(false, worker_monitor.clone()), WorkBucketStage::PhantomRefClosure => WorkBucket::new(false, worker_monitor.clone()), WorkBucketStage::CalculateForwarding => WorkBucket::new(false, worker_monitor.clone()), + WorkBucketStage::SecondRoots => WorkBucket::new(false, worker_monitor.clone()), WorkBucketStage::RefForwarding => WorkBucket::new(false, worker_monitor.clone()), WorkBucketStage::FinalizableForwarding => WorkBucket::new(false, worker_monitor.clone()), WorkBucketStage::Compact => WorkBucket::new(false, worker_monitor.clone()), @@ -100,6 +101,7 @@ impl GCWorkScheduler { open_next(FinalRefClosure); open_next(PhantomRefClosure); open_next(CalculateForwarding); + open_next(SecondRoots); open_next(RefForwarding); open_next(FinalizableForwarding); open_next(Compact); diff --git a/src/scheduler/work_bucket.rs b/src/scheduler/work_bucket.rs index 
e27399faed..09b7764ba7 100644 --- a/src/scheduler/work_bucket.rs +++ b/src/scheduler/work_bucket.rs @@ -161,6 +161,7 @@ pub enum WorkBucketStage { FinalRefClosure, PhantomRefClosure, CalculateForwarding, + SecondRoots, RefForwarding, FinalizableForwarding, Compact, From 32dad458e9f6c8951f37c4c31bbb9712848ff70a Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Mon, 21 Mar 2022 14:32:21 +1100 Subject: [PATCH 07/25] Fix mark compact --- src/plan/markcompact/global.rs | 3 ++ src/policy/markcompactspace.rs | 94 +++++++++++++++++++++++----------- 2 files changed, 66 insertions(+), 31 deletions(-) diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 19f554ebba..0bb3ae024e 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -120,6 +120,9 @@ impl Plan for MarkCompact { use crate::util::reference_processor::RefForwarding; scheduler.work_buckets[WorkBucketStage::RefForwarding].add(RefForwarding::>::new()); + + use crate::util::reference_processor::RefEnqueue; + scheduler.work_buckets[WorkBucketStage::Release].add(RefEnqueue::>::new()); } // Finalization diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs index f937a0b66a..7c41d7d15f 100644 --- a/src/policy/markcompactspace.rs +++ b/src/policy/markcompactspace.rs @@ -18,6 +18,9 @@ pub struct MarkCompactSpace { const GC_MARK_BIT_MASK: usize = 1; +/// For each MarkCompact object, we need one extra word for storing forwarding pointer (Lisp-2 implementation). +/// Note that considering the object alignment, we may end up allocating/reserving more than one word per object. +/// See [`MarkCompactSpace::HEADER_RESERVED_IN_BYTES`]. pub const GC_EXTRA_HEADER_WORD: usize = 1; const GC_EXTRA_HEADER_BYTES: usize = GC_EXTRA_HEADER_WORD << LOG_BYTES_IN_WORD; @@ -27,11 +30,13 @@ impl SFT for MarkCompactSpace { } #[inline(always)] - fn get_forwarded_object(&self, _object: ObjectReference) -> Option { - // the current forwarding pointer implementation will store forwarding pointers - // in dead objects' header, which is not the case in mark compact. Mark compact - // implements its own forwarding mechanism - unimplemented!() + fn get_forwarded_object(&self, object: ObjectReference) -> Option { + let forwarding_pointer = Self::get_header_forwarding_pointer(object); + if forwarding_pointer.is_null() { + None + } else { + Some(forwarding_pointer) + } } fn is_live(&self, object: ObjectReference) -> bool { @@ -93,6 +98,8 @@ impl Space for MarkCompactSpace { } impl MarkCompactSpace { + /// We need one extra header word for each object. Considering the alignment requirement, this is + /// the actual bytes we need to reserve for each allocation. pub const HEADER_RESERVED_IN_BYTES: usize = if VM::MAX_ALIGNMENT > GC_EXTRA_HEADER_BYTES { VM::MAX_ALIGNMENT } else { @@ -100,6 +107,33 @@ impl MarkCompactSpace { } .next_power_of_two(); + // The following are a few functions for manipulating header forwarding poiner. + // Basically for each allocation request, we allocate extra bytes of [`HEADER_RESERVED_IN_BYTES`]. + // From the allocation result we get (e.g. `alloc_res`), `alloc_res + HEADER_RESERVED_IN_BYTES` is the cell + // address we return to the binding. It ensures we have at least one word (`GC_EXTRA_HEADER_WORD`) before + // the cell address, and ensures the cell address is properly aligned. + // From the cell address, `cell - GC_EXTRA_HEADER_WORD` is where we store the header forwarding pointer. 
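To make the layout described above concrete, here is a rough sketch of a single allocation under this scheme (illustrative only; the amount of padding depends on VM::MAX_ALIGNMENT and the requested alignment):

    //   alloc_res                                     cell = alloc_res + HEADER_RESERVED_IN_BYTES
    //   [ ...padding... | forwarding pointer (1 word) | object data ...                          ]
    //                   ^
    //                   header_forwarding_pointer_address(object)
    //                   = object_start_ref(object) - GC_EXTRA_HEADER_BYTES
    //
    // The binding only ever sees `cell`. The reserved word is written while forwarding
    // addresses are calculated and cleared again in compact(), so a null value means
    // "not forwarded" (see get_forwarded_object above).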
+ + /// Get the address for header forwarding pointer + fn header_forwarding_pointer_address(object: ObjectReference) -> Address { + VM::VMObjectModel::object_start_ref(object) - GC_EXTRA_HEADER_BYTES + } + + /// Get header forwarding pointer for an object + fn get_header_forwarding_pointer(object: ObjectReference) -> ObjectReference { + unsafe { Self::header_forwarding_pointer_address(object).load::() } + } + + /// Store header forwarding pointer for an object + fn store_header_forwarding_pointer(object: ObjectReference, forwarding_pointer: ObjectReference) { + unsafe { Self::header_forwarding_pointer_address(object).store::(forwarding_pointer); } + } + + // Clear header forwarding pointer for an object + fn clear_header_forwarding_pointer(object: ObjectReference) { + crate::util::memory::zero(Self::header_forwarding_pointer_address(object), GC_EXTRA_HEADER_BYTES); + } + #[allow(clippy::too_many_arguments)] pub fn new( name: &'static str, @@ -174,13 +208,7 @@ impl MarkCompactSpace { trace.process_node(object); } - // For markcompact, only one word is required to store the forwarding pointer - // and it is always stored in front of the object start address. However, the - // number of extra bytes required is determined by the object alignment - let forwarding_pointer = - unsafe { (object.to_address() - GC_EXTRA_HEADER_BYTES).load::
() }; - - unsafe { (forwarding_pointer + Self::HEADER_RESERVED_IN_BYTES).to_object_reference() } + Self::get_header_forwarding_pointer(object) } pub fn test_and_mark(object: ObjectReference) -> bool { @@ -268,10 +296,15 @@ impl MarkCompactSpace { let align = VM::VMObjectModel::get_align_when_copied(obj); let offset = VM::VMObjectModel::get_align_offset_when_copied(obj); to = align_allocation_no_fill::(to, align, offset); - let forwarding_pointer_addr = obj.to_address() - GC_EXTRA_HEADER_BYTES; - unsafe { forwarding_pointer_addr.store(to) } + let new_obj = VM::VMObjectModel::get_reference_when_copied_to(obj, to + Self::HEADER_RESERVED_IN_BYTES); + + Self::store_header_forwarding_pointer(obj, new_obj); + + trace!("Calculate forward: {} (size when copied = {}) ~> {} (size = {})", obj, VM::VMObjectModel::get_size_when_copied(obj), to, copied_size); + to += copied_size; } + debug!("Calculate forward end: to = {}", to); } pub fn compact(&self) { @@ -287,28 +320,27 @@ impl MarkCompactSpace { // clear the alloc bit alloc_bit::unset_addr_alloc_bit(obj.to_address()); - let forwarding_pointer_addr = obj.to_address() - GC_EXTRA_HEADER_BYTES; - let forwarding_pointer = unsafe { forwarding_pointer_addr.load::
() }; - if forwarding_pointer != Address::ZERO { + let forwarding_pointer = Self::get_header_forwarding_pointer(obj); + + trace!("Compact {} to {}", obj, forwarding_pointer); + if !forwarding_pointer.is_null() { let copied_size = - VM::VMObjectModel::get_size_when_copied(obj) + Self::HEADER_RESERVED_IN_BYTES; - to = forwarding_pointer; - let object_addr = forwarding_pointer + Self::HEADER_RESERVED_IN_BYTES; - // clear forwarding pointer - crate::util::memory::zero( - forwarding_pointer + Self::HEADER_RESERVED_IN_BYTES - GC_EXTRA_HEADER_BYTES, - GC_EXTRA_HEADER_BYTES, - ); - crate::util::memory::zero(forwarding_pointer_addr, GC_EXTRA_HEADER_BYTES); + VM::VMObjectModel::get_size_when_copied(obj); + let new_object = forwarding_pointer; + Self::clear_header_forwarding_pointer(new_object); + // copy object - let target = unsafe { object_addr.to_object_reference() }; - VM::VMObjectModel::copy_to(obj, target, Address::ZERO); + trace!(" copy from {} to {}", obj, new_object); + let end_of_new_object = VM::VMObjectModel::copy_to(obj, new_object, Address::ZERO); // update alloc_bit, - alloc_bit::set_alloc_bit(target); - to += copied_size + alloc_bit::set_alloc_bit(new_object); + to = new_object.to_address() + copied_size; + debug_assert_eq!(end_of_new_object, to); } } + debug!("Compact end: to = {}", to); + // reset the bump pointer self.pr.reset_cursor(to); } @@ -319,6 +351,6 @@ impl crate::util::linear_scan::LinearScanObjectSize for MarkCompa #[inline(always)] fn size(object: ObjectReference) -> usize { VM::VMObjectModel::get_current_size(object) - + MarkCompactSpace::::HEADER_RESERVED_IN_BYTES + // + MarkCompactSpace::::HEADER_RESERVED_IN_BYTES } } From f47e85bdf2b6bf95807b425b184135cfdbf4de14 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Mon, 21 Mar 2022 14:32:33 +1100 Subject: [PATCH 08/25] Enqueue references in Release stage --- src/scheduler/scheduler.rs | 3 ++ src/util/reference_processor.rs | 67 ++++++++++++++++++++++++++++++++- 2 files changed, 68 insertions(+), 2 deletions(-) diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index 72440692dd..58b45fcd47 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -222,6 +222,9 @@ impl GCWorkScheduler { use crate::util::reference_processor::RefForwarding; self.work_buckets[WorkBucketStage::RefForwarding].add(RefForwarding::::new()); + + use crate::util::reference_processor::RefEnqueue; + self.work_buckets[WorkBucketStage::Release].add(RefEnqueue::::new()); } // Finalization diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index f43e08d49c..3c5ac2c7c4 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -65,6 +65,12 @@ impl ReferenceProcessors { self.phantom.add_candidate::(reff, referent); } + pub fn enqueue_refs(&self, trace: &mut E) { + self.soft.enqueue::(trace, false); + self.weak.enqueue::(trace, false); + self.phantom.enqueue::(trace, false); + } + pub fn forward_refs(&self, trace: &mut E) { self.soft.forward::(trace, false); self.weak.forward::(trace, false); @@ -148,6 +154,8 @@ struct ReferenceProcessorSync { */ unforwarded_references: Option>, + enqueued_references: Vec, + /** * Index into the references table for the start of * the reference nursery. 
@@ -161,6 +169,7 @@ impl ReferenceProcessor { sync: UnsafeCell::new(Mutex::new(ReferenceProcessorSync { references: Vec::with_capacity(INITIAL_SIZE), unforwarded_references: None, + enqueued_references: vec![], nursery_index: 0, })), semantics, @@ -204,6 +213,14 @@ impl ReferenceProcessor { e.trace_object(object) } + pub fn enqueue(&self, trace: &mut E, _nursery: bool) { + let mut sync = unsafe { self.sync_mut() }; + + for obj in sync.enqueued_references.drain(..) { + ::VMReferenceGlue::enqueue_reference(obj); + } + } + pub fn forward(&self, trace: &mut E, _nursery: bool) { let mut sync = unsafe { self.sync_mut() }; let references: &mut Vec
= &mut sync.references; @@ -239,6 +256,32 @@ impl ReferenceProcessor { *unforwarded_ref = new_reference.to_address(); } + let mut new_queue = vec![]; + for obj in sync.enqueued_references.drain(..) { + let old_referent = ::VMReferenceGlue::get_referent(obj); + let new_referent = Self::get_forwarded_referent(trace, old_referent); + if !old_referent.is_null() { + // make sure MC forwards the referent + // debug_assert!(old_referent != new_referent); + } + ::VMReferenceGlue::set_referent( + obj, + new_referent, + ); + let new_reference = Self::get_forwarded_reference(trace, obj); + // debug_assert!(!obj.is_null()); + // debug_assert!(obj != new_reference); + if TRACE_DETAIL { + use crate::vm::ObjectModel; + trace!("Forwarding enqueued reference: {} (size: {})", obj, ::VMObjectModel::get_current_size(obj)); + trace!(" referent: {} (forwarded to {})", old_referent, new_referent); + trace!(" reference: forwarded to {}", new_reference); + } + // ::VMReferenceGlue::enqueue_reference(new_reference); + new_queue.push(new_reference); + } + sync.enqueued_references = new_queue; + if TRACE { trace!("Ending ReferenceProcessor.forward({:?})", self.semantics) } @@ -275,7 +318,7 @@ impl ReferenceProcessor { let reference = unsafe { references[i].to_object_reference() }; /* Determine liveness (and forward if necessary) the reference */ - let new_reference = self.process_reference(trace, reference, tls); + let new_reference = self.process_reference(trace, reference, tls, &mut sync.enqueued_references); if !new_reference.is_null() { references[to_index] = new_reference.to_address(); to_index += 1; @@ -361,6 +404,7 @@ impl ReferenceProcessor { trace: &mut E, reference: ObjectReference, tls: VMWorkerThread, + enqueued_references: &mut Vec, ) -> ObjectReference { debug_assert!(!reference.is_null()); @@ -377,6 +421,8 @@ impl ReferenceProcessor { // The reference object is live let new_reference = Self::get_forwarded_reference(trace, reference); + // This assert should be true for mark compact + // debug_assert_eq!(reference, new_reference); let old_referent = ::VMReferenceGlue::get_referent(reference); if TRACE_DETAIL { trace!(" ~> {}", old_referent); } @@ -396,6 +442,8 @@ impl ReferenceProcessor { // Referent is still reachable in a way that is as strong as // or stronger than the current reference level. let new_referent = Self::get_forwarded_referent(trace, old_referent); + // This assert should be true for mark compact + // debug_assert_eq!(old_referent, new_referent); if TRACE_DETAIL { trace!(" ~> {}", new_referent); } debug_assert!(new_referent.is_live()); @@ -415,7 +463,8 @@ impl ReferenceProcessor { else if TRACE_UNREACHABLE { trace!(" UNREACHABLE referent: {}", old_referent); } ::VMReferenceGlue::clear_referent(new_reference); - ::VMReferenceGlue::enqueue_reference(new_reference); + // ::VMReferenceGlue::enqueue_reference(new_reference); + enqueued_references.push(new_reference); return ObjectReference::NULL; } } @@ -481,3 +530,17 @@ impl RefForwarding { Self(PhantomData) } } + +pub struct RefEnqueue(PhantomData); +impl GCWork for RefEnqueue { + fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { + let mut w = E::new(vec![], false, mmtk); + w.set_worker(worker); + mmtk.reference_processors.enqueue_refs(&mut w); + } +} +impl RefEnqueue { + pub fn new() -> Self { + Self(PhantomData) + } +} From c4374d1f15b6d73a703b7bee8c92fe91eba899be Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Mon, 21 Mar 2022 09:50:04 +1100 Subject: [PATCH 09/25] Add reference processing work for MarkCompact. 
Add SecondRoots bucket. --- src/plan/markcompact/gc_work.rs | 4 ++-- src/plan/markcompact/global.rs | 13 ++++++++++++- src/scheduler/scheduler.rs | 2 ++ src/scheduler/work_bucket.rs | 1 + 4 files changed, 17 insertions(+), 3 deletions(-) diff --git a/src/plan/markcompact/gc_work.rs b/src/plan/markcompact/gc_work.rs index 143d48911b..848fdd2d8e 100644 --- a/src/plan/markcompact/gc_work.rs +++ b/src/plan/markcompact/gc_work.rs @@ -50,11 +50,11 @@ impl GCWork for UpdateReferences { // scheduler.work_buckets[WorkBucketStage::RefForwarding] // .add(ScanStackRoots::>::new()); for mutator in VM::VMActivePlan::mutators() { - mmtk.scheduler.work_buckets[WorkBucketStage::RefForwarding] + mmtk.scheduler.work_buckets[WorkBucketStage::SecondRoots] .add(ScanStackRoot::>(mutator)); } - mmtk.scheduler.work_buckets[WorkBucketStage::RefForwarding] + mmtk.scheduler.work_buckets[WorkBucketStage::SecondRoots] .add(ScanVMSpecificRoots::>::new()); } } diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index bbc45a4223..22439a5f35 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -104,13 +104,24 @@ impl Plan for MarkCompact { scheduler.work_buckets[WorkBucketStage::CalculateForwarding] .add(CalculateForwardingAddress::::new(&self.mc_space)); // do another trace to update references - scheduler.work_buckets[WorkBucketStage::RefForwarding].add(UpdateReferences::::new()); + scheduler.work_buckets[WorkBucketStage::SecondRoots].add(UpdateReferences::::new()); scheduler.work_buckets[WorkBucketStage::Compact].add(Compact::::new(&self.mc_space)); // Release global/collectors/mutators scheduler.work_buckets[WorkBucketStage::Release] .add(Release::>::new(self)); + // Reference processing + if !*self.base().options.no_reference_types { + use crate::util::reference_processor::{SoftRefProcessing, WeakRefProcessing, PhantomRefProcessing}; + scheduler.work_buckets[WorkBucketStage::SoftRefClosure].add(SoftRefProcessing::>::new()); + scheduler.work_buckets[WorkBucketStage::WeakRefClosure].add(WeakRefProcessing::>::new()); + scheduler.work_buckets[WorkBucketStage::PhantomRefClosure].add(PhantomRefProcessing::>::new()); + + use crate::util::reference_processor::RefForwarding; + scheduler.work_buckets[WorkBucketStage::RefForwarding].add(RefForwarding::>::new()); + } + // Finalization if !*self.base().options.no_finalizer { use crate::util::finalizable_processor::{Finalization, ForwardFinalization}; diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index 1ddefbe6cd..a092e2b0cc 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -59,6 +59,7 @@ impl GCWorkScheduler { WorkBucketStage::Closure => WorkBucket::new(false, worker_monitor.clone()), WorkBucketStage::RefClosure => WorkBucket::new(false, worker_monitor.clone()), WorkBucketStage::CalculateForwarding => WorkBucket::new(false, worker_monitor.clone()), + WorkBucketStage::SecondRoots => WorkBucket::new(false, worker_monitor.clone()), WorkBucketStage::RefForwarding => WorkBucket::new(false, worker_monitor.clone()), WorkBucketStage::Compact => WorkBucket::new(false, worker_monitor.clone()), WorkBucketStage::Release => WorkBucket::new(false, worker_monitor.clone()), @@ -93,6 +94,7 @@ impl GCWorkScheduler { open_next(Closure); open_next(RefClosure); open_next(CalculateForwarding); + open_next(SecondRoots); open_next(RefForwarding); open_next(Compact); open_next(Release); diff --git a/src/scheduler/work_bucket.rs b/src/scheduler/work_bucket.rs index cc6badd08b..140143b2ec 100644 --- 
a/src/scheduler/work_bucket.rs +++ b/src/scheduler/work_bucket.rs @@ -158,6 +158,7 @@ pub enum WorkBucketStage { // we may need more than one buckets for each reference strength. RefClosure, CalculateForwarding, + SecondRoots, RefForwarding, Compact, Release, From d60b2c1731fb7038e9b547bd266b47b338f15ae7 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Mon, 21 Mar 2022 14:32:21 +1100 Subject: [PATCH 10/25] Fix mark compact --- src/plan/markcompact/global.rs | 3 ++ src/policy/markcompactspace.rs | 94 +++++++++++++++++++++++----------- 2 files changed, 66 insertions(+), 31 deletions(-) diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 22439a5f35..3780a7135f 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -120,6 +120,9 @@ impl Plan for MarkCompact { use crate::util::reference_processor::RefForwarding; scheduler.work_buckets[WorkBucketStage::RefForwarding].add(RefForwarding::>::new()); + + use crate::util::reference_processor::RefEnqueue; + scheduler.work_buckets[WorkBucketStage::Release].add(RefEnqueue::>::new()); } // Finalization diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs index f937a0b66a..7c41d7d15f 100644 --- a/src/policy/markcompactspace.rs +++ b/src/policy/markcompactspace.rs @@ -18,6 +18,9 @@ pub struct MarkCompactSpace { const GC_MARK_BIT_MASK: usize = 1; +/// For each MarkCompact object, we need one extra word for storing forwarding pointer (Lisp-2 implementation). +/// Note that considering the object alignment, we may end up allocating/reserving more than one word per object. +/// See [`MarkCompactSpace::HEADER_RESERVED_IN_BYTES`]. pub const GC_EXTRA_HEADER_WORD: usize = 1; const GC_EXTRA_HEADER_BYTES: usize = GC_EXTRA_HEADER_WORD << LOG_BYTES_IN_WORD; @@ -27,11 +30,13 @@ impl SFT for MarkCompactSpace { } #[inline(always)] - fn get_forwarded_object(&self, _object: ObjectReference) -> Option { - // the current forwarding pointer implementation will store forwarding pointers - // in dead objects' header, which is not the case in mark compact. Mark compact - // implements its own forwarding mechanism - unimplemented!() + fn get_forwarded_object(&self, object: ObjectReference) -> Option { + let forwarding_pointer = Self::get_header_forwarding_pointer(object); + if forwarding_pointer.is_null() { + None + } else { + Some(forwarding_pointer) + } } fn is_live(&self, object: ObjectReference) -> bool { @@ -93,6 +98,8 @@ impl Space for MarkCompactSpace { } impl MarkCompactSpace { + /// We need one extra header word for each object. Considering the alignment requirement, this is + /// the actual bytes we need to reserve for each allocation. pub const HEADER_RESERVED_IN_BYTES: usize = if VM::MAX_ALIGNMENT > GC_EXTRA_HEADER_BYTES { VM::MAX_ALIGNMENT } else { @@ -100,6 +107,33 @@ impl MarkCompactSpace { } .next_power_of_two(); + // The following are a few functions for manipulating header forwarding poiner. + // Basically for each allocation request, we allocate extra bytes of [`HEADER_RESERVED_IN_BYTES`]. + // From the allocation result we get (e.g. `alloc_res`), `alloc_res + HEADER_RESERVED_IN_BYTES` is the cell + // address we return to the binding. It ensures we have at least one word (`GC_EXTRA_HEADER_WORD`) before + // the cell address, and ensures the cell address is properly aligned. + // From the cell address, `cell - GC_EXTRA_HEADER_WORD` is where we store the header forwarding pointer. 
+ + /// Get the address for header forwarding pointer + fn header_forwarding_pointer_address(object: ObjectReference) -> Address { + VM::VMObjectModel::object_start_ref(object) - GC_EXTRA_HEADER_BYTES + } + + /// Get header forwarding pointer for an object + fn get_header_forwarding_pointer(object: ObjectReference) -> ObjectReference { + unsafe { Self::header_forwarding_pointer_address(object).load::() } + } + + /// Store header forwarding pointer for an object + fn store_header_forwarding_pointer(object: ObjectReference, forwarding_pointer: ObjectReference) { + unsafe { Self::header_forwarding_pointer_address(object).store::(forwarding_pointer); } + } + + // Clear header forwarding pointer for an object + fn clear_header_forwarding_pointer(object: ObjectReference) { + crate::util::memory::zero(Self::header_forwarding_pointer_address(object), GC_EXTRA_HEADER_BYTES); + } + #[allow(clippy::too_many_arguments)] pub fn new( name: &'static str, @@ -174,13 +208,7 @@ impl MarkCompactSpace { trace.process_node(object); } - // For markcompact, only one word is required to store the forwarding pointer - // and it is always stored in front of the object start address. However, the - // number of extra bytes required is determined by the object alignment - let forwarding_pointer = - unsafe { (object.to_address() - GC_EXTRA_HEADER_BYTES).load::
() }; - - unsafe { (forwarding_pointer + Self::HEADER_RESERVED_IN_BYTES).to_object_reference() } + Self::get_header_forwarding_pointer(object) } pub fn test_and_mark(object: ObjectReference) -> bool { @@ -268,10 +296,15 @@ impl MarkCompactSpace { let align = VM::VMObjectModel::get_align_when_copied(obj); let offset = VM::VMObjectModel::get_align_offset_when_copied(obj); to = align_allocation_no_fill::(to, align, offset); - let forwarding_pointer_addr = obj.to_address() - GC_EXTRA_HEADER_BYTES; - unsafe { forwarding_pointer_addr.store(to) } + let new_obj = VM::VMObjectModel::get_reference_when_copied_to(obj, to + Self::HEADER_RESERVED_IN_BYTES); + + Self::store_header_forwarding_pointer(obj, new_obj); + + trace!("Calculate forward: {} (size when copied = {}) ~> {} (size = {})", obj, VM::VMObjectModel::get_size_when_copied(obj), to, copied_size); + to += copied_size; } + debug!("Calculate forward end: to = {}", to); } pub fn compact(&self) { @@ -287,28 +320,27 @@ impl MarkCompactSpace { // clear the alloc bit alloc_bit::unset_addr_alloc_bit(obj.to_address()); - let forwarding_pointer_addr = obj.to_address() - GC_EXTRA_HEADER_BYTES; - let forwarding_pointer = unsafe { forwarding_pointer_addr.load::
() }; - if forwarding_pointer != Address::ZERO { + let forwarding_pointer = Self::get_header_forwarding_pointer(obj); + + trace!("Compact {} to {}", obj, forwarding_pointer); + if !forwarding_pointer.is_null() { let copied_size = - VM::VMObjectModel::get_size_when_copied(obj) + Self::HEADER_RESERVED_IN_BYTES; - to = forwarding_pointer; - let object_addr = forwarding_pointer + Self::HEADER_RESERVED_IN_BYTES; - // clear forwarding pointer - crate::util::memory::zero( - forwarding_pointer + Self::HEADER_RESERVED_IN_BYTES - GC_EXTRA_HEADER_BYTES, - GC_EXTRA_HEADER_BYTES, - ); - crate::util::memory::zero(forwarding_pointer_addr, GC_EXTRA_HEADER_BYTES); + VM::VMObjectModel::get_size_when_copied(obj); + let new_object = forwarding_pointer; + Self::clear_header_forwarding_pointer(new_object); + // copy object - let target = unsafe { object_addr.to_object_reference() }; - VM::VMObjectModel::copy_to(obj, target, Address::ZERO); + trace!(" copy from {} to {}", obj, new_object); + let end_of_new_object = VM::VMObjectModel::copy_to(obj, new_object, Address::ZERO); // update alloc_bit, - alloc_bit::set_alloc_bit(target); - to += copied_size + alloc_bit::set_alloc_bit(new_object); + to = new_object.to_address() + copied_size; + debug_assert_eq!(end_of_new_object, to); } } + debug!("Compact end: to = {}", to); + // reset the bump pointer self.pr.reset_cursor(to); } @@ -319,6 +351,6 @@ impl crate::util::linear_scan::LinearScanObjectSize for MarkCompa #[inline(always)] fn size(object: ObjectReference) -> usize { VM::VMObjectModel::get_current_size(object) - + MarkCompactSpace::::HEADER_RESERVED_IN_BYTES + // + MarkCompactSpace::::HEADER_RESERVED_IN_BYTES } } From ea0f19ef8af26665e28082653d43bdb40f4ccc62 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Mon, 21 Mar 2022 14:47:19 +1100 Subject: [PATCH 11/25] cargo fmt --- src/plan/markcompact/global.rs | 14 ------------- src/policy/markcompactspace.rs | 36 ++++++++++++++++++++++++++-------- 2 files changed, 28 insertions(+), 22 deletions(-) diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 3780a7135f..6562b75e72 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -111,20 +111,6 @@ impl Plan for MarkCompact { scheduler.work_buckets[WorkBucketStage::Release] .add(Release::>::new(self)); - // Reference processing - if !*self.base().options.no_reference_types { - use crate::util::reference_processor::{SoftRefProcessing, WeakRefProcessing, PhantomRefProcessing}; - scheduler.work_buckets[WorkBucketStage::SoftRefClosure].add(SoftRefProcessing::>::new()); - scheduler.work_buckets[WorkBucketStage::WeakRefClosure].add(WeakRefProcessing::>::new()); - scheduler.work_buckets[WorkBucketStage::PhantomRefClosure].add(PhantomRefProcessing::>::new()); - - use crate::util::reference_processor::RefForwarding; - scheduler.work_buckets[WorkBucketStage::RefForwarding].add(RefForwarding::>::new()); - - use crate::util::reference_processor::RefEnqueue; - scheduler.work_buckets[WorkBucketStage::Release].add(RefEnqueue::>::new()); - } - // Finalization if !*self.base().options.no_finalizer { use crate::util::finalizable_processor::{Finalization, ForwardFinalization}; diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs index 7c41d7d15f..82bdf269a8 100644 --- a/src/policy/markcompactspace.rs +++ b/src/policy/markcompactspace.rs @@ -115,23 +115,36 @@ impl MarkCompactSpace { // From the cell address, `cell - GC_EXTRA_HEADER_WORD` is where we store the header forwarding pointer. 
/// Get the address for header forwarding pointer + #[inline(always)] fn header_forwarding_pointer_address(object: ObjectReference) -> Address { VM::VMObjectModel::object_start_ref(object) - GC_EXTRA_HEADER_BYTES } /// Get header forwarding pointer for an object + #[inline(always)] fn get_header_forwarding_pointer(object: ObjectReference) -> ObjectReference { unsafe { Self::header_forwarding_pointer_address(object).load::() } } /// Store header forwarding pointer for an object - fn store_header_forwarding_pointer(object: ObjectReference, forwarding_pointer: ObjectReference) { - unsafe { Self::header_forwarding_pointer_address(object).store::(forwarding_pointer); } + #[inline(always)] + fn store_header_forwarding_pointer( + object: ObjectReference, + forwarding_pointer: ObjectReference, + ) { + unsafe { + Self::header_forwarding_pointer_address(object) + .store::(forwarding_pointer); + } } // Clear header forwarding pointer for an object + #[inline(always)] fn clear_header_forwarding_pointer(object: ObjectReference) { - crate::util::memory::zero(Self::header_forwarding_pointer_address(object), GC_EXTRA_HEADER_BYTES); + crate::util::memory::zero( + Self::header_forwarding_pointer_address(object), + GC_EXTRA_HEADER_BYTES, + ); } #[allow(clippy::too_many_arguments)] @@ -296,11 +309,20 @@ impl MarkCompactSpace { let align = VM::VMObjectModel::get_align_when_copied(obj); let offset = VM::VMObjectModel::get_align_offset_when_copied(obj); to = align_allocation_no_fill::(to, align, offset); - let new_obj = VM::VMObjectModel::get_reference_when_copied_to(obj, to + Self::HEADER_RESERVED_IN_BYTES); + let new_obj = VM::VMObjectModel::get_reference_when_copied_to( + obj, + to + Self::HEADER_RESERVED_IN_BYTES, + ); Self::store_header_forwarding_pointer(obj, new_obj); - trace!("Calculate forward: {} (size when copied = {}) ~> {} (size = {})", obj, VM::VMObjectModel::get_size_when_copied(obj), to, copied_size); + trace!( + "Calculate forward: {} (size when copied = {}) ~> {} (size = {})", + obj, + VM::VMObjectModel::get_size_when_copied(obj), + to, + copied_size + ); to += copied_size; } @@ -324,8 +346,7 @@ impl MarkCompactSpace { trace!("Compact {} to {}", obj, forwarding_pointer); if !forwarding_pointer.is_null() { - let copied_size = - VM::VMObjectModel::get_size_when_copied(obj); + let copied_size = VM::VMObjectModel::get_size_when_copied(obj); let new_object = forwarding_pointer; Self::clear_header_forwarding_pointer(new_object); @@ -351,6 +372,5 @@ impl crate::util::linear_scan::LinearScanObjectSize for MarkCompa #[inline(always)] fn size(object: ObjectReference) -> usize { VM::VMObjectModel::get_current_size(object) - // + MarkCompactSpace::::HEADER_RESERVED_IN_BYTES } } From 89b07f4d5234e2185308897b98db57694d99fcbd Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Mon, 21 Mar 2022 15:35:14 +1100 Subject: [PATCH 12/25] Properly deactivate buckets --- src/scheduler/scheduler.rs | 45 ++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 24 deletions(-) diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index a092e2b0cc..4bc68bad7b 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -248,36 +248,33 @@ impl GCWorkScheduler { } pub fn deactivate_all(&self) { - self.work_buckets[WorkBucketStage::Prepare].deactivate(); - self.work_buckets[WorkBucketStage::Closure].deactivate(); - self.work_buckets[WorkBucketStage::RefClosure].deactivate(); - self.work_buckets[WorkBucketStage::CalculateForwarding].deactivate(); - 
self.work_buckets[WorkBucketStage::RefForwarding].deactivate(); - self.work_buckets[WorkBucketStage::Compact].deactivate(); - self.work_buckets[WorkBucketStage::Release].deactivate(); - self.work_buckets[WorkBucketStage::Final].deactivate(); + for (stage, bucket) in self.work_buckets.iter() { + if stage == WorkBucketStage::Unconstrained { + continue; + } + + bucket.deactivate(); + } } pub fn reset_state(&self) { - // self.work_buckets[WorkBucketStage::Prepare].deactivate(); - self.work_buckets[WorkBucketStage::Closure].deactivate(); - self.work_buckets[WorkBucketStage::RefClosure].deactivate(); - self.work_buckets[WorkBucketStage::CalculateForwarding].deactivate(); - self.work_buckets[WorkBucketStage::RefForwarding].deactivate(); - self.work_buckets[WorkBucketStage::Compact].deactivate(); - self.work_buckets[WorkBucketStage::Release].deactivate(); - self.work_buckets[WorkBucketStage::Final].deactivate(); + for (stage, bucket) in self.work_buckets.iter() { + if stage == WorkBucketStage::Unconstrained || stage == WorkBucketStage::Prepare { + continue; + } + + bucket.deactivate(); + } } pub fn debug_assert_all_buckets_deactivated(&self) { - debug_assert!(!self.work_buckets[WorkBucketStage::Prepare].is_activated()); - debug_assert!(!self.work_buckets[WorkBucketStage::Closure].is_activated()); - debug_assert!(!self.work_buckets[WorkBucketStage::RefClosure].is_activated()); - debug_assert!(!self.work_buckets[WorkBucketStage::CalculateForwarding].is_activated()); - debug_assert!(!self.work_buckets[WorkBucketStage::RefForwarding].is_activated()); - debug_assert!(!self.work_buckets[WorkBucketStage::Compact].is_activated()); - debug_assert!(!self.work_buckets[WorkBucketStage::Release].is_activated()); - debug_assert!(!self.work_buckets[WorkBucketStage::Final].is_activated()); + for (stage, bucket) in self.work_buckets.iter() { + if stage == WorkBucketStage::Unconstrained { + return; + } + + debug_assert!(!bucket.is_activated()); + } } pub fn add_coordinator_work(&self, work: impl CoordinatorWork, worker: &GCWorker) { From 722546b37e7579aee9d093f51b97f5a729a553d5 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 22 Mar 2022 10:51:21 +1100 Subject: [PATCH 13/25] Bulk enqueue references --- src/util/reference_processor.rs | 7 +++++-- src/vm/reference_glue.rs | 8 +++++++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index 3c5ac2c7c4..a19a33a3dd 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -216,8 +216,10 @@ impl ReferenceProcessor { pub fn enqueue(&self, trace: &mut E, _nursery: bool) { let mut sync = unsafe { self.sync_mut() }; - for obj in sync.enqueued_references.drain(..) 
{ - ::VMReferenceGlue::enqueue_reference(obj); + if !sync.enqueued_references.is_empty() { + debug!("enqueue: {:?}", sync.enqueued_references); + ::VMReferenceGlue::enqueue_references(&sync.enqueued_references); + sync.enqueued_references.clear(); } } @@ -278,6 +280,7 @@ impl ReferenceProcessor { trace!(" reference: forwarded to {}", new_reference); } // ::VMReferenceGlue::enqueue_reference(new_reference); + debug_assert!(!new_reference.is_null(), "reference {:?}'s forwarding pointer is NULL", obj); new_queue.push(new_reference); } sync.enqueued_references = new_queue; diff --git a/src/vm/reference_glue.rs b/src/vm/reference_glue.rs index f7caa5d9d0..6a480573f2 100644 --- a/src/vm/reference_glue.rs +++ b/src/vm/reference_glue.rs @@ -31,5 +31,11 @@ pub trait ReferenceGlue { /// * `referent`: The referent object reference. fn set_referent(reff: ObjectReference, referent: ObjectReference); - fn enqueue_reference(object: ObjectReference); + /// For reference types, if the referent is changed during GC, the reference + /// will be added to a queue, and MMTk will call this method to inform + /// the VM aobut the changes for those references. + /// Note that this method is called for each type of weak references during GC, and + /// the references slice will be cleared once this call is returned. That means + /// MMTk will no longer keep these references alive once this method is returned. + fn enqueue_references(references: &[ObjectReference]); } From a55db731e38618f2143a14b5152390dcc7ba4faf Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 23 Mar 2022 10:11:43 +1100 Subject: [PATCH 14/25] * needs_forward_after_liveness for RefForwarding * references: Vec * remove unforwarded reference * avoid duplicate references in the queue * forward all references * do not clear references after GC * references: Vec -> HashSet --- src/scheduler/gc_work.rs | 2 +- src/scheduler/scheduler.rs | 4 +- src/util/reference_processor.rs | 102 ++++++++++++++++++++------------ 3 files changed, 69 insertions(+), 39 deletions(-) diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index f7204df600..1b6780cf65 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -121,7 +121,7 @@ impl GCWork for Release { w.local_work_bucket.add(ReleaseCollector); } // TODO: Process weak references properly - mmtk.reference_processors.clear(); + // mmtk.reference_processors.clear(); } } diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index af3f6a8e26..a227c59d11 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -221,7 +221,9 @@ impl GCWorkScheduler { self.work_buckets[WorkBucketStage::PhantomRefClosure].add(PhantomRefProcessing::::new()); use crate::util::reference_processor::RefForwarding; - self.work_buckets[WorkBucketStage::RefForwarding].add(RefForwarding::::new()); + if plan.constraints().needs_forward_after_liveness { + self.work_buckets[WorkBucketStage::RefForwarding].add(RefForwarding::::new()); + } use crate::util::reference_processor::RefEnqueue; self.work_buckets[WorkBucketStage::Release].add(RefEnqueue::::new()); diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index a19a33a3dd..9244ae6f35 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -145,14 +145,14 @@ struct ReferenceProcessorSync { /** * The table of reference objects for the current semantics */ - references: Vec
, + references: Vec, /** * In a MarkCompact (or similar) collector, we need to update the {@code references} * field, and then update its contents. We implement this by saving the pointer in * this untraced field for use during the {@code forward} pass. */ - unforwarded_references: Option>, + // unforwarded_references: Option>, enqueued_references: Vec, @@ -168,7 +168,7 @@ impl ReferenceProcessor { ReferenceProcessor { sync: UnsafeCell::new(Mutex::new(ReferenceProcessorSync { references: Vec::with_capacity(INITIAL_SIZE), - unforwarded_references: None, + // unforwarded_references: None, enqueued_references: vec![], nursery_index: 0, })), @@ -191,14 +191,17 @@ impl ReferenceProcessor { pub fn clear(&self) { let mut sync = self.sync().lock().unwrap(); sync.references.clear(); - sync.unforwarded_references = None; + // sync.unforwarded_references = None; sync.nursery_index = 0; } pub fn add_candidate(&self, reff: ObjectReference, referent: ObjectReference) { let mut sync = self.sync().lock().unwrap(); // VM::VMReferenceGlue::set_referent(reff, referent); - sync.references.push(reff.to_address()); + if sync.references.iter().any(|r| *r == reff) { + return; + } + sync.references.push(reff); } fn get_forwarded_referent(e: &mut E, object: ObjectReference) -> ObjectReference { @@ -225,38 +228,65 @@ impl ReferenceProcessor { pub fn forward(&self, trace: &mut E, _nursery: bool) { let mut sync = unsafe { self.sync_mut() }; - let references: &mut Vec
<Address> = &mut sync.references;
+        let references: &mut Vec<ObjectReference> = &mut sync.references;
         // XXX: Copies `unforwarded_references` out. Should be fine since it's not accessed
         // concurrently & it's set to `None` at the end anyway..
-        let mut unforwarded_references: Vec<Address> = sync.unforwarded_references.clone().unwrap();
+        // let mut unforwarded_references: Vec<Address>
= sync.unforwarded_references.clone().unwrap(); if TRACE { trace!("Starting ReferenceProcessor.forward({:?})", self.semantics); } - if TRACE_DETAIL { - trace!("{:?} Reference table is {:?}", self.semantics, references); - trace!( - "{:?} unforwardedReferences is {:?}", - self.semantics, - unforwarded_references - ); - } + // if TRACE_DETAIL { + // trace!("{:?} Reference table is {:?}", self.semantics, references); + // trace!( + // "{:?} unforwardedReferences is {:?}", + // self.semantics, + // unforwarded_references + // ); + // } - for (i, unforwarded_ref) in unforwarded_references - .iter_mut() - .enumerate() - .take(references.len()) - { - let reference = unsafe { unforwarded_ref.to_object_reference() }; - if TRACE_DETAIL { - trace!("slot {:?}: forwarding {:?}", i, reference); + // for (i, unforwarded_ref) in unforwarded_references + // .iter_mut() + // .enumerate() + // .take(references.len()) + // { + // let reference = unsafe { unforwarded_ref.to_object_reference() }; + // if TRACE_DETAIL { + // trace!("slot {:?}: forwarding {:?}", i, reference); + // } + // ::VMReferenceGlue::set_referent( + // reference, + // Self::get_forwarded_referent(trace, ::VMReferenceGlue::get_referent(reference)), + // ); + // let new_reference = Self::get_forwarded_reference(trace, reference); + // *unforwarded_ref = new_reference.to_address(); + // } + + let mut new_queue = vec![]; + for obj in sync.references.drain(..) { + let old_referent = ::VMReferenceGlue::get_referent(obj); + let new_referent = Self::get_forwarded_referent(trace, old_referent); + if !old_referent.is_null() { + // make sure MC forwards the referent + // debug_assert!(old_referent != new_referent); } ::VMReferenceGlue::set_referent( - reference, - Self::get_forwarded_referent(trace, ::VMReferenceGlue::get_referent(reference)), + obj, + new_referent, ); - let new_reference = Self::get_forwarded_reference(trace, reference); - *unforwarded_ref = new_reference.to_address(); + let new_reference = Self::get_forwarded_reference(trace, obj); + // debug_assert!(!obj.is_null()); + // debug_assert!(obj != new_reference); + if TRACE_DETAIL { + use crate::vm::ObjectModel; + trace!("Forwarding reference: {} (size: {})", obj, ::VMObjectModel::get_current_size(obj)); + trace!(" referent: {} (forwarded to {})", old_referent, new_referent); + trace!(" reference: forwarded to {}", new_reference); + } + // ::VMReferenceGlue::enqueue_reference(new_reference); + debug_assert!(!new_reference.is_null(), "reference {:?}'s forwarding pointer is NULL", obj); + new_queue.push(new_reference); } + sync.references = new_queue; let mut new_queue = vec![]; for obj in sync.enqueued_references.drain(..) { @@ -288,7 +318,7 @@ impl ReferenceProcessor { if TRACE { trace!("Ending ReferenceProcessor.forward({:?})", self.semantics) } - sync.unforwarded_references = None; + // sync.unforwarded_references = None; } fn scan( @@ -299,8 +329,8 @@ impl ReferenceProcessor { tls: VMWorkerThread, ) { let sync = unsafe { self.sync_mut() }; - sync.unforwarded_references = Some(sync.references.clone()); - let references: &mut Vec
= &mut sync.references; + // sync.unforwarded_references = Some(sync.references.clone()); + let references: &mut Vec = &mut sync.references; if TRACE { trace!("Starting ReferenceProcessor.scan({:?})", self.semantics); @@ -312,26 +342,24 @@ impl ReferenceProcessor { trace!("{:?} Reference table is {:?}", self.semantics, references); } if retain { - for addr in references.iter().skip(from_index) { - let reference = unsafe { addr.to_object_reference() }; - self.scan_retain_referent::(trace, reference); + for reference in references.iter().skip(from_index) { + self.scan_retain_referent::(trace, *reference); } } else { for i in from_index..references.len() { - let reference = unsafe { references[i].to_object_reference() }; + let reference = references[i]; /* Determine liveness (and forward if necessary) the reference */ let new_reference = self.process_reference(trace, reference, tls, &mut sync.enqueued_references); if !new_reference.is_null() { - references[to_index] = new_reference.to_address(); + references[to_index] = new_reference; to_index += 1; if TRACE_DETAIL { let index = to_index - 1; trace!( - "SCANNED {} {:?} -> {:?}", + "SCANNED {} {:?}", index, references[index], - unsafe { references[index].to_object_reference() } ); } } From 40e21528976204a200496c7c2e765547bd62a8f8 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Thu, 24 Mar 2022 14:41:42 +1100 Subject: [PATCH 15/25] Massive code clean up for reference processor. Do proper nursery reference scanning. Retain soft reference. --- src/memory_manager.rs | 9 +- src/plan/markcompact/global.rs | 2 +- src/plan/plan_constraints.rs | 2 + src/scheduler/gc_work.rs | 7 + src/scheduler/scheduler.rs | 2 +- src/util/reference_processor.rs | 393 +++++++++++++------------------- src/vm/reference_glue.rs | 10 +- 7 files changed, 183 insertions(+), 242 deletions(-) diff --git a/src/memory_manager.rs b/src/memory_manager.rs index 907042828e..35c98d8109 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -416,7 +416,8 @@ pub fn modify_check(mmtk: &MMTK, object: ObjectReference) { mmtk.plan.modify_check(object); } -/// Add a reference to the list of weak references. +/// Add a reference to the list of weak references. A binding may +/// call this either when a weak reference is created, or when a weak reference is traced during GC. /// /// Arguments: /// * `mmtk`: A reference to an MMTk instance. @@ -431,7 +432,8 @@ pub fn add_weak_candidate( .add_weak_candidate::(reff, referent); } -/// Add a reference to the list of soft references. +/// Add a reference to the list of soft references. A binding may +/// call this either when a weak reference is created, or when a weak reference is traced during GC. /// /// Arguments: /// * `mmtk`: A reference to an MMTk instance. @@ -446,7 +448,8 @@ pub fn add_soft_candidate( .add_soft_candidate::(reff, referent); } -/// Add a reference to the list of phantom references. +/// Add a reference to the list of phantom references. A binding may +/// call this either when a weak reference is created, or when a weak reference is traced during GC. /// /// Arguments: /// * `mmtk`: A reference to an MMTk instance. 
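The reference scanning rewritten above (and cleaned up further in the next patch) amounts to compacting the reference table: references whose referent survives stay in the table at their forwarded addresses, while references whose referent died are cleared and moved to an enqueued list that is later reported back to the VM. A simplified standalone model of that bookkeeping — Ref is an invented stand-in for ObjectReference, there is no tracing or forwarding, and the case where the reference object itself is dead is ignored:

// Simplified standalone model, not MMTk code.
#[derive(Clone, Copy, Debug)]
struct Ref {
    id: usize,
    referent_live: bool,
    referent_cleared: bool,
}

/// Keep references whose referent survives; clear and enqueue those whose referent died.
fn scan(references: &mut Vec<Ref>, enqueued: &mut Vec<Ref>) {
    let mut to_index = 0;
    for i in 0..references.len() {
        let mut r = references[i];
        if r.referent_cleared {
            // The application already cleared the referent: drop it from the table, no enqueue.
            continue;
        }
        if r.referent_live {
            // Referent survives: the reference stays in the table for the next GC.
            references[to_index] = r;
            to_index += 1;
        } else {
            // Referent is dead: clear it and queue the reference to be reported to the VM.
            r.referent_cleared = true;
            enqueued.push(r);
        }
    }
    references.truncate(to_index);
}

fn main() {
    let mut refs = vec![
        Ref { id: 0, referent_live: true, referent_cleared: false },
        Ref { id: 1, referent_live: false, referent_cleared: false },
        Ref { id: 2, referent_live: false, referent_cleared: true },
    ];
    let mut enqueued = Vec::new();
    scan(&mut refs, &mut enqueued);
    assert_eq!(refs[0].id, 0);     // only the reference with a live referent stays in the table
    assert_eq!(enqueued[0].id, 1); // the dead-referent reference is queued for enqueue_references
    println!("kept: {:?}", refs);
    println!("enqueued: {:?}", enqueued);
}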
diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 0bb3ae024e..108739301b 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -122,7 +122,7 @@ impl Plan for MarkCompact { scheduler.work_buckets[WorkBucketStage::RefForwarding].add(RefForwarding::>::new()); use crate::util::reference_processor::RefEnqueue; - scheduler.work_buckets[WorkBucketStage::Release].add(RefEnqueue::>::new()); + scheduler.work_buckets[WorkBucketStage::Release].add(RefEnqueue::::new()); } // Finalization diff --git a/src/plan/plan_constraints.rs b/src/plan/plan_constraints.rs index a0eea981ee..700f379d56 100644 --- a/src/plan/plan_constraints.rs +++ b/src/plan/plan_constraints.rs @@ -30,6 +30,8 @@ pub struct PlanConstraints { pub needs_linear_scan: bool, pub needs_concurrent_workers: bool, pub generate_gc_trace: bool, + /// Some policies do object forwarding after the first liveness transitive closure, such as mark compact. + /// For plans that use those policies, they should set this as true. pub needs_forward_after_liveness: bool, } diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 1b6780cf65..36ef1d8760 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -383,6 +383,13 @@ impl ProcessEdgesBase { } /// Scan & update a list of object slots +// +// Note: be very careful when using this trait. process_node() will push objects +// to the buffer, and it is expected that at the end of the operation, flush() +// is called to create new scan work from the buffered objects. If flush() +// is not called, we may miss the objects in the GC and have dangling pointers. +// FIXME: We possibly want to enforce Drop on this trait, and require calling +// flush() in Drop. pub trait ProcessEdgesWork: Send + 'static + Sized + DerefMut + Deref> { diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index a227c59d11..5e4aba4b0a 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -226,7 +226,7 @@ impl GCWorkScheduler { } use crate::util::reference_processor::RefEnqueue; - self.work_buckets[WorkBucketStage::Release].add(RefEnqueue::::new()); + self.work_buckets[WorkBucketStage::Release].add(RefEnqueue::::new()); } // Finalization diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index 9244ae6f35..ebe225033f 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -1,14 +1,14 @@ -use std::cell::UnsafeCell; use std::sync::Mutex; use std::vec::Vec; -// use crate::plan::TraceLocal; -use crate::util::opaque_pointer::*; -use crate::util::{Address, ObjectReference}; +use crate::util::ObjectReference; use crate::vm::ReferenceGlue; use crate::vm::VMBinding; use crate::scheduler::ProcessEdgesWork; +/// Holds all reference processors for each weak reference Semantics. +/// Currently this is based on Java's weak reference semantics (soft/weak/phantom). +/// We should make changes to make this general rather than Java specific. 
pub struct ReferenceProcessors { soft: ReferenceProcessor, weak: ReferenceProcessor, @@ -32,18 +32,12 @@ impl ReferenceProcessors { } } - pub fn clear(&self) { - self.soft.clear(); - self.weak.clear(); - self.phantom.clear(); - } - pub fn add_soft_candidate( &self, reff: ObjectReference, referent: ObjectReference, ) { - trace!("Add soft candidate: {} -> {}", reff, referent); + trace!("Add soft candidate: {} ~> {}", reff, referent); self.soft.add_candidate::(reff, referent); } @@ -52,7 +46,7 @@ impl ReferenceProcessors { reff: ObjectReference, referent: ObjectReference, ) { - trace!("Add weak candidate: {} -> {}", reff, referent); + trace!("Add weak candidate: {} ~> {}", reff, referent); self.weak.add_candidate::(reff, referent); } @@ -61,37 +55,56 @@ impl ReferenceProcessors { reff: ObjectReference, referent: ObjectReference, ) { - trace!("Add phantom candidate: {} -> {}", reff, referent); + trace!("Add phantom candidate: {} ~> {}", reff, referent); self.phantom.add_candidate::(reff, referent); } - pub fn enqueue_refs(&self, trace: &mut E) { - self.soft.enqueue::(trace, false); - self.weak.enqueue::(trace, false); - self.phantom.enqueue::(trace, false); + /// This will invoke enqueue for each reference processor, which will + /// call back to the VM to enqueue references whose referents are cleared + /// in this GC. + pub fn enqueue_refs(&self) { + self.soft.enqueue::(); + self.weak.enqueue::(); + self.phantom.enqueue::(); } - pub fn forward_refs(&self, trace: &mut E) { - self.soft.forward::(trace, false); - self.weak.forward::(trace, false); - self.phantom.forward::(trace, false); + /// A separate reference forwarding step. Normally when we scan refs, we deal with forwarding. + /// However, for some plans like mark compact, at the point we do ref scanning, we do not know + /// the forwarding addresses yet, thus we cannot do forwarding during scan refs. And for those + /// plans, this separate step is required. + pub fn forward_refs(&self, trace: &mut E, mmtk: &'static MMTK) { + debug_assert!(mmtk.plan.constraints().needs_forward_after_liveness, "A plan with needs_forward_after_liveness=false does not need a separate forward step"); + self.soft.forward::(trace, mmtk.plan.is_current_gc_nursery()); + self.weak.forward::(trace, mmtk.plan.is_current_gc_nursery()); + self.phantom.forward::(trace, mmtk.plan.is_current_gc_nursery()); } - pub fn scan_weak_refs(&self, trace: &mut E, tls: VMWorkerThread) { - self.soft.scan::(trace, false, false, tls); - self.weak.scan::(trace, false, false, tls); + // Methods for scanning weak references. It needs to be called in a decreasing order of reference strengths, i.e. soft > weak > phantom + + /// Scan weak references. + pub fn scan_weak_refs(&self, trace: &mut E, mmtk: &'static MMTK) { + self.soft.scan::(trace, mmtk.plan.is_current_gc_nursery(), false); + self.weak.scan::(trace, mmtk.plan.is_current_gc_nursery(), false); } - pub fn scan_soft_refs(&self, trace: &mut E, tls: VMWorkerThread) { - self.soft.scan::(trace, false, false, tls); + /// Scan soft references. + pub fn scan_soft_refs(&self, trace: &mut E, mmtk: &'static MMTK) { + // For soft refs, it is up to the VM to decide when to reclaim this. + // If this is not an emergency collection, we have no heap stress. We simply retain soft refs. + if !mmtk.plan.is_emergency_collection() { + // This step only retains the referents (keep the referents alive), it does not update its addresses. + // We will call soft.scan() again with retain=false to update its addresses based on liveness. 
+ self.soft.scan::(trace, mmtk.plan.is_current_gc_nursery(), true); + } } + /// Scan phantom references. pub fn scan_phantom_refs( &self, trace: &mut E, - tls: VMWorkerThread, + _mmtk: &'static MMTK ) { - self.phantom.scan::(trace, false, false, tls); + self.phantom.scan::(trace, mmtk.plan.is_current_gc_nursery(), false); } } @@ -116,21 +129,21 @@ pub const TRACE_FORWARD: bool = true; // luckily this is also the value used by Java MMTk.) const INITIAL_SIZE: usize = 256; +/// We create a reference processor for each semantics. Generally we expect these +/// to happen for each processor: +/// 1. The VM adds reference candidates. They could either do it when a weak reference +/// is created, or when a weak reference is traced during GC. +/// 2. We scan references after the GC determins liveness. +/// 3. We forward references if the GC needs forwarding after liveness. +/// 4. We inform the binding of references whose referents are cleared during this GC by enqueue'ing. pub struct ReferenceProcessor { - // XXX: To support the possibility of the collector working - // on the reference in parallel, we wrap the structure - // in an UnsafeCell. - sync: UnsafeCell>, - - /** - * Semantics - */ + /// Most of the reference processor is protected by a mutex. + sync: Mutex, + + /// The semantics for the reference processor semantics: Semantics, } -// TODO: We should carefully examine the unsync with UnsafeCell. We should be able to provide a safe implementation. -unsafe impl Sync for ReferenceProcessor {} - #[derive(Debug, PartialEq)] pub enum Semantics { SOFT, @@ -139,227 +152,165 @@ pub enum Semantics { } struct ReferenceProcessorSync { - // XXX: A data race on any of these fields is UB. If - // parallelizing this code, change the types to - // have the correct semantics. - /** - * The table of reference objects for the current semantics - */ + /// The table of reference objects for the current semantics. We add references to this table by + /// add_candidate(). After scanning this table, a reference in the table should either + /// stay in the table (if the referent is alive) or go to enqueued_reference (if the referent is dead and cleared). + /// Note that this table should not have duplicate entries, otherwise we will scan the duplicates multiple times, and + /// that may lead to incorrect results. references: Vec, - /** - * In a MarkCompact (or similar) collector, we need to update the {@code references} - * field, and then update its contents. We implement this by saving the pointer in - * this untraced field for use during the {@code forward} pass. - */ - // unforwarded_references: Option>, - + /// References whose referents are cleared during this GC. We add references to this table during + /// scanning, and we pop from this table during the enqueue work at the end of GC. enqueued_references: Vec, - /** - * Index into the references table for the start of - * the reference nursery. - */ + /// Index into the references table for the start of nursery objects nursery_index: usize, } impl ReferenceProcessor { pub fn new(semantics: Semantics) -> Self { ReferenceProcessor { - sync: UnsafeCell::new(Mutex::new(ReferenceProcessorSync { + sync: Mutex::new(ReferenceProcessorSync { references: Vec::with_capacity(INITIAL_SIZE), - // unforwarded_references: None, enqueued_references: vec![], nursery_index: 0, - })), + }), semantics, } } - fn sync(&self) -> &Mutex { - unsafe { &*self.sync.get() } - } - - // UNSAFE: Bypasses mutex - // It is designed to allow getting mut ref from UnsafeCell. 
- // TODO: We may need to rework on this to remove the unsafety. - #[allow(clippy::mut_from_ref)] - unsafe fn sync_mut(&self) -> &mut ReferenceProcessorSync { - (*self.sync.get()).get_mut().unwrap() - } - - pub fn clear(&self) { - let mut sync = self.sync().lock().unwrap(); - sync.references.clear(); - // sync.unforwarded_references = None; - sync.nursery_index = 0; - } - - pub fn add_candidate(&self, reff: ObjectReference, referent: ObjectReference) { - let mut sync = self.sync().lock().unwrap(); - // VM::VMReferenceGlue::set_referent(reff, referent); + /// Add a candidate. + // TODO: do we need the referent argument? + pub fn add_candidate(&self, reff: ObjectReference, _referent: ObjectReference) { + let mut sync = self.sync.lock().unwrap(); + // We make sure that we do not have duplicate entries + // TODO: Should we use hash set instead? if sync.references.iter().any(|r| *r == reff) { return; } sync.references.push(reff); } - fn get_forwarded_referent(e: &mut E, object: ObjectReference) -> ObjectReference { - e.trace_object(object) + // These funcions simply call `trace_object()`, which does two things: 1. to make sure the object is kept alive + // and 2. to get the new object reference if the object is copied. The functions are intended to make the code + // easier to understand. + + #[inline(always)] + fn get_forwarded_referent(e: &mut E, referent: ObjectReference) -> ObjectReference { + e.trace_object(referent) } + #[inline(always)] fn get_forwarded_reference(e: &mut E, object: ObjectReference) -> ObjectReference { e.trace_object(object) } - fn retain_referent(e: &mut E, object: ObjectReference) -> ObjectReference { - e.trace_object(object) + #[inline(always)] + fn keep_referent_alive(e: &mut E, referent: ObjectReference) -> ObjectReference { + e.trace_object(referent) } - pub fn enqueue(&self, trace: &mut E, _nursery: bool) { - let mut sync = unsafe { self.sync_mut() }; + /// Inform the binding to enqueue the weak references whose referents were cleared in this GC. + pub fn enqueue(&self) { + let mut sync = self.sync.lock().unwrap(); if !sync.enqueued_references.is_empty() { debug!("enqueue: {:?}", sync.enqueued_references); - ::VMReferenceGlue::enqueue_references(&sync.enqueued_references); + VM::VMReferenceGlue::enqueue_references(&sync.enqueued_references); sync.enqueued_references.clear(); } } + /// Forward the reference tables in the reference processor. This is only needed if a plan does not forward + /// objects in their first transitive closure. + /// nursery is not used for this. pub fn forward(&self, trace: &mut E, _nursery: bool) { - let mut sync = unsafe { self.sync_mut() }; - let references: &mut Vec = &mut sync.references; - // XXX: Copies `unforwarded_references` out. Should be fine since it's not accessed - // concurrently & it's set to `None` at the end anyway.. - // let mut unforwarded_references: Vec
= sync.unforwarded_references.clone().unwrap(); + let mut sync = self.sync.lock().unwrap(); if TRACE { trace!("Starting ReferenceProcessor.forward({:?})", self.semantics); } - // if TRACE_DETAIL { - // trace!("{:?} Reference table is {:?}", self.semantics, references); - // trace!( - // "{:?} unforwardedReferences is {:?}", - // self.semantics, - // unforwarded_references - // ); - // } - - // for (i, unforwarded_ref) in unforwarded_references - // .iter_mut() - // .enumerate() - // .take(references.len()) - // { - // let reference = unsafe { unforwarded_ref.to_object_reference() }; - // if TRACE_DETAIL { - // trace!("slot {:?}: forwarding {:?}", i, reference); - // } - // ::VMReferenceGlue::set_referent( - // reference, - // Self::get_forwarded_referent(trace, ::VMReferenceGlue::get_referent(reference)), - // ); - // let new_reference = Self::get_forwarded_reference(trace, reference); - // *unforwarded_ref = new_reference.to_address(); - // } - - let mut new_queue = vec![]; - for obj in sync.references.drain(..) { - let old_referent = ::VMReferenceGlue::get_referent(obj); - let new_referent = Self::get_forwarded_referent(trace, old_referent); - if !old_referent.is_null() { - // make sure MC forwards the referent - // debug_assert!(old_referent != new_referent); - } - ::VMReferenceGlue::set_referent( - obj, - new_referent, - ); - let new_reference = Self::get_forwarded_reference(trace, obj); - // debug_assert!(!obj.is_null()); - // debug_assert!(obj != new_reference); - if TRACE_DETAIL { - use crate::vm::ObjectModel; - trace!("Forwarding reference: {} (size: {})", obj, ::VMObjectModel::get_current_size(obj)); - trace!(" referent: {} (forwarded to {})", old_referent, new_referent); - trace!(" reference: forwarded to {}", new_reference); - } - // ::VMReferenceGlue::enqueue_reference(new_reference); - debug_assert!(!new_reference.is_null(), "reference {:?}'s forwarding pointer is NULL", obj); - new_queue.push(new_reference); - } - sync.references = new_queue; - let mut new_queue = vec![]; - for obj in sync.enqueued_references.drain(..) 
{ - let old_referent = ::VMReferenceGlue::get_referent(obj); - let new_referent = Self::get_forwarded_referent(trace, old_referent); - if !old_referent.is_null() { - // make sure MC forwards the referent - // debug_assert!(old_referent != new_referent); - } + // Forward a single reference + #[inline(always)] + fn forward_reference(trace: &mut E, reference: ObjectReference) -> ObjectReference { + let old_referent = ::VMReferenceGlue::get_referent(reference); + let new_referent = ReferenceProcessor::get_forwarded_referent(trace, old_referent); ::VMReferenceGlue::set_referent( - obj, + reference, new_referent, ); - let new_reference = Self::get_forwarded_reference(trace, obj); - // debug_assert!(!obj.is_null()); - // debug_assert!(obj != new_reference); + let new_reference = ReferenceProcessor::get_forwarded_reference(trace, reference); if TRACE_DETAIL { use crate::vm::ObjectModel; - trace!("Forwarding enqueued reference: {} (size: {})", obj, ::VMObjectModel::get_current_size(obj)); + trace!("Forwarding reference: {} (size: {})", reference, ::VMObjectModel::get_current_size(reference)); trace!(" referent: {} (forwarded to {})", old_referent, new_referent); trace!(" reference: forwarded to {}", new_reference); } - // ::VMReferenceGlue::enqueue_reference(new_reference); - debug_assert!(!new_reference.is_null(), "reference {:?}'s forwarding pointer is NULL", obj); - new_queue.push(new_reference); + debug_assert!(!new_reference.is_null(), "reference {:?}'s forwarding pointer is NULL", reference); + new_reference } - sync.enqueued_references = new_queue; + + sync.references.iter_mut().for_each(|slot: &mut ObjectReference| { + let reference = *slot; + *slot = forward_reference::(trace, reference); + }); + + sync.enqueued_references.iter_mut().for_each(|slot: &mut ObjectReference| { + let reference = *slot; + *slot = forward_reference::(trace, reference); + }); if TRACE { trace!("Ending ReferenceProcessor.forward({:?})", self.semantics) } - // sync.unforwarded_references = None; } + /// Scan the reference table. fn scan( &self, trace: &mut E, nursery: bool, retain: bool, - tls: VMWorkerThread, ) { - let sync = unsafe { self.sync_mut() }; - // sync.unforwarded_references = Some(sync.references.clone()); - let references: &mut Vec = &mut sync.references; + let mut sync = self.sync.lock().unwrap(); if TRACE { trace!("Starting ReferenceProcessor.scan({:?})", self.semantics); } - let mut to_index = if nursery { sync.nursery_index } else { 0 }; - let from_index = to_index; + // Start scanning from here + let from_index = if nursery { sync.nursery_index } else { 0 }; if TRACE_DETAIL { - trace!("{:?} Reference table is {:?}", self.semantics, references); + trace!("{:?} Reference table is {:?}", self.semantics, sync.references); } if retain { - for reference in references.iter().skip(from_index) { - self.scan_retain_referent::(trace, *reference); + for reference in sync.references.iter().skip(from_index) { + // Retain the referent. This does not update the reference table. + // There will be a later scan pass that update the reference table. + self.retain_referent::(trace, *reference); } } else { - for i in from_index..references.len() { - let reference = references[i]; - - /* Determine liveness (and forward if necessary) the reference */ - let new_reference = self.process_reference(trace, reference, tls, &mut sync.enqueued_references); + // A cursor. Live reference will be stored at the cursor. + let mut to_index = from_index; + + // Iterate from from_index, process/forward for each reference. 
+ // If the reference is alive (process_reference() returned a non-NULL value), + // store the forwarded reference back at the cursor. + for i in from_index..sync.references.len() { + let reference = sync.references[i]; + + // Determine liveness (and forward if necessary) the reference + let new_reference = self.process_reference(trace, reference, &mut sync.enqueued_references); + // If the reference is alive, put it back to the array if !new_reference.is_null() { - references[to_index] = new_reference; + sync.references[to_index] = new_reference; to_index += 1; if TRACE_DETAIL { let index = to_index - 1; trace!( "SCANNED {} {:?}", index, - references[index], + sync.references[index], ); } } @@ -367,31 +318,21 @@ impl ReferenceProcessor { trace!( "{:?} references: {} -> {}", self.semantics, - references.len(), + sync.references.len(), to_index ); sync.nursery_index = to_index; - references.truncate(to_index); + sync.references.truncate(to_index); } - /* flush out any remset entries generated during the above activities */ - - // FIXME: We are calling mutator() for a worker thread - // panic!("We are calling mutator() for a worker tls. We need to fix this."); - // unsafe { VM::VMActivePlan::mutator(tls)) }.flush_remembered_sets(); - // if TRACE { - // trace!("Ending ReferenceProcessor.scan({:?})", self.semantics); - // } + if TRACE { + trace!("Ending ReferenceProcessor.scan({:?})", self.semantics); + } } - /** - * This method deals only with soft references. It retains the referent - * if the reference is definitely reachable. - * @param reference the address of the reference. This may or may not - * be the address of a heap object, depending on the VM. - * @param trace the thread local trace element. - */ - fn scan_retain_referent( + /// This method deals only with soft references. It retains the referent + /// if the reference is definitely reachable. + fn retain_referent( &self, trace: &mut E, reference: ObjectReference, @@ -404,37 +345,32 @@ impl ReferenceProcessor { } if !reference.is_live() { - /* - * Reference is currently unreachable but may get reachable by the - * following trace. We postpone the decision. - */ + // Reference is currently unreachable but may get reachable by the + // following trace. We postpone the decision. return; } - /* - * Reference is definitely reachable. Retain the referent. - */ + // Reference is definitely reachable. Retain the referent. let referent = ::VMReferenceGlue::get_referent(reference); if !referent.is_null() { - Self::retain_referent(trace, referent); + Self::keep_referent_alive(trace, referent); } if TRACE_DETAIL { trace!(" ~> {:?} (retained)", referent.to_address()); } } - /// Process a reference with the current semantics and return an updated reference (e.g. with a new address) - /// if the reference is still alive, otherwise return a null object reference. + /// Process a reference. + /// * If both the reference and the referent is alive, return the updated reference and update its referent properly. + /// * If the reference is alive, and the referent is not null but not alive, return a null pointer and the reference (with cleared referent) is enqueued. + /// * For other cases, return a null pointer. /// - /// Arguments: - /// * `trace`: A reference to a `TraceLocal` object for this reference. - /// * `reference`: The address of the reference. This may or may not be the address of a heap object, depending on the VM. - /// * `tls`: The GC thread that is processing this reference. 
+ /// If a null pointer is returned, the reference can be removed from the reference table. Otherwise, the updated reference should be kept + /// in the reference table. fn process_reference( &self, trace: &mut E, reference: ObjectReference, - tls: VMWorkerThread, enqueued_references: &mut Vec, ) -> ObjectReference { debug_assert!(!reference.is_null()); @@ -452,10 +388,7 @@ impl ReferenceProcessor { // The reference object is live let new_reference = Self::get_forwarded_reference(trace, reference); - // This assert should be true for mark compact - // debug_assert_eq!(reference, new_reference); let old_referent = ::VMReferenceGlue::get_referent(reference); - if TRACE_DETAIL { trace!(" ~> {}", old_referent); } // If the application has cleared the referent the Java spec says @@ -473,11 +406,8 @@ impl ReferenceProcessor { // Referent is still reachable in a way that is as strong as // or stronger than the current reference level. let new_referent = Self::get_forwarded_referent(trace, old_referent); - // This assert should be true for mark compact - // debug_assert_eq!(old_referent, new_referent); - - if TRACE_DETAIL { trace!(" ~> {}", new_referent); } debug_assert!(new_referent.is_live()); + if TRACE_DETAIL { trace!(" ~> {}", new_referent); } // The reference object stays on the waiting list, and the // referent is untouched. The only thing we must do is @@ -494,7 +424,6 @@ impl ReferenceProcessor { else if TRACE_UNREACHABLE { trace!(" UNREACHABLE referent: {}", old_referent); } ::VMReferenceGlue::clear_referent(new_reference); - // ::VMReferenceGlue::enqueue_reference(new_reference); enqueued_references.push(new_reference); return ObjectReference::NULL; } @@ -511,7 +440,8 @@ impl GCWork for SoftRefProcessing { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { let mut w = E::new(vec![], false, mmtk); w.set_worker(worker); - mmtk.reference_processors.scan_soft_refs(&mut w, worker.tls); + mmtk.reference_processors.scan_soft_refs(&mut w, mmtk); + w.flush(); } } impl SoftRefProcessing { @@ -525,7 +455,8 @@ impl GCWork for WeakRefProcessing { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { let mut w = E::new(vec![], false, mmtk); w.set_worker(worker); - mmtk.reference_processors.scan_weak_refs(&mut w, worker.tls); + mmtk.reference_processors.scan_weak_refs(&mut w, mmtk); + w.flush(); } } impl WeakRefProcessing { @@ -539,7 +470,8 @@ impl GCWork for PhantomRefProcessing { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { let mut w = E::new(vec![], false, mmtk); w.set_worker(worker); - mmtk.reference_processors.scan_phantom_refs(&mut w, worker.tls); + mmtk.reference_processors.scan_phantom_refs(&mut w, mmtk); + w.flush(); } } impl PhantomRefProcessing { @@ -553,7 +485,8 @@ impl GCWork for RefForwarding { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { let mut w = E::new(vec![], false, mmtk); w.set_worker(worker); - mmtk.reference_processors.forward_refs(&mut w); + mmtk.reference_processors.forward_refs(&mut w, mmtk); + w.flush(); } } impl RefForwarding { @@ -562,15 +495,13 @@ impl RefForwarding { } } -pub struct RefEnqueue(PhantomData); -impl GCWork for RefEnqueue { - fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { - let mut w = E::new(vec![], false, mmtk); - w.set_worker(worker); - mmtk.reference_processors.enqueue_refs(&mut w); +pub struct RefEnqueue(PhantomData); +impl GCWork for RefEnqueue { + fn do_work(&mut self, _worker: &mut GCWorker, mmtk: &'static MMTK) { + 
mmtk.reference_processors.enqueue_refs::(); } } -impl RefEnqueue { +impl RefEnqueue { pub fn new() -> Self { Self(PhantomData) } diff --git a/src/vm/reference_glue.rs b/src/vm/reference_glue.rs index 6a480573f2..7bc1c0af38 100644 --- a/src/vm/reference_glue.rs +++ b/src/vm/reference_glue.rs @@ -1,9 +1,6 @@ -use crate::plan::TraceLocal; -use crate::util::opaque_pointer::*; use crate::util::Address; use crate::util::ObjectReference; use crate::vm::VMBinding; -use crate::scheduler::ProcessEdgesWork; /// VM-specific methods for reference processing. pub trait ReferenceGlue { @@ -31,11 +28,12 @@ pub trait ReferenceGlue { /// * `referent`: The referent object reference. fn set_referent(reff: ObjectReference, referent: ObjectReference); - /// For reference types, if the referent is changed during GC, the reference + /// For reference types, if the referent is cleared during GC, the reference /// will be added to a queue, and MMTk will call this method to inform - /// the VM aobut the changes for those references. + /// the VM about the changes for those references. This method is used + /// to implement Java's ReferenceQueue. /// Note that this method is called for each type of weak references during GC, and - /// the references slice will be cleared once this call is returned. That means + /// the references slice will be cleared after this call is returned. That means /// MMTk will no longer keep these references alive once this method is returned. fn enqueue_references(references: &[ObjectReference]); } From dde48d1a056046459aabad0ffbd28fd400a28bf7 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Thu, 24 Mar 2022 15:01:25 +1100 Subject: [PATCH 16/25] Fix build/style --- src/plan/markcompact/global.rs | 16 +- src/scheduler/scheduler.rs | 16 +- src/util/reference_processor.rs | 189 ++++++++++++----------- vmbindings/dummyvm/src/reference_glue.rs | 4 +- 4 files changed, 121 insertions(+), 104 deletions(-) diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 108739301b..89d6bdffc1 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -113,13 +113,19 @@ impl Plan for MarkCompact { // Reference processing if !*self.base().options.no_reference_types { - use crate::util::reference_processor::{SoftRefProcessing, WeakRefProcessing, PhantomRefProcessing}; - scheduler.work_buckets[WorkBucketStage::SoftRefClosure].add(SoftRefProcessing::>::new()); - scheduler.work_buckets[WorkBucketStage::WeakRefClosure].add(WeakRefProcessing::>::new()); - scheduler.work_buckets[WorkBucketStage::PhantomRefClosure].add(PhantomRefProcessing::>::new()); + use crate::util::reference_processor::{ + PhantomRefProcessing, SoftRefProcessing, WeakRefProcessing, + }; + scheduler.work_buckets[WorkBucketStage::SoftRefClosure] + .add(SoftRefProcessing::>::new()); + scheduler.work_buckets[WorkBucketStage::WeakRefClosure] + .add(WeakRefProcessing::>::new()); + scheduler.work_buckets[WorkBucketStage::PhantomRefClosure] + .add(PhantomRefProcessing::>::new()); use crate::util::reference_processor::RefForwarding; - scheduler.work_buckets[WorkBucketStage::RefForwarding].add(RefForwarding::>::new()); + scheduler.work_buckets[WorkBucketStage::RefForwarding] + .add(RefForwarding::>::new()); use crate::util::reference_processor::RefEnqueue; scheduler.work_buckets[WorkBucketStage::Release].add(RefEnqueue::::new()); diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index 5e4aba4b0a..21029095ca 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -215,14 
+215,20 @@ impl GCWorkScheduler { // Reference processing if !*plan.base().options.no_reference_types { - use crate::util::reference_processor::{SoftRefProcessing, WeakRefProcessing, PhantomRefProcessing}; - self.work_buckets[WorkBucketStage::SoftRefClosure].add(SoftRefProcessing::::new()); - self.work_buckets[WorkBucketStage::WeakRefClosure].add(WeakRefProcessing::::new()); - self.work_buckets[WorkBucketStage::PhantomRefClosure].add(PhantomRefProcessing::::new()); + use crate::util::reference_processor::{ + PhantomRefProcessing, SoftRefProcessing, WeakRefProcessing, + }; + self.work_buckets[WorkBucketStage::SoftRefClosure] + .add(SoftRefProcessing::::new()); + self.work_buckets[WorkBucketStage::WeakRefClosure] + .add(WeakRefProcessing::::new()); + self.work_buckets[WorkBucketStage::PhantomRefClosure] + .add(PhantomRefProcessing::::new()); use crate::util::reference_processor::RefForwarding; if plan.constraints().needs_forward_after_liveness { - self.work_buckets[WorkBucketStage::RefForwarding].add(RefForwarding::::new()); + self.work_buckets[WorkBucketStage::RefForwarding] + .add(RefForwarding::::new()); } use crate::util::reference_processor::RefEnqueue; diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index ebe225033f..9fe8d310c2 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -1,10 +1,10 @@ use std::sync::Mutex; use std::vec::Vec; +use crate::scheduler::ProcessEdgesWork; use crate::util::ObjectReference; use crate::vm::ReferenceGlue; use crate::vm::VMBinding; -use crate::scheduler::ProcessEdgesWork; /// Holds all reference processors for each weak reference Semantics. /// Currently this is based on Java's weak reference semantics (soft/weak/phantom). @@ -73,18 +73,26 @@ impl ReferenceProcessors { /// the forwarding addresses yet, thus we cannot do forwarding during scan refs. And for those /// plans, this separate step is required. pub fn forward_refs(&self, trace: &mut E, mmtk: &'static MMTK) { - debug_assert!(mmtk.plan.constraints().needs_forward_after_liveness, "A plan with needs_forward_after_liveness=false does not need a separate forward step"); - self.soft.forward::(trace, mmtk.plan.is_current_gc_nursery()); - self.weak.forward::(trace, mmtk.plan.is_current_gc_nursery()); - self.phantom.forward::(trace, mmtk.plan.is_current_gc_nursery()); + debug_assert!( + mmtk.plan.constraints().needs_forward_after_liveness, + "A plan with needs_forward_after_liveness=false does not need a separate forward step" + ); + self.soft + .forward::(trace, mmtk.plan.is_current_gc_nursery()); + self.weak + .forward::(trace, mmtk.plan.is_current_gc_nursery()); + self.phantom + .forward::(trace, mmtk.plan.is_current_gc_nursery()); } // Methods for scanning weak references. It needs to be called in a decreasing order of reference strengths, i.e. soft > weak > phantom /// Scan weak references. pub fn scan_weak_refs(&self, trace: &mut E, mmtk: &'static MMTK) { - self.soft.scan::(trace, mmtk.plan.is_current_gc_nursery(), false); - self.weak.scan::(trace, mmtk.plan.is_current_gc_nursery(), false); + self.soft + .scan::(trace, mmtk.plan.is_current_gc_nursery(), false); + self.weak + .scan::(trace, mmtk.plan.is_current_gc_nursery(), false); } /// Scan soft references. @@ -94,7 +102,8 @@ impl ReferenceProcessors { if !mmtk.plan.is_emergency_collection() { // This step only retains the referents (keep the referents alive), it does not update its addresses. 
// We will call soft.scan() again with retain=false to update its addresses based on liveness. - self.soft.scan::(trace, mmtk.plan.is_current_gc_nursery(), true); + self.soft + .scan::(trace, mmtk.plan.is_current_gc_nursery(), true); } } @@ -102,9 +111,10 @@ impl ReferenceProcessors { pub fn scan_phantom_refs( &self, trace: &mut E, - _mmtk: &'static MMTK + mmtk: &'static MMTK, ) { - self.phantom.scan::(trace, mmtk.plan.is_current_gc_nursery(), false); + self.phantom + .scan::(trace, mmtk.plan.is_current_gc_nursery(), false); } } @@ -114,12 +124,6 @@ impl Default for ReferenceProcessors { } } -// Debug flags -pub const TRACE: bool = true; -pub const TRACE_UNREACHABLE: bool = true; -pub const TRACE_DETAIL: bool = true; -pub const TRACE_FORWARD: bool = true; - // XXX: We differ from the original implementation // by ignoring "stress," i.e. where the array // of references is grown by 1 each time. We @@ -196,17 +200,26 @@ impl ReferenceProcessor { // easier to understand. #[inline(always)] - fn get_forwarded_referent(e: &mut E, referent: ObjectReference) -> ObjectReference { + fn get_forwarded_referent( + e: &mut E, + referent: ObjectReference, + ) -> ObjectReference { e.trace_object(referent) } #[inline(always)] - fn get_forwarded_reference(e: &mut E, object: ObjectReference) -> ObjectReference { + fn get_forwarded_reference( + e: &mut E, + object: ObjectReference, + ) -> ObjectReference { e.trace_object(object) } #[inline(always)] - fn keep_referent_alive(e: &mut E, referent: ObjectReference) -> ObjectReference { + fn keep_referent_alive( + e: &mut E, + referent: ObjectReference, + ) -> ObjectReference { e.trace_object(referent) } @@ -226,63 +239,69 @@ impl ReferenceProcessor { /// nursery is not used for this. pub fn forward(&self, trace: &mut E, _nursery: bool) { let mut sync = self.sync.lock().unwrap(); - if TRACE { - trace!("Starting ReferenceProcessor.forward({:?})", self.semantics); - } + debug!("Starting ReferenceProcessor.forward({:?})", self.semantics); // Forward a single reference #[inline(always)] - fn forward_reference(trace: &mut E, reference: ObjectReference) -> ObjectReference { + fn forward_reference( + trace: &mut E, + reference: ObjectReference, + ) -> ObjectReference { let old_referent = ::VMReferenceGlue::get_referent(reference); let new_referent = ReferenceProcessor::get_forwarded_referent(trace, old_referent); - ::VMReferenceGlue::set_referent( - reference, - new_referent, - ); + ::VMReferenceGlue::set_referent(reference, new_referent); let new_reference = ReferenceProcessor::get_forwarded_reference(trace, reference); - if TRACE_DETAIL { + { use crate::vm::ObjectModel; - trace!("Forwarding reference: {} (size: {})", reference, ::VMObjectModel::get_current_size(reference)); - trace!(" referent: {} (forwarded to {})", old_referent, new_referent); + trace!( + "Forwarding reference: {} (size: {})", + reference, + ::VMObjectModel::get_current_size(reference) + ); + trace!( + " referent: {} (forwarded to {})", + old_referent, + new_referent + ); trace!(" reference: forwarded to {}", new_reference); } - debug_assert!(!new_reference.is_null(), "reference {:?}'s forwarding pointer is NULL", reference); + debug_assert!( + !new_reference.is_null(), + "reference {:?}'s forwarding pointer is NULL", + reference + ); new_reference } - sync.references.iter_mut().for_each(|slot: &mut ObjectReference| { - let reference = *slot; - *slot = forward_reference::(trace, reference); - }); - - sync.enqueued_references.iter_mut().for_each(|slot: &mut ObjectReference| { - let reference = *slot; - 
*slot = forward_reference::(trace, reference); - }); - - if TRACE { - trace!("Ending ReferenceProcessor.forward({:?})", self.semantics) - } + sync.references + .iter_mut() + .for_each(|slot: &mut ObjectReference| { + let reference = *slot; + *slot = forward_reference::(trace, reference); + }); + + sync.enqueued_references + .iter_mut() + .for_each(|slot: &mut ObjectReference| { + let reference = *slot; + *slot = forward_reference::(trace, reference); + }); + + debug!("Ending ReferenceProcessor.forward({:?})", self.semantics) } /// Scan the reference table. - fn scan( - &self, - trace: &mut E, - nursery: bool, - retain: bool, - ) { + fn scan(&self, trace: &mut E, nursery: bool, retain: bool) { let mut sync = self.sync.lock().unwrap(); - if TRACE { - trace!("Starting ReferenceProcessor.scan({:?})", self.semantics); - } + debug!("Starting ReferenceProcessor.scan({:?})", self.semantics); // Start scanning from here let from_index = if nursery { sync.nursery_index } else { 0 }; - if TRACE_DETAIL { - trace!("{:?} Reference table is {:?}", self.semantics, sync.references); - } + debug!( + "{:?} Reference table is {:?}", + self.semantics, sync.references + ); if retain { for reference in sync.references.iter().skip(from_index) { // Retain the referent. This does not update the reference table. @@ -300,22 +319,16 @@ impl ReferenceProcessor { let reference = sync.references[i]; // Determine liveness (and forward if necessary) the reference - let new_reference = self.process_reference(trace, reference, &mut sync.enqueued_references); + let new_reference = + self.process_reference(trace, reference, &mut sync.enqueued_references); // If the reference is alive, put it back to the array if !new_reference.is_null() { sync.references[to_index] = new_reference; to_index += 1; - if TRACE_DETAIL { - let index = to_index - 1; - trace!( - "SCANNED {} {:?}", - index, - sync.references[index], - ); - } + trace!("SCANNED {} {:?}", i, new_reference,); } } - trace!( + debug!( "{:?} references: {} -> {}", self.semantics, sync.references.len(), @@ -325,24 +338,16 @@ impl ReferenceProcessor { sync.references.truncate(to_index); } - if TRACE { - trace!("Ending ReferenceProcessor.scan({:?})", self.semantics); - } + debug!("Ending ReferenceProcessor.scan({:?})", self.semantics); } /// This method deals only with soft references. It retains the referent /// if the reference is definitely reachable. - fn retain_referent( - &self, - trace: &mut E, - reference: ObjectReference, - ) { + fn retain_referent(&self, trace: &mut E, reference: ObjectReference) { debug_assert!(!reference.is_null()); debug_assert!(self.semantics == Semantics::SOFT); - if TRACE_DETAIL { - trace!("Processing reference: {:?}", reference); - } + trace!("Processing reference: {:?}", reference); if !reference.is_live() { // Reference is currently unreachable but may get reachable by the @@ -355,9 +360,7 @@ impl ReferenceProcessor { if !referent.is_null() { Self::keep_referent_alive(trace, referent); } - if TRACE_DETAIL { - trace!(" ~> {:?} (retained)", referent.to_address()); - } + trace!(" ~> {:?} (retained)", referent.to_address()); } /// Process a reference. @@ -375,39 +378,39 @@ impl ReferenceProcessor { ) -> ObjectReference { debug_assert!(!reference.is_null()); - if TRACE_DETAIL { trace!("Process reference: {}", reference); } + trace!("Process reference: {}", reference); // If the reference is dead, we're done with it. Let it (and // possibly its referent) be garbage-collected. 
if !reference.is_live() { ::VMReferenceGlue::clear_referent(reference); - if TRACE_UNREACHABLE { trace!(" UNREACHABLE reference: {}", reference); } - if TRACE_DETAIL { trace!(" (unreachable)"); } + trace!(" UNREACHABLE reference: {}", reference); + trace!(" (unreachable)"); return ObjectReference::NULL; } // The reference object is live let new_reference = Self::get_forwarded_reference(trace, reference); let old_referent = ::VMReferenceGlue::get_referent(reference); - if TRACE_DETAIL { trace!(" ~> {}", old_referent); } + trace!(" ~> {}", old_referent); // If the application has cleared the referent the Java spec says // this does not cause the Reference object to be enqueued. We // simply allow the Reference object to fall out of our // waiting list. if old_referent.is_null() { - if TRACE_DETAIL { trace!(" (null referent) "); } + trace!(" (null referent) "); return ObjectReference::NULL; } - if TRACE_DETAIL { trace!(" => {}", new_reference); } + trace!(" => {}", new_reference); if old_referent.is_live() { // Referent is still reachable in a way that is as strong as // or stronger than the current reference level. let new_referent = Self::get_forwarded_referent(trace, old_referent); debug_assert!(new_referent.is_live()); - if TRACE_DETAIL { trace!(" ~> {}", new_referent); } + trace!(" ~> {}", new_referent); // The reference object stays on the waiting list, and the // referent is untouched. The only thing we must do is @@ -417,15 +420,14 @@ impl ReferenceProcessor { // Update the referent ::VMReferenceGlue::set_referent(new_reference, new_referent); - return new_reference; + new_reference } else { // Referent is unreachable. Clear the referent and enqueue the reference object. - if TRACE_DETAIL { trace!(" UNREACHABLE"); } - else if TRACE_UNREACHABLE { trace!(" UNREACHABLE referent: {}", old_referent); } + trace!(" UNREACHABLE referent: {}", old_referent); ::VMReferenceGlue::clear_referent(new_reference); enqueued_references.push(new_reference); - return ObjectReference::NULL; + ObjectReference::NULL } } } @@ -435,6 +437,7 @@ use crate::scheduler::GCWorker; use crate::MMTK; use std::marker::PhantomData; +#[derive(Default)] pub struct SoftRefProcessing(PhantomData); impl GCWork for SoftRefProcessing { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { @@ -450,6 +453,7 @@ impl SoftRefProcessing { } } +#[derive(Default)] pub struct WeakRefProcessing(PhantomData); impl GCWork for WeakRefProcessing { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { @@ -465,6 +469,7 @@ impl WeakRefProcessing { } } +#[derive(Default)] pub struct PhantomRefProcessing(PhantomData); impl GCWork for PhantomRefProcessing { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { @@ -480,6 +485,7 @@ impl PhantomRefProcessing { } } +#[derive(Default)] pub struct RefForwarding(PhantomData); impl GCWork for RefForwarding { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { @@ -495,6 +501,7 @@ impl RefForwarding { } } +#[derive(Default)] pub struct RefEnqueue(PhantomData); impl GCWork for RefEnqueue { fn do_work(&mut self, _worker: &mut GCWorker, mmtk: &'static MMTK) { diff --git a/vmbindings/dummyvm/src/reference_glue.rs b/vmbindings/dummyvm/src/reference_glue.rs index 086f1cfaeb..77d33782e7 100644 --- a/vmbindings/dummyvm/src/reference_glue.rs +++ b/vmbindings/dummyvm/src/reference_glue.rs @@ -1,7 +1,5 @@ use mmtk::vm::ReferenceGlue; use mmtk::util::ObjectReference; -use mmtk::TraceLocal; -use mmtk::util::opaque_pointer::*; use crate::DummyVM; pub struct 
VMReferenceGlue {} @@ -13,7 +11,7 @@ impl ReferenceGlue for VMReferenceGlue { fn get_referent(_object: ObjectReference) -> ObjectReference { unimplemented!() } - fn process_reference(_trace: &mut T, _reference: ObjectReference, _tls: VMWorkerThread) -> ObjectReference { + fn enqueue_references(_references: &[ObjectReference]) { unimplemented!() } } From 88bbc64d65f93e3df529fcf48fce991382e97146 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Fri, 25 Mar 2022 13:44:55 +1100 Subject: [PATCH 17/25] Add some assertions in enqueue. --- src/util/reference_processor.rs | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index 9fe8d310c2..e503229760 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -227,6 +227,32 @@ impl ReferenceProcessor { pub fn enqueue(&self) { let mut sync = self.sync.lock().unwrap(); + // This is the end of a GC. We do some assertions here to make sure our reference tables are correct. + #[cfg(debug_assertions)] + { + // For references in the table, the reference needs to be valid, and if the referent is not null, it should be valid as well + sync.references.iter().for_each(|reff| { + debug_assert!(!reff.is_null()); + debug_assert!(reff.is_in_any_space()); + let referent = VM::VMReferenceGlue::get_referent(*reff); + if !referent.is_null() { + debug_assert!( + referent.is_in_any_space(), + "Referent {:?} (of reference {:?}) is not in any space", + referent, + reff + ); + } + }); + // For references that will be enqueue'd, the referent needs to be valid, and the referent needs to be null. + sync.enqueued_references.iter().for_each(|reff| { + debug_assert!(!reff.is_null()); + debug_assert!(reff.is_in_any_space()); + let referent = VM::VMReferenceGlue::get_referent(*reff); + debug_assert!(referent.is_null()); + }); + } + if !sync.enqueued_references.is_empty() { debug!("enqueue: {:?}", sync.enqueued_references); VM::VMReferenceGlue::enqueue_references(&sync.enqueued_references); From 612ee339ce909cca7b74857dc62e16da5eb5a3f3 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Fri, 25 Mar 2022 15:10:38 +1100 Subject: [PATCH 18/25] No longer accept new candidates when we start scanning --- src/util/reference_processor.rs | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index e503229760..07fceb7101 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -1,5 +1,7 @@ use std::sync::Mutex; use std::vec::Vec; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering; use crate::scheduler::ProcessEdgesWork; use crate::util::ObjectReference; @@ -146,6 +148,18 @@ pub struct ReferenceProcessor { /// The semantics for the reference processor semantics: Semantics, + + /// Is it allowed to add candidate to this reference processor? The value is true for most of the time, + /// but it is set to false once we start scanning references (that is after the liveness transitive closure), + /// at which point we do not expect to encounter any 'new' reference in the same GC, because 1. no + /// new reference is created during the GC, and 2. existing references should have been traced and added + /// to the reference processor. This makes sure that no new entry will be added to our reference table once + /// we start scanning until the end of that GC. 
+ // This also avoids the issue that we may have forwarded references in our table, and the binding + // traces and finds an unforwarded reference and attempts to add it as a candidate again. + // I have observed this with mark compact & OpenJDK. I am not 100% sure what is the root cause for it, but + // this fixes it. + accept_candidate: AtomicBool, } #[derive(Debug, PartialEq)] @@ -180,12 +194,18 @@ impl ReferenceProcessor { nursery_index: 0, }), semantics, + accept_candidate: AtomicBool::new(true), } } /// Add a candidate. // TODO: do we need the referent argument? + #[inline(always)] pub fn add_candidate(&self, reff: ObjectReference, _referent: ObjectReference) { + if !self.accept_candidate.load(Ordering::SeqCst) { + return; + } + let mut sync = self.sync.lock().unwrap(); // We make sure that we do not have duplicate entries // TODO: Should we use hash set instead? @@ -195,6 +215,14 @@ impl ReferenceProcessor { sync.references.push(reff); } + fn disallow_new_candidate(&self) { + self.accept_candidate.store(false, Ordering::SeqCst); + } + + fn allow_new_candidate(&self) { + self.accept_candidate.store(true, Ordering::SeqCst); + } + // These funcions simply call `trace_object()`, which does two things: 1. to make sure the object is kept alive // and 2. to get the new object reference if the object is copied. The functions are intended to make the code // easier to understand. @@ -258,6 +286,8 @@ impl ReferenceProcessor { VM::VMReferenceGlue::enqueue_references(&sync.enqueued_references); sync.enqueued_references.clear(); } + + self.allow_new_candidate(); } /// Forward the reference tables in the reference processor. This is only needed if a plan does not forward @@ -318,6 +348,9 @@ impl ReferenceProcessor { /// Scan the reference table. fn scan(&self, trace: &mut E, nursery: bool, retain: bool) { + // We start scanning. No longer accept new candidates. + self.disallow_new_candidate(); + let mut sync = self.sync.lock().unwrap(); debug!("Starting ReferenceProcessor.scan({:?})", self.semantics); From 4024354f125bc16f987023dcad4d359949f33546 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Mon, 28 Mar 2022 15:38:17 +1100 Subject: [PATCH 19/25] No longer accept weak ref candidates after forwarding. --- src/util/reference_processor.rs | 42 +++++++++++++++++---------------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index 07fceb7101..9105a95fbc 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -1,7 +1,7 @@ -use std::sync::Mutex; -use std::vec::Vec; use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering; +use std::sync::Mutex; +use std::vec::Vec; use crate::scheduler::ProcessEdgesWork; use crate::util::ObjectReference; @@ -150,16 +150,18 @@ pub struct ReferenceProcessor { semantics: Semantics, /// Is it allowed to add candidate to this reference processor? The value is true for most of the time, - /// but it is set to false once we start scanning references (that is after the liveness transitive closure), - /// at which point we do not expect to encounter any 'new' reference in the same GC, because 1. no - /// new reference is created during the GC, and 2. existing references should have been traced and added - /// to the reference processor. This makes sure that no new entry will be added to our reference table once - /// we start scanning until the end of that GC. 
- // This also avoids the issue that we may have forwarded references in our table, and the binding - // traces and finds an unforwarded reference and attempts to add it as a candidate again. - // I have observed this with mark compact & OpenJDK. I am not 100% sure what is the root cause for it, but - // this fixes it. - accept_candidate: AtomicBool, + /// but it is set to false once we finish forwarding references, at which point we do not expect to encounter + /// any 'new' reference in the same GC. This makes sure that no new entry will be added to our reference table once + /// we finish forwarding, as we will not be able to process the entry in that GC. + // This avoids an issue in the following scenario in mark compact: + // 1. First trace: add a candidate WR + // 2. Weak reference scan: scan the reference table, as MC does not forward object in the first trace. This scan does not update any reference. + // 3. Second trace: call add_candidate again with WR, but WR gets ignored as we already have WR in our reference table. + // 4. Weak reference forward: call trace_object for WR, which pushes WR to the node buffer and update WR -> WR' in our reference table. + // 5. When we trace objects in the node buffer, we will attempt to add WR as a candidate. As we have updated WR to WR' in our reference + // table, we would accept WR as a candidate. But we will not trace WR again, and WR will be invalid after this GC. + // This flag is set to false after Step 4, so in Step 5, we will ignore adding WR. + allow_new_candidate: AtomicBool, } #[derive(Debug, PartialEq)] @@ -194,7 +196,7 @@ impl ReferenceProcessor { nursery_index: 0, }), semantics, - accept_candidate: AtomicBool::new(true), + allow_new_candidate: AtomicBool::new(true), } } @@ -202,7 +204,7 @@ impl ReferenceProcessor { // TODO: do we need the referent argument? #[inline(always)] pub fn add_candidate(&self, reff: ObjectReference, _referent: ObjectReference) { - if !self.accept_candidate.load(Ordering::SeqCst) { + if !self.allow_new_candidate.load(Ordering::SeqCst) { return; } @@ -216,11 +218,11 @@ impl ReferenceProcessor { } fn disallow_new_candidate(&self) { - self.accept_candidate.store(false, Ordering::SeqCst); + self.allow_new_candidate.store(false, Ordering::SeqCst); } fn allow_new_candidate(&self) { - self.accept_candidate.store(true, Ordering::SeqCst); + self.allow_new_candidate.store(true, Ordering::SeqCst); } // These funcions simply call `trace_object()`, which does two things: 1. to make sure the object is kept alive // and 2. to get the new object reference if the object is copied. The functions are intended to make the code // easier to understand. @@ -343,14 +345,14 @@ impl ReferenceProcessor { *slot = forward_reference::(trace, reference); }); - debug!("Ending ReferenceProcessor.forward({:?})", self.semantics) + debug!("Ending ReferenceProcessor.forward({:?})", self.semantics); + + // We finish forwarding. No longer accept new candidates. + self.disallow_new_candidate(); } /// Scan the reference table. fn scan(&self, trace: &mut E, nursery: bool, retain: bool) { From 3d29846b887431436cc8787d289353a7f7b06d4a Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 29 Mar 2022 14:50:19 +1100 Subject: [PATCH 20/25] Use HashSet for the reference table.
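The switch to a hash set makes candidate de-duplication implicit: add_candidate can simply insert and no longer needs the linear iter().any(...) scan over the table. A minimal standalone sketch of that property follows; u64 stands in for ObjectReference purely for illustration and is not code from the diff below:

    use std::collections::HashSet;

    fn main() {
        let mut references: HashSet<u64> = HashSet::new();
        // `insert` returns true for a new entry and false for a duplicate,
        // so repeated add_candidate calls for the same reference are harmless no-ops.
        assert!(references.insert(0x1000));
        assert!(!references.insert(0x1000));
        assert_eq!(references.len(), 1);
    }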
--- src/util/reference_processor.rs | 93 +++++++++++++-------------------- 1 file changed, 37 insertions(+), 56 deletions(-) diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index 9105a95fbc..1c32cadfc2 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -1,3 +1,4 @@ +use std::collections::HashSet; use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering; use std::sync::Mutex; @@ -89,14 +90,6 @@ impl ReferenceProcessors { // Methods for scanning weak references. It needs to be called in a decreasing order of reference strengths, i.e. soft > weak > phantom - /// Scan weak references. - pub fn scan_weak_refs(&self, trace: &mut E, mmtk: &'static MMTK) { - self.soft - .scan::(trace, mmtk.plan.is_current_gc_nursery(), false); - self.weak - .scan::(trace, mmtk.plan.is_current_gc_nursery(), false); - } - /// Scan soft references. pub fn scan_soft_refs(&self, trace: &mut E, mmtk: &'static MMTK) { // For soft refs, it is up to the VM to decide when to reclaim this. @@ -109,6 +102,14 @@ impl ReferenceProcessors { } } + /// Scan weak references. + pub fn scan_weak_refs(&self, trace: &mut E, mmtk: &'static MMTK) { + self.soft + .scan::(trace, mmtk.plan.is_current_gc_nursery(), false); + self.weak + .scan::(trace, mmtk.plan.is_current_gc_nursery(), false); + } + /// Scan phantom references. pub fn scan_phantom_refs( &self, @@ -177,7 +178,7 @@ struct ReferenceProcessorSync { /// stay in the table (if the referent is alive) or go to enqueued_reference (if the referent is dead and cleared). /// Note that this table should not have duplicate entries, otherwise we will scan the duplicates multiple times, and /// that may lead to incorrect results. - references: Vec, + references: HashSet, /// References whose referents are cleared during this GC. We add references to this table during /// scanning, and we pop from this table during the enqueue work at the end of GC. @@ -191,7 +192,7 @@ impl ReferenceProcessor { pub fn new(semantics: Semantics) -> Self { ReferenceProcessor { sync: Mutex::new(ReferenceProcessorSync { - references: Vec::with_capacity(INITIAL_SIZE), + references: HashSet::with_capacity(INITIAL_SIZE), enqueued_references: vec![], nursery_index: 0, }), @@ -209,12 +210,7 @@ impl ReferenceProcessor { } let mut sync = self.sync.lock().unwrap(); - // We make sure that we do not have duplicate entries - // TODO: Should we use hash set instead? - if sync.references.iter().any(|r| *r == reff) { - return; - } - sync.references.push(reff); + sync.references.insert(reff); } fn disallow_new_candidate(&self) { @@ -331,19 +327,17 @@ impl ReferenceProcessor { new_reference } - sync.references - .iter_mut() - .for_each(|slot: &mut ObjectReference| { - let reference = *slot; - *slot = forward_reference::(trace, reference); - }); + sync.references = sync + .references + .iter() + .map(|reff| forward_reference::(trace, *reff)) + .collect(); - sync.enqueued_references - .iter_mut() - .for_each(|slot: &mut ObjectReference| { - let reference = *slot; - *slot = forward_reference::(trace, reference); - }); + sync.enqueued_references = sync + .enqueued_references + .iter() + .map(|reff| forward_reference::(trace, *reff)) + .collect(); debug!("Ending ReferenceProcessor.forward({:?})", self.semantics); @@ -352,51 +346,38 @@ impl ReferenceProcessor { } /// Scan the reference table. - fn scan(&self, trace: &mut E, nursery: bool, retain: bool) { + // TODO: nursery is currently ignored. 
We used to use Vec for the reference table, and use an int + // to point to the reference that we last scanned. However, when we use HashSet for reference table, + // we can no longer do that. + fn scan(&self, trace: &mut E, _nursery: bool, retain: bool) { let mut sync = self.sync.lock().unwrap(); debug!("Starting ReferenceProcessor.scan({:?})", self.semantics); - // Start scanning from here - let from_index = if nursery { sync.nursery_index } else { 0 }; debug!( "{:?} Reference table is {:?}", self.semantics, sync.references ); if retain { - for reference in sync.references.iter().skip(from_index) { - // Retain the referent. This does not update the reference table. - // There will be a later scan pass that update the reference table. + for reference in sync.references.iter() { self.retain_referent::(trace, *reference); } } else { - // A cursor. Live reference will be stored at the cursor. - let mut to_index = from_index; - - // Iterate from from_index, process/forward for each reference. - // If the reference is alive (process_reference() returned a non-NULL value), - // store the forwarded reference back at the cursor. - for i in from_index..sync.references.len() { - let reference = sync.references[i]; + // Put processed references in this set + let mut new_set = HashSet::with_capacity(INITIAL_SIZE); + debug_assert!(sync.enqueued_references.is_empty()); + // Put enqueued reference in this vec + let mut enqueued_references = vec![]; + for reff in sync.references.iter() { // Determine liveness (and forward if necessary) the reference - let new_reference = - self.process_reference(trace, reference, &mut sync.enqueued_references); - // If the reference is alive, put it back to the array + let new_reference = self.process_reference(trace, *reff, &mut enqueued_references); if !new_reference.is_null() { - sync.references[to_index] = new_reference; - to_index += 1; - trace!("SCANNED {} {:?}", i, new_reference,); + new_set.insert(new_reference); } } - debug!( - "{:?} references: {} -> {}", - self.semantics, - sync.references.len(), - to_index - ); - sync.nursery_index = to_index; - sync.references.truncate(to_index); + sync.references = new_set; + sync.enqueued_references = enqueued_references; } debug!("Ending ReferenceProcessor.scan({:?})", self.semantics); From 4fd84690e27b6527c74fc75430ff8fdc93251cc3 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 5 Apr 2022 10:44:48 +1000 Subject: [PATCH 21/25] Provide get_options --- src/mmtk.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/mmtk.rs b/src/mmtk.rs index 989fd85baf..17a8c763aa 100644 --- a/src/mmtk.rs +++ b/src/mmtk.rs @@ -101,6 +101,11 @@ impl MMTK { pub fn get_plan(&self) -> &dyn Plan { self.plan.as_ref() } + + #[inline(always)] + pub fn get_options(&self) -> &Options { + &self.options + } } impl Default for MMTK { From 03fb2d35b4e5a3ba1942c85815beb38ed747f87a Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 6 Apr 2022 11:40:45 +1000 Subject: [PATCH 22/25] Add tls to enqueue_reference() --- src/util/reference_processor.rs | 29 ++++++++++++++++-------- src/vm/reference_glue.rs | 3 ++- vmbindings/dummyvm/src/reference_glue.rs | 3 ++- 3 files changed, 23 insertions(+), 12 deletions(-) diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index 1c32cadfc2..9b843f80cb 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -6,6 +6,7 @@ use std::vec::Vec; use crate::scheduler::ProcessEdgesWork; use crate::util::ObjectReference; +use crate::util::VMWorkerThread; use 
crate::vm::ReferenceGlue; use crate::vm::VMBinding; @@ -65,10 +66,10 @@ impl ReferenceProcessors { /// This will invoke enqueue for each reference processor, which will /// call back to the VM to enqueue references whose referents are cleared /// in this GC. - pub fn enqueue_refs(&self) { - self.soft.enqueue::(); - self.weak.enqueue::(); - self.phantom.enqueue::(); + pub fn enqueue_refs(&self, tls: VMWorkerThread) { + self.soft.enqueue::(tls); + self.weak.enqueue::(tls); + self.phantom.enqueue::(tls); } /// A separate reference forwarding step. Normally when we scan refs, we deal with forwarding. @@ -250,7 +251,7 @@ impl ReferenceProcessor { } /// Inform the binding to enqueue the weak references whose referents were cleared in this GC. - pub fn enqueue(&self) { + pub fn enqueue(&self, tls: VMWorkerThread) { let mut sync = self.sync.lock().unwrap(); // This is the end of a GC. We do some assertions here to make sure our reference tables are correct. @@ -281,7 +282,7 @@ impl ReferenceProcessor { if !sync.enqueued_references.is_empty() { debug!("enqueue: {:?}", sync.enqueued_references); - VM::VMReferenceGlue::enqueue_references(&sync.enqueued_references); + VM::VMReferenceGlue::enqueue_references(&sync.enqueued_references, tls); sync.enqueued_references.clear(); } @@ -354,9 +355,10 @@ impl ReferenceProcessor { debug!("Starting ReferenceProcessor.scan({:?})", self.semantics); - debug!( + trace!( "{:?} Reference table is {:?}", - self.semantics, sync.references + self.semantics, + sync.references ); if retain { for reference in sync.references.iter() { @@ -376,6 +378,13 @@ impl ReferenceProcessor { new_set.insert(new_reference); } } + debug!( + "{:?} reference table from {} to {} ({} enqueued)", + self.semantics, + sync.references.len(), + new_set.len(), + enqueued_references.len() + ); sync.references = new_set; sync.enqueued_references = enqueued_references; } @@ -546,8 +555,8 @@ impl RefForwarding { #[derive(Default)] pub struct RefEnqueue(PhantomData); impl GCWork for RefEnqueue { - fn do_work(&mut self, _worker: &mut GCWorker, mmtk: &'static MMTK) { - mmtk.reference_processors.enqueue_refs::(); + fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { + mmtk.reference_processors.enqueue_refs::(worker.tls); } } impl RefEnqueue { diff --git a/src/vm/reference_glue.rs b/src/vm/reference_glue.rs index 7bc1c0af38..a88263edd4 100644 --- a/src/vm/reference_glue.rs +++ b/src/vm/reference_glue.rs @@ -1,5 +1,6 @@ use crate::util::Address; use crate::util::ObjectReference; +use crate::util::VMWorkerThread; use crate::vm::VMBinding; /// VM-specific methods for reference processing. @@ -35,5 +36,5 @@ pub trait ReferenceGlue { /// Note that this method is called for each type of weak references during GC, and /// the references slice will be cleared after this call is returned. That means /// MMTk will no longer keep these references alive once this method is returned. 
- fn enqueue_references(references: &[ObjectReference]); + fn enqueue_references(references: &[ObjectReference], tls: VMWorkerThread); } diff --git a/vmbindings/dummyvm/src/reference_glue.rs b/vmbindings/dummyvm/src/reference_glue.rs index 77d33782e7..bf1ce233f9 100644 --- a/vmbindings/dummyvm/src/reference_glue.rs +++ b/vmbindings/dummyvm/src/reference_glue.rs @@ -1,5 +1,6 @@ use mmtk::vm::ReferenceGlue; use mmtk::util::ObjectReference; +use mmtk::util::opaque_pointer::VMWorkerThread; use crate::DummyVM; pub struct VMReferenceGlue {} @@ -11,7 +12,7 @@ impl ReferenceGlue for VMReferenceGlue { fn get_referent(_object: ObjectReference) -> ObjectReference { unimplemented!() } - fn enqueue_references(_references: &[ObjectReference]) { + fn enqueue_references(_references: &[ObjectReference], _tls: VMWorkerThread) { unimplemented!() } } From 1767b5272355ceb8171148672cffa0d4aa27b2c3 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Thu, 7 Apr 2022 09:39:42 +1000 Subject: [PATCH 23/25] Set the default for no_reference_types to true. Adding weak candidates no longer needs referent as its argument. --- examples/mmtk.h | 6 +++--- src/memory_manager.rs | 30 ++++++------------------------ src/util/options.rs | 2 +- src/util/reference_processor.rs | 33 ++++++++++----------------------- vmbindings/dummyvm/api/mmtk.h | 6 +++--- vmbindings/dummyvm/src/api.rs | 12 ++++++------ 6 files changed, 29 insertions(+), 60 deletions(-) diff --git a/examples/mmtk.h b/examples/mmtk.h index 0b4cfb528c..6f0843d729 100644 --- a/examples/mmtk.h +++ b/examples/mmtk.h @@ -107,13 +107,13 @@ extern void* mmtk_starting_heap_address(); extern void* mmtk_last_heap_address(); // Add a reference to the list of weak references -extern void mmtk_add_weak_candidate(void* ref, void* referent); +extern void mmtk_add_weak_candidate(void* ref); // Add a reference to the list of soft references -extern void mmtk_add_soft_candidate(void* ref, void* referent); +extern void mmtk_add_soft_candidate(void* ref); // Add a reference to the list of phantom references -extern void mmtk_add_phantom_candidate(void* ref, void* referent); +extern void mmtk_add_phantom_candidate(void* ref); // Generic hook to allow benchmarks to be harnessed extern void mmtk_harness_begin(void* tls); diff --git a/src/memory_manager.rs b/src/memory_manager.rs index 35c98d8109..42831f725e 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -422,14 +422,8 @@ pub fn modify_check(mmtk: &MMTK, object: ObjectReference) { /// Arguments: /// * `mmtk`: A reference to an MMTk instance. /// * `reff`: The weak reference to add. -/// * `referent`: The object that the reference points to. -pub fn add_weak_candidate( - mmtk: &MMTK, - reff: ObjectReference, - referent: ObjectReference, -) { - mmtk.reference_processors - .add_weak_candidate::(reff, referent); +pub fn add_weak_candidate(mmtk: &MMTK, reff: ObjectReference) { + mmtk.reference_processors.add_weak_candidate::(reff); } /// Add a reference to the list of soft references. A binding may @@ -438,14 +432,8 @@ pub fn add_weak_candidate( /// Arguments: /// * `mmtk`: A reference to an MMTk instance. /// * `reff`: The soft reference to add. -/// * `referent`: The object that the reference points to. 
-pub fn add_soft_candidate( - mmtk: &MMTK, - reff: ObjectReference, - referent: ObjectReference, -) { - mmtk.reference_processors - .add_soft_candidate::(reff, referent); +pub fn add_soft_candidate(mmtk: &MMTK, reff: ObjectReference) { + mmtk.reference_processors.add_soft_candidate::(reff); } /// Add a reference to the list of phantom references. A binding may @@ -454,14 +442,8 @@ pub fn add_soft_candidate( /// Arguments: /// * `mmtk`: A reference to an MMTk instance. /// * `reff`: The phantom reference to add. -/// * `referent`: The object that the reference points to. -pub fn add_phantom_candidate( - mmtk: &MMTK, - reff: ObjectReference, - referent: ObjectReference, -) { - mmtk.reference_processors - .add_phantom_candidate::(reff, referent); +pub fn add_phantom_candidate(mmtk: &MMTK, reff: ObjectReference) { + mmtk.reference_processors.add_phantom_candidate::(reff); } /// Generic hook to allow benchmarks to be harnessed. We do a full heap diff --git a/src/util/options.rs b/src/util/options.rs index 2463b6ecaa..66f8354f29 100644 --- a/src/util/options.rs +++ b/src/util/options.rs @@ -331,7 +331,7 @@ options! { // Should finalization be disabled? no_finalizer: bool [env_var: true, command_line: true] [always_valid] = false, // Should reference type processing be disabled? - no_reference_types: bool [env_var: true, command_line: true] [always_valid] = false, + no_reference_types: bool [env_var: true, command_line: true] [always_valid] = true, // The zeroing approach to use for new object allocations. Affects each plan differently. (not supported) nursery_zeroing: NurseryZeroingOptions[env_var: true, command_line: true] [always_valid] = NurseryZeroingOptions::Temporal, // How frequent (every X bytes) should we do a stress GC? diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index 9b843f80cb..678b0022fc 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -36,31 +36,19 @@ impl ReferenceProcessors { } } - pub fn add_soft_candidate( - &self, - reff: ObjectReference, - referent: ObjectReference, - ) { - trace!("Add soft candidate: {} ~> {}", reff, referent); - self.soft.add_candidate::(reff, referent); + pub fn add_soft_candidate(&self, reff: ObjectReference) { + trace!("Add soft candidate: {}", reff); + self.soft.add_candidate::(reff); } - pub fn add_weak_candidate( - &self, - reff: ObjectReference, - referent: ObjectReference, - ) { - trace!("Add weak candidate: {} ~> {}", reff, referent); - self.weak.add_candidate::(reff, referent); + pub fn add_weak_candidate(&self, reff: ObjectReference) { + trace!("Add weak candidate: {}", reff); + self.weak.add_candidate::(reff); } - pub fn add_phantom_candidate( - &self, - reff: ObjectReference, - referent: ObjectReference, - ) { - trace!("Add phantom candidate: {} ~> {}", reff, referent); - self.phantom.add_candidate::(reff, referent); + pub fn add_phantom_candidate(&self, reff: ObjectReference) { + trace!("Add phantom candidate: {}", reff); + self.phantom.add_candidate::(reff); } /// This will invoke enqueue for each reference processor, which will @@ -203,9 +191,8 @@ impl ReferenceProcessor { } /// Add a candidate. - // TODO: do we need the referent argument? 
#[inline(always)] - pub fn add_candidate(&self, reff: ObjectReference, _referent: ObjectReference) { + pub fn add_candidate(&self, reff: ObjectReference) { if !self.allow_new_candidate.load(Ordering::SeqCst) { return; } diff --git a/vmbindings/dummyvm/api/mmtk.h b/vmbindings/dummyvm/api/mmtk.h index 0b4cfb528c..6f0843d729 100644 --- a/vmbindings/dummyvm/api/mmtk.h +++ b/vmbindings/dummyvm/api/mmtk.h @@ -107,13 +107,13 @@ extern void* mmtk_starting_heap_address(); extern void* mmtk_last_heap_address(); // Add a reference to the list of weak references -extern void mmtk_add_weak_candidate(void* ref, void* referent); +extern void mmtk_add_weak_candidate(void* ref); // Add a reference to the list of soft references -extern void mmtk_add_soft_candidate(void* ref, void* referent); +extern void mmtk_add_soft_candidate(void* ref); // Add a reference to the list of phantom references -extern void mmtk_add_phantom_candidate(void* ref, void* referent); +extern void mmtk_add_phantom_candidate(void* ref); // Generic hook to allow benchmarks to be harnessed extern void mmtk_harness_begin(void* tls); diff --git a/vmbindings/dummyvm/src/api.rs b/vmbindings/dummyvm/src/api.rs index 3c5aa55a8e..61585ed67d 100644 --- a/vmbindings/dummyvm/src/api.rs +++ b/vmbindings/dummyvm/src/api.rs @@ -127,18 +127,18 @@ pub extern "C" fn mmtk_handle_user_collection_request(tls: VMMutatorThread) { } #[no_mangle] -pub extern "C" fn mmtk_add_weak_candidate(reff: ObjectReference, referent: ObjectReference) { - memory_manager::add_weak_candidate(&SINGLETON, reff, referent) +pub extern "C" fn mmtk_add_weak_candidate(reff: ObjectReference) { + memory_manager::add_weak_candidate(&SINGLETON, reff) } #[no_mangle] -pub extern "C" fn mmtk_add_soft_candidate(reff: ObjectReference, referent: ObjectReference) { - memory_manager::add_soft_candidate(&SINGLETON, reff, referent) +pub extern "C" fn mmtk_add_soft_candidate(reff: ObjectReference) { + memory_manager::add_soft_candidate(&SINGLETON, reff) } #[no_mangle] -pub extern "C" fn mmtk_add_phantom_candidate(reff: ObjectReference, referent: ObjectReference) { - memory_manager::add_phantom_candidate(&SINGLETON, reff, referent) +pub extern "C" fn mmtk_add_phantom_candidate(reff: ObjectReference) { + memory_manager::add_phantom_candidate(&SINGLETON, reff) } #[no_mangle] From 5ef5afc27b4765531c70f4079f9ba030b0b511a2 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Thu, 7 Apr 2022 11:22:18 +1000 Subject: [PATCH 24/25] Rename ProcessWeakRefs to VMProcessWeakRefs --- src/plan/markcompact/global.rs | 8 ++++---- src/scheduler/gc_work.rs | 8 +++----- src/scheduler/scheduler.rs | 8 ++++---- src/util/options.rs | 4 ++++ 4 files changed, 15 insertions(+), 13 deletions(-) diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 89d6bdffc1..fe521cdb42 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -97,10 +97,6 @@ impl Plan for MarkCompact { scheduler.work_buckets[WorkBucketStage::Prepare] .add(Prepare::>::new(self)); - // VM-specific weak ref processing - scheduler.work_buckets[WorkBucketStage::WeakRefClosure] - .add(ProcessWeakRefs::>::new()); - scheduler.work_buckets[WorkBucketStage::CalculateForwarding] .add(CalculateForwardingAddress::::new(&self.mc_space)); // do another trace to update references @@ -123,6 +119,10 @@ impl Plan for MarkCompact { scheduler.work_buckets[WorkBucketStage::PhantomRefClosure] .add(PhantomRefProcessing::>::new()); + // VM-specific weak ref processing + scheduler.work_buckets[WorkBucketStage::WeakRefClosure] + 
.add(VMProcessWeakRefs::>::new()); + use crate::util::reference_processor::RefForwarding; scheduler.work_buckets[WorkBucketStage::RefForwarding] .add(RefForwarding::>::new()); diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 4585d641a5..3e289f9597 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -120,8 +120,6 @@ impl GCWork for Release { for w in &mmtk.scheduler.workers_shared { w.local_work_bucket.add(ReleaseCollector); } - // TODO: Process weak references properly - // mmtk.reference_processors.clear(); } } @@ -249,15 +247,15 @@ impl CoordinatorWork for EndOfGC {} /// processing of those weakrefs may be more complex. For such case, we delegate to the /// VM binding to process weak references. #[derive(Default)] -pub struct ProcessWeakRefs(PhantomData); +pub struct VMProcessWeakRefs(PhantomData); -impl ProcessWeakRefs { +impl VMProcessWeakRefs { pub fn new() -> Self { Self(PhantomData) } } -impl GCWork for ProcessWeakRefs { +impl GCWork for VMProcessWeakRefs { fn do_work(&mut self, worker: &mut GCWorker, _mmtk: &'static MMTK) { trace!("ProcessWeakRefs"); ::VMCollection::process_weak_refs::(worker); diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index 17f43f44df..d39538acd2 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -191,10 +191,6 @@ impl GCWorkScheduler { // Prepare global/collectors/mutators self.work_buckets[WorkBucketStage::Prepare].add(Prepare::::new(plan)); - // VM-specific weak ref processing - self.work_buckets[WorkBucketStage::WeakRefClosure] - .add(ProcessWeakRefs::::new()); - // Release global/collectors/mutators self.work_buckets[WorkBucketStage::Release].add(Release::::new(plan)); @@ -225,6 +221,10 @@ impl GCWorkScheduler { self.work_buckets[WorkBucketStage::PhantomRefClosure] .add(PhantomRefProcessing::::new()); + // VM-specific weak ref processing + self.work_buckets[WorkBucketStage::WeakRefClosure] + .add(VMProcessWeakRefs::::new()); + use crate::util::reference_processor::RefForwarding; if plan.constraints().needs_forward_after_liveness { self.work_buckets[WorkBucketStage::RefForwarding] diff --git a/src/util/options.rs b/src/util/options.rs index 66f8354f29..78aa396ec9 100644 --- a/src/util/options.rs +++ b/src/util/options.rs @@ -331,6 +331,10 @@ options! { // Should finalization be disabled? no_finalizer: bool [env_var: true, command_line: true] [always_valid] = false, // Should reference type processing be disabled? + // If reference type processing is disabled, no weak reference processing work is scheduled, + // and we expect a binding to treat weak references as strong references. + // We disable weak reference processing by default, as we are still working on it. This will be changed to `false` + // once weak reference processing is implemented properly. no_reference_types: bool [env_var: true, command_line: true] [always_valid] = true, // The zeroing approach to use for new object allocations. Affects each plan differently. (not supported) nursery_zeroing: NurseryZeroingOptions[env_var: true, command_line: true] [always_valid] = NurseryZeroingOptions::Temporal, From 065377b3f0b32d0833c740fa5e179611fd176eed Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Fri, 8 Apr 2022 10:03:59 +1000 Subject: [PATCH 25/25] Separate retain from scan. process_reference returns Option. 
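With process_reference returning an Option, the scan pass can be expressed as a filter_map over the table: Some keeps the (possibly forwarded) reference, None drops it. A rough standalone sketch of the pattern follows; u64 stands in for ObjectReference and the liveness test is a placeholder, so this only mirrors the shape of the change, not the crate's actual API:

    use std::collections::HashSet;

    // Placeholder for the real process_reference, which consults the trace and the VM glue.
    fn process_reference(reff: u64) -> Option<u64> {
        if reff % 2 == 0 { Some(reff) } else { None }
    }

    fn main() {
        let table: HashSet<u64> = [1u64, 2, 3, 4].into_iter().collect();
        // Dead references fall out of the table; live ones (possibly at new addresses) are kept.
        let new_table: HashSet<u64> = table.iter().filter_map(|r| process_reference(*r)).collect();
        assert_eq!(new_table.len(), 2);
    }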
--- src/scheduler/work_bucket.rs | 2 - src/util/reference_processor.rs | 128 +++++++++++++++++--------------- 2 files changed, 70 insertions(+), 60 deletions(-) diff --git a/src/scheduler/work_bucket.rs b/src/scheduler/work_bucket.rs index db300c86b2..269d2f4f07 100644 --- a/src/scheduler/work_bucket.rs +++ b/src/scheduler/work_bucket.rs @@ -154,8 +154,6 @@ pub enum WorkBucketStage { Unconstrained, Prepare, Closure, - // TODO: We only support final reference at the moment. If we have references of multiple strengths, - // we may need more than one buckets for each reference strength. SoftRefClosure, WeakRefClosure, FinalRefClosure, diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index 678b0022fc..7ab4c9c31b 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -87,16 +87,19 @@ impl ReferenceProcessors { // This step only retains the referents (keep the referents alive), it does not update its addresses. // We will call soft.scan() again with retain=false to update its addresses based on liveness. self.soft - .scan::(trace, mmtk.plan.is_current_gc_nursery(), true); + .retain::(trace, mmtk.plan.is_current_gc_nursery()); } + // This will update the references (and the referents). + self.soft + .scan::(trace, mmtk.plan.is_current_gc_nursery()); } /// Scan weak references. pub fn scan_weak_refs(&self, trace: &mut E, mmtk: &'static MMTK) { self.soft - .scan::(trace, mmtk.plan.is_current_gc_nursery(), false); + .scan::(trace, mmtk.plan.is_current_gc_nursery()); self.weak - .scan::(trace, mmtk.plan.is_current_gc_nursery(), false); + .scan::(trace, mmtk.plan.is_current_gc_nursery()); } /// Scan phantom references. @@ -106,7 +109,7 @@ impl ReferenceProcessors { mmtk: &'static MMTK, ) { self.phantom - .scan::(trace, mmtk.plan.is_current_gc_nursery(), false); + .scan::(trace, mmtk.plan.is_current_gc_nursery()); } } @@ -268,7 +271,7 @@ impl ReferenceProcessor { } if !sync.enqueued_references.is_empty() { - debug!("enqueue: {:?}", sync.enqueued_references); + trace!("enqueue: {:?}", sync.enqueued_references); VM::VMReferenceGlue::enqueue_references(&sync.enqueued_references, tls); sync.enqueued_references.clear(); } @@ -333,11 +336,11 @@ impl ReferenceProcessor { self.disallow_new_candidate(); } - /// Scan the reference table. + /// Scan the reference table, and update each reference/referent. // TODO: nursery is currently ignored. We used to use Vec for the reference table, and use an int // to point to the reference that we last scanned. However, when we use HashSet for reference table, // we can no longer do that. 
- fn scan(&self, trace: &mut E, _nursery: bool, retain: bool) { + fn scan(&self, trace: &mut E, _nursery: bool) { let mut sync = self.sync.lock().unwrap(); debug!("Starting ReferenceProcessor.scan({:?})", self.semantics); @@ -347,73 +350,82 @@ impl ReferenceProcessor { self.semantics, sync.references ); - if retain { - for reference in sync.references.iter() { - self.retain_referent::(trace, *reference); - } - } else { - // Put processed references in this set - let mut new_set = HashSet::with_capacity(INITIAL_SIZE); - debug_assert!(sync.enqueued_references.is_empty()); - // Put enqueued reference in this vec - let mut enqueued_references = vec![]; - - for reff in sync.references.iter() { - // Determine liveness (and forward if necessary) the reference - let new_reference = self.process_reference(trace, *reff, &mut enqueued_references); - if !new_reference.is_null() { - new_set.insert(new_reference); - } - } - debug!( - "{:?} reference table from {} to {} ({} enqueued)", - self.semantics, - sync.references.len(), - new_set.len(), - enqueued_references.len() - ); - sync.references = new_set; - sync.enqueued_references = enqueued_references; - } + + debug_assert!(sync.enqueued_references.is_empty()); + // Put enqueued reference in this vec + let mut enqueued_references = vec![]; + + // Determinine liveness for each reference and only keep the refs if `process_reference()` returns Some. + let new_set: HashSet = sync + .references + .iter() + .filter_map(|reff| self.process_reference(trace, *reff, &mut enqueued_references)) + .collect(); + + debug!( + "{:?} reference table from {} to {} ({} enqueued)", + self.semantics, + sync.references.len(), + new_set.len(), + enqueued_references.len() + ); + sync.references = new_set; + sync.enqueued_references = enqueued_references; debug!("Ending ReferenceProcessor.scan({:?})", self.semantics); } - /// This method deals only with soft references. It retains the referent - /// if the reference is definitely reachable. - fn retain_referent(&self, trace: &mut E, reference: ObjectReference) { - debug_assert!(!reference.is_null()); + /// Retain referent in the reference table. This method deals only with soft references. + /// It retains the referent if the reference is definitely reachable. This method does + /// not update reference or referent. So after this method, scan() should be used to update + /// the references/referents. + fn retain(&self, trace: &mut E, _nursery: bool) { debug_assert!(self.semantics == Semantics::SOFT); - trace!("Processing reference: {:?}", reference); + let sync = self.sync.lock().unwrap(); - if !reference.is_live() { - // Reference is currently unreachable but may get reachable by the - // following trace. We postpone the decision. - return; - } + debug!("Starting ReferenceProcessor.retain({:?})", self.semantics); + trace!( + "{:?} Reference table is {:?}", + self.semantics, + sync.references + ); - // Reference is definitely reachable. Retain the referent. - let referent = ::VMReferenceGlue::get_referent(reference); - if !referent.is_null() { - Self::keep_referent_alive(trace, referent); + for reference in sync.references.iter() { + debug_assert!(!reference.is_null()); + + trace!("Processing reference: {:?}", reference); + + if !reference.is_live() { + // Reference is currently unreachable but may get reachable by the + // following trace. We postpone the decision. + continue; + } + + // Reference is definitely reachable. Retain the referent. 
+ let referent = ::VMReferenceGlue::get_referent(*reference); + if !referent.is_null() { + Self::keep_referent_alive(trace, referent); + } + trace!(" ~> {:?} (retained)", referent.to_address()); } - trace!(" ~> {:?} (retained)", referent.to_address()); + + debug!("Ending ReferenceProcessor.retain({:?})", self.semantics); } /// Process a reference. /// * If both the reference and the referent is alive, return the updated reference and update its referent properly. - /// * If the reference is alive, and the referent is not null but not alive, return a null pointer and the reference (with cleared referent) is enqueued. - /// * For other cases, return a null pointer. + /// * If the reference is alive, and the referent is not null but not alive, return None and the reference (with cleared referent) is enqueued. + /// * For other cases, return None. /// - /// If a null pointer is returned, the reference can be removed from the reference table. Otherwise, the updated reference should be kept + /// If a None value is returned, the reference can be removed from the reference table. Otherwise, the updated reference should be kept /// in the reference table. fn process_reference( &self, trace: &mut E, reference: ObjectReference, enqueued_references: &mut Vec, - ) -> ObjectReference { + ) -> Option { debug_assert!(!reference.is_null()); trace!("Process reference: {}", reference); @@ -424,7 +436,7 @@ impl ReferenceProcessor { ::VMReferenceGlue::clear_referent(reference); trace!(" UNREACHABLE reference: {}", reference); trace!(" (unreachable)"); - return ObjectReference::NULL; + return None; } // The reference object is live @@ -438,7 +450,7 @@ impl ReferenceProcessor { // waiting list. if old_referent.is_null() { trace!(" (null referent) "); - return ObjectReference::NULL; + return None; } trace!(" => {}", new_reference); @@ -458,14 +470,14 @@ impl ReferenceProcessor { // Update the referent ::VMReferenceGlue::set_referent(new_reference, new_referent); - new_reference + Some(new_reference) } else { // Referent is unreachable. Clear the referent and enqueue the reference object. trace!(" UNREACHABLE referent: {}", old_referent); ::VMReferenceGlue::clear_referent(new_reference); enqueued_references.push(new_reference); - ObjectReference::NULL + None } } }
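As a closing note on the shape this series ends up with: soft references now get two passes, a retain() pass that only keeps referents of reachable soft references alive without touching the table, followed by the scan() pass that rebuilds the table and enqueues cleared references. A compact standalone sketch of that two-pass structure, with u64 standing in for ObjectReference and a closure as a placeholder liveness check (an illustration only, not the mmtk-core API):

    use std::collections::HashSet;

    fn main() {
        let table: HashSet<u64> = [1u64, 2, 3].into_iter().collect();
        let is_live = |r: u64| r != 2; // placeholder for reference.is_live()

        // Pass 1, retain: trace referents of live references; the table itself is not modified.
        for r in &table {
            if is_live(*r) {
                let _referent = *r; // real code would call keep_referent_alive(trace, referent)
            }
        }

        // Pass 2, scan: rebuild the table, dropping references whose processing yields None.
        let table: HashSet<u64> = table.into_iter().filter(|r| is_live(*r)).collect();
        assert_eq!(table.len(), 2);
    }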