From dbffe0e11384800ac78e62dd80686a85afd34fb4 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Wed, 12 Oct 2022 15:15:58 +0800 Subject: [PATCH 01/14] Rename feature and module --- Cargo.toml | 6 +++--- src/plan/markcompact/global.rs | 8 ++++---- src/plan/marksweep/global.rs | 8 ++++---- src/policy/copyspace.rs | 16 ++++++++-------- src/policy/immix/block.rs | 4 ++-- src/policy/immix/immixspace.rs | 12 ++++++------ src/policy/immortalspace.rs | 8 ++++---- src/policy/largeobjectspace.rs | 16 ++++++++-------- src/policy/lockfreeimmortalspace.rs | 4 ++-- src/policy/mallocspace/global.rs | 6 +++--- src/policy/mallocspace/metadata.rs | 10 +++++----- src/policy/markcompactspace.rs | 12 ++++++------ src/policy/sft.rs | 4 ++-- src/util/alloc/immix_allocator.rs | 4 ++-- src/util/is_mmtk_object.rs | 2 +- src/util/linear_scan.rs | 6 +++--- src/util/metadata/side_metadata/constants.rs | 2 +- src/util/metadata/side_metadata/global.rs | 8 ++++---- src/util/mod.rs | 4 ++-- src/util/object_forwarding.rs | 4 ++-- src/util/{alloc_bit.rs => vo_bit.rs} | 10 ++++++++++ 21 files changed, 82 insertions(+), 72 deletions(-) rename src/util/{alloc_bit.rs => vo_bit.rs} (82%) diff --git a/Cargo.toml b/Cargo.toml index 8f8b9d4e38..fc1f1c5c6b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -78,11 +78,11 @@ ro_space = [] # TODO: This is not properly implemented yet. We currently use an immortal space instead, and all our spaces have execution permission at the moment. code_space = [] -# metadata -global_alloc_bit = [] +# Valid-object bit. +vo_bit = [] # conservative garbage collection support -is_mmtk_object = ["global_alloc_bit"] +is_mmtk_object = ["vo_bit"] # The following two features are useful for using Immix for VMs that do not support moving GC. 
diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 4195f4ae75..66d8b1bf7a 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -15,8 +15,8 @@ use crate::policy::space::Space; use crate::scheduler::gc_work::*; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; -#[cfg(not(feature = "global_alloc_bit"))] -use crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC; +#[cfg(not(feature = "vo_bit"))] +use crate::util::vo_bit::ALLOC_SIDE_METADATA_SPEC; use crate::util::copy::CopySemantics; use crate::util::heap::layout::heap_layout::Mmapper; use crate::util::heap::layout::heap_layout::VMMap; @@ -181,11 +181,11 @@ impl MarkCompact { let mut heap = HeapMeta::new(&options); // if global_alloc_bit is enabled, ALLOC_SIDE_METADATA_SPEC will be added to // SideMetadataContext by default, so we don't need to add it here. - #[cfg(feature = "global_alloc_bit")] + #[cfg(feature = "vo_bit")] let global_metadata_specs = SideMetadataContext::new_global_specs(&[]); // if global_alloc_bit is NOT enabled, // we need to add ALLOC_SIDE_METADATA_SPEC to SideMetadataContext here. 
- #[cfg(not(feature = "global_alloc_bit"))] + #[cfg(not(feature = "vo_bit"))] let global_metadata_specs = SideMetadataContext::new_global_specs(&[ALLOC_SIDE_METADATA_SPEC]); diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index 8029a13b79..9d0c73f039 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -11,8 +11,8 @@ use crate::policy::mallocspace::MallocSpace; use crate::policy::space::Space; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; -#[cfg(not(feature = "global_alloc_bit"))] -use crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC; +#[cfg(not(feature = "vo_bit"))] +use crate::util::vo_bit::ALLOC_SIDE_METADATA_SPEC; use crate::util::heap::layout::heap_layout::Mmapper; use crate::util::heap::layout::heap_layout::VMMap; use crate::util::heap::HeapMeta; @@ -99,12 +99,12 @@ impl MarkSweep { let heap = HeapMeta::new(&options); // if global_alloc_bit is enabled, ALLOC_SIDE_METADATA_SPEC will be added to // SideMetadataContext by default, so we don't need to add it here. - #[cfg(feature = "global_alloc_bit")] + #[cfg(feature = "vo_bit")] let global_metadata_specs = SideMetadataContext::new_global_specs(&[ACTIVE_CHUNK_METADATA_SPEC]); // if global_alloc_bit is NOT enabled, // we need to add ALLOC_SIDE_METADATA_SPEC to SideMetadataContext here. 
- #[cfg(not(feature = "global_alloc_bit"))] + #[cfg(not(feature = "vo_bit"))] let global_metadata_specs = SideMetadataContext::new_global_specs(&[ ALLOC_SIDE_METADATA_SPEC, ACTIVE_CHUNK_METADATA_SPEC, diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs index 34368e01f7..6b65dde6f1 100644 --- a/src/policy/copyspace.rs +++ b/src/policy/copyspace.rs @@ -7,7 +7,7 @@ use crate::policy::space::{CommonSpace, Space}; use crate::scheduler::GCWorker; use crate::util::copy::*; use crate::util::heap::layout::heap_layout::{Mmapper, VMMap}; -#[cfg(feature = "global_alloc_bit")] +#[cfg(feature = "vo_bit")] use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; use crate::util::heap::HeapMeta; use crate::util::heap::VMRequest; @@ -46,8 +46,8 @@ impl SFT for CopySpace { } fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(_object); + #[cfg(feature = "vo_bit")] + crate::util::vo_bit::set_alloc_bit(_object); } #[inline(always)] @@ -182,7 +182,7 @@ impl CopySpace { pub fn release(&self) { unsafe { - #[cfg(feature = "global_alloc_bit")] + #[cfg(feature = "vo_bit")] self.reset_alloc_bit(); self.pr.reset(); } @@ -190,13 +190,13 @@ impl CopySpace { self.from_space.store(false, Ordering::SeqCst); } - #[cfg(feature = "global_alloc_bit")] + #[cfg(feature = "vo_bit")] unsafe fn reset_alloc_bit(&self) { let current_chunk = self.pr.get_current_chunk(); if self.common.contiguous { // If we have allocated something into this space, we need to clear its alloc bit. if current_chunk != self.common.start { - crate::util::alloc_bit::bzero_alloc_bit( + crate::util::vo_bit::bzero_alloc_bit( self.common.start, current_chunk + BYTES_IN_CHUNK - self.common.start, ); @@ -229,9 +229,9 @@ impl CopySpace { // This object is in from space, we will copy. Make sure we have a valid copy semantic. 
debug_assert!(semantics.is_some()); - #[cfg(feature = "global_alloc_bit")] + #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::alloc_bit::is_alloced(object), + crate::util::vo_bit::is_alloced(object), "{:x}: alloc bit not set", object ); diff --git a/src/policy/immix/block.rs b/src/policy/immix/block.rs index 930568ed39..396074eeab 100644 --- a/src/policy/immix/block.rs +++ b/src/policy/immix/block.rs @@ -183,8 +183,8 @@ impl Block { /// Deinitalize a block before releasing. #[inline] pub fn deinit(&self) { - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::bzero_alloc_bit(self.start(), Self::BYTES); + #[cfg(feature = "vo_bit")] + crate::util::vo_bit::bzero_alloc_bit(self.start(), Self::BYTES); self.set_state(BlockState::Unallocated); } diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index f625275312..78899f9680 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -76,8 +76,8 @@ impl SFT for ImmixSpace { true } fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(_object); + #[cfg(feature = "vo_bit")] + crate::util::vo_bit::set_alloc_bit(_object); } #[inline(always)] fn sft_trace_object( @@ -388,9 +388,9 @@ impl ImmixSpace { semantics: CopySemantics, worker: &mut GCWorker, ) -> ObjectReference { - #[cfg(feature = "global_alloc_bit")] + #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::alloc_bit::is_alloced(object), + crate::util::vo_bit::is_alloced(object), "{:x}: alloc bit not set", object ); @@ -482,8 +482,8 @@ impl ImmixSpace { Block::containing::(object).set_state(BlockState::Marked); object } else { - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::unset_alloc_bit(object); + #[cfg(feature = "vo_bit")] + crate::util::vo_bit::unset_alloc_bit(object); ForwardingWord::forward_object::(object, semantics, copy_context) }; debug_assert_eq!( diff --git 
a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs index 68d699b295..079e52cfb3 100644 --- a/src/policy/immortalspace.rs +++ b/src/policy/immortalspace.rs @@ -69,8 +69,8 @@ impl SFT for ImmortalSpace { if self.common.needs_log_bit { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); } - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(object); + #[cfg(feature = "vo_bit")] + crate::util::vo_bit::set_alloc_bit(object); } #[inline(always)] fn sft_trace_object( @@ -207,9 +207,9 @@ impl ImmortalSpace { queue: &mut Q, object: ObjectReference, ) -> ObjectReference { - #[cfg(feature = "global_alloc_bit")] + #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::alloc_bit::is_alloced(object), + crate::util::vo_bit::is_alloced(object), "{:x}: alloc bit not set", object ); diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index cbb7d9a76a..6c0f3d1990 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -72,8 +72,8 @@ impl SFT for LargeObjectSpace { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); } - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(object); + #[cfg(feature = "vo_bit")] + crate::util::vo_bit::set_alloc_bit(object); let cell = VM::VMObjectModel::object_start_ref(object); self.treadmill.add_to_treadmill(cell, alloc); } @@ -203,9 +203,9 @@ impl LargeObjectSpace { queue: &mut Q, object: ObjectReference, ) -> ObjectReference { - #[cfg(feature = "global_alloc_bit")] + #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::alloc_bit::is_alloced(object), + crate::util::vo_bit::is_alloced(object), "{:x}: alloc bit not set", object ); @@ -234,15 +234,15 @@ impl LargeObjectSpace { if sweep_nursery { for cell in self.treadmill.collect_nursery() { // println!("- cn {}", cell); - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::unset_addr_alloc_bit(cell); + 
#[cfg(feature = "vo_bit")] + crate::util::vo_bit::unset_addr_alloc_bit(cell); self.pr.release_pages(get_super_page(cell)); } } else { for cell in self.treadmill.collect() { // println!("- ts {}", cell); - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::unset_addr_alloc_bit(cell); + #[cfg(feature = "vo_bit")] + crate::util::vo_bit::unset_addr_alloc_bit(cell); self.pr.release_pages(get_super_page(cell)); } } diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs index c836b32982..5e6ac2c923 100644 --- a/src/policy/lockfreeimmortalspace.rs +++ b/src/policy/lockfreeimmortalspace.rs @@ -58,8 +58,8 @@ impl SFT for LockFreeImmortalSpace { unimplemented!() } fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(_object); + #[cfg(feature = "vo_bit")] + crate::util::vo_bit::set_alloc_bit(_object); } fn sft_trace_object( &self, diff --git a/src/policy/mallocspace/global.rs b/src/policy/mallocspace/global.rs index 1a9c248091..7cfae81762 100644 --- a/src/policy/mallocspace/global.rs +++ b/src/policy/mallocspace/global.rs @@ -482,7 +482,7 @@ impl MallocSpace { let chunk_end = chunk_start + BYTES_IN_CHUNK; debug_assert!( - crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC.log_bytes_in_region + crate::util::vo_bit::ALLOC_SIDE_METADATA_SPEC.log_bytes_in_region == mark_bit_spec.log_bytes_in_region, "Alloc-bit and mark-bit metadata have different minimum object sizes!" ); @@ -490,7 +490,7 @@ impl MallocSpace { // For bulk xor'ing 128-bit vectors on architectures with vector instructions // Each bit represents an object of LOG_MIN_OBJ_SIZE size let bulk_load_size: usize = - 128 * (1 << crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC.log_bytes_in_region); + 128 * (1 << crate::util::vo_bit::ALLOC_SIDE_METADATA_SPEC.log_bytes_in_region); // The start of a possibly empty page. 
This will be updated during the sweeping, and always points to the next page of last live objects. let mut empty_page_start = Address::ZERO; @@ -498,7 +498,7 @@ impl MallocSpace { // Scan the chunk by every 'bulk_load_size' region. while address < chunk_end { let alloc_128: u128 = - unsafe { load128(&crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC, address) }; + unsafe { load128(&crate::util::vo_bit::ALLOC_SIDE_METADATA_SPEC, address) }; let mark_128: u128 = unsafe { load128(&mark_bit_spec, address) }; // Check if there are dead objects in the bulk loaded region diff --git a/src/policy/mallocspace/metadata.rs b/src/policy/mallocspace/metadata.rs index 07c4b8e439..1128e66436 100644 --- a/src/policy/mallocspace/metadata.rs +++ b/src/policy/mallocspace/metadata.rs @@ -1,4 +1,4 @@ -use crate::util::alloc_bit; +use crate::util::vo_bit; use crate::util::conversions; use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; use crate::util::metadata::side_metadata; @@ -162,7 +162,7 @@ pub fn is_alloced_by_malloc(object: ObjectReference) -> bool { /// This function doesn't check if `addr` is aligned. /// If not, it will try to load the alloc bit for the address rounded down to the metadata's granularity. 
pub fn has_object_alloced_by_malloc(addr: Address) -> bool { - is_meta_space_mapped_for_address(addr) && alloc_bit::is_alloced_object(addr) + is_meta_space_mapped_for_address(addr) && vo_bit::is_alloced_object(addr) } pub fn is_marked(object: ObjectReference, ordering: Ordering) -> bool { @@ -205,7 +205,7 @@ pub unsafe fn is_chunk_marked_unsafe(chunk_start: Address) -> bool { } pub fn set_alloc_bit(object: ObjectReference) { - alloc_bit::set_alloc_bit(object); + vo_bit::set_alloc_bit(object); } pub fn set_mark_bit(object: ObjectReference, ordering: Ordering) { @@ -214,7 +214,7 @@ pub fn set_mark_bit(object: ObjectReference, ordering: Ordering) #[allow(unused)] pub fn unset_alloc_bit(object: ObjectReference) { - alloc_bit::unset_alloc_bit(object); + vo_bit::unset_alloc_bit(object); } pub(super) fn set_page_mark(page_addr: Address) { @@ -238,7 +238,7 @@ pub(super) unsafe fn unset_offset_malloc_bit_unsafe(address: Address) { } pub unsafe fn unset_alloc_bit_unsafe(object: ObjectReference) { - alloc_bit::unset_alloc_bit_unsafe(object); + vo_bit::unset_alloc_bit_unsafe(object); } #[allow(unused)] diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs index a6f3da1dd7..32192e28d3 100644 --- a/src/policy/markcompactspace.rs +++ b/src/policy/markcompactspace.rs @@ -11,7 +11,7 @@ use crate::util::heap::layout::heap_layout::{Mmapper, VMMap}; use crate::util::heap::{HeapMeta, MonotonePageResource, PageResource, VMRequest}; use crate::util::metadata::extract_side_metadata; use crate::util::metadata::side_metadata::{SideMetadataContext, SideMetadataSpec}; -use crate::util::{alloc_bit, Address, ObjectReference}; +use crate::util::{vo_bit, Address, ObjectReference}; use crate::{vm::*, ObjectQueue}; use atomic::Ordering; @@ -57,7 +57,7 @@ impl SFT for MarkCompactSpace { } fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) { - crate::util::alloc_bit::set_alloc_bit(object); + crate::util::vo_bit::set_alloc_bit(object); } #[cfg(feature = 
"sanity")] @@ -231,7 +231,7 @@ impl MarkCompactSpace { object: ObjectReference, ) -> ObjectReference { debug_assert!( - crate::util::alloc_bit::is_alloced(object), + crate::util::vo_bit::is_alloced(object), "{:x}: alloc bit not set", object ); @@ -247,7 +247,7 @@ impl MarkCompactSpace { object: ObjectReference, ) -> ObjectReference { debug_assert!( - crate::util::alloc_bit::is_alloced(object), + crate::util::vo_bit::is_alloced(object), "{:x}: alloc bit not set", object ); @@ -377,7 +377,7 @@ impl MarkCompactSpace { ); for obj in linear_scan { // clear the alloc bit - alloc_bit::unset_addr_alloc_bit(obj.to_address()); + vo_bit::unset_addr_alloc_bit(obj.to_address()); let forwarding_pointer = Self::get_header_forwarding_pointer(obj); @@ -391,7 +391,7 @@ impl MarkCompactSpace { trace!(" copy from {} to {}", obj, new_object); let end_of_new_object = VM::VMObjectModel::copy_to(obj, new_object, Address::ZERO); // update alloc_bit, - alloc_bit::set_alloc_bit(new_object); + vo_bit::set_alloc_bit(new_object); to = new_object.to_address() + copied_size; debug_assert_eq!(end_of_new_object, to); } diff --git a/src/policy/sft.rs b/src/policy/sft.rs index 7036d34f16..6aa359d78e 100644 --- a/src/policy/sft.rs +++ b/src/policy/sft.rs @@ -1,7 +1,7 @@ use crate::plan::VectorObjectQueue; use crate::scheduler::GCWorker; #[cfg(feature = "is_mmtk_object")] -use crate::util::alloc_bit; +use crate::util::vo_bit; use crate::util::conversions; use crate::util::*; use crate::vm::VMBinding; @@ -78,7 +78,7 @@ pub trait SFT { return false; } // The `addr` is mapped. We use the global alloc bit to get the exact answer. - alloc_bit::is_alloced_object(addr) + vo_bit::is_alloced_object(addr) } /// Initialize object metadata (in the header, or in the side metadata). 
diff --git a/src/util/alloc/immix_allocator.rs b/src/util/alloc/immix_allocator.rs index d73c0143ef..50a934c973 100644 --- a/src/util/alloc/immix_allocator.rs +++ b/src/util/alloc/immix_allocator.rs @@ -257,8 +257,8 @@ impl ImmixAllocator { end_line, self.tls ); - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::bzero_alloc_bit(self.cursor, self.limit - self.cursor); + #[cfg(feature = "vo_bit")] + crate::util::vo_bit::bzero_alloc_bit(self.cursor, self.limit - self.cursor); crate::util::memory::zero(self.cursor, self.limit - self.cursor); debug_assert!( align_allocation_no_fill::(self.cursor, align, offset) + size <= self.limit diff --git a/src/util/is_mmtk_object.rs b/src/util/is_mmtk_object.rs index 85b8d4315f..71c5f7549e 100644 --- a/src/util/is_mmtk_object.rs +++ b/src/util/is_mmtk_object.rs @@ -1,4 +1,4 @@ /// The region size (in bytes) of the `ALLOC_BIT` side metadata. /// The VM can use this to check if an object is properly aligned. pub const ALLOC_BIT_REGION_SIZE: usize = - 1usize << crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC.log_bytes_in_region; + 1usize << crate::util::vo_bit::ALLOC_SIDE_METADATA_SPEC.log_bytes_in_region; diff --git a/src/util/linear_scan.rs b/src/util/linear_scan.rs index 3212c2efef..fb2059cd89 100644 --- a/src/util/linear_scan.rs +++ b/src/util/linear_scan.rs @@ -1,4 +1,4 @@ -use crate::util::alloc_bit; +use crate::util::vo_bit; use crate::util::Address; use crate::util::ObjectReference; use crate::vm::ObjectModel; @@ -41,9 +41,9 @@ impl fn next(&mut self) -> Option<::Item> { while self.cursor < self.end { let is_object = if ATOMIC_LOAD_ALLOC_BIT { - alloc_bit::is_alloced_object(self.cursor) + vo_bit::is_alloced_object(self.cursor) } else { - unsafe { alloc_bit::is_alloced_object_unsafe(self.cursor) } + unsafe { vo_bit::is_alloced_object_unsafe(self.cursor) } }; if is_object { diff --git a/src/util/metadata/side_metadata/constants.rs b/src/util/metadata/side_metadata/constants.rs index 6e660e52b7..fa15677977 100644 
--- a/src/util/metadata/side_metadata/constants.rs +++ b/src/util/metadata/side_metadata/constants.rs @@ -30,7 +30,7 @@ pub(crate) const GLOBAL_SIDE_METADATA_BASE_OFFSET: SideMetadataOffset = SideMetadataOffset::addr(GLOBAL_SIDE_METADATA_BASE_ADDRESS); // Base address of alloc bit, public to VM bindings which may need to use this. -pub const ALLOC_SIDE_METADATA_ADDR: Address = crate::util::alloc_bit::ALLOC_SIDE_METADATA_ADDR; +pub const ALLOC_SIDE_METADATA_ADDR: Address = crate::util::vo_bit::ALLOC_SIDE_METADATA_ADDR; /// This constant represents the worst-case ratio of source data size to global side metadata. /// A value of 2 means the space required for global side metadata must be less than 1/4th of the source data. diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs index fca15b738d..e052bfbc81 100644 --- a/src/util/metadata/side_metadata/global.rs +++ b/src/util/metadata/side_metadata/global.rs @@ -1,6 +1,6 @@ use super::*; -#[cfg(feature = "global_alloc_bit")] -use crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC; +#[cfg(feature = "vo_bit")] +use crate::util::vo_bit::ALLOC_SIDE_METADATA_SPEC; use crate::util::constants::{BYTES_IN_PAGE, LOG_BITS_IN_BYTE}; use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; use crate::util::memory; @@ -158,7 +158,7 @@ impl SideMetadataSpec { let _lock = sanity::SANITY_LOCK.lock().unwrap(); // yiluowei: Not Sure but this assertion seems too strict for Immix recycled lines - #[cfg(not(feature = "global_alloc_bit"))] + #[cfg(not(feature = "vo_bit"))] debug_assert!(start.is_aligned_to(BYTES_IN_PAGE) && meta_byte_lshift(self, start) == 0); #[cfg(feature = "extreme_assertions")] @@ -713,7 +713,7 @@ impl SideMetadataContext { pub fn new_global_specs(specs: &[SideMetadataSpec]) -> Vec { let mut ret = vec![]; - #[cfg(feature = "global_alloc_bit")] + #[cfg(feature = "vo_bit")] ret.push(ALLOC_SIDE_METADATA_SPEC); { diff --git a/src/util/mod.rs b/src/util/mod.rs index 
771e253ab4..b06006eb63 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -29,8 +29,6 @@ pub mod options; pub mod reference_processor; // The following modules are only public in the mmtk crate. They should only be used in MMTk core. -/// Alloc bit -pub(crate) mod alloc_bit; /// An analysis framework for collecting data and profiling in GC. #[cfg(feature = "analysis")] pub(crate) mod analysis; @@ -65,6 +63,8 @@ pub(crate) mod statistics; pub(crate) mod test_util; /// A treadmill implementation. pub(crate) mod treadmill; +/// Valid object bit +pub(crate) mod vo_bit; // These modules are private. They are only used by other util modules. diff --git a/src/util/object_forwarding.rs b/src/util/object_forwarding.rs index 8b83830554..d0a1920b90 100644 --- a/src/util/object_forwarding.rs +++ b/src/util/object_forwarding.rs @@ -81,8 +81,8 @@ pub fn forward_object( copy_context: &mut GCWorkerCopyContext, ) -> ObjectReference { let new_object = VM::VMObjectModel::copy(object, semantics, copy_context); - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(new_object); + #[cfg(feature = "vo_bit")] + crate::util::vo_bit::set_vo_bit(new_object); if let Some(shift) = forwarding_bits_offset_in_forwarding_pointer::() { VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC.store_atomic::( object, diff --git a/src/util/alloc_bit.rs b/src/util/vo_bit.rs similarity index 82% rename from src/util/alloc_bit.rs rename to src/util/vo_bit.rs index e9bb9f93ca..ecb49043ed 100644 --- a/src/util/alloc_bit.rs +++ b/src/util/vo_bit.rs @@ -1,3 +1,13 @@ +//! Valid-object bit (VO-bit) +//! +//! The valid-object bit (VO-bit) metadata is a one-bit-per-object side metadata. It is set for +//! every object at allocation time (more precisely, during `post_alloc`), and cleared when either +//! - the object is reclaimed by the GC, or +//! - the VM explicitly clears the VO-bit of the object. +//! +//! The main purpose of VO-bit is supporting conservative GC. 
It is the canonical source of +//! information about whether there is an object in the MMTk heap at any given address. + use atomic::Ordering; use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; From d84878f306ca70f9728a43ab8eec593f4ffeef3d Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Wed, 12 Oct 2022 16:22:26 +0800 Subject: [PATCH 02/14] Rename functions and constants --- src/plan/markcompact/global.rs | 2 +- src/plan/marksweep/global.rs | 2 +- src/policy/copyspace.rs | 10 ++-- src/policy/immix/block.rs | 2 +- src/policy/immix/immixspace.rs | 6 +- src/policy/immortalspace.rs | 4 +- src/policy/largeobjectspace.rs | 8 +-- src/policy/lockfreeimmortalspace.rs | 2 +- src/policy/mallocspace/global.rs | 6 +- src/policy/mallocspace/metadata.rs | 8 +-- src/policy/markcompactspace.rs | 10 ++-- src/policy/sft.rs | 2 +- src/util/alloc/immix_allocator.rs | 2 +- src/util/is_mmtk_object.rs | 2 +- src/util/linear_scan.rs | 4 +- src/util/metadata/side_metadata/constants.rs | 2 +- src/util/metadata/side_metadata/global.rs | 4 +- src/util/metadata/side_metadata/spec_defs.rs | 4 +- src/util/object_forwarding.rs | 2 +- src/util/vo_bit.rs | 61 ++++++++------------ 20 files changed, 66 insertions(+), 77 deletions(-) diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 66d8b1bf7a..61d4ba8389 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -16,7 +16,7 @@ use crate::scheduler::gc_work::*; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; #[cfg(not(feature = "vo_bit"))] -use crate::util::vo_bit::ALLOC_SIDE_METADATA_SPEC; +use crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC; use crate::util::copy::CopySemantics; use crate::util::heap::layout::heap_layout::Mmapper; use crate::util::heap::layout::heap_layout::VMMap; diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index 9d0c73f039..b56f06663c 100644 --- a/src/plan/marksweep/global.rs +++ 
b/src/plan/marksweep/global.rs @@ -12,7 +12,7 @@ use crate::policy::space::Space; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; #[cfg(not(feature = "vo_bit"))] -use crate::util::vo_bit::ALLOC_SIDE_METADATA_SPEC; +use crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC; use crate::util::heap::layout::heap_layout::Mmapper; use crate::util::heap::layout::heap_layout::VMMap; use crate::util::heap::HeapMeta; diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs index 6b65dde6f1..866a20c0c6 100644 --- a/src/policy/copyspace.rs +++ b/src/policy/copyspace.rs @@ -47,7 +47,7 @@ impl SFT for CopySpace { fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { #[cfg(feature = "vo_bit")] - crate::util::vo_bit::set_alloc_bit(_object); + crate::util::vo_bit::set_vo_bit(_object); } #[inline(always)] @@ -183,7 +183,7 @@ impl CopySpace { pub fn release(&self) { unsafe { #[cfg(feature = "vo_bit")] - self.reset_alloc_bit(); + self.reset_vo_bit(); self.pr.reset(); } self.common.metadata.reset(); @@ -191,12 +191,12 @@ impl CopySpace { } #[cfg(feature = "vo_bit")] - unsafe fn reset_alloc_bit(&self) { + unsafe fn reset_vo_bit(&self) { let current_chunk = self.pr.get_current_chunk(); if self.common.contiguous { // If we have allocated something into this space, we need to clear its alloc bit. 
if current_chunk != self.common.start { - crate::util::vo_bit::bzero_alloc_bit( + crate::util::vo_bit::bzero_vo_bit( self.common.start, current_chunk + BYTES_IN_CHUNK - self.common.start, ); @@ -231,7 +231,7 @@ impl CopySpace { #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::vo_bit::is_alloced(object), + crate::util::vo_bit::is_vo_bit_set(object), "{:x}: alloc bit not set", object ); diff --git a/src/policy/immix/block.rs b/src/policy/immix/block.rs index 396074eeab..ee32e86783 100644 --- a/src/policy/immix/block.rs +++ b/src/policy/immix/block.rs @@ -184,7 +184,7 @@ impl Block { #[inline] pub fn deinit(&self) { #[cfg(feature = "vo_bit")] - crate::util::vo_bit::bzero_alloc_bit(self.start(), Self::BYTES); + crate::util::vo_bit::bzero_vo_bit(self.start(), Self::BYTES); self.set_state(BlockState::Unallocated); } diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 78899f9680..302d82326d 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -77,7 +77,7 @@ impl SFT for ImmixSpace { } fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { #[cfg(feature = "vo_bit")] - crate::util::vo_bit::set_alloc_bit(_object); + crate::util::vo_bit::set_vo_bit(_object); } #[inline(always)] fn sft_trace_object( @@ -390,7 +390,7 @@ impl ImmixSpace { ) -> ObjectReference { #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::vo_bit::is_alloced(object), + crate::util::vo_bit::is_vo_bit_set(object), "{:x}: alloc bit not set", object ); @@ -483,7 +483,7 @@ impl ImmixSpace { object } else { #[cfg(feature = "vo_bit")] - crate::util::vo_bit::unset_alloc_bit(object); + crate::util::vo_bit::unset_vo_bit(object); ForwardingWord::forward_object::(object, semantics, copy_context) }; debug_assert_eq!( diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs index 079e52cfb3..e6752afd55 100644 --- a/src/policy/immortalspace.rs +++ b/src/policy/immortalspace.rs @@ -70,7 +70,7 @@ impl SFT for 
ImmortalSpace { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); } #[cfg(feature = "vo_bit")] - crate::util::vo_bit::set_alloc_bit(object); + crate::util::vo_bit::set_vo_bit(object); } #[inline(always)] fn sft_trace_object( @@ -209,7 +209,7 @@ impl ImmortalSpace { ) -> ObjectReference { #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::vo_bit::is_alloced(object), + crate::util::vo_bit::is_vo_bit_set(object), "{:x}: alloc bit not set", object ); diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index 6c0f3d1990..c9ab70bde3 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -73,7 +73,7 @@ impl SFT for LargeObjectSpace { } #[cfg(feature = "vo_bit")] - crate::util::vo_bit::set_alloc_bit(object); + crate::util::vo_bit::set_vo_bit(object); let cell = VM::VMObjectModel::object_start_ref(object); self.treadmill.add_to_treadmill(cell, alloc); } @@ -205,7 +205,7 @@ impl LargeObjectSpace { ) -> ObjectReference { #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::vo_bit::is_alloced(object), + crate::util::vo_bit::is_vo_bit_set(object), "{:x}: alloc bit not set", object ); @@ -235,14 +235,14 @@ impl LargeObjectSpace { for cell in self.treadmill.collect_nursery() { // println!("- cn {}", cell); #[cfg(feature = "vo_bit")] - crate::util::vo_bit::unset_addr_alloc_bit(cell); + crate::util::vo_bit::unset_vo_bit_for_addr(cell); self.pr.release_pages(get_super_page(cell)); } } else { for cell in self.treadmill.collect() { // println!("- ts {}", cell); #[cfg(feature = "vo_bit")] - crate::util::vo_bit::unset_addr_alloc_bit(cell); + crate::util::vo_bit::unset_vo_bit_for_addr(cell); self.pr.release_pages(get_super_page(cell)); } } diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs index 5e6ac2c923..e0f0f82258 100644 --- a/src/policy/lockfreeimmortalspace.rs +++ b/src/policy/lockfreeimmortalspace.rs @@ -59,7 +59,7 @@ impl SFT for 
LockFreeImmortalSpace { } fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { #[cfg(feature = "vo_bit")] - crate::util::vo_bit::set_alloc_bit(_object); + crate::util::vo_bit::set_vo_bit(_object); } fn sft_trace_object( &self, diff --git a/src/policy/mallocspace/global.rs b/src/policy/mallocspace/global.rs index 7cfae81762..1886cd2f29 100644 --- a/src/policy/mallocspace/global.rs +++ b/src/policy/mallocspace/global.rs @@ -482,7 +482,7 @@ impl MallocSpace { let chunk_end = chunk_start + BYTES_IN_CHUNK; debug_assert!( - crate::util::vo_bit::ALLOC_SIDE_METADATA_SPEC.log_bytes_in_region + crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region == mark_bit_spec.log_bytes_in_region, "Alloc-bit and mark-bit metadata have different minimum object sizes!" ); @@ -490,7 +490,7 @@ impl MallocSpace { // For bulk xor'ing 128-bit vectors on architectures with vector instructions // Each bit represents an object of LOG_MIN_OBJ_SIZE size let bulk_load_size: usize = - 128 * (1 << crate::util::vo_bit::ALLOC_SIDE_METADATA_SPEC.log_bytes_in_region); + 128 * (1 << crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region); // The start of a possibly empty page. This will be updated during the sweeping, and always points to the next page of last live objects. let mut empty_page_start = Address::ZERO; @@ -498,7 +498,7 @@ impl MallocSpace { // Scan the chunk by every 'bulk_load_size' region. 
while address < chunk_end { let alloc_128: u128 = - unsafe { load128(&crate::util::vo_bit::ALLOC_SIDE_METADATA_SPEC, address) }; + unsafe { load128(&crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC, address) }; let mark_128: u128 = unsafe { load128(&mark_bit_spec, address) }; // Check if there are dead objects in the bulk loaded region diff --git a/src/policy/mallocspace/metadata.rs b/src/policy/mallocspace/metadata.rs index 1128e66436..5dc80c6718 100644 --- a/src/policy/mallocspace/metadata.rs +++ b/src/policy/mallocspace/metadata.rs @@ -162,7 +162,7 @@ pub fn is_alloced_by_malloc(object: ObjectReference) -> bool { /// This function doesn't check if `addr` is aligned. /// If not, it will try to load the alloc bit for the address rounded down to the metadata's granularity. pub fn has_object_alloced_by_malloc(addr: Address) -> bool { - is_meta_space_mapped_for_address(addr) && vo_bit::is_alloced_object(addr) + is_meta_space_mapped_for_address(addr) && vo_bit::is_vo_bit_set_for_addr(addr) } pub fn is_marked(object: ObjectReference, ordering: Ordering) -> bool { @@ -205,7 +205,7 @@ pub unsafe fn is_chunk_marked_unsafe(chunk_start: Address) -> bool { } pub fn set_alloc_bit(object: ObjectReference) { - vo_bit::set_alloc_bit(object); + vo_bit::set_vo_bit(object); } pub fn set_mark_bit(object: ObjectReference, ordering: Ordering) { @@ -214,7 +214,7 @@ pub fn set_mark_bit(object: ObjectReference, ordering: Ordering) #[allow(unused)] pub fn unset_alloc_bit(object: ObjectReference) { - vo_bit::unset_alloc_bit(object); + vo_bit::unset_vo_bit(object); } pub(super) fn set_page_mark(page_addr: Address) { @@ -238,7 +238,7 @@ pub(super) unsafe fn unset_offset_malloc_bit_unsafe(address: Address) { } pub unsafe fn unset_alloc_bit_unsafe(object: ObjectReference) { - vo_bit::unset_alloc_bit_unsafe(object); + vo_bit::unset_vo_bit_unsafe(object); } #[allow(unused)] diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs index 32192e28d3..24f7384cd1 100644 --- 
a/src/policy/markcompactspace.rs +++ b/src/policy/markcompactspace.rs @@ -57,7 +57,7 @@ impl SFT for MarkCompactSpace { } fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) { - crate::util::vo_bit::set_alloc_bit(object); + crate::util::vo_bit::set_vo_bit(object); } #[cfg(feature = "sanity")] @@ -231,7 +231,7 @@ impl MarkCompactSpace { object: ObjectReference, ) -> ObjectReference { debug_assert!( - crate::util::vo_bit::is_alloced(object), + crate::util::vo_bit::is_vo_bit_set(object), "{:x}: alloc bit not set", object ); @@ -247,7 +247,7 @@ impl MarkCompactSpace { object: ObjectReference, ) -> ObjectReference { debug_assert!( - crate::util::vo_bit::is_alloced(object), + crate::util::vo_bit::is_vo_bit_set(object), "{:x}: alloc bit not set", object ); @@ -377,7 +377,7 @@ impl MarkCompactSpace { ); for obj in linear_scan { // clear the alloc bit - vo_bit::unset_addr_alloc_bit(obj.to_address()); + vo_bit::unset_vo_bit_for_addr(obj.to_address()); let forwarding_pointer = Self::get_header_forwarding_pointer(obj); @@ -391,7 +391,7 @@ impl MarkCompactSpace { trace!(" copy from {} to {}", obj, new_object); let end_of_new_object = VM::VMObjectModel::copy_to(obj, new_object, Address::ZERO); // update alloc_bit, - vo_bit::set_alloc_bit(new_object); + vo_bit::set_vo_bit(new_object); to = new_object.to_address() + copied_size; debug_assert_eq!(end_of_new_object, to); } diff --git a/src/policy/sft.rs b/src/policy/sft.rs index 6aa359d78e..e347d4ba5d 100644 --- a/src/policy/sft.rs +++ b/src/policy/sft.rs @@ -78,7 +78,7 @@ pub trait SFT { return false; } // The `addr` is mapped. We use the global alloc bit to get the exact answer. - vo_bit::is_alloced_object(addr) + vo_bit::is_vo_bit_set_for_addr(addr) } /// Initialize object metadata (in the header, or in the side metadata). 
diff --git a/src/util/alloc/immix_allocator.rs b/src/util/alloc/immix_allocator.rs index 50a934c973..72430e0260 100644 --- a/src/util/alloc/immix_allocator.rs +++ b/src/util/alloc/immix_allocator.rs @@ -258,7 +258,7 @@ impl ImmixAllocator { self.tls ); #[cfg(feature = "vo_bit")] - crate::util::vo_bit::bzero_alloc_bit(self.cursor, self.limit - self.cursor); + crate::util::vo_bit::bzero_vo_bit(self.cursor, self.limit - self.cursor); crate::util::memory::zero(self.cursor, self.limit - self.cursor); debug_assert!( align_allocation_no_fill::(self.cursor, align, offset) + size <= self.limit diff --git a/src/util/is_mmtk_object.rs b/src/util/is_mmtk_object.rs index 71c5f7549e..44beec3dc1 100644 --- a/src/util/is_mmtk_object.rs +++ b/src/util/is_mmtk_object.rs @@ -1,4 +1,4 @@ /// The region size (in bytes) of the `ALLOC_BIT` side metadata. /// The VM can use this to check if an object is properly aligned. pub const ALLOC_BIT_REGION_SIZE: usize = - 1usize << crate::util::vo_bit::ALLOC_SIDE_METADATA_SPEC.log_bytes_in_region; + 1usize << crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region; diff --git a/src/util/linear_scan.rs b/src/util/linear_scan.rs index fb2059cd89..c9ce60b557 100644 --- a/src/util/linear_scan.rs +++ b/src/util/linear_scan.rs @@ -41,9 +41,9 @@ impl fn next(&mut self) -> Option<::Item> { while self.cursor < self.end { let is_object = if ATOMIC_LOAD_ALLOC_BIT { - vo_bit::is_alloced_object(self.cursor) + vo_bit::is_vo_bit_set_for_addr(self.cursor) } else { - unsafe { vo_bit::is_alloced_object_unsafe(self.cursor) } + unsafe { vo_bit::is_vo_bit_set_for_addr_unsafe(self.cursor) } }; if is_object { diff --git a/src/util/metadata/side_metadata/constants.rs b/src/util/metadata/side_metadata/constants.rs index fa15677977..f8d16fcbd9 100644 --- a/src/util/metadata/side_metadata/constants.rs +++ b/src/util/metadata/side_metadata/constants.rs @@ -30,7 +30,7 @@ pub(crate) const GLOBAL_SIDE_METADATA_BASE_OFFSET: SideMetadataOffset = 
SideMetadataOffset::addr(GLOBAL_SIDE_METADATA_BASE_ADDRESS); // Base address of alloc bit, public to VM bindings which may need to use this. -pub const ALLOC_SIDE_METADATA_ADDR: Address = crate::util::vo_bit::ALLOC_SIDE_METADATA_ADDR; +pub const ALLOC_SIDE_METADATA_ADDR: Address = crate::util::vo_bit::VO_BIT_SIDE_METADATA_ADDR; /// This constant represents the worst-case ratio of source data size to global side metadata. /// A value of 2 means the space required for global side metadata must be less than 1/4th of the source data. diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs index e052bfbc81..67284b985c 100644 --- a/src/util/metadata/side_metadata/global.rs +++ b/src/util/metadata/side_metadata/global.rs @@ -1,6 +1,6 @@ use super::*; #[cfg(feature = "vo_bit")] -use crate::util::vo_bit::ALLOC_SIDE_METADATA_SPEC; +use crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC; use crate::util::constants::{BYTES_IN_PAGE, LOG_BITS_IN_BYTE}; use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; use crate::util::memory; @@ -714,7 +714,7 @@ impl SideMetadataContext { let mut ret = vec![]; #[cfg(feature = "vo_bit")] - ret.push(ALLOC_SIDE_METADATA_SPEC); + ret.push(VO_BIT_SIDE_METADATA_SPEC); { use crate::policy::sft_map::SFTMap; diff --git a/src/util/metadata/side_metadata/spec_defs.rs b/src/util/metadata/side_metadata/spec_defs.rs index 52955fe4db..bc586569ad 100644 --- a/src/util/metadata/side_metadata/spec_defs.rs +++ b/src/util/metadata/side_metadata/spec_defs.rs @@ -54,8 +54,8 @@ macro_rules! define_side_metadata_specs { // This defines all GLOBAL side metadata used by mmtk-core. define_side_metadata_specs!( last_spec_as LAST_GLOBAL_SIDE_METADATA_SPEC, - // Mark the start of an object - ALLOC_BIT = (global: true, log_num_of_bits: 0, log_bytes_in_region: LOG_MIN_OBJECT_SIZE as usize), + // Valid object bit. 
+ VO_BIT = (global: true, log_num_of_bits: 0, log_bytes_in_region: LOG_MIN_OBJECT_SIZE as usize), // Track chunks used by (malloc) marksweep MS_ACTIVE_CHUNK = (global: true, log_num_of_bits: 3, log_bytes_in_region: LOG_BYTES_IN_CHUNK as usize), // Track the index in SFT map for a chunk (only used for SFT sparse chunk map) diff --git a/src/util/object_forwarding.rs b/src/util/object_forwarding.rs index d0a1920b90..3cb10ce59a 100644 --- a/src/util/object_forwarding.rs +++ b/src/util/object_forwarding.rs @@ -82,7 +82,7 @@ pub fn forward_object( ) -> ObjectReference { let new_object = VM::VMObjectModel::copy(object, semantics, copy_context); #[cfg(feature = "vo_bit")] - crate::util::vo_bit::set_alloc_bit(new_object); + crate::util::vo_bit::set_vo_bit(new_object); if let Some(shift) = forwarding_bits_offset_in_forwarding_pointer::() { VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC.store_atomic::( object, diff --git a/src/util/vo_bit.rs b/src/util/vo_bit.rs index ecb49043ed..a5bdecba6e 100644 --- a/src/util/vo_bit.rs +++ b/src/util/vo_bit.rs @@ -10,71 +10,60 @@ use atomic::Ordering; -use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; -use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::metadata::side_metadata::SideMetadataSpec; use crate::util::Address; use crate::util::ObjectReference; -/// An alloc-bit is required per min-object-size aligned address , rather than per object, and can only exist as side metadata. -pub(crate) const ALLOC_SIDE_METADATA_SPEC: SideMetadataSpec = - crate::util::metadata::side_metadata::spec_defs::ALLOC_BIT; +/// A VO-bit is required per min-object-size aligned address, rather than per object, and can only exist as side metadata.
+pub(crate) const VO_BIT_SIDE_METADATA_SPEC: SideMetadataSpec = + crate::util::metadata::side_metadata::spec_defs::VO_BIT; -pub const ALLOC_SIDE_METADATA_ADDR: Address = ALLOC_SIDE_METADATA_SPEC.get_absolute_offset(); +pub const VO_BIT_SIDE_METADATA_ADDR: Address = VO_BIT_SIDE_METADATA_SPEC.get_absolute_offset(); -pub fn map_meta_space_for_chunk(metadata: &SideMetadataContext, chunk_start: Address) { - let mmap_metadata_result = metadata.try_map_metadata_space(chunk_start, BYTES_IN_CHUNK); - debug_assert!( - mmap_metadata_result.is_ok(), - "mmap sidemetadata failed for chunk_start ({})", - chunk_start - ); -} - -pub fn set_alloc_bit(object: ObjectReference) { - debug_assert!(!is_alloced(object), "{:x}: alloc bit already set", object); - ALLOC_SIDE_METADATA_SPEC.store_atomic::(object.to_address(), 1, Ordering::SeqCst); +pub fn set_vo_bit(object: ObjectReference) { + debug_assert!(!is_vo_bit_set(object), "{:x}: VO-bit already set", object); + VO_BIT_SIDE_METADATA_SPEC.store_atomic::(object.to_address(), 1, Ordering::SeqCst); } -pub fn unset_addr_alloc_bit(address: Address) { +pub fn unset_vo_bit_for_addr(address: Address) { debug_assert!( - is_alloced_object(address), - "{:x}: alloc bit not set", + is_vo_bit_set_for_addr(address), + "{:x}: VO-bit not set", address ); - ALLOC_SIDE_METADATA_SPEC.store_atomic::(address, 0, Ordering::SeqCst); + VO_BIT_SIDE_METADATA_SPEC.store_atomic::(address, 0, Ordering::SeqCst); } -pub fn unset_alloc_bit(object: ObjectReference) { - debug_assert!(is_alloced(object), "{:x}: alloc bit not set", object); - ALLOC_SIDE_METADATA_SPEC.store_atomic::(object.to_address(), 0, Ordering::SeqCst); +pub fn unset_vo_bit(object: ObjectReference) { + debug_assert!(is_vo_bit_set(object), "{:x}: VO-bit not set", object); + VO_BIT_SIDE_METADATA_SPEC.store_atomic::(object.to_address(), 0, Ordering::SeqCst); } /// # Safety /// /// This is unsafe: check the comment on `side_metadata::store` /// -pub unsafe fn unset_alloc_bit_unsafe(object: ObjectReference) 
{ - debug_assert!(is_alloced(object), "{:x}: alloc bit not set", object); - ALLOC_SIDE_METADATA_SPEC.store::(object.to_address(), 0); +pub unsafe fn unset_vo_bit_unsafe(object: ObjectReference) { + debug_assert!(is_vo_bit_set(object), "{:x}: VO-bit not set", object); + VO_BIT_SIDE_METADATA_SPEC.store::(object.to_address(), 0); } -pub fn is_alloced(object: ObjectReference) -> bool { - is_alloced_object(object.to_address()) +pub fn is_vo_bit_set(object: ObjectReference) -> bool { + is_vo_bit_set_for_addr(object.to_address()) } -pub fn is_alloced_object(address: Address) -> bool { - ALLOC_SIDE_METADATA_SPEC.load_atomic::(address, Ordering::SeqCst) == 1 +pub fn is_vo_bit_set_for_addr(address: Address) -> bool { + VO_BIT_SIDE_METADATA_SPEC.load_atomic::(address, Ordering::SeqCst) == 1 } /// # Safety /// /// This is unsafe: check the comment on `side_metadata::load` /// -pub unsafe fn is_alloced_object_unsafe(address: Address) -> bool { - ALLOC_SIDE_METADATA_SPEC.load::(address) == 1 +pub unsafe fn is_vo_bit_set_for_addr_unsafe(address: Address) -> bool { + VO_BIT_SIDE_METADATA_SPEC.load::(address) == 1 } -pub fn bzero_alloc_bit(start: Address, size: usize) { - ALLOC_SIDE_METADATA_SPEC.bzero_metadata(start, size); +pub fn bzero_vo_bit(start: Address, size: usize) { + VO_BIT_SIDE_METADATA_SPEC.bzero_metadata(start, size); } From 37b525e2ce68594983a101ffb7bf3e7c0a1ca107 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Wed, 12 Oct 2022 16:42:11 +0800 Subject: [PATCH 03/14] Rename comments and some missed constants. I did not rename comments and functions in malloc space and mark-compact space because when they mention "alloc bit", they do mean a one-bit-per-allocation metadata. They should have used their own local metadata for that purpose. 
--- src/memory_manager.rs | 14 +++++++------- src/plan/global.rs | 2 +- src/plan/markcompact/global.rs | 4 ++-- src/plan/marksweep/global.rs | 4 ++-- src/policy/copyspace.rs | 4 ++-- src/policy/immix/immixspace.rs | 2 +- src/policy/immortalspace.rs | 2 +- src/policy/largeobjectspace.rs | 2 +- src/policy/mallocspace/metadata.rs | 4 ++++ src/policy/markcompactspace.rs | 10 ++++++---- src/policy/sft.rs | 2 +- src/util/is_mmtk_object.rs | 4 ++-- src/util/linear_scan.rs | 2 ++ src/util/metadata/mod.rs | 2 +- src/util/metadata/side_metadata/constants.rs | 6 +++--- vmbindings/dummyvm/src/tests/conservatism.rs | 4 ++-- 16 files changed, 38 insertions(+), 30 deletions(-) diff --git a/src/memory_manager.rs b/src/memory_manager.rs index ec14799225..730fb21e79 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -578,20 +578,20 @@ pub fn is_live_object(object: ObjectReference) -> bool { /// 2. Also return true if there exists an `objref: ObjectReference` such that /// - `objref` is a valid object reference to an object in any space in MMTk, and /// - `lo <= objref.to_address() < hi`, where -/// - `lo = addr.align_down(ALLOC_BIT_REGION_SIZE)` and -/// - `hi = lo + ALLOC_BIT_REGION_SIZE` and -/// - `ALLOC_BIT_REGION_SIZE` is [`crate::util::is_mmtk_object::ALLOC_BIT_REGION_SIZE`]. -/// It is the byte granularity of the alloc bit. +/// - `lo = addr.align_down(VO_BIT_REGION_SIZE)` and +/// - `hi = lo + VO_BIT_REGION_SIZE` and +/// - `VO_BIT_REGION_SIZE` is [`crate::util::is_mmtk_object::VO_BIT_REGION_SIZE`]. +/// It is the byte granularity of the VO-bit. /// 3. Return false otherwise. This function never panics. /// /// Case 2 means **this function is imprecise for misaligned addresses**. -/// This function uses the "alloc bits" side metadata, i.e. a bitmap. +/// This function uses the VO-bit side metadata, i.e. a bitmap. /// For space efficiency, each bit of the bitmap governs a small region of memory. 
/// The size of a region is currently defined as the [minimum object size](crate::util::constants::MIN_OBJECT_SIZE), /// which is currently defined as the [word size](crate::util::constants::BYTES_IN_WORD), /// which is 4 bytes on 32-bit systems or 8 bytes on 64-bit systems. /// The alignment of a region is also the region size. -/// If an alloc bit is `1`, the bitmap cannot tell which address within the 4-byte or 8-byte region +/// If a VO-bit is `1`, the bitmap cannot tell which address within the 4-byte or 8-byte region /// is the valid object reference. /// Therefore, if the input `addr` is not properly aligned, but is close to a valid object /// reference, this function may still return true. @@ -599,7 +599,7 @@ pub fn is_live_object(object: ObjectReference) -> bool { /// For the reason above, the VM **must check if `addr` is properly aligned** before calling this /// function. For most VMs, valid object references are always aligned to the word size, so /// checking `addr.is_aligned_to(BYTES_IN_WORD)` should usually work. If you are paranoid, you can -/// always check against [`crate::util::is_mmtk_object::ALLOC_BIT_REGION_SIZE`]. +/// always check against [`crate::util::is_mmtk_object::VO_BIT_REGION_SIZE`]. /// /// This function is useful for conservative root scanning. The VM can iterate through all words in /// a stack, filter out zeros, misaligned words, obviously out-of-range words (such as addresses diff --git a/src/plan/global.rs b/src/plan/global.rs index 840f1046dd..85f148a2ce 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -422,7 +422,7 @@ pub struct BasePlan { /// If VM space is present, it has some special interaction with the /// `memory_manager::is_mmtk_object` and the `memory_manager::is_in_mmtk_spaces` functions. 
/// - /// - The `is_mmtk_object` funciton requires the alloc_bit side metadata to identify objects, + /// - The `is_mmtk_object` function requires the VO-bit side metadata to identify objects, /// but currently we do not require the boot image to provide it, so it will not work if the /// address argument is in the VM space. /// diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 61d4ba8389..3230e8f124 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -179,11 +179,11 @@ impl Plan for MarkCompact { impl MarkCompact { pub fn new(vm_map: &'static VMMap, mmapper: &'static Mmapper, options: Arc) -> Self { let mut heap = HeapMeta::new(&options); - // if global_alloc_bit is enabled, ALLOC_SIDE_METADATA_SPEC will be added to + // if vo_bit is enabled, ALLOC_SIDE_METADATA_SPEC will be added to // SideMetadataContext by default, so we don't need to add it here. #[cfg(feature = "vo_bit")] let global_metadata_specs = SideMetadataContext::new_global_specs(&[]); - // if global_alloc_bit is NOT enabled, + // if vo_bit is NOT enabled, // we need to add ALLOC_SIDE_METADATA_SPEC to SideMetadataContext here. #[cfg(not(feature = "vo_bit"))] let global_metadata_specs = diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index b56f06663c..74a207245d 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -97,12 +97,12 @@ impl Plan for MarkSweep { impl MarkSweep { pub fn new(vm_map: &'static VMMap, mmapper: &'static Mmapper, options: Arc) -> Self { let heap = HeapMeta::new(&options); - // if global_alloc_bit is enabled, ALLOC_SIDE_METADATA_SPEC will be added to + // if vo_bit is enabled, ALLOC_SIDE_METADATA_SPEC will be added to // SideMetadataContext by default, so we don't need to add it here. 
#[cfg(feature = "vo_bit")] let global_metadata_specs = SideMetadataContext::new_global_specs(&[ACTIVE_CHUNK_METADATA_SPEC]); - // if global_alloc_bit is NOT enabled, + // if vo_bit is NOT enabled, // we need to add ALLOC_SIDE_METADATA_SPEC to SideMetadataContext here. #[cfg(not(feature = "vo_bit"))] let global_metadata_specs = SideMetadataContext::new_global_specs(&[ diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs index 866a20c0c6..1ee92c01da 100644 --- a/src/policy/copyspace.rs +++ b/src/policy/copyspace.rs @@ -194,7 +194,7 @@ impl CopySpace { unsafe fn reset_vo_bit(&self) { let current_chunk = self.pr.get_current_chunk(); if self.common.contiguous { - // If we have allocated something into this space, we need to clear its alloc bit. + // If we have allocated something into this space, we need to clear its VO-bit. if current_chunk != self.common.start { crate::util::vo_bit::bzero_vo_bit( self.common.start, @@ -232,7 +232,7 @@ impl CopySpace { #[cfg(feature = "vo_bit")] debug_assert!( crate::util::vo_bit::is_vo_bit_set(object), - "{:x}: alloc bit not set", + "{:x}: VO-bit not set", object ); diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 302d82326d..fe335dd419 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -391,7 +391,7 @@ impl ImmixSpace { #[cfg(feature = "vo_bit")] debug_assert!( crate::util::vo_bit::is_vo_bit_set(object), - "{:x}: alloc bit not set", + "{:x}: VO-bit not set", object ); if Block::containing::(object).is_defrag_source() { diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs index e6752afd55..c2c853476c 100644 --- a/src/policy/immortalspace.rs +++ b/src/policy/immortalspace.rs @@ -210,7 +210,7 @@ impl ImmortalSpace { #[cfg(feature = "vo_bit")] debug_assert!( crate::util::vo_bit::is_vo_bit_set(object), - "{:x}: alloc bit not set", + "{:x}: VO-bit not set", object ); if ImmortalSpace::::test_and_mark(object, self.mark_state) { diff --git 
a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index c9ab70bde3..4eba231502 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -206,7 +206,7 @@ impl LargeObjectSpace { #[cfg(feature = "vo_bit")] debug_assert!( crate::util::vo_bit::is_vo_bit_set(object), - "{:x}: alloc bit not set", + "{:x}: VO-bit not set", object ); let nursery_object = self.is_in_nursery(object); diff --git a/src/policy/mallocspace/metadata.rs b/src/policy/mallocspace/metadata.rs index 5dc80c6718..67987330b9 100644 --- a/src/policy/mallocspace/metadata.rs +++ b/src/policy/mallocspace/metadata.rs @@ -162,6 +162,7 @@ pub fn is_alloced_by_malloc(object: ObjectReference) -> bool { /// This function doesn't check if `addr` is aligned. /// If not, it will try to load the alloc bit for the address rounded down to the metadata's granularity. pub fn has_object_alloced_by_malloc(addr: Address) -> bool { + // FIXME: MallocSpace should use a local metadata to record allocated units. is_meta_space_mapped_for_address(addr) && vo_bit::is_vo_bit_set_for_addr(addr) } @@ -205,6 +206,7 @@ pub unsafe fn is_chunk_marked_unsafe(chunk_start: Address) -> bool { } pub fn set_alloc_bit(object: ObjectReference) { + // FIXME: MallocSpace should use a local metadata to record allocated units. vo_bit::set_vo_bit(object); } @@ -214,6 +216,7 @@ pub fn set_mark_bit(object: ObjectReference, ordering: Ordering) #[allow(unused)] pub fn unset_alloc_bit(object: ObjectReference) { + // FIXME: MallocSpace should use a local metadata to record allocated units. vo_bit::unset_vo_bit(object); } @@ -238,6 +241,7 @@ pub(super) unsafe fn unset_offset_malloc_bit_unsafe(address: Address) { } pub unsafe fn unset_alloc_bit_unsafe(object: ObjectReference) { + // FIXME: MallocSpace should use a local metadata to record allocated units. 
vo_bit::unset_vo_bit_unsafe(object); } diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs index 24f7384cd1..d6c0e136cd 100644 --- a/src/policy/markcompactspace.rs +++ b/src/policy/markcompactspace.rs @@ -232,7 +232,7 @@ impl MarkCompactSpace { ) -> ObjectReference { debug_assert!( crate::util::vo_bit::is_vo_bit_set(object), - "{:x}: alloc bit not set", + "{:x}: VO-bit not set", object ); if MarkCompactSpace::::test_and_mark(object) { @@ -248,7 +248,7 @@ impl MarkCompactSpace { ) -> ObjectReference { debug_assert!( crate::util::vo_bit::is_vo_bit_set(object), - "{:x}: alloc bit not set", + "{:x}: VO-bit not set", object ); // from this stage and onwards, mark bit is no longer needed @@ -376,7 +376,8 @@ impl MarkCompactSpace { start, end, ); for obj in linear_scan { - // clear the alloc bit + // clear the VO-bit + // FIXME: MarkCompact should use a local metadata to record allocated units. vo_bit::unset_vo_bit_for_addr(obj.to_address()); let forwarding_pointer = Self::get_header_forwarding_pointer(obj); @@ -390,7 +391,8 @@ impl MarkCompactSpace { // copy object trace!(" copy from {} to {}", obj, new_object); let end_of_new_object = VM::VMObjectModel::copy_to(obj, new_object, Address::ZERO); - // update alloc_bit, + // update VO-bit + // FIXME: MarkCompact should use a local metadata to record allocated units. vo_bit::set_vo_bit(new_object); to = new_object.to_address() + copied_size; debug_assert_eq!(end_of_new_object, to); diff --git a/src/policy/sft.rs b/src/policy/sft.rs index e347d4ba5d..7592e8e592 100644 --- a/src/policy/sft.rs +++ b/src/policy/sft.rs @@ -77,7 +77,7 @@ pub trait SFT { if !addr.is_mapped() { return false; } - // The `addr` is mapped. We use the global alloc bit to get the exact answer. + // The `addr` is mapped. We use the VO-bit to get the exact answer. 
vo_bit::is_vo_bit_set_for_addr(addr) } diff --git a/src/util/is_mmtk_object.rs b/src/util/is_mmtk_object.rs index 44beec3dc1..d8590a9014 100644 --- a/src/util/is_mmtk_object.rs +++ b/src/util/is_mmtk_object.rs @@ -1,4 +1,4 @@ -/// The region size (in bytes) of the `ALLOC_BIT` side metadata. +/// The region size (in bytes) of the `VO_BIT` side metadata. /// The VM can use this to check if an object is properly aligned. -pub const ALLOC_BIT_REGION_SIZE: usize = +pub const VO_BIT_REGION_SIZE: usize = 1usize << crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region; diff --git a/src/util/linear_scan.rs b/src/util/linear_scan.rs index c9ce60b557..e7cff5553f 100644 --- a/src/util/linear_scan.rs +++ b/src/util/linear_scan.rs @@ -41,6 +41,8 @@ impl fn next(&mut self) -> Option<::Item> { while self.cursor < self.end { let is_object = if ATOMIC_LOAD_ALLOC_BIT { + // FIXME: Any space that supports linear scan (such as MarkCompact space) should + // introduce its own allocation bit rather than using VO-bit. vo_bit::is_vo_bit_set_for_addr(self.cursor) } else { unsafe { vo_bit::is_vo_bit_set_for_addr_unsafe(self.cursor) } diff --git a/src/util/metadata/mod.rs b/src/util/metadata/mod.rs index 23d1150885..4e2004baaa 100644 --- a/src/util/metadata/mod.rs +++ b/src/util/metadata/mod.rs @@ -2,7 +2,7 @@ //! //! This module is designed to enable the implementation of a wide range of GC algorithms for VMs with various combinations of in-object and on-side space for GC-specific metadata (e.g. forwarding bits, marking bit, logging bit, etc.). //! -//! The new metadata design differentiates per-object metadata (e.g. forwarding-bits and marking-bit) from other types of metadata including per-address (e.g. alloc-bit) and per-X (where X != object size), because the per-object metadata can optionally be kept in the object headers. +//! The new metadata design differentiates per-object metadata (e.g. 
forwarding-bits and marking-bit) from other types of metadata including per-address and per-X (where X != object size), because the per-object metadata can optionally be kept in the object headers. //! //! MMTk acknowledges the VM-dependant nature of the in-object metadata, and asks the VM bindings to contribute by implementing the related parts in the ['ObjectModel'](crate::vm::ObjectModel). //! diff --git a/src/util/metadata/side_metadata/constants.rs b/src/util/metadata/side_metadata/constants.rs index f8d16fcbd9..dda9027633 100644 --- a/src/util/metadata/side_metadata/constants.rs +++ b/src/util/metadata/side_metadata/constants.rs @@ -11,7 +11,7 @@ use crate::util::Address; // reserved addresses such as 0x0. // XXXX: I updated the base address for 32 bit to 0x1000_0000. For what I tested on, the library // and the malloc heap often starts at 0x800_0000. If we start the metadata from the second 4Mb chunk (i.e. the chunk `[0x40_0000, 0x80_0000)`), -// we won't be guaranteed enough space before 0x800_0000. For example, the alloc bit is 1 bit per 4 bytes +// we won't be guaranteed enough space before 0x800_0000. For example, the VO-bit is 1 bit per 4 bytes // (1 word in 32bits), and it will take the address range of [0x40_000, 0x840_0000) which clashes with // the library/heap. So I move this to 0x1000_0000. // This is made public, as VM bingdings may need to use this. @@ -29,8 +29,8 @@ pub const GLOBAL_SIDE_METADATA_BASE_ADDRESS: Address = pub(crate) const GLOBAL_SIDE_METADATA_BASE_OFFSET: SideMetadataOffset = SideMetadataOffset::addr(GLOBAL_SIDE_METADATA_BASE_ADDRESS); -// Base address of alloc bit, public to VM bindings which may need to use this. -pub const ALLOC_SIDE_METADATA_ADDR: Address = crate::util::vo_bit::VO_BIT_SIDE_METADATA_ADDR; +// Base address of VO-bit, public to VM bindings which may need to use this. 
+pub const VO_BIT_SIDE_METADATA_ADDR: Address = crate::util::vo_bit::VO_BIT_SIDE_METADATA_ADDR; /// This constant represents the worst-case ratio of source data size to global side metadata. /// A value of 2 means the space required for global side metadata must be less than 1/4th of the source data. diff --git a/vmbindings/dummyvm/src/tests/conservatism.rs b/vmbindings/dummyvm/src/tests/conservatism.rs index d66de2d6fa..4ef542d83f 100644 --- a/vmbindings/dummyvm/src/tests/conservatism.rs +++ b/vmbindings/dummyvm/src/tests/conservatism.rs @@ -5,7 +5,7 @@ use crate::api::*; use crate::object_model::OBJECT_REF_OFFSET; use crate::tests::fixtures::{Fixture, SingleObject}; use mmtk::util::constants::LOG_BITS_IN_WORD; -use mmtk::util::is_mmtk_object::ALLOC_BIT_REGION_SIZE; +use mmtk::util::is_mmtk_object::VO_BIT_REGION_SIZE; use mmtk::util::*; lazy_static! { @@ -13,7 +13,7 @@ lazy_static! { } fn basic_filter(addr: Address) -> bool { - !addr.is_zero() && addr.as_usize() % ALLOC_BIT_REGION_SIZE == (OBJECT_REF_OFFSET % ALLOC_BIT_REGION_SIZE) + !addr.is_zero() && addr.as_usize() % VO_BIT_REGION_SIZE == (OBJECT_REF_OFFSET % VO_BIT_REGION_SIZE) } fn assert_filter_pass(addr: Address) { From 476a899970a7357107589c3ba3c7b48e2bef708d Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 13 Oct 2022 14:55:30 +0800 Subject: [PATCH 04/14] Fixed compilation error when vo_bit is not enabled --- src/plan/markcompact/global.rs | 6 +++--- src/plan/marksweep/global.rs | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 3230e8f124..8dbc8005c7 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -179,15 +179,15 @@ impl Plan for MarkCompact { impl MarkCompact { pub fn new(vm_map: &'static VMMap, mmapper: &'static Mmapper, options: Arc) -> Self { let mut heap = HeapMeta::new(&options); - // if vo_bit is enabled, ALLOC_SIDE_METADATA_SPEC will be added to + // if vo_bit is 
enabled, VO_BIT_SIDE_METADATA_SPEC will be added to // SideMetadataContext by default, so we don't need to add it here. #[cfg(feature = "vo_bit")] let global_metadata_specs = SideMetadataContext::new_global_specs(&[]); // if vo_bit is NOT enabled, - // we need to add ALLOC_SIDE_METADATA_SPEC to SideMetadataContext here. + // we need to add VO_BIT_SIDE_METADATA_SPEC to SideMetadataContext here. #[cfg(not(feature = "vo_bit"))] let global_metadata_specs = - SideMetadataContext::new_global_specs(&[ALLOC_SIDE_METADATA_SPEC]); + SideMetadataContext::new_global_specs(&[VO_BIT_SIDE_METADATA_SPEC]); let mc_space = MarkCompactSpace::new( "mark_compact_space", diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index 74a207245d..78ebea24fe 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -97,16 +97,16 @@ impl Plan for MarkSweep { impl MarkSweep { pub fn new(vm_map: &'static VMMap, mmapper: &'static Mmapper, options: Arc) -> Self { let heap = HeapMeta::new(&options); - // if vo_bit is enabled, ALLOC_SIDE_METADATA_SPEC will be added to + // if vo_bit is enabled, VO_BIT_SIDE_METADATA_SPEC will be added to // SideMetadataContext by default, so we don't need to add it here. #[cfg(feature = "vo_bit")] let global_metadata_specs = SideMetadataContext::new_global_specs(&[ACTIVE_CHUNK_METADATA_SPEC]); // if vo_bit is NOT enabled, - // we need to add ALLOC_SIDE_METADATA_SPEC to SideMetadataContext here. + // we need to add VO_BIT_SIDE_METADATA_SPEC to SideMetadataContext here. 
#[cfg(not(feature = "vo_bit"))] let global_metadata_specs = SideMetadataContext::new_global_specs(&[ - ALLOC_SIDE_METADATA_SPEC, + VO_BIT_SIDE_METADATA_SPEC, ACTIVE_CHUNK_METADATA_SPEC, ]); From df4f9a64eacc796eddcf657bed0eb5d047521db9 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 13 Oct 2022 15:40:16 +0800 Subject: [PATCH 05/14] Formatting --- src/plan/markcompact/global.rs | 4 ++-- src/plan/marksweep/global.rs | 4 ++-- src/policy/mallocspace/metadata.rs | 2 +- src/policy/sft.rs | 2 +- src/util/metadata/side_metadata/global.rs | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 8dbc8005c7..47c8178c3d 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -15,8 +15,6 @@ use crate::policy::space::Space; use crate::scheduler::gc_work::*; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; -#[cfg(not(feature = "vo_bit"))] -use crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC; use crate::util::copy::CopySemantics; use crate::util::heap::layout::heap_layout::Mmapper; use crate::util::heap::layout::heap_layout::VMMap; @@ -25,6 +23,8 @@ use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::{SideMetadataContext, SideMetadataSanity}; use crate::util::opaque_pointer::*; use crate::util::options::Options; +#[cfg(not(feature = "vo_bit"))] +use crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC; use crate::vm::VMBinding; use enum_map::EnumMap; diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index 78ebea24fe..df2231a2db 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -11,13 +11,13 @@ use crate::policy::mallocspace::MallocSpace; use crate::policy::space::Space; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; -#[cfg(not(feature = "vo_bit"))] -use crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC; use 
crate::util::heap::layout::heap_layout::Mmapper; use crate::util::heap::layout::heap_layout::VMMap; use crate::util::heap::HeapMeta; use crate::util::metadata::side_metadata::{SideMetadataContext, SideMetadataSanity}; use crate::util::options::Options; +#[cfg(not(feature = "vo_bit"))] +use crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC; use crate::util::VMWorkerThread; use crate::vm::VMBinding; use std::sync::Arc; diff --git a/src/policy/mallocspace/metadata.rs b/src/policy/mallocspace/metadata.rs index 67987330b9..da9bff1187 100644 --- a/src/policy/mallocspace/metadata.rs +++ b/src/policy/mallocspace/metadata.rs @@ -1,9 +1,9 @@ -use crate::util::vo_bit; use crate::util::conversions; use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; use crate::util::metadata::side_metadata; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::metadata::side_metadata::SideMetadataSpec; +use crate::util::vo_bit; use crate::util::Address; use crate::util::ObjectReference; use crate::vm::{ObjectModel, VMBinding}; diff --git a/src/policy/sft.rs b/src/policy/sft.rs index 7592e8e592..c742c9fa8e 100644 --- a/src/policy/sft.rs +++ b/src/policy/sft.rs @@ -1,8 +1,8 @@ use crate::plan::VectorObjectQueue; use crate::scheduler::GCWorker; +use crate::util::conversions; #[cfg(feature = "is_mmtk_object")] use crate::util::vo_bit; -use crate::util::conversions; use crate::util::*; use crate::vm::VMBinding; use std::marker::PhantomData; diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs index 67284b985c..3be2996ce9 100644 --- a/src/util/metadata/side_metadata/global.rs +++ b/src/util/metadata/side_metadata/global.rs @@ -1,10 +1,10 @@ use super::*; -#[cfg(feature = "vo_bit")] -use crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC; use crate::util::constants::{BYTES_IN_PAGE, LOG_BITS_IN_BYTE}; use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; use crate::util::memory; use 
crate::util::metadata::metadata_val_traits::*; +#[cfg(feature = "vo_bit")] +use crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC; use crate::util::Address; use num_traits::FromPrimitive; use std::fmt; From 5c93bd6350b08abc2ef0d34404247503d2c9b696 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 13 Oct 2022 15:45:00 +0800 Subject: [PATCH 06/14] Move vo_bit.rs into the metadata directory --- src/plan/markcompact/global.rs | 4 ++-- src/plan/marksweep/global.rs | 4 ++-- src/policy/copyspace.rs | 6 +++--- src/policy/immix/block.rs | 2 +- src/policy/immix/immixspace.rs | 6 +++--- src/policy/immortalspace.rs | 4 ++-- src/policy/largeobjectspace.rs | 8 ++++---- src/policy/lockfreeimmortalspace.rs | 2 +- src/policy/mallocspace/global.rs | 14 +++++++++----- src/policy/mallocspace/metadata.rs | 2 +- src/policy/markcompactspace.rs | 9 +++++---- src/policy/sft.rs | 2 +- src/util/alloc/immix_allocator.rs | 2 +- src/util/is_mmtk_object.rs | 2 +- src/util/linear_scan.rs | 2 +- src/util/metadata/mod.rs | 1 + src/util/metadata/side_metadata/constants.rs | 3 ++- src/util/metadata/side_metadata/global.rs | 2 +- src/util/{ => metadata}/vo_bit.rs | 0 src/util/mod.rs | 2 -- src/util/object_forwarding.rs | 2 +- 21 files changed, 42 insertions(+), 37 deletions(-) rename src/util/{ => metadata}/vo_bit.rs (100%) diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 47c8178c3d..27dde6f1b4 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -21,10 +21,10 @@ use crate::util::heap::layout::heap_layout::VMMap; use crate::util::heap::HeapMeta; use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::{SideMetadataContext, SideMetadataSanity}; +#[cfg(not(feature = "vo_bit"))] +use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC; use crate::util::opaque_pointer::*; use crate::util::options::Options; -#[cfg(not(feature = "vo_bit"))] -use crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC; use crate::vm::VMBinding; 
use enum_map::EnumMap; diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index df2231a2db..922c43c66b 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -15,9 +15,9 @@ use crate::util::heap::layout::heap_layout::Mmapper; use crate::util::heap::layout::heap_layout::VMMap; use crate::util::heap::HeapMeta; use crate::util::metadata::side_metadata::{SideMetadataContext, SideMetadataSanity}; -use crate::util::options::Options; #[cfg(not(feature = "vo_bit"))] -use crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC; +use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC; +use crate::util::options::Options; use crate::util::VMWorkerThread; use crate::vm::VMBinding; use std::sync::Arc; diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs index 1ee92c01da..e43617dc98 100644 --- a/src/policy/copyspace.rs +++ b/src/policy/copyspace.rs @@ -47,7 +47,7 @@ impl SFT for CopySpace { fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { #[cfg(feature = "vo_bit")] - crate::util::vo_bit::set_vo_bit(_object); + crate::util::metadata::vo_bit::set_vo_bit(_object); } #[inline(always)] @@ -196,7 +196,7 @@ impl CopySpace { if self.common.contiguous { // If we have allocated something into this space, we need to clear its VO-bit. 
if current_chunk != self.common.start { - crate::util::vo_bit::bzero_vo_bit( + crate::util::metadata::vo_bit::bzero_vo_bit( self.common.start, current_chunk + BYTES_IN_CHUNK - self.common.start, ); @@ -231,7 +231,7 @@ impl CopySpace { #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::vo_bit::is_vo_bit_set(object), + crate::util::metadata::vo_bit::is_vo_bit_set(object), "{:x}: VO-bit not set", object ); diff --git a/src/policy/immix/block.rs b/src/policy/immix/block.rs index ee32e86783..a292442c7e 100644 --- a/src/policy/immix/block.rs +++ b/src/policy/immix/block.rs @@ -184,7 +184,7 @@ impl Block { #[inline] pub fn deinit(&self) { #[cfg(feature = "vo_bit")] - crate::util::vo_bit::bzero_vo_bit(self.start(), Self::BYTES); + crate::util::metadata::vo_bit::bzero_vo_bit(self.start(), Self::BYTES); self.set_state(BlockState::Unallocated); } diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index fe335dd419..489e91d7c3 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -77,7 +77,7 @@ impl SFT for ImmixSpace { } fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { #[cfg(feature = "vo_bit")] - crate::util::vo_bit::set_vo_bit(_object); + crate::util::metadata::vo_bit::set_vo_bit(_object); } #[inline(always)] fn sft_trace_object( @@ -390,7 +390,7 @@ impl ImmixSpace { ) -> ObjectReference { #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::vo_bit::is_vo_bit_set(object), + crate::util::metadata::vo_bit::is_vo_bit_set(object), "{:x}: VO-bit not set", object ); @@ -483,7 +483,7 @@ impl ImmixSpace { object } else { #[cfg(feature = "vo_bit")] - crate::util::vo_bit::unset_vo_bit(object); + crate::util::metadata::vo_bit::unset_vo_bit(object); ForwardingWord::forward_object::(object, semantics, copy_context) }; debug_assert_eq!( diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs index c2c853476c..378c4e6398 100644 --- a/src/policy/immortalspace.rs +++ 
b/src/policy/immortalspace.rs @@ -70,7 +70,7 @@ impl SFT for ImmortalSpace { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); } #[cfg(feature = "vo_bit")] - crate::util::vo_bit::set_vo_bit(object); + crate::util::metadata::vo_bit::set_vo_bit(object); } #[inline(always)] fn sft_trace_object( @@ -209,7 +209,7 @@ impl ImmortalSpace { ) -> ObjectReference { #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::vo_bit::is_vo_bit_set(object), + crate::util::metadata::vo_bit::is_vo_bit_set(object), "{:x}: VO-bit not set", object ); diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index 4eba231502..999caf7477 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -73,7 +73,7 @@ impl SFT for LargeObjectSpace { } #[cfg(feature = "vo_bit")] - crate::util::vo_bit::set_vo_bit(object); + crate::util::metadata::vo_bit::set_vo_bit(object); let cell = VM::VMObjectModel::object_start_ref(object); self.treadmill.add_to_treadmill(cell, alloc); } @@ -205,7 +205,7 @@ impl LargeObjectSpace { ) -> ObjectReference { #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::vo_bit::is_vo_bit_set(object), + crate::util::metadata::vo_bit::is_vo_bit_set(object), "{:x}: VO-bit not set", object ); @@ -235,14 +235,14 @@ impl LargeObjectSpace { for cell in self.treadmill.collect_nursery() { // println!("- cn {}", cell); #[cfg(feature = "vo_bit")] - crate::util::vo_bit::unset_vo_bit_for_addr(cell); + crate::util::metadata::vo_bit::unset_vo_bit_for_addr(cell); self.pr.release_pages(get_super_page(cell)); } } else { for cell in self.treadmill.collect() { // println!("- ts {}", cell); #[cfg(feature = "vo_bit")] - crate::util::vo_bit::unset_vo_bit_for_addr(cell); + crate::util::metadata::vo_bit::unset_vo_bit_for_addr(cell); self.pr.release_pages(get_super_page(cell)); } } diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs index e0f0f82258..e8c462c1de 100644 --- 
a/src/policy/lockfreeimmortalspace.rs +++ b/src/policy/lockfreeimmortalspace.rs @@ -59,7 +59,7 @@ impl SFT for LockFreeImmortalSpace { } fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { #[cfg(feature = "vo_bit")] - crate::util::vo_bit::set_vo_bit(_object); + crate::util::metadata::vo_bit::set_vo_bit(_object); } fn sft_trace_object( &self, diff --git a/src/policy/mallocspace/global.rs b/src/policy/mallocspace/global.rs index 1886cd2f29..1bc247eb66 100644 --- a/src/policy/mallocspace/global.rs +++ b/src/policy/mallocspace/global.rs @@ -482,23 +482,27 @@ impl MallocSpace { let chunk_end = chunk_start + BYTES_IN_CHUNK; debug_assert!( - crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region + crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region == mark_bit_spec.log_bytes_in_region, "Alloc-bit and mark-bit metadata have different minimum object sizes!" ); // For bulk xor'ing 128-bit vectors on architectures with vector instructions // Each bit represents an object of LOG_MIN_OBJ_SIZE size - let bulk_load_size: usize = - 128 * (1 << crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region); + let bulk_load_size: usize = 128 + * (1 << crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region); // The start of a possibly empty page. This will be updated during the sweeping, and always points to the next page of last live objects. let mut empty_page_start = Address::ZERO; // Scan the chunk by every 'bulk_load_size' region. 
while address < chunk_end { - let alloc_128: u128 = - unsafe { load128(&crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC, address) }; + let alloc_128: u128 = unsafe { + load128( + &crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC, + address, + ) + }; let mark_128: u128 = unsafe { load128(&mark_bit_spec, address) }; // Check if there are dead objects in the bulk loaded region diff --git a/src/policy/mallocspace/metadata.rs b/src/policy/mallocspace/metadata.rs index da9bff1187..6a4660c4e5 100644 --- a/src/policy/mallocspace/metadata.rs +++ b/src/policy/mallocspace/metadata.rs @@ -3,7 +3,7 @@ use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; use crate::util::metadata::side_metadata; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::metadata::side_metadata::SideMetadataSpec; -use crate::util::vo_bit; +use crate::util::metadata::vo_bit; use crate::util::Address; use crate::util::ObjectReference; use crate::vm::{ObjectModel, VMBinding}; diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs index d6c0e136cd..f395083cf9 100644 --- a/src/policy/markcompactspace.rs +++ b/src/policy/markcompactspace.rs @@ -11,7 +11,8 @@ use crate::util::heap::layout::heap_layout::{Mmapper, VMMap}; use crate::util::heap::{HeapMeta, MonotonePageResource, PageResource, VMRequest}; use crate::util::metadata::extract_side_metadata; use crate::util::metadata::side_metadata::{SideMetadataContext, SideMetadataSpec}; -use crate::util::{vo_bit, Address, ObjectReference}; +use crate::util::metadata::vo_bit; +use crate::util::{Address, ObjectReference}; use crate::{vm::*, ObjectQueue}; use atomic::Ordering; @@ -57,7 +58,7 @@ impl SFT for MarkCompactSpace { } fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) { - crate::util::vo_bit::set_vo_bit(object); + crate::util::metadata::vo_bit::set_vo_bit(object); } #[cfg(feature = "sanity")] @@ -231,7 +232,7 @@ impl MarkCompactSpace { object: ObjectReference, ) 
-> ObjectReference { debug_assert!( - crate::util::vo_bit::is_vo_bit_set(object), + crate::util::metadata::vo_bit::is_vo_bit_set(object), "{:x}: VO-bit not set", object ); @@ -247,7 +248,7 @@ impl MarkCompactSpace { object: ObjectReference, ) -> ObjectReference { debug_assert!( - crate::util::vo_bit::is_vo_bit_set(object), + crate::util::metadata::vo_bit::is_vo_bit_set(object), "{:x}: VO-bit not set", object ); diff --git a/src/policy/sft.rs b/src/policy/sft.rs index c742c9fa8e..10fff0d9f7 100644 --- a/src/policy/sft.rs +++ b/src/policy/sft.rs @@ -2,7 +2,7 @@ use crate::plan::VectorObjectQueue; use crate::scheduler::GCWorker; use crate::util::conversions; #[cfg(feature = "is_mmtk_object")] -use crate::util::vo_bit; +use crate::util::metadata::vo_bit; use crate::util::*; use crate::vm::VMBinding; use std::marker::PhantomData; diff --git a/src/util/alloc/immix_allocator.rs b/src/util/alloc/immix_allocator.rs index 72430e0260..3f9b41620e 100644 --- a/src/util/alloc/immix_allocator.rs +++ b/src/util/alloc/immix_allocator.rs @@ -258,7 +258,7 @@ impl ImmixAllocator { self.tls ); #[cfg(feature = "vo_bit")] - crate::util::vo_bit::bzero_vo_bit(self.cursor, self.limit - self.cursor); + crate::util::metadata::vo_bit::bzero_vo_bit(self.cursor, self.limit - self.cursor); crate::util::memory::zero(self.cursor, self.limit - self.cursor); debug_assert!( align_allocation_no_fill::(self.cursor, align, offset) + size <= self.limit diff --git a/src/util/is_mmtk_object.rs b/src/util/is_mmtk_object.rs index d8590a9014..05d85472c5 100644 --- a/src/util/is_mmtk_object.rs +++ b/src/util/is_mmtk_object.rs @@ -1,4 +1,4 @@ /// The region size (in bytes) of the `VO_BIT` side metadata. /// The VM can use this to check if an object is properly aligned. 
pub const VO_BIT_REGION_SIZE: usize = - 1usize << crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region; + 1usize << crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region; diff --git a/src/util/linear_scan.rs b/src/util/linear_scan.rs index e7cff5553f..52c44c5e5b 100644 --- a/src/util/linear_scan.rs +++ b/src/util/linear_scan.rs @@ -1,4 +1,4 @@ -use crate::util::vo_bit; +use crate::util::metadata::vo_bit; use crate::util::Address; use crate::util::ObjectReference; use crate::vm::ObjectModel; diff --git a/src/util/metadata/mod.rs b/src/util/metadata/mod.rs index 4e2004baaa..d0641b343d 100644 --- a/src/util/metadata/mod.rs +++ b/src/util/metadata/mod.rs @@ -225,5 +225,6 @@ pub mod side_metadata; pub use metadata_val_traits::*; pub(crate) mod log_bit; +pub(crate) mod vo_bit; pub use global::*; diff --git a/src/util/metadata/side_metadata/constants.rs b/src/util/metadata/side_metadata/constants.rs index dda9027633..e0b9a099be 100644 --- a/src/util/metadata/side_metadata/constants.rs +++ b/src/util/metadata/side_metadata/constants.rs @@ -30,7 +30,8 @@ pub(crate) const GLOBAL_SIDE_METADATA_BASE_OFFSET: SideMetadataOffset = SideMetadataOffset::addr(GLOBAL_SIDE_METADATA_BASE_ADDRESS); // Base address of VO-bit, public to VM bindings which may need to use this. -pub const VO_BIT_SIDE_METADATA_ADDR: Address = crate::util::vo_bit::VO_BIT_SIDE_METADATA_ADDR; +pub const VO_BIT_SIDE_METADATA_ADDR: Address = + crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_ADDR; /// This constant represents the worst-case ratio of source data size to global side metadata. /// A value of 2 means the space required for global side metadata must be less than 1/4th of the source data. 
diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs index 3be2996ce9..2512c2d6b3 100644 --- a/src/util/metadata/side_metadata/global.rs +++ b/src/util/metadata/side_metadata/global.rs @@ -4,7 +4,7 @@ use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; use crate::util::memory; use crate::util::metadata::metadata_val_traits::*; #[cfg(feature = "vo_bit")] -use crate::util::vo_bit::VO_BIT_SIDE_METADATA_SPEC; +use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC; use crate::util::Address; use num_traits::FromPrimitive; use std::fmt; diff --git a/src/util/vo_bit.rs b/src/util/metadata/vo_bit.rs similarity index 100% rename from src/util/vo_bit.rs rename to src/util/metadata/vo_bit.rs diff --git a/src/util/mod.rs b/src/util/mod.rs index b06006eb63..cf659953e1 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -63,8 +63,6 @@ pub(crate) mod statistics; pub(crate) mod test_util; /// A treadmill implementation. pub(crate) mod treadmill; -/// Valid object bit -pub(crate) mod vo_bit; // These modules are private. They are only used by other util modules. 
diff --git a/src/util/object_forwarding.rs b/src/util/object_forwarding.rs index 3cb10ce59a..e3738bc79e 100644 --- a/src/util/object_forwarding.rs +++ b/src/util/object_forwarding.rs @@ -82,7 +82,7 @@ pub fn forward_object( ) -> ObjectReference { let new_object = VM::VMObjectModel::copy(object, semantics, copy_context); #[cfg(feature = "vo_bit")] - crate::util::vo_bit::set_vo_bit(new_object); + crate::util::metadata::vo_bit::set_vo_bit(new_object); if let Some(shift) = forwarding_bits_offset_in_forwarding_pointer::() { VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC.store_atomic::( object, From 281107d0e226e8acb2e0ffc16902a3d3410ab8bf Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 13 Oct 2022 21:22:14 +0800 Subject: [PATCH 07/14] Minor format fix --- src/util/metadata/vo_bit.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util/metadata/vo_bit.rs b/src/util/metadata/vo_bit.rs index a5bdecba6e..28752ef58e 100644 --- a/src/util/metadata/vo_bit.rs +++ b/src/util/metadata/vo_bit.rs @@ -14,7 +14,7 @@ use crate::util::metadata::side_metadata::SideMetadataSpec; use crate::util::Address; use crate::util::ObjectReference; -/// An VO-bit is required per min-object-size aligned address , rather than per object, and can only exist as side metadata. +/// A VO-bit is required per min-object-size aligned address, rather than per object, and can only exist as side metadata. pub(crate) const VO_BIT_SIDE_METADATA_SPEC: SideMetadataSpec = crate::util::metadata::side_metadata::spec_defs::VO_BIT; From c4c0ac31ebe0211066fa4c0848910b36378a2349 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Fri, 14 Oct 2022 14:55:38 +0800 Subject: [PATCH 08/14] VO-bits are only accessible using ObjectReference. It is how it is intended to be used. It is set at addresses pointed by `ObjectReference`, not the beginning of an object.
--- src/policy/largeobjectspace.rs | 8 ++++++-- src/policy/mallocspace/metadata.rs | 2 +- src/policy/markcompactspace.rs | 2 +- src/policy/sft.rs | 2 +- src/util/linear_scan.rs | 4 ++-- src/util/metadata/vo_bit.rs | 25 ++++++------------------- 6 files changed, 17 insertions(+), 26 deletions(-) diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index 999caf7477..62af8fe5d4 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -231,18 +231,22 @@ impl LargeObjectSpace { // FIXME: borrow checker fighting // didn't call self.release_multiple_pages // so the compiler knows I'm borrowing two different fields + + // FIXME: Use local metadata instead of VO-bit. + // The `unsafe { cell.to_object_reference() }` below indicates that VO-bit is not what we + // want here. VO-bit is set at ObjectReference, which is different from the cell address. if sweep_nursery { for cell in self.treadmill.collect_nursery() { // println!("- cn {}", cell); #[cfg(feature = "vo_bit")] - crate::util::metadata::vo_bit::unset_vo_bit_for_addr(cell); + crate::util::metadata::vo_bit::unset_vo_bit(unsafe { cell.to_object_reference() }); self.pr.release_pages(get_super_page(cell)); } } else { for cell in self.treadmill.collect() { // println!("- ts {}", cell); #[cfg(feature = "vo_bit")] - crate::util::metadata::vo_bit::unset_vo_bit_for_addr(cell); + crate::util::metadata::vo_bit::unset_vo_bit(unsafe { cell.to_object_reference() }); self.pr.release_pages(get_super_page(cell)); } } diff --git a/src/policy/mallocspace/metadata.rs b/src/policy/mallocspace/metadata.rs index 6a4660c4e5..550e12e51d 100644 --- a/src/policy/mallocspace/metadata.rs +++ b/src/policy/mallocspace/metadata.rs @@ -163,7 +163,7 @@ pub fn is_alloced_by_malloc(object: ObjectReference) -> bool { /// If not, it will try to load the alloc bit for the address rounded down to the metadata's granularity. 
pub fn has_object_alloced_by_malloc(addr: Address) -> bool { // FIXME: MallocSpace should use a local metadata to record allocated units. - is_meta_space_mapped_for_address(addr) && vo_bit::is_vo_bit_set_for_addr(addr) + is_meta_space_mapped_for_address(addr) && vo_bit::is_vo_bit_set(unsafe { addr.to_object_reference() }) } pub fn is_marked(object: ObjectReference, ordering: Ordering) -> bool { diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs index f395083cf9..87c2194c5f 100644 --- a/src/policy/markcompactspace.rs +++ b/src/policy/markcompactspace.rs @@ -379,7 +379,7 @@ impl MarkCompactSpace { for obj in linear_scan { // clear the VO-bit // FIXME: MarkCompact should use a local metadata to record allocated units. - vo_bit::unset_vo_bit_for_addr(obj.to_address()); + vo_bit::unset_vo_bit(obj); let forwarding_pointer = Self::get_header_forwarding_pointer(obj); diff --git a/src/policy/sft.rs b/src/policy/sft.rs index 10fff0d9f7..d6dc4fa558 100644 --- a/src/policy/sft.rs +++ b/src/policy/sft.rs @@ -78,7 +78,7 @@ pub trait SFT { return false; } // The `addr` is mapped. We use the VO-bit to get the exact answer. - vo_bit::is_vo_bit_set_for_addr(addr) + vo_bit::is_vo_bit_set(unsafe { addr.to_object_reference() }) } /// Initialize object metadata (in the header, or in the side metadata). diff --git a/src/util/linear_scan.rs b/src/util/linear_scan.rs index 52c44c5e5b..33757e28a4 100644 --- a/src/util/linear_scan.rs +++ b/src/util/linear_scan.rs @@ -43,9 +43,9 @@ impl let is_object = if ATOMIC_LOAD_ALLOC_BIT { // FIXME: Any space that supports linear scan (such as MarkCompact space) should // introduce its own allocation bit rather than using VO-bit. 
- vo_bit::is_vo_bit_set_for_addr(self.cursor) + vo_bit::is_vo_bit_set(unsafe { self.cursor.to_object_reference() }) } else { - unsafe { vo_bit::is_vo_bit_set_for_addr_unsafe(self.cursor) } + unsafe { vo_bit::is_vo_bit_set_unsafe(self.cursor.to_object_reference()) } }; if is_object { diff --git a/src/util/metadata/vo_bit.rs b/src/util/metadata/vo_bit.rs index 28752ef58e..7522a99a01 100644 --- a/src/util/metadata/vo_bit.rs +++ b/src/util/metadata/vo_bit.rs @@ -25,20 +25,15 @@ pub fn set_vo_bit(object: ObjectReference) { VO_BIT_SIDE_METADATA_SPEC.store_atomic::(object.to_address(), 1, Ordering::SeqCst); } -pub fn unset_vo_bit_for_addr(address: Address) { - debug_assert!( - is_vo_bit_set_for_addr(address), - "{:x}: VO-bit not set", - address - ); - VO_BIT_SIDE_METADATA_SPEC.store_atomic::(address, 0, Ordering::SeqCst); -} - pub fn unset_vo_bit(object: ObjectReference) { debug_assert!(is_vo_bit_set(object), "{:x}: VO-bit not set", object); VO_BIT_SIDE_METADATA_SPEC.store_atomic::(object.to_address(), 0, Ordering::SeqCst); } +pub fn is_vo_bit_set(object: ObjectReference) -> bool { + VO_BIT_SIDE_METADATA_SPEC.load_atomic::(object.to_address(), Ordering::SeqCst) == 1 +} + /// # Safety /// /// This is unsafe: check the comment on `side_metadata::store` @@ -48,20 +43,12 @@ pub unsafe fn unset_vo_bit_unsafe(object: ObjectReference) { VO_BIT_SIDE_METADATA_SPEC.store::(object.to_address(), 0); } -pub fn is_vo_bit_set(object: ObjectReference) -> bool { - is_vo_bit_set_for_addr(object.to_address()) -} - -pub fn is_vo_bit_set_for_addr(address: Address) -> bool { - VO_BIT_SIDE_METADATA_SPEC.load_atomic::(address, Ordering::SeqCst) == 1 -} - /// # Safety /// /// This is unsafe: check the comment on `side_metadata::load` /// -pub unsafe fn is_vo_bit_set_for_addr_unsafe(address: Address) -> bool { - VO_BIT_SIDE_METADATA_SPEC.load::(address) == 1 +pub unsafe fn is_vo_bit_set_unsafe(object: ObjectReference) -> bool { + VO_BIT_SIDE_METADATA_SPEC.load::(object.to_address()) == 1 } pub fn 
bzero_vo_bit(start: Address, size: usize) { From 9d579e0d99dd8c9b13e13fd7792dfd5d700d1280 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Fri, 14 Oct 2022 16:28:22 +0800 Subject: [PATCH 09/14] Remove is_mmtk_object feature in favor for vo_bit vo_bit is designed for the very purpose of is_mmtk_object, so we don't need another feature for that. --- Cargo.toml | 5 +- src/memory_manager.rs | 48 --------- src/policy/mallocspace/global.rs | 10 +- src/policy/mallocspace/metadata.rs | 13 +-- src/policy/sft.rs | 18 ++-- src/util/is_mmtk_object.rs | 4 - src/util/metadata/mod.rs | 5 +- src/util/metadata/vo_bit.rs | 104 +++++++++++++++++-- src/util/mod.rs | 2 - vmbindings/dummyvm/Cargo.toml | 2 +- vmbindings/dummyvm/src/api.rs | 6 +- vmbindings/dummyvm/src/tests/conservatism.rs | 64 ++++++------ vmbindings/dummyvm/src/tests/mod.rs | 2 +- 13 files changed, 161 insertions(+), 122 deletions(-) delete mode 100644 src/util/is_mmtk_object.rs diff --git a/Cargo.toml b/Cargo.toml index fc1f1c5c6b..16323370a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -78,12 +78,9 @@ ro_space = [] # TODO: This is not properly implemented yet. We currently use an immortal space instead, and all our spaces have execution permission at the moment. code_space = [] -# Valid-object bit. +# Valid-object bit, useful for conservative garbage collection. vo_bit = [] -# conservative garbage collection support -is_mmtk_object = ["vo_bit"] - # The following two features are useful for using Immix for VMs that do not support moving GC. # Disable defragmentation for ImmixSpace. This makes Immix a non-moving plan. diff --git a/src/memory_manager.rs b/src/memory_manager.rs index 730fb21e79..d3a936766b 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -570,54 +570,6 @@ pub fn is_live_object(object: ObjectReference) -> bool { object.is_live() } -/// Check if `addr` is the address of an object reference to an MMTk object. -/// -/// Concretely: -/// 1. 
Return true if `addr.to_object_reference()` is a valid object reference to an object in any -/// space in MMTk. -/// 2. Also return true if there exists an `objref: ObjectReference` such that -/// - `objref` is a valid object reference to an object in any space in MMTk, and -/// - `lo <= objref.to_address() < hi`, where -/// - `lo = addr.align_down(VO_BIT_REGION_SIZE)` and -/// - `hi = lo + VO_BIT_REGION_SIZE` and -/// - `VO_BIT_REGION_SIZE` is [`crate::util::is_mmtk_object::VO_BIT_REGION_SIZE`]. -/// It is the byte granularity of the VO-bit. -/// 3. Return false otherwise. This function never panics. -/// -/// Case 2 means **this function is imprecise for misaligned addresses**. -/// This function uses the VO-bit side metadata, i.e. a bitmap. -/// For space efficiency, each bit of the bitmap governs a small region of memory. -/// The size of a region is currently defined as the [minimum object size](crate::util::constants::MIN_OBJECT_SIZE), -/// which is currently defined as the [word size](crate::util::constants::BYTES_IN_WORD), -/// which is 4 bytes on 32-bit systems or 8 bytes on 64-bit systems. -/// The alignment of a region is also the region size. -/// If a VO-bit is `1`, the bitmap cannot tell which address within the 4-byte or 8-byte region -/// is the valid object reference. -/// Therefore, if the input `addr` is not properly aligned, but is close to a valid object -/// reference, this function may still return true. -/// -/// For the reason above, the VM **must check if `addr` is properly aligned** before calling this -/// function. For most VMs, valid object references are always aligned to the word size, so -/// checking `addr.is_aligned_to(BYTES_IN_WORD)` should usually work. If you are paranoid, you can -/// always check against [`crate::util::is_mmtk_object::VO_BIT_REGION_SIZE`]. -/// -/// This function is useful for conservative root scanning. 
The VM can iterate through all words in -/// a stack, filter out zeros, misaligned words, obviously out-of-range words (such as addresses -/// greater than `0x0000_7fff_ffff_ffff` on Linux on x86_64), and use this function to deside if the -/// word is really a reference. -/// -/// Note: This function has special behaviors if the VM space (enabled by the `vm_space` feature) -/// is present. See `crate::plan::global::BasePlan::vm_space`. -/// -/// Argument: -/// * `addr`: An arbitrary address. -#[cfg(feature = "is_mmtk_object")] -pub fn is_mmtk_object(addr: Address) -> bool { - use crate::mmtk::SFT_MAP; - use crate::policy::sft_map::SFTMap; - SFT_MAP.get_checked(addr).is_mmtk_object(addr) -} - /// Return true if the `object` lies in a region of memory where /// - only MMTk can allocate into, or /// - only MMTk's delegated memory allocator (such as a malloc implementation) can allocate into diff --git a/src/policy/mallocspace/global.rs b/src/policy/mallocspace/global.rs index 1bc247eb66..79f0f47b0b 100644 --- a/src/policy/mallocspace/global.rs +++ b/src/policy/mallocspace/global.rs @@ -78,13 +78,13 @@ impl SFT for MallocSpace { } /// For malloc space, we just use the side metadata. - #[cfg(feature = "is_mmtk_object")] + #[cfg(feature = "vo_bit")] #[inline(always)] - fn is_mmtk_object(&self, addr: Address) -> bool { - debug_assert!(!addr.is_zero()); + fn is_valid_mmtk_object(&self, object: ObjectReference) -> bool { + debug_assert!(!object.is_null()); // `addr` cannot be mapped by us. It should be mapped by the malloc library. 
- debug_assert!(!addr.is_mapped()); - has_object_alloced_by_malloc(addr) + debug_assert!(!object.to_address().is_mapped()); + is_alloced_by_malloc(object) } fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) { diff --git a/src/policy/mallocspace/metadata.rs b/src/policy/mallocspace/metadata.rs index 550e12e51d..d3dcc75e58 100644 --- a/src/policy/mallocspace/metadata.rs +++ b/src/policy/mallocspace/metadata.rs @@ -152,18 +152,15 @@ pub fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size: usize } } -/// Check if a given object was allocated by malloc -pub fn is_alloced_by_malloc(object: ObjectReference) -> bool { - has_object_alloced_by_malloc(object.to_address()) -} - -/// Check if there is an object allocated by malloc at the address. +/// Check if there is an object allocated by malloc at the address of `object`. /// /// This function doesn't check if `addr` is aligned. /// If not, it will try to load the alloc bit for the address rounded down to the metadata's granularity. -pub fn has_object_alloced_by_malloc(addr: Address) -> bool { +pub fn is_alloced_by_malloc(object: ObjectReference) -> bool { // FIXME: MallocSpace should use a local metadata to record allocated units. - is_meta_space_mapped_for_address(addr) && vo_bit::is_vo_bit_set(unsafe { addr.to_object_reference() }) + // The VO-bit can be cleared explicitly by the VM. If we don't introduce another metadata, + // MallocSpace will not be able to free objects. 
+ is_meta_space_mapped_for_address(object.to_address()) && vo_bit::is_vo_bit_set(object) } pub fn is_marked(object: ObjectReference, ordering: Ordering) -> bool { diff --git a/src/policy/sft.rs b/src/policy/sft.rs index d6dc4fa558..8f233e7bf1 100644 --- a/src/policy/sft.rs +++ b/src/policy/sft.rs @@ -1,7 +1,7 @@ use crate::plan::VectorObjectQueue; use crate::scheduler::GCWorker; use crate::util::conversions; -#[cfg(feature = "is_mmtk_object")] +#[cfg(feature = "vo_bit")] use crate::util::metadata::vo_bit; use crate::util::*; use crate::vm::VMBinding; @@ -67,18 +67,18 @@ pub trait SFT { /// This default implementation works for all spaces that use MMTk's mapper to allocate memory. /// Some spaces, like `MallocSpace`, use third-party libraries to allocate memory. /// Such spaces needs to override this method. - #[cfg(feature = "is_mmtk_object")] + #[cfg(feature = "vo_bit")] #[inline(always)] - fn is_mmtk_object(&self, addr: Address) -> bool { - // Having found the SFT means the `addr` is in one of our spaces. + fn is_valid_mmtk_object(&self, object: ObjectReference) -> bool { + // Having found the SFT means the `object` is in one of our spaces. // Although the SFT map is allocated eagerly when the space is contiguous, // the pages of the space itself are acquired on demand. - // Therefore, the page of `addr` may not have been mapped, yet. - if !addr.is_mapped() { + // Therefore, the page of `object` may not have been mapped, yet. + if !object.to_address().is_mapped() { return false; } // The `addr` is mapped. We use the VO-bit to get the exact answer. - vo_bit::is_vo_bit_set(unsafe { addr.to_object_reference() }) + vo_bit::is_vo_bit_set(object) } /// Initialize object metadata (in the header, or in the side metadata). 
@@ -145,9 +145,9 @@ impl SFT for EmptySpaceSFT { fn is_in_space(&self, _object: ObjectReference) -> bool { false } - #[cfg(feature = "is_mmtk_object")] + #[cfg(feature = "vo_bit")] #[inline(always)] - fn is_mmtk_object(&self, _addr: Address) -> bool { + fn is_valid_mmtk_object(&self, _object: ObjectReference) -> bool { false } diff --git a/src/util/is_mmtk_object.rs b/src/util/is_mmtk_object.rs deleted file mode 100644 index 05d85472c5..0000000000 --- a/src/util/is_mmtk_object.rs +++ /dev/null @@ -1,4 +0,0 @@ -/// The region size (in bytes) of the `VO_BIT` side metadata. -/// The VM can use this to check if an object is properly aligned. -pub const VO_BIT_REGION_SIZE: usize = - 1usize << crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region; diff --git a/src/util/metadata/mod.rs b/src/util/metadata/mod.rs index d0641b343d..2ff48a237a 100644 --- a/src/util/metadata/mod.rs +++ b/src/util/metadata/mod.rs @@ -225,6 +225,9 @@ pub mod side_metadata; pub use metadata_val_traits::*; pub(crate) mod log_bit; -pub(crate) mod vo_bit; + +// FIXME: vo_bit should only be enabled if the "vo_bit" feature is on. +// Currently it is used in MallocSpace and MarkCompactSpace. +pub mod vo_bit; pub use global::*; diff --git a/src/util/metadata/vo_bit.rs b/src/util/metadata/vo_bit.rs index 7522a99a01..e6ec2d5136 100644 --- a/src/util/metadata/vo_bit.rs +++ b/src/util/metadata/vo_bit.rs @@ -7,9 +7,24 @@ //! //! The main purpose of VO-bit is supporting conservative GC. It is the canonical source of //! information about whether there is an object in the MMTk heap at any given address. +//! +//! The VO-bit has the granularity of one bit per minimum object alignment. Each bit governs the +//! region of `lo <= addr < hi`, where +//! - `lo = addr.align_down(VO_BIT_REGION_SIZE)` +//! - `hi = lo + VO_BIT_REGION_SIZE` +//! - The constant [`VO_BIT_REGION_SIZE`] is size of the region (in bytes) each bit governs. +//! +//! 
Because of the granularity, if the user wants to check if an *arbitrary* address points to a +//! valid object, it must check if the address is properly aligned. use atomic::Ordering; +#[cfg(feature = "vo_bit")] +// Eventually the entire `vo_bit` module will be guarded by this feature. +use crate::mmtk::SFT_MAP; +#[cfg(feature = "vo_bit")] +// Eventually the entire `vo_bit` module will be guarded by this feature. +use crate::policy::sft_map::SFTMap; use crate::util::metadata::side_metadata::SideMetadataSpec; use crate::util::Address; use crate::util::ObjectReference; @@ -18,19 +33,30 @@ use crate::util::ObjectReference; pub(crate) const VO_BIT_SIDE_METADATA_SPEC: SideMetadataSpec = crate::util::metadata::side_metadata::spec_defs::VO_BIT; -pub const VO_BIT_SIDE_METADATA_ADDR: Address = VO_BIT_SIDE_METADATA_SPEC.get_absolute_offset(); +pub(crate) const VO_BIT_SIDE_METADATA_ADDR: Address = + VO_BIT_SIDE_METADATA_SPEC.get_absolute_offset(); + +/// The region size (in bytes) of the `VO_BIT` side metadata. +/// +/// Currently, it is set to the [minimum object size](crate::util::constants::MIN_OBJECT_SIZE), +/// which is currently defined as the [word size](crate::util::constants::BYTES_IN_WORD). +/// +/// The VM can use this to check if an object is properly aligned. +#[cfg(feature = "vo_bit")] // Eventually the entire `vo_bit` module will be guarded by this feature. 
+pub const VO_BIT_REGION_SIZE: usize = + 1usize << crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region; -pub fn set_vo_bit(object: ObjectReference) { +pub(crate) fn set_vo_bit(object: ObjectReference) { debug_assert!(!is_vo_bit_set(object), "{:x}: VO-bit already set", object); VO_BIT_SIDE_METADATA_SPEC.store_atomic::(object.to_address(), 1, Ordering::SeqCst); } -pub fn unset_vo_bit(object: ObjectReference) { +pub(crate) fn unset_vo_bit(object: ObjectReference) { debug_assert!(is_vo_bit_set(object), "{:x}: VO-bit not set", object); VO_BIT_SIDE_METADATA_SPEC.store_atomic::(object.to_address(), 0, Ordering::SeqCst); } -pub fn is_vo_bit_set(object: ObjectReference) -> bool { +pub(crate) fn is_vo_bit_set(object: ObjectReference) -> bool { VO_BIT_SIDE_METADATA_SPEC.load_atomic::(object.to_address(), Ordering::SeqCst) == 1 } @@ -38,7 +64,7 @@ pub fn is_vo_bit_set(object: ObjectReference) -> bool { /// /// This is unsafe: check the comment on `side_metadata::store` /// -pub unsafe fn unset_vo_bit_unsafe(object: ObjectReference) { +pub(crate) unsafe fn unset_vo_bit_unsafe(object: ObjectReference) { debug_assert!(is_vo_bit_set(object), "{:x}: VO-bit not set", object); VO_BIT_SIDE_METADATA_SPEC.store::(object.to_address(), 0); } @@ -47,10 +73,74 @@ pub unsafe fn unset_vo_bit_unsafe(object: ObjectReference) { /// /// This is unsafe: check the comment on `side_metadata::load` /// -pub unsafe fn is_vo_bit_set_unsafe(object: ObjectReference) -> bool { +pub(crate) unsafe fn is_vo_bit_set_unsafe(object: ObjectReference) -> bool { VO_BIT_SIDE_METADATA_SPEC.load::(object.to_address()) == 1 } -pub fn bzero_vo_bit(start: Address, size: usize) { +pub(crate) fn bzero_vo_bit(start: Address, size: usize) { VO_BIT_SIDE_METADATA_SPEC.bzero_metadata(start, size); } + +/// Check if `object` is a reference to a valid MMTk object. +/// +/// Concretely: +/// 1. Return true if `addr.to_object_reference()` is a valid object reference to an object in any +/// space in MMTk. 
+/// 2. Also return true if there exists an `objref: ObjectReference` such that +/// - `objref` is a valid object reference to an object in any space in MMTk, and +/// - `lo <= objref.to_address() < hi`, where +/// - `lo = addr.align_down(VO_BIT_REGION_SIZE)` and +/// - `hi = lo + VO_BIT_REGION_SIZE` and +/// - `VO_BIT_REGION_SIZE` is [`crate::util::is_mmtk_object::VO_BIT_REGION_SIZE`]. +/// It is the byte granularity of the VO-bit. +/// 3. Return false otherwise. This function never panics. +/// +/// Case 2 means **this function is imprecise for misaligned addresses**. +/// This function uses the VO-bit side metadata, i.e. a bitmap. +/// For space efficiency, each bit of the bitmap governs a small region of memory. +/// The size of a region is currently defined as the [minimum object size](crate::util::constants::MIN_OBJECT_SIZE), +/// which is currently defined as the [word size](crate::util::constants::BYTES_IN_WORD), +/// which is 4 bytes on 32-bit systems or 8 bytes on 64-bit systems. +/// The alignment of a region is also the region size. +/// If a VO-bit is `1`, the bitmap cannot tell which address within the 4-byte or 8-byte region +/// is the valid object reference. +/// Therefore, if the input `addr` is not properly aligned, but is close to a valid object +/// reference, this function may still return true. +/// +/// For the reason above, the VM **must check if `addr` is properly aligned** before calling this +/// function. For most VMs, valid object references are always aligned to the word size, so +/// checking `addr.is_aligned_to(BYTES_IN_WORD)` should usually work. If you are paranoid, you can +/// always check against [`crate::util::is_mmtk_object::VO_BIT_REGION_SIZE`]. +/// +/// This function is useful for conservative root scanning. 
The VM can iterate through all words in
+/// a stack, filter out zeros, misaligned words, obviously out-of-range words (such as addresses
+/// greater than `0x0000_7fff_ffff_ffff` on Linux on x86_64), and use this function to deside if the
+/// word is really a reference.
+///
+/// Note: This function has special behaviors if the VM space (enabled by the `vm_space` feature)
+/// is present.  See `crate::plan::global::BasePlan::vm_space`.
+///
+/// Argument:
+/// * `object`: An ObjectReference converted from an arbitrary address
+#[cfg(feature = "vo_bit")] // Eventually the entire `vo_bit` module will be guarded by this feature.
+pub fn is_valid_mmtk_object(object: ObjectReference) -> bool {
+    SFT_MAP
+        .get_checked(object.to_address())
+        .is_valid_mmtk_object(object)
+}
+
+/// Invalidate an object.
+///
+/// By calling this method, the GC will treat it as dead, and will not trace it or scan it in
+/// subsequent GCs.  It is useful for VMs that may destroy per-object metadata (usually during
+/// finalization) so that any attempt to scan the object after that may result in crash.
+///
+/// Argument:
+/// * `object`: An object that is still valid.
+#[cfg(feature = "vo_bit")] // Eventually the entire `vo_bit` module will be guarded by this feature.
+pub fn invalidate_object(object: ObjectReference) {
+    debug_assert!(SFT_MAP
+        .get_checked(object.to_address())
+        .is_valid_mmtk_object(object));
+    unset_vo_bit(object);
+}
diff --git a/src/util/mod.rs b/src/util/mod.rs
index cf659953e1..5fc1e83e75 100644
--- a/src/util/mod.rs
+++ b/src/util/mod.rs
@@ -41,8 +41,6 @@ pub(crate) mod erase_vm;
 pub(crate) mod finalizable_processor;
 /// Heap implementation, including page resource, mmapper, etc.
pub(crate) mod heap; -#[cfg(feature = "is_mmtk_object")] -pub mod is_mmtk_object; /// Logger initialization pub(crate) mod logger; /// Various malloc implementations (conditionally compiled by features) diff --git a/vmbindings/dummyvm/Cargo.toml b/vmbindings/dummyvm/Cargo.toml index 20be91eded..a234cda568 100644 --- a/vmbindings/dummyvm/Cargo.toml +++ b/vmbindings/dummyvm/Cargo.toml @@ -21,6 +21,6 @@ atomic = "0.4.6" [features] default = [] -is_mmtk_object = ["mmtk/is_mmtk_object"] +vo_bit = ["mmtk/vo_bit"] malloc_counted_size = ["mmtk/malloc_counted_size"] malloc_mark_sweep = ["mmtk/malloc_mark_sweep"] diff --git a/vmbindings/dummyvm/src/api.rs b/vmbindings/dummyvm/src/api.rs index d68508c7bf..8f2055d48c 100644 --- a/vmbindings/dummyvm/src/api.rs +++ b/vmbindings/dummyvm/src/api.rs @@ -107,10 +107,10 @@ pub extern "C" fn mmtk_is_live_object(object: ObjectReference) -> bool{ memory_manager::is_live_object(object) } -#[cfg(feature = "is_mmtk_object")] +#[cfg(feature = "vo_bit")] #[no_mangle] -pub extern "C" fn mmtk_is_mmtk_object(addr: Address) -> bool { - memory_manager::is_mmtk_object(addr) +pub extern "C" fn mmtk_is_valid_mmtk_object(object: ObjectReference) -> bool { + mmtk::util::metadata::vo_bit::is_valid_mmtk_object(object) } #[no_mangle] diff --git a/vmbindings/dummyvm/src/tests/conservatism.rs b/vmbindings/dummyvm/src/tests/conservatism.rs index 4ef542d83f..dc3fdf7a05 100644 --- a/vmbindings/dummyvm/src/tests/conservatism.rs +++ b/vmbindings/dummyvm/src/tests/conservatism.rs @@ -5,7 +5,7 @@ use crate::api::*; use crate::object_model::OBJECT_REF_OFFSET; use crate::tests::fixtures::{Fixture, SingleObject}; use mmtk::util::constants::LOG_BITS_IN_WORD; -use mmtk::util::is_mmtk_object::VO_BIT_REGION_SIZE; +use mmtk::util::metadata::vo_bit::VO_BIT_REGION_SIZE; use mmtk::util::*; lazy_static! 
{ @@ -16,35 +16,35 @@ fn basic_filter(addr: Address) -> bool { !addr.is_zero() && addr.as_usize() % VO_BIT_REGION_SIZE == (OBJECT_REF_OFFSET % VO_BIT_REGION_SIZE) } -fn assert_filter_pass(addr: Address) { +fn assert_filter_pass(object: ObjectReference) { assert!( - basic_filter(addr), + basic_filter(object.to_address()), "{} should pass basic filter, but failed.", - addr, + object, ); } -fn assert_filter_fail(addr: Address) { +fn assert_filter_fail(object: ObjectReference) { assert!( - !basic_filter(addr), + !basic_filter(object.to_address()), "{} should fail basic filter, but passed.", - addr, + object, ); } -fn assert_valid_objref(addr: Address) { +fn assert_valid_objref(object: ObjectReference) { assert!( - mmtk_is_mmtk_object(addr), + mmtk_is_valid_mmtk_object(object), "mmtk_is_mmtk_object({}) should return true. Got false.", - addr, + object, ); } -fn assert_invalid_objref(addr: Address, real: Address) { +fn assert_invalid_objref(object: ObjectReference, real: ObjectReference) { assert!( - !mmtk_is_mmtk_object(addr), + !mmtk_is_valid_mmtk_object(object), "mmtk_is_mmtk_object({}) should return false. Got true. 
Real object: {}", - addr, + object, real, ); } @@ -52,9 +52,9 @@ fn assert_invalid_objref(addr: Address, real: Address) { #[test] pub fn null() { SINGLE_OBJECT.with_fixture(|fixture| { - let addr = Address::ZERO; - assert_filter_fail(addr); - assert_invalid_objref(addr, fixture.objref.to_address()); + let object = ObjectReference::NULL; + assert_filter_fail(object); + assert_invalid_objref(object, fixture.objref); }); } @@ -66,7 +66,7 @@ pub fn too_small() { SINGLE_OBJECT.with_fixture(|fixture| { for offset in 1usize..SMALL_OFFSET { let addr = Address::ZERO + offset; - assert_invalid_objref(addr, fixture.objref.to_address()); + assert_invalid_objref(unsafe { addr.to_object_reference() }, fixture.objref); } }); } @@ -75,7 +75,8 @@ pub fn too_small() { pub fn max() { SINGLE_OBJECT.with_fixture(|fixture| { let addr = Address::MAX; - assert_invalid_objref(addr, fixture.objref.to_address()); + let object = unsafe { addr.to_object_reference() }; + assert_invalid_objref(object, fixture.objref); }); } @@ -84,7 +85,8 @@ pub fn too_big() { SINGLE_OBJECT.with_fixture(|fixture| { for offset in 1usize..SMALL_OFFSET { let addr = Address::MAX - offset; - assert_invalid_objref(addr, fixture.objref.to_address()); + let object = unsafe { addr.to_object_reference() }; + assert_invalid_objref(object, fixture.objref); } }); } @@ -92,9 +94,9 @@ pub fn too_big() { #[test] pub fn direct_hit() { SINGLE_OBJECT.with_fixture(|fixture| { - let addr = fixture.objref.to_address(); - assert_filter_pass(addr); - assert_valid_objref(addr); + let object = fixture.objref; + assert_filter_pass(object); + assert_valid_objref(object); }); } @@ -106,7 +108,8 @@ pub fn small_offsets() { for offset in 1usize..SEVERAL_PAGES { let addr = fixture.objref.to_address() + offset; if basic_filter(addr) { - assert_invalid_objref(addr, fixture.objref.to_address()); + let object = unsafe { addr.to_object_reference() }; + assert_invalid_objref(object, fixture.objref); } } }); @@ -118,8 +121,9 @@ pub fn 
medium_offsets_aligned() { let alignment = std::mem::align_of::
(); for offset in (alignment..(alignment * SEVERAL_PAGES)).step_by(alignment) { let addr = fixture.objref.to_address() + offset; - assert_filter_pass(addr); - assert_invalid_objref(addr, fixture.objref.to_address()); + let object = unsafe { addr.to_object_reference() }; + assert_filter_pass(object); + assert_invalid_objref(object, fixture.objref); } }); } @@ -133,8 +137,9 @@ pub fn large_offsets_aligned() { Some(n) => unsafe { Address::from_usize(n) }, None => break, }; - assert_filter_pass(addr); - assert_invalid_objref(addr, fixture.objref.to_address()); + let object = unsafe { addr.to_object_reference() }; + assert_filter_pass(object); + assert_invalid_objref(object, fixture.objref); } }); } @@ -149,8 +154,9 @@ pub fn negative_offsets() { Some(n) => unsafe { Address::from_usize(n) }, None => break, }; - assert_filter_pass(addr); - assert_invalid_objref(addr, fixture.objref.to_address()); + let object = unsafe { addr.to_object_reference() }; + assert_filter_pass(object); + assert_invalid_objref(object, fixture.objref); } }); } diff --git a/vmbindings/dummyvm/src/tests/mod.rs b/vmbindings/dummyvm/src/tests/mod.rs index 353156d856..96164512b7 100644 --- a/vmbindings/dummyvm/src/tests/mod.rs +++ b/vmbindings/dummyvm/src/tests/mod.rs @@ -16,7 +16,7 @@ mod malloc_api; #[cfg(feature = "malloc_counted_size")] mod malloc_counted; mod malloc_ms; -#[cfg(feature = "is_mmtk_object")] +#[cfg(feature = "vo_bit")] mod conservatism; mod is_in_mmtk_spaces; mod fixtures; From 38992824fb3508fb6d532490a0c131271cfc4a3f Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 17 Oct 2022 12:24:17 +0800 Subject: [PATCH 10/14] Fixed comments --- src/memory_manager.rs | 4 +- src/plan/global.rs | 11 ++-- src/util/metadata/vo_bit.rs | 64 +++++++------------- vmbindings/dummyvm/src/tests/conservatism.rs | 6 +- 4 files changed, 34 insertions(+), 51 deletions(-) diff --git a/src/memory_manager.rs b/src/memory_manager.rs index d3a936766b..55c420e835 100644 --- a/src/memory_manager.rs +++ 
b/src/memory_manager.rs @@ -584,8 +584,8 @@ pub fn is_live_object(object: ObjectReference) -> bool { /// object for the VM in response to `memory_manager::alloc`, this function will return true; but /// if the VM directly called `malloc` to allocate the object, this function will return false. /// -/// If `is_mmtk_object(object.to_address())` returns true, `is_in_mmtk_spaces(object)` must also -/// return true. +/// If `src::util::metadata::vo_map::is_valid_mmtk_object(object)` returns true, +/// `is_in_mmtk_spaces(object)` must also return true. /// /// This function is useful if an object reference in the VM can be either a pointer into the MMTk /// heap, or a pointer to non-MMTk objects. If the VM has a pre-built boot image that contains diff --git a/src/plan/global.rs b/src/plan/global.rs index 85f148a2ce..6a7b5922c9 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -419,12 +419,13 @@ pub struct BasePlan { /// A VM space is a space allocated and populated by the VM. Currently it is used by JikesRVM /// for boot image. /// - /// If VM space is present, it has some special interaction with the - /// `memory_manager::is_mmtk_object` and the `memory_manager::is_in_mmtk_spaces` functions. + /// If VM space is present, it has some special interaction with the VO-bit metadata and the + /// `memory_manager::is_in_mmtk_spaces` functions. /// - /// - The `is_mmtk_object` function requires the VO-bit side metadata to identify objects, - /// but currently we do not require the boot image to provide it, so it will not work if the - /// address argument is in the VM space. + /// - Currently we do not require the boot image to provide a bitmap to be used as the VO-bit + /// metadata, so features that require the VO-bit will not work. Conservative GC will not + /// be able to identify if an address is a valid object reference if it points into the VM + /// space. 
/// /// - The `is_in_mmtk_spaces` currently returns `true` if the given object reference is in /// the VM space. diff --git a/src/util/metadata/vo_bit.rs b/src/util/metadata/vo_bit.rs index e6ec2d5136..5d92cefb79 100644 --- a/src/util/metadata/vo_bit.rs +++ b/src/util/metadata/vo_bit.rs @@ -2,28 +2,38 @@ //! //! The valid-object bit (VO-bit) metadata is a one-bit-per-object side metadata. It is set for //! every object at allocation time (more precisely, during `post_alloc`), and cleared when either -//! - the object reclaims by the GC, or -//! - the VM explicitly clears the VO-bit of the object. +//! - the object is reclaimed by the GC, or +//! - the VM explicitly clears the VO-bit of the object (using the [`invalidate_object`] API). //! //! The main purpose of VO-bit is supporting conservative GC. It is the canonical source of //! information about whether there is an object in the MMTk heap at any given address. //! -//! The VO-bit has the granularity of one bit per minimum object alignment. Each bit governs the +//! The granularity of VO-bit is one bit per minimum object alignment. Each bit governs the //! region of `lo <= addr < hi`, where //! - `lo = addr.align_down(VO_BIT_REGION_SIZE)` //! - `hi = lo + VO_BIT_REGION_SIZE` //! - The constant [`VO_BIT_REGION_SIZE`] is size of the region (in bytes) each bit governs. //! -//! Because of the granularity, if the user wants to check if an *arbitrary* address points to a -//! valid object, it must check if the address is properly aligned. +//! Because of the granularity, the VO-bit metadata cannot tell *which* address in each region +//! has a valid object. Therefore, the VM **must check if an address is properly aligned** before +//! consulting the VO-bit metadata (by calling the [`is_valid_mmtk_object`] function). For most +//! VMs, the alignment requirement of object references is usually equal to [`VO_BIT_REGION_SIZE`], +//! so checking `object.to_address().is_aligned_to(VO_BIT_REGION_SIZE)` should usually work. 
+//!
+//! This function is useful for conservative root scanning.  The VM can iterate through all words
+//! in a stack, filter out zeros, misaligned words, obviously out-of-range words (such as addresses
+//! greater than `0x0000_7fff_ffff_ffff` on Linux on x86_64), and use this function to decide if
+//! the word is really a reference.
+//!
+//! Note: This function has special behaviors if the VM space (enabled by the `vm_space` feature)
+//! is present.  See `crate::plan::global::BasePlan::vm_space`.
+//!
 
 use atomic::Ordering;
 
 #[cfg(feature = "vo_bit")]
-// Eventually the entire `vo_bit` module will be guarded by this feature.
 use crate::mmtk::SFT_MAP;
 #[cfg(feature = "vo_bit")]
-// Eventually the entire `vo_bit` module will be guarded by this feature.
 use crate::policy::sft_map::SFTMap;
 use crate::util::metadata::side_metadata::SideMetadataSpec;
 use crate::util::Address;
@@ -83,42 +93,14 @@ pub(crate) fn bzero_vo_bit(start: Address, size: usize) {
 
 /// Check if `object` is a reference to a valid MMTk object.
 ///
-/// Concretely:
-/// 1.  Return true if `addr.to_object_reference()` is a valid object reference to an object in any
-///     space in MMTk.
-/// 2.  Also return true if there exists an `objref: ObjectReference` such that
-///     -   `objref` is a valid object reference to an object in any space in MMTk, and
-///     -   `lo <= objref.to_address() < hi`, where
-///         -   `lo = addr.align_down(VO_BIT_REGION_SIZE)` and
-///         -   `hi = lo + VO_BIT_REGION_SIZE` and
-///         -   `VO_BIT_REGION_SIZE` is [`crate::util::is_mmtk_object::VO_BIT_REGION_SIZE`].
-///             It is the byte granularity of the VO-bit.
-/// 3.  Return false otherwise.  This function never panics.
-///
-/// Case 2 means **this function is imprecise for misaligned addresses**.
-/// This function uses the VO-bit side metadata, i.e. a bitmap.
-/// For space efficiency, each bit of the bitmap governs a small region of memory.
-/// The size of a region is currently defined as the [minimum object size](crate::util::constants::MIN_OBJECT_SIZE), -/// which is currently defined as the [word size](crate::util::constants::BYTES_IN_WORD), -/// which is 4 bytes on 32-bit systems or 8 bytes on 64-bit systems. -/// The alignment of a region is also the region size. -/// If a VO-bit is `1`, the bitmap cannot tell which address within the 4-byte or 8-byte region -/// is the valid object reference. -/// Therefore, if the input `addr` is not properly aligned, but is close to a valid object -/// reference, this function may still return true. -/// -/// For the reason above, the VM **must check if `addr` is properly aligned** before calling this -/// function. For most VMs, valid object references are always aligned to the word size, so -/// checking `addr.is_aligned_to(BYTES_IN_WORD)` should usually work. If you are paranoid, you can -/// always check against [`crate::util::is_mmtk_object::VO_BIT_REGION_SIZE`]. +/// This function returns true if the VO-bit is set for the address of `object`. /// -/// This function is useful for conservative root scanning. The VM can iterate through all words in -/// a stack, filter out zeros, misaligned words, obviously out-of-range words (such as addresses -/// greater than `0x0000_7fff_ffff_ffff` on Linux on x86_64), and use this function to deside if the -/// word is really a reference. +/// The input parameter `object` can be converted from an arbitrary address. This function will +/// always return true or false, and will never panic. /// -/// Note: This function has special behaviors if the VM space (enabled by the `vm_space` feature) -/// is present. See `crate::plan::global::BasePlan::vm_space`. +/// Due to the granularity of the VO-bit metadata (see [module-level documentation][self]), the +/// user must check the alignment of `object` before calling this function in order to get the +/// correct result. 
/// /// Argument: /// * `object`: An ObjectReference converted from an arbitrary address diff --git a/vmbindings/dummyvm/src/tests/conservatism.rs b/vmbindings/dummyvm/src/tests/conservatism.rs index dc3fdf7a05..6a37a34f37 100644 --- a/vmbindings/dummyvm/src/tests/conservatism.rs +++ b/vmbindings/dummyvm/src/tests/conservatism.rs @@ -1,5 +1,5 @@ // GITHUB-CI: MMTK_PLAN=all -// GITHUB-CI: FEATURES=is_mmtk_object +// GITHUB-CI: FEATURES=vo_map use crate::api::*; use crate::object_model::OBJECT_REF_OFFSET; @@ -35,7 +35,7 @@ fn assert_filter_fail(object: ObjectReference) { fn assert_valid_objref(object: ObjectReference) { assert!( mmtk_is_valid_mmtk_object(object), - "mmtk_is_mmtk_object({}) should return true. Got false.", + "mmtk_is_valid_mmtk_object({}) should return true. Got false.", object, ); } @@ -43,7 +43,7 @@ fn assert_valid_objref(object: ObjectReference) { fn assert_invalid_objref(object: ObjectReference, real: ObjectReference) { assert!( !mmtk_is_valid_mmtk_object(object), - "mmtk_is_mmtk_object({}) should return false. Got true. Real object: {}", + "mmtk_is_valid_mmtk_object({}) should return false. Got true. Real object: {}", object, real, ); From 145d4b20cde98380845e4e33fb8a38fc9a1d903b Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 17 Oct 2022 12:57:30 +0800 Subject: [PATCH 11/14] Comment fixes and safety bzero should be unsafe because of potential races. 
--- src/policy/immix/block.rs | 4 +++- src/util/alloc/immix_allocator.rs | 7 +++++- src/util/metadata/vo_bit.rs | 38 +++++++++++++++++++++---------- 3 files changed, 35 insertions(+), 14 deletions(-) diff --git a/src/policy/immix/block.rs b/src/policy/immix/block.rs index a292442c7e..f79df0f25d 100644 --- a/src/policy/immix/block.rs +++ b/src/policy/immix/block.rs @@ -184,7 +184,9 @@ impl Block { #[inline] pub fn deinit(&self) { #[cfg(feature = "vo_bit")] - crate::util::metadata::vo_bit::bzero_vo_bit(self.start(), Self::BYTES); + unsafe { + crate::util::metadata::vo_bit::bzero_vo_bit(self.start(), Self::BYTES); + } self.set_state(BlockState::Unallocated); } diff --git a/src/util/alloc/immix_allocator.rs b/src/util/alloc/immix_allocator.rs index 3f9b41620e..162c29f040 100644 --- a/src/util/alloc/immix_allocator.rs +++ b/src/util/alloc/immix_allocator.rs @@ -258,7 +258,12 @@ impl ImmixAllocator { self.tls ); #[cfg(feature = "vo_bit")] - crate::util::metadata::vo_bit::bzero_vo_bit(self.cursor, self.limit - self.cursor); + unsafe { + crate::util::metadata::vo_bit::bzero_vo_bit( + self.cursor, + self.limit - self.cursor, + ); + } crate::util::memory::zero(self.cursor, self.limit - self.cursor); debug_assert!( align_allocation_no_fill::(self.cursor, align, offset) + size <= self.limit diff --git a/src/util/metadata/vo_bit.rs b/src/util/metadata/vo_bit.rs index 5d92cefb79..db81a22f75 100644 --- a/src/util/metadata/vo_bit.rs +++ b/src/util/metadata/vo_bit.rs @@ -56,38 +56,55 @@ pub(crate) const VO_BIT_SIDE_METADATA_ADDR: Address = pub const VO_BIT_REGION_SIZE: usize = 1usize << crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region; +/// Set the VO-bit of `object` atomically. 
pub(crate) fn set_vo_bit(object: ObjectReference) {
     debug_assert!(!is_vo_bit_set(object), "{:x}: VO-bit already set", object);
-    VO_BIT_SIDE_METADATA_SPEC.store_atomic::(object.to_address(), 1, Ordering::SeqCst);
+    VO_BIT_SIDE_METADATA_SPEC.fetch_or_atomic::(object.to_address(), 1, Ordering::SeqCst);
 }
 
+/// Unset the VO-bit of `object` atomically.
 pub(crate) fn unset_vo_bit(object: ObjectReference) {
+    // Note: Both the VM and the GC are allowed to unset VO-bit.  However, if the VM unsets the
+    // VO-bit first, that object will not be traced by the GC, and the GC will not try to clear its
+    // VO-bit again.  So it is valid to assert the VO-bit must still be set when this function is
+    // called.
     debug_assert!(is_vo_bit_set(object), "{:x}: VO-bit not set", object);
-    VO_BIT_SIDE_METADATA_SPEC.store_atomic::(object.to_address(), 0, Ordering::SeqCst);
+    VO_BIT_SIDE_METADATA_SPEC.fetch_and_atomic::(object.to_address(), 0, Ordering::SeqCst);
 }
 
+/// Check if the VO-bit of `object` is set atomically.
 pub(crate) fn is_vo_bit_set(object: ObjectReference) -> bool {
     VO_BIT_SIDE_METADATA_SPEC.load_atomic::(object.to_address(), Ordering::SeqCst) == 1
 }
 
-/// # Safety
+/// Unset the VO-bit of `object` non-atomically.
 ///
-/// This is unsafe: check the comment on `side_metadata::store`
+/// # Safety
 ///
+/// It will be a data race if another thread concurrently accesses any bit in the same byte.
+/// It should only be used when such a race is impossible.
 pub(crate) unsafe fn unset_vo_bit_unsafe(object: ObjectReference) {
     debug_assert!(is_vo_bit_set(object), "{:x}: VO-bit not set", object);
     VO_BIT_SIDE_METADATA_SPEC.store::(object.to_address(), 0);
 }
 
-/// # Safety
+/// Check if the VO-bit of `object` is set non-atomically.
 ///
-/// This is unsafe: check the comment on `side_metadata::load`
+/// # Safety
 ///
+/// It will be a data race if another thread concurrently modifies any bit in the same byte.
+/// It should only be used when such a race is impossible.
pub(crate) unsafe fn is_vo_bit_set_unsafe(object: ObjectReference) -> bool { VO_BIT_SIDE_METADATA_SPEC.load::(object.to_address()) == 1 } -pub(crate) fn bzero_vo_bit(start: Address, size: usize) { +/// Unset all VO-bits for all objects in the region of `start <= addr < start + size`. +/// +/// # Safety +/// +/// It will be a data race if another thread concurrently accesses any bit in the region. +/// It should only be used when such a race is impossible. +pub(crate) unsafe fn bzero_vo_bit(start: Address, size: usize) { VO_BIT_SIDE_METADATA_SPEC.bzero_metadata(start, size); } @@ -120,9 +137,6 @@ pub fn is_valid_mmtk_object(object: ObjectReference) -> bool { /// Argument: /// * `object`: An object that is still valid. #[cfg(feature = "vo_bit")] // Eventually the entire `vo_bit` module will be guarded by this feature. -pub fn invalidate_object(object: ObjectReference) { - debug_assert!(SFT_MAP - .get_checked(object.to_address()) - .is_valid_mmtk_object(object)); - unset_vo_bit(object); +pub fn invalidate_object(_object: ObjectReference) { + unimplemented!("GC algorithms need to be updated to skip invalid objects during tracing.") } From e929f83ec752ae89068505cf9b44059def80dbba Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 17 Oct 2022 13:40:21 +0800 Subject: [PATCH 12/14] typo --- vmbindings/dummyvm/src/tests/conservatism.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vmbindings/dummyvm/src/tests/conservatism.rs b/vmbindings/dummyvm/src/tests/conservatism.rs index 6a37a34f37..afa457daf5 100644 --- a/vmbindings/dummyvm/src/tests/conservatism.rs +++ b/vmbindings/dummyvm/src/tests/conservatism.rs @@ -1,5 +1,5 @@ // GITHUB-CI: MMTK_PLAN=all -// GITHUB-CI: FEATURES=vo_map +// GITHUB-CI: FEATURES=vo_bit use crate::api::*; use crate::object_model::OBJECT_REF_OFFSET; From 33121fb8b269bb7538e7a2f4abe21e588002dac2 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 17 Oct 2022 15:04:56 +0800 Subject: [PATCH 13/14] Revert to store_atomic There 
is a bug with fetch_and and fetch_or. --- src/util/metadata/vo_bit.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/util/metadata/vo_bit.rs b/src/util/metadata/vo_bit.rs index db81a22f75..d3615c6633 100644 --- a/src/util/metadata/vo_bit.rs +++ b/src/util/metadata/vo_bit.rs @@ -59,7 +59,7 @@ pub const VO_BIT_REGION_SIZE: usize = /// Set the VO-bit of `object` atomically. pub(crate) fn set_vo_bit(object: ObjectReference) { debug_assert!(!is_vo_bit_set(object), "{:x}: VO-bit already set", object); - VO_BIT_SIDE_METADATA_SPEC.fetch_or_atomic::(object.to_address(), 1, Ordering::SeqCst); + VO_BIT_SIDE_METADATA_SPEC.store_atomic::(object.to_address(), 1, Ordering::SeqCst); } /// Unset the VO-bit of `object` atomically. @@ -69,7 +69,7 @@ pub(crate) fn unset_vo_bit(object: ObjectReference) { // VO-bit again. So it is valid to assert the VO-bit must still be set when this function is // called. debug_assert!(is_vo_bit_set(object), "{:x}: VO-bit not set", object); - VO_BIT_SIDE_METADATA_SPEC.fetch_and_atomic::(object.to_address(), 0, Ordering::SeqCst); + VO_BIT_SIDE_METADATA_SPEC.store_atomic::(object.to_address(), 0, Ordering::SeqCst); } /// Check if the VO-bit of `object` is set atomically. From 100499cd58bbbe3fd143d3afb42e2cab2a50cb0d Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 17 Oct 2022 20:40:18 +0800 Subject: [PATCH 14/14] Typo --- src/memory_manager.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/memory_manager.rs b/src/memory_manager.rs index 55c420e835..2dbac2ef57 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -584,7 +584,7 @@ pub fn is_live_object(object: ObjectReference) -> bool { /// object for the VM in response to `memory_manager::alloc`, this function will return true; but /// if the VM directly called `malloc` to allocate the object, this function will return false. 
///
-/// If `src::util::metadata::vo_map::is_valid_mmtk_object(object)` returns true,
+/// If `crate::util::metadata::vo_bit::is_valid_mmtk_object(object)` returns true,
 /// `is_in_mmtk_spaces(object)` must also return true.
 ///
 /// This function is useful if an object reference in the VM can be either a pointer into the MMTk
 /// heap, or a pointer to non-MMTk objects. If the VM has a pre-built boot image that contains