diff --git a/Cargo.toml b/Cargo.toml
index 9096425241..4e2433a65b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -81,11 +81,13 @@ ro_space = []
 # TODO: This is not properly implemented yet. We currently use an immortal space instead, and all our spaces have execution permission at the moment.
 code_space = []
 
-# metadata
-global_alloc_bit = []
+# Global valid object (VO) bit metadata.
+# The VO bit is set when an object is allocated, and cleared when it is reclaimed.
+# See `src/util/metadata/vo_bit.rs`
+vo_bit = []
 
 # conservative garbage collection support
-is_mmtk_object = ["global_alloc_bit"]
+is_mmtk_object = ["vo_bit"]
 
 # Enable object pinning, in particular, enable pinning/unpinning, and its metadata
 object_pinning = []
diff --git a/src/memory_manager.rs b/src/memory_manager.rs
index 1a28645343..f6bc4a1b5d 100644
--- a/src/memory_manager.rs
+++ b/src/memory_manager.rs
@@ -582,20 +582,20 @@ pub fn is_live_object(object: ObjectReference) -> bool {
 /// 2. Also return true if there exists an `objref: ObjectReference` such that
 ///    - `objref` is a valid object reference to an object in any space in MMTk, and
 ///    - `lo <= objref.to_address() < hi`, where
-///      - `lo = addr.align_down(ALLOC_BIT_REGION_SIZE)` and
-///      - `hi = lo + ALLOC_BIT_REGION_SIZE` and
-///      - `ALLOC_BIT_REGION_SIZE` is [`crate::util::is_mmtk_object::ALLOC_BIT_REGION_SIZE`].
-///        It is the byte granularity of the alloc bit.
+///      - `lo = addr.align_down(VO_BIT_REGION_SIZE)` and
+///      - `hi = lo + VO_BIT_REGION_SIZE` and
+///      - `VO_BIT_REGION_SIZE` is [`crate::util::is_mmtk_object::VO_BIT_REGION_SIZE`].
+///        It is the byte granularity of the valid object (VO) bit.
 /// 3. Return false otherwise. This function never panics.
 ///
 /// Case 2 means **this function is imprecise for misaligned addresses**.
-/// This function uses the "alloc bits" side metadata, i.e. a bitmap.
+/// This function uses the "valid object (VO) bits" side metadata, i.e. a bitmap.
 /// For space efficiency, each bit of the bitmap governs a small region of memory.
 /// The size of a region is currently defined as the [minimum object size](crate::util::constants::MIN_OBJECT_SIZE),
 /// which is currently defined as the [word size](crate::util::constants::BYTES_IN_WORD),
 /// which is 4 bytes on 32-bit systems or 8 bytes on 64-bit systems.
 /// The alignment of a region is also the region size.
-/// If an alloc bit is `1`, the bitmap cannot tell which address within the 4-byte or 8-byte region
+/// If a VO bit is `1`, the bitmap cannot tell which address within the 4-byte or 8-byte region
 /// is the valid object reference.
 /// Therefore, if the input `addr` is not properly aligned, but is close to a valid object
 /// reference, this function may still return true.
@@ -603,7 +603,7 @@ pub fn is_live_object(object: ObjectReference) -> bool {
 /// For the reason above, the VM **must check if `addr` is properly aligned** before calling this
 /// function. For most VMs, valid object references are always aligned to the word size, so
 /// checking `addr.is_aligned_to(BYTES_IN_WORD)` should usually work. If you are paranoid, you can
-/// always check against [`crate::util::is_mmtk_object::ALLOC_BIT_REGION_SIZE`].
+/// always check against [`crate::util::is_mmtk_object::VO_BIT_REGION_SIZE`].
 ///
 /// This function is useful for conservative root scanning. The VM can iterate through all words in
 /// a stack, filter out zeros, misaligned words, obviously out-of-range words (such as addresses
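The documented contract above translates directly into a binding-side stack scan. The sketch below is illustrative, not part of this patch: it assumes the binding can view a thread's stack as a slice of `Address`es, and the `report_pinned_root` callback and `heap_lo`/`heap_hi` range bounds are hypothetical binding-side helpers.

```rust
use mmtk::memory_manager;
use mmtk::util::is_mmtk_object::VO_BIT_REGION_SIZE;
use mmtk::util::{Address, ObjectReference};

/// Conservatively scan a stack, reporting every word that plausibly refers
/// to an MMTk object. Zeros, misaligned words, and out-of-range words are
/// filtered first, because `is_mmtk_object` is imprecise for misaligned input.
fn scan_stack_conservatively(
    stack: &[Address],
    heap_lo: Address, // hypothetical cheap lower bound of the MMTk heap
    heap_hi: Address, // hypothetical cheap upper bound of the MMTk heap
    report_pinned_root: &mut dyn FnMut(ObjectReference),
) {
    for &word in stack {
        if word.is_zero()
            || !word.is_aligned_to(VO_BIT_REGION_SIZE)
            || word < heap_lo
            || word >= heap_hi
        {
            continue;
        }
        // The VO bit is set for this word's region, so treat it as a root.
        if memory_manager::is_mmtk_object(word) {
            report_pinned_root(ObjectReference::from_raw_address(word));
        }
    }
}
```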
diff --git a/src/plan/global.rs b/src/plan/global.rs
index 27846d0f7f..aba0d72d5a 100644
--- a/src/plan/global.rs
+++ b/src/plan/global.rs
@@ -415,7 +415,7 @@ pub struct BasePlan<VM: VMBinding> {
     /// If VM space is present, it has some special interaction with the
    /// `memory_manager::is_mmtk_object` and the `memory_manager::is_in_mmtk_spaces` functions.
     ///
-    /// - The `is_mmtk_object` funciton requires the alloc_bit side metadata to identify objects,
+    /// - The `is_mmtk_object` function requires the valid object (VO) bit side metadata to identify objects,
     ///   but currently we do not require the boot image to provide it, so it will not work if the
     ///   address argument is in the VM space.
     ///
diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs
index 0de80a61c7..70aafde211 100644
--- a/src/plan/markcompact/global.rs
+++ b/src/plan/markcompact/global.rs
@@ -15,11 +15,11 @@ use crate::policy::space::Space;
 use crate::scheduler::gc_work::*;
 use crate::scheduler::*;
 use crate::util::alloc::allocators::AllocatorSelector;
-#[cfg(not(feature = "global_alloc_bit"))]
-use crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC;
 use crate::util::copy::CopySemantics;
 use crate::util::heap::VMRequest;
 use crate::util::metadata::side_metadata::{SideMetadataContext, SideMetadataSanity};
+#[cfg(not(feature = "vo_bit"))]
+use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC;
 use crate::util::opaque_pointer::*;
 use crate::vm::VMBinding;
 
@@ -180,15 +180,15 @@ impl<VM: VMBinding> Plan for MarkCompact<VM> {
 
 impl<VM: VMBinding> MarkCompact<VM> {
     pub fn new(args: CreateGeneralPlanArgs<VM>) -> Self {
-        // if global_alloc_bit is enabled, ALLOC_SIDE_METADATA_SPEC will be added to
+        // if vo_bit is enabled, VO_BIT_SIDE_METADATA_SPEC will be added to
         // SideMetadataContext by default, so we don't need to add it here.
-        #[cfg(feature = "global_alloc_bit")]
+        #[cfg(feature = "vo_bit")]
         let global_side_metadata_specs = SideMetadataContext::new_global_specs(&[]);
 
-        // if global_alloc_bit is NOT enabled,
-        // we need to add ALLOC_SIDE_METADATA_SPEC to SideMetadataContext here.
-        #[cfg(not(feature = "global_alloc_bit"))]
+        // if vo_bit is NOT enabled,
+        // we need to add VO_BIT_SIDE_METADATA_SPEC to SideMetadataContext here.
+        #[cfg(not(feature = "vo_bit"))]
         let global_side_metadata_specs =
-            SideMetadataContext::new_global_specs(&[ALLOC_SIDE_METADATA_SPEC]);
+            SideMetadataContext::new_global_specs(&[VO_BIT_SIDE_METADATA_SPEC]);
 
         let mut plan_args = CreateSpecificPlanArgs {
             global_args: args,
diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs
index 29a16713e2..70909b263d 100644
--- a/src/policy/copyspace.rs
+++ b/src/policy/copyspace.rs
@@ -5,7 +5,7 @@ use crate::policy::sft::SFT;
 use crate::policy::space::{CommonSpace, Space};
 use crate::scheduler::GCWorker;
 use crate::util::copy::*;
-#[cfg(feature = "global_alloc_bit")]
+#[cfg(feature = "vo_bit")]
 use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
 use crate::util::heap::{MonotonePageResource, PageResource};
 use crate::util::metadata::{extract_side_metadata, MetadataSpec};
@@ -56,8 +56,8 @@ impl<VM: VMBinding> SFT for CopySpace<VM> {
     }
 
     fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) {
-        #[cfg(feature = "global_alloc_bit")]
-        crate::util::alloc_bit::set_alloc_bit::<VM>(_object);
+        #[cfg(feature = "vo_bit")]
+        crate::util::metadata::vo_bit::set_vo_bit::<VM>(_object);
     }
 
     fn get_forwarded_object(&self, object: ObjectReference) -> Option<ObjectReference> {
@@ -74,7 +74,7 @@ impl<VM: VMBinding> SFT for CopySpace<VM> {
 
     #[cfg(feature = "is_mmtk_object")]
     fn is_mmtk_object(&self, addr: Address) -> bool {
-        crate::util::alloc_bit::is_alloced_object::<VM>(addr).is_some()
+        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr).is_some()
     }
 
     fn sft_trace_object(
@@ -172,21 +172,21 @@ impl<VM: VMBinding> CopySpace<VM> {
 
     pub fn release(&self) {
         unsafe {
-            #[cfg(feature = "global_alloc_bit")]
-            self.reset_alloc_bit();
+            #[cfg(feature = "vo_bit")]
+            self.reset_vo_bit();
             self.pr.reset();
         }
         self.common.metadata.reset();
         self.from_space.store(false, Ordering::SeqCst);
     }
 
-    #[cfg(feature = "global_alloc_bit")]
-    unsafe fn reset_alloc_bit(&self) {
+    #[cfg(feature = "vo_bit")]
+    unsafe fn reset_vo_bit(&self) {
         let current_chunk = self.pr.get_current_chunk();
         if self.common.contiguous {
-            // If we have allocated something into this space, we need to clear its alloc bit.
+            // If we have allocated something into this space, we need to clear its VO bit.
             if current_chunk != self.common.start {
-                crate::util::alloc_bit::bzero_alloc_bit(
+                crate::util::metadata::vo_bit::bzero_vo_bit(
                     self.common.start,
                     current_chunk + BYTES_IN_CHUNK - self.common.start,
                 );
@@ -218,10 +218,10 @@ impl<VM: VMBinding> CopySpace<VM> {
         // This object is in from space, we will copy. Make sure we have a valid copy semantic.
         debug_assert!(semantics.is_some());
 
-        #[cfg(feature = "global_alloc_bit")]
+        #[cfg(feature = "vo_bit")]
         debug_assert!(
-            crate::util::alloc_bit::is_alloced::<VM>(object),
-            "{:x}: alloc bit not set",
+            crate::util::metadata::vo_bit::is_vo_bit_set::<VM>(object),
+            "{:x}: VO bit not set",
             object
         );
diff --git a/src/policy/immix/block.rs b/src/policy/immix/block.rs
index bff04144ed..d8da73ca5e 100644
--- a/src/policy/immix/block.rs
+++ b/src/policy/immix/block.rs
@@ -166,8 +166,8 @@ impl Block {
 
     /// Deinitalize a block before releasing.
     pub fn deinit(&self) {
-        #[cfg(feature = "global_alloc_bit")]
-        crate::util::alloc_bit::bzero_alloc_bit(self.start(), Self::BYTES);
+        #[cfg(feature = "vo_bit")]
+        crate::util::metadata::vo_bit::bzero_vo_bit(self.start(), Self::BYTES);
 
         self.set_state(BlockState::Unallocated);
     }
@@ -224,8 +224,8 @@ impl Block {
                     holes += 1;
                 }
 
-                #[cfg(feature = "global_alloc_bit")]
-                crate::util::alloc_bit::bzero_alloc_bit(line.start(), Line::BYTES);
+                #[cfg(feature = "vo_bit")]
+                crate::util::metadata::vo_bit::bzero_vo_bit(line.start(), Line::BYTES);
 
                 #[cfg(feature = "immix_zero_on_release")]
                 crate::util::memory::zero(line.start(), Line::BYTES);
diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs
index 9d956367ee..d50850ce5d 100644
--- a/src/policy/immix/immixspace.rs
+++ b/src/policy/immix/immixspace.rs
@@ -117,12 +117,12 @@ impl<VM: VMBinding> SFT for ImmixSpace<VM> {
         true
     }
     fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) {
-        #[cfg(feature = "global_alloc_bit")]
-        crate::util::alloc_bit::set_alloc_bit::<VM>(_object);
+        #[cfg(feature = "vo_bit")]
+        crate::util::metadata::vo_bit::set_vo_bit::<VM>(_object);
     }
     #[cfg(feature = "is_mmtk_object")]
     fn is_mmtk_object(&self, addr: Address) -> bool {
-        crate::util::alloc_bit::is_alloced_object::<VM>(addr).is_some()
+        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr).is_some()
     }
     fn sft_trace_object(
         &self,
@@ -485,10 +485,10 @@ impl<VM: VMBinding> ImmixSpace<VM> {
         queue: &mut impl ObjectQueue,
         object: ObjectReference,
     ) -> ObjectReference {
-        #[cfg(feature = "global_alloc_bit")]
+        #[cfg(feature = "vo_bit")]
         debug_assert!(
-            crate::util::alloc_bit::is_alloced::<VM>(object),
-            "{:x}: alloc bit not set",
+            crate::util::metadata::vo_bit::is_vo_bit_set::<VM>(object),
+            "{:x}: VO bit not set",
             object
         );
         if self.attempt_mark(object, self.mark_state) {
@@ -520,10 +520,10 @@ impl<VM: VMBinding> ImmixSpace<VM> {
     ) -> ObjectReference {
         let copy_context = worker.get_copy_context_mut();
         debug_assert!(!super::BLOCK_ONLY);
-        #[cfg(feature = "global_alloc_bit")]
+        #[cfg(feature = "vo_bit")]
         debug_assert!(
-            crate::util::alloc_bit::is_alloced::<VM>(object),
-            "{:x}: alloc bit not set",
+            crate::util::metadata::vo_bit::is_vo_bit_set::<VM>(object),
+            "{:x}: VO bit not set",
             object
         );
         let forwarding_status = ForwardingWord::attempt_to_forward::<VM>(object);
diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs
index e1d5d34311..65fc6190e6 100644
--- a/src/policy/immortalspace.rs
+++ b/src/policy/immortalspace.rs
@@ -75,12 +75,12 @@ impl<VM: VMBinding> SFT for ImmortalSpace<VM> {
         if self.common.needs_log_bit {
             VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::<VM>(object, Ordering::SeqCst);
         }
-        #[cfg(feature = "global_alloc_bit")]
-        crate::util::alloc_bit::set_alloc_bit::<VM>(object);
+        #[cfg(feature = "vo_bit")]
+        crate::util::metadata::vo_bit::set_vo_bit::<VM>(object);
     }
     #[cfg(feature = "is_mmtk_object")]
     fn is_mmtk_object(&self, addr: Address) -> bool {
-        crate::util::alloc_bit::is_alloced_object::<VM>(addr).is_some()
+        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr).is_some()
     }
     fn sft_trace_object(
         &self,
@@ -192,10 +192,10 @@ impl<VM: VMBinding> ImmortalSpace<VM> {
         queue: &mut Q,
         object: ObjectReference,
     ) -> ObjectReference {
-        #[cfg(feature = "global_alloc_bit")]
+        #[cfg(feature = "vo_bit")]
         debug_assert!(
-            crate::util::alloc_bit::is_alloced::<VM>(object),
-            "{:x}: alloc bit not set",
+            crate::util::metadata::vo_bit::is_vo_bit_set::<VM>(object),
+            "{:x}: VO bit not set",
             object
         );
         if ImmortalSpace::<VM>::test_and_mark(object, self.mark_state) {
diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs
index 8569bac787..ea4ea66b1d 100644
--- a/src/policy/largeobjectspace.rs
+++ b/src/policy/largeobjectspace.rs
@@ -78,13 +78,13 @@ impl<VM: VMBinding> SFT for LargeObjectSpace<VM> {
             VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::<VM>(object, Ordering::SeqCst);
         }
 
-        #[cfg(feature = "global_alloc_bit")]
-        crate::util::alloc_bit::set_alloc_bit::<VM>(object);
+        #[cfg(feature = "vo_bit")]
+        crate::util::metadata::vo_bit::set_vo_bit::<VM>(object);
         self.treadmill.add_to_treadmill(object, alloc);
     }
     #[cfg(feature = "is_mmtk_object")]
     fn is_mmtk_object(&self, addr: Address) -> bool {
-        crate::util::alloc_bit::is_alloced_object::<VM>(addr).is_some()
+        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr).is_some()
     }
     fn sft_trace_object(
         &self,
@@ -189,10 +189,10 @@ impl<VM: VMBinding> LargeObjectSpace<VM> {
         queue: &mut Q,
         object: ObjectReference,
     ) -> ObjectReference {
-        #[cfg(feature = "global_alloc_bit")]
+        #[cfg(feature = "vo_bit")]
         debug_assert!(
-            crate::util::alloc_bit::is_alloced::<VM>(object),
-            "{:x}: alloc bit not set",
+            crate::util::metadata::vo_bit::is_vo_bit_set::<VM>(object),
+            "{:x}: VO bit not set",
             object
         );
         let nursery_object = self.is_in_nursery(object);
@@ -225,8 +225,8 @@ impl<VM: VMBinding> LargeObjectSpace<VM> {
     fn sweep_large_pages(&mut self, sweep_nursery: bool) {
         let sweep = |object: ObjectReference| {
-            #[cfg(feature = "global_alloc_bit")]
-            crate::util::alloc_bit::unset_alloc_bit::<VM>(object);
+            #[cfg(feature = "vo_bit")]
+            crate::util::metadata::vo_bit::unset_vo_bit::<VM>(object);
             self.pr
                 .release_pages(get_super_page(object.to_object_start::<VM>()));
         };
diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs
index 033c2c0d98..a2638ce07e 100644
--- a/src/policy/lockfreeimmortalspace.rs
+++ b/src/policy/lockfreeimmortalspace.rs
@@ -68,12 +68,12 @@ impl<VM: VMBinding> SFT for LockFreeImmortalSpace<VM> {
         unimplemented!()
     }
     fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) {
-        #[cfg(feature = "global_alloc_bit")]
-        crate::util::alloc_bit::set_alloc_bit::<VM>(_object);
+        #[cfg(feature = "vo_bit")]
+        crate::util::metadata::vo_bit::set_vo_bit::<VM>(_object);
     }
     #[cfg(feature = "is_mmtk_object")]
     fn is_mmtk_object(&self, addr: Address) -> bool {
-        crate::util::alloc_bit::is_alloced_object::<VM>(addr).is_some()
+        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr).is_some()
     }
     fn sft_trace_object(
         &self,
diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs
index 0d5bcfe6f6..ecc7b2035d 100644
--- a/src/policy/markcompactspace.rs
+++ b/src/policy/markcompactspace.rs
@@ -8,8 +8,8 @@ use crate::util::alloc::allocator::align_allocation_no_fill;
 use crate::util::constants::LOG_BYTES_IN_WORD;
 use crate::util::copy::CopySemantics;
 use crate::util::heap::{MonotonePageResource, PageResource};
-use crate::util::metadata::extract_side_metadata;
-use crate::util::{alloc_bit, Address, ObjectReference};
+use crate::util::metadata::{extract_side_metadata, vo_bit};
+use crate::util::{Address, ObjectReference};
 use crate::{vm::*, ObjectQueue};
 use atomic::Ordering;
 
@@ -69,7 +69,7 @@ impl<VM: VMBinding> SFT for MarkCompactSpace<VM> {
     }
 
     fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) {
-        crate::util::alloc_bit::set_alloc_bit::<VM>(object);
+        crate::util::metadata::vo_bit::set_vo_bit::<VM>(object);
     }
 
     #[cfg(feature = "sanity")]
@@ -79,7 +79,7 @@ impl<VM: VMBinding> SFT for MarkCompactSpace<VM> {
 
     #[cfg(feature = "is_mmtk_object")]
     fn is_mmtk_object(&self, addr: Address) -> bool {
-        crate::util::alloc_bit::is_alloced_object::<VM>(addr).is_some()
+        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr).is_some()
     }
 
     fn sft_trace_object(
@@ -218,8 +218,8 @@ impl<VM: VMBinding> MarkCompactSpace<VM> {
         object: ObjectReference,
     ) -> ObjectReference {
         debug_assert!(
-            crate::util::alloc_bit::is_alloced::<VM>(object),
-            "{:x}: alloc bit not set",
+            crate::util::metadata::vo_bit::is_vo_bit_set::<VM>(object),
+            "{:x}: VO bit not set",
             object
         );
         if MarkCompactSpace::<VM>::test_and_mark(object) {
@@ -234,8 +234,8 @@ impl<VM: VMBinding> MarkCompactSpace<VM> {
         object: ObjectReference,
     ) -> ObjectReference {
         debug_assert!(
-            crate::util::alloc_bit::is_alloced::<VM>(object),
-            "{:x}: alloc bit not set",
+            crate::util::metadata::vo_bit::is_vo_bit_set::<VM>(object),
+            "{:x}: VO bit not set",
             object
         );
         // from this stage and onwards, mark bit is no longer needed
@@ -363,8 +363,8 @@ impl<VM: VMBinding> MarkCompactSpace<VM> {
             start, end,
         );
         for obj in linear_scan {
-            // clear the alloc bit
-            alloc_bit::unset_alloc_bit::<VM>(obj);
+            // clear the VO bit
+            vo_bit::unset_vo_bit::<VM>(obj);
 
             let forwarding_pointer = Self::get_header_forwarding_pointer(obj);
 
@@ -377,8 +377,8 @@ impl<VM: VMBinding> MarkCompactSpace<VM> {
                 // copy object
                 trace!(" copy from {} to {}", obj, new_object);
                 let end_of_new_object = VM::VMObjectModel::copy_to(obj, new_object, Address::ZERO);
-                // update alloc_bit,
-                alloc_bit::set_alloc_bit::<VM>(new_object);
+                // update VO bit,
+                vo_bit::set_vo_bit::<VM>(new_object);
                 to = new_object.to_object_start::<VM>() + copied_size;
                 debug_assert_eq!(end_of_new_object, to);
             }
diff --git a/src/policy/marksweepspace/malloc_ms/global.rs b/src/policy/marksweepspace/malloc_ms/global.rs
index 718a958ff4..0742eeda04 100644
--- a/src/policy/marksweepspace/malloc_ms/global.rs
+++ b/src/policy/marksweepspace/malloc_ms/global.rs
@@ -93,7 +93,7 @@ impl<VM: VMBinding> SFT for MallocSpace<VM> {
         true
     }
 
-    // For malloc space, we need to further check the alloc bit.
+    // For malloc space, we need to further check the VO bit.
     fn is_in_space(&self, object: ObjectReference) -> bool {
         is_alloced_by_malloc::<VM>(object)
     }
@@ -109,7 +109,7 @@ impl<VM: VMBinding> SFT for MallocSpace<VM> {
 
     fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) {
         trace!("initialize_object_metadata for object {}", object);
-        set_alloc_bit::<VM>(object);
+        set_vo_bit::<VM>(object);
     }
 
     fn sft_trace_object(
@@ -161,7 +161,7 @@ impl<VM: VMBinding> Space<VM> for MallocSpace<VM> {
         let addr = object.to_object_start::<VM>();
         let active_mem = self.active_mem.lock().unwrap();
         if ret {
-            // The alloc bit tells that the object is in space.
+            // The VO bit tells that the object is in space.
             debug_assert!(
                 *active_mem.get(&addr).unwrap() != 0,
                 "active mem check failed for {} (object {}) - was freed",
@@ -169,7 +169,7 @@
                 object
             );
         } else {
-            // The alloc bit tells that the object is not in space. It could never be allocated, or have been freed.
+            // The VO bit tells that the object is not in space. It could never be allocated, or have been freed.
             debug_assert!(
                 (!active_mem.contains_key(&addr))
                     || (active_mem.contains_key(&addr) && *active_mem.get(&addr).unwrap() == 0),
@@ -238,10 +238,10 @@ pub const MAX_OBJECT_SIZE: usize = crate::util::constants::MAX_INT;
 
 impl<VM: VMBinding> MallocSpace<VM> {
     pub fn extend_global_side_metadata_specs(specs: &mut Vec<SideMetadataSpec>) {
-        // MallocSpace needs to use alloc bit. If the feature is turned on, the alloc bit spec is in the global specs.
+        // MallocSpace needs to use VO bit. If the feature is turned on, the VO bit spec is in the global specs.
         // Otherwise, we manually add it.
-        if !cfg!(feature = "global_alloc_bit") {
-            specs.push(crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC);
+        if !cfg!(feature = "vo_bit") {
+            specs.push(crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC);
         }
         // MallocSpace also need a global chunk metadata.
         // TODO: I don't know why this is a global spec. Can we replace it with the chunk map (and the local spec used in the chunk map)?
@@ -538,7 +538,7 @@ impl<VM: VMBinding> MallocSpace<VM> {
             // Free object
             self.free_internal(obj_start, bytes, offset_malloc);
             trace!("free object {}", object);
-            unsafe { unset_alloc_bit_unsafe::<VM>(object) };
+            unsafe { unset_vo_bit_unsafe::<VM>(object) };
 
             true
         } else {
@@ -615,30 +615,35 @@ impl<VM: VMBinding> MallocSpace<VM> {
         let chunk_end = chunk_start + BYTES_IN_CHUNK;
 
         debug_assert!(
-            crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC.log_bytes_in_region
+            crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region
                 == mark_bit_spec.log_bytes_in_region,
-            "Alloc-bit and mark-bit metadata have different minimum object sizes!"
+            "VO-bit and mark-bit metadata have different minimum object sizes!"
         );
 
         // For bulk xor'ing 128-bit vectors on architectures with vector instructions
         // Each bit represents an object of LOG_MIN_OBJ_SIZE size
-        let bulk_load_size: usize =
-            128 * (1 << crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC.log_bytes_in_region);
+        let bulk_load_size: usize = 128
+            * (1 << crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC
+                .log_bytes_in_region);
 
         // The start of a possibly empty page. This will be updated during the sweeping, and always points to the next page of last live objects.
         let mut empty_page_start = Address::ZERO;
 
         // Scan the chunk by every 'bulk_load_size' region.
         while address < chunk_end {
-            let alloc_128: u128 =
-                unsafe { load128(&crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC, address) };
+            let alloc_128: u128 = unsafe {
+                load128(
+                    &crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC,
+                    address,
+                )
+            };
             let mark_128: u128 = unsafe { load128(&mark_bit_spec, address) };
 
             // Check if there are dead objects in the bulk loaded region
             if alloc_128 ^ mark_128 != 0 {
                 let end = address + bulk_load_size;
 
-                // We will do non atomic load on the alloc bit, as this is the only thread that access the alloc bit for a chunk.
+                // We will do non atomic load on the VO bit, as this is the only thread that access the VO bit for a chunk.
                 // Linear scan through the bulk load region.
                 let bulk_load_scan = crate::util::linear_scan::ObjectIterator::<
                     VM,
@@ -676,7 +681,7 @@ impl<VM: VMBinding> MallocSpace<VM> {
             if ASSERT_ALLOCATION {
                 debug_assert!(
                     self.active_mem.lock().unwrap().contains_key(&obj_start),
-                    "Address {} with alloc bit is not in active_mem",
+                    "Address {} with VO bit is not in active_mem",
                     obj_start
                 );
                 debug_assert_eq!(
@@ -742,7 +747,7 @@ impl<VM: VMBinding> MallocSpace<VM> {
         let (obj_start, _, bytes) = Self::get_malloc_addr_size(object);
         debug_assert!(
             self.active_mem.lock().unwrap().contains_key(&obj_start),
-            "Address {} with alloc bit is not in active_mem",
+            "Address {} with VO bit is not in active_mem",
             obj_start
        );
         debug_assert_eq!(
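The sweep above keys off a single invariant: an object is dead exactly when its VO bit is set but its mark bit is not, so XOR-ing 128 VO bits against 128 mark bits flags any region that needs a linear sweep. A self-contained sketch of that check, with plain `u128` values standing in for the `load128` side-metadata reads:

```rust
/// Decide whether a 128-object region needs a linear sweep.
/// `vo_bits` has one bit set per min-object-size region holding an object;
/// `mark_bits` has one bit set per object reached during tracing.
/// Their XOR is non-zero exactly when some allocated object was not marked,
/// i.e. the region contains at least one dead object.
fn region_has_dead_objects(vo_bits: u128, mark_bits: u128) -> bool {
    vo_bits ^ mark_bits != 0
}

fn main() {
    // Three objects allocated (bits 0, 2, 5); only bits 0 and 5 were marked.
    let vo_bits: u128 = 0b100101;
    let mark_bits: u128 = 0b100001;
    // Bit 2 survives the XOR, so this region must be swept.
    assert!(region_has_dead_objects(vo_bits, mark_bits));
    // If every allocated object is marked, the whole region can be skipped.
    assert!(!region_has_dead_objects(vo_bits, vo_bits));
}
```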
diff --git a/src/policy/marksweepspace/malloc_ms/metadata.rs b/src/policy/marksweepspace/malloc_ms/metadata.rs
index 65ad4ee434..ea28f90fe3 100644
--- a/src/policy/marksweepspace/malloc_ms/metadata.rs
+++ b/src/policy/marksweepspace/malloc_ms/metadata.rs
@@ -1,9 +1,9 @@
-use crate::util::alloc_bit;
 use crate::util::conversions;
 use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
 use crate::util::metadata::side_metadata;
 use crate::util::metadata::side_metadata::SideMetadataContext;
 use crate::util::metadata::side_metadata::SideMetadataSpec;
+use crate::util::metadata::vo_bit;
 use crate::util::Address;
 use crate::util::ObjectReference;
 use crate::vm::{ObjectModel, VMBinding};
@@ -105,7 +105,7 @@ fn map_active_chunk_metadata(chunk_start: Address) {
     );
 }
 
-/// We map the active chunk metadata (if not previously mapped), as well as the alloc bit metadata
+/// We map the active chunk metadata (if not previously mapped), as well as the VO bit metadata
 /// and active page metadata here. Note that if [addr, addr + size) crosses multiple chunks, we
 /// will map for each chunk.
 pub fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size: usize) {
@@ -155,19 +155,19 @@ pub fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size: usize
 /// Check if a given object was allocated by malloc
 pub fn is_alloced_by_malloc<VM: VMBinding>(object: ObjectReference) -> bool {
     is_meta_space_mapped_for_address(object.to_address::<VM>())
-        && alloc_bit::is_alloced::<VM>(object)
+        && vo_bit::is_vo_bit_set::<VM>(object)
 }
 
 /// Check if there is an object allocated by malloc at the address.
 ///
 /// This function doesn't check if `addr` is aligned.
-/// If not, it will try to load the alloc bit for the address rounded down to the metadata's granularity.
+/// If not, it will try to load the VO bit for the address rounded down to the metadata's granularity.
 #[cfg(feature = "is_mmtk_object")]
 pub fn has_object_alloced_by_malloc<VM: VMBinding>(addr: Address) -> Option<ObjectReference> {
     if !is_meta_space_mapped_for_address(addr) {
         return None;
     }
-    alloc_bit::is_alloced_object::<VM>(addr)
+    vo_bit::is_vo_bit_set_for_addr::<VM>(addr)
 }
 
 pub fn is_marked<VM: VMBinding>(object: ObjectReference, ordering: Ordering) -> bool {
@@ -218,8 +218,8 @@ pub unsafe fn is_chunk_marked_unsafe(chunk_start: Address) -> bool {
     ACTIVE_CHUNK_METADATA_SPEC.load::<u8>(chunk_start) == 1
 }
 
-pub fn set_alloc_bit<VM: VMBinding>(object: ObjectReference) {
-    alloc_bit::set_alloc_bit::<VM>(object);
+pub fn set_vo_bit<VM: VMBinding>(object: ObjectReference) {
+    vo_bit::set_vo_bit::<VM>(object);
 }
 
 pub fn set_mark_bit<VM: VMBinding>(object: ObjectReference, ordering: Ordering) {
@@ -227,8 +227,8 @@ pub fn set_mark_bit<VM: VMBinding>(object: ObjectReference, ordering: Ordering)
 }
 
 #[allow(unused)]
-pub fn unset_alloc_bit<VM: VMBinding>(object: ObjectReference) {
-    alloc_bit::unset_alloc_bit::<VM>(object);
+pub fn unset_vo_bit<VM: VMBinding>(object: ObjectReference) {
+    vo_bit::unset_vo_bit::<VM>(object);
 }
 
 #[allow(unused)]
@@ -255,8 +255,8 @@ pub(super) unsafe fn unset_offset_malloc_bit_unsafe(address: Address) {
     OFFSET_MALLOC_METADATA_SPEC.store::<u8>(address, 0);
 }
 
-pub unsafe fn unset_alloc_bit_unsafe<VM: VMBinding>(object: ObjectReference) {
-    alloc_bit::unset_alloc_bit_unsafe::<VM>(object);
+pub unsafe fn unset_vo_bit_unsafe<VM: VMBinding>(object: ObjectReference) {
+    vo_bit::unset_vo_bit_unsafe::<VM>(object);
 }
 
 #[allow(unused)]
diff --git a/src/policy/marksweepspace/native_ms/block.rs b/src/policy/marksweepspace/native_ms/block.rs
index a7276c709b..024dddd379 100644
--- a/src/policy/marksweepspace/native_ms/block.rs
+++ b/src/policy/marksweepspace/native_ms/block.rs
@@ -287,10 +287,10 @@ impl Block {
             if !VM::VMObjectModel::LOCAL_MARK_BIT_SPEC
                 .is_marked::<VM>(potential_object, Ordering::SeqCst)
             {
-                // clear alloc bit if it is ever set. It is possible that the alloc bit is never set for this cell (i.e. there was no object in this cell before this GC),
+                // clear VO bit if it is ever set. It is possible that the VO bit is never set for this cell (i.e. there was no object in this cell before this GC),
                 // we unset the bit anyway.
-                #[cfg(feature = "global_alloc_bit")]
-                crate::util::alloc_bit::unset_alloc_bit_nocheck::<VM>(potential_object);
+                #[cfg(feature = "vo_bit")]
+                crate::util::metadata::vo_bit::unset_vo_bit_nocheck::<VM>(potential_object);
                 unsafe {
                     cell.store::<Address>(last);
                 }
@@ -353,9 +353,9 @@ impl Block {
                         self, cell, last
                     );
 
-                    // Clear alloc bit: we don't know where the object reference actually is, so we bulk zero the cell.
-                    #[cfg(feature = "global_alloc_bit")]
-                    crate::util::alloc_bit::bzero_alloc_bit(cell, cell_size);
+                    // Clear VO bit: we don't know where the object reference actually is, so we bulk zero the cell.
+                    #[cfg(feature = "vo_bit")]
+                    crate::util::metadata::vo_bit::bzero_vo_bit(cell, cell_size);
 
                     // store the previous cell to make the free list
                     debug_assert!(last.is_zero() || (last >= self.start() && last < self.end()));
diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs
index 15ab359785..859d8007ed 100644
--- a/src/policy/marksweepspace/native_ms/global.rs
+++ b/src/policy/marksweepspace/native_ms/global.rs
@@ -107,13 +107,13 @@ impl<VM: VMBinding> SFT for MarkSweepSpace<VM> {
     }
 
     fn initialize_object_metadata(&self, _object: crate::util::ObjectReference, _alloc: bool) {
-        #[cfg(feature = "global_alloc_bit")]
-        crate::util::alloc_bit::set_alloc_bit::<VM>(_object);
+        #[cfg(feature = "vo_bit")]
+        crate::util::metadata::vo_bit::set_vo_bit::<VM>(_object);
     }
 
     #[cfg(feature = "is_mmtk_object")]
     fn is_mmtk_object(&self, addr: Address) -> bool {
-        crate::util::alloc_bit::is_alloced_object::<VM>(addr).is_some()
+        crate::util::metadata::vo_bit::is_vo_bit_set_for_addr::<VM>(addr).is_some()
     }
 
     fn sft_trace_object(
@@ -283,8 +283,8 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {
         for metadata_spec in Block::METADATA_SPECS {
             metadata_spec.set_zero_atomic(block.start(), Ordering::SeqCst);
         }
-        #[cfg(feature = "global_alloc_bit")]
-        crate::util::alloc_bit::bzero_alloc_bit(block.start(), Block::BYTES);
+        #[cfg(feature = "vo_bit")]
+        crate::util::metadata::vo_bit::bzero_vo_bit(block.start(), Block::BYTES);
     }
 
     pub fn acquire_block(&self, tls: VMThread, size: usize, align: usize) -> BlockAcquireResult {
diff --git a/src/util/alloc/free_list_allocator.rs b/src/util/alloc/free_list_allocator.rs
index 8bbc8b8dd1..e7e07f7edf 100644
--- a/src/util/alloc/free_list_allocator.rs
+++ b/src/util/alloc/free_list_allocator.rs
@@ -393,9 +393,9 @@ impl<VM: VMBinding> FreeListAllocator<VM> {
 
         // unset allocation bit
         unsafe {
-            crate::util::alloc_bit::unset_alloc_bit_unsafe::<VM>(ObjectReference::from_raw_address(
-                addr,
-            ))
+            crate::util::metadata::vo_bit::unset_vo_bit_unsafe::<VM>(
+                ObjectReference::from_raw_address(addr),
+            )
         };
     }
 
diff --git a/src/util/alloc_bit.rs b/src/util/alloc_bit.rs
deleted file mode 100644
index 0a2f37f9a8..0000000000
--- a/src/util/alloc_bit.rs
+++ /dev/null
@@ -1,95 +0,0 @@
-use atomic::Ordering;
-
-use crate::util::metadata::side_metadata::SideMetadataSpec;
-use crate::util::Address;
-use crate::util::ObjectReference;
-use crate::vm::VMBinding;
-
-/// An alloc-bit is required per min-object-size aligned address , rather than per object, and can only exist as side metadata.
-pub(crate) const ALLOC_SIDE_METADATA_SPEC: SideMetadataSpec =
-    crate::util::metadata::side_metadata::spec_defs::ALLOC_BIT;
-
-pub const ALLOC_SIDE_METADATA_ADDR: Address = ALLOC_SIDE_METADATA_SPEC.get_absolute_offset();
-
-/// Atomically set the alloc bit for an object.
-pub fn set_alloc_bit<VM: VMBinding>(object: ObjectReference) {
-    debug_assert!(
-        !is_alloced::<VM>(object),
-        "{:x}: alloc bit already set",
-        object
-    );
-    ALLOC_SIDE_METADATA_SPEC.store_atomic::<u8>(object.to_address::<VM>(), 1, Ordering::SeqCst);
-}
-
-/// Atomically unset the alloc bit for an object.
-pub fn unset_alloc_bit<VM: VMBinding>(object: ObjectReference) {
-    debug_assert!(is_alloced::<VM>(object), "{:x}: alloc bit not set", object);
-    ALLOC_SIDE_METADATA_SPEC.store_atomic::<u8>(object.to_address::<VM>(), 0, Ordering::SeqCst);
-}
-
-/// Atomically unset the alloc bit for an object, regardless whether the bit is set or not.
-pub fn unset_alloc_bit_nocheck<VM: VMBinding>(object: ObjectReference) {
-    ALLOC_SIDE_METADATA_SPEC.store_atomic::<u8>(object.to_address::<VM>(), 0, Ordering::SeqCst);
-}
-
-/// Non-atomically unset the alloc bit for an object. The caller needs to ensure the side
-/// metadata for the alloc bit for the object is accessed by only one thread.
-///
-/// # Safety
-///
-/// This is unsafe: check the comment on `side_metadata::store`
-pub unsafe fn unset_alloc_bit_unsafe<VM: VMBinding>(object: ObjectReference) {
-    debug_assert!(is_alloced::<VM>(object), "{:x}: alloc bit not set", object);
-    ALLOC_SIDE_METADATA_SPEC.store::<u8>(object.to_address::<VM>(), 0);
-}
-
-/// Check if the alloc bit is set for an object.
-pub fn is_alloced<VM: VMBinding>(object: ObjectReference) -> bool {
-    ALLOC_SIDE_METADATA_SPEC.load_atomic::<u8>(object.to_address::<VM>(), Ordering::SeqCst) == 1
-}
-
-/// Check if an address can be turned directly into an object reference using the alloc bit.
-/// If so, return `Some(object)`. Otherwise return `None`.
-pub fn is_alloced_object<VM: VMBinding>(address: Address) -> Option<ObjectReference> {
-    let potential_object = ObjectReference::from_raw_address(address);
-    let addr = potential_object.to_address::<VM>();
-
-    // If we haven't mapped alloc bit for the address, it cannot be an object
-    if !ALLOC_SIDE_METADATA_SPEC.is_mapped(addr) {
-        return None;
-    }
-
-    if ALLOC_SIDE_METADATA_SPEC.load_atomic::<u8>(addr, Ordering::SeqCst) == 1 {
-        Some(potential_object)
-    } else {
-        None
-    }
-}
-
-/// Check if an address can be turned directly into an object reference using the alloc bit.
-/// If so, return `Some(object)`. Otherwise return `None`. The caller needs to ensure the side
-/// metadata for the alloc bit for the object is accessed by only one thread.
-///
-/// # Safety
-///
-/// This is unsafe: check the comment on `side_metadata::load`
-pub unsafe fn is_alloced_object_unsafe<VM: VMBinding>(address: Address) -> Option<ObjectReference> {
-    let potential_object = ObjectReference::from_raw_address(address);
-    let addr = potential_object.to_address::<VM>();
-
-    // If we haven't mapped alloc bit for the address, it cannot be an object
-    if !ALLOC_SIDE_METADATA_SPEC.is_mapped(addr) {
-        return None;
-    }
-
-    if ALLOC_SIDE_METADATA_SPEC.load::<u8>(addr) == 1 {
-        Some(potential_object)
-    } else {
-        None
-    }
-}
-
-/// Bulk zero the alloc bit.
-pub fn bzero_alloc_bit(start: Address, size: usize) {
-    ALLOC_SIDE_METADATA_SPEC.bzero_metadata(start, size);
-}
diff --git a/src/util/is_mmtk_object.rs b/src/util/is_mmtk_object.rs
index 85b8d4315f..05d85472c5 100644
--- a/src/util/is_mmtk_object.rs
+++ b/src/util/is_mmtk_object.rs
@@ -1,4 +1,4 @@
-/// The region size (in bytes) of the `ALLOC_BIT` side metadata.
+/// The region size (in bytes) of the `VO_BIT` side metadata.
 /// The VM can use this to check if an object is properly aligned.
-pub const ALLOC_BIT_REGION_SIZE: usize =
-    1usize << crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC.log_bytes_in_region;
+pub const VO_BIT_REGION_SIZE: usize =
+    1usize << crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region;
diff --git a/src/util/linear_scan.rs b/src/util/linear_scan.rs
index 3b871fe0cb..17a736dcb9 100644
--- a/src/util/linear_scan.rs
+++ b/src/util/linear_scan.rs
@@ -1,27 +1,30 @@
-use crate::util::alloc_bit;
+use crate::util::metadata::vo_bit;
 use crate::util::Address;
 use crate::util::ObjectReference;
 use crate::vm::ObjectModel;
 use crate::vm::VMBinding;
 use std::marker::PhantomData;
 
-/// Iterate over an address range, and find each object by alloc bit.
-/// ATOMIC_LOAD_ALLOC_BIT can be set to false if it is known that loading alloc bit
+// FIXME: MarkCompact uses linear scanning to discover allocated objects in the MarkCompactSpace.
+// It should use a local metadata (specific to the MarkCompactSpace) for that purpose.
+// In the future, we should let MarkCompact do linear scanning using its local metadata instead.
+
+/// Iterate over an address range, and find each object by VO bit.
+/// ATOMIC_LOAD_VO_BIT can be set to false if it is known that loading VO bit
 /// non-atomically is correct (e.g. a single thread is scanning this address range, and
-/// it is the only thread that accesses alloc bit).
-pub struct ObjectIterator<VM: VMBinding, S: LinearScanObjectSize, const ATOMIC_LOAD_ALLOC_BIT: bool>
-{
+/// it is the only thread that accesses VO bit).
+pub struct ObjectIterator<VM: VMBinding, S: LinearScanObjectSize, const ATOMIC_LOAD_VO_BIT: bool> {
     start: Address,
     end: Address,
     cursor: Address,
     _p: PhantomData<(VM, S)>,
 }
 
-impl<VM: VMBinding, S: LinearScanObjectSize, const ATOMIC_LOAD_ALLOC_BIT: bool>
-    ObjectIterator<VM, S, ATOMIC_LOAD_ALLOC_BIT>
+impl<VM: VMBinding, S: LinearScanObjectSize, const ATOMIC_LOAD_VO_BIT: bool>
+    ObjectIterator<VM, S, ATOMIC_LOAD_VO_BIT>
 {
     /// Create an iterator for the address range. The caller must ensure
-    /// that the alloc bit metadata is mapped for the address range.
+    /// that the VO bit metadata is mapped for the address range.
     pub fn new(start: Address, end: Address) -> Self {
         debug_assert!(start < end);
         ObjectIterator {
@@ -33,17 +36,17 @@ impl<VM: VMBinding, S: LinearScanObjectSize, const ATOMIC_LOAD_ALLOC_BIT: bool>
     }
 }
 
-impl<VM: VMBinding, S: LinearScanObjectSize, const ATOMIC_LOAD_ALLOC_BIT: bool> std::iter::Iterator
-    for ObjectIterator<VM, S, ATOMIC_LOAD_ALLOC_BIT>
+impl<VM: VMBinding, S: LinearScanObjectSize, const ATOMIC_LOAD_VO_BIT: bool> std::iter::Iterator
+    for ObjectIterator<VM, S, ATOMIC_LOAD_VO_BIT>
 {
     type Item = ObjectReference;
 
     fn next(&mut self) -> Option<<Self as Iterator>::Item> {
         while self.cursor < self.end {
-            let is_object = if ATOMIC_LOAD_ALLOC_BIT {
-                alloc_bit::is_alloced_object::<VM>(self.cursor)
+            let is_object = if ATOMIC_LOAD_VO_BIT {
+                vo_bit::is_vo_bit_set_for_addr::<VM>(self.cursor)
             } else {
-                unsafe { alloc_bit::is_alloced_object_unsafe::<VM>(self.cursor) }
+                unsafe { vo_bit::is_vo_bit_set_unsafe::<VM>(self.cursor) }
             };
 
             if let Some(object) = is_object {
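For orientation, here is roughly how the renamed iterator gets used (an MMTk-internal sketch, since `ObjectIterator` is `pub(crate)`). The `VMObjectSize` size policy is hypothetical, standing in for policies like the one malloc mark-sweep defines; the caller must have mapped VO-bit metadata for the range, and `false` binds `ATOMIC_LOAD_VO_BIT` because a single thread scans the range:

```rust
use crate::util::linear_scan::{LinearScanObjectSize, ObjectIterator};
use crate::util::{Address, ObjectReference};
use crate::vm::{ObjectModel, VMBinding};

/// Hypothetical size policy: ask the VM's object model for each object's size.
struct VMObjectSize<VM>(std::marker::PhantomData<VM>);

impl<VM: VMBinding> LinearScanObjectSize for VMObjectSize<VM> {
    fn size(object: ObjectReference) -> usize {
        VM::VMObjectModel::get_current_size(object)
    }
}

/// Walk a region whose VO-bit metadata is mapped, visiting every object
/// whose VO bit is set. Non-atomic loads are sound here because exactly
/// one thread scans this region.
fn walk_region<VM: VMBinding>(start: Address, end: Address) {
    for object in ObjectIterator::<VM, VMObjectSize<VM>, false>::new(start, end) {
        let _ = object; // e.g. consult the mark bit to decide liveness
    }
}
```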
diff --git a/src/util/metadata/mod.rs b/src/util/metadata/mod.rs
index 765a8ef741..2090a2f677 100644
--- a/src/util/metadata/mod.rs
+++ b/src/util/metadata/mod.rs
@@ -2,7 +2,7 @@
 //!
 //! This module is designed to enable the implementation of a wide range of GC algorithms for VMs with various combinations of in-object and on-side space for GC-specific metadata (e.g. forwarding bits, marking bit, logging bit, etc.).
 //!
-//! The new metadata design differentiates per-object metadata (e.g. forwarding-bits and marking-bit) from other types of metadata including per-address (e.g. alloc-bit) and per-X (where X != object size), because the per-object metadata can optionally be kept in the object headers.
+//! The new metadata design differentiates per-object metadata (e.g. forwarding-bits and marking-bit) from other types of metadata including per-address (e.g. VO bit) and per-X (where X != object size), because the per-object metadata can optionally be kept in the object headers.
 //!
 //! MMTk acknowledges the VM-dependant nature of the in-object metadata, and asks the VM bindings to contribute by implementing the related parts in the ['ObjectModel'](crate::vm::ObjectModel).
 //!
@@ -227,5 +227,6 @@ pub use metadata_val_traits::*;
 pub(crate) mod log_bit;
 pub(crate) mod mark_bit;
 pub(crate) mod pin_bit;
+pub(crate) mod vo_bit;
 
 pub use global::*;
diff --git a/src/util/metadata/side_metadata/constants.rs b/src/util/metadata/side_metadata/constants.rs
index 6e660e52b7..5b39598b6d 100644
--- a/src/util/metadata/side_metadata/constants.rs
+++ b/src/util/metadata/side_metadata/constants.rs
@@ -11,7 +11,7 @@ use crate::util::Address;
 // reserved addresses such as 0x0.
 // XXXX: I updated the base address for 32 bit to 0x1000_0000. For what I tested on, the library
 // and the malloc heap often starts at 0x800_0000. If we start the metadata from the second 4Mb chunk (i.e. the chunk `[0x40_0000, 0x80_0000)`),
-// we won't be guaranteed enough space before 0x800_0000. For example, the alloc bit is 1 bit per 4 bytes
+// we won't be guaranteed enough space before 0x800_0000. For example, the VO bit is 1 bit per 4 bytes
 // (1 word in 32bits), and it will take the address range of [0x40_000, 0x840_0000) which clashes with
 // the library/heap. So I move this to 0x1000_0000.
 // This is made public, as VM bingdings may need to use this.
@@ -29,8 +29,9 @@ pub const GLOBAL_SIDE_METADATA_BASE_ADDRESS: Address =
 pub(crate) const GLOBAL_SIDE_METADATA_BASE_OFFSET: SideMetadataOffset =
     SideMetadataOffset::addr(GLOBAL_SIDE_METADATA_BASE_ADDRESS);
 
-// Base address of alloc bit, public to VM bindings which may need to use this.
-pub const ALLOC_SIDE_METADATA_ADDR: Address = crate::util::alloc_bit::ALLOC_SIDE_METADATA_ADDR;
+// Base address of VO bit, public to VM bindings which may need to use this.
+pub const VO_BIT_SIDE_METADATA_ADDR: Address =
+    crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_ADDR;
 
 /// This constant represents the worst-case ratio of source data size to global side metadata.
 /// A value of 2 means the space required for global side metadata must be less than 1/4th of the source data.
diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs
index ecf71dcd9a..1cfd33f93b 100644
--- a/src/util/metadata/side_metadata/global.rs
+++ b/src/util/metadata/side_metadata/global.rs
@@ -1,10 +1,10 @@
 use super::*;
-#[cfg(feature = "global_alloc_bit")]
-use crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC;
 use crate::util::constants::{BYTES_IN_PAGE, LOG_BITS_IN_BYTE};
 use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
 use crate::util::memory;
 use crate::util::metadata::metadata_val_traits::*;
+#[cfg(feature = "vo_bit")]
+use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC;
 use crate::util::Address;
 use num_traits::FromPrimitive;
 use std::fmt;
@@ -839,8 +839,8 @@ impl SideMetadataContext {
     pub fn new_global_specs(specs: &[SideMetadataSpec]) -> Vec<SideMetadataSpec> {
         let mut ret = vec![];
 
-        #[cfg(feature = "global_alloc_bit")]
-        ret.push(ALLOC_SIDE_METADATA_SPEC);
+        #[cfg(feature = "vo_bit")]
+        ret.push(VO_BIT_SIDE_METADATA_SPEC);
 
         if let Some(spec) = crate::mmtk::SFT_MAP.get_side_metadata() {
             if spec.is_global {
diff --git a/src/util/metadata/side_metadata/spec_defs.rs b/src/util/metadata/side_metadata/spec_defs.rs
index 09bee53c00..150b28dc28 100644
--- a/src/util/metadata/side_metadata/spec_defs.rs
+++ b/src/util/metadata/side_metadata/spec_defs.rs
@@ -55,7 +55,7 @@ macro_rules! define_side_metadata_specs {
 define_side_metadata_specs!(
     last_spec_as LAST_GLOBAL_SIDE_METADATA_SPEC,
     // Mark the start of an object
-    ALLOC_BIT = (global: true, log_num_of_bits: 0, log_bytes_in_region: LOG_MIN_OBJECT_SIZE as usize),
+    VO_BIT = (global: true, log_num_of_bits: 0, log_bytes_in_region: LOG_MIN_OBJECT_SIZE as usize),
     // Track chunks used by (malloc) marksweep
     MS_ACTIVE_CHUNK = (global: true, log_num_of_bits: 3, log_bytes_in_region: LOG_BYTES_IN_CHUNK),
     // Track the index in SFT map for a chunk (only used for SFT sparse chunk map)
diff --git a/src/util/metadata/vo_bit.rs b/src/util/metadata/vo_bit.rs
new file mode 100644
index 0000000000..51ee3957e8
--- /dev/null
+++ b/src/util/metadata/vo_bit.rs
@@ -0,0 +1,104 @@
+//! Valid object bit (VO bit)
+//!
+//! The valid object bit, or "VO bit" for short, is a global per-address metadata. It is set at
+//! the address of the `ObjectReference` of an object when the object is allocated, and cleared
+//! when the object is reclaimed by the GC.
+//!
+//! The main purpose of VO bit is supporting conservative GC. It is the canonical source of
+//! information about whether there is an object in the MMTk heap at any given address.
+
+use atomic::Ordering;
+
+use crate::util::metadata::side_metadata::SideMetadataSpec;
+use crate::util::Address;
+use crate::util::ObjectReference;
+use crate::vm::VMBinding;
+
+/// A VO bit is required per min-object-size aligned address, rather than per object, and can only exist as side metadata.
+pub(crate) const VO_BIT_SIDE_METADATA_SPEC: SideMetadataSpec =
+    crate::util::metadata::side_metadata::spec_defs::VO_BIT;
+
+pub const VO_BIT_SIDE_METADATA_ADDR: Address = VO_BIT_SIDE_METADATA_SPEC.get_absolute_offset();
+
+/// Atomically set the VO bit for an object.
+pub fn set_vo_bit<VM: VMBinding>(object: ObjectReference) {
+    debug_assert!(
+        !is_vo_bit_set::<VM>(object),
+        "{:x}: VO bit already set",
+        object
+    );
+    VO_BIT_SIDE_METADATA_SPEC.store_atomic::<u8>(object.to_address::<VM>(), 1, Ordering::SeqCst);
+}
+
+/// Atomically unset the VO bit for an object.
+pub fn unset_vo_bit<VM: VMBinding>(object: ObjectReference) {
+    debug_assert!(is_vo_bit_set::<VM>(object), "{:x}: VO bit not set", object);
+    VO_BIT_SIDE_METADATA_SPEC.store_atomic::<u8>(object.to_address::<VM>(), 0, Ordering::SeqCst);
+}
+
+/// Atomically unset the VO bit for an object, regardless whether the bit is set or not.
+pub fn unset_vo_bit_nocheck<VM: VMBinding>(object: ObjectReference) {
+    VO_BIT_SIDE_METADATA_SPEC.store_atomic::<u8>(object.to_address::<VM>(), 0, Ordering::SeqCst);
+}
+
+/// Non-atomically unset the VO bit for an object. The caller needs to ensure the side
+/// metadata for the VO bit for the object is accessed by only one thread.
+///
+/// # Safety
+///
+/// This is unsafe: check the comment on `side_metadata::store`
+pub unsafe fn unset_vo_bit_unsafe<VM: VMBinding>(object: ObjectReference) {
+    debug_assert!(is_vo_bit_set::<VM>(object), "{:x}: VO bit not set", object);
+    VO_BIT_SIDE_METADATA_SPEC.store::<u8>(object.to_address::<VM>(), 0);
+}
+
+/// Check if the VO bit is set for an object.
+pub fn is_vo_bit_set<VM: VMBinding>(object: ObjectReference) -> bool {
+    VO_BIT_SIDE_METADATA_SPEC.load_atomic::<u8>(object.to_address::<VM>(), Ordering::SeqCst) == 1
+}
+
+/// Check if an address can be turned directly into an object reference using the VO bit.
+/// If so, return `Some(object)`. Otherwise return `None`.
+pub fn is_vo_bit_set_for_addr<VM: VMBinding>(address: Address) -> Option<ObjectReference> {
+    let potential_object = ObjectReference::from_raw_address(address);
+    let addr = potential_object.to_address::<VM>();
+
+    // If we haven't mapped VO bit for the address, it cannot be an object
+    if !VO_BIT_SIDE_METADATA_SPEC.is_mapped(addr) {
+        return None;
+    }
+
+    if VO_BIT_SIDE_METADATA_SPEC.load_atomic::<u8>(addr, Ordering::SeqCst) == 1 {
+        Some(potential_object)
+    } else {
+        None
+    }
+}
+
+/// Check if an address can be turned directly into an object reference using the VO bit.
+/// If so, return `Some(object)`. Otherwise return `None`. The caller needs to ensure the side
+/// metadata for the VO bit for the object is accessed by only one thread.
+///
+/// # Safety
+///
+/// This is unsafe: check the comment on `side_metadata::load`
+pub unsafe fn is_vo_bit_set_unsafe<VM: VMBinding>(address: Address) -> Option<ObjectReference> {
+    let potential_object = ObjectReference::from_raw_address(address);
+    let addr = potential_object.to_address::<VM>();
+
+    // If we haven't mapped VO bit for the address, it cannot be an object
+    if !VO_BIT_SIDE_METADATA_SPEC.is_mapped(addr) {
+        return None;
+    }
+
+    if VO_BIT_SIDE_METADATA_SPEC.load::<u8>(addr) == 1 {
+        Some(potential_object)
+    } else {
+        None
+    }
+}
+
+/// Bulk zero the VO bit.
+pub fn bzero_vo_bit(start: Address, size: usize) {
+    VO_BIT_SIDE_METADATA_SPEC.bzero_metadata(start, size);
+}
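As a back-of-envelope check on the `VO_BIT` spec above (`log_num_of_bits: 0`, `log_bytes_in_region: LOG_MIN_OBJECT_SIZE`): one bit covers one word, so on a 64-bit target the bitmap costs 1/64th of the address range it covers. The sketch below is a hypothetical flat-bitmap version of the address-to-bit mapping; the real side-metadata layout additionally offsets each spec within the global metadata space:

```rust
/// Locate the VO bit for a heap address, assuming a flat bitmap at `base`
/// with one bit per 2^LOG_MIN_OBJECT_SIZE-byte region (hypothetical layout).
const LOG_MIN_OBJECT_SIZE: usize = 3; // 8-byte words on a 64-bit target

fn vo_bit_position(base: usize, addr: usize) -> (usize, u8) {
    let region = addr >> LOG_MIN_OBJECT_SIZE; // which word-sized region
    let byte = base + (region >> 3); // 8 regions per metadata byte
    let bit = (region & 0b111) as u8; // bit index within that byte
    (byte, bit)
}

fn main() {
    let (byte, bit) = vo_bit_position(0x2000_0000, 0x4000_0048);
    // Region index 0x0800_0009 maps to metadata byte 0x0100_0001, bit 1.
    assert_eq!(byte, 0x2000_0000 + 0x0100_0001);
    assert_eq!(bit, 1);
}
```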
diff --git a/src/util/mod.rs b/src/util/mod.rs
index 9156ea81e8..ae859bedfb 100644
--- a/src/util/mod.rs
+++ b/src/util/mod.rs
@@ -29,8 +29,6 @@ pub mod options;
 pub mod reference_processor;
 
 // The following modules are only public in the mmtk crate. They should only be used in MMTk core.
-/// Alloc bit
-pub(crate) mod alloc_bit;
 /// An analysis framework for collecting data and profiling in GC.
 #[cfg(feature = "analysis")]
 pub(crate) mod analysis;
diff --git a/src/util/object_forwarding.rs b/src/util/object_forwarding.rs
index 52ac0bffda..b7e602a9dc 100644
--- a/src/util/object_forwarding.rs
+++ b/src/util/object_forwarding.rs
@@ -81,8 +81,8 @@ pub fn forward_object<VM: VMBinding>(
     copy_context: &mut GCWorkerCopyContext<VM>,
 ) -> ObjectReference {
     let new_object = VM::VMObjectModel::copy(object, semantics, copy_context);
-    #[cfg(feature = "global_alloc_bit")]
-    crate::util::alloc_bit::set_alloc_bit::<VM>(new_object);
+    #[cfg(feature = "vo_bit")]
+    crate::util::metadata::vo_bit::set_vo_bit::<VM>(new_object);
     if let Some(shift) = forwarding_bits_offset_in_forwarding_pointer::<VM>() {
         VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC.store_atomic::<VM, usize>(
             object,
diff --git a/vmbindings/dummyvm/src/tests/conservatism.rs b/vmbindings/dummyvm/src/tests/conservatism.rs
index 57288b00fa..fd4313e2d4 100644
--- a/vmbindings/dummyvm/src/tests/conservatism.rs
+++ b/vmbindings/dummyvm/src/tests/conservatism.rs
@@ -5,7 +5,7 @@ use crate::api::*;
 use crate::object_model::OBJECT_REF_OFFSET;
 use crate::tests::fixtures::{Fixture, SingleObject};
 use mmtk::util::constants::LOG_BITS_IN_WORD;
-use mmtk::util::is_mmtk_object::ALLOC_BIT_REGION_SIZE;
+use mmtk::util::is_mmtk_object::VO_BIT_REGION_SIZE;
 use mmtk::util::*;
 
 lazy_static! {
@@ -13,7 +13,7 @@
 }
 
 fn basic_filter(addr: Address) -> bool {
-    !addr.is_zero() && addr.as_usize() % ALLOC_BIT_REGION_SIZE == (OBJECT_REF_OFFSET % ALLOC_BIT_REGION_SIZE)
+    !addr.is_zero() && addr.as_usize() % VO_BIT_REGION_SIZE == (OBJECT_REF_OFFSET % VO_BIT_REGION_SIZE)
 }
 
 fn assert_filter_pass(addr: Address) {