diff --git a/Cargo.toml b/Cargo.toml index 8f8b9d4e38..16323370a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -78,11 +78,8 @@ ro_space = [] # TODO: This is not properly implemented yet. We currently use an immortal space instead, and all our spaces have execution permission at the moment. code_space = [] -# metadata -global_alloc_bit = [] - -# conservative garbage collection support -is_mmtk_object = ["global_alloc_bit"] +# Valid-object bit, useful for conservative garbage collection. +vo_bit = [] # The following two features are useful for using Immix for VMs that do not support moving GC. diff --git a/src/memory_manager.rs b/src/memory_manager.rs index ec14799225..2dbac2ef57 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -570,54 +570,6 @@ pub fn is_live_object(object: ObjectReference) -> bool { object.is_live() } -/// Check if `addr` is the address of an object reference to an MMTk object. -/// -/// Concretely: -/// 1. Return true if `addr.to_object_reference()` is a valid object reference to an object in any -/// space in MMTk. -/// 2. Also return true if there exists an `objref: ObjectReference` such that -/// - `objref` is a valid object reference to an object in any space in MMTk, and -/// - `lo <= objref.to_address() < hi`, where -/// - `lo = addr.align_down(ALLOC_BIT_REGION_SIZE)` and -/// - `hi = lo + ALLOC_BIT_REGION_SIZE` and -/// - `ALLOC_BIT_REGION_SIZE` is [`crate::util::is_mmtk_object::ALLOC_BIT_REGION_SIZE`]. -/// It is the byte granularity of the alloc bit. -/// 3. Return false otherwise. This function never panics. -/// -/// Case 2 means **this function is imprecise for misaligned addresses**. -/// This function uses the "alloc bits" side metadata, i.e. a bitmap. -/// For space efficiency, each bit of the bitmap governs a small region of memory. -/// The size of a region is currently defined as the [minimum object size](crate::util::constants::MIN_OBJECT_SIZE), -/// which is currently defined as the [word size](crate::util::constants::BYTES_IN_WORD), -/// which is 4 bytes on 32-bit systems or 8 bytes on 64-bit systems. -/// The alignment of a region is also the region size. -/// If an alloc bit is `1`, the bitmap cannot tell which address within the 4-byte or 8-byte region -/// is the valid object reference. -/// Therefore, if the input `addr` is not properly aligned, but is close to a valid object -/// reference, this function may still return true. -/// -/// For the reason above, the VM **must check if `addr` is properly aligned** before calling this -/// function. For most VMs, valid object references are always aligned to the word size, so -/// checking `addr.is_aligned_to(BYTES_IN_WORD)` should usually work. If you are paranoid, you can -/// always check against [`crate::util::is_mmtk_object::ALLOC_BIT_REGION_SIZE`]. -/// -/// This function is useful for conservative root scanning. The VM can iterate through all words in -/// a stack, filter out zeros, misaligned words, obviously out-of-range words (such as addresses -/// greater than `0x0000_7fff_ffff_ffff` on Linux on x86_64), and use this function to deside if the -/// word is really a reference. -/// -/// Note: This function has special behaviors if the VM space (enabled by the `vm_space` feature) -/// is present. See `crate::plan::global::BasePlan::vm_space`. -/// -/// Argument: -/// * `addr`: An arbitrary address. 
-#[cfg(feature = "is_mmtk_object")] -pub fn is_mmtk_object(addr: Address) -> bool { - use crate::mmtk::SFT_MAP; - use crate::policy::sft_map::SFTMap; - SFT_MAP.get_checked(addr).is_mmtk_object(addr) -} - /// Return true if the `object` lies in a region of memory where /// - only MMTk can allocate into, or /// - only MMTk's delegated memory allocator (such as a malloc implementation) can allocate into @@ -632,8 +584,8 @@ pub fn is_mmtk_object(addr: Address) -> bool { /// object for the VM in response to `memory_manager::alloc`, this function will return true; but /// if the VM directly called `malloc` to allocate the object, this function will return false. /// -/// If `is_mmtk_object(object.to_address())` returns true, `is_in_mmtk_spaces(object)` must also -/// return true. +/// If `src::util::metadata::vo_bit::is_valid_mmtk_object(object)` returns true, +/// `is_in_mmtk_spaces(object)` must also return true. /// /// This function is useful if an object reference in the VM can be either a pointer into the MMTk /// heap, or a pointer to non-MMTk objects. If the VM has a pre-built boot image that contains diff --git a/src/plan/global.rs b/src/plan/global.rs index 840f1046dd..6a7b5922c9 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -419,12 +419,13 @@ pub struct BasePlan { /// A VM space is a space allocated and populated by the VM. Currently it is used by JikesRVM /// for boot image. /// - /// If VM space is present, it has some special interaction with the - /// `memory_manager::is_mmtk_object` and the `memory_manager::is_in_mmtk_spaces` functions. + /// If VM space is present, it has some special interaction with the VO-bit metadata and the + /// `memory_manager::is_in_mmtk_spaces` functions. /// - /// - The `is_mmtk_object` funciton requires the alloc_bit side metadata to identify objects, - /// but currently we do not require the boot image to provide it, so it will not work if the - /// address argument is in the VM space. + /// - Currently we do not require the boot image to provide a bitmap to be used as the VO-bit + /// metadata, so features that require the VO-bit will not work. Conservative GC will not + /// be able to identify if an address is a valid object reference if it points into the VM + /// space. /// /// - The `is_in_mmtk_spaces` currently returns `true` if the given object reference is in /// the VM space. 
diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 4195f4ae75..27dde6f1b4 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -15,14 +15,14 @@ use crate::policy::space::Space; use crate::scheduler::gc_work::*; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; -#[cfg(not(feature = "global_alloc_bit"))] -use crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC; use crate::util::copy::CopySemantics; use crate::util::heap::layout::heap_layout::Mmapper; use crate::util::heap::layout::heap_layout::VMMap; use crate::util::heap::HeapMeta; use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::{SideMetadataContext, SideMetadataSanity}; +#[cfg(not(feature = "vo_bit"))] +use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC; use crate::util::opaque_pointer::*; use crate::util::options::Options; use crate::vm::VMBinding; @@ -179,15 +179,15 @@ impl Plan for MarkCompact { impl MarkCompact { pub fn new(vm_map: &'static VMMap, mmapper: &'static Mmapper, options: Arc) -> Self { let mut heap = HeapMeta::new(&options); - // if global_alloc_bit is enabled, ALLOC_SIDE_METADATA_SPEC will be added to + // if vo_bit is enabled, VO_BIT_SIDE_METADATA_SPEC will be added to // SideMetadataContext by default, so we don't need to add it here. - #[cfg(feature = "global_alloc_bit")] + #[cfg(feature = "vo_bit")] let global_metadata_specs = SideMetadataContext::new_global_specs(&[]); - // if global_alloc_bit is NOT enabled, - // we need to add ALLOC_SIDE_METADATA_SPEC to SideMetadataContext here. - #[cfg(not(feature = "global_alloc_bit"))] + // if vo_bit is NOT enabled, + // we need to add VO_BIT_SIDE_METADATA_SPEC to SideMetadataContext here. + #[cfg(not(feature = "vo_bit"))] let global_metadata_specs = - SideMetadataContext::new_global_specs(&[ALLOC_SIDE_METADATA_SPEC]); + SideMetadataContext::new_global_specs(&[VO_BIT_SIDE_METADATA_SPEC]); let mc_space = MarkCompactSpace::new( "mark_compact_space", diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index 8029a13b79..922c43c66b 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -11,12 +11,12 @@ use crate::policy::mallocspace::MallocSpace; use crate::policy::space::Space; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; -#[cfg(not(feature = "global_alloc_bit"))] -use crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC; use crate::util::heap::layout::heap_layout::Mmapper; use crate::util::heap::layout::heap_layout::VMMap; use crate::util::heap::HeapMeta; use crate::util::metadata::side_metadata::{SideMetadataContext, SideMetadataSanity}; +#[cfg(not(feature = "vo_bit"))] +use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC; use crate::util::options::Options; use crate::util::VMWorkerThread; use crate::vm::VMBinding; @@ -97,16 +97,16 @@ impl Plan for MarkSweep { impl MarkSweep { pub fn new(vm_map: &'static VMMap, mmapper: &'static Mmapper, options: Arc) -> Self { let heap = HeapMeta::new(&options); - // if global_alloc_bit is enabled, ALLOC_SIDE_METADATA_SPEC will be added to + // if vo_bit is enabled, VO_BIT_SIDE_METADATA_SPEC will be added to // SideMetadataContext by default, so we don't need to add it here. 
- #[cfg(feature = "global_alloc_bit")] + #[cfg(feature = "vo_bit")] let global_metadata_specs = SideMetadataContext::new_global_specs(&[ACTIVE_CHUNK_METADATA_SPEC]); - // if global_alloc_bit is NOT enabled, - // we need to add ALLOC_SIDE_METADATA_SPEC to SideMetadataContext here. - #[cfg(not(feature = "global_alloc_bit"))] + // if vo_bit is NOT enabled, + // we need to add VO_BIT_SIDE_METADATA_SPEC to SideMetadataContext here. + #[cfg(not(feature = "vo_bit"))] let global_metadata_specs = SideMetadataContext::new_global_specs(&[ - ALLOC_SIDE_METADATA_SPEC, + VO_BIT_SIDE_METADATA_SPEC, ACTIVE_CHUNK_METADATA_SPEC, ]); diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs index 34368e01f7..e43617dc98 100644 --- a/src/policy/copyspace.rs +++ b/src/policy/copyspace.rs @@ -7,7 +7,7 @@ use crate::policy::space::{CommonSpace, Space}; use crate::scheduler::GCWorker; use crate::util::copy::*; use crate::util::heap::layout::heap_layout::{Mmapper, VMMap}; -#[cfg(feature = "global_alloc_bit")] +#[cfg(feature = "vo_bit")] use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; use crate::util::heap::HeapMeta; use crate::util::heap::VMRequest; @@ -46,8 +46,8 @@ impl SFT for CopySpace { } fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(_object); + #[cfg(feature = "vo_bit")] + crate::util::metadata::vo_bit::set_vo_bit(_object); } #[inline(always)] @@ -182,21 +182,21 @@ impl CopySpace { pub fn release(&self) { unsafe { - #[cfg(feature = "global_alloc_bit")] - self.reset_alloc_bit(); + #[cfg(feature = "vo_bit")] + self.reset_vo_bit(); self.pr.reset(); } self.common.metadata.reset(); self.from_space.store(false, Ordering::SeqCst); } - #[cfg(feature = "global_alloc_bit")] - unsafe fn reset_alloc_bit(&self) { + #[cfg(feature = "vo_bit")] + unsafe fn reset_vo_bit(&self) { let current_chunk = self.pr.get_current_chunk(); if self.common.contiguous { - // If we have allocated something into this space, we need to clear its alloc bit. + // If we have allocated something into this space, we need to clear its VO-bit. if current_chunk != self.common.start { - crate::util::alloc_bit::bzero_alloc_bit( + crate::util::metadata::vo_bit::bzero_vo_bit( self.common.start, current_chunk + BYTES_IN_CHUNK - self.common.start, ); @@ -229,10 +229,10 @@ impl CopySpace { // This object is in from space, we will copy. Make sure we have a valid copy semantic. debug_assert!(semantics.is_some()); - #[cfg(feature = "global_alloc_bit")] + #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::alloc_bit::is_alloced(object), - "{:x}: alloc bit not set", + crate::util::metadata::vo_bit::is_vo_bit_set(object), + "{:x}: VO-bit not set", object ); diff --git a/src/policy/immix/block.rs b/src/policy/immix/block.rs index 930568ed39..f79df0f25d 100644 --- a/src/policy/immix/block.rs +++ b/src/policy/immix/block.rs @@ -183,8 +183,10 @@ impl Block { /// Deinitalize a block before releasing. 
#[inline] pub fn deinit(&self) { - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::bzero_alloc_bit(self.start(), Self::BYTES); + #[cfg(feature = "vo_bit")] + unsafe { + crate::util::metadata::vo_bit::bzero_vo_bit(self.start(), Self::BYTES); + } self.set_state(BlockState::Unallocated); } diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index f625275312..489e91d7c3 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -76,8 +76,8 @@ impl SFT for ImmixSpace { true } fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(_object); + #[cfg(feature = "vo_bit")] + crate::util::metadata::vo_bit::set_vo_bit(_object); } #[inline(always)] fn sft_trace_object( @@ -388,10 +388,10 @@ impl ImmixSpace { semantics: CopySemantics, worker: &mut GCWorker, ) -> ObjectReference { - #[cfg(feature = "global_alloc_bit")] + #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::alloc_bit::is_alloced(object), - "{:x}: alloc bit not set", + crate::util::metadata::vo_bit::is_vo_bit_set(object), + "{:x}: VO-bit not set", object ); if Block::containing::(object).is_defrag_source() { @@ -482,8 +482,8 @@ impl ImmixSpace { Block::containing::(object).set_state(BlockState::Marked); object } else { - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::unset_alloc_bit(object); + #[cfg(feature = "vo_bit")] + crate::util::metadata::vo_bit::unset_vo_bit(object); ForwardingWord::forward_object::(object, semantics, copy_context) }; debug_assert_eq!( diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs index 68d699b295..378c4e6398 100644 --- a/src/policy/immortalspace.rs +++ b/src/policy/immortalspace.rs @@ -69,8 +69,8 @@ impl SFT for ImmortalSpace { if self.common.needs_log_bit { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); } - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(object); + #[cfg(feature = "vo_bit")] + crate::util::metadata::vo_bit::set_vo_bit(object); } #[inline(always)] fn sft_trace_object( @@ -207,10 +207,10 @@ impl ImmortalSpace { queue: &mut Q, object: ObjectReference, ) -> ObjectReference { - #[cfg(feature = "global_alloc_bit")] + #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::alloc_bit::is_alloced(object), - "{:x}: alloc bit not set", + crate::util::metadata::vo_bit::is_vo_bit_set(object), + "{:x}: VO-bit not set", object ); if ImmortalSpace::::test_and_mark(object, self.mark_state) { diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index cbb7d9a76a..62af8fe5d4 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -72,8 +72,8 @@ impl SFT for LargeObjectSpace { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); } - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(object); + #[cfg(feature = "vo_bit")] + crate::util::metadata::vo_bit::set_vo_bit(object); let cell = VM::VMObjectModel::object_start_ref(object); self.treadmill.add_to_treadmill(cell, alloc); } @@ -203,10 +203,10 @@ impl LargeObjectSpace { queue: &mut Q, object: ObjectReference, ) -> ObjectReference { - #[cfg(feature = "global_alloc_bit")] + #[cfg(feature = "vo_bit")] debug_assert!( - crate::util::alloc_bit::is_alloced(object), - "{:x}: alloc bit not set", + crate::util::metadata::vo_bit::is_vo_bit_set(object), + "{:x}: VO-bit not set", object ); let 
nursery_object = self.is_in_nursery(object); @@ -231,18 +231,22 @@ impl LargeObjectSpace { // FIXME: borrow checker fighting // didn't call self.release_multiple_pages // so the compiler knows I'm borrowing two different fields + + // FIXME: Use local metadata instead of VO-bit. + // The `unsafe { cell.to_object_reference() }` below indicates that VO-bit is not what we + // want here. VO-bit is set at ObjectReference, which is different from the cell address. if sweep_nursery { for cell in self.treadmill.collect_nursery() { // println!("- cn {}", cell); - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::unset_addr_alloc_bit(cell); + #[cfg(feature = "vo_bit")] + crate::util::metadata::vo_bit::unset_vo_bit(unsafe { cell.to_object_reference() }); self.pr.release_pages(get_super_page(cell)); } } else { for cell in self.treadmill.collect() { // println!("- ts {}", cell); - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::unset_addr_alloc_bit(cell); + #[cfg(feature = "vo_bit")] + crate::util::metadata::vo_bit::unset_vo_bit(unsafe { cell.to_object_reference() }); self.pr.release_pages(get_super_page(cell)); } } diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs index c836b32982..e8c462c1de 100644 --- a/src/policy/lockfreeimmortalspace.rs +++ b/src/policy/lockfreeimmortalspace.rs @@ -58,8 +58,8 @@ impl SFT for LockFreeImmortalSpace { unimplemented!() } fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) { - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(_object); + #[cfg(feature = "vo_bit")] + crate::util::metadata::vo_bit::set_vo_bit(_object); } fn sft_trace_object( &self, diff --git a/src/policy/mallocspace/global.rs b/src/policy/mallocspace/global.rs index 1a9c248091..79f0f47b0b 100644 --- a/src/policy/mallocspace/global.rs +++ b/src/policy/mallocspace/global.rs @@ -78,13 +78,13 @@ impl SFT for MallocSpace { } /// For malloc space, we just use the side metadata. - #[cfg(feature = "is_mmtk_object")] + #[cfg(feature = "vo_bit")] #[inline(always)] - fn is_mmtk_object(&self, addr: Address) -> bool { - debug_assert!(!addr.is_zero()); + fn is_valid_mmtk_object(&self, object: ObjectReference) -> bool { + debug_assert!(!object.is_null()); // `addr` cannot be mapped by us. It should be mapped by the malloc library. - debug_assert!(!addr.is_mapped()); - has_object_alloced_by_malloc(addr) + debug_assert!(!object.to_address().is_mapped()); + is_alloced_by_malloc(object) } fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) { @@ -482,23 +482,27 @@ impl MallocSpace { let chunk_end = chunk_start + BYTES_IN_CHUNK; debug_assert!( - crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC.log_bytes_in_region + crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region == mark_bit_spec.log_bytes_in_region, "Alloc-bit and mark-bit metadata have different minimum object sizes!" ); // For bulk xor'ing 128-bit vectors on architectures with vector instructions // Each bit represents an object of LOG_MIN_OBJ_SIZE size - let bulk_load_size: usize = - 128 * (1 << crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC.log_bytes_in_region); + let bulk_load_size: usize = 128 + * (1 << crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region); // The start of a possibly empty page. This will be updated during the sweeping, and always points to the next page of last live objects. 
let mut empty_page_start = Address::ZERO; // Scan the chunk by every 'bulk_load_size' region. while address < chunk_end { - let alloc_128: u128 = - unsafe { load128(&crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC, address) }; + let alloc_128: u128 = unsafe { + load128( + &crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC, + address, + ) + }; let mark_128: u128 = unsafe { load128(&mark_bit_spec, address) }; // Check if there are dead objects in the bulk loaded region diff --git a/src/policy/mallocspace/metadata.rs b/src/policy/mallocspace/metadata.rs index 07c4b8e439..d3dcc75e58 100644 --- a/src/policy/mallocspace/metadata.rs +++ b/src/policy/mallocspace/metadata.rs @@ -1,9 +1,9 @@ -use crate::util::alloc_bit; use crate::util::conversions; use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; use crate::util::metadata::side_metadata; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::metadata::side_metadata::SideMetadataSpec; +use crate::util::metadata::vo_bit; use crate::util::Address; use crate::util::ObjectReference; use crate::vm::{ObjectModel, VMBinding}; @@ -152,17 +152,15 @@ pub fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size: usize } } -/// Check if a given object was allocated by malloc -pub fn is_alloced_by_malloc(object: ObjectReference) -> bool { - has_object_alloced_by_malloc(object.to_address()) -} - -/// Check if there is an object allocated by malloc at the address. +/// Check if there is an object allocated by malloc at the address of `object`. /// /// This function doesn't check if `addr` is aligned. /// If not, it will try to load the alloc bit for the address rounded down to the metadata's granularity. -pub fn has_object_alloced_by_malloc(addr: Address) -> bool { - is_meta_space_mapped_for_address(addr) && alloc_bit::is_alloced_object(addr) +pub fn is_alloced_by_malloc(object: ObjectReference) -> bool { + // FIXME: MallocSpace should use a local metadata to record allocated units. + // The VO-bit can be cleared explicitly by the VM. If we don't introduce another metadata, + // MallocSpace will not be able to free objects. + is_meta_space_mapped_for_address(object.to_address()) && vo_bit::is_vo_bit_set(object) } pub fn is_marked(object: ObjectReference, ordering: Ordering) -> bool { @@ -205,7 +203,8 @@ pub unsafe fn is_chunk_marked_unsafe(chunk_start: Address) -> bool { } pub fn set_alloc_bit(object: ObjectReference) { - alloc_bit::set_alloc_bit(object); + // FIXME: MallocSpace should use a local metadata to record allocated units. + vo_bit::set_vo_bit(object); } pub fn set_mark_bit(object: ObjectReference, ordering: Ordering) { @@ -214,7 +213,8 @@ pub fn set_mark_bit(object: ObjectReference, ordering: Ordering) #[allow(unused)] pub fn unset_alloc_bit(object: ObjectReference) { - alloc_bit::unset_alloc_bit(object); + // FIXME: MallocSpace should use a local metadata to record allocated units. + vo_bit::unset_vo_bit(object); } pub(super) fn set_page_mark(page_addr: Address) { @@ -238,7 +238,8 @@ pub(super) unsafe fn unset_offset_malloc_bit_unsafe(address: Address) { } pub unsafe fn unset_alloc_bit_unsafe(object: ObjectReference) { - alloc_bit::unset_alloc_bit_unsafe(object); + // FIXME: MallocSpace should use a local metadata to record allocated units. 
+ vo_bit::unset_vo_bit_unsafe(object); } #[allow(unused)] diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs index a6f3da1dd7..87c2194c5f 100644 --- a/src/policy/markcompactspace.rs +++ b/src/policy/markcompactspace.rs @@ -11,7 +11,8 @@ use crate::util::heap::layout::heap_layout::{Mmapper, VMMap}; use crate::util::heap::{HeapMeta, MonotonePageResource, PageResource, VMRequest}; use crate::util::metadata::extract_side_metadata; use crate::util::metadata::side_metadata::{SideMetadataContext, SideMetadataSpec}; -use crate::util::{alloc_bit, Address, ObjectReference}; +use crate::util::metadata::vo_bit; +use crate::util::{Address, ObjectReference}; use crate::{vm::*, ObjectQueue}; use atomic::Ordering; @@ -57,7 +58,7 @@ impl SFT for MarkCompactSpace { } fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) { - crate::util::alloc_bit::set_alloc_bit(object); + crate::util::metadata::vo_bit::set_vo_bit(object); } #[cfg(feature = "sanity")] @@ -231,8 +232,8 @@ impl MarkCompactSpace { object: ObjectReference, ) -> ObjectReference { debug_assert!( - crate::util::alloc_bit::is_alloced(object), - "{:x}: alloc bit not set", + crate::util::metadata::vo_bit::is_vo_bit_set(object), + "{:x}: VO-bit not set", object ); if MarkCompactSpace::::test_and_mark(object) { @@ -247,8 +248,8 @@ impl MarkCompactSpace { object: ObjectReference, ) -> ObjectReference { debug_assert!( - crate::util::alloc_bit::is_alloced(object), - "{:x}: alloc bit not set", + crate::util::metadata::vo_bit::is_vo_bit_set(object), + "{:x}: VO-bit not set", object ); // from this stage and onwards, mark bit is no longer needed @@ -376,8 +377,9 @@ impl MarkCompactSpace { start, end, ); for obj in linear_scan { - // clear the alloc bit - alloc_bit::unset_addr_alloc_bit(obj.to_address()); + // clear the VO-bit + // FIXME: MarkCompact should use a local metadata to record allocated units. + vo_bit::unset_vo_bit(obj); let forwarding_pointer = Self::get_header_forwarding_pointer(obj); @@ -390,8 +392,9 @@ impl MarkCompactSpace { // copy object trace!(" copy from {} to {}", obj, new_object); let end_of_new_object = VM::VMObjectModel::copy_to(obj, new_object, Address::ZERO); - // update alloc_bit, - alloc_bit::set_alloc_bit(new_object); + // update VO-bit + // FIXME: MarkCompact should use a local metadata to record allocated units. + vo_bit::set_vo_bit(new_object); to = new_object.to_address() + copied_size; debug_assert_eq!(end_of_new_object, to); } diff --git a/src/policy/sft.rs b/src/policy/sft.rs index 7036d34f16..8f233e7bf1 100644 --- a/src/policy/sft.rs +++ b/src/policy/sft.rs @@ -1,8 +1,8 @@ use crate::plan::VectorObjectQueue; use crate::scheduler::GCWorker; -#[cfg(feature = "is_mmtk_object")] -use crate::util::alloc_bit; use crate::util::conversions; +#[cfg(feature = "vo_bit")] +use crate::util::metadata::vo_bit; use crate::util::*; use crate::vm::VMBinding; use std::marker::PhantomData; @@ -67,18 +67,18 @@ pub trait SFT { /// This default implementation works for all spaces that use MMTk's mapper to allocate memory. /// Some spaces, like `MallocSpace`, use third-party libraries to allocate memory. /// Such spaces needs to override this method. - #[cfg(feature = "is_mmtk_object")] + #[cfg(feature = "vo_bit")] #[inline(always)] - fn is_mmtk_object(&self, addr: Address) -> bool { - // Having found the SFT means the `addr` is in one of our spaces. + fn is_valid_mmtk_object(&self, object: ObjectReference) -> bool { + // Having found the SFT means the `object` is in one of our spaces. 
        // Although the SFT map is allocated eagerly when the space is contiguous,
        // the pages of the space itself are acquired on demand.
-        // Therefore, the page of `addr` may not have been mapped, yet.
-        if !addr.is_mapped() {
+        // Therefore, the page of `object` may not have been mapped, yet.
+        if !object.to_address().is_mapped() {
             return false;
         }
-        // The `addr` is mapped. We use the global alloc bit to get the exact answer.
-        alloc_bit::is_alloced_object(addr)
+        // The `object` is mapped. We use the VO-bit to get the exact answer.
+        vo_bit::is_vo_bit_set(object)
     }
 
     /// Initialize object metadata (in the header, or in the side metadata).
@@ -145,9 +145,9 @@ impl SFT for EmptySpaceSFT {
     fn is_in_space(&self, _object: ObjectReference) -> bool {
         false
     }
-    #[cfg(feature = "is_mmtk_object")]
+    #[cfg(feature = "vo_bit")]
     #[inline(always)]
-    fn is_mmtk_object(&self, _addr: Address) -> bool {
+    fn is_valid_mmtk_object(&self, _object: ObjectReference) -> bool {
         false
     }
 
diff --git a/src/util/alloc/immix_allocator.rs b/src/util/alloc/immix_allocator.rs
index d73c0143ef..162c29f040 100644
--- a/src/util/alloc/immix_allocator.rs
+++ b/src/util/alloc/immix_allocator.rs
@@ -257,8 +257,13 @@ impl ImmixAllocator {
                 end_line,
                 self.tls
             );
-            #[cfg(feature = "global_alloc_bit")]
-            crate::util::alloc_bit::bzero_alloc_bit(self.cursor, self.limit - self.cursor);
+            #[cfg(feature = "vo_bit")]
+            unsafe {
+                crate::util::metadata::vo_bit::bzero_vo_bit(
+                    self.cursor,
+                    self.limit - self.cursor,
+                );
+            }
             crate::util::memory::zero(self.cursor, self.limit - self.cursor);
             debug_assert!(
                 align_allocation_no_fill::<VM>(self.cursor, align, offset) + size <= self.limit
diff --git a/src/util/alloc_bit.rs b/src/util/alloc_bit.rs
deleted file mode 100644
index e9bb9f93ca..0000000000
--- a/src/util/alloc_bit.rs
+++ /dev/null
@@ -1,70 +0,0 @@
-use atomic::Ordering;
-
-use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
-use crate::util::metadata::side_metadata::SideMetadataContext;
-use crate::util::metadata::side_metadata::SideMetadataSpec;
-use crate::util::Address;
-use crate::util::ObjectReference;
-
-/// An alloc-bit is required per min-object-size aligned address , rather than per object, and can only exist as side metadata.
-pub(crate) const ALLOC_SIDE_METADATA_SPEC: SideMetadataSpec = - crate::util::metadata::side_metadata::spec_defs::ALLOC_BIT; - -pub const ALLOC_SIDE_METADATA_ADDR: Address = ALLOC_SIDE_METADATA_SPEC.get_absolute_offset(); - -pub fn map_meta_space_for_chunk(metadata: &SideMetadataContext, chunk_start: Address) { - let mmap_metadata_result = metadata.try_map_metadata_space(chunk_start, BYTES_IN_CHUNK); - debug_assert!( - mmap_metadata_result.is_ok(), - "mmap sidemetadata failed for chunk_start ({})", - chunk_start - ); -} - -pub fn set_alloc_bit(object: ObjectReference) { - debug_assert!(!is_alloced(object), "{:x}: alloc bit already set", object); - ALLOC_SIDE_METADATA_SPEC.store_atomic::(object.to_address(), 1, Ordering::SeqCst); -} - -pub fn unset_addr_alloc_bit(address: Address) { - debug_assert!( - is_alloced_object(address), - "{:x}: alloc bit not set", - address - ); - ALLOC_SIDE_METADATA_SPEC.store_atomic::(address, 0, Ordering::SeqCst); -} - -pub fn unset_alloc_bit(object: ObjectReference) { - debug_assert!(is_alloced(object), "{:x}: alloc bit not set", object); - ALLOC_SIDE_METADATA_SPEC.store_atomic::(object.to_address(), 0, Ordering::SeqCst); -} - -/// # Safety -/// -/// This is unsafe: check the comment on `side_metadata::store` -/// -pub unsafe fn unset_alloc_bit_unsafe(object: ObjectReference) { - debug_assert!(is_alloced(object), "{:x}: alloc bit not set", object); - ALLOC_SIDE_METADATA_SPEC.store::(object.to_address(), 0); -} - -pub fn is_alloced(object: ObjectReference) -> bool { - is_alloced_object(object.to_address()) -} - -pub fn is_alloced_object(address: Address) -> bool { - ALLOC_SIDE_METADATA_SPEC.load_atomic::(address, Ordering::SeqCst) == 1 -} - -/// # Safety -/// -/// This is unsafe: check the comment on `side_metadata::load` -/// -pub unsafe fn is_alloced_object_unsafe(address: Address) -> bool { - ALLOC_SIDE_METADATA_SPEC.load::(address) == 1 -} - -pub fn bzero_alloc_bit(start: Address, size: usize) { - ALLOC_SIDE_METADATA_SPEC.bzero_metadata(start, size); -} diff --git a/src/util/is_mmtk_object.rs b/src/util/is_mmtk_object.rs deleted file mode 100644 index 85b8d4315f..0000000000 --- a/src/util/is_mmtk_object.rs +++ /dev/null @@ -1,4 +0,0 @@ -/// The region size (in bytes) of the `ALLOC_BIT` side metadata. -/// The VM can use this to check if an object is properly aligned. -pub const ALLOC_BIT_REGION_SIZE: usize = - 1usize << crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC.log_bytes_in_region; diff --git a/src/util/linear_scan.rs b/src/util/linear_scan.rs index 3212c2efef..33757e28a4 100644 --- a/src/util/linear_scan.rs +++ b/src/util/linear_scan.rs @@ -1,4 +1,4 @@ -use crate::util::alloc_bit; +use crate::util::metadata::vo_bit; use crate::util::Address; use crate::util::ObjectReference; use crate::vm::ObjectModel; @@ -41,9 +41,11 @@ impl fn next(&mut self) -> Option<::Item> { while self.cursor < self.end { let is_object = if ATOMIC_LOAD_ALLOC_BIT { - alloc_bit::is_alloced_object(self.cursor) + // FIXME: Any space that supports linear scan (such as MarkCompact space) should + // introduce its own allocation bit rather than using VO-bit. + vo_bit::is_vo_bit_set(unsafe { self.cursor.to_object_reference() }) } else { - unsafe { alloc_bit::is_alloced_object_unsafe(self.cursor) } + unsafe { vo_bit::is_vo_bit_set_unsafe(self.cursor.to_object_reference()) } }; if is_object { diff --git a/src/util/metadata/mod.rs b/src/util/metadata/mod.rs index 23d1150885..2ff48a237a 100644 --- a/src/util/metadata/mod.rs +++ b/src/util/metadata/mod.rs @@ -2,7 +2,7 @@ //! //! 
This module is designed to enable the implementation of a wide range of GC algorithms for VMs with various combinations of in-object and on-side space for GC-specific metadata (e.g. forwarding bits, marking bit, logging bit, etc.). //! -//! The new metadata design differentiates per-object metadata (e.g. forwarding-bits and marking-bit) from other types of metadata including per-address (e.g. alloc-bit) and per-X (where X != object size), because the per-object metadata can optionally be kept in the object headers. +//! The new metadata design differentiates per-object metadata (e.g. forwarding-bits and marking-bit) from other types of metadata including per-address and per-X (where X != object size), because the per-object metadata can optionally be kept in the object headers. //! //! MMTk acknowledges the VM-dependant nature of the in-object metadata, and asks the VM bindings to contribute by implementing the related parts in the ['ObjectModel'](crate::vm::ObjectModel). //! @@ -226,4 +226,8 @@ pub use metadata_val_traits::*; pub(crate) mod log_bit; +// FIXME: vo_bit should only be enabled if the "vo_bit" feature is on. +// Currently it is used in MallocSpace and MarkCompactSpace. +pub mod vo_bit; + pub use global::*; diff --git a/src/util/metadata/side_metadata/constants.rs b/src/util/metadata/side_metadata/constants.rs index 6e660e52b7..e0b9a099be 100644 --- a/src/util/metadata/side_metadata/constants.rs +++ b/src/util/metadata/side_metadata/constants.rs @@ -11,7 +11,7 @@ use crate::util::Address; // reserved addresses such as 0x0. // XXXX: I updated the base address for 32 bit to 0x1000_0000. For what I tested on, the library // and the malloc heap often starts at 0x800_0000. If we start the metadata from the second 4Mb chunk (i.e. the chunk `[0x40_0000, 0x80_0000)`), -// we won't be guaranteed enough space before 0x800_0000. For example, the alloc bit is 1 bit per 4 bytes +// we won't be guaranteed enough space before 0x800_0000. For example, the VO-bit is 1 bit per 4 bytes // (1 word in 32bits), and it will take the address range of [0x40_000, 0x840_0000) which clashes with // the library/heap. So I move this to 0x1000_0000. // This is made public, as VM bingdings may need to use this. @@ -29,8 +29,9 @@ pub const GLOBAL_SIDE_METADATA_BASE_ADDRESS: Address = pub(crate) const GLOBAL_SIDE_METADATA_BASE_OFFSET: SideMetadataOffset = SideMetadataOffset::addr(GLOBAL_SIDE_METADATA_BASE_ADDRESS); -// Base address of alloc bit, public to VM bindings which may need to use this. -pub const ALLOC_SIDE_METADATA_ADDR: Address = crate::util::alloc_bit::ALLOC_SIDE_METADATA_ADDR; +// Base address of VO-bit, public to VM bindings which may need to use this. +pub const VO_BIT_SIDE_METADATA_ADDR: Address = + crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_ADDR; /// This constant represents the worst-case ratio of source data size to global side metadata. /// A value of 2 means the space required for global side metadata must be less than 1/4th of the source data. 
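To make the exported base address concrete, the following binding-side sketch shows how an address maps to its VO-bit under MMTk's contiguous 64-bit side-metadata layout: one bit per word-sized region, eight such bits packed per metadata byte. This is a simplified illustration under those assumptions, not MMTk's implementation; the hypothetical helpers `vo_bit_location` and `is_vo_bit_set_raw` are not part of this patch, and bindings should normally use the exported `is_valid_mmtk_object` function instead.

```rust
use mmtk::util::metadata::side_metadata::VO_BIT_SIDE_METADATA_ADDR;
use mmtk::util::Address;

/// Log of the VO-bit granularity assumed here: 1 bit per 8-byte word (64-bit target).
const LOG_BYTES_IN_REGION: usize = 3;

/// Locate the metadata byte and bit shift governing `addr`:
/// bit index = addr >> log_bytes_in_region; 8 bits are packed into each metadata byte.
fn vo_bit_location(addr: Address) -> (Address, u8) {
    let bit_index = addr.as_usize() >> LOG_BYTES_IN_REGION;
    let meta_byte = VO_BIT_SIDE_METADATA_ADDR + (bit_index >> 3);
    let shift = (bit_index & 0b111) as u8;
    (meta_byte, shift)
}

/// Read the VO-bit for `addr` directly from the side-metadata mapping.
fn is_vo_bit_set_raw(addr: Address) -> bool {
    let (meta_byte, shift) = vo_bit_location(addr);
    // Safety: assumes the metadata page for this chunk has already been mapped.
    let byte: u8 = unsafe { meta_byte.load() };
    ((byte >> shift) & 1) == 1
}
```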
diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs
index fca15b738d..2512c2d6b3 100644
--- a/src/util/metadata/side_metadata/global.rs
+++ b/src/util/metadata/side_metadata/global.rs
@@ -1,10 +1,10 @@
 use super::*;
-#[cfg(feature = "global_alloc_bit")]
-use crate::util::alloc_bit::ALLOC_SIDE_METADATA_SPEC;
 use crate::util::constants::{BYTES_IN_PAGE, LOG_BITS_IN_BYTE};
 use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
 use crate::util::memory;
 use crate::util::metadata::metadata_val_traits::*;
+#[cfg(feature = "vo_bit")]
+use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC;
 use crate::util::Address;
 use num_traits::FromPrimitive;
 use std::fmt;
@@ -158,7 +158,7 @@ impl SideMetadataSpec {
         let _lock = sanity::SANITY_LOCK.lock().unwrap();
 
         // yiluowei: Not Sure but this assertion seems too strict for Immix recycled lines
-        #[cfg(not(feature = "global_alloc_bit"))]
+        #[cfg(not(feature = "vo_bit"))]
         debug_assert!(start.is_aligned_to(BYTES_IN_PAGE) && meta_byte_lshift(self, start) == 0);
 
         #[cfg(feature = "extreme_assertions")]
@@ -713,8 +713,8 @@ impl SideMetadataContext {
     pub fn new_global_specs(specs: &[SideMetadataSpec]) -> Vec<SideMetadataSpec> {
         let mut ret = vec![];
 
-        #[cfg(feature = "global_alloc_bit")]
-        ret.push(ALLOC_SIDE_METADATA_SPEC);
+        #[cfg(feature = "vo_bit")]
+        ret.push(VO_BIT_SIDE_METADATA_SPEC);
 
         {
             use crate::policy::sft_map::SFTMap;
diff --git a/src/util/metadata/side_metadata/spec_defs.rs b/src/util/metadata/side_metadata/spec_defs.rs
index 52955fe4db..bc586569ad 100644
--- a/src/util/metadata/side_metadata/spec_defs.rs
+++ b/src/util/metadata/side_metadata/spec_defs.rs
@@ -54,8 +54,8 @@ macro_rules! define_side_metadata_specs {
 // This defines all GLOBAL side metadata used by mmtk-core.
 define_side_metadata_specs!(
     last_spec_as LAST_GLOBAL_SIDE_METADATA_SPEC,
-    // Mark the start of an object
-    ALLOC_BIT = (global: true, log_num_of_bits: 0, log_bytes_in_region: LOG_MIN_OBJECT_SIZE as usize),
+    // Valid-object bit.
+    VO_BIT = (global: true, log_num_of_bits: 0, log_bytes_in_region: LOG_MIN_OBJECT_SIZE as usize),
     // Track chunks used by (malloc) marksweep
     MS_ACTIVE_CHUNK = (global: true, log_num_of_bits: 3, log_bytes_in_region: LOG_BYTES_IN_CHUNK as usize),
     // Track the index in SFT map for a chunk (only used for SFT sparse chunk map)
diff --git a/src/util/metadata/vo_bit.rs b/src/util/metadata/vo_bit.rs
new file mode 100644
index 0000000000..d3615c6633
--- /dev/null
+++ b/src/util/metadata/vo_bit.rs
@@ -0,0 +1,142 @@
+//! Valid-object bit (VO-bit)
+//!
+//! The valid-object bit (VO-bit) metadata is a one-bit-per-object side metadata. It is set for
+//! every object at allocation time (more precisely, during `post_alloc`), and cleared when either
+//! - the object is reclaimed by the GC, or
+//! - the VM explicitly clears the VO-bit of the object (using the [`invalidate_object`] API).
+//!
+//! The main purpose of the VO-bit is supporting conservative GC. It is the canonical source of
+//! information about whether there is an object in the MMTk heap at any given address.
+//!
+//! The granularity of the VO-bit is one bit per minimum object alignment. Each bit governs the
+//! region `lo <= addr < hi`, where
+//! - `lo = addr.align_down(VO_BIT_REGION_SIZE)`,
+//! - `hi = lo + VO_BIT_REGION_SIZE`, and
+//! - the constant [`VO_BIT_REGION_SIZE`] is the size of the region (in bytes) each bit governs.
+//!
+//! Because of this granularity, the VO-bit metadata cannot tell *which* address within a region
+//! has a valid object. Therefore, the VM **must check if an address is properly aligned** before
+//! consulting the VO-bit metadata (by calling the [`is_valid_mmtk_object`] function). For most
+//! VMs, the alignment requirement of object references is equal to [`VO_BIT_REGION_SIZE`],
+//! so checking `object.to_address().is_aligned_to(VO_BIT_REGION_SIZE)` should usually work.
+//!
+//! The [`is_valid_mmtk_object`] function is useful for conservative root scanning. The VM can
+//! iterate through all words in a stack, filter out zeros, misaligned words, and obviously
+//! out-of-range words (such as addresses greater than `0x0000_7fff_ffff_ffff` on Linux on
+//! x86_64), and then use this function to decide if the word is really a reference.
+//!
+//! Note: [`is_valid_mmtk_object`] has special behaviors if the VM space (enabled by the
+//! `vm_space` feature) is present. See `crate::plan::global::BasePlan::vm_space`.
+
+use atomic::Ordering;
+
+#[cfg(feature = "vo_bit")]
+use crate::mmtk::SFT_MAP;
+#[cfg(feature = "vo_bit")]
+use crate::policy::sft_map::SFTMap;
+use crate::util::metadata::side_metadata::SideMetadataSpec;
+use crate::util::Address;
+use crate::util::ObjectReference;
+
+/// A VO-bit is required per min-object-size aligned address, rather than per object, and can
+/// only exist as side metadata.
+pub(crate) const VO_BIT_SIDE_METADATA_SPEC: SideMetadataSpec =
+    crate::util::metadata::side_metadata::spec_defs::VO_BIT;
+
+pub(crate) const VO_BIT_SIDE_METADATA_ADDR: Address =
+    VO_BIT_SIDE_METADATA_SPEC.get_absolute_offset();
+
+/// The region size (in bytes) of the `VO_BIT` side metadata.
+///
+/// Currently, it is set to the [minimum object size](crate::util::constants::MIN_OBJECT_SIZE),
+/// which is currently defined as the [word size](crate::util::constants::BYTES_IN_WORD).
+///
+/// The VM can use this to check if an object is properly aligned.
+#[cfg(feature = "vo_bit")] // Eventually the entire `vo_bit` module will be guarded by this feature.
+pub const VO_BIT_REGION_SIZE: usize =
+    1usize << crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region;
+
+/// Atomically set the VO-bit of `object`.
+pub(crate) fn set_vo_bit(object: ObjectReference) {
+    debug_assert!(!is_vo_bit_set(object), "{:x}: VO-bit already set", object);
+    VO_BIT_SIDE_METADATA_SPEC.store_atomic::<u8>(object.to_address(), 1, Ordering::SeqCst);
+}
+
+/// Atomically unset the VO-bit of `object`.
+pub(crate) fn unset_vo_bit(object: ObjectReference) {
+    // Note: Both the VM and the GC are allowed to unset the VO-bit. However, if the VM unsets
+    // the VO-bit first, that object will not be traced by the GC, and the GC will not try to
+    // clear its VO-bit again. So it is valid to assert that the VO-bit must still be set when
+    // this function is called.
+    debug_assert!(is_vo_bit_set(object), "{:x}: VO-bit not set", object);
+    VO_BIT_SIDE_METADATA_SPEC.store_atomic::<u8>(object.to_address(), 0, Ordering::SeqCst);
+}
+
+/// Atomically check if the VO-bit of `object` is set.
+pub(crate) fn is_vo_bit_set(object: ObjectReference) -> bool {
+    VO_BIT_SIDE_METADATA_SPEC.load_atomic::<u8>(object.to_address(), Ordering::SeqCst) == 1
+}
+
+/// Non-atomically unset the VO-bit of `object`.
+///
+/// # Safety
+///
+/// It will be a data race if another thread concurrently accesses any bit in the same byte.
+/// It should only be used when such a race is impossible.
+pub(crate) unsafe fn unset_vo_bit_unsafe(object: ObjectReference) {
+    debug_assert!(is_vo_bit_set(object), "{:x}: VO-bit not set", object);
+    VO_BIT_SIDE_METADATA_SPEC.store::<u8>(object.to_address(), 0);
+}
+
+/// Non-atomically check if the VO-bit of `object` is set.
+///
+/// # Safety
+///
+/// It will be a data race if another thread concurrently modifies any bit in the same byte.
+/// It should only be used when such a race is impossible.
+pub(crate) unsafe fn is_vo_bit_set_unsafe(object: ObjectReference) -> bool {
+    VO_BIT_SIDE_METADATA_SPEC.load::<u8>(object.to_address()) == 1
+}
+
+/// Unset the VO-bits of all objects in the region `start <= addr < start + size`.
+///
+/// # Safety
+///
+/// It will be a data race if another thread concurrently accesses any bit in the region.
+/// It should only be used when such a race is impossible.
+pub(crate) unsafe fn bzero_vo_bit(start: Address, size: usize) {
+    VO_BIT_SIDE_METADATA_SPEC.bzero_metadata(start, size);
+}
+
+/// Check if `object` is a reference to a valid MMTk object.
+///
+/// This function returns true if the VO-bit is set for the address of `object`.
+///
+/// The input parameter `object` can be converted from an arbitrary address. This function
+/// always returns true or false, and never panics.
+///
+/// Due to the granularity of the VO-bit metadata (see the [module-level documentation][self]),
+/// the user must check the alignment of `object` before calling this function in order to get
+/// the correct result.
+///
+/// Argument:
+/// * `object`: An `ObjectReference` converted from an arbitrary address.
+#[cfg(feature = "vo_bit")] // Eventually the entire `vo_bit` module will be guarded by this feature.
+pub fn is_valid_mmtk_object(object: ObjectReference) -> bool {
+    SFT_MAP
+        .get_checked(object.to_address())
+        .is_valid_mmtk_object(object)
+}
+
+/// Invalidate an object.
+///
+/// After this method is called, the GC will treat the object as dead: it will not trace it or
+/// scan it in subsequent GCs. It is useful for VMs that may destroy per-object metadata (usually
+/// during finalization), after which any attempt to scan the object may result in a crash.
+///
+/// Argument:
+/// * `object`: An object that is still valid.
+#[cfg(feature = "vo_bit")] // Eventually the entire `vo_bit` module will be guarded by this feature.
+pub fn invalidate_object(_object: ObjectReference) {
+    unimplemented!("GC algorithms need to be updated to skip invalid objects during tracing.")
+}
diff --git a/src/util/mod.rs b/src/util/mod.rs
index 771e253ab4..5fc1e83e75 100644
--- a/src/util/mod.rs
+++ b/src/util/mod.rs
@@ -29,8 +29,6 @@ pub mod options;
 pub mod reference_processor;
 
 // The following modules are only public in the mmtk crate. They should only be used in MMTk core.
-/// Alloc bit
-pub(crate) mod alloc_bit;
 /// An analysis framework for collecting data and profiling in GC.
 #[cfg(feature = "analysis")]
 pub(crate) mod analysis;
@@ -43,8 +41,6 @@ pub(crate) mod erase_vm;
 pub(crate) mod finalizable_processor;
 /// Heap implementation, including page resource, mmapper, etc.
pub(crate) mod heap; -#[cfg(feature = "is_mmtk_object")] -pub mod is_mmtk_object; /// Logger initialization pub(crate) mod logger; /// Various malloc implementations (conditionally compiled by features) diff --git a/src/util/object_forwarding.rs b/src/util/object_forwarding.rs index 8b83830554..e3738bc79e 100644 --- a/src/util/object_forwarding.rs +++ b/src/util/object_forwarding.rs @@ -81,8 +81,8 @@ pub fn forward_object( copy_context: &mut GCWorkerCopyContext, ) -> ObjectReference { let new_object = VM::VMObjectModel::copy(object, semantics, copy_context); - #[cfg(feature = "global_alloc_bit")] - crate::util::alloc_bit::set_alloc_bit(new_object); + #[cfg(feature = "vo_bit")] + crate::util::metadata::vo_bit::set_vo_bit(new_object); if let Some(shift) = forwarding_bits_offset_in_forwarding_pointer::() { VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC.store_atomic::( object, diff --git a/vmbindings/dummyvm/Cargo.toml b/vmbindings/dummyvm/Cargo.toml index 20be91eded..a234cda568 100644 --- a/vmbindings/dummyvm/Cargo.toml +++ b/vmbindings/dummyvm/Cargo.toml @@ -21,6 +21,6 @@ atomic = "0.4.6" [features] default = [] -is_mmtk_object = ["mmtk/is_mmtk_object"] +vo_bit = ["mmtk/vo_bit"] malloc_counted_size = ["mmtk/malloc_counted_size"] malloc_mark_sweep = ["mmtk/malloc_mark_sweep"] diff --git a/vmbindings/dummyvm/src/api.rs b/vmbindings/dummyvm/src/api.rs index d68508c7bf..8f2055d48c 100644 --- a/vmbindings/dummyvm/src/api.rs +++ b/vmbindings/dummyvm/src/api.rs @@ -107,10 +107,10 @@ pub extern "C" fn mmtk_is_live_object(object: ObjectReference) -> bool{ memory_manager::is_live_object(object) } -#[cfg(feature = "is_mmtk_object")] +#[cfg(feature = "vo_bit")] #[no_mangle] -pub extern "C" fn mmtk_is_mmtk_object(addr: Address) -> bool { - memory_manager::is_mmtk_object(addr) +pub extern "C" fn mmtk_is_valid_mmtk_object(object: ObjectReference) -> bool { + mmtk::util::metadata::vo_bit::is_valid_mmtk_object(object) } #[no_mangle] diff --git a/vmbindings/dummyvm/src/tests/conservatism.rs b/vmbindings/dummyvm/src/tests/conservatism.rs index d66de2d6fa..afa457daf5 100644 --- a/vmbindings/dummyvm/src/tests/conservatism.rs +++ b/vmbindings/dummyvm/src/tests/conservatism.rs @@ -1,11 +1,11 @@ // GITHUB-CI: MMTK_PLAN=all -// GITHUB-CI: FEATURES=is_mmtk_object +// GITHUB-CI: FEATURES=vo_bit use crate::api::*; use crate::object_model::OBJECT_REF_OFFSET; use crate::tests::fixtures::{Fixture, SingleObject}; use mmtk::util::constants::LOG_BITS_IN_WORD; -use mmtk::util::is_mmtk_object::ALLOC_BIT_REGION_SIZE; +use mmtk::util::metadata::vo_bit::VO_BIT_REGION_SIZE; use mmtk::util::*; lazy_static! { @@ -13,38 +13,38 @@ lazy_static! { } fn basic_filter(addr: Address) -> bool { - !addr.is_zero() && addr.as_usize() % ALLOC_BIT_REGION_SIZE == (OBJECT_REF_OFFSET % ALLOC_BIT_REGION_SIZE) + !addr.is_zero() && addr.as_usize() % VO_BIT_REGION_SIZE == (OBJECT_REF_OFFSET % VO_BIT_REGION_SIZE) } -fn assert_filter_pass(addr: Address) { +fn assert_filter_pass(object: ObjectReference) { assert!( - basic_filter(addr), + basic_filter(object.to_address()), "{} should pass basic filter, but failed.", - addr, + object, ); } -fn assert_filter_fail(addr: Address) { +fn assert_filter_fail(object: ObjectReference) { assert!( - !basic_filter(addr), + !basic_filter(object.to_address()), "{} should fail basic filter, but passed.", - addr, + object, ); } -fn assert_valid_objref(addr: Address) { +fn assert_valid_objref(object: ObjectReference) { assert!( - mmtk_is_mmtk_object(addr), - "mmtk_is_mmtk_object({}) should return true. 
Got false.", - addr, + mmtk_is_valid_mmtk_object(object), + "mmtk_is_valid_mmtk_object({}) should return true. Got false.", + object, ); } -fn assert_invalid_objref(addr: Address, real: Address) { +fn assert_invalid_objref(object: ObjectReference, real: ObjectReference) { assert!( - !mmtk_is_mmtk_object(addr), - "mmtk_is_mmtk_object({}) should return false. Got true. Real object: {}", - addr, + !mmtk_is_valid_mmtk_object(object), + "mmtk_is_valid_mmtk_object({}) should return false. Got true. Real object: {}", + object, real, ); } @@ -52,9 +52,9 @@ fn assert_invalid_objref(addr: Address, real: Address) { #[test] pub fn null() { SINGLE_OBJECT.with_fixture(|fixture| { - let addr = Address::ZERO; - assert_filter_fail(addr); - assert_invalid_objref(addr, fixture.objref.to_address()); + let object = ObjectReference::NULL; + assert_filter_fail(object); + assert_invalid_objref(object, fixture.objref); }); } @@ -66,7 +66,7 @@ pub fn too_small() { SINGLE_OBJECT.with_fixture(|fixture| { for offset in 1usize..SMALL_OFFSET { let addr = Address::ZERO + offset; - assert_invalid_objref(addr, fixture.objref.to_address()); + assert_invalid_objref(unsafe { addr.to_object_reference() }, fixture.objref); } }); } @@ -75,7 +75,8 @@ pub fn too_small() { pub fn max() { SINGLE_OBJECT.with_fixture(|fixture| { let addr = Address::MAX; - assert_invalid_objref(addr, fixture.objref.to_address()); + let object = unsafe { addr.to_object_reference() }; + assert_invalid_objref(object, fixture.objref); }); } @@ -84,7 +85,8 @@ pub fn too_big() { SINGLE_OBJECT.with_fixture(|fixture| { for offset in 1usize..SMALL_OFFSET { let addr = Address::MAX - offset; - assert_invalid_objref(addr, fixture.objref.to_address()); + let object = unsafe { addr.to_object_reference() }; + assert_invalid_objref(object, fixture.objref); } }); } @@ -92,9 +94,9 @@ pub fn too_big() { #[test] pub fn direct_hit() { SINGLE_OBJECT.with_fixture(|fixture| { - let addr = fixture.objref.to_address(); - assert_filter_pass(addr); - assert_valid_objref(addr); + let object = fixture.objref; + assert_filter_pass(object); + assert_valid_objref(object); }); } @@ -106,7 +108,8 @@ pub fn small_offsets() { for offset in 1usize..SEVERAL_PAGES { let addr = fixture.objref.to_address() + offset; if basic_filter(addr) { - assert_invalid_objref(addr, fixture.objref.to_address()); + let object = unsafe { addr.to_object_reference() }; + assert_invalid_objref(object, fixture.objref); } } }); @@ -118,8 +121,9 @@ pub fn medium_offsets_aligned() { let alignment = std::mem::align_of::
(); for offset in (alignment..(alignment * SEVERAL_PAGES)).step_by(alignment) { let addr = fixture.objref.to_address() + offset; - assert_filter_pass(addr); - assert_invalid_objref(addr, fixture.objref.to_address()); + let object = unsafe { addr.to_object_reference() }; + assert_filter_pass(object); + assert_invalid_objref(object, fixture.objref); } }); } @@ -133,8 +137,9 @@ pub fn large_offsets_aligned() { Some(n) => unsafe { Address::from_usize(n) }, None => break, }; - assert_filter_pass(addr); - assert_invalid_objref(addr, fixture.objref.to_address()); + let object = unsafe { addr.to_object_reference() }; + assert_filter_pass(object); + assert_invalid_objref(object, fixture.objref); } }); } @@ -149,8 +154,9 @@ pub fn negative_offsets() { Some(n) => unsafe { Address::from_usize(n) }, None => break, }; - assert_filter_pass(addr); - assert_invalid_objref(addr, fixture.objref.to_address()); + let object = unsafe { addr.to_object_reference() }; + assert_filter_pass(object); + assert_invalid_objref(object, fixture.objref); } }); } diff --git a/vmbindings/dummyvm/src/tests/mod.rs b/vmbindings/dummyvm/src/tests/mod.rs index 353156d856..96164512b7 100644 --- a/vmbindings/dummyvm/src/tests/mod.rs +++ b/vmbindings/dummyvm/src/tests/mod.rs @@ -16,7 +16,7 @@ mod malloc_api; #[cfg(feature = "malloc_counted_size")] mod malloc_counted; mod malloc_ms; -#[cfg(feature = "is_mmtk_object")] +#[cfg(feature = "vo_bit")] mod conservatism; mod is_in_mmtk_spaces; mod fixtures;
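
The aliasing behavior that these tests (and the `basic_filter` alignment check) guard against can be illustrated with a toy model of the VO-bit map: one bit per 8-byte region, so two addresses in the same region map to the same bit. This is a standalone illustration, not MMTk code; the `ToyVoBitMap` type and the example addresses are made up for demonstration.

```rust
/// A toy model of the VO-bit map: 1 bit per 8-byte region.
struct ToyVoBitMap {
    bits: Vec<u8>,
}

const REGION: usize = 8; // plays the role of VO_BIT_REGION_SIZE on a 64-bit target

impl ToyVoBitMap {
    fn new(heap_bytes: usize) -> Self {
        // One bit per region, eight bits per byte.
        ToyVoBitMap { bits: vec![0; heap_bytes / REGION / 8 + 1] }
    }

    fn set(&mut self, addr: usize) {
        let bit = addr / REGION;
        self.bits[bit / 8] |= 1 << (bit % 8);
    }

    fn is_set(&self, addr: usize) -> bool {
        let bit = addr / REGION;
        ((self.bits[bit / 8] >> (bit % 8)) & 1) == 1
    }
}

fn main() {
    let mut map = ToyVoBitMap::new(1 << 20);
    map.set(0x100); // an object at a region-aligned address
    assert!(map.is_set(0x100));
    // A misaligned pointer into the same region aliases to the same bit: without
    // an alignment filter, it would be misidentified as a valid object reference.
    assert!(map.is_set(0x104));
    assert!(!map.is_set(0x108)); // the next region is clear
}
```

This is why `small_offsets` only asserts `assert_invalid_objref` for addresses that pass `basic_filter`: a misaligned address one word into the object would otherwise hit the same bit as the real object reference.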