diff --git a/.github/scripts/ci-test.sh b/.github/scripts/ci-test.sh
index 76ef86873e..041a4bb816 100755
--- a/.github/scripts/ci-test.sh
+++ b/.github/scripts/ci-test.sh
@@ -18,7 +18,7 @@ cd vmbindings/dummyvm
 for fn in $(ls src/tests/*.rs); do
     t=$(basename -s .rs $fn)
-    if [[ $t == "mod.rs" ]]; then
+    if [[ $t == "mod" ]]; then
         continue
     fi
diff --git a/Cargo.toml b/Cargo.toml
index ad033071b8..c97d211ed9 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -24,7 +24,7 @@ mmtk-macros = { version = "0.12.0", path = "macros/" }
 libc = "0.2"
 jemalloc-sys = { version = "0.3.2", features = ["disable_initial_exec_tls"], optional = true }
 mimalloc-sys = { version = "0.1.6", optional = true }
-hoard-sys = { version = "0.1.1", optional = true }
+hoard-sys = { version = "0.1.2", optional = true }
 lazy_static = "1.1"
 log = { version = "0.4", features = ["max_level_trace", "release_max_level_off"] }
 crossbeam = "0.8.1"
@@ -89,6 +89,9 @@ nogc_multi_space = []
 # To collect statistics for each GC work packet. Enabling this may introduce a small overhead (several percentage slowdown on benchmark time).
 work_packet_stats = []
 
+# Count the malloc'd memory into the heap size
+malloc_counted_size = []
+
 # Do not modify the following line - ci-common.sh matches it
 # -- Mutally exclusive features --
 # Only one feature from each group can be provided. Otherwise build will fail.
diff --git a/docs/tutorial/code/mygc_semispace/global.rs b/docs/tutorial/code/mygc_semispace/global.rs
index c0c09d0b00..b7d6233cfb 100644
--- a/docs/tutorial/code/mygc_semispace/global.rs
+++ b/docs/tutorial/code/mygc_semispace/global.rs
@@ -102,8 +102,8 @@ impl Plan for MyGC {
     // ANCHOR_END: schedule_collection
 
     // ANCHOR: collection_required()
-    fn collection_required(&self, space_full: bool, space: &dyn Space) -> bool {
-        self.base().collection_required(self, space_full, space)
+    fn collection_required(&self, space_full: bool, _space: Option<&dyn Space>) -> bool {
+        self.base().collection_required(self, space_full)
     }
     // ANCHOR_END: collection_required()
diff --git a/src/memory_manager.rs b/src/memory_manager.rs
index 59dcd08ac2..5509344dad 100644
--- a/src/memory_manager.rs
+++ b/src/memory_manager.rs
@@ -153,6 +153,84 @@ pub fn get_allocator_mapping(
     mmtk.plan.get_allocator_mapping()[semantics]
 }
 
+/// The standard malloc. MMTk either uses its own allocator, or forwards the call to a
+/// library malloc.
+pub fn malloc(size: usize) -> Address {
+    crate::util::malloc::malloc(size)
+}
+
+/// The standard malloc except that with the feature `malloc_counted_size`, MMTk will count the allocated memory into its heap size.
+/// Thus the method requires a reference to an MMTk instance. MMTk either uses its own allocator, or forwards the call to a
+/// library malloc.
+#[cfg(feature = "malloc_counted_size")]
+pub fn counted_malloc<VM: VMBinding>(mmtk: &MMTK<VM>, size: usize) -> Address {
+    crate::util::malloc::counted_malloc(mmtk, size)
+}
+
+/// The standard calloc.
+pub fn calloc(num: usize, size: usize) -> Address {
+    crate::util::malloc::calloc(num, size)
+}
+
+/// The standard calloc except that with the feature `malloc_counted_size`, MMTk will count the allocated memory into its heap size.
+/// Thus the method requires a reference to an MMTk instance.
+#[cfg(feature = "malloc_counted_size")]
+pub fn counted_calloc<VM: VMBinding>(mmtk: &MMTK<VM>, num: usize, size: usize) -> Address {
+    crate::util::malloc::counted_calloc(mmtk, num, size)
+}
+
+/// The standard realloc.
+pub fn realloc(addr: Address, size: usize) -> Address {
+    crate::util::malloc::realloc(addr, size)
+}
+
+/// The standard realloc except that with the feature `malloc_counted_size`, MMTk will count the allocated memory into its heap size.
+/// Thus the method requires a reference to an MMTk instance, and the size of the existing memory that will be reallocated.
+/// The `addr` in the arguments must be an address that was previously returned by MMTk's `malloc()`, `calloc()` or `realloc()`.
+#[cfg(feature = "malloc_counted_size")]
+pub fn realloc_with_old_size<VM: VMBinding>(
+    mmtk: &MMTK<VM>,
+    addr: Address,
+    size: usize,
+    old_size: usize,
+) -> Address {
+    crate::util::malloc::realloc_with_old_size(mmtk, addr, size, old_size)
+}
+
+/// The standard free.
+/// The `addr` in the arguments must be an address that was previously returned by MMTk's `malloc()`, `calloc()` or `realloc()`.
+pub fn free(addr: Address) {
+    crate::util::malloc::free(addr)
+}
+
+/// The standard free except that with the feature `malloc_counted_size`, MMTk will subtract the freed memory from its heap size accounting.
+/// Thus the method requires a reference to an MMTk instance, and the size of the memory to free.
+/// The `addr` in the arguments must be an address that was previously returned by MMTk's `malloc()`, `calloc()` or `realloc()`.
+#[cfg(feature = "malloc_counted_size")]
+pub fn free_with_size<VM: VMBinding>(mmtk: &MMTK<VM>, addr: Address, old_size: usize) {
+    crate::util::malloc::free_with_size(mmtk, addr, old_size)
+}
+
+/// Poll for GC. MMTk will decide if a GC is needed. If so, this call will block
+/// the current thread, and trigger a GC. Otherwise, it will simply return.
+/// Usually a binding does not need to call this function. MMTk will poll for GC during its allocation.
+/// However, if a binding uses counted malloc (which won't poll for GC), it may want to poll for GC manually.
+/// This function should only be used by mutator threads.
+pub fn gc_poll<VM: VMBinding>(mmtk: &MMTK<VM>, tls: VMMutatorThread) {
+    use crate::vm::{ActivePlan, Collection};
+    debug_assert!(
+        VM::VMActivePlan::is_mutator(tls.0),
+        "gc_poll() can only be called by a mutator thread."
+    );
+
+    let plan = mmtk.get_plan();
+    if plan.should_trigger_gc_when_heap_is_full() && plan.poll(false, None) {
+        debug!("Collection required");
+        assert!(plan.is_initialized(), "GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");
+        VM::VMCollection::block_for_gc(tls);
+    }
+}
+
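The counted functions above deliberately do not poll for GC, so a binding that routes its own internal allocations through them is expected to call `gc_poll()` itself. Below is a minimal sketch of that pattern, written against the DummyVM binding used elsewhere in this patch (`SINGLETON` and `DummyVM` come from that crate; the wrapper function names are hypothetical), assuming the `malloc_counted_size` feature is enabled:

```rust
use mmtk::memory_manager;
use mmtk::util::{Address, VMMutatorThread};

use crate::{DummyVM, SINGLETON};

/// Allocate VM-internal metadata so that it counts towards MMTk's heap size.
pub fn vm_metadata_alloc(tls: VMMutatorThread, size: usize) -> Address {
    // counted_malloc() never polls, so give MMTk a chance to trigger a GC first.
    memory_manager::gc_poll(&SINGLETON, tls);
    memory_manager::counted_malloc::<DummyVM>(&SINGLETON, size)
}

/// Free memory obtained from vm_metadata_alloc(), un-counting the same size.
pub fn vm_metadata_free(addr: Address, size: usize) {
    memory_manager::free_with_size::<DummyVM>(&SINGLETON, addr, size);
}
```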
 /// Run the main loop for the GC controller thread. This method does not return.
 ///
 /// Arguments:
diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs
index 3384760827..d53fc97580 100644
--- a/src/plan/generational/copying/global.rs
+++ b/src/plan/generational/copying/global.rs
@@ -64,7 +64,7 @@ impl Plan for GenCopy {
         }
     }
 
-    fn collection_required(&self, space_full: bool, space: &dyn Space) -> bool
+    fn collection_required(&self, space_full: bool, space: Option<&dyn Space>) -> bool
     where
         Self: Sized,
     {
diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs
index c54e5bc8b3..6ff2657c84 100644
--- a/src/plan/generational/global.rs
+++ b/src/plan/generational/global.rs
@@ -106,7 +106,7 @@ impl Gen {
         &self,
         plan: &P,
         space_full: bool,
-        space: &dyn Space,
+        space: Option<&dyn Space>,
     ) -> bool {
         let nursery_full = self.nursery.reserved_pages()
             >= (conversions::bytes_to_pages_up(*self.common.base.options.max_nursery));
@@ -114,13 +114,18 @@ impl Gen {
             return true;
         }
 
-        if space_full && space.common().descriptor != self.nursery.common().descriptor {
+        // Is the GC triggered by the nursery?
+        // - if space is None, it is not: is_triggered_by_nursery is false.
+        // - if space is Some, check whether its descriptor matches the nursery's.
+        let is_triggered_by_nursery = space.map_or(false, |s| {
+            s.common().descriptor == self.nursery.common().descriptor
+        });
+        // If the space is full and the GC was not triggered by the nursery, the next GC will be a full-heap GC.
+        if space_full && !is_triggered_by_nursery {
             self.next_gc_full_heap.store(true, Ordering::SeqCst);
         }
 
-        self.common
-            .base
-            .collection_required(plan, space_full, space)
+        self.common.base.collection_required(plan, space_full)
     }
 
     pub fn force_full_heap_collection(&self) {
diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs
index eb0714adf3..8e26cbf15a 100644
--- a/src/plan/generational/immix/global.rs
+++ b/src/plan/generational/immix/global.rs
@@ -96,7 +96,7 @@ impl Plan for GenImmix {
         self.gen.last_collection_full_heap()
     }
 
-    fn collection_required(&self, space_full: bool, space: &dyn Space) -> bool
+    fn collection_required(&self, space_full: bool, space: Option<&dyn Space>) -> bool
     where
         Self: Sized,
     {
diff --git a/src/plan/global.rs b/src/plan/global.rs
index e93ad834bb..871c2c7472 100644
--- a/src/plan/global.rs
+++ b/src/plan/global.rs
@@ -203,7 +203,14 @@ pub trait Plan: 'static + Sync + Downcast {
     /// This is invoked once per GC by one worker thread. 'tls' is the worker thread that executes this method.
     fn release(&mut self, tls: VMWorkerThread);
 
-    fn poll(&self, space_full: bool, space: &dyn Space) -> bool {
+    /// This method is called periodically by the allocation subsystem
+    /// (by default, each time a page is consumed), and provides the
+    /// collector with an opportunity to collect.
+    ///
+    /// Arguments:
+    /// * `space_full`: Space request failed, must recover pages within 'space'.
+    /// * `space`: The space that triggered the poll. This could be `None` if the poll is not triggered by a space.
+    fn poll(&self, space_full: bool, space: Option<&dyn Space>) -> bool {
         if self.collection_required(space_full, space) {
             // FIXME
             /*if space == META_DATA_SPACE {
@@ -236,8 +243,12 @@ pub trait Plan: 'static + Sync + Downcast {
         false
     }
 
-    fn log_poll(&self, space: &dyn Space, message: &'static str) {
-        info!(" [POLL] {}: {}", space.get_name(), message);
+    fn log_poll(&self, space: Option<&dyn Space>, message: &'static str) {
+        if let Some(space) = space {
+            info!(" [POLL] {}: {}", space.get_name(), message);
+        } else {
+            info!(" [POLL] {}", message);
+        }
     }
 
     /**
@@ -248,7 +259,7 @@ pub trait Plan: 'static + Sync + Downcast {
      * @param space TODO
      * @return true if a collection is requested by the plan.
      */
-    fn collection_required(&self, space_full: bool, _space: &dyn Space) -> bool;
+    fn collection_required(&self, space_full: bool, _space: Option<&dyn Space>) -> bool;
 
     // Note: The following methods are about page accounting. The default implementation should
     // work fine for non-copying plans. For copying plans, the plan should override any of these methods
@@ -372,9 +383,12 @@ pub struct BasePlan {
     /// Have we scanned all the stacks?
     stacks_prepared: AtomicBool,
    pub mutator_iterator_lock: Mutex<()>,
-    // A counter that keeps tracks of the number of bytes allocated since last stress test
-    pub allocation_bytes: AtomicUsize,
-    // Wrapper around analysis counters
+    /// A counter that keeps track of the number of bytes allocated since the last stress test
+    allocation_bytes: AtomicUsize,
+    /// A counter that keeps track of the number of bytes allocated by malloc
+    #[cfg(feature = "malloc_counted_size")]
+    malloc_bytes: AtomicUsize,
+    /// Wrapper around analysis counters
     #[cfg(feature = "analysis")]
     pub analysis_manager: AnalysisManager,
 
@@ -518,6 +532,8 @@ impl BasePlan {
             scanned_stacks: AtomicUsize::new(0),
             mutator_iterator_lock: Mutex::new(()),
             allocation_bytes: AtomicUsize::new(0),
+            #[cfg(feature = "malloc_counted_size")]
+            malloc_bytes: AtomicUsize::new(0),
             #[cfg(feature = "analysis")]
             analysis_manager,
         }
@@ -596,6 +612,14 @@ impl BasePlan {
             pages += self.ro_space.reserved_pages();
         }
 
+        // If we need to count malloc'd size as part of our heap, we add it here.
+        #[cfg(feature = "malloc_counted_size")]
+        {
+            pages += crate::util::conversions::bytes_to_pages_up(
+                self.malloc_bytes.load(Ordering::SeqCst),
+            );
+        }
+
         // The VM space may be used as an immutable boot image, in which case, we should not count
         // it as part of the heap size.
         pages
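With `malloc_counted_size` enabled, whatever has been counted through the malloc API is folded into `get_reserved_pages()` at page granularity, so counted bytes push the plan towards its heap limit just like pages allocated in MMTk's own spaces. A small self-contained illustration of the rounding (this re-implements the conversion locally for the example; MMTk's own `conversions::bytes_to_pages_up` is the real source of truth, and a 4096-byte page size is assumed):

```rust
const BYTES_IN_PAGE: usize = 4096; // assumed page size for this example

/// Round a byte count up to whole pages, like conversions::bytes_to_pages_up.
fn bytes_to_pages_up(bytes: usize) -> usize {
    (bytes + BYTES_IN_PAGE - 1) / BYTES_IN_PAGE
}

fn main() {
    // 10,000 counted malloc bytes add 3 pages to the reported reserved pages.
    assert_eq!(bytes_to_pages_up(10_000), 3);
    // Once everything is freed (malloc_bytes == 0), nothing is added.
    assert_eq!(bytes_to_pages_up(0), 0);
}
```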
@@ -794,12 +818,7 @@ impl BasePlan {
             && (self.allocation_bytes.load(Ordering::SeqCst) > *self.options.stress_factor)
     }
 
-    pub(super) fn collection_required(
-        &self,
-        plan: &P,
-        space_full: bool,
-        _space: &dyn Space,
-    ) -> bool {
+    pub(super) fn collection_required(&self, plan: &P, space_full: bool) -> bool {
         let stress_force_gc = self.should_do_stress_gc();
         if stress_force_gc {
             debug!(
@@ -838,6 +857,19 @@ impl BasePlan {
         self.vm_space
             .verify_side_metadata_sanity(side_metadata_sanity_checker);
     }
+
+    #[cfg(feature = "malloc_counted_size")]
+    pub(crate) fn increase_malloc_bytes_by(&self, size: usize) {
+        self.malloc_bytes.fetch_add(size, Ordering::SeqCst);
+    }
+    #[cfg(feature = "malloc_counted_size")]
+    pub(crate) fn decrease_malloc_bytes_by(&self, size: usize) {
+        self.malloc_bytes.fetch_sub(size, Ordering::SeqCst);
+    }
+    #[cfg(feature = "malloc_counted_size")]
+    pub fn get_malloc_bytes(&self) -> usize {
+        self.malloc_bytes.load(Ordering::SeqCst)
+    }
 }
 
 /**
diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs
index 2d40d32a8b..c910d097e8 100644
--- a/src/plan/immix/global.rs
+++ b/src/plan/immix/global.rs
@@ -51,8 +51,8 @@ pub const IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints {
 impl Plan for Immix {
     type VM = VM;
 
-    fn collection_required(&self, space_full: bool, space: &dyn Space) -> bool {
-        self.base().collection_required(self, space_full, space)
+    fn collection_required(&self, space_full: bool, _space: Option<&dyn Space>) -> bool {
+        self.base().collection_required(self, space_full)
     }
 
     fn last_collection_was_exhaustive(&self) -> bool {
diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs
index c7e3f451d8..5650c07494 100644
--- a/src/plan/markcompact/global.rs
+++ b/src/plan/markcompact/global.rs
@@ -163,8 +163,8 @@ impl Plan for MarkCompact {
             .add(crate::util::sanity::sanity_checker::ScheduleSanityGC::new(self));
     }
 
-    fn collection_required(&self, space_full: bool, space: &dyn Space) -> bool {
-        self.base().collection_required(self, space_full, space)
+    fn collection_required(&self, space_full: bool, _space: Option<&dyn Space>) -> bool {
+        self.base().collection_required(self, space_full)
     }
 
     fn get_used_pages(&self) -> usize {
diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs
index c06ac5f5c8..963702b823 100644
--- a/src/plan/marksweep/global.rs
+++ b/src/plan/marksweep/global.rs
@@ -72,8 +72,8 @@ impl Plan for MarkSweep {
         self.common.release(tls, true);
     }
 
-    fn collection_required(&self, space_full: bool, space: &dyn Space) -> bool {
-        self.base().collection_required(self, space_full, space)
+    fn collection_required(&self, space_full: bool, _space: Option<&dyn Space>) -> bool {
+        self.base().collection_required(self, space_full)
     }
 
     fn get_used_pages(&self) -> usize {
diff --git a/src/plan/nogc/global.rs b/src/plan/nogc/global.rs
index 74bd1efd33..def42bb34e 100644
--- a/src/plan/nogc/global.rs
+++ b/src/plan/nogc/global.rs
@@ -48,8 +48,8 @@ impl Plan for NoGC {
         self.nogc_space.init(vm_map);
     }
 
-    fn collection_required(&self, space_full: bool, space: &dyn Space) -> bool {
-        self.base().collection_required(self, space_full, space)
+    fn collection_required(&self, space_full: bool, _space: Option<&dyn Space>) -> bool {
+        self.base().collection_required(self, space_full)
     }
 
     fn base(&self) -> &BasePlan {
diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs
index 6f5b8b47e2..1da5c53dfe 100644
--- a/src/plan/pageprotect/global.rs
+++ b/src/plan/pageprotect/global.rs
@@ -79,8 +79,8 @@ impl Plan for PageProtect {
         self.space.release(true);
     }
 
-    fn collection_required(&self, space_full: bool, space: &dyn Space) -> bool {
-        self.base().collection_required(self, space_full, space)
+    fn collection_required(&self, space_full: bool, _space: Option<&dyn Space>) -> bool {
+        self.base().collection_required(self, space_full)
    }
 
     fn get_used_pages(&self) -> usize {
diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs
index 6b8db36328..526a8b4543 100644
--- a/src/plan/semispace/global.rs
+++ b/src/plan/semispace/global.rs
@@ -110,8 +110,8 @@ impl Plan for SemiSpace {
         self.fromspace().release();
     }
 
-    fn collection_required(&self, space_full: bool, space: &dyn Space) -> bool {
-        self.base().collection_required(self, space_full, space)
+    fn collection_required(&self, space_full: bool, _space: Option<&dyn Space>) -> bool {
+        self.base().collection_required(self, space_full)
     }
 
     fn get_collection_reserved_pages(&self) -> usize {
diff --git a/src/policy/mallocspace/global.rs b/src/policy/mallocspace/global.rs
index ecb129368e..ddd71691ba 100644
--- a/src/policy/mallocspace/global.rs
+++ b/src/policy/mallocspace/global.rs
@@ -6,7 +6,7 @@ use crate::policy::space::SFT;
 use crate::util::constants::BYTES_IN_PAGE;
 use crate::util::heap::layout::heap_layout::VMMap;
 use crate::util::heap::PageResource;
-use crate::util::malloc::*;
+use crate::util::malloc::malloc_ms_util::*;
 use crate::util::metadata::side_metadata::{
     bzero_metadata, SideMetadataContext, SideMetadataSanity, SideMetadataSpec,
 };
@@ -238,7 +238,7 @@ impl MallocSpace {
     pub fn alloc(&self, tls: VMThread, size: usize, align: usize, offset: isize) -> Address {
         // TODO: Should refactor this and Space.acquire()
-        if VM::VMActivePlan::global().poll(false, self) {
+        if VM::VMActivePlan::global().poll(false, Some(self)) {
             assert!(VM::VMActivePlan::is_mutator(tls), "Polling in GC worker");
             VM::VMCollection::block_for_gc(VMMutatorThread(tls));
             return unsafe { Address::zero() };
diff --git a/src/policy/space.rs b/src/policy/space.rs
index 9eb04527b6..ee35c56964 100644
--- a/src/policy/space.rs
+++ b/src/policy/space.rs
@@ -407,7 +407,7 @@ pub trait Space: 'static + SFT + Sync + Downcast {
         trace!("Pages reserved");
         trace!("Polling ..");
 
-        if should_poll && VM::VMActivePlan::global().poll(false, self.as_space()) {
+        if should_poll && VM::VMActivePlan::global().poll(false, Some(self.as_space())) {
             debug!("Collection required");
             assert!(allow_gc, "GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");
             pr.clear_request(pages_reserved);
@@ -485,7 +485,7 @@ pub trait Space: 'static + SFT + Sync + Downcast {
                 "Physical allocation failed when GC is not allowed!"
            );
 
-            let gc_performed = VM::VMActivePlan::global().poll(true, self.as_space());
+            let gc_performed = VM::VMActivePlan::global().poll(true, Some(self.as_space()));
             debug_assert!(gc_performed, "GC not performed when forced.");
             pr.clear_request(pages_reserved);
             drop(lock); // drop the lock before block
diff --git a/src/util/malloc/library.rs b/src/util/malloc/library.rs
new file mode 100644
index 0000000000..84e5bd3aef
--- /dev/null
+++ b/src/util/malloc/library.rs
@@ -0,0 +1,65 @@
+// Export one of the malloc libraries.
+
+#[cfg(feature = "malloc_hoard")]
+pub use self::hoard::*;
+#[cfg(feature = "malloc_jemalloc")]
+pub use self::jemalloc::*;
+#[cfg(not(any(
+    feature = "malloc_jemalloc",
+    feature = "malloc_mimalloc",
+    feature = "malloc_hoard",
+)))]
+pub use self::libc_malloc::*;
+#[cfg(feature = "malloc_mimalloc")]
+pub use self::mimalloc::*;
+
+// Different malloc libraries
+
+// TODO: We should conditionally include some methods in the module, such as the POSIX extension and the GNU extension.
+
+#[cfg(feature = "malloc_jemalloc")]
+mod jemalloc {
+    // ANSI C
+    pub use jemalloc_sys::{calloc, free, malloc, realloc};
+    // Posix
+    pub use jemalloc_sys::posix_memalign;
+    // GNU
+    pub use jemalloc_sys::malloc_usable_size;
+}
+
+#[cfg(feature = "malloc_mimalloc")]
+mod mimalloc {
+    // ANSI C
+    pub use mimalloc_sys::{
+        mi_calloc as calloc, mi_free as free, mi_malloc as malloc, mi_realloc as realloc,
+    };
+    // Posix
+    pub use mimalloc_sys::mi_posix_memalign as posix_memalign;
+    // GNU
+    pub use mimalloc_sys::mi_malloc_usable_size as malloc_usable_size;
+}
+
+#[cfg(feature = "malloc_hoard")]
+mod hoard {
+    // ANSI C
+    pub use hoard_sys::{calloc, free, malloc, realloc};
+    // Posix
+    pub use hoard_sys::posix_memalign;
+    // GNU
+    pub use hoard_sys::malloc_usable_size;
+}
+
+/// If no malloc lib is specified, use the libc implementation
+#[cfg(not(any(
+    feature = "malloc_jemalloc",
+    feature = "malloc_mimalloc",
+    feature = "malloc_hoard",
+)))]
+mod libc_malloc {
+    // ANSI C
+    pub use libc::{calloc, free, malloc, realloc};
+    // Posix
+    pub use libc::posix_memalign;
+    // GNU
+    pub use libc::malloc_usable_size;
+}
diff --git a/src/util/malloc.rs b/src/util/malloc/malloc_ms_util.rs
similarity index 72%
rename from src/util/malloc.rs
rename to src/util/malloc/malloc_ms_util.rs
index 43b307f69c..1f054a1ced 100644
--- a/src/util/malloc.rs
+++ b/src/util/malloc/malloc_ms_util.rs
@@ -1,27 +1,10 @@
 use crate::util::constants::BYTES_IN_ADDRESS;
+use crate::util::malloc::library::*;
 use crate::util::Address;
 use crate::vm::VMBinding;
 
-#[cfg(feature = "malloc_jemalloc")]
-pub use jemalloc_sys::{calloc, free, malloc_usable_size, posix_memalign};
-#[cfg(feature = "malloc_mimalloc")]
-pub use mimalloc_sys::{
-    mi_calloc as calloc, mi_calloc_aligned, mi_free as free,
-    mi_malloc_usable_size as malloc_usable_size,
-};
-
-#[cfg(feature = "malloc_hoard")]
-pub use hoard_sys::{calloc, free, malloc_usable_size};
-
-#[cfg(not(any(
-    feature = "malloc_jemalloc",
-    feature = "malloc_mimalloc",
-    feature = "malloc_hoard",
-)))]
-pub use libc::{calloc, free, malloc_usable_size, posix_memalign};
-
-#[cfg(not(any(feature = "malloc_mimalloc", feature = "malloc_hoard",)))]
-fn align_alloc(size: usize, align: usize) -> Address {
+/// Allocate with alignment. This also guarantees the memory is zero initialized.
+pub fn align_alloc(size: usize, align: usize) -> Address {
     let mut ptr = std::ptr::null_mut::<libc::c_void>();
     let ptr_ptr = std::ptr::addr_of_mut!(ptr);
     let result = unsafe { posix_memalign(ptr_ptr, align, size) };
@@ -33,22 +16,9 @@ fn align_alloc(size: usize, align: usize) -> Address {
     address
 }
 
-#[cfg(feature = "malloc_mimalloc")]
-fn align_alloc(size: usize, align: usize) -> Address {
-    let raw = unsafe { mi_calloc_aligned(1, size, align) };
-    Address::from_mut_ptr(raw)
-}
-
-// hoard_sys does not provide align_alloc,
-// we have to do it ourselves
-#[cfg(feature = "malloc_hoard")]
-fn align_alloc(size: usize, align: usize) -> Address {
-    align_offset_alloc(size, align, 0)
-}
-
 // Beside returning the allocation result,
 // this will store the malloc result at (result - BYTES_IN_ADDRESS)
-fn align_offset_alloc(size: usize, align: usize, offset: isize) -> Address {
+pub fn align_offset_alloc(size: usize, align: usize, offset: isize) -> Address {
     // we allocate extra `align` bytes here, so we are able to handle offset
     let actual_size = size + align + BYTES_IN_ADDRESS;
     let raw = unsafe { calloc(1, actual_size) };
@@ -67,7 +37,7 @@ fn align_offset_alloc(size: usize, align: usize, offset: isize) -> Address {
     result
 }
 
-fn offset_malloc_usable_size(address: Address) -> usize {
+pub fn offset_malloc_usable_size(address: Address) -> usize {
     let malloc_res_ptr: *mut usize = (address - BYTES_IN_ADDRESS).to_mut_ptr();
     let malloc_res = unsafe { *malloc_res_ptr } as *mut libc::c_void;
     unsafe { malloc_usable_size(malloc_res) }
@@ -80,6 +50,8 @@ pub fn offset_free(address: Address) {
     unsafe { free(malloc_res) };
 }
 
+pub use crate::util::malloc::library::free;
+
 /// get malloc usable size of an address
 /// is_offset_malloc: whether the address is allocated with some offset
 pub fn get_malloc_usable_size(address: Address, is_offset_malloc: bool) -> usize {
diff --git a/src/util/malloc/mod.rs b/src/util/malloc/mod.rs
new file mode 100644
index 0000000000..551b014026
--- /dev/null
+++ b/src/util/malloc/mod.rs
@@ -0,0 +1,89 @@
+/// Malloc provided by libraries
+pub(crate) mod library;
+/// Using malloc as a mark-sweep free-list allocator
+pub mod malloc_ms_util;
+
+use crate::util::Address;
+#[cfg(feature = "malloc_counted_size")]
+use crate::vm::VMBinding;
+#[cfg(feature = "malloc_counted_size")]
+use crate::MMTK;
+
+// The following expose a set of malloc APIs. They are currently implemented with
+// the library malloc. When we have a native malloc implementation, we should change
+// their implementation to point to our native malloc.
+
+// We have two versions for each function:
+// * a normal version: it has a signature that is compatible with the standard malloc library.
+// * a counted version: the allocated/freed bytes are counted into MMTk's heap size. So extra arguments
+//   are needed to maintain the allocated bytes properly. The API is inspired by Julia's counted malloc.
+//   The counted version is only available with the feature `malloc_counted_size`.
+
+#[inline(always)]
+pub fn malloc(size: usize) -> Address {
+    Address::from_mut_ptr(unsafe { self::library::malloc(size) })
+}
+
+#[cfg(feature = "malloc_counted_size")]
+#[inline(always)]
+pub fn counted_malloc<VM: VMBinding>(mmtk: &MMTK<VM>, size: usize) -> Address {
+    let res = malloc(size);
+    if !res.is_zero() {
+        mmtk.plan.base().increase_malloc_bytes_by(size);
+    }
+    res
+}
+
+#[inline(always)]
+pub fn calloc(num: usize, size: usize) -> Address {
+    Address::from_mut_ptr(unsafe { self::library::calloc(num, size) })
+}
+
+#[cfg(feature = "malloc_counted_size")]
+#[inline(always)]
+pub fn counted_calloc<VM: VMBinding>(mmtk: &MMTK<VM>, num: usize, size: usize) -> Address {
+    let res = calloc(num, size);
+    if !res.is_zero() {
+        mmtk.plan.base().increase_malloc_bytes_by(num * size);
+    }
+    res
+}
+
+#[inline(always)]
+pub fn realloc(addr: Address, size: usize) -> Address {
+    Address::from_mut_ptr(unsafe { self::library::realloc(addr.to_mut_ptr(), size) })
+}
+
+#[cfg(feature = "malloc_counted_size")]
+#[inline(always)]
+pub fn realloc_with_old_size<VM: VMBinding>(
+    mmtk: &MMTK<VM>,
+    addr: Address,
+    size: usize,
+    old_size: usize,
+) -> Address {
+    let res = realloc(addr, size);
+
+    if !addr.is_zero() {
+        mmtk.plan.base().decrease_malloc_bytes_by(old_size);
+    }
+    if size != 0 && !res.is_zero() {
+        mmtk.plan.base().increase_malloc_bytes_by(size);
+    }
+
+    res
+}
+
+#[inline(always)]
+pub fn free(addr: Address) {
+    unsafe { self::library::free(addr.to_mut_ptr()) }
+}
+
+#[cfg(feature = "malloc_counted_size")]
+#[inline(always)]
+pub fn free_with_size<VM: VMBinding>(mmtk: &MMTK<VM>, addr: Address, old_size: usize) {
+    free(addr);
+    if !addr.is_zero() {
+        mmtk.plan.base().decrease_malloc_bytes_by(old_size);
+    }
+}
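Because the counters above are adjusted by the sizes the caller passes in (not by `malloc_usable_size`), a binding has to remember each allocation's requested size and hand the same value back to `free_with_size` or `realloc_with_old_size`, otherwise the accounting drifts. A sketch of one way to keep that pairing explicit, with the `malloc_counted_size` feature enabled (the wrapper type and function names are illustrative, not part of this patch):

```rust
use mmtk::memory_manager;
use mmtk::util::Address;
use mmtk::vm::VMBinding;
use mmtk::MMTK;

/// A counted allocation together with the size that was counted for it.
struct CountedAlloc {
    addr: Address,
    size: usize,
}

fn counted_alloc<VM: VMBinding>(mmtk: &MMTK<VM>, size: usize) -> CountedAlloc {
    CountedAlloc {
        addr: memory_manager::counted_malloc(mmtk, size),
        size,
    }
}

fn counted_free<VM: VMBinding>(mmtk: &MMTK<VM>, a: CountedAlloc) {
    // Pass back exactly the size that was counted at allocation time.
    memory_manager::free_with_size(mmtk, a.addr, a.size);
}
```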
extern "C" fn mmtk_free(addr: Address) { + memory_manager::free(addr) +} diff --git a/vmbindings/dummyvm/src/tests/fixtures/mod.rs b/vmbindings/dummyvm/src/tests/fixtures/mod.rs index 2eb249b1d0..a3d30fbe50 100644 --- a/vmbindings/dummyvm/src/tests/fixtures/mod.rs +++ b/vmbindings/dummyvm/src/tests/fixtures/mod.rs @@ -1,16 +1,23 @@ +// Some tests are conditionally compiled. So not all the code in this module will be used. We simply allow dead code in this module. +#![allow(dead_code)] + use atomic_refcell::AtomicRefCell; use std::sync::Once; +use std::sync::Mutex; use mmtk::AllocationSemantics; +use mmtk::MMTK; use mmtk::util::{ObjectReference, VMThread, VMMutatorThread}; use crate::api::*; use crate::object_model::OBJECT_REF_OFFSET; +use crate::DummyVM; pub trait FixtureContent { fn create() -> Self; } + pub struct Fixture { content: AtomicRefCell>>, once: Once, @@ -32,10 +39,29 @@ impl Fixture { let mut borrow = self.content.borrow_mut(); *borrow = Some(content); }); - { - let borrow = self.content.borrow(); - func(borrow.as_ref().unwrap()) + let borrow = self.content.borrow(); + func(borrow.as_ref().unwrap()) + } +} + +/// SerialFixture ensures all `with_fixture()` calls will be executed serially. +pub struct SerialFixture { + content: Mutex>> +} + +impl SerialFixture { + pub fn new() -> Self { + Self { + content: Mutex::new(None) + } + } + + pub fn with_fixture(&self, func: F) { + let mut c = self.content.lock().unwrap(); + if c.is_none() { + *c = Some(Box::new(T::create())); } + func(c.as_ref().unwrap()) } } @@ -66,3 +92,20 @@ impl FixtureContent for SingleObject { SingleObject { objref } } } + +pub struct MMTKSingleton { + pub mmtk: &'static MMTK +} + +impl FixtureContent for MMTKSingleton { + fn create() -> Self { + const MB: usize = 1024 * 1024; + // 1MB heap + mmtk_gc_init(MB); + mmtk_initialize_collection(VMThread::UNINITIALIZED); + + MMTKSingleton { + mmtk: &crate::SINGLETON, + } + } +} diff --git a/vmbindings/dummyvm/src/tests/malloc.rs b/vmbindings/dummyvm/src/tests/malloc.rs deleted file mode 100644 index c7d04cfbad..0000000000 --- a/vmbindings/dummyvm/src/tests/malloc.rs +++ /dev/null @@ -1,36 +0,0 @@ -use mmtk::util::malloc; -use crate::DummyVM; - -#[test] -fn test_malloc() { - let (address1, bool1) = malloc::alloc::(16, 8, 0); - let (address2, bool2) = malloc::alloc::(16, 32, 0); - let (address3, bool3) = malloc::alloc::(16, 8, 4); - let (address4, bool4) = malloc::alloc::(32, 64, 4); - - assert!(address1.is_aligned_to(8)); - assert!(address2.is_aligned_to(32)); - assert!((address3 + 4 as isize).is_aligned_to(8)); - assert!((address4 + 4 as isize).is_aligned_to(64)); - - assert!(!bool1); - #[cfg(feature = "malloc_hoard")] - assert!(bool2); - #[cfg(not(feature = "malloc_hoard"))] - assert!(!bool2); - assert!(bool3); - assert!(bool4); - - assert!(malloc::get_malloc_usable_size(address1, bool1) >= 16); - assert!(malloc::get_malloc_usable_size(address2, bool2) >= 16); - assert!(malloc::get_malloc_usable_size(address3, bool3) >= 16); - assert!(malloc::get_malloc_usable_size(address4, bool4) >= 32); - - unsafe { malloc::free(address1.to_mut_ptr()); } - #[cfg(feature = "malloc_hoard")] - malloc::offset_free(address2); - #[cfg(not(feature = "malloc_hoard"))] - unsafe { malloc::free(address2.to_mut_ptr()); } - malloc::offset_free(address3); - malloc::offset_free(address4); -} diff --git a/vmbindings/dummyvm/src/tests/malloc_api.rs b/vmbindings/dummyvm/src/tests/malloc_api.rs new file mode 100644 index 0000000000..4b31fd8773 --- /dev/null +++ 
diff --git a/vmbindings/dummyvm/src/tests/malloc_api.rs b/vmbindings/dummyvm/src/tests/malloc_api.rs
new file mode 100644
index 0000000000..4b31fd8773
--- /dev/null
+++ b/vmbindings/dummyvm/src/tests/malloc_api.rs
@@ -0,0 +1,24 @@
+use crate::api::*;
+
+#[test]
+pub fn malloc_free() {
+    let res = mmtk_malloc(8);
+    assert!(!res.is_zero());
+    mmtk_free(res);
+}
+
+#[test]
+pub fn calloc_free() {
+    let res = mmtk_calloc(1, 8);
+    assert!(!res.is_zero());
+    mmtk_free(res);
+}
+
+#[test]
+pub fn realloc_free() {
+    let res1 = mmtk_malloc(8);
+    assert!(!res1.is_zero());
+    let res2 = mmtk_realloc(res1, 16);
+    assert!(!res2.is_zero());
+    mmtk_free(res2);
+}
diff --git a/vmbindings/dummyvm/src/tests/malloc_counted.rs b/vmbindings/dummyvm/src/tests/malloc_counted.rs
new file mode 100644
index 0000000000..3f003fbecc
--- /dev/null
+++ b/vmbindings/dummyvm/src/tests/malloc_counted.rs
@@ -0,0 +1,84 @@
+// GITHUB-CI: FEATURES=malloc_counted_size
+
+use crate::tests::fixtures::{SerialFixture, MMTKSingleton};
+use crate::api::*;
+
+lazy_static! {
+    static ref MMTK_SINGLETON: SerialFixture<MMTKSingleton> = SerialFixture::new();
+}
+
+#[test]
+pub fn malloc_free() {
+    MMTK_SINGLETON.with_fixture(|fixture| {
+        let bytes_before = fixture.mmtk.get_plan().base().get_malloc_bytes();
+
+        let res = mmtk_counted_malloc(8);
+        assert!(!res.is_zero());
+        let bytes_after_alloc = fixture.mmtk.get_plan().base().get_malloc_bytes();
+        assert_eq!(bytes_before + 8, bytes_after_alloc);
+
+        mmtk_free_with_size(res, 8);
+        let bytes_after_free = fixture.mmtk.get_plan().base().get_malloc_bytes();
+        assert_eq!(bytes_before, bytes_after_free);
+    });
+}
+
+#[test]
+pub fn calloc_free() {
+    MMTK_SINGLETON.with_fixture(|fixture| {
+        let bytes_before = fixture.mmtk.get_plan().base().get_malloc_bytes();
+
+        let res = mmtk_counted_calloc(1, 8);
+        assert!(!res.is_zero());
+        let bytes_after_alloc = fixture.mmtk.get_plan().base().get_malloc_bytes();
+        assert_eq!(bytes_before + 8, bytes_after_alloc);
+
+        mmtk_free_with_size(res, 8);
+        let bytes_after_free = fixture.mmtk.get_plan().base().get_malloc_bytes();
+        assert_eq!(bytes_before, bytes_after_free);
+    });
+}
+
+#[test]
+pub fn realloc_grow() {
+    MMTK_SINGLETON.with_fixture(|fixture| {
+        let bytes_before = fixture.mmtk.get_plan().base().get_malloc_bytes();
+
+        let res1 = mmtk_counted_malloc(8);
+        assert!(!res1.is_zero());
+        let bytes_after_alloc = fixture.mmtk.get_plan().base().get_malloc_bytes();
+        assert_eq!(bytes_before + 8, bytes_after_alloc);
+
+        // grow to 16 bytes
+        let res2 = mmtk_realloc_with_old_size(res1, 16, 8);
+        assert!(!res2.is_zero());
+        let bytes_after_realloc = fixture.mmtk.get_plan().base().get_malloc_bytes();
+        assert_eq!(bytes_before + 16, bytes_after_realloc);
+
+        mmtk_free_with_size(res2, 16);
+        let bytes_after_free = fixture.mmtk.get_plan().base().get_malloc_bytes();
+        assert_eq!(bytes_before, bytes_after_free);
+    });
+}
+
+#[test]
+pub fn realloc_shrink() {
+    MMTK_SINGLETON.with_fixture(|fixture| {
+        let bytes_before = fixture.mmtk.get_plan().base().get_malloc_bytes();
+
+        let res1 = mmtk_counted_malloc(16);
+        assert!(!res1.is_zero());
+        let bytes_after_alloc = fixture.mmtk.get_plan().base().get_malloc_bytes();
+        assert_eq!(bytes_before + 16, bytes_after_alloc);
+
+        // shrink to 8 bytes
+        let res2 = mmtk_realloc_with_old_size(res1, 8, 16);
+        assert!(!res2.is_zero());
+        let bytes_after_realloc = fixture.mmtk.get_plan().base().get_malloc_bytes();
+        assert_eq!(bytes_before + 8, bytes_after_realloc);
+
+        mmtk_free_with_size(res2, 8);
+        let bytes_after_free = fixture.mmtk.get_plan().base().get_malloc_bytes();
+        assert_eq!(bytes_before, bytes_after_free);
+    });
+}
diff --git a/vmbindings/dummyvm/src/tests/malloc_ms.rs b/vmbindings/dummyvm/src/tests/malloc_ms.rs
new file mode 100644
index 0000000000..0eeb513163
--- /dev/null
+++ b/vmbindings/dummyvm/src/tests/malloc_ms.rs
@@ -0,0 +1,36 @@
+use mmtk::util::malloc::malloc_ms_util;
+use crate::DummyVM;
+
+#[test]
+fn test_malloc() {
+    let (address1, bool1) = malloc_ms_util::alloc::<DummyVM>(16, 8, 0);
+    let (address2, bool2) = malloc_ms_util::alloc::<DummyVM>(16, 32, 0);
+    let (address3, bool3) = malloc_ms_util::alloc::<DummyVM>(16, 8, 4);
+    let (address4, bool4) = malloc_ms_util::alloc::<DummyVM>(32, 64, 4);
+
+    assert!(address1.is_aligned_to(8));
+    assert!(address2.is_aligned_to(32));
+    assert!((address3 + 4 as isize).is_aligned_to(8));
+    assert!((address4 + 4 as isize).is_aligned_to(64));
+
+    assert!(!bool1);
+    #[cfg(feature = "malloc_hoard")]
+    assert!(bool2);
+    #[cfg(not(feature = "malloc_hoard"))]
+    assert!(!bool2);
+    assert!(bool3);
+    assert!(bool4);
+
+    assert!(malloc_ms_util::get_malloc_usable_size(address1, bool1) >= 16);
+    assert!(malloc_ms_util::get_malloc_usable_size(address2, bool2) >= 16);
+    assert!(malloc_ms_util::get_malloc_usable_size(address3, bool3) >= 16);
+    assert!(malloc_ms_util::get_malloc_usable_size(address4, bool4) >= 32);
+
+    unsafe { malloc_ms_util::free(address1.to_mut_ptr()); }
+    #[cfg(feature = "malloc_hoard")]
+    malloc_ms_util::offset_free(address2);
+    #[cfg(not(feature = "malloc_hoard"))]
+    unsafe { malloc_ms_util::free(address2.to_mut_ptr()); }
+    malloc_ms_util::offset_free(address3);
+    malloc_ms_util::offset_free(address4);
+}
diff --git a/vmbindings/dummyvm/src/tests/mod.rs b/vmbindings/dummyvm/src/tests/mod.rs
index d8f2a202b9..095ff11dc2 100644
--- a/vmbindings/dummyvm/src/tests/mod.rs
+++ b/vmbindings/dummyvm/src/tests/mod.rs
@@ -11,7 +11,11 @@ mod allocate_without_initialize_collection;
 mod allocate_with_initialize_collection;
 mod allocate_with_disable_collection;
 mod allocate_with_re_enable_collection;
-mod malloc;
+#[cfg(not(feature = "malloc_counted_size"))]
+mod malloc_api;
+#[cfg(feature = "malloc_counted_size")]
+mod malloc_counted;
+mod malloc_ms;
 #[cfg(feature = "is_mmtk_object")]
 mod conservatism;
 mod is_in_mmtk_spaces;