diff --git a/Cargo.toml b/Cargo.toml
index 9a3cba2d9e..5f5c62d606 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -38,7 +38,7 @@ mimalloc-sys = { version = "0.1.6", optional = true }
 mmtk-macros = { version = "0.18.0", path = "macros/" }
 num_cpus = "1.8"
 num-traits = "0.2"
-pfm = { version = "0.1.0-beta.1", optional = true }
+pfm = { version = "0.1.0-beta.3", optional = true }
 regex = "1.7.0"
 spin = "0.9.5"
 static_assertions = "1.1.0"
diff --git a/docs/header/mmtk.h b/docs/header/mmtk.h
index 29b982eca2..6a7cca995a 100644
--- a/docs/header/mmtk.h
+++ b/docs/header/mmtk.h
@@ -44,14 +44,14 @@ extern void mmtk_disable_collection();
 extern void* mmtk_alloc(MMTk_Mutator mutator,
                         size_t size,
                         size_t align,
-                        ssize_t offset,
+                        size_t offset,
                         int allocator);
 
 // Slowpath allocation for an object
 extern void* mmtk_alloc_slow(MMTk_Mutator mutator,
                              size_t size,
                              size_t align,
-                             ssize_t offset,
+                             size_t offset,
                              int allocator);
 
 // Perform post-allocation hooks or actions such as initializing object metadata
diff --git a/examples/reference_bump_allocator.c b/examples/reference_bump_allocator.c
index 9055ffe95c..dfed257726 100644
--- a/examples/reference_bump_allocator.c
+++ b/examples/reference_bump_allocator.c
@@ -31,7 +31,7 @@ extern MMTk_Mutator bind_mutator(void *tls) {
     return NULL;
 }
 
-extern void* align_allocation(void* region, size_t align, ssize_t offset) {
+extern void* align_allocation(void* region, size_t align, size_t offset) {
     ssize_t region_signed = (ssize_t) region;
 
     ssize_t mask = (ssize_t) (align - 1);
@@ -42,7 +42,7 @@ extern void* align_allocation(void* region, size_t align, ssize_t offset) {
 }
 
 extern void* alloc(MMTk_Mutator mutator, size_t size,
-                   size_t align, ssize_t offset, int allocator) {
+                   size_t align, size_t offset, int allocator) {
     void* result = align_allocation(IMMORTAL_SPACE.heap_cursor, align, offset);
     void* new_cursor = (void*)((size_t) result + size);
 
@@ -54,7 +54,7 @@
 }
 
 extern void* alloc_slow(MMTk_Mutator mutator, size_t size,
-                        size_t align, ssize_t offset, int allocator) {
+                        size_t align, size_t offset, int allocator) {
     perror("Not implemented\n");
     exit(1);
diff --git a/src/memory_manager.rs b/src/memory_manager.rs
index ba34dd0398..5c736d27c5 100644
--- a/src/memory_manager.rs
+++ b/src/memory_manager.rs
@@ -146,7 +146,7 @@ pub fn alloc<VM: VMBinding>(
     mutator: &mut Mutator<VM>,
     size: usize,
     align: usize,
-    offset: isize,
+    offset: usize,
     semantics: AllocationSemantics,
 ) -> Address {
     // MMTk has assumptions about minimal object size.
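Note: the header and memory_manager.rs hunks above change the public allocation entry points so that `offset` is an unsigned `size_t`/`usize` rather than a signed `ssize_t`/`isize`. A minimal standalone sketch (not MMTk code; the helper name is made up) of what that means for a binding that previously carried signed offsets: the non-negativity check now happens at the API boundary instead of inside the allocator.

    // Standalone sketch, not MMTk code: converting a legacy signed offset at the
    // boundary now that `alloc`/`mmtk_alloc` take `offset: usize`.
    fn to_alloc_offset(signed_offset: isize) -> usize {
        // Negative offsets were never meaningful; with the new signature they are
        // unrepresentable, so reject them here instead of asserting inside MMTk.
        usize::try_from(signed_offset).expect("allocation offsets must be non-negative")
    }

    fn main() {
        assert_eq!(to_alloc_offset(8), 8);
        // to_alloc_offset(-8) would panic: a negative offset can no longer be passed through.
        println!("offset = {}", to_alloc_offset(8));
    }
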
diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs
index 71713bf455..b221f3003f 100644
--- a/src/plan/mutator_context.rs
+++ b/src/plan/mutator_context.rs
@@ -86,7 +86,7 @@ impl<VM: VMBinding> MutatorContext<VM> for Mutator<VM> {
         &mut self,
         size: usize,
         align: usize,
-        offset: isize,
+        offset: usize,
         allocator: AllocationSemantics,
     ) -> Address {
         unsafe {
@@ -161,7 +161,7 @@ pub trait MutatorContext<VM: VMBinding>: Send + 'static {
         &mut self,
         size: usize,
         align: usize,
-        offset: isize,
+        offset: usize,
         allocator: AllocationSemantics,
     ) -> Address;
     fn post_alloc(&mut self, refer: ObjectReference, bytes: usize, allocator: AllocationSemantics);
diff --git a/src/policy/copy_context.rs b/src/policy/copy_context.rs
index e9b15176bf..0954834874 100644
--- a/src/policy/copy_context.rs
+++ b/src/policy/copy_context.rs
@@ -20,7 +20,7 @@ pub trait PolicyCopyContext: 'static + Send {
         original: ObjectReference,
         bytes: usize,
         align: usize,
-        offset: isize,
+        offset: usize,
     ) -> Address;
     fn post_copy(&mut self, _obj: ObjectReference, _bytes: usize) {}
 }
diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs
index 70909b263d..18823a0fb3 100644
--- a/src/policy/copyspace.rs
+++ b/src/policy/copyspace.rs
@@ -306,7 +306,7 @@ impl<VM: VMBinding> PolicyCopyContext for CopySpaceCopyContext<VM> {
         _original: ObjectReference,
         bytes: usize,
         align: usize,
-        offset: isize,
+        offset: usize,
     ) -> Address {
         self.copy_allocator.alloc(bytes, align, offset)
     }
diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs
index d50850ce5d..ed09e3e75f 100644
--- a/src/policy/immix/immixspace.rs
+++ b/src/policy/immix/immixspace.rs
@@ -900,7 +900,7 @@ impl<VM: VMBinding> PolicyCopyContext for ImmixCopyContext<VM> {
         _original: ObjectReference,
         bytes: usize,
         align: usize,
-        offset: isize,
+        offset: usize,
     ) -> Address {
         self.allocator.alloc(bytes, align, offset)
     }
@@ -949,7 +949,7 @@ impl<VM: VMBinding> PolicyCopyContext for ImmixHybridCopyContext<VM> {
         _original: ObjectReference,
         bytes: usize,
         align: usize,
-        offset: isize,
+        offset: usize,
     ) -> Address {
         if self.get_space().in_defrag() {
             self.defrag_allocator.alloc(bytes, align, offset)
diff --git a/src/policy/marksweepspace/malloc_ms/global.rs b/src/policy/marksweepspace/malloc_ms/global.rs
index 0742eeda04..6df45a0d9d 100644
--- a/src/policy/marksweepspace/malloc_ms/global.rs
+++ b/src/policy/marksweepspace/malloc_ms/global.rs
@@ -325,7 +325,7 @@ impl<VM: VMBinding> MallocSpace<VM> {
         }
     }
 
-    pub fn alloc(&self, tls: VMThread, size: usize, align: usize, offset: isize) -> Address {
+    pub fn alloc(&self, tls: VMThread, size: usize, align: usize, offset: usize) -> Address {
         // TODO: Should refactor this and Space.acquire()
         if self.get_gc_trigger().poll(false, Some(self)) {
             assert!(VM::VMActivePlan::is_mutator(tls), "Polling in GC worker");
diff --git a/src/util/alloc/allocator.rs b/src/util/alloc/allocator.rs
index 9cc235609e..a1cbc65e4a 100644
--- a/src/util/alloc/allocator.rs
+++ b/src/util/alloc/allocator.rs
@@ -23,7 +23,7 @@ pub enum AllocationError {
 pub fn align_allocation_no_fill<VM: VMBinding>(
     region: Address,
     alignment: usize,
-    offset: isize,
+    offset: usize,
 ) -> Address {
     align_allocation_inner::<VM>(region, alignment, offset, VM::MIN_ALIGNMENT, false)
 }
@@ -31,7 +31,7 @@
 pub fn align_allocation<VM: VMBinding>(
     region: Address,
     alignment: usize,
-    offset: isize,
+    offset: usize,
 ) -> Address {
     align_allocation_inner::<VM>(region, alignment, offset, VM::MIN_ALIGNMENT, true)
 }
@@ -39,7 +39,7 @@
 pub fn align_allocation_inner<VM: VMBinding>(
     region: Address,
     alignment: usize,
-    offset: isize,
+    offset: usize,
     known_alignment: usize,
     fillalignmentgap: bool,
 ) -> Address {
@@ -51,10 +51,9 @@ pub fn align_allocation_inner<VM: VMBinding>(
     }
     debug_assert!(!(fillalignmentgap && region.is_zero()));
     debug_assert!(alignment <= VM::MAX_ALIGNMENT);
-    debug_assert!(offset >= 0);
     debug_assert!(region.is_aligned_to(VM::ALLOC_END_ALIGNMENT));
     debug_assert!((alignment & (VM::MIN_ALIGNMENT - 1)) == 0);
-    debug_assert!((offset & (VM::MIN_ALIGNMENT - 1) as isize) == 0);
+    debug_assert!((offset & (VM::MIN_ALIGNMENT - 1)) == 0);
 
     // No alignment ever required.
     if alignment <= known_alignment || VM::MAX_ALIGNMENT <= VM::MIN_ALIGNMENT {
@@ -64,7 +63,7 @@
     // May require an alignment
     let region_isize = region.as_usize() as isize;
     let mask = (alignment - 1) as isize; // fromIntSignExtend
-    let neg_off = -offset; // fromIntSignExtend
+    let neg_off: isize = -(offset as isize); // fromIntSignExtend
 
     // TODO: Consider using neg_off.wrapping_sub_unsigned(region.as_usize()), and we can remove region_isize.
     // This requires Rust 1.66.0+.
@@ -164,7 +163,7 @@ pub trait Allocator<VM: VMBinding>: Downcast {
     /// * `size`: the allocation size in bytes.
     /// * `align`: the required alignment in bytes.
     /// * `offset` the required offset in bytes.
-    fn alloc(&mut self, size: usize, align: usize, offset: isize) -> Address;
+    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address;
 
     /// Slowpath allocation attempt. This function is explicitly not inlined for performance
     /// considerations.
@@ -174,7 +173,7 @@
     /// * `align`: the required alignment in bytes.
     /// * `offset` the required offset in bytes.
     #[inline(never)]
-    fn alloc_slow(&mut self, size: usize, align: usize, offset: isize) -> Address {
+    fn alloc_slow(&mut self, size: usize, align: usize, offset: usize) -> Address {
         self.alloc_slow_inline(size, align, offset)
     }
 
@@ -194,7 +193,7 @@
     /// * `size`: the allocation size in bytes.
     /// * `align`: the required alignment in bytes.
     /// * `offset` the required offset in bytes.
-    fn alloc_slow_inline(&mut self, size: usize, align: usize, offset: isize) -> Address {
+    fn alloc_slow_inline(&mut self, size: usize, align: usize, offset: usize) -> Address {
         let tls = self.get_tls();
         let plan = self.get_plan().base();
         let is_mutator = VM::VMActivePlan::is_mutator(tls);
@@ -317,7 +316,7 @@
     /// * `size`: the allocation size in bytes.
     /// * `align`: the required alignment in bytes.
     /// * `offset` the required offset in bytes.
-    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: isize) -> Address;
+    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address;
 
     /// Single slowpath allocation attempt for stress test. When the stress factor is set (e.g. to
     /// N), we would expect for every N bytes allocated, we will trigger a stress GC. However, for
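Note: the align_allocation_inner hunks above drop the `offset >= 0` assertion (now guaranteed by the type) and only cast the offset back to `isize` for the modular arithmetic. A standalone re-implementation of just that arithmetic, using a plain `usize` in place of MMTk's `Address` type (illustrative only, not MMTk code), shows the invariant it maintains: the cursor is moved forward so that `result + offset` is aligned.

    // Minimal sketch of the arithmetic in align_allocation_inner, assuming
    // `alignment` is a power of two and `offset` is a multiple of the minimum
    // alignment, as the debug_assert!s in the diff require.
    fn align_allocation(region: usize, alignment: usize, offset: usize) -> usize {
        debug_assert!(alignment.is_power_of_two());
        let mask = (alignment - 1) as isize;
        // Two's-complement `&` with the mask computes (-offset - region) mod alignment.
        let neg_off = -(offset as isize);
        let delta = (neg_off - region as isize) & mask;
        region + delta as usize
    }

    fn main() {
        let region = 0x1_0008;
        let result = align_allocation(region, 16, 8);
        assert!(result >= region && result - region < 16);
        assert_eq!((result + 8) % 16, 0); // a field at offset 8 ends up 16-byte aligned
        println!("region {:#x} -> result {:#x}", region, result);
    }

With `offset: usize`, the only signedness left is internal to this computation, which is why the cast appears here rather than in every caller.
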
@@ -351,7 +350,7 @@ pub trait Allocator<VM: VMBinding>: Downcast {
         &mut self,
         size: usize,
         align: usize,
-        offset: isize,
+        offset: usize,
         need_poll: bool,
     ) -> Address {
         // If an allocator does thread local allocation but does not override this method to
diff --git a/src/util/alloc/bumpallocator.rs b/src/util/alloc/bumpallocator.rs
index b31de06a5e..65a9c22578 100644
--- a/src/util/alloc/bumpallocator.rs
+++ b/src/util/alloc/bumpallocator.rs
@@ -61,7 +61,7 @@ impl<VM: VMBinding> Allocator<VM> for BumpAllocator<VM> {
         BLOCK_SIZE
     }
 
-    fn alloc(&mut self, size: usize, align: usize, offset: isize) -> Address {
+    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
         trace!("alloc");
         let result = align_allocation_no_fill::<VM>(self.cursor, align, offset);
         let new_cursor = result + size;
@@ -83,7 +83,7 @@
         }
     }
 
-    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: isize) -> Address {
+    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address {
         trace!("alloc_slow");
         self.acquire_block(size, align, offset, false)
     }
@@ -100,7 +100,7 @@
         &mut self,
         size: usize,
         align: usize,
-        offset: isize,
+        offset: usize,
         need_poll: bool,
     ) -> Address {
         if need_poll {
@@ -155,7 +155,7 @@
         &mut self,
         size: usize,
         align: usize,
-        offset: isize,
+        offset: usize,
         stress_test: bool,
     ) -> Address {
         let block_size = (size + BLOCK_MASK) & (!BLOCK_MASK);
diff --git a/src/util/alloc/free_list_allocator.rs b/src/util/alloc/free_list_allocator.rs
index e7e07f7edf..cc76cb969f 100644
--- a/src/util/alloc/free_list_allocator.rs
+++ b/src/util/alloc/free_list_allocator.rs
@@ -43,7 +43,7 @@
     }
 
     // Find a block with free space and allocate to it
-    fn alloc(&mut self, size: usize, align: usize, offset: isize) -> Address {
+    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
         debug_assert!(
             size <= MAX_BIN_SIZE,
             "Alloc request for {} bytes is too big.",
@@ -79,7 +79,7 @@
         self.alloc_slow(size, align, offset)
     }
 
-    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: isize) -> Address {
+    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address {
         // Try get a block from the space
         if let Some(block) = self.acquire_global_block(size, align, false) {
             let addr = self.block_alloc(block);
@@ -101,7 +101,7 @@
         &mut self,
         size: usize,
         align: usize,
-        offset: isize,
+        offset: usize,
         need_poll: bool,
     ) -> Address {
         trace!("allow slow precise stress s={}", size);
diff --git a/src/util/alloc/immix_allocator.rs b/src/util/alloc/immix_allocator.rs
index 4563bd802c..0679ceb7e4 100644
--- a/src/util/alloc/immix_allocator.rs
+++ b/src/util/alloc/immix_allocator.rs
@@ -65,7 +65,7 @@ impl<VM: VMBinding> Allocator<VM> for ImmixAllocator<VM> {
         crate::policy::immix::block::Block::BYTES
     }
 
-    fn alloc(&mut self, size: usize, align: usize, offset: isize) -> Address {
+    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
         debug_assert!(
             size <= crate::policy::immix::MAX_IMMIX_OBJECT_SIZE,
             "Trying to allocate a {} bytes object, which is larger than MAX_IMMIX_OBJECT_SIZE {}",
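Note: BumpAllocator::alloc and ImmixAllocator::alloc above share the same fast-path shape: align the cursor for the requested (align, offset) pair, bump it by `size`, and fall back to the slow path when the limit is exceeded. A self-contained sketch of that shape with the new `offset: usize` parameter (field and method names are illustrative, not the real types; it reuses the alignment arithmetic sketched after the allocator.rs hunks):

    // Standalone sketch of a bump-pointer fast path. Not MMTk code; the slow path
    // is stubbed out as `None`.
    struct Bump {
        cursor: usize,
        limit: usize,
    }

    impl Bump {
        fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Option<usize> {
            // Align so that (result + offset) is a multiple of `align`.
            let delta = (-(offset as isize) - self.cursor as isize) & (align - 1) as isize;
            let result = self.cursor + delta as usize;
            let new_cursor = result + size;
            if new_cursor > self.limit {
                None // here the real allocators call alloc_slow / acquire a new block
            } else {
                self.cursor = new_cursor;
                Some(result)
            }
        }
    }

    fn main() {
        let mut b = Bump { cursor: 0x1000, limit: 0x2000 };
        let a = b.alloc(24, 8, 0).expect("fits in the current block");
        assert_eq!(a % 8, 0);
        println!("allocated at {:#x}, cursor now {:#x}", a, b.cursor);
    }
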
@@ -104,7 +104,7 @@
     }
 
     /// Acquire a clean block from ImmixSpace for allocation.
-    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: isize) -> Address {
+    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address {
         trace!("{:?}: alloc_slow_once", self.tls);
         self.acquire_clean_block(size, align, offset)
     }
@@ -117,7 +117,7 @@
         &mut self,
         size: usize,
         align: usize,
-        offset: isize,
+        offset: usize,
         need_poll: bool,
     ) -> Address {
         trace!("{:?}: alloc_slow_once_precise_stress", self.tls);
@@ -195,7 +195,7 @@
     }
 
     /// Large-object (larger than a line) bump allocation.
-    fn overflow_alloc(&mut self, size: usize, align: usize, offset: isize) -> Address {
+    fn overflow_alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
         trace!("{:?}: overflow_alloc", self.tls);
         let start = align_allocation_no_fill::<VM>(self.large_cursor, align, offset);
         let end = start + size;
@@ -212,7 +212,7 @@
     }
 
     /// Bump allocate small objects into recyclable lines (i.e. holes).
-    fn alloc_slow_hot(&mut self, size: usize, align: usize, offset: isize) -> Address {
+    fn alloc_slow_hot(&mut self, size: usize, align: usize, offset: usize) -> Address {
         trace!("{:?}: alloc_slow_hot", self.tls);
         if self.acquire_recyclable_lines(size, align, offset) {
             // If stress test is active, then we need to go to the slow path instead of directly
@@ -238,7 +238,7 @@
     }
 
     /// Search for recyclable lines.
-    fn acquire_recyclable_lines(&mut self, size: usize, align: usize, offset: isize) -> bool {
+    fn acquire_recyclable_lines(&mut self, size: usize, align: usize, offset: usize) -> bool {
         while self.line.is_some() || self.acquire_recyclable_block() {
             let line = self.line.unwrap();
             if let Some((start_line, end_line)) = self.immix_space().get_next_available_lines(line)
@@ -289,7 +289,7 @@
     }
 
     // Get a clean block from ImmixSpace.
-    fn acquire_clean_block(&mut self, size: usize, align: usize, offset: isize) -> Address {
+    fn acquire_clean_block(&mut self, size: usize, align: usize, offset: usize) -> Address {
         match self.immix_space().get_clean_block(self.tls, self.copy) {
             None => Address::ZERO,
             Some(block) => {
@@ -314,7 +314,7 @@
     /// Return whether the TLAB has been exhausted and we need to acquire a new block. Assumes that
     /// the buffer limits have been restored using [`ImmixAllocator::restore_limit_for_stress`].
    /// Note that this function may implicitly change the limits of the allocator.
-    fn require_new_block(&mut self, size: usize, align: usize, offset: isize) -> bool {
+    fn require_new_block(&mut self, size: usize, align: usize, offset: usize) -> bool {
         let result = align_allocation_no_fill::<VM>(self.cursor, align, offset);
         let new_cursor = result + size;
         let insufficient_space = new_cursor > self.limit;
diff --git a/src/util/alloc/large_object_allocator.rs b/src/util/alloc/large_object_allocator.rs
index 5c6abf2b83..43191cdf22 100644
--- a/src/util/alloc/large_object_allocator.rs
+++ b/src/util/alloc/large_object_allocator.rs
@@ -34,7 +34,7 @@
         false
     }
 
-    fn alloc(&mut self, size: usize, align: usize, offset: isize) -> Address {
+    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
         let cell: Address = self.alloc_slow(size, align, offset);
         // We may get a null ptr from alloc due to the VM being OOM
         if !cell.is_zero() {
@@ -44,7 +44,7 @@
         }
     }
 
-    fn alloc_slow_once(&mut self, size: usize, align: usize, _offset: isize) -> Address {
+    fn alloc_slow_once(&mut self, size: usize, align: usize, _offset: usize) -> Address {
         let header = 0; // HashSet is used instead of DoublyLinkedList
         let maxbytes = allocator::get_maximum_aligned_size::<VM>(size + header, align);
         let pages = crate::util::conversions::bytes_to_pages_up(maxbytes);
diff --git a/src/util/alloc/malloc_allocator.rs b/src/util/alloc/malloc_allocator.rs
index b7074cd20c..9585a10c17 100644
--- a/src/util/alloc/malloc_allocator.rs
+++ b/src/util/alloc/malloc_allocator.rs
@@ -25,7 +25,7 @@
         self.plan
     }
 
-    fn alloc(&mut self, size: usize, align: usize, offset: isize) -> Address {
+    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
         self.alloc_slow(size, align, offset)
     }
 
@@ -37,9 +37,7 @@
         false
     }
 
-    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: isize) -> Address {
-        assert!(offset >= 0);
-
+    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address {
         self.space.alloc(self.tls, size, align, offset)
     }
 }
diff --git a/src/util/alloc/markcompact_allocator.rs b/src/util/alloc/markcompact_allocator.rs
index a8fabb7b98..72332318d4 100644
--- a/src/util/alloc/markcompact_allocator.rs
+++ b/src/util/alloc/markcompact_allocator.rs
@@ -48,7 +48,7 @@
         self.bump_allocator.get_thread_local_buffer_granularity()
     }
 
-    fn alloc(&mut self, size: usize, align: usize, offset: isize) -> Address {
+    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
         let rtn = self
             .bump_allocator
             .alloc(size + Self::HEADER_RESERVED_IN_BYTES, align, offset);
@@ -61,7 +61,7 @@
         }
     }
 
-    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: isize) -> Address {
+    fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address {
         trace!("alloc_slow");
         self.bump_allocator.alloc_slow_once(size, align, offset)
     }
@@ -78,7 +78,7 @@
         &mut self,
         size: usize,
         align: usize,
-        offset: isize,
+        offset: usize,
         need_poll: bool,
     ) -> Address {
         self.bump_allocator
diff --git a/src/util/analysis/mod.rs b/src/util/analysis/mod.rs
index 4814afe880..6df78658d1 100644
--- a/src/util/analysis/mod.rs
+++ b/src/util/analysis/mod.rs
@@ -23,7 +23,7 @@ use self::obj_size::PerSizeClassObjectCounter;
 /// invoke it in its respective place.
 ///
 pub trait RtAnalysis<VM: VMBinding> {
-    fn alloc_hook(&mut self, _size: usize, _align: usize, _offset: isize) {}
+    fn alloc_hook(&mut self, _size: usize, _align: usize, _offset: usize) {}
     fn gc_hook(&mut self, _mmtk: &'static MMTK<VM>) {}
     fn set_running(&mut self, running: bool);
 }
@@ -72,7 +72,7 @@ impl<VM: VMBinding> AnalysisManager<VM> {
         routines.push(routine.clone());
     }
 
-    pub fn alloc_hook(&self, size: usize, align: usize, offset: isize) {
+    pub fn alloc_hook(&self, size: usize, align: usize, offset: usize) {
         let routines = self.routines.lock().unwrap();
         for r in &*routines {
             r.lock().unwrap().alloc_hook(size, align, offset);
diff --git a/src/util/analysis/obj_num.rs b/src/util/analysis/obj_num.rs
index ff0f967ee3..a4bdd1a239 100644
--- a/src/util/analysis/obj_num.rs
+++ b/src/util/analysis/obj_num.rs
@@ -18,7 +18,7 @@ impl ObjectCounter {
 }
 
 impl<VM: VMBinding> RtAnalysis<VM> for ObjectCounter {
-    fn alloc_hook(&mut self, _size: usize, _align: usize, _offset: isize) {
+    fn alloc_hook(&mut self, _size: usize, _align: usize, _offset: usize) {
         if self.running {
             // The analysis routine simply updates the counter when the allocation hook is called
             self.counter.lock().unwrap().inc();
diff --git a/src/util/analysis/obj_size.rs b/src/util/analysis/obj_size.rs
index 6ad2b1715c..b02bbb9479 100644
--- a/src/util/analysis/obj_size.rs
+++ b/src/util/analysis/obj_size.rs
@@ -46,7 +46,7 @@ impl PerSizeClassObjectCounter {
 }
 
 impl<VM: VMBinding> RtAnalysis<VM> for PerSizeClassObjectCounter {
-    fn alloc_hook(&mut self, size: usize, _align: usize, _offset: isize) {
+    fn alloc_hook(&mut self, size: usize, _align: usize, _offset: usize) {
         if !self.running {
             return;
         }
diff --git a/src/util/copy/mod.rs b/src/util/copy/mod.rs
index a342193d9c..cf1a3ad45d 100644
--- a/src/util/copy/mod.rs
+++ b/src/util/copy/mod.rs
@@ -75,7 +75,7 @@ impl<VM: VMBinding> GCWorkerCopyContext<VM> {
         original: ObjectReference,
         bytes: usize,
         align: usize,
-        offset: isize,
+        offset: usize,
         semantics: CopySemantics,
     ) -> Address {
         #[cfg(debug_assertions)]
diff --git a/src/util/malloc/malloc_ms_util.rs b/src/util/malloc/malloc_ms_util.rs
index ea67735132..78fd8c7ca4 100644
--- a/src/util/malloc/malloc_ms_util.rs
+++ b/src/util/malloc/malloc_ms_util.rs
@@ -18,7 +18,7 @@ pub fn align_alloc(size: usize, align: usize) -> Address {
 
 // Beside returning the allocation result,
 // this will store the malloc result at (result - BYTES_IN_ADDRESS)
-pub fn align_offset_alloc<VM: VMBinding>(size: usize, align: usize, offset: isize) -> Address {
+pub fn align_offset_alloc<VM: VMBinding>(size: usize, align: usize, offset: usize) -> Address {
     // we allocate extra `align` bytes here, so we are able to handle offset
     let actual_size = size + align + BYTES_IN_ADDRESS;
     let raw = unsafe { calloc(1, actual_size) };
@@ -26,7 +26,7 @@ pub fn align_offset_alloc<VM: VMBinding>(size: usize, align: usize, offset: isiz
     if address.is_zero() {
         return address;
     }
-    let mod_offset = offset % (align as isize);
+    let mod_offset = offset % align;
     let mut result =
         crate::util::alloc::allocator::align_allocation_no_fill::<VM>(address, align, mod_offset);
     if result - BYTES_IN_ADDRESS < address {
@@ -64,7 +64,7 @@ pub fn get_malloc_usable_size(address: Address, is_offset_malloc: bool) -> usize
 
 /// allocate `size` bytes, which is aligned to `align` at `offset`
 /// return the address, and whether it is an offset allocation
-pub fn alloc<VM: VMBinding>(size: usize, align: usize, offset: isize) -> (Address, bool) {
+pub fn alloc<VM: VMBinding>(size: usize, align: usize, offset: usize) -> (Address, bool) {
     let address: Address;
     let mut is_offset_malloc = false;
     // malloc returns 16 bytes aligned address.
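Note: align_offset_alloc above implements "offset malloc": over-allocate by `align` plus one word, place the cell so that `(result + offset)` is aligned, and stash the raw calloc pointer one word below the returned address so it can be recovered when freeing. A standalone sketch of the same idea (leaked Vec as backing memory, made-up names; it assumes, as the real code does, that `align` is at least word-sized and `offset` is word-aligned so the stash slot is properly aligned):

    // Standalone sketch of the offset-malloc layout used by align_offset_alloc.
    // Not MMTk code: the backing memory is a leaked Vec purely for illustration,
    // and a real implementation would free it via the stashed base pointer.
    const BYTES_IN_ADDRESS: usize = std::mem::size_of::<usize>();

    fn align_up_with_offset(addr: usize, align: usize, offset: usize) -> usize {
        let delta = (-(offset as isize) - addr as isize) & (align - 1) as isize;
        addr + delta as usize
    }

    fn offset_alloc(size: usize, align: usize, offset: usize) -> usize {
        // Over-allocate so there is always room for the alignment gap plus one word.
        let actual_size = size + align + BYTES_IN_ADDRESS;
        let raw = Box::leak(vec![0u8; actual_size].into_boxed_slice()).as_mut_ptr() as usize;
        let mut result = align_up_with_offset(raw, align, offset % align);
        if result - BYTES_IN_ADDRESS < raw {
            result += align; // make sure the stash slot lies inside the allocation
        }
        // Record where the real allocation starts, one word below the returned cell.
        unsafe { *((result - BYTES_IN_ADDRESS) as *mut usize) = raw };
        result
    }

    fn main() {
        let cell = offset_alloc(32, 16, 8);
        assert_eq!((cell + 8) % 16, 0);
        let base = unsafe { *((cell - BYTES_IN_ADDRESS) as *const usize) };
        assert!(base <= cell - BYTES_IN_ADDRESS);
        println!("cell at {:#x}, backing allocation at {:#x}", cell, base);
    }
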
diff --git a/src/vm/object_model.rs b/src/vm/object_model.rs
index f640e0f453..bb7c8c0f31 100644
--- a/src/vm/object_model.rs
+++ b/src/vm/object_model.rs
@@ -351,7 +351,7 @@ pub trait ObjectModel<VM: VMBinding> {
     ///
     /// Arguments:
     /// * `object`: The object to be queried.
-    fn get_align_offset_when_copied(object: ObjectReference) -> isize;
+    fn get_align_offset_when_copied(object: ObjectReference) -> usize;
 
     /// Get the type descriptor for an object.
     ///
diff --git a/vmbindings/dummyvm/api/mmtk.h b/vmbindings/dummyvm/api/mmtk.h
index 4283d218e5..3d126341ec 100644
--- a/vmbindings/dummyvm/api/mmtk.h
+++ b/vmbindings/dummyvm/api/mmtk.h
@@ -42,14 +42,14 @@ extern void mmtk_disable_collection();
 extern void* mmtk_alloc(MMTk_Mutator mutator,
                         size_t size,
                         size_t align,
-                        ssize_t offset,
+                        size_t offset,
                         int allocator);
 
 // Slowpath allocation for an object
 extern void* mmtk_alloc_slow(MMTk_Mutator mutator,
                              size_t size,
                              size_t align,
-                             ssize_t offset,
+                             size_t offset,
                              int allocator);
 
 // Perform post-allocation hooks or actions such as initializing object metadata
diff --git a/vmbindings/dummyvm/src/api.rs b/vmbindings/dummyvm/src/api.rs
index d0fd2aa0a7..2109c187d8 100644
--- a/vmbindings/dummyvm/src/api.rs
+++ b/vmbindings/dummyvm/src/api.rs
@@ -44,7 +44,7 @@ pub extern "C" fn mmtk_destroy_mutator(mutator: *mut Mutator<DummyVM>) {
 
 #[no_mangle]
 pub extern "C" fn mmtk_alloc(mutator: *mut Mutator<DummyVM>, size: usize,
-    align: usize, offset: isize, mut semantics: AllocationSemantics) -> Address {
+    align: usize, offset: usize, mut semantics: AllocationSemantics) -> Address {
     if size >= SINGLETON.get_plan().constraints().max_non_los_default_alloc_bytes {
         semantics = AllocationSemantics::Los;
     }
diff --git a/vmbindings/dummyvm/src/object_model.rs b/vmbindings/dummyvm/src/object_model.rs
index 57e9484e6f..ca90c7a8d1 100644
--- a/vmbindings/dummyvm/src/object_model.rs
+++ b/vmbindings/dummyvm/src/object_model.rs
@@ -42,7 +42,7 @@ impl ObjectModel<DummyVM> for VMObjectModel {
         ::std::mem::size_of::<usize>()
     }
 
-    fn get_align_offset_when_copied(_object: ObjectReference) -> isize {
+    fn get_align_offset_when_copied(_object: ObjectReference) -> usize {
         0
     }
 
diff --git a/vmbindings/dummyvm/src/tests/allocate_align_offset.rs b/vmbindings/dummyvm/src/tests/allocate_align_offset.rs
index b305579466..ad8b27fb5d 100644
--- a/vmbindings/dummyvm/src/tests/allocate_align_offset.rs
+++ b/vmbindings/dummyvm/src/tests/allocate_align_offset.rs
@@ -30,7 +30,7 @@ pub fn allocate_alignment() {
 #[test]
 pub fn allocate_offset() {
     MUTATOR.with_fixture(|fixture| {
-        const OFFSET: isize = 4;
+        const OFFSET: usize = 4;
         let min = DummyVM::MIN_ALIGNMENT;
         let max = DummyVM::MAX_ALIGNMENT;
         info!("Allowed alignment between {} and {}", min, max);
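Note: the dummyvm api.rs hunk above also shows the size-based semantics override that wraps the new `offset: usize` signature: requests at or above the plan's `max_non_los_default_alloc_bytes` are redirected to the large object space before the allocator is called. A standalone sketch of that routing decision (the enum and the threshold value here are stand-ins, not the real MMTk types):

    // Standalone sketch of the semantics override in mmtk_alloc. Not MMTk code;
    // the threshold constant is illustrative only.
    #[derive(Debug, PartialEq, Clone, Copy)]
    enum AllocationSemantics {
        Default,
        Los,
    }

    fn choose_semantics(size: usize, requested: AllocationSemantics) -> AllocationSemantics {
        const MAX_NON_LOS_DEFAULT_ALLOC_BYTES: usize = 16 * 1024; // made-up threshold
        if size >= MAX_NON_LOS_DEFAULT_ALLOC_BYTES {
            // Oversized requests go to the large object space regardless of what was asked for.
            AllocationSemantics::Los
        } else {
            requested
        }
    }

    fn main() {
        assert_eq!(choose_semantics(64, AllocationSemantics::Default), AllocationSemantics::Default);
        assert_eq!(choose_semantics(1 << 20, AllocationSemantics::Default), AllocationSemantics::Los);
        println!("semantics override works as sketched");
    }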