diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp index f4453a7ba0bb4..e081b3da7ee10 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -86,6 +86,7 @@ #include "gc/shared/oopStorageParState.hpp" #include "gc/shared/preservedMarks.inline.hpp" #include "gc/shared/referenceProcessor.inline.hpp" +#include "gc/shared/slidingForwarding.hpp" #include "gc/shared/suspendibleThreadSet.hpp" #include "gc/shared/taskqueue.inline.hpp" #include "gc/shared/taskTerminator.hpp" @@ -1440,6 +1441,8 @@ jint G1CollectedHeap::initialize() { G1InitLogger::print(); + SlidingForwarding::initialize(heap_rs.region(), HeapRegion::GrainWords); + return JNI_OK; } diff --git a/src/hotspot/share/gc/g1/g1FullCollector.cpp b/src/hotspot/share/gc/g1/g1FullCollector.cpp index 430d6f327886b..1a87120b82998 100644 --- a/src/hotspot/share/gc/g1/g1FullCollector.cpp +++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp @@ -40,6 +40,7 @@ #include "gc/shared/preservedMarks.inline.hpp" #include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/referenceProcessor.hpp" +#include "gc/shared/slidingForwarding.hpp" #include "gc/shared/verifyOption.hpp" #include "gc/shared/weakProcessor.inline.hpp" #include "gc/shared/workerPolicy.hpp" @@ -212,6 +213,8 @@ void G1FullCollector::collect() { // Don't add any more derived pointers during later phases deactivate_derived_pointers(); + SlidingForwarding::begin(); + phase2_prepare_compaction(); if (has_compaction_targets()) { @@ -224,6 +227,8 @@ void G1FullCollector::collect() { log_info(gc, phases) ("No Regions selected for compaction. 
Skipping Phase 3: Adjust pointers and Phase 4: Compact heap"); } + SlidingForwarding::end(); + phase5_reset_metadata(); G1CollectedHeap::finish_codecache_marking_cycle(); diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp index 2ed6ccd4735fb..cfb16e59bc20e 100644 --- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp @@ -30,6 +30,7 @@ #include "gc/g1/g1FullGCCompactTask.hpp" #include "gc/g1/heapRegion.inline.hpp" #include "gc/shared/gcTraceTime.inline.hpp" +#include "gc/shared/slidingForwarding.inline.hpp" #include "logging/log.hpp" #include "oops/oop.inline.hpp" #include "utilities/ticks.hpp" @@ -41,7 +42,7 @@ void G1FullGCCompactTask::G1CompactRegionClosure::clear_in_bitmap(oop obj) { size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) { size_t size = obj->size(); - if (obj->is_forwarded()) { + if (SlidingForwarding::is_forwarded(obj)) { G1FullGCCompactTask::copy_object_to_new_location(obj); } @@ -52,13 +53,13 @@ size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) { } void G1FullGCCompactTask::copy_object_to_new_location(oop obj) { - assert(obj->is_forwarded(), "Sanity!"); - assert(obj->forwardee() != obj, "Object must have a new location"); + assert(SlidingForwarding::is_forwarded(obj), "Sanity!"); + assert(SlidingForwarding::forwardee(obj) != obj, "Object must have a new location"); size_t size = obj->size(); // Copy object and reinit its mark. HeapWord* obj_addr = cast_from_oop(obj); - HeapWord* destination = cast_from_oop(obj->forwardee()); + HeapWord* destination = cast_from_oop(SlidingForwarding::forwardee(obj)); Copy::aligned_conjoint_words(obj_addr, destination, size); // There is no need to transform stack chunks - marking already did that. 
@@ -121,7 +122,7 @@ void G1FullGCCompactTask::compact_humongous_obj(HeapRegion* src_hr) { size_t word_size = obj->size(); uint num_regions = (uint)G1CollectedHeap::humongous_obj_size_in_regions(word_size); - HeapWord* destination = cast_from_oop(obj->forwardee()); + HeapWord* destination = cast_from_oop(SlidingForwarding::forwardee(obj)); assert(collector()->mark_bitmap()->is_marked(obj), "Should only compact marked objects"); collector()->mark_bitmap()->clear(obj); diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp index a1db2aa87ba2e..573d10368453f 100644 --- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp @@ -27,6 +27,7 @@ #include "gc/g1/g1FullGCCompactionPoint.hpp" #include "gc/g1/heapRegion.hpp" #include "gc/shared/preservedMarks.inline.hpp" +#include "gc/shared/slidingForwarding.inline.hpp" #include "oops/oop.inline.hpp" #include "utilities/debug.hpp" @@ -106,10 +107,10 @@ void G1FullGCCompactionPoint::forward(oop object, size_t size) { if (!object->is_forwarded()) { preserved_stack()->push_if_necessary(object, object->mark()); } - object->forward_to(cast_to_oop(_compaction_top)); - assert(object->is_forwarded(), "must be forwarded"); + SlidingForwarding::forward_to(object, cast_to_oop(_compaction_top)); + assert(SlidingForwarding::is_forwarded(object), "must be forwarded"); } else { - assert(!object->is_forwarded(), "must not be forwarded"); + assert(SlidingForwarding::is_not_forwarded(object), "must not be forwarded"); } // Update compaction values. 
@@ -172,8 +173,8 @@ void G1FullGCCompactionPoint::forward_humongous(HeapRegion* hr) { preserved_stack()->push_if_necessary(obj, obj->mark()); HeapRegion* dest_hr = _compaction_regions->at(range_begin); - obj->forward_to(cast_to_oop(dest_hr->bottom())); - assert(obj->is_forwarded(), "Object must be forwarded!"); + SlidingForwarding::forward_to(obj, cast_to_oop(dest_hr->bottom())); + assert(SlidingForwarding::is_forwarded(obj), "Object must be forwarded!"); // Add the humongous object regions to the compaction point. add_humongous(hr); diff --git a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp index aa194d16203b7..4df6d866811c8 100644 --- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,6 +32,7 @@ #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp" #include "gc/g1/g1FullGCMarker.inline.hpp" #include "gc/g1/heapRegionRemSet.inline.hpp" +#include "gc/shared/slidingForwarding.inline.hpp" #include "memory/iterator.inline.hpp" #include "memory/universe.hpp" #include "oops/access.inline.hpp" @@ -65,8 +66,8 @@ template inline void G1AdjustClosure::adjust_pointer(T* p) { return; } - if (obj->is_forwarded()) { - oop forwardee = obj->forwardee(); + if (SlidingForwarding::is_forwarded(obj)) { + oop forwardee = SlidingForwarding::forwardee(obj); // Forwarded, just update. 
assert(G1CollectedHeap::heap()->is_in_reserved(forwardee), "should be in object space"); RawAccess::oop_store(p, forwardee); diff --git a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp index 2a2add76f48c6..7a26b2c9ea0aa 100644 --- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp @@ -32,6 +32,7 @@ #include "gc/g1/g1FullGCCompactionPoint.hpp" #include "gc/g1/g1FullGCScope.hpp" #include "gc/g1/heapRegion.inline.hpp" +#include "gc/shared/slidingForwarding.inline.hpp" void G1DetermineCompactionQueueClosure::free_empty_humongous_region(HeapRegion* hr) { _g1h->free_humongous_region(hr, nullptr); @@ -114,10 +115,10 @@ inline bool G1DetermineCompactionQueueClosure::do_heap_region(HeapRegion* hr) { } inline size_t G1SerialRePrepareClosure::apply(oop obj) { - if (obj->is_forwarded()) { + if (SlidingForwarding::is_forwarded(obj)) { // We skip objects compiled into the first region or // into regions not part of the serial compaction point. 
- if (cast_from_oop(obj->forwardee()) < _dense_prefix_top) { + if (cast_from_oop(SlidingForwarding::forwardee(obj)) < _dense_prefix_top) { return obj->size(); } } diff --git a/src/hotspot/share/gc/serial/genMarkSweep.cpp b/src/hotspot/share/gc/serial/genMarkSweep.cpp index d0594a3a50037..d9bb3c2b901ae 100644 --- a/src/hotspot/share/gc/serial/genMarkSweep.cpp +++ b/src/hotspot/share/gc/serial/genMarkSweep.cpp @@ -48,6 +48,7 @@ #include "gc/shared/preservedMarks.inline.hpp" #include "gc/shared/referencePolicy.hpp" #include "gc/shared/referenceProcessorPhaseTimes.hpp" +#include "gc/shared/slidingForwarding.inline.hpp" #include "gc/shared/space.inline.hpp" #include "gc/shared/strongRootsScope.hpp" #include "gc/shared/weakProcessor.hpp" @@ -198,7 +199,7 @@ class Compacter { static void forward_obj(oop obj, HeapWord* new_addr) { prefetch_write_scan(obj); if (cast_from_oop(obj) != new_addr) { - obj->forward_to(cast_to_oop(new_addr)); + SlidingForwarding::forward_to(obj, cast_to_oop(new_addr)); } else { assert(obj->is_gc_marked(), "inv"); // This obj will stay in-place. Fix the markword. @@ -223,7 +224,7 @@ class Compacter { prefetch_read_scan(addr); oop obj = cast_to_oop(addr); - oop new_obj = obj->forwardee(); + oop new_obj = SlidingForwarding::forwardee(obj); HeapWord* new_addr = cast_from_oop(new_obj); assert(addr != new_addr, "inv"); prefetch_write_copy(new_addr); @@ -319,13 +320,13 @@ class Compacter { HeapWord* top = space->top(); // Check if the first obj inside this space is forwarded. 
- if (!cast_to_oop(cur_addr)->is_forwarded()) { + if (SlidingForwarding::is_not_forwarded(cast_to_oop(cur_addr))) { // Jump over consecutive (in-place) live-objs-chunk cur_addr = get_first_dead(i); } while (cur_addr < top) { - if (!cast_to_oop(cur_addr)->is_forwarded()) { + if (SlidingForwarding::is_not_forwarded(cast_to_oop(cur_addr))) { cur_addr = *(HeapWord**) cur_addr; continue; } @@ -448,6 +449,8 @@ void GenMarkSweep::invoke_at_safepoint(bool clear_all_softrefs) { phase1_mark(clear_all_softrefs); + SlidingForwarding::begin(); + Compacter compacter{gch}; { @@ -495,6 +498,8 @@ void GenMarkSweep::invoke_at_safepoint(bool clear_all_softrefs) { // (Should this be in general part?) gch->save_marks(); + SlidingForwarding::end(); + deallocate_stacks(); MarkSweep::_string_dedup_requests->flush(); diff --git a/src/hotspot/share/gc/serial/markSweep.inline.hpp b/src/hotspot/share/gc/serial/markSweep.inline.hpp index 97283e987466f..41833fb5a5f40 100644 --- a/src/hotspot/share/gc/serial/markSweep.inline.hpp +++ b/src/hotspot/share/gc/serial/markSweep.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,7 @@ #include "classfile/javaClasses.inline.hpp" #include "gc/shared/continuationGCSupport.inline.hpp" #include "gc/serial/serialStringDedup.hpp" +#include "gc/shared/slidingForwarding.inline.hpp" #include "memory/universe.hpp" #include "oops/markWord.hpp" #include "oops/access.inline.hpp" @@ -45,8 +46,8 @@ template inline void MarkSweep::adjust_pointer(T* p) { oop obj = CompressedOops::decode_not_null(heap_oop); assert(Universe::heap()->is_in(obj), "should be in heap"); - if (obj->is_forwarded()) { - oop new_obj = obj->forwardee(); + if (SlidingForwarding::is_forwarded(obj)) { + oop new_obj = SlidingForwarding::forwardee(obj); assert(is_object_aligned(new_obj), "oop must be aligned"); RawAccess::oop_store(p, new_obj); } diff --git a/src/hotspot/share/gc/serial/serialHeap.cpp b/src/hotspot/share/gc/serial/serialHeap.cpp index fe0ba9f3e8809..480f50fdd7470 100644 --- a/src/hotspot/share/gc/serial/serialHeap.cpp +++ b/src/hotspot/share/gc/serial/serialHeap.cpp @@ -55,6 +55,7 @@ #include "gc/shared/oopStorageParState.inline.hpp" #include "gc/shared/oopStorageSet.inline.hpp" #include "gc/shared/scavengableNMethods.hpp" +#include "gc/shared/slidingForwarding.hpp" #include "gc/shared/space.hpp" #include "gc/shared/strongRootsScope.hpp" #include "gc/shared/suspendibleThreadSet.hpp" @@ -203,6 +204,8 @@ jint SerialHeap::initialize() { GCInitLogger::print(); + SlidingForwarding::initialize(_reserved, SpaceAlignment / HeapWordSize); + return JNI_OK; } diff --git a/src/hotspot/share/gc/shared/preservedMarks.cpp b/src/hotspot/share/gc/shared/preservedMarks.cpp index 9889dbc369018..d3c97e4fd82b2 100644 --- a/src/hotspot/share/gc/shared/preservedMarks.cpp +++ b/src/hotspot/share/gc/shared/preservedMarks.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "gc/shared/preservedMarks.inline.hpp" +#include "gc/shared/slidingForwarding.inline.hpp" #include "gc/shared/workerThread.hpp" #include "gc/shared/workerUtils.hpp" #include "memory/allocation.inline.hpp" @@ -43,7 +44,7 @@ void PreservedMarks::restore() { void PreservedMarks::adjust_preserved_mark(PreservedMark* elem) { oop obj = elem->get_oop(); if (obj->is_forwarded()) { - elem->set_oop(obj->forwardee()); + elem->set_oop(SlidingForwarding::forwardee(obj)); } } diff --git a/src/hotspot/share/gc/shared/slidingForwarding.cpp b/src/hotspot/share/gc/shared/slidingForwarding.cpp new file mode 100644 index 0000000000000..0ca378f433668 --- /dev/null +++ b/src/hotspot/share/gc/shared/slidingForwarding.cpp @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/shared/slidingForwarding.hpp" +#include "utilities/ostream.hpp" +#include "utilities/powerOfTwo.hpp" + +// We cannot use 0, because that may already be a valid base address in zero-based heaps. +// 0x1 is safe because heap base addresses must be aligned by much larger alignment +HeapWord* const SlidingForwarding::UNUSED_BASE = reinterpret_cast(0x1); + +HeapWord* SlidingForwarding::_heap_start = nullptr; +size_t SlidingForwarding::_region_size_words = 0; +size_t SlidingForwarding::_heap_start_region_bias = 0; +size_t SlidingForwarding::_num_regions = 0; +uint SlidingForwarding::_region_size_bytes_shift = 0; +uintptr_t SlidingForwarding::_region_mask = 0; +HeapWord** SlidingForwarding::_biased_bases[SlidingForwarding::NUM_TARGET_REGIONS] = { nullptr, nullptr }; +HeapWord** SlidingForwarding::_bases_table = nullptr; +SlidingForwarding::FallbackTable* SlidingForwarding::_fallback_table = nullptr; + +void SlidingForwarding::initialize(MemRegion heap, size_t region_size_words) { +#ifdef _LP64 + _heap_start = heap.start(); + + // If the heap is small enough to fit directly into the available offset bits, + // and we are running Serial GC, we can treat the whole heap as a single region + // if it happens to be aligned to allow biasing. 
+ size_t rounded_heap_size = round_up_power_of_2(heap.byte_size()); + + if (UseSerialGC && (heap.word_size() <= (1 << NUM_OFFSET_BITS)) && + is_aligned((uintptr_t)_heap_start, rounded_heap_size)) { + _num_regions = 1; + _region_size_words = heap.word_size(); + _region_size_bytes_shift = log2i_exact(rounded_heap_size); + } else { + _num_regions = align_up(pointer_delta(heap.end(), heap.start()), region_size_words) / region_size_words; + _region_size_words = region_size_words; + _region_size_bytes_shift = log2i_exact(_region_size_words) + LogHeapWordSize; + } + _heap_start_region_bias = (uintptr_t)_heap_start >> _region_size_bytes_shift; + _region_mask = ~((uintptr_t(1) << _region_size_bytes_shift) - 1); + + guarantee((_heap_start_region_bias << _region_size_bytes_shift) == (uintptr_t)_heap_start, "must be aligned: _heap_start_region_bias: " SIZE_FORMAT ", _region_size_byte_shift: %u, _heap_start: " PTR_FORMAT, _heap_start_region_bias, _region_size_bytes_shift, p2i(_heap_start)); + + assert(_region_size_words >= 1, "regions must be at least a word large"); + assert(_bases_table == nullptr, "should not be initialized yet"); + assert(_fallback_table == nullptr, "should not be initialized yet"); +#endif +} + +void SlidingForwarding::begin() { +#ifdef _LP64 + assert(_bases_table == nullptr, "should not be initialized yet"); + assert(_fallback_table == nullptr, "should not be initialized yet"); + + size_t max = _num_regions * NUM_TARGET_REGIONS; + _bases_table = NEW_C_HEAP_ARRAY(HeapWord*, max, mtGC); + HeapWord** biased_start = _bases_table - _heap_start_region_bias; + _biased_bases[0] = biased_start; + _biased_bases[1] = biased_start + _num_regions; + for (size_t i = 0; i < max; i++) { + _bases_table[i] = UNUSED_BASE; + } +#endif +} + +void SlidingForwarding::end() { +#ifdef _LP64 + assert(_bases_table != nullptr, "should be initialized"); + FREE_C_HEAP_ARRAY(HeapWord*, _bases_table); + _bases_table = nullptr; + delete _fallback_table; + _fallback_table = nullptr; 
+#endif +} + +void SlidingForwarding::fallback_forward_to(HeapWord* from, HeapWord* to) { + if (_fallback_table == nullptr) { + _fallback_table = new (mtGC) FallbackTable(); + } + _fallback_table->put_when_absent(from, to); +} + +HeapWord* SlidingForwarding::fallback_forwardee(HeapWord* from) { + assert(_fallback_table != nullptr, "fallback table must be present"); + HeapWord** found = _fallback_table->get(from); + if (found != nullptr) { + return *found; + } else { + return nullptr; + } +} diff --git a/src/hotspot/share/gc/shared/slidingForwarding.hpp b/src/hotspot/share/gc/shared/slidingForwarding.hpp new file mode 100644 index 0000000000000..851d863aa8d2d --- /dev/null +++ b/src/hotspot/share/gc/shared/slidingForwarding.hpp @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_GC_SHARED_SLIDINGFORWARDING_HPP +#define SHARE_GC_SHARED_SLIDINGFORWARDING_HPP + +#include "memory/allocation.hpp" +#include "memory/memRegion.hpp" +#include "oops/markWord.hpp" +#include "oops/oopsHierarchy.hpp" +#include "utilities/fastHash.hpp" +#include "utilities/resourceHash.hpp" + +/** + * SlidingForwarding is a method to store forwarding information in a compressed form into the object header, + * that has been specifically designed for sliding compaction GCs and compact object headers. With compact object + * headers, we store the compressed class pointer in the header, which would be overwritten by full forwarding + * pointer, if we allow the legacy forwarding code to act. This would lose the class information for the object, + * which is required later in GC cycle to iterate the reference fields and get the object size for copying. + * + * SlidingForwarding requires only small side tables and guarantees constant-time access and modification. + * + * The idea is to use a pointer compression scheme very similar to the one that is used for compressed oops. + * We divide the heap into number of logical regions. Each region spans maximum of 2^NUM_OFFSET_BITS words. + * + * The key advantage of sliding compaction for encoding efficiency: it can forward objects from one region to a + * maximum of two regions. This is an intuitive property: when we slide the compact region full of data, it can + * only span two adjacent regions. This property allows us to use the off-side table to record the addresses of + * two target regions. The table holds N*2 entries for N logical regions. For each region, it gives the base + * address of the two target regions, or a special placeholder if not used. A single bit in forwarding would + * indicate to which of the two "to" regions the object is forwarded into. 
+ * + * This encoding efficiency allows to store the forwarding information in the object header _together_ with the + * compressed class pointer. + * + * When recording the sliding forwarding, the mark word would look roughly like this: + * + * 64 32 0 + * [................................OOOOOOOOOOOOOOOOOOOOOOOOOOOOAFTT] + * ^----- normal lock bits, would record "object is forwarded" + * ^------- fallback bit (explained below) + * ^-------- alternate region select + * ^------------------------------------ in-region offset + * ^-------------------------------------------------------------------- protected area, *not touched* by this code, useful for + * compressed class pointer with compact object headers + * + * Adding a forwarding then generally works as follows: + * 1. Compute the "to" offset in the "to" region, this gives "offset". + * 2. Check if the primary "from" offset at base table contains "to" region base, use it. + * If not usable, continue to next step. If usable, set "alternate" = "false" and jump to (4). + * 3. Check if the alternate "from" offset at base table contains "to" region base, use it. + * This gives us "alternate" = "true". This should always complete for sliding forwarding. + * 4. Compute the mark word from "offset" and "alternate", write it out + * + * Similarly, looking up the target address, given an original object address generally works as follows: + * 1. Load the mark from object, and decode "offset" and "alternate" from there + * 2. Compute the "from" base offset from the object + * 3. Look up "to" region base from the base table either at primary or alternate indices, using "alternate" flag + * 4. Compute the "to" address from "to" region base and "offset" + * + * This algorithm is broken by G1 last-ditch serial compaction: there, object from a single region can be + * forwarded to multiple, more than two regions. 
To deal with that, we initialize a fallback-hashtable for + * storing those extra forwardings, and set another bit in the header to indicate that the forwardee is not + * encoded but should be looked-up in the hashtable. G1 serial compaction is not very common - it is the + * last-last-ditch GC that is used when the JVM is scrambling to squeeze more space out of the heap, and at + * that point, ultimate performance is no longer the main concern. + */ +class SlidingForwarding : public AllStatic { +private: + + /* + * A simple hash-table that acts as fallback for the sliding forwarding. + * This is used in the case of G1 serial compaction, which violates the + * assumption of sliding forwarding that each object of any region is only + * ever forwarded to one of two target regions. At this point, the GC is + * scrambling to free up more Java heap memory, and therefore performance + * is not the major concern. + * + * The implementation is a straightforward open hashtable. + * It is a single-threaded (not thread-safe) implementation, and that + * is sufficient because G1 serial compaction is single-threaded. + */ + inline static unsigned hash(HeapWord* const& from) { + uint64_t val = reinterpret_cast(from); + uint64_t hash = FastHash::get_hash64(val, UCONST64(0xAAAAAAAAAAAAAAAA)); + return checked_cast(hash >> 32); + } + inline static bool equals(HeapWord* const& lhs, HeapWord* const& rhs) { + return lhs == rhs; + } + typedef ResourceHashtable FallbackTable; + + static const uintptr_t MARK_LOWER_HALF_MASK = right_n_bits(32); + + // We need the lowest two bits to indicate a forwarded object. + // The next bit indicates that the forwardee should be looked-up in a fallback-table. 
+ static const int FALLBACK_SHIFT = markWord::lock_bits; + static const int FALLBACK_BITS = 1; + static const int FALLBACK_MASK = right_n_bits(FALLBACK_BITS) << FALLBACK_SHIFT; + + // Next bit selects the target region + static const int ALT_REGION_SHIFT = FALLBACK_SHIFT + FALLBACK_BITS; + static const int ALT_REGION_BITS = 1; + // This will be "2" always, but expose it as named constant for clarity + static const size_t NUM_TARGET_REGIONS = 1 << ALT_REGION_BITS; + + // The offset bits start then + static const int OFFSET_BITS_SHIFT = ALT_REGION_SHIFT + ALT_REGION_BITS; + + // How many bits we use for the offset + static const int NUM_OFFSET_BITS = 32 - OFFSET_BITS_SHIFT; + + // Indicates an unused base address in the target base table. + static HeapWord* const UNUSED_BASE; + + static HeapWord* _heap_start; + static size_t _region_size_words; + + static size_t _heap_start_region_bias; + static size_t _num_regions; + static uint _region_size_bytes_shift; + static uintptr_t _region_mask; + + // The target base table memory. + static HeapWord** _bases_table; + // Entries into the target base tables, biased to the start of the heap. 
+ static HeapWord** _biased_bases[NUM_TARGET_REGIONS]; + + static FallbackTable* _fallback_table; + + static inline size_t biased_region_index_containing(HeapWord* addr); + + static inline uintptr_t encode_forwarding(HeapWord* from, HeapWord* to); + static inline HeapWord* decode_forwarding(HeapWord* from, uintptr_t encoded); + + static void fallback_forward_to(HeapWord* from, HeapWord* to); + static HeapWord* fallback_forwardee(HeapWord* from); + + static inline void forward_to_impl(oop from, oop to); + static inline oop forwardee_impl(oop from); + +public: + static void initialize(MemRegion heap, size_t region_size_words); + + static void begin(); + static void end(); + + static inline bool is_forwarded(oop obj); + static inline bool is_not_forwarded(oop obj); + + static inline void forward_to(oop from, oop to); + static inline oop forwardee(oop from); +}; + +#endif // SHARE_GC_SHARED_SLIDINGFORWARDING_HPP diff --git a/src/hotspot/share/gc/shared/slidingForwarding.inline.hpp b/src/hotspot/share/gc/shared/slidingForwarding.inline.hpp new file mode 100644 index 0000000000000..8f314b11b0f47 --- /dev/null +++ b/src/hotspot/share/gc/shared/slidingForwarding.inline.hpp @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2021, Red Hat, Inc. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_SHARED_SLIDINGFORWARDING_INLINE_HPP +#define SHARE_GC_SHARED_SLIDINGFORWARDING_INLINE_HPP + +#include "gc/shared/gc_globals.hpp" +#include "gc/shared/slidingForwarding.hpp" +#include "oops/markWord.hpp" +#include "oops/oop.inline.hpp" +#include "utilities/macros.hpp" + +inline bool SlidingForwarding::is_forwarded(oop obj) { + return obj->is_forwarded(); +} + +inline bool SlidingForwarding::is_not_forwarded(oop obj) { + return !obj->is_forwarded(); +} + +size_t SlidingForwarding::biased_region_index_containing(HeapWord* addr) { + return (uintptr_t)addr >> _region_size_bytes_shift; +} + +uintptr_t SlidingForwarding::encode_forwarding(HeapWord* from, HeapWord* to) { + static_assert(NUM_TARGET_REGIONS == 2, "Only implemented for this amount"); + + size_t from_reg_idx = biased_region_index_containing(from); + HeapWord* to_region_base = (HeapWord*)((uintptr_t)to & _region_mask); + + HeapWord** base = &_biased_bases[0][from_reg_idx]; + uintptr_t alternate = 0; + if (*base == to_region_base) { + // Primary is good + } else if (*base == UNUSED_BASE) { + // Primary is free + *base = to_region_base; + } else { + base = &_biased_bases[1][from_reg_idx]; + if (*base == to_region_base) { + // Alternate is good + } else if (*base == UNUSED_BASE) { + // Alternate is free + *base = to_region_base; + } else { + // Both primary and alternate are not fitting + // This happens only in the following rare situations: + // - In Serial GC, sometimes when compact-top switches spaces, because the + // region boudaries are virtual and objects can cross regions + // - In G1 serial 
compaction, because tails of various compaction chains + // are distributed across the remainders of already compacted regions. + return (1 << FALLBACK_SHIFT) | markWord::marked_value; + } + alternate = 1; + } + + size_t offset = pointer_delta(to, to_region_base); + assert(offset < _region_size_words, "Offset should be within the region. from: " PTR_FORMAT + ", to: " PTR_FORMAT ", to_region_base: " PTR_FORMAT ", offset: " SIZE_FORMAT, + p2i(from), p2i(to), p2i(to_region_base), offset); + + uintptr_t encoded = (offset << OFFSET_BITS_SHIFT) | + (alternate << ALT_REGION_SHIFT) | + markWord::marked_value; + + assert(to == decode_forwarding(from, encoded), "must be reversible"); + assert((encoded & ~MARK_LOWER_HALF_MASK) == 0, "must encode to lowest 32 bits"); + return encoded; +} + +HeapWord* SlidingForwarding::decode_forwarding(HeapWord* from, uintptr_t encoded) { + assert((encoded & markWord::lock_mask_in_place) == markWord::marked_value, "must be marked as forwarded"); + assert((encoded & FALLBACK_MASK) == 0, "must not be fallback-forwarded"); + assert((encoded & ~MARK_LOWER_HALF_MASK) == 0, "must decode from lowest 32 bits"); + size_t alternate = (encoded >> ALT_REGION_SHIFT) & right_n_bits(ALT_REGION_BITS); + assert(alternate < NUM_TARGET_REGIONS, "Sanity"); + uintptr_t offset = (encoded >> OFFSET_BITS_SHIFT); + + size_t from_idx = biased_region_index_containing(from); + HeapWord* base = _biased_bases[alternate][from_idx]; + assert(base != UNUSED_BASE, "must not be unused base: encoded: " INTPTR_FORMAT, encoded); + HeapWord* decoded = base + offset; + assert(decoded >= _heap_start, + "Address must be above heap start. 
encoded: " INTPTR_FORMAT ", alt_region: " SIZE_FORMAT ", base: " PTR_FORMAT, + encoded, alternate, p2i(base)); + + return decoded; +} + +inline void SlidingForwarding::forward_to_impl(oop from, oop to) { + assert(_bases_table != nullptr, "call begin() before forwarding"); + + markWord from_header = from->mark(); + if (from_header.has_displaced_mark_helper()) { + from_header = from_header.displaced_mark_helper(); + } + + HeapWord* from_hw = cast_from_oop<HeapWord*>(from); + HeapWord* to_hw = cast_from_oop<HeapWord*>(to); + uintptr_t encoded = encode_forwarding(from_hw, to_hw); + markWord new_header = markWord((from_header.value() & ~MARK_LOWER_HALF_MASK) | encoded); + from->set_mark(new_header); + + if ((encoded & FALLBACK_MASK) != 0) { + fallback_forward_to(from_hw, to_hw); + } +} + +inline void SlidingForwarding::forward_to(oop obj, oop fwd) { +#ifdef _LP64 + assert(_bases_table != nullptr, "expect sliding forwarding initialized"); + forward_to_impl(obj, fwd); + assert(forwardee(obj) == fwd, "must be forwarded to correct forwardee"); +#else + obj->forward_to(fwd); +#endif +} + +inline oop SlidingForwarding::forwardee_impl(oop from) { + assert(_bases_table != nullptr, "call begin() before asking for forwarding"); + + markWord header = from->mark(); + HeapWord* from_hw = cast_from_oop<HeapWord*>(from); + if ((header.value() & FALLBACK_MASK) != 0) { + HeapWord* to = fallback_forwardee(from_hw); + return cast_to_oop(to); + } + uintptr_t encoded = header.value() & MARK_LOWER_HALF_MASK; + HeapWord* to = decode_forwarding(from_hw, encoded); + return cast_to_oop(to); +} + +inline oop SlidingForwarding::forwardee(oop obj) { +#ifdef _LP64 + assert(_bases_table != nullptr, "expect sliding forwarding initialized"); + return forwardee_impl(obj); +#else + return obj->forwardee(); +#endif +} + +#endif // SHARE_GC_SHARED_SLIDINGFORWARDING_INLINE_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp index 3552669abb661..a4ee7f2e7dc68 100644 
--- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp @@ -28,6 +28,7 @@ #include "gc/shared/continuationGCSupport.hpp" #include "gc/shared/gcTraceTime.inline.hpp" #include "gc/shared/preservedMarks.inline.hpp" +#include "gc/shared/slidingForwarding.inline.hpp" #include "gc/shared/tlab_globals.hpp" #include "gc/shared/workerThread.hpp" #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" @@ -228,6 +229,8 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) { // until all phases run together. ShenandoahHeapLocker lock(heap->lock()); + SlidingForwarding::begin(); + phase2_calculate_target_addresses(worker_slices); OrderAccess::fence(); @@ -241,6 +244,7 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) { // Epilogue _preserved_marks->restore(heap->workers()); _preserved_marks->reclaim(); + SlidingForwarding::end(); } // Resize metaspace @@ -370,7 +374,7 @@ class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure { assert(_compact_point + obj_size <= _to_region->end(), "must fit"); shenandoah_assert_not_forwarded(nullptr, p); _preserved_marks->push_if_necessary(p, p->mark()); - p->forward_to(cast_to_oop(_compact_point)); + SlidingForwarding::forward_to(p, cast_to_oop(_compact_point)); _compact_point += obj_size; } }; @@ -478,7 +482,7 @@ void ShenandoahFullGC::calculate_target_humongous_objects() { if (start >= to_begin && start != r->index()) { // Fits into current window, and the move is non-trivial. Record the move then, and continue scan. 
_preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark()); - old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom())); + SlidingForwarding::forward_to(old_obj, cast_to_oop(heap->get_region(start)->bottom())); to_end = start; continue; } @@ -738,8 +742,8 @@ class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure if (!CompressedOops::is_null(o)) { oop obj = CompressedOops::decode_not_null(o); assert(_ctx->is_marked(obj), "must be marked"); - if (obj->is_forwarded()) { - oop forw = obj->forwardee(); + if (SlidingForwarding::is_forwarded(obj)) { + oop forw = SlidingForwarding::forwardee(obj); RawAccess<IS_NOT_NULL>::oop_store(p, forw); } } @@ -849,9 +853,9 @@ class ShenandoahCompactObjectsClosure : public ObjectClosure { void do_object(oop p) { assert(_heap->complete_marking_context()->is_marked(p), "must be marked"); size_t size = p->size(); - if (p->is_forwarded()) { + if (SlidingForwarding::is_forwarded(p)) { HeapWord* compact_from = cast_from_oop<HeapWord*>(p); - HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee()); + HeapWord* compact_to = cast_from_oop<HeapWord*>(SlidingForwarding::forwardee(p)); Copy::aligned_conjoint_words(compact_from, compact_to, size); oop new_obj = cast_to_oop(compact_to); @@ -956,7 +960,7 @@ void ShenandoahFullGC::compact_humongous_objects() { ShenandoahHeapRegion* r = heap->get_region(c - 1); if (r->is_humongous_start()) { oop old_obj = cast_to_oop(r->bottom()); - if (!old_obj->is_forwarded()) { + if (SlidingForwarding::is_not_forwarded(old_obj)) { // No need to move the object, it stays at the same slot continue; } @@ -965,7 +969,7 @@ size_t old_start = r->index(); size_t old_end = old_start + num_regions - 1; - size_t new_start = heap->heap_region_index_containing(old_obj->forwardee()); + size_t new_start = heap->heap_region_index_containing(SlidingForwarding::forwardee(old_obj)); size_t new_end = new_start + num_regions - 1; assert(old_start != new_start, "must be real 
move"); assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index()); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index de55dde8aca96..75fc1a6642d30 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -34,6 +34,7 @@ #include "gc/shared/locationPrinter.inline.hpp" #include "gc/shared/memAllocator.hpp" #include "gc/shared/plab.hpp" +#include "gc/shared/slidingForwarding.hpp" #include "gc/shared/tlab_globals.hpp" #include "gc/shenandoah/shenandoahBarrierSet.hpp" @@ -438,6 +439,8 @@ jint ShenandoahHeap::initialize() { ShenandoahInitLogger::print(); + SlidingForwarding::initialize(_heap_region, ShenandoahHeapRegion::region_size_words()); + return JNI_OK; } diff --git a/src/hotspot/share/utilities/fastHash.hpp b/src/hotspot/share/utilities/fastHash.hpp new file mode 100644 index 0000000000000..86b1dcf2b5ee4 --- /dev/null +++ b/src/hotspot/share/utilities/fastHash.hpp @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_UTILITIES_FASTHASH_HPP +#define SHARE_UTILITIES_FASTHASH_HPP + +#include "memory/allStatic.hpp" + +class FastHash : public AllStatic { +private: + static void fullmul64(uint64_t& hi, uint64_t& lo, uint64_t op1, uint64_t op2) { +#if defined(__SIZEOF_INT128__) + __uint128_t prod = static_cast<__uint128_t>(op1) * static_cast<__uint128_t>(op2); + hi = static_cast<uint64_t>(prod >> 64); + lo = static_cast<uint64_t>(prod >> 0); +#else + /* First calculate all of the cross products. */ + uint64_t lo_lo = (op1 & 0xFFFFFFFF) * (op2 & 0xFFFFFFFF); + uint64_t hi_lo = (op1 >> 32) * (op2 & 0xFFFFFFFF); + uint64_t lo_hi = (op1 & 0xFFFFFFFF) * (op2 >> 32); + uint64_t hi_hi = (op1 >> 32) * (op2 >> 32); + + /* Now add the products together. These will never overflow. */ + uint64_t cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi; + uint64_t upper = (hi_lo >> 32) + (cross >> 32) + hi_hi; + hi = upper; + lo = (cross << 32) | (lo_lo & 0xFFFFFFFF); +#endif + } + + static void fullmul32(uint32_t& hi, uint32_t& lo, uint32_t op1, uint32_t op2) { + uint64_t x64 = op1, y64 = op2, xy64 = x64 * y64; + hi = (uint32_t)(xy64 >> 32); + lo = (uint32_t)(xy64 >> 0); + } + + static uint64_t ror(uint64_t x, uint64_t distance) { + distance = distance & 0x3F; + return (x >> distance) | (x << (64 - distance)); + } + +public: + static uint64_t get_hash64(uint64_t x, uint64_t y) { + const uint64_t M = 0x8ADAE89C337954D5; + const uint64_t A = 0xAAAAAAAAAAAAAAAA; // REPAA + const uint64_t H0 = (x ^ y), L0 = (x ^ A); + + uint64_t U0, V0; fullmul64(U0, V0, L0, M); + const uint64_t Q0 = (H0 * M); + const uint64_t L1 = (Q0 ^ U0); + + uint64_t U1, V1; fullmul64(U1, V1, L1, M); + const uint64_t P1 = (V0 ^ M); + const uint64_t Q1 = ror(P1, L1); + const uint64_t L2 = (Q1 ^ U1); + return V1 ^ L2; + } + + static uint32_t get_hash32(uint32_t 
x, uint32_t y) { + const uint32_t M = 0x337954D5; + const uint32_t A = 0xAAAAAAAA; // REPAA + const uint32_t H0 = (x ^ y), L0 = (x ^ A); + + uint32_t U0, V0; fullmul32(U0, V0, L0, M); + const uint32_t Q0 = (H0 * M); + const uint32_t L1 = (Q0 ^ U0); + + uint32_t U1, V1; fullmul32(U1, V1, L1, M); + const uint32_t P1 = (V0 ^ M); + const uint32_t Q1 = ror(P1, L1); + const uint32_t L2 = (Q1 ^ U1); + return V1 ^ L2; + } +}; + +#endif// SHARE_UTILITIES_FASTHASH_HPP diff --git a/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp b/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp index 5f9a361105ec7..fa8d246415a45 100644 --- a/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp +++ b/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp @@ -23,59 +23,50 @@ #include "precompiled.hpp" #include "gc/shared/preservedMarks.inline.hpp" +#include "gc/shared/slidingForwarding.inline.hpp" #include "oops/oop.inline.hpp" #include "unittest.hpp" -// Class to create a "fake" oop with a mark that will -// return true for calls to must_be_preserved(). 
-class FakeOop { - oopDesc _oop; - -public: - FakeOop() : _oop() { _oop.set_mark(originalMark()); } - - oop get_oop() { return &_oop; } - markWord mark() { return _oop.mark(); } - void set_mark(markWord m) { _oop.set_mark(m); } - void forward_to(oop obj) { - markWord m = markWord::encode_pointer_as_mark(obj); - _oop.set_mark(m); - } - - static markWord originalMark() { return markWord(markWord::lock_mask_in_place); } - static markWord changedMark() { return markWord(0x4711); } -}; +static markWord originalMark() { return markWord(markWord::lock_mask_in_place); } +static markWord changedMark() { return markWord(0x4711); } #define ASSERT_MARK_WORD_EQ(a, b) ASSERT_EQ((a).value(), (b).value()) TEST_VM(PreservedMarks, iterate_and_restore) { PreservedMarks pm; - FakeOop o1; - FakeOop o2; - FakeOop o3; - FakeOop o4; + + HeapWord fakeheap[32] = { nullptr }; + HeapWord* heap = align_up(fakeheap, 8 * sizeof(HeapWord)); + SlidingForwarding::initialize(MemRegion(&heap[0], &heap[16]), 8); + + oop o1 = cast_to_oop(&heap[0]); o1->set_mark(originalMark()); + oop o2 = cast_to_oop(&heap[2]); o2->set_mark(originalMark()); + oop o3 = cast_to_oop(&heap[4]); o3->set_mark(originalMark()); + oop o4 = cast_to_oop(&heap[6]); o4->set_mark(originalMark()); // Make sure initial marks are correct. - ASSERT_MARK_WORD_EQ(o1.mark(), FakeOop::originalMark()); - ASSERT_MARK_WORD_EQ(o2.mark(), FakeOop::originalMark()); - ASSERT_MARK_WORD_EQ(o3.mark(), FakeOop::originalMark()); - ASSERT_MARK_WORD_EQ(o4.mark(), FakeOop::originalMark()); + ASSERT_MARK_WORD_EQ(o1->mark(), originalMark()); + ASSERT_MARK_WORD_EQ(o2->mark(), originalMark()); + ASSERT_MARK_WORD_EQ(o3->mark(), originalMark()); + ASSERT_MARK_WORD_EQ(o4->mark(), originalMark()); // Change the marks and verify change. 
- o1.set_mark(FakeOop::changedMark()); - o2.set_mark(FakeOop::changedMark()); - ASSERT_MARK_WORD_EQ(o1.mark(), FakeOop::changedMark()); - ASSERT_MARK_WORD_EQ(o2.mark(), FakeOop::changedMark()); + o1->set_mark(changedMark()); + o2->set_mark(changedMark()); + ASSERT_MARK_WORD_EQ(o1->mark(), changedMark()); + ASSERT_MARK_WORD_EQ(o2->mark(), changedMark()); + + SlidingForwarding::begin(); // Push o1 and o2 to have their marks preserved. - pm.push_if_necessary(o1.get_oop(), o1.mark()); - pm.push_if_necessary(o2.get_oop(), o2.mark()); + pm.push_if_necessary(o1, o1->mark()); + pm.push_if_necessary(o2, o2->mark()); // Fake a move from o1->o3 and o2->o4. - o1.forward_to(o3.get_oop()); - o2.forward_to(o4.get_oop()); - ASSERT_EQ(o1.get_oop()->forwardee(), o3.get_oop()); - ASSERT_EQ(o2.get_oop()->forwardee(), o4.get_oop()); + SlidingForwarding::forward_to(o1, o3); + SlidingForwarding::forward_to(o2, o4); + ASSERT_EQ(SlidingForwarding::forwardee(o1), o3); + ASSERT_EQ(SlidingForwarding::forwardee(o2), o4); // Adjust will update the PreservedMarks stack to // make sure the mark is updated at the new location. pm.adjust_during_full_gc(); @@ -83,6 +74,7 @@ TEST_VM(PreservedMarks, iterate_and_restore) { // Restore all preserved and verify that the changed // mark is now present at o3 and o4. pm.restore(); - ASSERT_MARK_WORD_EQ(o3.mark(), FakeOop::changedMark()); - ASSERT_MARK_WORD_EQ(o4.mark(), FakeOop::changedMark()); + ASSERT_MARK_WORD_EQ(o3->mark(), changedMark()); + ASSERT_MARK_WORD_EQ(o4->mark(), changedMark()); + SlidingForwarding::end(); } diff --git a/test/hotspot/gtest/gc/shared/test_slidingForwarding.cpp b/test/hotspot/gtest/gc/shared/test_slidingForwarding.cpp new file mode 100644 index 0000000000000..68960f49145ef --- /dev/null +++ b/test/hotspot/gtest/gc/shared/test_slidingForwarding.cpp @@ -0,0 +1,120 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/slidingForwarding.inline.hpp" +#include "oops/markWord.hpp" +#include "oops/oop.inline.hpp" +#include "utilities/align.hpp" +#include "unittest.hpp" + +#ifdef _LP64 +#ifndef PRODUCT + +static uintptr_t make_mark(uintptr_t target_region, uintptr_t offset) { + return (target_region) << 3 | (offset << 4) | 3 /* forwarded */; +} + +static uintptr_t make_fallback() { + return ((uintptr_t(1) << 2) /* fallback */ | 3 /* forwarded */); +} + +// Test simple forwarding within the same region. 
+TEST_VM(SlidingForwarding, simple) { + HeapWord fakeheap[32] = { nullptr }; + HeapWord* heap = align_up(fakeheap, 8 * sizeof(HeapWord)); + oop obj1 = cast_to_oop(&heap[2]); + oop obj2 = cast_to_oop(&heap[0]); + SlidingForwarding::initialize(MemRegion(&heap[0], &heap[16]), 8); + obj1->set_mark(markWord::prototype()); + SlidingForwarding::begin(); + + SlidingForwarding::forward_to(obj1, obj2); + ASSERT_EQ(obj1->mark().value(), make_mark(0 /* target_region */, 0 /* offset */)); + ASSERT_EQ(SlidingForwarding::forwardee(obj1), obj2); + + SlidingForwarding::end(); +} + +// Test forwardings crossing 2 regions. +TEST_VM(SlidingForwarding, tworegions) { + HeapWord fakeheap[32] = { nullptr }; + HeapWord* heap = align_up(fakeheap, 8 * sizeof(HeapWord)); + oop obj1 = cast_to_oop(&heap[14]); + oop obj2 = cast_to_oop(&heap[2]); + oop obj3 = cast_to_oop(&heap[10]); + SlidingForwarding::initialize(MemRegion(&heap[0], &heap[16]), 8); + obj1->set_mark(markWord::prototype()); + SlidingForwarding::begin(); + + SlidingForwarding::forward_to(obj1, obj2); + ASSERT_EQ(obj1->mark().value(), make_mark(0 /* target_region */, 2 /* offset */)); + ASSERT_EQ(SlidingForwarding::forwardee(obj1), obj2); + + SlidingForwarding::forward_to(obj1, obj3); + ASSERT_EQ(obj1->mark().value(), make_mark(1 /* target_region */, 2 /* offset */)); + ASSERT_EQ(SlidingForwarding::forwardee(obj1), obj3); + + SlidingForwarding::end(); +} + +// Test fallback forwardings crossing 4 regions. 
+TEST_VM(SlidingForwarding, fallback) { + HeapWord fakeheap[32] = { nullptr }; + HeapWord* heap = align_up(fakeheap, 8 * sizeof(HeapWord)); + oop s_obj1 = cast_to_oop(&heap[12]); + oop s_obj2 = cast_to_oop(&heap[13]); + oop s_obj3 = cast_to_oop(&heap[14]); + oop s_obj4 = cast_to_oop(&heap[15]); + oop t_obj1 = cast_to_oop(&heap[2]); + oop t_obj2 = cast_to_oop(&heap[4]); + oop t_obj3 = cast_to_oop(&heap[10]); + oop t_obj4 = cast_to_oop(&heap[12]); + SlidingForwarding::initialize(MemRegion(&heap[0], &heap[16]), 4); + s_obj1->set_mark(markWord::prototype()); + s_obj2->set_mark(markWord::prototype()); + s_obj3->set_mark(markWord::prototype()); + s_obj4->set_mark(markWord::prototype()); + SlidingForwarding::begin(); + + SlidingForwarding::forward_to(s_obj1, t_obj1); + ASSERT_EQ(s_obj1->mark().value(), make_mark(0 /* target_region */, 2 /* offset */)); + ASSERT_EQ(SlidingForwarding::forwardee(s_obj1), t_obj1); + + SlidingForwarding::forward_to(s_obj2, t_obj2); + ASSERT_EQ(s_obj2->mark().value(), make_mark(1 /* target_region */, 0 /* offset */)); + ASSERT_EQ(SlidingForwarding::forwardee(s_obj2), t_obj2); + + SlidingForwarding::forward_to(s_obj3, t_obj3); + ASSERT_EQ(s_obj3->mark().value(), make_fallback()); + ASSERT_EQ(SlidingForwarding::forwardee(s_obj3), t_obj3); + + SlidingForwarding::forward_to(s_obj4, t_obj4); + ASSERT_EQ(s_obj4->mark().value(), make_fallback()); + ASSERT_EQ(SlidingForwarding::forwardee(s_obj4), t_obj4); + + SlidingForwarding::end(); +} + +#endif // PRODUCT +#endif // _LP64