diff --git a/substratevm/CHANGELOG.md b/substratevm/CHANGELOG.md index d6cf8815318c..e36b7bb69681 100644 --- a/substratevm/CHANGELOG.md +++ b/substratevm/CHANGELOG.md @@ -23,6 +23,7 @@ This changelog summarizes major changes to GraalVM Native Image. * (GR-43837) `--report-unsupported-elements-at-runtime` is now enabled by default and the option is deprecated. * (GR-53359) Provide the `.debug_gdb_scripts` section that triggers auto-loading of `svmhelpers.py` in GDB. Remove single and double quotes from `ClassLoader.nameAndId` in the debuginfo. * (GR-47365) Include dynamic proxy metadata in the reflection metadata with the syntax `"type": { "proxy": [] }`. This allows members of proxy classes to be accessed reflectively. `proxy-config.json` is now deprecated but will still be honored. +* (GR-18214) In-place compacting garbage collection for the Serial GC old generation with `-H:+CompactingOldGen`. ## GraalVM for JDK 22 (Internal Version 24.0.0) * (GR-48304) Red Hat added support for the JFR event ThreadAllocationStatistics. diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveCollectionPolicy.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveCollectionPolicy.java index d168ca473233..ecefb5ddb7a8 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveCollectionPolicy.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveCollectionPolicy.java @@ -155,11 +155,12 @@ public String getName() { public boolean shouldCollectCompletely(boolean followingIncrementalCollection) { // should_{attempt_scavenge,full_GC} guaranteeSizeParametersInitialized(); - if (!followingIncrementalCollection && shouldCollectYoungGenSeparately(true)) { + if (!followingIncrementalCollection && shouldCollectYoungGenSeparately(!SerialGCOptions.useCompactingOldGen())) { /* - * Default to always doing an incremental collection first because we expect most of the - * objects in the young generation to be garbage, and we can reuse their leftover chunks - * for copying the live objects in the old generation with fewer allocations. + * With a copying collector, default to always doing an incremental collection first + * because we expect most of the objects in the young generation to be garbage, and we + * can reuse their leftover chunks for copying the live objects in the old generation + * with fewer allocations. With a compacting collector, there is no benefit. 
*/ return false; } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AlignedHeapChunk.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AlignedHeapChunk.java index 2c60e57f9fb6..f8d34e8e5dce 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AlignedHeapChunk.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AlignedHeapChunk.java @@ -24,6 +24,7 @@ */ package com.oracle.svm.core.genscavenge; +import org.graalvm.nativeimage.c.struct.RawField; import org.graalvm.nativeimage.c.struct.RawStructure; import org.graalvm.word.Pointer; import org.graalvm.word.UnsignedWord; @@ -33,6 +34,7 @@ import com.oracle.svm.core.Uninterruptible; import com.oracle.svm.core.genscavenge.remset.RememberedSet; import com.oracle.svm.core.heap.ObjectVisitor; +import com.oracle.svm.core.heap.RestrictHeapAccess; import com.oracle.svm.core.util.PointerUtils; import jdk.graal.compiler.api.directives.GraalDirectives; @@ -50,17 +52,14 @@ * Most allocation within a AlignedHeapChunk is via fast-path allocation snippets, but a slow-path * allocation method is available. *

- * Objects in a AlignedHeapChunk have to be promoted by copying from their current HeapChunk to a - * destination HeapChunk. - *

- * An AlignedHeapChunk is laid out: + * An AlignedHeapChunk is laid out as follows: * *

- * +===============+-------+--------+----------------------+
- * | AlignedHeader | Card  | First  | Object ...           |
- * | Fields        | Table | Object |                      |
- * |               |       | Table  |                      |
- * +===============+-------+--------+----------------------+
+ * +===============+-------+--------+-----------------+-----------------+
+ * | AlignedHeader | Card  | First  | Initial Object  | Object ...      |
+ * | Fields        | Table | Object | Move Info (only |                 |
+ * |               |       | Table  | Compacting GC)  |                 |
+ * +===============+-------+--------+-----------------+-----------------+
  * 
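The layout above determines where object allocation begins inside an aligned chunk. As a rough sketch of the offset arithmetic behind `getObjectsStartOffset()` and `getUsableSizeForObjects()` defined just below — with made-up sizes, since the real table sizes depend on the `RememberedSet` implementation, as the following comment notes:

```java
/** Toy offset arithmetic for the aligned-chunk layout; all sizes here are made up. */
final class ChunkLayoutSketch {
    static final long CHUNK_SIZE = 1L << 20;              // assume 1 MiB aligned chunks
    static final long HEADER_SIZE = 64;                   // AlignedHeader fields
    static final long CARD_TABLE_SIZE = CHUNK_SIZE / 512; // depends on the remembered set
    static final long FIRST_OBJECT_TABLE_SIZE = CHUNK_SIZE / 512;

    /** Offset of the first object; everything before it is chunk metadata. */
    static long objectsStartOffset() {
        return HEADER_SIZE + CARD_TABLE_SIZE + FIRST_OBJECT_TABLE_SIZE;
    }

    /** Bytes usable for objects: the chunk size minus the metadata prefix. */
    static long usableSizeForObjects() {
        return CHUNK_SIZE - objectsStartOffset();
    }
}
```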
* * The size of both the CardTable and the FirstObjectTable depends on the used {@link RememberedSet} @@ -78,15 +77,22 @@ private AlignedHeapChunk() { // all static */ @RawStructure public interface AlignedHeader extends HeapChunk.Header { + @RawField + boolean getShouldSweepInsteadOfCompact(); + + @RawField + void setShouldSweepInsteadOfCompact(boolean value); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static void initialize(AlignedHeader chunk, UnsignedWord chunkSize) { + assert chunkSize.equal(HeapParameters.getAlignedHeapChunkSize()) : "expecting all aligned chunks to be the same size"; HeapChunk.initialize(chunk, AlignedHeapChunk.getObjectsStart(chunk), chunkSize); + chunk.setShouldSweepInsteadOfCompact(false); } public static void reset(AlignedHeader chunk) { - HeapChunk.initialize(chunk, AlignedHeapChunk.getObjectsStart(chunk), HeapChunk.getEndOffset(chunk)); + initialize(chunk, HeapChunk.getEndOffset(chunk)); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) @@ -98,6 +104,10 @@ public static Pointer getObjectsEnd(AlignedHeader that) { return HeapChunk.getEndPointer(that); } + public static boolean isEmpty(AlignedHeader that) { + return HeapChunk.getTopOffset(that).equal(getObjectsStartOffset()); + } + /** Allocate uninitialized memory within this AlignedHeapChunk. */ @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) static Pointer allocateMemory(AlignedHeader that, UnsignedWord size) { @@ -146,4 +156,20 @@ static boolean walkObjectsFromInline(AlignedHeader that, Pointer start, ObjectVi public static UnsignedWord getObjectsStartOffset() { return RememberedSet.get().getHeaderSizeOfAlignedChunk(); } + + @Fold + public static UnsignedWord getUsableSizeForObjects() { + return HeapParameters.getAlignedHeapChunkSize().subtract(getObjectsStartOffset()); + } + + public interface Visitor { + /** + * Visit an {@link AlignedHeapChunk}. + * + * @param chunk The {@link AlignedHeapChunk} to be visited. + * @return {@code true} if visiting should continue, {@code false} if visiting should stop. 
+ */ + @RestrictHeapAccess(access = RestrictHeapAccess.Access.NO_ALLOCATION, reason = "Must not allocate while visiting the heap.") + boolean visitChunk(AlignedHeapChunk.AlignedHeader chunk); + } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CollectionPolicy.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CollectionPolicy.java index 8b235331c44e..07f950467a1c 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CollectionPolicy.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CollectionPolicy.java @@ -44,7 +44,7 @@ public interface CollectionPolicy { static String getInitialPolicyName() { if (SubstrateOptions.UseEpsilonGC.getValue()) { return "NeverCollect"; - } else if (!SubstrateOptions.useRememberedSet()) { + } else if (!SerialGCOptions.useRememberedSet()) { return "OnlyCompletely"; } String name = SerialGCOptions.InitialCollectionPolicy.getValue(); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CompactingOldGeneration.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CompactingOldGeneration.java new file mode 100644 index 000000000000..80b43f4011bb --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CompactingOldGeneration.java @@ -0,0 +1,491 @@ +/* + * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
*/ +package com.oracle.svm.core.genscavenge; + +import static com.oracle.svm.core.snippets.KnownIntrinsics.readCallerStackPointer; +import static com.oracle.svm.core.snippets.KnownIntrinsics.readReturnAddress; + +import org.graalvm.nativeimage.IsolateThread; +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; +import org.graalvm.nativeimage.c.function.CodePointer; +import org.graalvm.word.Pointer; +import org.graalvm.word.UnsignedWord; + +import com.oracle.svm.core.AlwaysInline; +import com.oracle.svm.core.NeverInline; +import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.code.RuntimeCodeInfoMemory; +import com.oracle.svm.core.genscavenge.GCImpl.ChunkReleaser; +import com.oracle.svm.core.genscavenge.compacting.CompactingVisitor; +import com.oracle.svm.core.genscavenge.compacting.MarkStack; +import com.oracle.svm.core.genscavenge.compacting.ObjectFixupVisitor; +import com.oracle.svm.core.genscavenge.compacting.ObjectMoveInfo; +import com.oracle.svm.core.genscavenge.compacting.ObjectRefFixupVisitor; +import com.oracle.svm.core.genscavenge.compacting.PlanningVisitor; +import com.oracle.svm.core.genscavenge.compacting.RuntimeCodeCacheFixupWalker; +import com.oracle.svm.core.genscavenge.compacting.SweepingVisitor; +import com.oracle.svm.core.genscavenge.remset.BrickTable; +import com.oracle.svm.core.genscavenge.remset.RememberedSet; +import com.oracle.svm.core.graal.RuntimeCompilation; +import com.oracle.svm.core.heap.ObjectHeader; +import com.oracle.svm.core.heap.ObjectVisitor; +import com.oracle.svm.core.log.Log; +import com.oracle.svm.core.thread.VMThreads; +import com.oracle.svm.core.threadlocal.VMThreadLocalSupport; + +import jdk.graal.compiler.word.Word; + +/** + * Core of the mark-compact implementation for the old generation, which collects using (almost) + * only memory that is already in use, while {@link CopyingOldGeneration} has a worst-case memory + * usage of 2x the heap size during collections. This implementation has a single {@link Space}. + * + * Complete collections are carried out in the following stages: + * <ol> + * <li>Marking: walk the object graph from the GC roots and mark all reachable objects, keeping + * track of newly marked objects on a {@link MarkStack}.</li> + * <li>Planning: walk the aligned chunks and determine the new location of each live object, + * recording it in {@link ObjectMoveInfo} structures and the {@link BrickTable} for fast lookups.</li> + * <li>Fixup: visit all references in the image heap, the collected heap, thread stacks and thread + * locals, and the runtime code cache, and update them to point to the objects' new locations.</li> + * <li>Compaction: slide the live objects in each aligned chunk to their new locations, sweep + * chunks that contain pinned objects, and rebuild the remembered sets.</li> + * </ol> + * + * While updating references using lookups in the brick table and structures seems expensive, it + * frequently needs only a few accesses. It would be possible to introduce a field in each object that + * stores its new location during collections, but that would add significant memory overhead even + * outside of GC. In contrast, using entirely separate side tables would require extra memory only + * during GC and enable collecting with fewer passes over the heap, but would require allocating the + * tables precisely at a time when memory might be scarce. + * + * Some parts of the implementation are scattered over the GC code and can be found by following the + * usages of {@link SerialGCOptions#useCompactingOldGen()}. 
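The stages described in the class comment can be illustrated with a deliberately tiny, self-contained model (hypothetical names; none of this is SVM code): marking produces a liveness map, planning assigns each live object a slid-down target index, and compaction moves objects left to right so no live data is overwritten before it has been read.

```java
import java.util.LinkedHashMap;
import java.util.Map;

/** Schematic mark-compact over a toy "heap" array; all names are hypothetical. */
public class SlidingCompactionSketch {
    public static void main(String[] args) {
        int[] heap = {10, 20, 30, 40, 50};                    // object "contents", one slot per object
        boolean[] marked = {true, false, true, false, true};  // result of the marking stage

        // Planning: assign each live object its new (slid-down) index.
        Map<Integer, Integer> forwarding = new LinkedHashMap<>();
        int free = 0;
        for (int i = 0; i < heap.length; i++) {
            if (marked[i]) {
                forwarding.put(i, free++);
            }
        }

        // Fixup would rewrite all references here, using the forwarding information.

        // Compaction: move live objects to their planned locations, in ascending address
        // order, so a move never overwrites live data that has not been moved yet.
        for (Map.Entry<Integer, Integer> e : forwarding.entrySet()) {
            heap[e.getValue()] = heap[e.getKey()];
        }
        int newTop = free; // everything at index >= newTop is reusable free space
        System.out.println("live objects now occupy [0, " + newTop + ")");
    }
}
```

The ascending move order is what allows collecting in place: a live object's destination is always at or below its current position.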
+ */ +final class CompactingOldGeneration extends OldGeneration { + + private final Space space = new Space("Old", "O", false, HeapParameters.getMaxSurvivorSpaces() + 1); + private final MarkStack markStack = new MarkStack(); + + private final GreyObjectsWalker toGreyObjectsWalker = new GreyObjectsWalker(); + private final PlanningVisitor planningVisitor = new PlanningVisitor(); + private final ObjectRefFixupVisitor refFixupVisitor = new ObjectRefFixupVisitor(); + private final ObjectFixupVisitor fixupVisitor = new ObjectFixupVisitor(refFixupVisitor); + private final CompactingVisitor compactingVisitor = new CompactingVisitor(); + private final SweepingVisitor sweepingVisitor = new SweepingVisitor(); + private final RuntimeCodeCacheFixupWalker runtimeCodeCacheFixupWalker = new RuntimeCodeCacheFixupWalker(refFixupVisitor); + + @Platforms(Platform.HOSTED_ONLY.class) + CompactingOldGeneration(String name) { + super(name); + } + + @Override + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + void beginPromotion(boolean incrementalGc) { + if (!incrementalGc) { + absorb(HeapImpl.getHeapImpl().getYoungGeneration()); + } + toGreyObjectsWalker.setScanStart(space); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + void absorb(YoungGeneration youngGen) { + space.absorb(youngGen.getEden()); + for (int i = 0; i < youngGen.getMaxSurvivorSpaces(); i++) { + space.absorb(youngGen.getSurvivorFromSpaceAt(i)); + space.absorb(youngGen.getSurvivorToSpaceAt(i)); + } + } + + @Override + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + void blackenDirtyCardRoots(GreyToBlackObjectVisitor visitor) { + RememberedSet.get().walkDirtyObjects(space, visitor, true); + } + + @Override + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + boolean scanGreyObjects(boolean incrementalGc) { + if (incrementalGc) { + if (!toGreyObjectsWalker.haveGreyObjects()) { + return false; + } + toGreyObjectsWalker.walkGreyObjects(); + } else { + if (markStack.isEmpty()) { + return false; + } + GreyToBlackObjectVisitor visitor = GCImpl.getGCImpl().getGreyToBlackObjectVisitor(); + do { + visitor.visitObjectInline(markStack.pop()); + } while (!markStack.isEmpty()); + } + return true; + } + + @AlwaysInline("GC performance") + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + @Override + public Object promoteAlignedObject(Object original, AlignedHeapChunk.AlignedHeader originalChunk, Space originalSpace) { + if (!GCImpl.getGCImpl().isCompleteCollection()) { + assert originalSpace.isFromSpace(); + return space.copyAlignedObject(original, originalSpace); + } + assert originalSpace == space; + Word header = ObjectHeader.readHeaderFromObject(original); + if (ObjectHeaderImpl.isMarkedHeader(header)) { + return original; + } + Object result = original; + if (ObjectHeaderImpl.isIdentityHashFieldOptional() && + ObjectHeaderImpl.hasIdentityHashFromAddressInline(header) && + !originalChunk.getShouldSweepInsteadOfCompact()) { + /* + * This object's identity hash code is based on its current address, which we expect to + * change during compaction, so we must add a field to store it, which increases the + * object's size. The easiest way to handle this is to copy the object. 
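The comment above describes the one subtle interaction between compaction and identity hash codes. A toy model of the invariant being preserved (hypothetical names, not the SVM object header encoding):

```java
/** Toy model of address-based identity hashing; all names are hypothetical. */
final class IdentityHashSketch {
    static final class Obj {
        long address;      // the object's current location
        Long storedHash;   // null while the hash is still derived from the address

        int identityHash() {
            return storedHash != null ? storedHash.intValue() : Long.hashCode(address);
        }
    }

    /** Materialize the address-based hash into a field before the object moves. */
    static void move(Obj o, long newAddress) {
        if (o.storedHash == null) {
            o.storedHash = (long) Long.hashCode(o.address); // must happen before the address changes
        }
        o.address = newAddress; // the observable identity hash now stays stable
    }
}
```

In the real code the extra field grows the object, which is why such objects are copied rather than compacted in place; objects in chunks flagged `shouldSweepInsteadOfCompact` keep their addresses, so they can skip this entirely.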
*/ + result = space.copyAlignedObject(original, originalSpace); + assert !ObjectHeaderImpl.hasIdentityHashFromAddressInline(ObjectHeader.readHeaderFromObject(result)); + } + ObjectHeaderImpl.setMarked(result); + markStack.push(result); + return result; + } + + @AlwaysInline("GC performance") + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + @Override + protected Object promoteUnalignedObject(Object original, UnalignedHeapChunk.UnalignedHeader originalChunk, Space originalSpace) { + if (!GCImpl.getGCImpl().isCompleteCollection()) { + assert originalSpace.isFromSpace(); + space.promoteUnalignedHeapChunk(originalChunk, originalSpace); + return original; + } + assert originalSpace == space; + if (!ObjectHeaderImpl.isMarked(original)) { + ObjectHeaderImpl.setMarked(original); + markStack.push(original); + } + return original; + } + + @Override + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + protected boolean promotePinnedObject(Object obj, HeapChunk.Header originalChunk, boolean isAligned, Space originalSpace) { + if (!GCImpl.getGCImpl().isCompleteCollection()) { + assert originalSpace != space && originalSpace.isFromSpace(); + if (isAligned) { + space.promoteAlignedHeapChunk((AlignedHeapChunk.AlignedHeader) originalChunk, originalSpace); + } else { + space.promoteUnalignedHeapChunk((UnalignedHeapChunk.UnalignedHeader) originalChunk, originalSpace); + } + return true; + } + assert originalSpace == space; + if (ObjectHeaderImpl.isMarked(obj)) { + assert !isAligned || ((AlignedHeapChunk.AlignedHeader) originalChunk).getShouldSweepInsteadOfCompact(); + return true; + } + if (isAligned) { + ((AlignedHeapChunk.AlignedHeader) originalChunk).setShouldSweepInsteadOfCompact(true); + } + ObjectHeaderImpl.setMarked(obj); + markStack.push(obj); + return true; + } + + @Override + void sweepAndCompact(Timers timers, ChunkReleaser chunkReleaser) { + /* + * Update or clear reference object referent fields now because planning below overwrites + * referent objects that do not survive or that have been copied elsewhere (e.g. to add an + * identity hash code field). 
*/ + ReferenceObjectProcessing.updateForwardedRefs(); + + Timer oldPlanningTimer = timers.oldPlanning.open(); + try { + planCompaction(); + } finally { + oldPlanningTimer.close(); + } + + Timer oldFixupTimer = timers.oldFixup.open(); + try { + fixupReferencesBeforeCompaction(chunkReleaser, timers); + } finally { + oldFixupTimer.close(); + } + + Timer oldCompactionTimer = timers.oldCompaction.open(); + try { + compact(timers); + } finally { + oldCompactionTimer.close(); + } + } + + private void planCompaction() { + planningVisitor.init(space); + space.walkAlignedHeapChunks(planningVisitor); + } + + @Uninterruptible(reason = "Avoid unnecessary safepoint checks in GC for performance.") + private void fixupReferencesBeforeCompaction(ChunkReleaser chunkReleaser, Timers timers) { + Timer oldFixupAlignedChunksTimer = timers.oldFixupAlignedChunks.open(); + try { + AlignedHeapChunk.AlignedHeader aChunk = space.getFirstAlignedHeapChunk(); + while (aChunk.isNonNull()) { + ObjectMoveInfo.walkObjects(aChunk, fixupVisitor); + aChunk = HeapChunk.getNext(aChunk); + } + } finally { + oldFixupAlignedChunksTimer.close(); + } + + Timer oldFixupImageHeapTimer = timers.oldFixupImageHeap.open(); + try { + for (ImageHeapInfo info = HeapImpl.getFirstImageHeapInfo(); info != null; info = info.next) { + GCImpl.walkImageHeapRoots(info, fixupVisitor); + } + if (AuxiliaryImageHeap.isPresent()) { + ImageHeapInfo auxImageHeapInfo = AuxiliaryImageHeap.singleton().getImageHeapInfo(); + if (auxImageHeapInfo != null) { + GCImpl.walkImageHeapRoots(auxImageHeapInfo, fixupVisitor); + } + } + } finally { + oldFixupImageHeapTimer.close(); + } + + Timer oldFixupThreadLocalsTimer = timers.oldFixupThreadLocals.open(); + try { + for (IsolateThread isolateThread = VMThreads.firstThread(); isolateThread.isNonNull(); isolateThread = VMThreads.nextThread(isolateThread)) { + VMThreadLocalSupport.singleton().walk(isolateThread, refFixupVisitor); + } + } finally { + oldFixupThreadLocalsTimer.close(); + } + + Timer oldFixupStackTimer = timers.oldFixupStack.open(); + try { + fixupStackReferences(); + } finally { + oldFixupStackTimer.close(); + } + + /* + * Check each unaligned object and fix its references if the object is marked. Add the chunk + * to the releaser's list if the object is not marked and therefore won't survive. 
*/ + Timer oldFixupUnalignedChunksTimer = timers.oldFixupUnalignedChunks.open(); + try { + fixupUnalignedChunkReferences(chunkReleaser); + } finally { + oldFixupUnalignedChunksTimer.close(); + } + + Timer oldFixupRuntimeCodeCacheTimer = timers.oldFixupRuntimeCodeCache.open(); + try { + if (RuntimeCompilation.isEnabled()) { + RuntimeCodeInfoMemory.singleton().walkRuntimeMethodsDuringGC(runtimeCodeCacheFixupWalker); + } + } finally { + oldFixupRuntimeCodeCacheTimer.close(); + } + } + + @Uninterruptible(reason = "Avoid unnecessary safepoint checks in GC for performance.") + private void fixupUnalignedChunkReferences(ChunkReleaser chunkReleaser) { + UnalignedHeapChunk.UnalignedHeader uChunk = space.getFirstUnalignedHeapChunk(); + while (uChunk.isNonNull()) { + UnalignedHeapChunk.UnalignedHeader next = HeapChunk.getNext(uChunk); + Pointer objPointer = UnalignedHeapChunk.getObjectStart(uChunk); + Object obj = objPointer.toObject(); + if (ObjectHeaderImpl.isMarked(obj)) { + ObjectHeaderImpl.unsetMarkedAndKeepRememberedSetBit(obj); + RememberedSet.get().clearRememberedSet(uChunk); + UnalignedHeapChunk.walkObjectsInline(uChunk, fixupVisitor); + } else { + space.extractUnalignedHeapChunk(uChunk); + chunkReleaser.add(uChunk); + } + uChunk = next; + } + } + + @NeverInline("Starting a stack walk in the caller frame. " + + "Note that we could also start the stack walk further down the stack, because GC stack frames must not access any objects that are processed by the GC. " + + "But we don't store stack frame information for the first frame we would need to process.") + @Uninterruptible(reason = "Required by called JavaStackWalker methods. We are at a safepoint during GC, so it does not change anything for this method.") + private void fixupStackReferences() { + Pointer sp = readCallerStackPointer(); + CodePointer ip = readReturnAddress(); + GCImpl.walkStackRoots(refFixupVisitor, sp, ip, false); + } + + private void compact(Timers timers) { + AlignedHeapChunk.AlignedHeader chunk = space.getFirstAlignedHeapChunk(); + while (chunk.isNonNull()) { + if (chunk.getShouldSweepInsteadOfCompact()) { + ObjectMoveInfo.visit(chunk, sweepingVisitor); + chunk.setShouldSweepInsteadOfCompact(false); + } else { + compactingVisitor.init(chunk); + ObjectMoveInfo.visit(chunk, compactingVisitor); + } + chunk = HeapChunk.getNext(chunk); + } + + Timer oldCompactionRememberedSetsTimer = timers.oldCompactionRememberedSets.open(); + try { + chunk = space.getFirstAlignedHeapChunk(); + while (chunk.isNonNull()) { + /* + * Clears the card table (which currently contains the brick table) and updates the + * first object table. + * + * GR-54022: we should be able to avoid this pass and build the first object tables + * during planning and reset card tables once we detect that we are finished with a + * chunk during compaction. The remembered set bits are already set after planning. + */ + if (!AlignedHeapChunk.isEmpty(chunk)) { + RememberedSet.get().enableRememberedSetForChunk(chunk); + } // empty chunks will be freed or reset before reuse, no need to reinitialize here + + chunk = HeapChunk.getNext(chunk); + } + } finally { + oldCompactionRememberedSetsTimer.close(); + } + } + + /** + * At the end of the collection, adds empty aligned chunks to be released (after compaction, + * these are typically the chunks at the end of the space). Unaligned chunks have already been + * added in {@link #fixupReferencesBeforeCompaction}. 
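The fixup passes above resolve every old address to its new location. As a rough sketch of the brick-table idea the class comment alludes to — with hypothetical names and sizes, not the actual `BrickTable`/`ObjectMoveInfo` encoding — each fixed-size address "brick" points at the last move-info entry starting at or before that brick, so a lookup is one table read plus a short forward walk:

```java
/** Rough sketch of a brick-table lookup; all names and sizes are hypothetical. */
final class BrickTableSketch {
    static final int BRICK_SIZE = 128;

    // Ascending start addresses of contiguous live ranges, and where each range moves to.
    final long[] rangeStart;
    final long[] rangeNewStart;
    final int[] brickToRange; // brick index -> last range starting at or before the brick base

    BrickTableSketch(long[] rangeStart, long[] rangeNewStart, long spaceSize) {
        this.rangeStart = rangeStart;
        this.rangeNewStart = rangeNewStart;
        this.brickToRange = new int[(int) (spaceSize / BRICK_SIZE) + 1];
        int r = 0;
        for (int b = 0; b < brickToRange.length; b++) {
            long brickBase = (long) b * BRICK_SIZE;
            while (r + 1 < rangeStart.length && rangeStart[r + 1] <= brickBase) {
                r++;
            }
            brickToRange[b] = r;
        }
    }

    /** New address of a live object; oldAddress must lie inside some live range. */
    long newAddressOf(long oldAddress) {
        int r = brickToRange[(int) (oldAddress / BRICK_SIZE)];
        while (r + 1 < rangeStart.length && rangeStart[r + 1] <= oldAddress) {
            r++; // usually only a few steps: at most the ranges within one brick
        }
        return rangeNewStart[r] + (oldAddress - rangeStart[r]);
    }
}
```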
+ */ + @Override + void releaseSpaces(ChunkReleaser chunkReleaser) { + AlignedHeapChunk.AlignedHeader aChunk = space.getFirstAlignedHeapChunk(); + while (aChunk.isNonNull()) { + AlignedHeapChunk.AlignedHeader next = HeapChunk.getNext(aChunk); + if (AlignedHeapChunk.isEmpty(aChunk)) { + space.extractAlignedHeapChunk(aChunk); + chunkReleaser.add(aChunk); + } + aChunk = next; + } + } + + @Override + void swapSpaces() { + // Compacting in-place, no spaces to swap. + } + + @Override + boolean isInSpace(Pointer ptr) { + return space.contains(ptr); + } + + @Override + public boolean walkObjects(ObjectVisitor visitor) { + return space.walkObjects(visitor); + } + + @Override + public void logUsage(Log log) { + space.logUsage(log, true); + } + + @Override + public void logChunks(Log log) { + space.logChunks(log); + } + + /** + * This value is only updated during a GC. Be careful when calling this method during a GC as it + * might wrongly include chunks that will be freed at the end of the GC. + */ + @Override + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + UnsignedWord getChunkBytes() { + return space.getChunkBytes(); + } + + @Override + UnsignedWord computeObjectBytes() { + return space.computeObjectBytes(); + } + + @Override + boolean verifyRememberedSets() { + return HeapVerifier.verifyRememberedSet(space); + } + + @Override + boolean verifySpaces() { + return HeapVerifier.verifySpace(space); + } + + @Override + void checkSanityBeforeCollection() { + assert markStack.isEmpty(); + } + + @Override + void checkSanityAfterCollection() { + assert markStack.isEmpty(); + } + + @Override + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + void tearDown() { + markStack.tearDown(); + space.tearDown(); + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CopyingOldGeneration.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CopyingOldGeneration.java new file mode 100644 index 000000000000..9962dff2e554 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CopyingOldGeneration.java @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.svm.core.genscavenge; + +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; +import org.graalvm.word.Pointer; +import org.graalvm.word.UnsignedWord; + +import com.oracle.svm.core.AlwaysInline; +import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.genscavenge.GCImpl.ChunkReleaser; +import com.oracle.svm.core.genscavenge.remset.RememberedSet; +import com.oracle.svm.core.heap.ObjectVisitor; +import com.oracle.svm.core.log.Log; + +/** + * An OldGeneration has two Spaces, {@link #fromSpace} for existing objects, and {@link #toSpace} + * for newly-allocated or promoted objects. + */ +final class CopyingOldGeneration extends OldGeneration { + /* These Spaces are final and are flipped by transferring chunks from one to the other. */ + private final Space fromSpace; + private final Space toSpace; + + private final GreyObjectsWalker toGreyObjectsWalker = new GreyObjectsWalker(); + + @Platforms(Platform.HOSTED_ONLY.class) + CopyingOldGeneration(String name) { + super(name); + int age = HeapParameters.getMaxSurvivorSpaces() + 1; + this.fromSpace = new Space("Old", "O", false, age); + this.toSpace = new Space("Old To", "O", true, age); + } + + @Override + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + void tearDown() { + fromSpace.tearDown(); + toSpace.tearDown(); + } + + @Override + public boolean walkObjects(ObjectVisitor visitor) { + return getFromSpace().walkObjects(visitor) && getToSpace().walkObjects(visitor); + } + + /** Promote an Object to ToSpace if it is not already in ToSpace. */ + @AlwaysInline("GC performance") + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + @Override + public Object promoteAlignedObject(Object original, AlignedHeapChunk.AlignedHeader originalChunk, Space originalSpace) { + assert originalSpace.isFromSpace(); + return getToSpace().copyAlignedObject(original, originalSpace); + } + + @AlwaysInline("GC performance") + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + @Override + protected Object promoteUnalignedObject(Object original, UnalignedHeapChunk.UnalignedHeader originalChunk, Space originalSpace) { + assert originalSpace.isFromSpace(); + getToSpace().promoteUnalignedHeapChunk(originalChunk, originalSpace); + return original; + } + + @Override + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + protected boolean promotePinnedObject(Object obj, HeapChunk.Header originalChunk, boolean isAligned, Space originalSpace) { + assert originalSpace.isFromSpace(); + if (isAligned) { + getToSpace().promoteAlignedHeapChunk((AlignedHeapChunk.AlignedHeader) originalChunk, originalSpace); + } else { + getToSpace().promoteUnalignedHeapChunk((UnalignedHeapChunk.UnalignedHeader) originalChunk, originalSpace); + } + return true; + } + + @Override + void releaseSpaces(ChunkReleaser chunkReleaser) { + getFromSpace().releaseChunks(chunkReleaser); + } + + @Override + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + void beginPromotion(boolean incrementalGc) { + if (incrementalGc) { + emptyFromSpaceIntoToSpace(); + } + toGreyObjectsWalker.setScanStart(getToSpace()); + } + + @Override + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + boolean scanGreyObjects(boolean incrementalGc) { + if (!toGreyObjectsWalker.haveGreyObjects()) { + return false; + } + toGreyObjectsWalker.walkGreyObjects(); + 
return true; + } + + @Override + public void logUsage(Log log) { + getFromSpace().logUsage(log, true); + getToSpace().logUsage(log, false); + } + + @Override + public void logChunks(Log log) { + getFromSpace().logChunks(log); + getToSpace().logChunks(log); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + Space getFromSpace() { + return fromSpace; + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + Space getToSpace() { + return toSpace; + } + + @Override + void swapSpaces() { + assert getFromSpace().isEmpty() : "fromSpace should be empty."; + getFromSpace().absorb(getToSpace()); + } + + @Override + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + void blackenDirtyCardRoots(GreyToBlackObjectVisitor visitor) { + RememberedSet.get().walkDirtyObjects(toSpace, visitor, true); + } + + @Override + boolean isInSpace(Pointer ptr) { + return fromSpace.contains(ptr) || toSpace.contains(ptr); + } + + @Override + boolean verifyRememberedSets() { + boolean success = true; + success &= HeapVerifier.verifyRememberedSet(toSpace); + success &= HeapVerifier.verifyRememberedSet(fromSpace); + return success; + } + + @Override + boolean verifySpaces() { + boolean success = true; + if (!toSpace.isEmpty()) { + Log.log().string("Old generation to-space contains chunks: firstAlignedChunk: ").zhex(toSpace.getFirstAlignedHeapChunk()) + .string(", firstUnalignedChunk: ").zhex(toSpace.getFirstUnalignedHeapChunk()).newline(); + success = false; + } + success &= HeapVerifier.verifySpace(fromSpace); + success &= HeapVerifier.verifySpace(toSpace); + return success; + } + + @Override + void sweepAndCompact(Timers timers, ChunkReleaser chunkReleaser) { + /* + * Compaction occurred implicitly by copying live objects one after another. Sweeping could + * be done on dead objects in pinned chunks, but is currently not implemented. + */ + } + + /* Extract all the HeapChunks from FromSpace and append them to ToSpace. */ + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + void emptyFromSpaceIntoToSpace() { + getToSpace().absorb(getFromSpace()); + } + + /** + * This value is only updated during a GC. Be careful when calling this method during a GC as it + * might wrongly include chunks that will be freed at the end of the GC. 
+ */ + @Override + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + UnsignedWord getChunkBytes() { + return fromSpace.getChunkBytes().add(toSpace.getChunkBytes()); + } + + @Override + UnsignedWord computeObjectBytes() { + return fromSpace.computeObjectBytes().add(toSpace.computeObjectBytes()); + } + + @Override + void checkSanityBeforeCollection() { + assert toSpace.isEmpty() : "toSpace should be empty before a collection."; + } + + @Override + void checkSanityAfterCollection() { + assert toSpace.isEmpty() : "toSpace should be empty after a collection."; + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java index c24b080b18ac..2c2fd52eba51 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java @@ -71,6 +71,8 @@ import com.oracle.svm.core.heap.GC; import com.oracle.svm.core.heap.GCCause; import com.oracle.svm.core.heap.NoAllocationVerifier; +import com.oracle.svm.core.heap.ObjectReferenceVisitor; +import com.oracle.svm.core.heap.ObjectVisitor; import com.oracle.svm.core.heap.OutOfMemoryUtil; import com.oracle.svm.core.heap.PhysicalMemory; import com.oracle.svm.core.heap.ReferenceHandler; @@ -265,7 +267,7 @@ private boolean collectImpl(GCCause cause, long requestingNanoTime, boolean forc } private boolean doCollectImpl(GCCause cause, long requestingNanoTime, boolean forceFullGC, boolean forceNoIncremental) { - precondition(); + checkSanityBeforeCollection(); CommittedMemoryProvider.get().beforeGarbageCollection(); @@ -295,7 +297,7 @@ private boolean doCollectImpl(GCCause cause, long requestingNanoTime, boolean fo HeapImpl.getChunkProvider().freeExcessAlignedChunks(); CommittedMemoryProvider.get().afterGarbageCollection(); - postcondition(); + checkSanityAfterCollection(); return outOfMemory; } @@ -311,7 +313,7 @@ private boolean doCollectOnce(GCCause cause, long requestingNanoTime, boolean co if (!followsIncremental) { // we would have verified the heap after the incremental GC verifyBeforeGC(); } - scavenge(!complete); + doCollectCore(!complete); verifyAfterGC(); if (complete) { lastWholeHeapExaminedTimeMillis = System.currentTimeMillis(); @@ -443,17 +445,16 @@ private Log printGCPrefixAndTime() { return Log.log().string("[").rational(uptimeMs, TimeUtils.millisPerSecond, 3).string("s").string("] GC(").unsigned(collectionEpoch).string(") "); } - private static void precondition() { - OldGeneration oldGen = HeapImpl.getHeapImpl().getOldGeneration(); - assert oldGen.getToSpace().isEmpty() : "oldGen.getToSpace() should be empty before a collection."; + private static void checkSanityBeforeCollection() { + HeapImpl heap = HeapImpl.getHeapImpl(); + heap.getYoungGeneration().checkSanityBeforeCollection(); + heap.getOldGeneration().checkSanityBeforeCollection(); } - private static void postcondition() { + private static void checkSanityAfterCollection() { HeapImpl heap = HeapImpl.getHeapImpl(); - YoungGeneration youngGen = heap.getYoungGeneration(); - OldGeneration oldGen = heap.getOldGeneration(); - assert youngGen.getEden().isEmpty() : "youngGen.getEden() should be empty after a collection."; - assert oldGen.getToSpace().isEmpty() : "oldGen.getToSpace() should be empty after a collection."; + heap.getYoungGeneration().checkSanityAfterCollection(); + 
heap.getOldGeneration().checkSanityAfterCollection(); } @Fold @@ -478,8 +479,8 @@ public boolean isCompleteCollection() { return completeCollection; } - /** Scavenge, either from dirty roots or from all roots, and process discovered references. */ - private void scavenge(boolean incremental) { + /** Collect, either incrementally or completely, and process discovered references. */ + private void doCollectCore(boolean incremental) { GreyToBlackObjRefVisitor.Counters counters = greyToBlackObjRefVisitor.openCounters(); long startTicks; try { @@ -487,7 +488,8 @@ private void scavenge(boolean incremental) { try { startTicks = JfrGCEvents.startGCPhasePause(); try { - cheneyScan(incremental); + /* Scan reachable objects and potentially copy them as soon as they are discovered. */ + scan(incremental); } finally { JfrGCEvents.emitGCPhasePauseEvent(getCollectionEpoch(), incremental ? "Incremental Scan" : "Scan", startTicks); } @@ -495,6 +497,11 @@ private void scavenge(boolean incremental) { rootScanTimer.close(); } + if (!incremental) { + /* Sweep or compact objects in the old generation unless already done by copying. */ + HeapImpl.getHeapImpl().getOldGeneration().sweepAndCompact(timers, chunkReleaser); + } + Timer referenceObjectsTimer = timers.referenceObjects.open(); try { startTicks = JfrGCEvents.startGCPhasePause(); @@ -529,18 +536,18 @@ private void scavenge(boolean incremental) { Timer releaseSpacesTimer = timers.releaseSpaces.open(); try { - assert chunkReleaser.isEmpty(); + assert SerialGCOptions.useCompactingOldGen() || chunkReleaser.isEmpty(); startTicks = JfrGCEvents.startGCPhasePause(); try { releaseSpaces(); /* - * Do not uncommit any aligned chunks yet if we just did an incremental GC so if - * we decide to do a full GC next, we can reuse the chunks for copying live old - * objects with fewer allocations. In either case, excess chunks are - * released later. + * With a copying collector, do not uncommit any aligned chunks yet if we just + * did an incremental GC, so that if we decide to do a full GC next, we can reuse + * the chunks for copying live old objects with fewer chunk allocations. In either + * case, excess chunks are released later. */ - boolean keepAllAlignedChunks = incremental; + boolean keepAllAlignedChunks = !SerialGCOptions.useCompactingOldGen() && incremental; chunkReleaser.release(keepAllAlignedChunks); } finally { JfrGCEvents.emitGCPhasePauseEvent(getCollectionEpoch(), "Release Spaces", startTicks); @@ -585,27 +592,26 @@ private void cleanRuntimeCodeCache() { } @Uninterruptible(reason = "We don't want any safepoint checks in the core part of the GC.") - private void cheneyScan(boolean incremental) { + private void scan(boolean incremental) { if (incremental) { - cheneyScanFromDirtyRoots(); + scanFromDirtyRoots(); } else { - cheneyScanFromRoots(); + scanFromRoots(); } } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - private void cheneyScanFromRoots() { - Timer cheneyScanFromRootsTimer = timers.cheneyScanFromRoots.open(); + private void scanFromRoots() { + Timer scanFromRootsTimer = timers.scanFromRoots.open(); try { long startTicks = JfrGCEvents.startGCPhasePause(); try { - /* Take a snapshot of the heap so that I can visit all the promoted Objects. */ /* - * Debugging tip: I could move the taking of the snapshot and the scanning of grey - * Objects into each of the blackening methods, or even put them around individual - * Object reference visits. + * Snapshot the heap so that objects that are promoted afterwards can be visited. 
+ * When using a compacting old generation, it absorbs all chunks from the young + * generation at this point. */ - prepareForPromotion(false); + beginPromotion(false); } finally { JfrGCEvents.emitGCPhasePauseEvent(getCollectionEpoch(), "Snapshot Heap", startTicks); } @@ -659,36 +665,19 @@ private void cheneyScanFromRoots() { JfrGCEvents.emitGCPhasePauseEvent(getCollectionEpoch(), "Scan From Roots", startTicks); } } finally { - cheneyScanFromRootsTimer.close(); + scanFromRootsTimer.close(); } } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - private void cheneyScanFromDirtyRoots() { - Timer cheneyScanFromDirtyRootsTimer = timers.cheneyScanFromDirtyRoots.open(); + private void scanFromDirtyRoots() { + Timer scanFromDirtyRootsTimer = timers.scanFromDirtyRoots.open(); try { long startTicks = JfrGCEvents.startGCPhasePause(); - try { - /* - * Move all the chunks in fromSpace to toSpace. That does not make those chunks - * grey, so I have to use the dirty cards marks to blacken them, but that's what - * card marks are for. - */ - OldGeneration oldGen = HeapImpl.getHeapImpl().getOldGeneration(); - oldGen.emptyFromSpaceIntoToSpace(); - } finally { - JfrGCEvents.emitGCPhasePauseEvent(getCollectionEpoch(), "Promote Old Generation", startTicks); - } - startTicks = JfrGCEvents.startGCPhasePause(); try { - /* Take a snapshot of the heap so that I can visit all the promoted Objects. */ - /* - * Debugging tip: I could move the taking of the snapshot and the scanning of grey - * Objects into each of the blackening methods, or even put them around individual - * Object reference visits. - */ - prepareForPromotion(true); + /* Snapshot the heap so that objects that are promoted afterwards can be visited. */ + beginPromotion(true); } finally { JfrGCEvents.emitGCPhasePauseEvent(getCollectionEpoch(), "Snapshot Heap", startTicks); } @@ -751,7 +740,7 @@ private void cheneyScanFromDirtyRoots() { JfrGCEvents.emitGCPhasePauseEvent(getCollectionEpoch(), "Scan From Roots", startTicks); } } finally { - cheneyScanFromDirtyRootsTimer.close(); + scanFromDirtyRootsTimer.close(); } } @@ -815,41 +804,48 @@ private void blackenStackRoots() { Pointer sp = readCallerStackPointer(); CodePointer ip = readReturnAddress(); - JavaStackWalk walk = StackValue.get(JavaStackWalk.class); - JavaStackWalker.initWalk(walk, sp, ip); - walkStack(walk); - - /* - * Scan the stacks of all the threads. Other threads will be blocked at a safepoint (or - * in native code) so they will each have a JavaFrameAnchor in their VMThread. - */ - for (IsolateThread vmThread = VMThreads.firstThread(); vmThread.isNonNull(); vmThread = VMThreads.nextThread(vmThread)) { - if (vmThread == CurrentIsolate.getCurrentThread()) { - /* - * The current thread is already scanned by code above, so we do not have to do - * anything for it here. It might have a JavaFrameAnchor from earlier Java-to-C - * transitions, but certainly not at the top of the stack since it is running - * this code, so just this scan would be incomplete. - */ - continue; - } - if (JavaStackWalker.initWalk(walk, vmThread)) { - walkStack(walk); - } - } + walkStackRoots(greyToBlackObjRefVisitor, sp, ip, true); } finally { blackenStackRootsTimer.close(); } } + @AlwaysInline("GC performance") + @Uninterruptible(reason = "Required by called JavaStackWalker methods. 
We are at a safepoint during GC, so it does not change anything for this method.", mayBeInlined = true) + static void walkStackRoots(ObjectReferenceVisitor visitor, Pointer currentThreadSp, CodePointer currentThreadIp, boolean visitRuntimeCodeInfo) { + JavaStackWalk walk = StackValue.get(JavaStackWalk.class); + JavaStackWalker.initWalk(walk, currentThreadSp, currentThreadIp); + walkStack(walk, visitor, visitRuntimeCodeInfo); + + /* + * Scan the stacks of all the threads. Other threads will be blocked at a safepoint (or in + * native code) so they will each have a JavaFrameAnchor in their VMThread. + */ + for (IsolateThread vmThread = VMThreads.firstThread(); vmThread.isNonNull(); vmThread = VMThreads.nextThread(vmThread)) { + if (vmThread == CurrentIsolate.getCurrentThread()) { + /* + * The current thread is already scanned by code above, so we do not have to do + * anything for it here. It might have a JavaFrameAnchor from earlier Java-to-C + * transitions, but certainly not at the top of the stack since it is running this + * code, so just this scan would be incomplete. + */ + continue; + } + if (JavaStackWalker.initWalk(walk, vmThread)) { + walkStack(walk, visitor, visitRuntimeCodeInfo); + } + } + } + /** * This method inlines {@link JavaStackWalker#continueWalk(JavaStackWalk, CodeInfo)} and * {@link CodeInfoTable#visitObjectReferences}. This avoids looking up the * {@link SimpleCodeInfoQueryResult} twice per frame, and also ensures that there are no virtual * calls to a stack frame visitor. */ - @Uninterruptible(reason = "Required by called JavaStackWalker methods. We are at a safepoint during GC, so it does not change anything for this method.") - private void walkStack(JavaStackWalk walk) { + @AlwaysInline("GC performance") + @Uninterruptible(reason = "Required by called JavaStackWalker methods. We are at a safepoint during GC, so it does not change anything for this method.", mayBeInlined = true) + private static void walkStack(JavaStackWalk walk, ObjectReferenceVisitor visitor, boolean visitRuntimeCodeInfo) { assert VMOperation.isGCInProgress() : "This methods accesses a CodeInfo without a tether"; while (true) { @@ -873,7 +869,7 @@ private void walkStack(JavaStackWalk walk) { if (referenceMapIndex == ReferenceMapIndex.NO_REFERENCE_MAP) { throw CodeInfoTable.reportNoReferenceMap(sp, ip, codeInfo); } - CodeReferenceMapDecoder.walkOffsetsFromPointer(sp, referenceMapEncoding, referenceMapIndex, greyToBlackObjRefVisitor, null); + CodeReferenceMapDecoder.walkOffsetsFromPointer(sp, referenceMapEncoding, referenceMapIndex, visitor, null); } else { /* * This is a deoptimized frame. The DeoptimizedFrame object is stored in the frame, @@ -881,13 +877,13 @@ private void walkStack(JavaStackWalk walk) { */ } - if (RuntimeCompilation.isEnabled() && !CodeInfoAccess.isAOTImageCode(codeInfo)) { + if (RuntimeCompilation.isEnabled() && visitRuntimeCodeInfo && !CodeInfoAccess.isAOTImageCode(codeInfo)) { /* * Runtime-compiled code that is currently on the stack must be kept alive. So, we * mark the tether as strongly reachable. The RuntimeCodeCacheWalker will handle all * other object references later on. 
*/ - RuntimeCodeInfoAccess.walkTether(codeInfo, greyToBlackObjRefVisitor); + RuntimeCodeInfoAccess.walkTether(codeInfo, visitor); } if (!JavaStackWalker.continueWalk(walk, queryResult, deoptFrame)) { @@ -986,8 +982,14 @@ private void blackenImageHeapRoots() { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) private void blackenImageHeapRoots(ImageHeapInfo imageHeapInfo) { - ImageHeapWalker.walkPartitionInline(imageHeapInfo.firstWritableRegularObject, imageHeapInfo.lastWritableRegularObject, greyToBlackObjectVisitor, true); - ImageHeapWalker.walkPartitionInline(imageHeapInfo.firstWritableHugeObject, imageHeapInfo.lastWritableHugeObject, greyToBlackObjectVisitor, false); + walkImageHeapRoots(imageHeapInfo, greyToBlackObjectVisitor); + } + + @AlwaysInline("GC Performance") + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + static void walkImageHeapRoots(ImageHeapInfo imageHeapInfo, ObjectVisitor visitor) { + ImageHeapWalker.walkPartitionInline(imageHeapInfo.firstWritableRegularObject, imageHeapInfo.lastWritableRegularObject, visitor, true); + ImageHeapWalker.walkPartitionInline(imageHeapInfo.firstWritableHugeObject, imageHeapInfo.lastWritableHugeObject, visitor, false); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) @@ -998,19 +1000,18 @@ private void blackenDirtyCardRoots() { * Walk To-Space looking for dirty cards, and within those for old-to-young pointers. * Promote any referenced young objects. */ - Space oldGenToSpace = HeapImpl.getHeapImpl().getOldGeneration().getToSpace(); - RememberedSet.get().walkDirtyObjects(oldGenToSpace, greyToBlackObjectVisitor, true); + HeapImpl.getHeapImpl().getOldGeneration().blackenDirtyCardRoots(greyToBlackObjectVisitor); } finally { blackenDirtyCardRootsTimer.close(); } } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - private static void prepareForPromotion(boolean isIncremental) { + private static void beginPromotion(boolean isIncremental) { HeapImpl heap = HeapImpl.getHeapImpl(); - heap.getOldGeneration().prepareForPromotion(); + heap.getOldGeneration().beginPromotion(isIncremental); if (isIncremental) { - heap.getYoungGeneration().prepareForPromotion(); + heap.getYoungGeneration().beginPromotion(); } } @@ -1019,9 +1020,9 @@ private void scanGreyObjects(boolean isIncremental) { Timer scanGreyObjectsTimer = timers.scanGreyObjects.open(); try { if (isIncremental) { - scanGreyObjectsLoop(); + incrementalScanGreyObjectsLoop(); } else { - HeapImpl.getHeapImpl().getOldGeneration().scanGreyObjects(); + HeapImpl.getHeapImpl().getOldGeneration().scanGreyObjects(false); } } finally { scanGreyObjectsTimer.close(); @@ -1029,14 +1030,14 @@ private void scanGreyObjects(boolean isIncremental) { } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - private static void scanGreyObjectsLoop() { + private static void incrementalScanGreyObjectsLoop() { HeapImpl heap = HeapImpl.getHeapImpl(); YoungGeneration youngGen = heap.getYoungGeneration(); OldGeneration oldGen = heap.getOldGeneration(); boolean hasGrey; do { hasGrey = youngGen.scanGreyObjects(); - hasGrey |= oldGen.scanGreyObjects(); + hasGrey |= oldGen.scanGreyObjects(true); } while (hasGrey); } @@ -1048,7 +1049,8 @@ Object promoteObject(Object original, UnsignedWord header) { boolean isAligned = ObjectHeaderImpl.isAlignedHeader(header); Header originalChunk = getChunk(original, isAligned); Space originalSpace = 
HeapChunk.getSpace(originalChunk); - if (!originalSpace.isFromSpace()) { + if (originalSpace.isToSpace()) { + assert !SerialGCOptions.useCompactingOldGen() || !completeCollection; return original; } @@ -1092,16 +1094,16 @@ private void promotePinnedObject(PinnedObjectImpl pinned) { boolean isAligned = ObjectHeaderImpl.isAlignedObject(referent); Header originalChunk = getChunk(referent, isAligned); Space originalSpace = HeapChunk.getSpace(originalChunk); - if (originalSpace.isFromSpace()) { + if (originalSpace.isFromSpace() || (originalSpace.isCompactingOldSpace() && completeCollection)) { boolean promoted = false; if (!completeCollection && originalSpace.getNextAgeForPromotion() < policy.getTenuringAge()) { - promoted = heap.getYoungGeneration().promoteChunk(originalChunk, isAligned, originalSpace); + promoted = heap.getYoungGeneration().promotePinnedObject(referent, originalChunk, isAligned, originalSpace); if (!promoted) { accounting.onSurvivorOverflowed(); } } if (!promoted) { - heap.getOldGeneration().promoteChunk(originalChunk, isAligned, originalSpace); + heap.getOldGeneration().promotePinnedObject(referent, originalChunk, isAligned, originalSpace); } } } @@ -1109,9 +1111,8 @@ private void promotePinnedObject(PinnedObjectImpl pinned) { private static void swapSpaces() { HeapImpl heap = HeapImpl.getHeapImpl(); - OldGeneration oldGen = heap.getOldGeneration(); heap.getYoungGeneration().swapSpaces(); - oldGen.swapSpaces(); + heap.getOldGeneration().swapSpaces(); } private void releaseSpaces() { diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Generation.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Generation.java index f109d30f4f3d..11cdc4d7b10f 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Generation.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Generation.java @@ -99,5 +99,11 @@ public String getName() { * * @return true on success, false if the there was insufficient capacity. */ - protected abstract boolean promoteChunk(HeapChunk.Header originalChunk, boolean isAligned, Space originalSpace); + protected abstract boolean promotePinnedObject(Object obj, HeapChunk.Header originalChunk, boolean isAligned, Space originalSpace); + + void checkSanityBeforeCollection() { + } + + void checkSanityAfterCollection() { + } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GreyToBlackObjRefVisitor.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GreyToBlackObjRefVisitor.java index bad7d773496d..3b9de4873812 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GreyToBlackObjRefVisitor.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GreyToBlackObjRefVisitor.java @@ -24,7 +24,6 @@ */ package com.oracle.svm.core.genscavenge; -import jdk.graal.compiler.word.Word; import org.graalvm.nativeimage.Platform; import org.graalvm.nativeimage.Platforms; import org.graalvm.word.Pointer; @@ -38,6 +37,8 @@ import com.oracle.svm.core.hub.LayoutEncoding; import com.oracle.svm.core.log.Log; +import jdk.graal.compiler.word.Word; + /** * This visitor is handed Pointers to Object references and if necessary it promotes the * referenced Object and replaces the Object reference with a forwarding pointer. 
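The hunk below adds an early exit for the compacting collector: during a complete collection, a reference to an already-marked object needs at most a card-table update, not a promotion. A schematic of the visit logic, with hypothetical names:

```java
/** Schematic reference visit during a collection; all names are hypothetical. */
final class RefVisitSketch {
    static final class Header { boolean marked; Object forwardedTo; }

    interface Heap {
        Header headerOf(Object obj);
        void dirtyCardIfNecessary(Object holder, Object target);
    }

    /** Returns the value that the visited reference slot should hold afterwards. */
    static Object visit(Heap heap, Object holder, Object target, boolean compactingCompleteGc) {
        Header h = heap.headerOf(target);
        if (compactingCompleteGc && h.marked) {
            // Already marked in this complete collection: no promotion needed,
            // only the holder's card may have to be dirtied.
            heap.dirtyCardIfNecessary(holder, target);
            return target;
        }
        if (h.forwardedTo != null) {
            // The object was copied elsewhere: replace the reference with the new location.
            return h.forwardedTo;
        }
        // Otherwise the object would be promoted here (omitted in this sketch).
        return target;
    }
}
```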
@@ -103,8 +104,13 @@ public boolean visitObjectReferenceInline(Pointer objRef, int innerOffset, boole return true; } - // Promote the Object if necessary, making it at least grey, and ... Object obj = p.toObject(); + if (SerialGCOptions.useCompactingOldGen() && ObjectHeaderImpl.isMarkedHeader(header)) { + RememberedSet.get().dirtyCardIfNecessary(holderObject, obj); + return true; + } + + // Promote the Object if necessary, making it at least grey, and ... assert innerOffset < LayoutEncoding.getSizeFromObjectInGC(obj).rawValue(); Object copy = GCImpl.getGCImpl().promoteObject(obj, header); if (copy != obj) { diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunk.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunk.java index 002d30d46e0b..98157657db81 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunk.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunk.java @@ -170,8 +170,8 @@ public interface Header> extends HeaderPadding { } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public static void initialize(Header chunk, Pointer objectsStart, UnsignedWord chunkSize) { - HeapChunk.setEndOffset(chunk, chunkSize); + public static void initialize(Header chunk, Pointer objectsStart, UnsignedWord endOffset) { + HeapChunk.setEndOffset(chunk, endOffset); HeapChunk.setTopPointer(chunk, objectsStart); HeapChunk.setSpace(chunk, null); HeapChunk.setNext(chunk, WordFactory.nullPointer()); @@ -203,7 +203,7 @@ public static void setTopPointer(Header that, Pointer newTop) { } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - static void setTopPointerCarefully(Header that, Pointer newTop) { + public static void setTopPointerCarefully(Header that, Pointer newTop) { assert getTopPointer(that).isNonNull() : "Not safe: top currently points to NULL."; assert getTopPointer(that).belowOrEqual(newTop) : "newTop too low."; assert newTop.belowOrEqual(getEndPointer(that)) : "newTop too high."; diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunkLogging.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunkLogging.java index 41297bf827ea..bebcbc09af16 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunkLogging.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunkLogging.java @@ -34,7 +34,7 @@ class HeapChunkLogging { private static final int MAX_CHUNKS_TO_PRINT = 64 * 1024; - public static void logChunks(Log log, AlignedHeapChunk.AlignedHeader firstChunk, String shortSpaceName, boolean isFromSpace) { + public static void logChunks(Log log, AlignedHeapChunk.AlignedHeader firstChunk, String shortSpaceName, boolean isToSpace) { if (firstChunk.isNonNull()) { int i = 0; AlignedHeapChunk.AlignedHeader chunk = firstChunk; @@ -43,7 +43,7 @@ public static void logChunks(Log log, AlignedHeapChunk.AlignedHeader firstChunk, Pointer top = HeapChunk.getTopPointer(chunk); Pointer end = AlignedHeapChunk.getObjectsEnd(chunk); - logChunk(log, chunk, bottom, top, end, true, shortSpaceName, isFromSpace); + logChunk(log, chunk, bottom, top, end, true, shortSpaceName, isToSpace); chunk = HeapChunk.getNext(chunk); i++; @@ -55,7 +55,7 @@ public static void 
logChunks(Log log, AlignedHeapChunk.AlignedHeader firstChunk, } } - public static void logChunks(Log log, UnalignedHeapChunk.UnalignedHeader firstChunk, String shortSpaceName, boolean isFromSpace) { + public static void logChunks(Log log, UnalignedHeapChunk.UnalignedHeader firstChunk, String shortSpaceName, boolean isToSpace) { if (firstChunk.isNonNull()) { int i = 0; UnalignedHeapChunk.UnalignedHeader chunk = firstChunk; @@ -64,7 +64,7 @@ public static void logChunks(Log log, UnalignedHeapChunk.UnalignedHeader firstCh Pointer top = HeapChunk.getTopPointer(chunk); Pointer end = UnalignedHeapChunk.getObjectEnd(chunk); - logChunk(log, chunk, bottom, top, end, false, shortSpaceName, isFromSpace); + logChunk(log, chunk, bottom, top, end, false, shortSpaceName, isToSpace); chunk = HeapChunk.getNext(chunk); i++; @@ -76,7 +76,7 @@ public static void logChunks(Log log, UnalignedHeapChunk.UnalignedHeader firstCh } } - private static void logChunk(Log log, HeapChunk.Header<?> chunk, Pointer bottom, Pointer top, Pointer end, boolean isAligned, String shortSpaceName, boolean isFromSpace) { + private static void logChunk(Log log, HeapChunk.Header<?> chunk, Pointer bottom, Pointer top, Pointer end, boolean isAligned, String shortSpaceName, boolean isToSpace) { UnsignedWord used = top.subtract(bottom); UnsignedWord capacity = end.subtract(bottom); UnsignedWord usedPercent = used.multiply(100).unsignedDivide(capacity); @@ -85,7 +85,7 @@ private static void logChunk(Log log, HeapChunk.Header<?> chunk, Pointer bottom, log.string("|").unsigned(usedPercent, 3, RIGHT_ALIGN).string("%"); log.string("|").string(shortSpaceName, 3, RIGHT_ALIGN); log.string("|").string(isAligned ? "A" : "U"); - log.string("|").string(isFromSpace ? "" : "T"); + log.string("|").string(isToSpace ? "T" : ""); log.newline(); } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunkProvider.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunkProvider.java index 306456594045..ec4bbfeac58f 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunkProvider.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunkProvider.java @@ -278,7 +278,7 @@ private static void zap(Header<?> chunk, WordBase value) { } void logFreeChunks(Log log) { - HeapChunkLogging.logChunks(log, unusedAlignedChunks.get(), "F", true); + HeapChunkLogging.logChunks(log, unusedAlignedChunks.get(), "F", false); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java index 64a1e0e006e9..2bddeef9d6ef 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java @@ -100,7 +100,7 @@ public final class HeapImpl extends Heap { // Singleton instances, created during image generation.
private final YoungGeneration youngGeneration = new YoungGeneration("YoungGeneration"); - private final OldGeneration oldGeneration = new OldGeneration("OldGeneration"); + private final OldGeneration oldGeneration; private final HeapChunkProvider chunkProvider = new HeapChunkProvider(); private final ObjectHeaderImpl objectHeaderImpl = new ObjectHeaderImpl(); private final GCImpl gcImpl; @@ -125,6 +125,8 @@ public final class HeapImpl extends Heap { public HeapImpl() { this.gcImpl = new GCImpl(); this.runtimeCodeInfoGcSupport = new RuntimeCodeInfoGCSupportImpl(); + this.oldGeneration = SerialGCOptions.useCompactingOldGen() ? new CompactingOldGeneration("OldGeneration") + : new CopyingOldGeneration("OldGeneration"); HeapParameters.initialize(); DiagnosticThunkRegistry.singleton().add(new DumpHeapSettingsAndStatistics()); DiagnosticThunkRegistry.singleton().add(new DumpHeapUsage()); @@ -413,7 +415,7 @@ public void detachThread(IsolateThread isolateThread) { @Fold public static boolean usesImageHeapCardMarking() { Boolean enabled = SerialGCOptions.ImageHeapCardMarking.getValue(); - if (enabled == Boolean.FALSE || enabled == null && !SubstrateOptions.useRememberedSet()) { + if (enabled == Boolean.FALSE || enabled == null && !SerialGCOptions.useRememberedSet()) { return false; } else if (enabled == null) { return isImageHeapAligned(); @@ -707,7 +709,7 @@ public long getThreadAllocatedMemory(IsolateThread thread) { @Override @Uninterruptible(reason = "Ensure that no GC can occur between modification of the object and this call.", callerMustBe = true) public void dirtyAllReferencesOf(Object obj) { - if (SubstrateOptions.useRememberedSet() && obj != null) { + if (SerialGCOptions.useRememberedSet() && obj != null) { ForcedSerialPostWriteBarrier.force(OffsetAddressNode.address(obj, 0), false); } } @@ -762,10 +764,10 @@ private boolean printLocationInfo(Log log, Pointer ptr, boolean allowJavaHeapAcc if (allowJavaHeapAccess) { // Accessing spaces and chunks is safe if we prevent a GC. 
- if (isInYoungGen(ptr)) { + if (youngGeneration.isInSpace(ptr)) { log.string("points into the young generation"); return true; - } else if (isInOldGen(ptr)) { + } else if (oldGeneration.isInSpace(ptr)) { log.string("points into the old generation"); return true; } @@ -780,51 +782,7 @@ private boolean printLocationInfo(Log log, Pointer ptr, boolean allowJavaHeapAcc } boolean isInHeap(Pointer ptr) { - return isInImageHeap(ptr) || isInYoungGen(ptr) || isInOldGen(ptr); - } - - @Uninterruptible(reason = "Prevent that chunks are freed.") - private boolean isInYoungGen(Pointer ptr) { - if (findPointerInSpace(youngGeneration.getEden(), ptr)) { - return true; - } - - for (int i = 0; i < youngGeneration.getMaxSurvivorSpaces(); i++) { - if (findPointerInSpace(youngGeneration.getSurvivorFromSpaceAt(i), ptr)) { - return true; - } - if (findPointerInSpace(youngGeneration.getSurvivorToSpaceAt(i), ptr)) { - return true; - } - } - return false; - } - - @Uninterruptible(reason = "Prevent that chunks are freed.") - private boolean isInOldGen(Pointer ptr) { - return findPointerInSpace(oldGeneration.getFromSpace(), ptr) || findPointerInSpace(oldGeneration.getToSpace(), ptr); - } - - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - private static boolean findPointerInSpace(Space space, Pointer p) { - AlignedHeapChunk.AlignedHeader aChunk = space.getFirstAlignedHeapChunk(); - while (aChunk.isNonNull()) { - Pointer start = AlignedHeapChunk.getObjectsStart(aChunk); - if (start.belowOrEqual(p) && p.belowThan(HeapChunk.getTopPointer(aChunk))) { - return true; - } - aChunk = HeapChunk.getNext(aChunk); - } - - UnalignedHeapChunk.UnalignedHeader uChunk = space.getFirstUnalignedHeapChunk(); - while (uChunk.isNonNull()) { - Pointer start = UnalignedHeapChunk.getObjectStart(uChunk); - if (start.belowOrEqual(p) && p.belowThan(HeapChunk.getTopPointer(uChunk))) { - return true; - } - uChunk = HeapChunk.getNext(uChunk); - } - return false; + return isInImageHeap(ptr) || youngGeneration.isInSpace(ptr) || oldGeneration.isInSpace(ptr); } private static boolean printTlabInfo(Log log, Pointer ptr) { diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapVerifier.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapVerifier.java index 5c78f2e509dd..87435a8de47e 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapVerifier.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapVerifier.java @@ -32,7 +32,6 @@ import org.graalvm.word.Pointer; import org.graalvm.word.WordFactory; -import com.oracle.svm.core.SubstrateOptions; import com.oracle.svm.core.config.ConfigurationValues; import com.oracle.svm.core.genscavenge.AlignedHeapChunk.AlignedHeader; import com.oracle.svm.core.genscavenge.UnalignedHeapChunk.UnalignedHeader; @@ -113,29 +112,16 @@ private static boolean verifyYoungGeneration(Occasion occasion) { } private static boolean verifyOldGeneration() { - boolean success = true; - OldGeneration oldGeneration = HeapImpl.getHeapImpl().getOldGeneration(); - Space fromSpace = oldGeneration.getFromSpace(); - Space toSpace = oldGeneration.getToSpace(); - - if (!toSpace.isEmpty()) { - Log.log().string("Old generation to-space contains chunks: firstAlignedChunk: ").zhex(toSpace.getFirstAlignedHeapChunk()).string(", firstUnalignedChunk: ") - .zhex(toSpace.getFirstUnalignedHeapChunk()).newline(); - success = false; - } - - 
success &= verifySpace(fromSpace); - success &= verifySpace(toSpace); - return success; + return HeapImpl.getHeapImpl().getOldGeneration().verifySpaces(); } - protected boolean verifyRememberedSets() { + private static boolean verifyRememberedSets() { /* * After we are done with all other verifications, it is guaranteed that the heap is in a * reasonable state. Now, we can verify the remembered sets without having to worry about * basic heap consistency. */ - if (!SubstrateOptions.useRememberedSet() || !SerialGCOptions.VerifyRememberedSet.getValue()) { + if (!SerialGCOptions.useRememberedSet() || !SerialGCOptions.VerifyRememberedSet.getValue()) { return true; } @@ -157,18 +143,18 @@ protected boolean verifyRememberedSets() { success &= rememberedSet.verify(info.getFirstWritableUnalignedChunk(), info.getLastWritableUnalignedChunk()); } - OldGeneration oldGeneration = HeapImpl.getHeapImpl().getOldGeneration(); - Space toSpace = oldGeneration.getToSpace(); - success &= rememberedSet.verify(toSpace.getFirstAlignedHeapChunk()); - success &= rememberedSet.verify(toSpace.getFirstUnalignedHeapChunk()); + success &= HeapImpl.getHeapImpl().getOldGeneration().verifyRememberedSets(); + return success; + } - Space fromSpace = oldGeneration.getFromSpace(); - success &= rememberedSet.verify(fromSpace.getFirstAlignedHeapChunk()); - success &= rememberedSet.verify(fromSpace.getFirstUnalignedHeapChunk()); + static boolean verifyRememberedSet(Space space) { + boolean success = true; + success &= RememberedSet.get().verify(space.getFirstAlignedHeapChunk()); + success &= RememberedSet.get().verify(space.getFirstUnalignedHeapChunk()); return success; } - private static boolean verifySpace(Space space) { + static boolean verifySpace(Space space) { boolean success = true; success &= verifyChunkList(space, "aligned", space.getFirstAlignedHeapChunk(), space.getLastAlignedHeapChunk()); success &= verifyChunkList(space, "unaligned", space.getFirstUnalignedHeapChunk(), space.getLastUnalignedHeapChunk()); @@ -210,6 +196,11 @@ private static boolean verifyAlignedChunks(Space space, AlignedHeader firstAlign success = false; } + if (aChunk.getShouldSweepInsteadOfCompact()) { + Log.log().string("Aligned chunk ").zhex(aChunk).string(" is marked for sweeping, but this flag should only be set during collections.").newline(); + success = false; + } + OBJECT_VERIFIER.initialize(aChunk, WordFactory.nullPointer()); AlignedHeapChunk.walkObjects(aChunk, OBJECT_VERIFIER); aChunk = HeapChunk.getNext(aChunk); @@ -269,6 +260,11 @@ private static boolean verifyObject(Object obj, AlignedHeader aChunk, UnalignedH return false; } + if (SerialGCOptions.useCompactingOldGen() && ObjectHeaderImpl.isMarkedHeader(header)) { + Log.log().string("Object ").zhex(ptr).string(" has a marked header: ").zhex(header).newline(); + return false; + } + assert aChunk.isNonNull() ^ uChunk.isNonNull(); HeapChunk.Header<?> chunk = aChunk.isNonNull() ? aChunk : uChunk; if (HeapImpl.isImageHeapAligned() || !HeapImpl.getHeapImpl().isInImageHeap(obj)) { @@ -309,7 +305,7 @@ private static boolean verifyObject(Object obj, AlignedHeader aChunk, UnalignedH // we can't verify that this bit is set.
} else if (space.isOldSpace()) { - if (SubstrateOptions.useRememberedSet() && !RememberedSet.get().hasRememberedSet(header)) { + if (SerialGCOptions.useRememberedSet() && !RememberedSet.get().hasRememberedSet(header)) { Log.log().string("Object ").zhex(ptr).string(" is in old generation chunk ").zhex(chunk).string(" but does not have a remembered set.").newline(); return false; } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ObjectHeaderImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ObjectHeaderImpl.java index c9f5af276171..8639528db37f 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ObjectHeaderImpl.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ObjectHeaderImpl.java @@ -64,8 +64,9 @@ */ public final class ObjectHeaderImpl extends ObjectHeader { private static final UnsignedWord UNALIGNED_BIT = WordFactory.unsigned(0b00001); - private static final UnsignedWord REMEMBERED_SET_BIT = WordFactory.unsigned(0b00010); - private static final UnsignedWord FORWARDED_BIT = WordFactory.unsigned(0b00100); + private static final UnsignedWord REMSET_OR_MARKED1_BIT = WordFactory.unsigned(0b00010); + private static final UnsignedWord FORWARDED_OR_MARKED2_BIT = WordFactory.unsigned(0b00100); + private static final UnsignedWord MARKED_BITS = REMSET_OR_MARKED1_BIT.or(FORWARDED_OR_MARKED2_BIT); /** * Optional: per-object identity hash code state to avoid a fixed field, initially implicitly @@ -220,7 +221,7 @@ public boolean hasIdentityHashFromAddress(Word header) { @AlwaysInline("GC performance") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - static boolean hasIdentityHashFromAddressInline(Word header) { + public static boolean hasIdentityHashFromAddressInline(Word header) { if (GraalDirectives.inIntrinsic()) { ReplacementsUtil.staticAssert(isIdentityHashFieldOptional(), "use only when hashcode fields are optional"); } else { @@ -267,7 +268,7 @@ public Word encodeAsObjectHeader(DynamicHub hub, boolean rememberedSet, boolean result = result.shiftLeft(numReservedExtraBits); } if (rememberedSet) { - result = result.or(REMEMBERED_SET_BIT); + result = result.or(REMSET_OR_MARKED1_BIT); } if (unaligned) { result = result.or(UNALIGNED_BIT); @@ -307,7 +308,7 @@ public long encodeAsImageHeapObjectHeader(ImageHeapObject obj, long hubOffsetFro assert (header & reservedBitsMask) == 0 : "Object header bits must be zero initially"; if (obj.getPartition() instanceof ChunkedImageHeapPartition partition) { if (partition.isWritable() && HeapImpl.usesImageHeapCardMarking()) { - header |= REMEMBERED_SET_BIT.rawValue(); + header |= REMSET_OR_MARKED1_BIT.rawValue(); } if (partition.usesUnalignedObjects()) { header |= UNALIGNED_BIT.rawValue(); @@ -345,13 +346,48 @@ public static boolean isUnalignedHeader(UnsignedWord header) { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static void setRememberedSetBit(Object o) { UnsignedWord oldHeader = readHeaderFromObject(o); - UnsignedWord newHeader = oldHeader.or(REMEMBERED_SET_BIT); + assert oldHeader.and(FORWARDED_OR_MARKED2_BIT).equal(0); + UnsignedWord newHeader = oldHeader.or(REMSET_OR_MARKED1_BIT); writeHeaderToObject(o, newHeader); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static boolean hasRememberedSet(UnsignedWord header) { - return 
header.and(REMEMBERED_SET_BIT).notEqual(0); + return header.and(REMSET_OR_MARKED1_BIT).notEqual(0); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public static void setMarked(Object o) { + if (!SerialGCOptions.useCompactingOldGen()) { // not guarantee(): always folds, prevent call + throw VMError.shouldNotReachHere("Only for compacting GC."); + } + UnsignedWord header = readHeaderFromObject(o); + assert header.and(FORWARDED_OR_MARKED2_BIT).equal(0) : "forwarded or already marked"; + /* + * The remembered bit is already set if the object was in the old generation before, or + * unset if it was only just absorbed from the young generation, in which case we set it. + */ + writeHeaderToObject(o, header.or(MARKED_BITS)); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public static void unsetMarkedAndKeepRememberedSetBit(Object o) { + UnsignedWord header = readHeaderFromObject(o); + assert isMarkedHeader(header); + writeHeaderToObject(o, header.and(FORWARDED_OR_MARKED2_BIT.not())); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public static boolean isMarked(Object o) { + return isMarkedHeader(readHeaderFromObject(o)); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public static boolean isMarkedHeader(UnsignedWord header) { + if (!SerialGCOptions.useCompactingOldGen()) { + throw VMError.shouldNotReachHere("Only for compacting GC."); + } + return header.and(MARKED_BITS).equal(MARKED_BITS); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) @@ -362,16 +398,16 @@ static boolean isPointerToForwardedObject(Pointer p) { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static boolean isForwardedHeader(UnsignedWord header) { - return header.and(FORWARDED_BIT).notEqual(0); + return header.and(MARKED_BITS).equal(FORWARDED_OR_MARKED2_BIT); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - Object getForwardedObject(Pointer ptr) { + public Object getForwardedObject(Pointer ptr) { return getForwardedObject(ptr, readHeaderFromPointer(ptr)); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - Object getForwardedObject(Pointer ptr, UnsignedWord header) { + public Object getForwardedObject(Pointer ptr, UnsignedWord header) { assert isForwardedHeader(header); if (ReferenceAccess.singleton().haveCompressedReferences()) { if (hasShift()) { @@ -417,7 +453,7 @@ private UnsignedWord getForwardHeader(Object copy) { } assert getHeaderBitsFromHeader(result).equal(0); - return result.or(FORWARDED_BIT); + return result.or(FORWARDED_OR_MARKED2_BIT); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/OldGeneration.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/OldGeneration.java index dabffb701de7..22c43d93962a 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/OldGeneration.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/OldGeneration.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2024, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,145 +27,50 @@ import static jdk.graal.compiler.nodes.extended.BranchProbabilityNode.EXTREMELY_SLOW_PATH_PROBABILITY; import static jdk.graal.compiler.nodes.extended.BranchProbabilityNode.probability; -import org.graalvm.nativeimage.Platform; -import org.graalvm.nativeimage.Platforms; +import org.graalvm.word.Pointer; import org.graalvm.word.UnsignedWord; -import com.oracle.svm.core.AlwaysInline; import com.oracle.svm.core.Uninterruptible; import com.oracle.svm.core.genscavenge.GCImpl.ChunkReleaser; import com.oracle.svm.core.genscavenge.remset.RememberedSet; -import com.oracle.svm.core.heap.ObjectVisitor; -import com.oracle.svm.core.log.Log; import com.oracle.svm.core.thread.VMOperation; import com.oracle.svm.core.util.VMError; -/** - * An OldGeneration has two Spaces, {@link #fromSpace} for existing objects, and {@link #toSpace} - * for newly-allocated or promoted objects. - */ -public final class OldGeneration extends Generation { - /* This Spaces are final and are flipped by transferring chunks from one to the other. */ - private final Space fromSpace; - private final Space toSpace; - - private final GreyObjectsWalker toGreyObjectsWalker = new GreyObjectsWalker(); - - @Platforms(Platform.HOSTED_ONLY.class) +public abstract class OldGeneration extends Generation { OldGeneration(String name) { super(name); - int age = HeapParameters.getMaxSurvivorSpaces() + 1; - this.fromSpace = new Space("Old", "O", true, age); - this.toSpace = new Space("Old To", "O", false, age); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - void tearDown() { - fromSpace.tearDown(); - toSpace.tearDown(); - } + abstract void beginPromotion(boolean incrementalGc); - @Override - public boolean walkObjects(ObjectVisitor visitor) { - return getFromSpace().walkObjects(visitor) && getToSpace().walkObjects(visitor); - } - - /** Promote an Object to ToSpace if it is not already in ToSpace. 
*/ - @AlwaysInline("GC performance") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - @Override - public Object promoteAlignedObject(Object original, AlignedHeapChunk.AlignedHeader originalChunk, Space originalSpace) { - assert originalSpace.isFromSpace(); - return getToSpace().promoteAlignedObject(original, originalSpace); - } + abstract void blackenDirtyCardRoots(GreyToBlackObjectVisitor visitor); - @AlwaysInline("GC performance") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - @Override - protected Object promoteUnalignedObject(Object original, UnalignedHeapChunk.UnalignedHeader originalChunk, Space originalSpace) { - assert originalSpace.isFromSpace(); - getToSpace().promoteUnalignedHeapChunk(originalChunk, originalSpace); - return original; - } + abstract boolean scanGreyObjects(boolean incrementalGc); - @Override - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - protected boolean promoteChunk(HeapChunk.Header originalChunk, boolean isAligned, Space originalSpace) { - assert originalSpace.isFromSpace(); - if (isAligned) { - getToSpace().promoteAlignedHeapChunk((AlignedHeapChunk.AlignedHeader) originalChunk, originalSpace); - } else { - getToSpace().promoteUnalignedHeapChunk((UnalignedHeapChunk.UnalignedHeader) originalChunk, originalSpace); - } - return true; - } + abstract void sweepAndCompact(Timers timers, ChunkReleaser chunkReleaser); - void releaseSpaces(ChunkReleaser chunkReleaser) { - getFromSpace().releaseChunks(chunkReleaser); - } + abstract void releaseSpaces(ChunkReleaser chunkReleaser); - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - void prepareForPromotion() { - toGreyObjectsWalker.setScanStart(getToSpace()); - } + abstract void swapSpaces(); @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - boolean scanGreyObjects() { - if (!toGreyObjectsWalker.haveGreyObjects()) { - return false; - } - toGreyObjectsWalker.walkGreyObjects(); - return true; - } + abstract UnsignedWord getChunkBytes(); - @Override - public void logUsage(Log log) { - getFromSpace().logUsage(log, true); - getToSpace().logUsage(log, false); - } + abstract UnsignedWord computeObjectBytes(); - @Override - public void logChunks(Log log) { - getFromSpace().logChunks(log); - getToSpace().logChunks(log); - } + abstract boolean isInSpace(Pointer ptr); - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - Space getFromSpace() { - return fromSpace; - } + abstract boolean verifyRememberedSets(); - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - Space getToSpace() { - return toSpace; - } + abstract boolean verifySpaces(); - void swapSpaces() { - assert getFromSpace().isEmpty() : "fromSpace should be empty."; - getFromSpace().absorb(getToSpace()); - } - - /* Extract all the HeapChunks from FromSpace and append them to ToSpace. */ - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - void emptyFromSpaceIntoToSpace() { - getToSpace().absorb(getFromSpace()); - } - - /** - * This value is only updated during a GC. Be careful when calling this method during a GC as it - * might wrongly include chunks that will be freed at the end of the GC. 
- */ @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - UnsignedWord getChunkBytes() { - return fromSpace.getChunkBytes().add(toSpace.getChunkBytes()); - } - - UnsignedWord computeObjectBytes() { - return fromSpace.computeObjectBytes().add(toSpace.computeObjectBytes()); - } + abstract void tearDown(); @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - @SuppressWarnings("static-method") AlignedHeapChunk.AlignedHeader requestAlignedChunk() { assert VMOperation.isGCInProgress() : "Should only be called from the collector."; AlignedHeapChunk.AlignedHeader chunk = HeapImpl.getChunkProvider().produceAlignedChunk(); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReferenceObjectProcessing.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReferenceObjectProcessing.java index ad326c6c4d78..477fb6aadb4c 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReferenceObjectProcessing.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReferenceObjectProcessing.java @@ -121,8 +121,7 @@ private static void discover(Object obj, ObjectReferenceVisitor refVisitor) { } Object refObject = referentAddr.toObject(); if (willSurviveThisCollection(refObject)) { - // Referent is in a to-space. So, this is either an object that got promoted without - // being moved or an object in the old gen. + // Either an object that got promoted without being moved or an object in the old gen. RememberedSet.get().dirtyCardIfNecessary(dr, refObject); return; } @@ -137,7 +136,7 @@ private static void discover(Object obj, ObjectReferenceVisitor refVisitor) { // Important: we need to pass the reference object as holder so that the remembered // set can be updated accordingly! 
refVisitor.visitObjectReference(ReferenceInternals.getReferentFieldAddress(dr), true, dr); - return; // referent will survive and referent field has been updated + return; // referent will survive } } @@ -201,8 +200,14 @@ static void afterCollection(UnsignedWord freeBytes) { */ private static boolean processRememberedRef(Reference<?> dr) { Pointer refPointer = ReferenceInternals.getReferentPointer(dr); - assert refPointer.isNonNull() : "Referent is null: should not have been discovered"; assert !HeapImpl.getHeapImpl().isInImageHeap(refPointer) : "Image heap referent: should not have been discovered"; + + if (SerialGCOptions.useCompactingOldGen() && GCImpl.getGCImpl().isCompleteCollection()) { + assert refPointer.isNull() || !ObjectHeaderImpl.isPointerToForwardedObject(refPointer); + return refPointer.isNonNull(); + } + + assert refPointer.isNonNull() : "Referent is null: should not have been discovered"; if (maybeUpdateForwardedReference(dr, refPointer)) { return true; } @@ -236,8 +241,33 @@ private static boolean maybeUpdateForwardedReference(Reference<?> dr, Pointer re @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) private static boolean willSurviveThisCollection(Object obj) { + if (SerialGCOptions.useCompactingOldGen() && GCImpl.getGCImpl().isCompleteCollection()) { + return ObjectHeaderImpl.isMarked(obj); + } HeapChunk.Header<?> chunk = HeapChunk.getEnclosingHeapChunk(obj); Space space = HeapChunk.getSpace(chunk); - return !space.isFromSpace(); + return space.isToSpace() || space.isCompactingOldSpace(); + } + + static void updateForwardedRefs() { + assert SerialGCOptions.useCompactingOldGen(); + + Reference<?> current = rememberedRefsList; + while (current != null) { + // Get the next node (the last node has a cyclic reference to self). + Reference<?> next = ReferenceInternals.getNextDiscovered(current); + assert next != null; + next = (next != current) ?
next : null; + + Pointer refPointer = ReferenceInternals.getReferentPointer(current); + if (!maybeUpdateForwardedReference(current, refPointer)) { + UnsignedWord header = ObjectHeader.readHeaderFromPointer(refPointer); + if (!ObjectHeaderImpl.isMarkedHeader(header)) { + ReferenceInternals.setReferent(current, null); + } + } + + current = next; + } } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/RuntimeCodeCacheReachabilityAnalyzer.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/RuntimeCodeCacheReachabilityAnalyzer.java index 8c1b27a372e9..60680663bbcf 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/RuntimeCodeCacheReachabilityAnalyzer.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/RuntimeCodeCacheReachabilityAnalyzer.java @@ -24,7 +24,6 @@ */ package com.oracle.svm.core.genscavenge; -import jdk.graal.compiler.word.Word; import org.graalvm.nativeimage.Platform; import org.graalvm.nativeimage.Platforms; import org.graalvm.word.Pointer; @@ -36,6 +35,8 @@ import com.oracle.svm.core.hub.DynamicHub; import com.oracle.svm.core.util.DuplicatedInNativeCode; +import jdk.graal.compiler.word.Word; + @DuplicatedInNativeCode final class RuntimeCodeCacheReachabilityAnalyzer implements ObjectReferenceVisitor { private boolean unreachableObjects; @@ -74,12 +75,16 @@ public static boolean isReachable(Pointer ptrToObj) { if (ObjectHeaderImpl.isForwardedHeader(header)) { return true; } - + if (SerialGCOptions.useCompactingOldGen() && ObjectHeaderImpl.isMarkedHeader(header)) { + return true; + } Space space = HeapChunk.getSpace(HeapChunk.getEnclosingHeapChunk(ptrToObj, header)); - if (!space.isFromSpace()) { + if (space.isToSpace()) { + return true; + } + if (space.isCompactingOldSpace() && !GCImpl.getGCImpl().isCompleteCollection()) { return true; } - Class<?> clazz = DynamicHub.toClass(ohi.dynamicHubFromObjectHeader(header)); return isAssumedReachable(clazz); } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/SerialGCOptions.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/SerialGCOptions.java index e7276e6b53cd..650a36335793 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/SerialGCOptions.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/SerialGCOptions.java @@ -27,10 +27,13 @@ import org.graalvm.collections.UnmodifiableEconomicMap; import com.oracle.svm.core.SubstrateOptions; +import com.oracle.svm.core.genscavenge.compacting.ObjectMoveInfo; import com.oracle.svm.core.option.HostedOptionKey; import com.oracle.svm.core.option.RuntimeOptionKey; +import com.oracle.svm.core.option.SubstrateOptionsParser; import com.oracle.svm.core.util.UserError; +import jdk.graal.compiler.api.replacements.Fold; import jdk.graal.compiler.options.Option; import jdk.graal.compiler.options.OptionKey; import jdk.graal.compiler.options.OptionType; @@ -107,6 +110,15 @@ public Integer getValue(OptionValues values) { @Option(help = "Ignore the maximum heap size while in VM-internal code.", type = OptionType.Expert)// public static final HostedOptionKey<Boolean> IgnoreMaxHeapSizeWhileInVMOperation = new HostedOptionKey<>(false, SerialGCOptions::serialGCOnly); + /** Query these options only through an appropriate method.
*/ + public static class ConcealedOptions { + @Option(help = "Collect old generation by compacting in-place instead of copying.", type = OptionType.Expert) // + public static final HostedOptionKey<Boolean> CompactingOldGen = new HostedOptionKey<>(false, SerialGCOptions::validateCompactingOldGen); + + @Option(help = "Determines if a remembered set is used, which is necessary for collecting the young and old generation independently.", type = OptionType.Expert) // + public static final HostedOptionKey<Boolean> UseRememberedSet = new HostedOptionKey<>(true, SerialGCOptions::serialGCOnly); + } + private SerialGCOptions() { } @@ -115,4 +127,29 @@ private static void serialGCOnly(OptionKey<?> optionKey) { throw UserError.abort("The option '" + optionKey.getName() + "' can only be used together with the serial garbage collector ('--gc=serial')."); } } + + private static void validateCompactingOldGen(HostedOptionKey<Boolean> compactingOldGen) { + if (!compactingOldGen.getValue()) { + return; + } + serialGCOnly(compactingOldGen); + if (!useRememberedSet()) { + throw UserError.abort("%s requires %s.", SubstrateOptionsParser.commandArgument(ConcealedOptions.CompactingOldGen, "+"), + SubstrateOptionsParser.commandArgument(ConcealedOptions.UseRememberedSet, "+")); + } + if (SerialAndEpsilonGCOptions.AlignedHeapChunkSize.getValue() > ObjectMoveInfo.MAX_CHUNK_SIZE) { + throw UserError.abort("%s requires %s.", SubstrateOptionsParser.commandArgument(ConcealedOptions.CompactingOldGen, "+"), + SubstrateOptionsParser.commandArgument(SerialAndEpsilonGCOptions.AlignedHeapChunkSize, "")); + } + } + + @Fold + public static boolean useRememberedSet() { + return !SubstrateOptions.UseEpsilonGC.getValue() && ConcealedOptions.UseRememberedSet.getValue(); + } + + @Fold + public static boolean useCompactingOldGen() { + return !SubstrateOptions.UseEpsilonGC.getValue() && ConcealedOptions.CompactingOldGen.getValue(); + } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java index c03cb861a1b2..cdc9b6b11abb 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java @@ -61,7 +61,7 @@ public final class Space { private final String name; private final String shortName; - private final boolean isFromSpace; + private final boolean isToSpace; private final int age; private final ChunksAccounting accounting; @@ -77,16 +77,16 @@ public final class Space { * collections so they should not move.
*/ @Platforms(Platform.HOSTED_ONLY.class) - Space(String name, String shortName, boolean isFromSpace, int age) { - this(name, shortName, isFromSpace, age, null); + Space(String name, String shortName, boolean isToSpace, int age) { + this(name, shortName, isToSpace, age, null); } @Platforms(Platform.HOSTED_ONLY.class) - Space(String name, String shortName, boolean isFromSpace, int age, ChunksAccounting parentAccounting) { + Space(String name, String shortName, boolean isToSpace, int age, ChunksAccounting parentAccounting) { assert name != null : "Space name should not be null."; this.name = name; this.shortName = shortName; - this.isFromSpace = isFromSpace; + this.isToSpace = isToSpace; this.age = age; this.accounting = new ChunksAccounting(parentAccounting); } @@ -132,6 +132,12 @@ public boolean isOldSpace() { return age == (HeapParameters.getMaxSurvivorSpaces() + 1); } + @AlwaysInline("GC performance.") + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public boolean isCompactingOldSpace() { + return SerialGCOptions.useCompactingOldGen() && isOldSpace(); + } + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) int getAge() { return age; @@ -142,9 +148,15 @@ int getNextAgeForPromotion() { return age + 1; } + @AlwaysInline("GC performance.") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) boolean isFromSpace() { - return isFromSpace; + return !isToSpace && !isCompactingOldSpace(); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + boolean isToSpace() { + return isToSpace; } public boolean walkObjects(ObjectVisitor visitor) { @@ -165,6 +177,17 @@ public boolean walkObjects(ObjectVisitor visitor) { return true; } + boolean walkAlignedHeapChunks(AlignedHeapChunk.Visitor visitor) { + AlignedHeapChunk.AlignedHeader chunk = getFirstAlignedHeapChunk(); + while (chunk.isNonNull()) { + if (!visitor.visitChunk(chunk)) { + return false; + } + chunk = HeapChunk.getNext(chunk); + } + return true; + } + public void logUsage(Log log, boolean logIfEmpty) { UnsignedWord chunkBytes; if (isEdenSpace() && !VMOperation.isGCInProgress()) { @@ -181,8 +204,8 @@ public void logUsage(Log log, boolean logIfEmpty) { } public void logChunks(Log log) { - HeapChunkLogging.logChunks(log, getFirstAlignedHeapChunk(), shortName, isFromSpace); - HeapChunkLogging.logChunks(log, getFirstUnalignedHeapChunk(), shortName, isFromSpace); + HeapChunkLogging.logChunks(log, getFirstAlignedHeapChunk(), shortName, isToSpace()); + HeapChunkLogging.logChunks(log, getFirstUnalignedHeapChunk(), shortName, isToSpace()); } /** @@ -373,9 +396,9 @@ private void setLastUnalignedHeapChunk(UnalignedHeapChunk.UnalignedHeader chunk) /** Promote an aligned Object to this Space. 
*/ @AlwaysInline("GC performance") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - Object promoteAlignedObject(Object original, Space originalSpace) { + Object copyAlignedObject(Object original, Space originalSpace) { assert ObjectHeaderImpl.isAlignedObject(original); - assert this != originalSpace && originalSpace.isFromSpace(); + assert originalSpace.isFromSpace() || (originalSpace == this && isCompactingOldSpace()); Object copy = copyAlignedObject(original); if (copy != null) { @@ -423,10 +446,19 @@ private Object copyAlignedObject(Object originalObj) { ObjectHeaderImpl.getObjectHeaderImpl().setIdentityHashInField(copy); } if (isOldSpace()) { - // If the object was promoted to the old gen, we need to take care of the remembered - // set bit and the first object table (even when promoting from old to old). - AlignedHeapChunk.AlignedHeader copyChunk = AlignedHeapChunk.getEnclosingChunk(copy); - RememberedSet.get().enableRememberedSetForObject(copyChunk, copy); + if (SerialGCOptions.useCompactingOldGen() && GCImpl.getGCImpl().isCompleteCollection()) { + /* + * In a compacting complete collection, the remembered set bit is set already during + * marking and the first object table is built later during compaction. + */ + } else { + /* + * If an object was copied to the old generation, its remembered set bit must be set + * and the first object table must be updated (even when copying from old to old). + */ + AlignedHeapChunk.AlignedHeader copyChunk = AlignedHeapChunk.getEnclosingChunk(copy); + RememberedSet.get().enableRememberedSetForObject(copyChunk, copy); + } } return copy; } @@ -502,6 +534,7 @@ void absorb(Space src) { appendUnalignedHeapChunk(uChunk); uChunk = next; } + assert src.isEmpty(); } /** @@ -563,4 +596,25 @@ private UnsignedWord computeUnalignedObjectBytes() { } return result; } + + boolean contains(Pointer p) { + AlignedHeapChunk.AlignedHeader aChunk = getFirstAlignedHeapChunk(); + while (aChunk.isNonNull()) { + Pointer start = AlignedHeapChunk.getObjectsStart(aChunk); + if (start.belowOrEqual(p) && p.belowThan(HeapChunk.getTopPointer(aChunk))) { + return true; + } + aChunk = HeapChunk.getNext(aChunk); + } + + UnalignedHeapChunk.UnalignedHeader uChunk = getFirstUnalignedHeapChunk(); + while (uChunk.isNonNull()) { + Pointer start = UnalignedHeapChunk.getObjectStart(uChunk); + if (start.belowOrEqual(p) && p.belowThan(HeapChunk.getTopPointer(uChunk))) { + return true; + } + uChunk = HeapChunk.getNext(uChunk); + } + return false; + } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Timers.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Timers.java index 0e480fe082ae..6d34182d2194 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Timers.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Timers.java @@ -114,14 +114,24 @@ final class Timers { final Timer blackenImageHeapRoots = new Timer("blackenImageHeapRoots"); final Timer blackenDirtyCardRoots = new Timer("blackenDirtyCardRoots"); final Timer blackenStackRoots = new Timer("blackenStackRoots"); - final Timer cheneyScanFromRoots = new Timer("cheneyScanFromRoots"); - final Timer cheneyScanFromDirtyRoots = new Timer("cheneyScanFromDirtyRoots"); + final Timer scanFromRoots = new Timer("scanFromRoots"); + final Timer scanFromDirtyRoots = new Timer("scanFromDirtyRoots"); final Timer collection = new 
Timer("collection"); final Timer cleanCodeCache = new Timer("cleanCodeCache"); final Timer referenceObjects = new Timer("referenceObjects"); final Timer promotePinnedObjects = new Timer("promotePinnedObjects"); final Timer rootScan = new Timer("rootScan"); final Timer scanGreyObjects = new Timer("scanGreyObjects"); + final Timer oldPlanning = new Timer("oldPlanning"); + final Timer oldFixup = new Timer("oldFixup"); + final Timer oldFixupAlignedChunks = new Timer("oldFixupAlignedChunks"); + final Timer oldFixupImageHeap = new Timer("oldFixupImageHeap"); + final Timer oldFixupThreadLocals = new Timer("oldFixupThreadLocals"); + final Timer oldFixupRuntimeCodeCache = new Timer("oldFixupRuntimeCodeCache"); + final Timer oldFixupStack = new Timer("oldFixupStack"); + final Timer oldFixupUnalignedChunks = new Timer("oldFixupUnalignedChunks"); + final Timer oldCompaction = new Timer("oldCompaction"); + final Timer oldCompactionRememberedSets = new Timer("oldCompactionRememberedSets"); final Timer releaseSpaces = new Timer("releaseSpaces"); final Timer verifyAfter = new Timer("verifyAfter"); final Timer verifyBefore = new Timer("verifyBefore"); @@ -139,8 +149,8 @@ void resetAllExceptMutator() { verifyBefore.reset(); collection.reset(); rootScan.reset(); - cheneyScanFromRoots.reset(); - cheneyScanFromDirtyRoots.reset(); + scanFromRoots.reset(); + scanFromDirtyRoots.reset(); promotePinnedObjects.reset(); blackenStackRoots.reset(); walkThreadLocals.reset(); @@ -149,6 +159,18 @@ void resetAllExceptMutator() { blackenImageHeapRoots.reset(); blackenDirtyCardRoots.reset(); scanGreyObjects.reset(); + if (SerialGCOptions.useCompactingOldGen()) { + oldPlanning.reset(); + oldFixup.reset(); + oldFixupAlignedChunks.reset(); + oldFixupImageHeap.reset(); + oldFixupThreadLocals.reset(); + oldFixupRuntimeCodeCache.reset(); + oldFixupStack.reset(); + oldFixupUnalignedChunks.reset(); + oldCompaction.reset(); + oldCompactionRememberedSets.reset(); + } cleanCodeCache.reset(); referenceObjects.reset(); releaseSpaces.reset(); @@ -164,8 +186,8 @@ void logAfterCollection(Log log) { logOneTimer(log, " ", collection); logOneTimer(log, " ", verifyBefore); logOneTimer(log, " ", rootScan); - logOneTimer(log, " ", cheneyScanFromRoots); - logOneTimer(log, " ", cheneyScanFromDirtyRoots); + logOneTimer(log, " ", scanFromRoots); + logOneTimer(log, " ", scanFromDirtyRoots); logOneTimer(log, " ", promotePinnedObjects); logOneTimer(log, " ", blackenStackRoots); logOneTimer(log, " ", walkThreadLocals); @@ -174,6 +196,18 @@ void logAfterCollection(Log log) { logOneTimer(log, " ", blackenImageHeapRoots); logOneTimer(log, " ", blackenDirtyCardRoots); logOneTimer(log, " ", scanGreyObjects); + if (SerialGCOptions.useCompactingOldGen()) { + logOneTimer(log, " ", oldPlanning); + logOneTimer(log, " ", oldFixup); + logOneTimer(log, " ", oldFixupAlignedChunks); + logOneTimer(log, " ", oldFixupImageHeap); + logOneTimer(log, " ", oldFixupThreadLocals); + logOneTimer(log, " ", oldFixupRuntimeCodeCache); + logOneTimer(log, " ", oldFixupStack); + logOneTimer(log, " ", oldFixupUnalignedChunks); + logOneTimer(log, " ", oldCompaction); + logOneTimer(log, " ", oldCompactionRememberedSets); + } logOneTimer(log, " ", cleanCodeCache); logOneTimer(log, " ", referenceObjects); logOneTimer(log, " ", releaseSpaces); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java index 
10806e651939..847428e4ee2e 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java @@ -26,6 +26,7 @@ import org.graalvm.nativeimage.Platform; import org.graalvm.nativeimage.Platforms; +import org.graalvm.word.Pointer; import org.graalvm.word.UnsignedWord; import org.graalvm.word.WordFactory; @@ -48,7 +49,7 @@ public final class YoungGeneration extends Generation { @Platforms(Platform.HOSTED_ONLY.class) YoungGeneration(String name) { super(name); - this.eden = new Space("Eden", "E", true, 0); + this.eden = new Space("Eden", "E", false, 0); this.maxSurvivorSpaces = HeapParameters.getMaxSurvivorSpaces(); this.survivorFromSpaces = new Space[maxSurvivorSpaces]; this.survivorToSpaces = new Space[maxSurvivorSpaces]; @@ -56,8 +57,8 @@ public final class YoungGeneration extends Generation { this.survivorsToSpacesAccounting = new ChunksAccounting(); for (int i = 0; i < maxSurvivorSpaces; i++) { int age = i + 1; - this.survivorFromSpaces[i] = new Space("Survivor-" + age, "S" + age, true, age); - this.survivorToSpaces[i] = new Space("Survivor-" + age + " To", "S" + age, false, age, survivorsToSpacesAccounting); + this.survivorFromSpaces[i] = new Space("Survivor-" + age, "S" + age, false, age); + this.survivorToSpaces[i] = new Space("Survivor-" + age + " To", "S" + age, true, age, survivorsToSpacesAccounting); this.survivorGreyObjectsWalkers[i] = new GreyObjectsWalker(); } } @@ -153,7 +154,7 @@ void swapSpaces() { } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - void prepareForPromotion() { + void beginPromotion() { for (int i = 0; i < maxSurvivorSpaces; i++) { assert getSurvivorToSpaceAt(i).isEmpty() : "SurvivorToSpace should be empty."; getSurvivorGreyObjectsWalker(i).setScanStart(getSurvivorToSpaceAt(i)); @@ -263,7 +264,7 @@ protected Object promoteAlignedObject(Object original, AlignedHeapChunk.AlignedH // survivor space. If it does not, we return null here to tell the caller. 
int age = originalSpace.getNextAgeForPromotion(); Space toSpace = getSurvivorToSpaceAt(age - 1); - return toSpace.promoteAlignedObject(original, originalSpace); + return toSpace.copyAlignedObject(original, originalSpace); } @AlwaysInline("GC performance") @@ -284,7 +285,7 @@ protected Object promoteUnalignedObject(Object original, UnalignedHeapChunk.Unal @Override @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - protected boolean promoteChunk(HeapChunk.Header<?> originalChunk, boolean isAligned, Space originalSpace) { + protected boolean promotePinnedObject(Object obj, HeapChunk.Header<?> originalChunk, boolean isAligned, Space originalSpace) { assert originalSpace.isFromSpace(); assert originalSpace.getAge() < maxSurvivorSpaces; if (!fitsInSurvivors(originalChunk, isAligned)) { @@ -330,4 +331,24 @@ AlignedHeapChunk.AlignedHeader requestAlignedSurvivorChunk() { } return HeapImpl.getChunkProvider().produceAlignedChunk(); } + + @Override + public void checkSanityAfterCollection() { + assert eden.isEmpty() : "eden should be empty after a collection."; + } + + boolean isInSpace(Pointer ptr) { + if (getEden().contains(ptr)) { + return true; + } + for (int i = 0; i < getMaxSurvivorSpaces(); i++) { + if (getSurvivorFromSpaceAt(i).contains(ptr)) { + return true; + } + if (getSurvivorToSpaceAt(i).contains(ptr)) { + return true; + } + } + return false; + } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/CompactingVisitor.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/CompactingVisitor.java new file mode 100644 index 000000000000..2a8912fe5b0c --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/CompactingVisitor.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.genscavenge.compacting; + +import org.graalvm.word.Pointer; +import org.graalvm.word.UnsignedWord; + +import com.oracle.svm.core.UnmanagedMemoryUtil; +import com.oracle.svm.core.genscavenge.AlignedHeapChunk; +import com.oracle.svm.core.genscavenge.HeapChunk; + +/** Moves sequences of live objects to their planned location during compaction.
*/ +public final class CompactingVisitor implements ObjectMoveInfo.Visitor { + private AlignedHeapChunk.AlignedHeader chunk; + + public void init(AlignedHeapChunk.AlignedHeader c) { + this.chunk = c; + HeapChunk.setTopPointer(c, AlignedHeapChunk.getObjectsStart(c)); + } + + @Override + public boolean visit(Pointer objSeq, UnsignedWord size, Pointer destAddress, Pointer nextObjSeq) { + if (size.equal(0)) { // gap right at the chunk's start + assert objSeq.equal(AlignedHeapChunk.getObjectsStart(chunk)); + return true; + } + + AlignedHeapChunk.AlignedHeader destChunk; + if (destAddress.equal(objSeq)) { + destChunk = chunk; + } else { + UnmanagedMemoryUtil.copy(objSeq, destAddress, size); + destChunk = AlignedHeapChunk.getEnclosingChunkFromObjectPointer(destAddress); + } + HeapChunk.setTopPointerCarefully(destChunk, destAddress.add(size)); + return true; + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/MarkStack.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/MarkStack.java new file mode 100644 index 000000000000..f32f0fd5e50a --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/MarkStack.java @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.svm.core.genscavenge.compacting; + +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; +import static jdk.vm.ci.code.CodeUtil.K; + +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; +import org.graalvm.nativeimage.c.struct.RawField; +import org.graalvm.nativeimage.c.struct.RawStructure; +import org.graalvm.nativeimage.c.struct.SizeOf; +import org.graalvm.nativeimage.c.struct.UniqueLocationIdentity; +import org.graalvm.word.PointerBase; +import org.graalvm.word.UnsignedWord; +import org.graalvm.word.WordFactory; + +import com.oracle.svm.core.AlwaysInline; +import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.config.ConfigurationValues; +import com.oracle.svm.core.memory.NullableNativeMemory; +import com.oracle.svm.core.nmt.NmtCategory; +import com.oracle.svm.core.util.VMError; + +import jdk.graal.compiler.api.replacements.Fold; +import jdk.graal.compiler.word.ObjectAccess; + +/** + * LIFO stack for objects to visit during the mark phase. Without it, recursive calls could exhaust + * the {@linkplain com.oracle.svm.core.stack.StackOverflowCheck yellow zone stack space} during GC. + */ +public final class MarkStack { + private static final int SEGMENT_SIZE = 64 * K - /* avoid potential malloc() overallocation */ 64; + + @Fold + static int entriesPerSegment() { + return (SEGMENT_SIZE - SizeOf.get(Segment.class)) / ConfigurationValues.getObjectLayout().getReferenceSize(); + } + + private Segment top; + private int cursor; + + @Platforms(Platform.HOSTED_ONLY.class) + public MarkStack() { + } + + @AlwaysInline("GC performance") + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public void push(Object obj) { + assert obj != null; + + if (top.isNull() || cursor == entriesPerSegment()) { + top = allocateSegment(top); + cursor = 0; + } + + UnsignedWord offset = getOffsetAtIndex(cursor); + ObjectAccess.writeObject(top, offset, obj); + cursor++; + } + + @AlwaysInline("GC performance") + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public Object pop() { + assert !isEmpty(); + + cursor--; + UnsignedWord offset = getOffsetAtIndex(cursor); + Object obj = ObjectAccess.readObject(top, offset); + + assert obj != null; + + if (cursor == 0) { + if (top.getNext().isNonNull()) { // free eagerly, use cursor==0 only if completely empty + Segment t = top; + top = top.getNext(); + cursor = entriesPerSegment(); + NullableNativeMemory.free(t); + } else { + // keep a single segment + } + } + + return obj; + } + + @AlwaysInline("GC performance") + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public boolean isEmpty() { + assert cursor != 0 || top.isNull() || top.getNext().isNull() : "should see cursor == 0 only with a single segment (or none)"; + return top.isNull() || cursor == 0; + } + + @RawStructure + interface Segment extends PointerBase { + + @RawField + @UniqueLocationIdentity + Segment getNext(); + + @RawField + @UniqueLocationIdentity + void setNext(Segment p); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + private static Segment allocateSegment(Segment next) { + UnsignedWord size = WordFactory.unsigned(SEGMENT_SIZE); + Segment segment = NullableNativeMemory.malloc(size, NmtCategory.GC); + VMError.guarantee(segment.isNonNull(), "Could not allocate mark stack memory: malloc() returned null."); + segment.setNext(next); + return segment; + } + + 
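/* + * Typical use during marking (an illustrative sketch; visitReferencesOf stands in for the + * visitor that marks referenced objects and pushes the ones that were not already marked): + * + * markStack.push(rootObject); + * while (!markStack.isEmpty()) { + * visitReferencesOf(markStack.pop()); + * } + */ + +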
@AlwaysInline("GC performance") + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + private static UnsignedWord getOffsetAtIndex(int index) { + int refSize = ConfigurationValues.getObjectLayout().getReferenceSize(); + return WordFactory.unsigned(index).multiply(refSize).add(SizeOf.unsigned(Segment.class)); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public void tearDown() { + if (top.isNonNull()) { + assert top.getNext().isNull(); + NullableNativeMemory.free(top); + top = WordFactory.nullPointer(); + } + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/ObjectFixupVisitor.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/ObjectFixupVisitor.java new file mode 100644 index 000000000000..8ff33a7f430f --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/ObjectFixupVisitor.java @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.genscavenge.compacting; + +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; +import static jdk.graal.compiler.nodes.extended.BranchProbabilityNode.SLOW_PATH_PROBABILITY; +import static jdk.graal.compiler.nodes.extended.BranchProbabilityNode.probability; + +import java.lang.ref.Reference; + +import com.oracle.svm.core.AlwaysInline; +import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.heap.ObjectVisitor; +import com.oracle.svm.core.heap.ReferenceInternals; +import com.oracle.svm.core.hub.DynamicHub; +import com.oracle.svm.core.hub.InteriorObjRefWalker; +import com.oracle.svm.core.snippets.KnownIntrinsics; +import com.oracle.svm.core.util.VMError; + +/** Visits surviving objects before compaction to update their references. 
+ */
+public final class ObjectFixupVisitor implements ObjectVisitor {
+    private final ObjectRefFixupVisitor refFixupVisitor;
+
+    public ObjectFixupVisitor(ObjectRefFixupVisitor refFixupVisitor) {
+        this.refFixupVisitor = refFixupVisitor;
+    }
+
+    @Override
+    @AlwaysInline("GC performance")
+    @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true)
+    public boolean visitObjectInline(Object obj) {
+        DynamicHub hub = KnownIntrinsics.readHub(obj);
+        if (probability(SLOW_PATH_PROBABILITY, hub.isReferenceInstanceClass())) {
+            // update Target_java_lang_ref_Reference.referent
+            Reference<?> dr = (Reference<?>) obj;
+            refFixupVisitor.visitObjectReferenceInline(ReferenceInternals.getReferentFieldAddress(dr), 0, true, dr);
+        }
+        InteriorObjRefWalker.walkObjectInline(obj, refFixupVisitor);
+        return true;
+    }
+
+    @Override
+    public boolean visitObject(Object o) {
+        throw VMError.shouldNotReachHere("for performance reasons");
+    }
+}
diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/ObjectMoveInfo.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/ObjectMoveInfo.java
new file mode 100644
index 000000000000..4ff328417040
--- /dev/null
+++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/ObjectMoveInfo.java
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */ +package com.oracle.svm.core.genscavenge.compacting; + +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; + +import com.oracle.svm.core.util.VMError; +import org.graalvm.word.Pointer; +import org.graalvm.word.UnsignedWord; +import org.graalvm.word.WordFactory; + +import com.oracle.svm.core.AlwaysInline; +import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.config.ConfigurationValues; +import com.oracle.svm.core.genscavenge.AlignedHeapChunk; +import com.oracle.svm.core.genscavenge.HeapChunk; +import com.oracle.svm.core.genscavenge.ObjectHeaderImpl; +import com.oracle.svm.core.genscavenge.remset.BrickTable; +import com.oracle.svm.core.hub.LayoutEncoding; + +import jdk.graal.compiler.api.replacements.Fold; + +/** + * {@link PlanningVisitor} decides where objects will be moved and uses the methods of this class to + * store this information in a structure directly before each contiguous sequence of live objects, + * where there is always a sufficiently large gap formed by unreachable objects (because the + * structure fits the minimum object size). This avoids reserving space in objects that is needed + * only for compaction, but also requires more passes over the heap and more expensive accesses to + * determine the new location of objects. + *

+ * The structure consists of the following fields, which are sized according to whether 8-byte or
+ * compressed 4-byte object references are in use, and in the latter case themselves use compression
+ * by shifting the (zero) object alignment bits.
+ * <ul>
+ * <li>New location: Provides the new address of the sequence of objects after compaction. This
+ * address can be outside of the current chunk.</li>
+ * <li>Size: The number of live object bytes that the sequence consists of.</li>
+ * <li>Next sequence offset: The number of bytes between the start of this object sequence and the
+ * subsequent object sequence. This forms a singly-linked list over all object sequences (and their
+ * structures) in an aligned chunk. An offset of 0 means that there are no more objects in the
+ * chunk.</li>
+ * </ul>
+ * The binary layout is as follows, with sizes given for both 8-byte/4-byte object references. The
+ * fields are arranged so that accesses to them are aligned.
+ *
+ * <pre>
+ * ------------------------+======================+==============+=========================+-------------------
+ *  ... gap (unused bytes) | new location (8B/4B) | size (4B/2B) | next seq offset (4B/2B) | live objects ...
+ * ------------------------+======================+==============+=========================+-------------------
+ *                                                                                         ^- object sequence start
+ * </pre>
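+ * <p>
+ * For illustration (assuming the compressed layout, i.e. 4-byte references with an 8-byte object
+ * alignment), writing a new location and reading it back amounts to:
+ *
+ * <pre>
+ * objSeqStart.writeInt(-8, (int) (newAddress.subtract(objSeqStart).rawValue() / 8));
+ * Pointer roundTripped = objSeqStart.add(WordFactory.signed(8L * objSeqStart.readInt(-8)));
+ * </pre>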
+ */ +public final class ObjectMoveInfo { + + /** + * The maximum size of aligned heap chunks, based on 2 bytes for the size and the next object + * sequence offset and an object alignment of 8 bytes. + */ + public static final int MAX_CHUNK_SIZE = ~(~0xffff * 8) + 1; + + static void setNewAddress(Pointer objSeqStart, Pointer newAddress) { + if (useCompressedLayout()) { + long offset = newAddress.subtract(objSeqStart).rawValue(); + offset /= ConfigurationValues.getObjectLayout().getAlignment(); + objSeqStart.writeInt(-8, (int) offset); + } else { + objSeqStart.writeWord(-16, newAddress); + } + assert getNewAddress(objSeqStart).equal(newAddress); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + static Pointer getNewAddress(Pointer objSeqStart) { + if (useCompressedLayout()) { + long offset = objSeqStart.readInt(-8); + offset *= ConfigurationValues.getObjectLayout().getAlignment(); + return objSeqStart.add(WordFactory.signed(offset)); + } else { + return objSeqStart.readWord(-16); + } + } + + static void setObjectSeqSize(Pointer objSeqStart, UnsignedWord nbytes) { + if (useCompressedLayout()) { + UnsignedWord value = nbytes.unsignedDivide(ConfigurationValues.getObjectLayout().getAlignment()); + objSeqStart.writeShort(-4, (short) value.rawValue()); + } else { + objSeqStart.writeInt(-8, (int) nbytes.rawValue()); + } + assert getObjectSeqSize(objSeqStart).equal(nbytes); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + static UnsignedWord getObjectSeqSize(Pointer objSeqStart) { + if (useCompressedLayout()) { + UnsignedWord value = WordFactory.unsigned(objSeqStart.readShort(-4) & 0xffff); + return value.multiply(ConfigurationValues.getObjectLayout().getAlignment()); + } else { + return WordFactory.unsigned(objSeqStart.readInt(-8)); + } + } + + static void setNextObjectSeqOffset(Pointer objSeqStart, UnsignedWord offset) { + if (useCompressedLayout()) { + UnsignedWord value = offset.unsignedDivide(ConfigurationValues.getObjectLayout().getAlignment()); + objSeqStart.writeShort(-2, (short) value.rawValue()); + } else { + objSeqStart.writeInt(-4, (int) offset.rawValue()); + } + assert getNextObjectSeqOffset(objSeqStart).equal(offset); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + static UnsignedWord getNextObjectSeqOffset(Pointer objSeqStart) { + if (useCompressedLayout()) { + UnsignedWord value = WordFactory.unsigned(objSeqStart.readShort(-2) & 0xffff); + return value.multiply(ConfigurationValues.getObjectLayout().getAlignment()); + } else { + return WordFactory.unsigned(objSeqStart.readInt(-4)); + } + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + static Pointer getNextObjectSeqAddress(Pointer objSeqStart) { + UnsignedWord offset = getNextObjectSeqOffset(objSeqStart); + if (offset.equal(0)) { + return WordFactory.nullPointer(); + } + return objSeqStart.add(offset); + } + + @Fold + static boolean useCompressedLayout() { + return ConfigurationValues.getObjectLayout().getReferenceSize() == Integer.BYTES; + } + + /** + * Walks aligned chunks with gaps between object sequences. 
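+ * Unlike a plain object walk, it skips the gaps by following the next-sequence offsets that were
+ * recorded during planning.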
+ * + * @see HeapChunk#walkObjectsFrom + * @see AlignedHeapChunk#walkObjects + */ + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public static void walkObjects(AlignedHeapChunk.AlignedHeader chunkHeader, ObjectFixupVisitor visitor) { + Pointer p = AlignedHeapChunk.getObjectsStart(chunkHeader); + do { + Pointer nextObjSeq = getNextObjectSeqAddress(p); + Pointer objSeqEnd = p.add(getObjectSeqSize(p)); + assert objSeqEnd.belowOrEqual(HeapChunk.getTopPointer(chunkHeader)); + while (p.notEqual(objSeqEnd)) { + assert p.belowThan(objSeqEnd); + Object obj = p.toObject(); + UnsignedWord objSize = LayoutEncoding.getSizeFromObjectInlineInGC(obj); + if (!visitor.visitObjectInline(obj)) { + throw VMError.shouldNotReachHereAtRuntime(); + } + p = p.add(objSize); + } + p = nextObjSeq; + } while (p.isNonNull()); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + static Pointer getNewObjectAddress(Pointer objPointer) { + assert ObjectHeaderImpl.isAlignedObject(objPointer.toObject()); + + AlignedHeapChunk.AlignedHeader chunk = AlignedHeapChunk.getEnclosingChunkFromObjectPointer(objPointer); + if (objPointer.aboveOrEqual(HeapChunk.getTopPointer(chunk))) { + return WordFactory.nullPointer(); // object did not survive, is in gap at chunk end + } + + Pointer objSeq = BrickTable.getEntry(chunk, BrickTable.getIndex(chunk, objPointer)); + if (objSeq.aboveThan(objPointer)) { // object not alive, in gap across brick table entries + return WordFactory.nullPointer(); + } + + Pointer nextObjSeq = getNextObjectSeqAddress(objSeq); + while (nextObjSeq.isNonNull() && nextObjSeq.belowOrEqual(objPointer)) { + objSeq = nextObjSeq; + nextObjSeq = getNextObjectSeqAddress(objSeq); + } + if (objPointer.aboveOrEqual(objSeq.add(getObjectSeqSize(objSeq)))) { + return WordFactory.nullPointer(); // object did not survive, in gap between objects + } + + Pointer newObjSeqAddress = getNewAddress(objSeq); + Pointer objOffset = objPointer.subtract(objSeq); + return newObjSeqAddress.add(objOffset); + } + + public static int getSize() { + return useCompressedLayout() ? 8 : 16; + } + + @AlwaysInline("GC performance: enables non-virtual visitor call") + public static void visit(AlignedHeapChunk.AlignedHeader chunk, Visitor visitor) { + Pointer p = AlignedHeapChunk.getObjectsStart(chunk); + UnsignedWord size = getObjectSeqSize(p); + Pointer newAddress = getNewAddress(p); + Pointer next = getNextObjectSeqAddress(p); + do { + // The visitor might overwrite the current and/or next move info, so read it eagerly. + UnsignedWord nextSize = next.isNonNull() ? getObjectSeqSize(next) : WordFactory.zero(); + Pointer nextNewAddress = next.isNonNull() ? getNewAddress(next) : WordFactory.nullPointer(); + Pointer nextNext = next.isNonNull() ? getNextObjectSeqAddress(next) : WordFactory.nullPointer(); + + if (!visitor.visit(p, size, newAddress, next)) { + return; + } + + p = next; + size = nextSize; + newAddress = nextNewAddress; + next = nextNext; + } while (p.isNonNull()); + } + + /** A closure to be applied to sequences of objects. */ + public interface Visitor { + /** + * Visit a sequence of objects with information that can be queried with + * {@link ObjectMoveInfo} methods. + * + * @return {@code true} if visiting should continue, {@code false} if visiting should stop. 
+ */ + boolean visit(Pointer objSeq, UnsignedWord size, Pointer newAddress, Pointer nextObjSeq); + } + + private ObjectMoveInfo() { + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/ObjectRefFixupVisitor.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/ObjectRefFixupVisitor.java new file mode 100644 index 000000000000..161e3ab686b6 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/ObjectRefFixupVisitor.java @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.genscavenge.compacting; + +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; + +import java.lang.ref.Reference; + +import org.graalvm.word.Pointer; + +import com.oracle.svm.core.AlwaysInline; +import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.genscavenge.HeapImpl; +import com.oracle.svm.core.genscavenge.ObjectHeaderImpl; +import com.oracle.svm.core.genscavenge.remset.RememberedSet; +import com.oracle.svm.core.heap.ObjectReferenceVisitor; +import com.oracle.svm.core.heap.ReferenceAccess; + +/** + * Updates each reference after marking and before compaction to point to the referenced object's + * future location. 
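+ * <p>
+ * Conceptually (an illustrative sketch only; the hypothetical {@code read}, {@code write},
+ * {@code isInImageHeap} and {@code isAlignedObject} helpers stand in for the
+ * {@code ReferenceAccess} and {@code ObjectHeaderImpl} calls in the implementation):
+ *
+ * <pre>
+ * Pointer p = read(objRef);
+ * if (p.isNonNull() && !isInImageHeap(p) && isAlignedObject(p)) {
+ *     write(objRef, ObjectMoveInfo.getNewObjectAddress(p));
+ * }
+ * </pre>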
+ */ +public final class ObjectRefFixupVisitor implements ObjectReferenceVisitor { + @Override + public boolean visitObjectReference(Pointer objRef, boolean compressed, Object holderObject) { + return visitObjectReferenceInline(objRef, 0, compressed, holderObject); + } + + @Override + @AlwaysInline("GC performance") + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public boolean visitObjectReferenceInline(Pointer objRef, int innerOffset, boolean compressed, Object holderObject) { + assert innerOffset == 0; + Pointer p = ReferenceAccess.singleton().readObjectAsUntrackedPointer(objRef, compressed); + if (p.isNull() || HeapImpl.getHeapImpl().isInImageHeap(p)) { + return true; + } + + Object obj; + Object original = p.toObject(); + if (ObjectHeaderImpl.isAlignedObject(original)) { + Pointer newLocation = ObjectMoveInfo.getNewObjectAddress(p); + assert newLocation.isNonNull() // + || holderObject == null // references from CodeInfo, invalidated or weak + || holderObject instanceof Reference; // cleared referent + + obj = newLocation.toObject(); + ReferenceAccess.singleton().writeObjectAt(objRef, obj, compressed); + } else { + obj = original; + } + if (HeapImpl.usesImageHeapCardMarking() && HeapImpl.getHeapImpl().isInImageHeap(holderObject)) { + RememberedSet.get().dirtyCardIfNecessary(holderObject, obj); + } + return true; + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/PlanningVisitor.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/PlanningVisitor.java new file mode 100644 index 000000000000..27ef374685f1 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/PlanningVisitor.java @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.svm.core.genscavenge.compacting; + +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; +import org.graalvm.word.Pointer; +import org.graalvm.word.UnsignedWord; +import org.graalvm.word.WordFactory; + +import com.oracle.svm.core.config.ConfigurationValues; +import com.oracle.svm.core.genscavenge.AlignedHeapChunk; +import com.oracle.svm.core.genscavenge.HeapChunk; +import com.oracle.svm.core.genscavenge.ObjectHeaderImpl; +import com.oracle.svm.core.genscavenge.Space; +import com.oracle.svm.core.genscavenge.remset.BrickTable; +import com.oracle.svm.core.hub.LayoutEncoding; + +import jdk.graal.compiler.word.Word; + +/** + * Decides where live objects will be moved during compaction and stores this information in gaps + * between them using {@link ObjectMoveInfo} so that {@link ObjectFixupVisitor}, + * {@link CompactingVisitor} and {@link SweepingVisitor} can update references and move live objects + * or overwrite dead objects. + */ +public final class PlanningVisitor implements AlignedHeapChunk.Visitor { + private AlignedHeapChunk.AlignedHeader allocChunk; + private Pointer allocPointer; + + @Platforms(Platform.HOSTED_ONLY.class) + public PlanningVisitor() { + } + + public void init(Space space) { + allocChunk = space.getFirstAlignedHeapChunk(); + allocPointer = AlignedHeapChunk.getObjectsStart(allocChunk); + } + + @Override + public boolean visitChunk(AlignedHeapChunk.AlignedHeader chunk) { + boolean sweeping = chunk.getShouldSweepInsteadOfCompact(); + Pointer initialTop = HeapChunk.getTopPointer(chunk); // top doesn't move until we are done + + Pointer objSeq = AlignedHeapChunk.getObjectsStart(chunk); + UnsignedWord gapSize = WordFactory.zero(); + UnsignedWord objSeqSize = WordFactory.zero(); + UnsignedWord brickIndex = WordFactory.zero(); + + /* Initialize the move info structure at the chunk's object start location. */ + ObjectMoveInfo.setNewAddress(objSeq, allocPointer); + ObjectMoveInfo.setObjectSeqSize(objSeq, WordFactory.zero()); + ObjectMoveInfo.setNextObjectSeqOffset(objSeq, WordFactory.zero()); + + BrickTable.setEntry(chunk, brickIndex, objSeq); + + Pointer p = objSeq; + while (p.belowThan(initialTop)) { + Word header = ObjectHeaderImpl.readHeaderFromPointer(p); + + UnsignedWord objSize; + if (ObjectHeaderImpl.isForwardedHeader(header)) { + /* + * If an object was copied from a chunk that won't be swept and forwarding was put + * in place, it was because we needed to add an identity hash code field to the + * object, and we need the object's original size here. + */ + assert !sweeping && ConfigurationValues.getObjectLayout().isIdentityHashFieldOptional(); + Object forwardedObj = ObjectHeaderImpl.getObjectHeaderImpl().getForwardedObject(p, header); + objSize = LayoutEncoding.getSizeFromObjectWithoutOptionalIdHashFieldInGC(forwardedObj); + } else { + objSize = LayoutEncoding.getSizeFromObjectInlineInGC(p.toObject()); + } + + if (ObjectHeaderImpl.isMarkedHeader(header)) { + ObjectHeaderImpl.unsetMarkedAndKeepRememberedSetBit(p.toObject()); + + /* + * Adding the optional identity hash field would increase an object's size, so we + * should have copied all objects that need one during marking instead. + */ + assert sweeping || !ConfigurationValues.getObjectLayout().isIdentityHashFieldOptional() || + !ObjectHeaderImpl.hasIdentityHashFromAddressInline(header); + + if (gapSize.notEqual(0)) { // end of a gap, start of an object sequence + // Link previous move info to here. 
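+                    // The offset is measured from the previous sequence's start, so later passes
+                    // can hop from one live run to the next without re-scanning the dead space in
+                    // between.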
+ ObjectMoveInfo.setNextObjectSeqOffset(objSeq, p.subtract(objSeq)); + + // Initialize new move info. + objSeq = p; + ObjectMoveInfo.setNextObjectSeqOffset(objSeq, WordFactory.zero()); + + gapSize = WordFactory.zero(); + } + + objSeqSize = objSeqSize.add(objSize); + + } else { // not marked, i.e. not alive and start of a gap of yet unknown size + if (objSeqSize.notEqual(0)) { // end of an object sequence + Pointer newAddress = sweeping ? objSeq : allocate(objSeqSize); + ObjectMoveInfo.setNewAddress(objSeq, newAddress); + ObjectMoveInfo.setObjectSeqSize(objSeq, objSeqSize); + + objSeqSize = WordFactory.zero(); + + /* Set brick table entries. */ + UnsignedWord currentBrick = BrickTable.getIndex(chunk, p); + while (brickIndex.belowThan(currentBrick)) { + brickIndex = brickIndex.add(1); + BrickTable.setEntry(chunk, brickIndex, objSeq); + } + } + gapSize = gapSize.add(objSize); + } + p = p.add(objSize); + } + assert gapSize.equal(0) || objSeqSize.equal(0); + + if (gapSize.notEqual(0)) { // truncate gap at chunk end + chunk.setTopOffset(chunk.getTopOffset().subtract(gapSize)); + } else if (objSeqSize.notEqual(0)) { + Pointer newAddress = sweeping ? objSeq : allocate(objSeqSize); + ObjectMoveInfo.setNewAddress(objSeq, newAddress); + ObjectMoveInfo.setObjectSeqSize(objSeq, objSeqSize); + } + + if (sweeping) { + /* + * Continue allocating for compaction after the swept memory. Note that this forfeits + * unused memory in the chunks before, but the order of objects must stay the same + * across all chunks. If chunks end up completely empty however, they will be released + * after compaction. + * + * GR-54021: it should be possible to avoid this limitation by sweeping chunks without + * ObjectMoveInfo and brick tables and potentially even do the sweeping right here. + */ + this.allocChunk = chunk; + this.allocPointer = HeapChunk.getTopPointer(chunk); + } + + /* Set remaining brick table entries at chunk end. */ + brickIndex = brickIndex.add(1); + while (brickIndex.belowThan(BrickTable.getLength())) { + BrickTable.setEntry(chunk, brickIndex, objSeq); + brickIndex = brickIndex.add(1); + } + + return true; + } + + private Pointer allocate(UnsignedWord size) { + Pointer p = allocPointer; + allocPointer = allocPointer.add(size); + if (allocPointer.aboveThan(AlignedHeapChunk.getObjectsEnd(allocChunk))) { + allocChunk = HeapChunk.getNext(allocChunk); + assert allocChunk.isNonNull(); + assert !allocChunk.getShouldSweepInsteadOfCompact(); + + p = AlignedHeapChunk.getObjectsStart(allocChunk); + allocPointer = p.add(size); + } + return p; + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/RuntimeCodeCacheFixupWalker.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/RuntimeCodeCacheFixupWalker.java new file mode 100644 index 000000000000..ae1b72d8685e --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/RuntimeCodeCacheFixupWalker.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2024, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.genscavenge.compacting; + +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; + +import com.oracle.svm.core.code.CodeInfo; +import com.oracle.svm.core.code.RuntimeCodeCache.CodeInfoVisitor; +import com.oracle.svm.core.code.RuntimeCodeInfoAccess; +import com.oracle.svm.core.genscavenge.SerialGCOptions; + +/** Before compaction, updates references from {@link CodeInfo} structures. */ +public final class RuntimeCodeCacheFixupWalker implements CodeInfoVisitor { + private final ObjectRefFixupVisitor visitor; + + @Platforms(Platform.HOSTED_ONLY.class) + public RuntimeCodeCacheFixupWalker(ObjectRefFixupVisitor visitor) { + assert SerialGCOptions.useCompactingOldGen(); + this.visitor = visitor; + } + + @Override + public boolean visitCode(CodeInfo codeInfo) { + if (RuntimeCodeInfoAccess.areAllObjectsOnImageHeap(codeInfo)) { + return true; + } + + /* + * Whether this CodeInfo remains valid or will be invalidated or freed during this GC, we + * update all its references, including clearing those to objects that do not survive. + */ + RuntimeCodeInfoAccess.walkStrongReferences(codeInfo, visitor); + RuntimeCodeInfoAccess.walkWeakReferences(codeInfo, visitor); + return true; + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/SweepingVisitor.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/SweepingVisitor.java new file mode 100644 index 000000000000..8f0c3d18040e --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/SweepingVisitor.java @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.svm.core.genscavenge.compacting;
+
+import static jdk.graal.compiler.replacements.AllocationSnippets.FillContent.WITH_GARBAGE_IF_ASSERTIONS_ENABLED;
+
+import org.graalvm.word.Pointer;
+import org.graalvm.word.UnsignedWord;
+
+import com.oracle.svm.core.config.ConfigurationValues;
+import com.oracle.svm.core.genscavenge.graal.nodes.FormatArrayNode;
+import com.oracle.svm.core.genscavenge.graal.nodes.FormatObjectNode;
+import com.oracle.svm.core.heap.FillerArray;
+import com.oracle.svm.core.heap.FillerObject;
+import com.oracle.svm.core.hub.LayoutEncoding;
+import com.oracle.svm.core.util.UnsignedUtils;
+
+import jdk.graal.compiler.api.replacements.Fold;
+import jdk.graal.compiler.core.common.NumUtil;
+import jdk.vm.ci.meta.JavaKind;
+
+/**
+ * Overwrites dead objects with filler objects so that heap walks or scans that use card tables
+ * cannot encounter them (and their broken references).
+ */
+public final class SweepingVisitor implements ObjectMoveInfo.Visitor {
+    private static final Class<?> ARRAY_CLASS = FillerArray.class;
+    private static final JavaKind ARRAY_ELEMENT_KIND = JavaKind.Int;
+    private static final int ARRAY_ELEMENT_SIZE = ARRAY_ELEMENT_KIND.getByteCount();
+
+    @Fold
+    static int arrayMinSize() {
+        return NumUtil.safeToInt(ConfigurationValues.getObjectLayout().getArraySize(ARRAY_ELEMENT_KIND, 0, false));
+    }
+
+    @Fold
+    static int arrayBaseOffset() {
+        return ConfigurationValues.getObjectLayout().getArrayBaseOffset(ARRAY_ELEMENT_KIND);
+    }
+
+    @Override
+    public boolean visit(Pointer objSeq, UnsignedWord size, Pointer newAddress, Pointer nextObjSeq) {
+        if (nextObjSeq.isNonNull()) {
+            Pointer gapStart = objSeq.add(size);
+            assert gapStart.belowOrEqual(nextObjSeq);
+            if (gapStart.notEqual(nextObjSeq)) {
+                writeFillerObjectAt(gapStart, nextObjSeq.subtract(gapStart));
+            }
+        }
+        return true;
+    }
+
+    private static void writeFillerObjectAt(Pointer p, UnsignedWord size) {
+        assert size.aboveThan(0);
+        if (size.aboveOrEqual(arrayMinSize())) {
+            int length = UnsignedUtils.safeToInt(size.subtract(arrayBaseOffset()).unsignedDivide(ARRAY_ELEMENT_SIZE));
+            FormatArrayNode.formatArray(p, ARRAY_CLASS, length, true, false, WITH_GARBAGE_IF_ASSERTIONS_ENABLED, false);
+        } else {
+            FormatObjectNode.formatObject(p, FillerObject.class, true, WITH_GARBAGE_IF_ASSERTIONS_ENABLED, false);
+        }
+        assert LayoutEncoding.getSizeFromObjectInGC(p.toObject()).equal(size);
+    }
+}
diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeGCFeature.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeGCFeature.java
index 9982425c3d4b..bd81e79d5f1e 100644
--- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeGCFeature.java
+++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeGCFeature.java
@@ -45,6 +45,7 @@ import com.oracle.svm.core.genscavenge.HeapVerifier;
 import com.oracle.svm.core.genscavenge.ImageHeapInfo;
 import com.oracle.svm.core.genscavenge.IncrementalGarbageCollectorMXBean;
+import com.oracle.svm.core.genscavenge.SerialGCOptions;
 import com.oracle.svm.core.genscavenge.jvmstat.EpsilonGCPerfData;
 import com.oracle.svm.core.genscavenge.jvmstat.SerialGCPerfData;
 import com.oracle.svm.core.genscavenge.remset.CardTableBasedRememberedSet;
@@ -125,9 +126,7 @@ public void duringSetup(DuringSetupAccess access) {
     @Override
     public void registerLowerings(RuntimeConfiguration runtimeConfig, OptionValues options, Providers providers, Map<Class<? extends Node>, NodeLoweringProvider<?>> lowerings, boolean hosted) {
-        if (SubstrateOptions.useRememberedSet()) {
-            // Even though I don't hold on to this instance, it is preserved because it becomes the
-            // enclosing instance for the lowerings registered within it.
+        if (SerialGCOptions.useRememberedSet()) {
             BarrierSnippets barrierSnippets = new BarrierSnippets(options, providers);
             barrierSnippets.registerLowerings(providers.getMetaAccess(), lowerings);
         }
@@ -164,7 +163,7 @@ public void registerForeignCalls(SubstrateForeignCallsProvider foreignCalls) {
     }
 
     private static RememberedSet createRememberedSet() {
-        if (SubstrateOptions.useRememberedSet()) {
+        if (SerialGCOptions.useRememberedSet()) {
             return new CardTableBasedRememberedSet();
         } else {
             return new NoRememberedSet();
diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/AlignedChunkRememberedSet.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/AlignedChunkRememberedSet.java
index b2c038091a4e..7efb7222bf00 100644
--- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/AlignedChunkRememberedSet.java
+++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/AlignedChunkRememberedSet.java
@@ -26,9 +26,6 @@
 import java.util.List;
 
-import jdk.graal.compiler.api.replacements.Fold;
-import jdk.graal.compiler.replacements.nodes.AssertionNode;
-import jdk.graal.compiler.word.Word;
 import org.graalvm.nativeimage.Platform;
 import org.graalvm.nativeimage.Platforms;
 import org.graalvm.nativeimage.c.struct.SizeOf;
@@ -45,12 +42,18 @@
 import com.oracle.svm.core.genscavenge.HeapChunk;
 import com.oracle.svm.core.genscavenge.HeapParameters;
 import com.oracle.svm.core.genscavenge.ObjectHeaderImpl;
+import com.oracle.svm.core.genscavenge.SerialGCOptions;
+import com.oracle.svm.core.genscavenge.compacting.ObjectMoveInfo;
 import com.oracle.svm.core.hub.LayoutEncoding;
 import com.oracle.svm.core.image.ImageHeapObject;
 import com.oracle.svm.core.util.HostedByteBufferPointer;
 import com.oracle.svm.core.util.PointerUtils;
 import com.oracle.svm.core.util.UnsignedUtils;
+
+import jdk.graal.compiler.api.replacements.Fold;
+import jdk.graal.compiler.replacements.nodes.AssertionNode;
+import jdk.graal.compiler.word.Word;
+
 final class AlignedChunkRememberedSet {
     private AlignedChunkRememberedSet() {
     }
@@ -63,6 +66,10 @@ public static int wordSize() {
     @Fold
     public static UnsignedWord getHeaderSize() {
         UnsignedWord headerSize = getFirstObjectTableLimitOffset();
+        if (SerialGCOptions.useCompactingOldGen()) {
+            // Compaction needs room for an ObjectMoveInfo structure before the first object.
+ headerSize = headerSize.add(ObjectMoveInfo.getSize()); + } UnsignedWord alignment = WordFactory.unsigned(ConfigurationValues.getObjectLayout().getAlignment()); return UnsignedUtils.roundUp(headerSize, alignment); } @@ -269,7 +276,7 @@ static UnsignedWord getCardTableLimitOffset() { } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - private static Pointer getCardTableStart(AlignedHeader chunk) { + static Pointer getCardTableStart(AlignedHeader chunk) { return getCardTableStart(HeapChunk.asPointer(chunk)); } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/BrickTable.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/BrickTable.java new file mode 100644 index 000000000000..e6e61c8d3f05 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/BrickTable.java @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.genscavenge.remset; + +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; + +import org.graalvm.word.Pointer; +import org.graalvm.word.UnsignedWord; + +import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.config.ConfigurationValues; +import com.oracle.svm.core.genscavenge.AlignedHeapChunk; +import com.oracle.svm.core.genscavenge.HeapChunk; +import com.oracle.svm.core.genscavenge.compacting.ObjectMoveInfo; + +import jdk.graal.compiler.api.replacements.Fold; + +/** + * Inspired by the .NET CoreCLR GC, the {@link BrickTable} speeds up lookups of new object locations + * after compaction by acting as a lookup table for {@link ObjectMoveInfo} structures. Each entry + * stores a pointer to the start of the first such structure for the fraction of the chunk that it + * covers. It borrows the memory of a chunk's {@link CardTable}. + */ +public final class BrickTable { + private static final int ENTRY_SIZE_BYTES = 2; + private static final int BYTES_COVERED_BY_ENTRY = CardTable.BYTES_COVERED_BY_ENTRY * ENTRY_SIZE_BYTES; + + /** @return The table index for the object at the given address. 
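+     * For example (illustrative, assuming the card table's 512-byte granularity): each 2-byte
+     * entry then covers 1024 bytes, so an object starting 10240 bytes after the objects start
+     * maps to index 10240 / 1024 = 10.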
*/ + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public static UnsignedWord getIndex(AlignedHeapChunk.AlignedHeader chunk, Pointer pointer) { + Pointer objectsStart = AlignedHeapChunk.getObjectsStart(chunk); + UnsignedWord index = pointer.subtract(objectsStart).unsignedDivide(BYTES_COVERED_BY_ENTRY); + assert index.aboveOrEqual(0) && index.belowThan(getLength()) : "index out of range"; + return index; + } + + @Fold + public static UnsignedWord getLength() { + UnsignedWord bytesCovered = AlignedHeapChunk.getUsableSizeForObjects(); + UnsignedWord length = bytesCovered.add(BYTES_COVERED_BY_ENTRY - 1).unsignedDivide(BYTES_COVERED_BY_ENTRY); + assert length.multiply(ENTRY_SIZE_BYTES).belowOrEqual(AlignedChunkRememberedSet.getCardTableSize()) : "brick table size does not match card table size"; + return length; + } + + /** + * @return The first {@link ObjectMoveInfo} in the fraction of the chunk that is covered by the + * entry at the given index. + */ + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public static Pointer getEntry(AlignedHeapChunk.AlignedHeader chunk, UnsignedWord index) { + short entry = getBrickTableStart(chunk).readShort(index.multiply(ENTRY_SIZE_BYTES)); + int offset = (entry & 0xffff) * ConfigurationValues.getObjectLayout().getAlignment(); + return HeapChunk.asPointer(chunk).add(offset); + } + + /** + * Sets the first {@link ObjectMoveInfo} in the fraction of the chunk that is covered by the + * entry at the given index. + */ + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public static void setEntry(AlignedHeapChunk.AlignedHeader chunk, UnsignedWord index, Pointer pointer) { + UnsignedWord offset = pointer.subtract(HeapChunk.asPointer(chunk)); + int alignment = ConfigurationValues.getObjectLayout().getAlignment(); + short entry = (short) offset.unsignedDivide(alignment).rawValue(); + getBrickTableStart(chunk).writeShort(index.multiply(ENTRY_SIZE_BYTES), entry); + assert getEntry(chunk, index).equal(pointer); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + private static Pointer getBrickTableStart(AlignedHeapChunk.AlignedHeader chunk) { + return AlignedChunkRememberedSet.getCardTableStart(chunk); + } + + private BrickTable() { + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/CardTable.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/CardTable.java index d9fb3d7e1d3d..26aa7729e4bb 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/CardTable.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/CardTable.java @@ -45,8 +45,10 @@ import com.oracle.svm.core.snippets.KnownIntrinsics; import com.oracle.svm.core.util.UnsignedUtils; +import jdk.graal.compiler.api.directives.GraalDirectives; import jdk.graal.compiler.core.common.SuppressFBWarnings; import jdk.graal.compiler.nodes.extended.BranchProbabilityNode; +import jdk.graal.compiler.replacements.ReplacementsUtil; import jdk.graal.compiler.word.Word; /** @@ -118,7 +120,13 @@ private static boolean isClean(Pointer table, UnsignedWord index) { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) private static int readEntry(Pointer table, UnsignedWord index) { - return table.readByte(index); + byte entry = table.readByte(index); + if 
(GraalDirectives.inIntrinsic()) {
+            ReplacementsUtil.dynamicAssert(entry == CLEAN_ENTRY || entry == DIRTY_ENTRY, "valid entry");
+        } else {
+            assert entry == CLEAN_ENTRY || entry == DIRTY_ENTRY;
+        }
+        return entry;
     }
 
     @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true)
diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java
index 67de5054ea33..ee95439f2a0d 100644
--- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java
+++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java
@@ -870,11 +870,6 @@ public static boolean shouldCompileInIsolates() {
     @Option(help = "Size of the reserved address space of each compilation isolate (0: default for new isolates).") //
     public static final RuntimeOptionKey<Long> CompilationIsolateAddressSpaceSize = new RuntimeOptionKey<>(0L);
 
-    @Fold
-    public static boolean useRememberedSet() {
-        return !SubstrateOptions.UseEpsilonGC.getValue() && ConcealedOptions.UseRememberedSet.getValue();
-    }
-
     /** Query these options only through an appropriate method. */
     public static class ConcealedOptions {
@@ -897,9 +892,6 @@ public Boolean getValue(OptionValues values) {
         @Option(help = "Activate runtime compilation in separate isolates (enable support during image build with option SupportCompileInIsolates).") //
         public static final RuntimeOptionKey<Boolean> CompileInIsolates = new RuntimeOptionKey<>(true, RelevantForCompilationIsolates);
 
-        @Option(help = "Determines if a remembered set is used, which is necessary for collecting the young and old generation independently.", type = OptionType.Expert) //
-        public static final HostedOptionKey<Boolean> UseRememberedSet = new HostedOptionKey<>(true);
-
         /** Use {@link VMOperationControl#useDedicatedVMOperationThread()} instead. */
         @Option(help = "Determines if VM operations should be executed in a dedicated thread.", type = OptionType.Expert)//
         public static final HostedOptionKey<Boolean> UseDedicatedVMOperationThread = new HostedOptionKey<>(false);
diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/LayoutEncoding.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/LayoutEncoding.java
index 1c487d69bdb9..4dbf7d647ab9 100644
--- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/LayoutEncoding.java
+++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/LayoutEncoding.java
@@ -355,6 +355,11 @@ public static UnsignedWord getSizeFromObjectInlineInGC(Object obj, boolean addOp
         return getSizeFromObjectInline(obj, withOptionalIdHashField);
     }
 
+    @AlwaysInline("GC performance")
+    public static UnsignedWord getSizeFromObjectWithoutOptionalIdHashFieldInGC(Object obj) {
+        return getSizeFromObjectInline(obj, false);
+    }
+
     @AlwaysInline("Actual inlining decided by callers.")
     @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true)
     private static UnsignedWord getSizeFromObjectInline(Object obj, boolean withOptionalIdHashField) {