diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AlignedHeapChunk.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AlignedHeapChunk.java index 86dd5c3e0c48..da7be2af2225 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AlignedHeapChunk.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AlignedHeapChunk.java @@ -114,6 +114,14 @@ static Pointer allocateMemory(AlignedHeader that, UnsignedWord size) { return result; } + /** Retract the latest allocation. */ + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + static void retractAllocation(AlignedHeader that, UnsignedWord size) { + Pointer newTop = HeapChunk.getTopPointer(that).subtract(size); + assert newTop.aboveOrEqual(getObjectsStart(that)); + HeapChunk.setTopPointer(that, newTop); + } + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) static UnsignedWord getCommittedObjectMemory(AlignedHeader that) { return HeapChunk.getEndOffset(that).subtract(getObjectsStartOffset()); @@ -169,10 +177,5 @@ static final class MemoryWalkerAccessImpl extends HeapChunk.MemoryWalkerAccessIm public boolean isAligned(AlignedHeapChunk.AlignedHeader heapChunk) { return true; } - - @Override - public UnsignedWord getAllocationStart(AlignedHeapChunk.AlignedHeader heapChunk) { - return getObjectsStart(heapChunk); - } } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java index 65415bf20647..cf51d9fea6c3 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java @@ -63,6 +63,7 @@ import 
com.oracle.svm.core.genscavenge.BasicCollectionPolicies.NeverCollect; import com.oracle.svm.core.genscavenge.HeapChunk.Header; import com.oracle.svm.core.genscavenge.UnalignedHeapChunk.UnalignedHeader; +import com.oracle.svm.core.genscavenge.parallel.ParallelGC; import com.oracle.svm.core.genscavenge.remset.RememberedSet; import com.oracle.svm.core.graal.RuntimeCompilation; import com.oracle.svm.core.heap.CodeReferenceMapDecoder; @@ -126,6 +127,8 @@ public final class GCImpl implements GC { public String getName() { if (SubstrateOptions.UseEpsilonGC.getValue()) { return "Epsilon GC"; + } else if (SubstrateOptions.UseParallelGC.getValue()) { + return "Parallel GC"; } else { return "Serial GC"; } @@ -196,6 +199,10 @@ private void collectOperation(CollectionVMOperationData data) { assert VMOperation.isGCInProgress() : "Collection should be a VMOperation."; assert getCollectionEpoch().equal(data.getRequestingEpoch()); + if (ParallelGC.isEnabled()) { + ParallelGC.singleton().initialize(); + } + timers.mutator.closeAt(data.getRequestingNanoTime()); startCollectionOrExit(); @@ -1071,6 +1078,8 @@ private void scanGreyObjects(boolean isIncremental) { try { if (isIncremental) { scanGreyObjectsLoop(); + } else if (ParallelGC.isEnabled()) { + ParallelGC.singleton().waitForIdle(); } else { HeapImpl.getHeapImpl().getOldGeneration().scanGreyObjects(); } @@ -1093,20 +1102,23 @@ private static void scanGreyObjectsLoop() { @AlwaysInline("GC performance") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - @SuppressWarnings("static-method") Object promoteObject(Object original, UnsignedWord header) { HeapImpl heap = HeapImpl.getHeapImpl(); boolean isAligned = ObjectHeaderImpl.isAlignedHeader(header); Header originalChunk = getChunk(original, isAligned); + + /* If the parallel GC is used, then the space may be outdated or null. 
*/ Space originalSpace = HeapChunk.getSpace(originalChunk); - if (!originalSpace.isFromSpace()) { + assert originalSpace != null || ParallelGC.isEnabled() && ParallelGC.singleton().isInParallelPhase(); + if (originalSpace == null || !originalSpace.isFromSpace()) { + /* Object was already promoted or is currently being promoted. */ return original; } Object result = null; if (!completeCollection && originalSpace.getNextAgeForPromotion() < policy.getTenuringAge()) { if (isAligned) { - result = heap.getYoungGeneration().promoteAlignedObject(original, (AlignedHeader) originalChunk, originalSpace); + result = heap.getYoungGeneration().promoteAlignedObject(original, originalSpace); } else { result = heap.getYoungGeneration().promoteUnalignedObject(original, (UnalignedHeader) originalChunk, originalSpace); } @@ -1114,11 +1126,13 @@ Object promoteObject(Object original, UnsignedWord header) { accounting.onSurvivorOverflowed(); } } - if (result == null) { // complete collection, tenuring age reached, or survivor space full + + /* Complete collection, tenuring age reached, or survivor space full. 
*/ + if (result == null) { if (isAligned) { - result = heap.getOldGeneration().promoteAlignedObject(original, (AlignedHeader) originalChunk, originalSpace); + result = heap.getOldGeneration().promoteAlignedObject(original, originalSpace); } else { - result = heap.getOldGeneration().promoteUnalignedObject(original, (UnalignedHeader) originalChunk, originalSpace); + result = heap.getOldGeneration().promoteUnalignedObject(original, (UnalignedHeader) originalChunk); } assert result != null : "promotion failure in old generation must have been handled"; } @@ -1152,7 +1166,7 @@ private void promotePinnedObject(PinnedObjectImpl pinned) { } } if (!promoted) { - heap.getOldGeneration().promoteChunk(originalChunk, isAligned, originalSpace); + heap.getOldGeneration().promoteChunk(originalChunk, isAligned); } } } @@ -1241,7 +1255,7 @@ public static boolean hasNeverCollectPolicy() { } @Fold - GreyToBlackObjectVisitor getGreyToBlackObjectVisitor() { + public GreyToBlackObjectVisitor getGreyToBlackObjectVisitor() { return greyToBlackObjectVisitor; } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GenScavengeMemoryPoolMXBeans.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GenScavengeMemoryPoolMXBeans.java index 441b4e6d6b33..50ac37182285 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GenScavengeMemoryPoolMXBeans.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GenScavengeMemoryPoolMXBeans.java @@ -46,7 +46,7 @@ public class GenScavengeMemoryPoolMXBeans { @Platforms(Platform.HOSTED_ONLY.class) public static MemoryPoolMXBean[] createMemoryPoolMXBeans() { - if (SubstrateOptions.UseSerialGC.getValue()) { + if (SubstrateOptions.useSerialOrParallelGC()) { mxBeans = new AbstractMemoryPoolMXBean[]{ new EdenMemoryPoolMXBean(YOUNG_GEN_SCAVENGER, COMPLETE_SCAVENGER), new 
SurvivorMemoryPoolMXBean(YOUNG_GEN_SCAVENGER, COMPLETE_SCAVENGER), diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Generation.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Generation.java index e487a4c7f1d2..d5559bd77e52 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Generation.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Generation.java @@ -27,8 +27,6 @@ import org.graalvm.nativeimage.Platform; import org.graalvm.nativeimage.Platforms; -import com.oracle.svm.core.AlwaysInline; -import com.oracle.svm.core.Uninterruptible; import com.oracle.svm.core.heap.ObjectVisitor; import com.oracle.svm.core.log.Log; @@ -55,46 +53,4 @@ public String getName() { /** Report some statistics about the Generation to a Log. */ public abstract Log report(Log log, boolean traceHeapChunks); - - /** - * Promote an Object to this Generation, typically by copying and leaving a forwarding pointer - * to the new Object in place of the original Object. If the object cannot be promoted due to - * insufficient capacity, returns {@code null}. - * - * This turns an Object from white to grey: the object is in this Generation, but has not yet - * had its interior pointers visited. - * - * @return a reference to the promoted object, which is different to the original reference if - * promotion was done by copying, or {@code null} if there was insufficient capacity in - * this generation. - */ - @AlwaysInline("GC performance") - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - protected abstract Object promoteAlignedObject(Object original, AlignedHeapChunk.AlignedHeader originalChunk, Space originalSpace); - - /** - * Promote an Object to this Generation, typically by HeapChunk motion. If the object cannot be - * promoted due to insufficient capacity, returns {@code null}. 
- * - * This turns an Object from white to grey: the object is in this Generation, but has not yet - * had its interior pointers visited. - * - * @return a reference to the promoted object, which is the same as the original if the object - * was promoted through HeapChunk motion, or {@code null} if there was insufficient - * capacity in this generation. - */ - @AlwaysInline("GC performance") - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - protected abstract Object promoteUnalignedObject(Object original, UnalignedHeapChunk.UnalignedHeader originalChunk, Space originalSpace); - - /** - * Promote a HeapChunk from its original space to the appropriate space in this generation if - * there is sufficient capacity. - * - * This turns all the Objects in the chunk from white to grey: the objects are in the target - * Space, but have not yet had their interior pointers visited. - * - * @return true on success, false if the there was insufficient capacity. - */ - protected abstract boolean promoteChunk(HeapChunk.Header originalChunk, boolean isAligned, Space originalSpace); } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GreyToBlackObjRefVisitor.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GreyToBlackObjRefVisitor.java index 3281a53ac41c..fae6561ab1c2 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GreyToBlackObjRefVisitor.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GreyToBlackObjRefVisitor.java @@ -31,11 +31,13 @@ import com.oracle.svm.core.AlwaysInline; import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.genscavenge.parallel.ParallelGC; import com.oracle.svm.core.genscavenge.remset.RememberedSet; import com.oracle.svm.core.heap.ObjectHeader; import com.oracle.svm.core.heap.ObjectReferenceVisitor; import 
com.oracle.svm.core.heap.ReferenceAccess; import com.oracle.svm.core.hub.LayoutEncoding; +import com.oracle.svm.core.jdk.UninterruptibleUtils.AtomicLong; import com.oracle.svm.core.log.Log; /** @@ -97,6 +99,8 @@ public boolean visitObjectReferenceInline(Pointer objRef, int innerOffset, boole counters.noteForwardedReferent(); // Update the reference to point to the forwarded Object. Object obj = ohi.getForwardedObject(p, header); + assert ParallelGC.isEnabled() && ParallelGC.singleton().isInParallelPhase() || + innerOffset < LayoutEncoding.getSizeFromObjectInGC(obj).rawValue(); Object offsetObj = (innerOffset == 0) ? obj : Word.objectToUntrackedPointer(obj).add(innerOffset).toObject(); ReferenceAccess.singleton().writeObjectAt(objRef, offsetObj, compressed); RememberedSet.get().dirtyCardIfNecessary(holderObject, obj); @@ -105,11 +109,12 @@ public boolean visitObjectReferenceInline(Pointer objRef, int innerOffset, boole // Promote the Object if necessary, making it at least grey, and ... Object obj = p.toObject(); - assert innerOffset < LayoutEncoding.getSizeFromObjectInGC(obj).rawValue(); Object copy = GCImpl.getGCImpl().promoteObject(obj, header); if (copy != obj) { // ... update the reference to point to the copy, making the reference black. counters.noteCopiedReferent(); + assert ParallelGC.isEnabled() && ParallelGC.singleton().isInParallelPhase() || + innerOffset < LayoutEncoding.getSizeFromObjectInGC(copy).rawValue(); Object offsetCopy = (innerOffset == 0) ? 
copy : Word.objectToUntrackedPointer(copy).add(innerOffset).toObject(); ReferenceAccess.singleton().writeObjectAt(objRef, offsetCopy, compressed); } else { @@ -152,19 +157,17 @@ public interface Counters extends AutoCloseable { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) void noteUnmodifiedReference(); - void toLog(); - void reset(); } public static class RealCounters implements Counters { - private long objRef; - private long nullObjRef; - private long nullReferent; - private long forwardedReferent; - private long nonHeapReferent; - private long copiedReferent; - private long unmodifiedReference; + private final AtomicLong objRef = new AtomicLong(0); + private final AtomicLong nullObjRef = new AtomicLong(0); + private final AtomicLong nullReferent = new AtomicLong(0); + private final AtomicLong forwardedReferent = new AtomicLong(0); + private final AtomicLong nonHeapReferent = new AtomicLong(0); + private final AtomicLong copiedReferent = new AtomicLong(0); + private final AtomicLong unmodifiedReference = new AtomicLong(0); RealCounters() { reset(); @@ -172,13 +175,13 @@ public static class RealCounters implements Counters { @Override public void reset() { - objRef = 0L; - nullObjRef = 0L; - nullReferent = 0L; - forwardedReferent = 0L; - nonHeapReferent = 0L; - copiedReferent = 0L; - unmodifiedReference = 0L; + objRef.set(0L); + nullObjRef.set(0L); + nullReferent.set(0L); + forwardedReferent.set(0L); + nonHeapReferent.set(0L); + copiedReferent.set(0L); + unmodifiedReference.set(0L); } @Override @@ -196,50 +199,49 @@ public void close() { @Override @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public void noteObjRef() { - objRef += 1L; + objRef.incrementAndGet(); } @Override @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public void noteNullReferent() { - nullReferent += 1L; + nullReferent.incrementAndGet(); } @Override @Uninterruptible(reason = 
"Called from uninterruptible code.", mayBeInlined = true) public void noteForwardedReferent() { - forwardedReferent += 1L; + forwardedReferent.incrementAndGet(); } @Override @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public void noteNonHeapReferent() { - nonHeapReferent += 1L; + nonHeapReferent.incrementAndGet(); } @Override @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public void noteCopiedReferent() { - copiedReferent += 1L; + copiedReferent.incrementAndGet(); } @Override @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public void noteUnmodifiedReference() { - unmodifiedReference += 1L; + unmodifiedReference.incrementAndGet(); } - @Override - public void toLog() { + private void toLog() { Log log = Log.log(); log.string("[GreyToBlackObjRefVisitor.counters:"); - log.string(" objRef: ").signed(objRef); - log.string(" nullObjRef: ").signed(nullObjRef); - log.string(" nullReferent: ").signed(nullReferent); - log.string(" forwardedReferent: ").signed(forwardedReferent); - log.string(" nonHeapReferent: ").signed(nonHeapReferent); - log.string(" copiedReferent: ").signed(copiedReferent); - log.string(" unmodifiedReference: ").signed(unmodifiedReference); + log.string(" objRef: ").signed(objRef.get()); + log.string(" nullObjRef: ").signed(nullObjRef.get()); + log.string(" nullReferent: ").signed(nullReferent.get()); + log.string(" forwardedReferent: ").signed(forwardedReferent.get()); + log.string(" nonHeapReferent: ").signed(nonHeapReferent.get()); + log.string(" copiedReferent: ").signed(copiedReferent.get()); + log.string(" unmodifiedReference: ").signed(unmodifiedReference.get()); log.string("]").newline(); } } @@ -288,10 +290,6 @@ public void noteCopiedReferent() { public void noteUnmodifiedReference() { } - @Override - public void toLog() { - } - @Override public void reset() { } diff --git 
a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunk.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunk.java index f395ef97877d..abea3fbbb30a 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunk.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunk.java @@ -175,9 +175,9 @@ public interface Header> extends HeaderPadding { public static void initialize(Header chunk, Pointer objectsStart, UnsignedWord chunkSize) { HeapChunk.setEndOffset(chunk, chunkSize); HeapChunk.setTopPointer(chunk, objectsStart); - HeapChunk.setSpace(chunk, null); - HeapChunk.setNext(chunk, WordFactory.nullPointer()); - HeapChunk.setPrevious(chunk, WordFactory.nullPointer()); + chunk.setSpace(null); + chunk.setOffsetToNextChunk(WordFactory.zero()); + chunk.setOffsetToPreviousChunk(WordFactory.zero()); /* * The epoch is obviously not random, but cheap to use, and we cannot use a random number @@ -227,6 +227,10 @@ public static void setEndOffset(Header that, UnsignedWord newEnd) { that.setEndOffset(newEnd); } + /** + * If the parallel GC is used, then this method is racy. So, it may return null or an outdated + * value if it is called for an unaligned heap chunk that is in the middle of being promoted. 
+ */ @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static Space getSpace(Header that) { return that.getSpace(); @@ -234,6 +238,7 @@ public static Space getSpace(Header that) { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static void setSpace(Header that, Space newSpace) { + assert newSpace == null || that.getSpace() == null : "heap chunk must be removed from its current space before it can be registered with a new space"; that.setSpace(newSpace); } @@ -258,8 +263,8 @@ public static > void setNext(Header that, T newNext) { } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public static UnsignedWord getIdentityHashSalt(Header that) { - return that.getIdentityHashSalt(IdentityHashCodeSupport.IDENTITY_HASHCODE_SALT_LOCATION); + public static long getIdentityHashSalt(Header that) { + return that.getIdentityHashSalt(IdentityHashCodeSupport.IDENTITY_HASHCODE_SALT_LOCATION).rawValue(); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) @@ -345,6 +350,7 @@ public static HeapChunk.Header getEnclosingHeapChunk(Object obj) { } } + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static HeapChunk.Header getEnclosingHeapChunk(Pointer ptrToObj, UnsignedWord header) { if (ObjectHeaderImpl.isAlignedHeader(header)) { return AlignedHeapChunk.getEnclosingChunkFromObjectPointer(ptrToObj); @@ -367,26 +373,5 @@ public UnsignedWord getStart(T heapChunk) { public UnsignedWord getSize(T heapChunk) { return HeapChunk.getEndOffset(heapChunk); } - - @Override - public UnsignedWord getAllocationEnd(T heapChunk) { - return HeapChunk.getTopPointer(heapChunk); - } - - @Override - public String getRegion(T heapChunk) { - /* This method knows too much about spaces, especially the "free" space. 
*/ - Space space = getSpace(heapChunk); - String result; - if (space == null) { - result = "free"; - } else if (space.isYoungSpace()) { - result = "young"; - } else { - result = "old"; - } - return result; - } - } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java index 4f3b501a0dd1..7f0340754c75 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java @@ -57,6 +57,7 @@ import com.oracle.svm.core.genscavenge.ThreadLocalAllocation.Descriptor; import com.oracle.svm.core.genscavenge.UnalignedHeapChunk.UnalignedHeader; import com.oracle.svm.core.genscavenge.graal.ForcedSerialPostWriteBarrier; +import com.oracle.svm.core.genscavenge.parallel.ParallelGC; import com.oracle.svm.core.graal.snippets.SubstrateAllocationSnippets; import com.oracle.svm.core.heap.GC; import com.oracle.svm.core.heap.GCCause; @@ -196,6 +197,9 @@ public boolean walkObjects(ObjectVisitor visitor) { @Override @Uninterruptible(reason = "Tear-down in progress.") public boolean tearDown() { + if (ParallelGC.isEnabled()) { + ParallelGC.singleton().tearDown(); + } youngGeneration.tearDown(); oldGeneration.tearDown(); getChunkProvider().tearDown(); @@ -715,7 +719,7 @@ public long getIdentityHashSalt(Object obj) { assert !isInImageHeap(obj) : "Image heap objects have identity hash code fields"; } HeapChunk.Header chunk = HeapChunk.getEnclosingHeapChunk(obj); - return HeapChunk.getIdentityHashSalt(chunk).rawValue(); + return HeapChunk.getIdentityHashSalt(chunk); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/JfrGCEventSupport.java 
b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/JfrGCEventSupport.java index 6a2a962d5aeb..54965f7a338f 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/JfrGCEventSupport.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/JfrGCEventSupport.java @@ -143,7 +143,7 @@ private int popPhase() { class JfrGCEventFeature implements InternalFeature { @Override public boolean isInConfiguration(IsInConfigurationAccess access) { - return SubstrateOptions.UseSerialGC.getValue(); + return SubstrateOptions.useSerialOrParallelGC(); } @Override diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ObjectHeaderImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ObjectHeaderImpl.java index be0def8590b7..60b821f02a93 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ObjectHeaderImpl.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ObjectHeaderImpl.java @@ -343,6 +343,11 @@ public static void setRememberedSetBit(Object o) { writeHeaderToObject(o, newHeader); } + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + static long setRememberedSetBit(long headerBytes) { + return headerBytes | REMEMBERED_SET_BIT.rawValue(); + } + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static boolean hasRememberedSet(UnsignedWord header) { return header.and(REMEMBERED_SET_BIT).notEqual(0); @@ -359,11 +364,6 @@ public static boolean isForwardedHeader(UnsignedWord header) { return header.and(FORWARDED_BIT).notEqual(0); } - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - Object getForwardedObject(Pointer ptr) { - return getForwardedObject(ptr, readHeaderFromPointer(ptr)); - } - 
 @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) Object getForwardedObject(Pointer ptr, UnsignedWord header) { assert isForwardedHeader(header); @@ -393,6 +393,24 @@ void installForwardingPointer(Object original, Object copy) { assert isPointerToForwardedObject(Word.objectToUntrackedPointer(original)); } + /** + * The original header is the 8 bytes at the hub offset (regardless of whether compressed references + * are used or not). + */ + @AlwaysInline("GC performance") + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + Object installForwardingPointerParallel(Object original, long eightHeaderBytes, Object copy) { + UnsignedWord forwardHeader = getForwardHeader(copy); + /* Try installing the new header. */ + Pointer originalPtr = Word.objectToUntrackedPointer(original); + long value = originalPtr.compareAndSwapLong(getHubOffset(), eightHeaderBytes, forwardHeader.rawValue(), LocationIdentity.ANY_LOCATION); + assert isPointerToForwardedObject(originalPtr); + if (value != eightHeaderBytes) { + return getForwardedObject(originalPtr, WordFactory.unsigned(value)); + } + return copy; + } + @AlwaysInline("GC performance") + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + private UnsignedWord getForwardHeader(Object copy) { diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/OldGeneration.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/OldGeneration.java index 6ecf2f04d3fa..7e4ca4fc9a09 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/OldGeneration.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/OldGeneration.java @@ -35,6 +35,7 @@ import com.oracle.svm.core.MemoryWalker; import com.oracle.svm.core.Uninterruptible; import com.oracle.svm.core.genscavenge.GCImpl.ChunkReleaser; import 
com.oracle.svm.core.genscavenge.parallel.ParallelGC; import com.oracle.svm.core.genscavenge.remset.RememberedSet; import com.oracle.svm.core.heap.ObjectVisitor; import com.oracle.svm.core.log.Log; @@ -74,31 +75,25 @@ public boolean walkObjects(ObjectVisitor visitor) { /** Promote an Object to ToSpace if it is not already in ToSpace. */ @AlwaysInline("GC performance") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - @Override - public Object promoteAlignedObject(Object original, AlignedHeapChunk.AlignedHeader originalChunk, Space originalSpace) { + Object promoteAlignedObject(Object original, Space originalSpace) { assert originalSpace.isFromSpace(); return getToSpace().promoteAlignedObject(original, originalSpace); } @AlwaysInline("GC performance") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - @Override - protected Object promoteUnalignedObject(Object original, UnalignedHeapChunk.UnalignedHeader originalChunk, Space originalSpace) { - assert originalSpace.isFromSpace(); - getToSpace().promoteUnalignedHeapChunk(originalChunk, originalSpace); + Object promoteUnalignedObject(Object original, UnalignedHeapChunk.UnalignedHeader originalChunk) { + getToSpace().promoteUnalignedHeapChunk(originalChunk); return original; } - @Override @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - protected boolean promoteChunk(HeapChunk.Header originalChunk, boolean isAligned, Space originalSpace) { - assert originalSpace.isFromSpace(); + void promoteChunk(HeapChunk.Header originalChunk, boolean isAligned) { if (isAligned) { - getToSpace().promoteAlignedHeapChunk((AlignedHeapChunk.AlignedHeader) originalChunk, originalSpace); + getToSpace().promoteAlignedHeapChunk((AlignedHeapChunk.AlignedHeader) originalChunk); } else { - getToSpace().promoteUnalignedHeapChunk((UnalignedHeapChunk.UnalignedHeader) originalChunk, originalSpace); + 
getToSpace().promoteUnalignedHeapChunk((UnalignedHeapChunk.UnalignedHeader) originalChunk); } - return true; } void releaseSpaces(ChunkReleaser chunkReleaser) { @@ -107,11 +102,16 @@ void releaseSpaces(ChunkReleaser chunkReleaser) { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) void prepareForPromotion() { + if (ParallelGC.isEnabled() && GCImpl.getGCImpl().isCompleteCollection()) { + return; + } toGreyObjectsWalker.setScanStart(getToSpace()); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) boolean scanGreyObjects() { + assert !ParallelGC.isEnabled() || !GCImpl.getGCImpl().isCompleteCollection(); + if (!toGreyObjectsWalker.haveGreyObjects()) { return false; } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReferenceObjectProcessing.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReferenceObjectProcessing.java index e2b2df83407e..269c2d3abe1b 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReferenceObjectProcessing.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReferenceObjectProcessing.java @@ -43,6 +43,7 @@ import com.oracle.svm.core.heap.ObjectReferenceVisitor; import com.oracle.svm.core.heap.ReferenceInternals; import com.oracle.svm.core.hub.DynamicHub; +import com.oracle.svm.core.jdk.UninterruptibleUtils.AtomicReference; import com.oracle.svm.core.snippets.KnownIntrinsics; import com.oracle.svm.core.thread.VMOperation; import com.oracle.svm.core.util.UnsignedUtils; @@ -50,7 +51,7 @@ /** Discovers and handles {@link Reference} objects during garbage collection. */ final class ReferenceObjectProcessing { /** Head of the linked list of discovered references that need to be revisited. 
*/ - private static Reference rememberedRefsList; + private static final AtomicReference> rememberedRefsList = new AtomicReference<>(); /** * For a {@link SoftReference}, the longest duration after its last access to keep its referent @@ -113,17 +114,22 @@ private static void discover(Object obj, ObjectReferenceVisitor refVisitor) { // Referents in the image heap cannot be moved or reclaimed, no need to look closer. return; } - if (maybeUpdateForwardedReference(dr, referentAddr)) { + + /* + * The parallel GC may modify the object header at any time, so we only read the object + * header once. + */ + UnsignedWord referentHeader = ObjectHeader.readHeaderFromPointer(referentAddr); + if (maybeUpdateForwardedReference(dr, referentAddr, referentHeader)) { // Some other object had a strong reference to the referent, so the referent was already // promoted. The call above updated the reference object so that it now points to the // promoted object. return; } - Object refObject = referentAddr.toObject(); - if (willSurviveThisCollection(refObject)) { + if (willSurviveThisCollection(referentAddr, referentHeader)) { // Referent is in a to-space. So, this is either an object that got promoted without // being moved or an object in the old gen. - RememberedSet.get().dirtyCardIfNecessary(dr, refObject); + RememberedSet.get().dirtyCardIfNecessary(dr, referentAddr.toObject()); return; } if (!softReferencesAreWeak && dr instanceof SoftReference) { @@ -146,9 +152,11 @@ private static void discover(Object obj, ObjectReferenceVisitor refVisitor) { // are revisited after the GC finished promoting all strongly reachable objects. // null link means undiscovered, avoid for the last node with a cyclic reference - Reference next = (rememberedRefsList != null) ? rememberedRefsList : dr; - ReferenceInternals.setNextDiscovered(dr, next); - rememberedRefsList = dr; + Reference expected; + do { + expected = rememberedRefsList.get(); + ReferenceInternals.setNextDiscovered(dr, expected != null ? 
expected : dr); + } while (!rememberedRefsList.compareAndSet(expected, dr)); } /** @@ -159,8 +167,7 @@ private static void discover(Object obj, ObjectReferenceVisitor refVisitor) { */ static Reference processRememberedReferences() { Reference pendingHead = null; - Reference current = rememberedRefsList; - rememberedRefsList = null; + Reference current = rememberedRefsList.getAndSet(null); while (current != null) { // Get the next node (the last node has a cyclic reference to self). @@ -185,7 +192,7 @@ static Reference processRememberedReferences() { } static void afterCollection(UnsignedWord freeBytes) { - assert rememberedRefsList == null; + assert rememberedRefsList.get() == null; UnsignedWord unused = freeBytes.unsignedDivide(1024 * 1024 /* MB */); maxSoftRefAccessIntervalMs = unused.multiply(SerialGCOptions.SoftRefLRUPolicyMSPerMB.getValue()); ReferenceInternals.updateSoftReferenceClock(); @@ -203,12 +210,13 @@ private static boolean processRememberedRef(Reference dr) { Pointer refPointer = ReferenceInternals.getReferentPointer(dr); assert refPointer.isNonNull() : "Referent is null: should not have been discovered"; assert !HeapImpl.getHeapImpl().isInImageHeap(refPointer) : "Image heap referent: should not have been discovered"; - if (maybeUpdateForwardedReference(dr, refPointer)) { + + UnsignedWord refHeader = ObjectHeader.readHeaderFromPointer(refPointer); + if (maybeUpdateForwardedReference(dr, refPointer, refHeader)) { return true; } - Object refObject = refPointer.toObject(); - if (willSurviveThisCollection(refObject)) { - RememberedSet.get().dirtyCardIfNecessary(dr, refObject); + if (willSurviveThisCollection(refPointer, refHeader)) { + RememberedSet.get().dirtyCardIfNecessary(dr, refPointer.toObject()); return true; } /* @@ -223,11 +231,9 @@ private static boolean processRememberedRef(Reference dr) { } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - private static boolean maybeUpdateForwardedReference(Reference dr, 
Pointer referentAddr) { - ObjectHeaderImpl ohi = ObjectHeaderImpl.getObjectHeaderImpl(); - UnsignedWord header = ObjectHeader.readHeaderFromPointer(referentAddr); + private static boolean maybeUpdateForwardedReference(Reference dr, Pointer referentAddr, UnsignedWord header) { if (ObjectHeaderImpl.isForwardedHeader(header)) { - Object forwardedObj = ohi.getForwardedObject(referentAddr); + Object forwardedObj = ObjectHeaderImpl.getObjectHeaderImpl().getForwardedObject(referentAddr, header); ReferenceInternals.setReferent(dr, forwardedObj); return true; } @@ -235,9 +241,9 @@ private static boolean maybeUpdateForwardedReference(Reference dr, Pointer re } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - private static boolean willSurviveThisCollection(Object obj) { - HeapChunk.Header chunk = HeapChunk.getEnclosingHeapChunk(obj); + private static boolean willSurviveThisCollection(Pointer ptr, UnsignedWord header) { + HeapChunk.Header chunk = HeapChunk.getEnclosingHeapChunk(ptr, header); Space space = HeapChunk.getSpace(chunk); - return !space.isFromSpace(); + return space != null && !space.isFromSpace(); } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/SerialAndEpsilonGCOptions.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/SerialAndEpsilonGCOptions.java index 7db109493358..390ed6ce5fa3 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/SerialAndEpsilonGCOptions.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/SerialAndEpsilonGCOptions.java @@ -36,16 +36,17 @@ import com.oracle.svm.core.util.InterruptImageBuilding; import com.oracle.svm.core.util.UserError; -/** Common options that can be specified for both the serial and the epsilon GC. */ +/** Options that can be specified for the serial, the parallel, and the epsilon GC. 
*/ +// TODO (chaeubl): rename public final class SerialAndEpsilonGCOptions { - @Option(help = "The maximum heap size as percent of physical memory. Serial and epsilon GC only.", type = OptionType.User) // - public static final RuntimeOptionKey MaximumHeapSizePercent = new NotifyGCRuntimeOptionKey<>(80, SerialAndEpsilonGCOptions::serialOrEpsilonGCOnly); + @Option(help = "The maximum heap size as percent of physical memory. Serial, parallel, and epsilon GC only.", type = OptionType.User) // + public static final RuntimeOptionKey MaximumHeapSizePercent = new NotifyGCRuntimeOptionKey<>(80, SerialAndEpsilonGCOptions::serialOrParallelOrEpsilonGCOnly); - @Option(help = "The maximum size of the young generation as a percentage of the maximum heap size. Serial and epsilon GC only.", type = OptionType.User) // - public static final RuntimeOptionKey MaximumYoungGenerationSizePercent = new NotifyGCRuntimeOptionKey<>(10, SerialAndEpsilonGCOptions::serialOrEpsilonGCOnly); + @Option(help = "The maximum size of the young generation as a percentage of the maximum heap size. Serial, parallel, and epsilon GC only.", type = OptionType.User) // + public static final RuntimeOptionKey MaximumYoungGenerationSizePercent = new NotifyGCRuntimeOptionKey<>(10, SerialAndEpsilonGCOptions::serialOrParallelOrEpsilonGCOnly); - @Option(help = "The size of an aligned chunk. Serial and epsilon GC only.", type = OptionType.Expert) // - public static final HostedOptionKey AlignedHeapChunkSize = new HostedOptionKey<>(512 * 1024L, SerialAndEpsilonGCOptions::serialOrEpsilonGCOnly) { + @Option(help = "The size of an aligned chunk. 
Serial, parallel, and epsilon GC only.", type = OptionType.Expert) // + public static final HostedOptionKey AlignedHeapChunkSize = new HostedOptionKey<>(512 * 1024L, SerialAndEpsilonGCOptions::serialOrParallelOrEpsilonGCOnly) { @Override protected void onValueUpdate(EconomicMap, Object> values, Long oldValue, Long newValue) { int multiple = 4096; @@ -57,31 +58,32 @@ protected void onValueUpdate(EconomicMap, Object> values, Long oldV * This should be a fraction of the size of an aligned chunk, else large small arrays will not * fit in an aligned chunk. */ - @Option(help = "The size at or above which an array will be allocated in its own unaligned chunk. Serial and epsilon GC only.", type = OptionType.Expert) // - public static final HostedOptionKey LargeArrayThreshold = new HostedOptionKey<>(128 * 1024L, SerialAndEpsilonGCOptions::serialOrEpsilonGCOnly); + @Option(help = "The size at or above which an array will be allocated in its own unaligned chunk. Serial, parallel, and epsilon GC only.", type = OptionType.Expert) // + public static final HostedOptionKey LargeArrayThreshold = new HostedOptionKey<>(128 * 1024L, SerialAndEpsilonGCOptions::serialOrParallelOrEpsilonGCOnly); - @Option(help = "Fill unused memory chunks with a sentinel value. Serial and epsilon GC only.", type = OptionType.Debug) // - public static final HostedOptionKey ZapChunks = new HostedOptionKey<>(false, SerialAndEpsilonGCOptions::serialOrEpsilonGCOnly); + @Option(help = "Fill unused memory chunks with a sentinel value. Serial, parallel, and epsilon GC only.", type = OptionType.Debug) // + public static final HostedOptionKey ZapChunks = new HostedOptionKey<>(false, SerialAndEpsilonGCOptions::serialOrParallelOrEpsilonGCOnly); - @Option(help = "Before use, fill memory chunks with a sentinel value. 
Serial and epsilon GC only.", type = OptionType.Debug) // - public static final HostedOptionKey ZapProducedHeapChunks = new HostedOptionKey<>(false, SerialAndEpsilonGCOptions::serialOrEpsilonGCOnly); + @Option(help = "Before use, fill memory chunks with a sentinel value. Serial, parallel, and epsilon GC only.", type = OptionType.Debug) // + public static final HostedOptionKey ZapProducedHeapChunks = new HostedOptionKey<>(false, SerialAndEpsilonGCOptions::serialOrParallelOrEpsilonGCOnly); - @Option(help = "After use, Fill memory chunks with a sentinel value. Serial and epsilon GC only.", type = OptionType.Debug) // - public static final HostedOptionKey ZapConsumedHeapChunks = new HostedOptionKey<>(false, SerialAndEpsilonGCOptions::serialOrEpsilonGCOnly); + @Option(help = "After use, Fill memory chunks with a sentinel value. Serial, parallel, and epsilon GC only.", type = OptionType.Debug) // + public static final HostedOptionKey ZapConsumedHeapChunks = new HostedOptionKey<>(false, SerialAndEpsilonGCOptions::serialOrParallelOrEpsilonGCOnly); - @Option(help = "Bytes that can be allocated before (re-)querying the physical memory size. Serial and epsilon GC only.", type = OptionType.Debug) // - public static final HostedOptionKey AllocationBeforePhysicalMemorySize = new HostedOptionKey<>(1L * 1024L * 1024L, SerialAndEpsilonGCOptions::serialOrEpsilonGCOnly); + @Option(help = "Bytes that can be allocated before (re-)querying the physical memory size. Serial, parallel, and epsilon GC only.", type = OptionType.Debug) // + public static final HostedOptionKey AllocationBeforePhysicalMemorySize = new HostedOptionKey<>(1L * 1024L * 1024L, SerialAndEpsilonGCOptions::serialOrParallelOrEpsilonGCOnly); - @Option(help = "Number of bytes at the beginning of each heap chunk that are not used for payload data, i.e., can be freely used as metadata by the heap chunk provider. 
Serial and epsilon GC only.", type = OptionType.Debug) // - public static final HostedOptionKey HeapChunkHeaderPadding = new HostedOptionKey<>(0, SerialAndEpsilonGCOptions::serialOrEpsilonGCOnly); + @Option(help = "Number of bytes at the beginning of each heap chunk that are not used for payload data, i.e., can be freely used as metadata by the heap chunk provider. Serial, parallel, and epsilon GC only.", type = OptionType.Debug) // + public static final HostedOptionKey HeapChunkHeaderPadding = new HostedOptionKey<>(0, SerialAndEpsilonGCOptions::serialOrParallelOrEpsilonGCOnly); private SerialAndEpsilonGCOptions() { } - public static void serialOrEpsilonGCOnly(OptionKey optionKey) { - if (!SubstrateOptions.UseSerialGC.getValue() && !SubstrateOptions.UseEpsilonGC.getValue()) { + private static void serialOrParallelOrEpsilonGCOnly(OptionKey optionKey) { + if (!SubstrateOptions.useSerialOrParallelOrEpsilonGC()) { throw new InterruptImageBuilding( - "The option '" + optionKey.getName() + "' can only be used together with the serial ('--gc=serial') or the epsilon garbage collector ('--gc=epsilon')."); + "The option '" + optionKey.getName() + + "' can only be used together with the serial ('--gc=serial'), parallel ('--gc=parallel'), or the epsilon garbage collector ('--gc=epsilon')."); } } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/SerialGCOptions.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/SerialGCOptions.java index edac8966b176..005bfaf05e6c 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/SerialGCOptions.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/SerialGCOptions.java @@ -36,19 +36,20 @@ import com.oracle.svm.core.util.InterruptImageBuilding; import com.oracle.svm.core.util.UserError; -/** Options that are only valid for the serial GC (and not for the epsilon GC). 
*/ +/** Options that can be specified for the serial and the parallel GC. */ +// TODO (chaeubl): rename this class public final class SerialGCOptions { - @Option(help = "The garbage collection policy, either Adaptive (default) or BySpaceAndTime. Serial GC only.", type = OptionType.User)// - public static final HostedOptionKey InitialCollectionPolicy = new HostedOptionKey<>("Adaptive", SerialGCOptions::serialGCOnly); + @Option(help = "The garbage collection policy, either Adaptive (default) or BySpaceAndTime. Serial and parallel GC only.", type = OptionType.User)// + public static final HostedOptionKey InitialCollectionPolicy = new HostedOptionKey<>("Adaptive", SerialGCOptions::serialOrParallelGCOnly); - @Option(help = "Percentage of total collection time that should be spent on young generation collections. Serial GC with collection policy 'BySpaceAndTime' only.", type = OptionType.User)// - public static final RuntimeOptionKey PercentTimeInIncrementalCollection = new RuntimeOptionKey<>(50, SerialGCOptions::serialGCOnly); + @Option(help = "Percentage of total collection time that should be spent on young generation collections. Serial and parallel GC only, if the collection policy 'BySpaceAndTime' is used.", type = OptionType.User)// + public static final RuntimeOptionKey PercentTimeInIncrementalCollection = new RuntimeOptionKey<>(50, SerialGCOptions::serialOrParallelGCOnly); - @Option(help = "The maximum free bytes reserved for allocations, in bytes (0 for automatic according to GC policy). Serial GC only.", type = OptionType.User)// - public static final RuntimeOptionKey MaxHeapFree = new RuntimeOptionKey<>(0L, SerialGCOptions::serialGCOnly); + @Option(help = "The maximum free bytes reserved for allocations, in bytes (0 for automatic according to GC policy). 
Serial and parallel GC only.", type = OptionType.User)// + public static final RuntimeOptionKey MaxHeapFree = new RuntimeOptionKey<>(0L, SerialGCOptions::serialOrParallelGCOnly); - @Option(help = "Maximum number of survivor spaces. Serial GC only.", type = OptionType.Expert) // - public static final HostedOptionKey MaxSurvivorSpaces = new HostedOptionKey<>(null, SerialGCOptions::serialGCOnly) { + @Option(help = "Maximum number of survivor spaces. Serial and parallel GC only.", type = OptionType.Expert) // + public static final HostedOptionKey MaxSurvivorSpaces = new HostedOptionKey<>(null, SerialGCOptions::serialOrParallelGCOnly) { @Override public Integer getValueOrDefault(UnmodifiableEconomicMap, Object> values) { Integer value = (Integer) values.get(this); @@ -63,61 +64,61 @@ public Integer getValue(OptionValues values) { } }; - @Option(help = "Determines if a full GC collects the young generation separately or together with the old generation. Serial GC only.", type = OptionType.Expert) // - public static final RuntimeOptionKey CollectYoungGenerationSeparately = new RuntimeOptionKey<>(null, SerialGCOptions::serialGCOnly); + @Option(help = "Determines if a full GC collects the young generation separately or together with the old generation. Serial and parallel GC only.", type = OptionType.Expert) // + public static final RuntimeOptionKey CollectYoungGenerationSeparately = new RuntimeOptionKey<>(null, SerialGCOptions::serialOrParallelGCOnly); - @Option(help = "Enables card marking for image heap objects, which arranges them in chunks. Automatically enabled when supported. Serial GC only.", type = OptionType.Expert) // - public static final HostedOptionKey ImageHeapCardMarking = new HostedOptionKey<>(null, SerialGCOptions::serialGCOnly); + @Option(help = "Enables card marking for image heap objects, which arranges them in chunks. Automatically enabled when supported. 
Serial and parallel GC only.", type = OptionType.Expert) // + public static final HostedOptionKey ImageHeapCardMarking = new HostedOptionKey<>(null, SerialGCOptions::serialOrParallelGCOnly); @Option(help = "This number of milliseconds multiplied by the free heap memory in MByte is the time span " + - "for which a soft reference will keep its referent alive after its last access. Serial GC only.", type = OptionType.Expert) // - public static final HostedOptionKey SoftRefLRUPolicyMSPerMB = new HostedOptionKey<>(1000, SerialGCOptions::serialGCOnly); + "for which a soft reference will keep its referent alive after its last access. Serial and parallel GC only.", type = OptionType.Expert) // + public static final HostedOptionKey SoftRefLRUPolicyMSPerMB = new HostedOptionKey<>(1000, SerialGCOptions::serialOrParallelGCOnly); - @Option(help = "Print the shape of the heap before and after each collection, if +VerboseGC. Serial GC only.", type = OptionType.Debug)// - public static final RuntimeOptionKey PrintHeapShape = new RuntimeOptionKey<>(false, SerialGCOptions::serialGCOnly); + @Option(help = "Print the shape of the heap before and after each collection, if +VerboseGC. Serial and parallel GC only.", type = OptionType.Debug)// + public static final RuntimeOptionKey PrintHeapShape = new RuntimeOptionKey<>(false, SerialGCOptions::serialOrParallelGCOnly); - @Option(help = "Print summary GC information after application main method returns. Serial GC only.", type = OptionType.Debug)// - public static final RuntimeOptionKey PrintGCSummary = new RuntimeOptionKey<>(false, SerialGCOptions::serialGCOnly); + @Option(help = "Print summary GC information after application main method returns. Serial and parallel GC only.", type = OptionType.Debug)// + public static final RuntimeOptionKey PrintGCSummary = new RuntimeOptionKey<>(false, SerialGCOptions::serialOrParallelGCOnly); - @Option(help = "Print a time stamp at each collection, if +PrintGC or +VerboseGC. 
Serial GC only.", type = OptionType.Debug)// - public static final RuntimeOptionKey PrintGCTimeStamps = new RuntimeOptionKey<>(false, SerialGCOptions::serialGCOnly); + @Option(help = "Print a time stamp at each collection, if +PrintGC or +VerboseGC. Serial and parallel GC only.", type = OptionType.Debug)// + public static final RuntimeOptionKey PrintGCTimeStamps = new RuntimeOptionKey<>(false, SerialGCOptions::serialOrParallelGCOnly); - @Option(help = "Print the time for each of the phases of each collection, if +VerboseGC. Serial GC only.", type = OptionType.Debug)// - public static final RuntimeOptionKey PrintGCTimes = new RuntimeOptionKey<>(false, SerialGCOptions::serialGCOnly); + @Option(help = "Print the time for each of the phases of each collection, if +VerboseGC. Serial and parallel GC only.", type = OptionType.Debug)// + public static final RuntimeOptionKey PrintGCTimes = new RuntimeOptionKey<>(false, SerialGCOptions::serialOrParallelGCOnly); - @Option(help = "Instrument write barriers with counters. Serial GC only.", type = OptionType.Debug)// - public static final HostedOptionKey CountWriteBarriers = new HostedOptionKey<>(false, SerialGCOptions::serialGCOnly); + @Option(help = "Instrument write barriers with counters. Serial and parallel GC only.", type = OptionType.Debug)// + public static final HostedOptionKey CountWriteBarriers = new HostedOptionKey<>(false, SerialGCOptions::serialOrParallelGCOnly); - @Option(help = "Verify the heap before doing a garbage collection if VerifyHeap is enabled. Serial GC only.", type = OptionType.Debug)// - public static final HostedOptionKey VerifyBeforeGC = new HostedOptionKey<>(true, SerialGCOptions::serialGCOnly); + @Option(help = "Verify the heap before doing a garbage collection if VerifyHeap is enabled. 
Serial and parallel GC only.", type = OptionType.Debug)// + public static final HostedOptionKey VerifyBeforeGC = new HostedOptionKey<>(true, SerialGCOptions::serialOrParallelGCOnly); - @Option(help = "Verify the heap after doing a garbage collection if VerifyHeap is enabled. Serial GC only.", type = OptionType.Debug)// - public static final HostedOptionKey VerifyAfterGC = new HostedOptionKey<>(true, SerialGCOptions::serialGCOnly); + @Option(help = "Verify the heap after doing a garbage collection if VerifyHeap is enabled. Serial and parallel GC only.", type = OptionType.Debug)// + public static final HostedOptionKey VerifyAfterGC = new HostedOptionKey<>(true, SerialGCOptions::serialOrParallelGCOnly); - @Option(help = "Verify the remembered set if VerifyHeap is enabled. Serial GC only.", type = OptionType.Debug)// - public static final HostedOptionKey VerifyRememberedSet = new HostedOptionKey<>(true, SerialGCOptions::serialGCOnly); + @Option(help = "Verify the remembered set if VerifyHeap is enabled. Serial and parallel GC only.", type = OptionType.Debug)// + public static final HostedOptionKey VerifyRememberedSet = new HostedOptionKey<>(true, SerialGCOptions::serialOrParallelGCOnly); - @Option(help = "Verify all object references if VerifyHeap is enabled. Serial GC only.", type = OptionType.Debug)// - public static final HostedOptionKey VerifyReferences = new HostedOptionKey<>(true, SerialGCOptions::serialGCOnly); + @Option(help = "Verify all object references if VerifyHeap is enabled. Serial and parallel GC only.", type = OptionType.Debug)// + public static final HostedOptionKey VerifyReferences = new HostedOptionKey<>(true, SerialGCOptions::serialOrParallelGCOnly); - @Option(help = "Verify that object references point into valid heap chunks if VerifyHeap is enabled. 
Serial GC only.", type = OptionType.Debug)// - public static final HostedOptionKey VerifyReferencesPointIntoValidChunk = new HostedOptionKey<>(false, SerialGCOptions::serialGCOnly); + @Option(help = "Verify that object references point into valid heap chunks if VerifyHeap is enabled. Serial and parallel GC only.", type = OptionType.Debug)// + public static final HostedOptionKey VerifyReferencesPointIntoValidChunk = new HostedOptionKey<>(false, SerialGCOptions::serialOrParallelGCOnly); - @Option(help = "Verify write barriers. Serial GC only.", type = OptionType.Debug)// - public static final HostedOptionKey VerifyWriteBarriers = new HostedOptionKey<>(false, SerialGCOptions::serialGCOnly); + @Option(help = "Verify write barriers. Serial and parallel GC only.", type = OptionType.Debug)// + public static final HostedOptionKey VerifyWriteBarriers = new HostedOptionKey<>(false, SerialGCOptions::serialOrParallelGCOnly); - @Option(help = "Trace heap chunks during collections, if +VerboseGC and +PrintHeapShape. Serial GC only.", type = OptionType.Debug) // - public static final RuntimeOptionKey TraceHeapChunks = new RuntimeOptionKey<>(false, SerialGCOptions::serialGCOnly); + @Option(help = "Trace heap chunks during collections, if +VerboseGC and +PrintHeapShape. Serial and parallel GC only.", type = OptionType.Debug) // + public static final RuntimeOptionKey TraceHeapChunks = new RuntimeOptionKey<>(false, SerialGCOptions::serialOrParallelGCOnly); - @Option(help = "Develop demographics of the object references visited. Serial GC only.", type = OptionType.Debug)// - public static final HostedOptionKey GreyToBlackObjRefDemographics = new HostedOptionKey<>(false, SerialGCOptions::serialGCOnly); + @Option(help = "Develop demographics of the object references visited. 
Serial and parallel GC only.", type = OptionType.Debug)// + public static final HostedOptionKey GreyToBlackObjRefDemographics = new HostedOptionKey<>(false, SerialGCOptions::serialOrParallelGCOnly); private SerialGCOptions() { } - private static void serialGCOnly(OptionKey optionKey) { - if (!SubstrateOptions.UseSerialGC.getValue()) { - throw new InterruptImageBuilding("The option '" + optionKey.getName() + "' can only be used together with the serial garbage collector ('--gc=serial')."); + private static void serialOrParallelGCOnly(OptionKey optionKey) { + if (!SubstrateOptions.useSerialOrParallelGC()) { + throw new InterruptImageBuilding("The option '" + optionKey.getName() + "' can only be used together with the serial ('--gc=serial') or parallel garbage collector ('--gc=parallel')."); } } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java index 281dcbfc8e84..d14603da7207 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java @@ -43,6 +43,7 @@ import com.oracle.svm.core.UnmanagedMemoryUtil; import com.oracle.svm.core.config.ConfigurationValues; import com.oracle.svm.core.genscavenge.GCImpl.ChunkReleaser; +import com.oracle.svm.core.genscavenge.parallel.ParallelGC; import com.oracle.svm.core.genscavenge.remset.RememberedSet; import com.oracle.svm.core.heap.ObjectHeader; import com.oracle.svm.core.heap.ObjectVisitor; @@ -96,13 +97,16 @@ public String getName() { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public boolean isEmpty() { - return (getFirstAlignedHeapChunk().isNull() && getFirstUnalignedHeapChunk().isNull()); + return firstAlignedHeapChunk.isNull() && firstUnalignedHeapChunk.isNull(); } @Uninterruptible(reason 
= "Called from uninterruptible code.", mayBeInlined = true) void tearDown() { - HeapChunkProvider.freeAlignedChunkList(getFirstAlignedHeapChunk()); - HeapChunkProvider.freeUnalignedChunkList(getFirstUnalignedHeapChunk()); + HeapChunkProvider.freeAlignedChunkList(firstAlignedHeapChunk); + firstAlignedHeapChunk = WordFactory.nullPointer(); + + HeapChunkProvider.freeUnalignedChunkList(firstUnalignedHeapChunk); + firstUnalignedHeapChunk = WordFactory.nullPointer(); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) @@ -141,14 +145,14 @@ boolean isFromSpace() { } public boolean walkObjects(ObjectVisitor visitor) { - AlignedHeapChunk.AlignedHeader aChunk = getFirstAlignedHeapChunk(); + AlignedHeapChunk.AlignedHeader aChunk = firstAlignedHeapChunk; while (aChunk.isNonNull()) { if (!AlignedHeapChunk.walkObjects(aChunk, visitor)) { return false; } aChunk = HeapChunk.getNext(aChunk); } - UnalignedHeapChunk.UnalignedHeader uChunk = getFirstUnalignedHeapChunk(); + UnalignedHeapChunk.UnalignedHeader uChunk = firstUnalignedHeapChunk; while (uChunk.isNonNull()) { if (!UnalignedHeapChunk.walkObjects(uChunk, visitor)) { return false; @@ -163,8 +167,8 @@ public Log report(Log log, boolean traceHeapChunks) { log.string(getName()).string(":").indent(true); accounting.report(log); if (traceHeapChunks) { - HeapChunkLogging.logChunks(log, getFirstAlignedHeapChunk()); - HeapChunkLogging.logChunks(log, getFirstUnalignedHeapChunk()); + HeapChunkLogging.logChunks(log, firstAlignedHeapChunk); + HeapChunkLogging.logChunks(log, firstUnalignedHeapChunk); } log.redent(false); return log; @@ -176,19 +180,72 @@ public Log report(Log log, boolean traceHeapChunks) { @AlwaysInline("GC performance") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) private Pointer allocateMemory(UnsignedWord objectSize) { - Pointer result = WordFactory.nullPointer(); + if (ParallelGC.isEnabled() && GCImpl.getGCImpl().isCompleteCollection()) { + return 
allocateMemoryParallel(objectSize); + } + return allocateMemorySerial(objectSize); + } + + @AlwaysInline("GC performance") + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + private Pointer allocateMemorySerial(UnsignedWord objectSize) { + assert !ParallelGC.isEnabled() || !GCImpl.getGCImpl().isCompleteCollection(); + /* Fast-path: try allocating in the last chunk. */ - AlignedHeapChunk.AlignedHeader oldChunk = getLastAlignedHeapChunk(); + AlignedHeapChunk.AlignedHeader oldChunk = lastAlignedHeapChunk; if (oldChunk.isNonNull()) { - result = AlignedHeapChunk.allocateMemory(oldChunk, objectSize); - } - if (result.isNonNull()) { - return result; + Pointer result = AlignedHeapChunk.allocateMemory(oldChunk, objectSize); + if (result.isNonNull()) { + return result; + } } /* Slow-path: try allocating a new chunk for the requested memory. */ return allocateInNewChunk(objectSize); } + @AlwaysInline("GC performance") + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + private Pointer allocateMemoryParallel(UnsignedWord objectSize) { + /* Fast-path: try allocating in the thread local allocation chunk. */ + AlignedHeapChunk.AlignedHeader oldChunk = ParallelGC.singleton().getAllocationChunk(); + if (oldChunk.isNonNull()) { + Pointer result = AlignedHeapChunk.allocateMemory(oldChunk, objectSize); + if (result.isNonNull()) { + return result; + } + } + /* Slow-path: try allocating a new chunk for the requested memory. 
*/ + return allocateInNewChunkParallel(objectSize); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + private Pointer allocateInNewChunkParallel(UnsignedWord objectSize) { + AlignedHeapChunk.AlignedHeader newChunk; + ParallelGC.singleton().getMutex().lockNoTransitionUnspecifiedOwner(); + try { + ParallelGC.singleton().pushAllocChunk(); + newChunk = requestAlignedHeapChunk(); + } finally { + ParallelGC.singleton().getMutex().unlockNoTransitionUnspecifiedOwner(); + } + + ParallelGC.singleton().setAllocationChunk(newChunk); + if (newChunk.isNonNull()) { + return AlignedHeapChunk.allocateMemory(newChunk, objectSize); + } + return WordFactory.nullPointer(); + } + + /** Retract the latest allocation. */ + @AlwaysInline("GC performance") + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + private static void retractAllocationParallel(UnsignedWord objectSize) { + assert ParallelGC.isEnabled() && ParallelGC.singleton().isInParallelPhase(); + AlignedHeapChunk.AlignedHeader chunk = ParallelGC.singleton().getAllocationChunk(); + assert chunk.isNonNull(); + AlignedHeapChunk.retractAllocation(chunk, objectSize); + } + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) private Pointer allocateInNewChunk(UnsignedWord objectSize) { AlignedHeapChunk.AlignedHeader newChunk = requestAlignedHeapChunk(); @@ -210,123 +267,109 @@ public void releaseChunks(ChunkReleaser chunkReleaser) { accounting.reset(); } - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - void appendAlignedHeapChunk(AlignedHeapChunk.AlignedHeader aChunk) { - /* - * This method is used from {@link PosixJavaThreads#detachThread(VMThread)}, so it can not - * guarantee that it is inside a VMOperation, only that there is some mutual exclusion. 
- */ - if (SubstrateOptions.MultiThreaded.getValue()) { - VMThreads.guaranteeOwnsThreadMutex("Trying to append an aligned heap chunk but no mutual exclusion.", true); + @Uninterruptible(reason = "Must not interact with garbage collections.") + void appendAlignedHeapChunk(AlignedHeapChunk.AlignedHeader aChunk, Space originalSpace) { + assert verifyMutualExclusionForAppendChunk() : "Trying to append an aligned heap chunk but no mutual exclusion."; + assert HeapChunk.getSpace(aChunk) == originalSpace; + assert this != originalSpace; + + if (originalSpace != null) { + originalSpace.extractAlignedHeapChunk(aChunk); } - appendAlignedHeapChunkUninterruptibly(aChunk); - accounting.noteAlignedHeapChunk(); - } - @Uninterruptible(reason = "Must not interact with garbage collections.") - private void appendAlignedHeapChunkUninterruptibly(AlignedHeapChunk.AlignedHeader aChunk) { - AlignedHeapChunk.AlignedHeader oldLast = getLastAlignedHeapChunk(); HeapChunk.setSpace(aChunk, this); + AlignedHeapChunk.AlignedHeader oldLast = lastAlignedHeapChunk; HeapChunk.setPrevious(aChunk, oldLast); HeapChunk.setNext(aChunk, WordFactory.nullPointer()); if (oldLast.isNonNull()) { HeapChunk.setNext(oldLast, aChunk); } - setLastAlignedHeapChunk(aChunk); - if (getFirstAlignedHeapChunk().isNull()) { - setFirstAlignedHeapChunk(aChunk); + lastAlignedHeapChunk = aChunk; + if (firstAlignedHeapChunk.isNull()) { + firstAlignedHeapChunk = aChunk; } + accounting.noteAlignedHeapChunk(); } - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - void extractAlignedHeapChunk(AlignedHeapChunk.AlignedHeader aChunk) { - assert VMOperation.isGCInProgress() : "Should only be called by the collector."; - extractAlignedHeapChunkUninterruptibly(aChunk); - accounting.unnoteAlignedHeapChunk(); + @Uninterruptible(reason = "Must not interact with garbage collections.") + void appendUnalignedHeapChunk(UnalignedHeapChunk.UnalignedHeader uChunk, Space originalSpace) { + assert 
verifyMutualExclusionForAppendChunk() : "Trying to append an aligned heap chunk but no mutual exclusion."; + assert HeapChunk.getSpace(uChunk) == originalSpace; + assert this != originalSpace; + + if (originalSpace != null) { + originalSpace.extractUnalignedHeapChunk(uChunk); + } + + HeapChunk.setSpace(uChunk, this); + UnalignedHeapChunk.UnalignedHeader oldLast = lastUnalignedHeapChunk; + HeapChunk.setPrevious(uChunk, oldLast); + HeapChunk.setNext(uChunk, WordFactory.nullPointer()); + if (oldLast.isNonNull()) { + HeapChunk.setNext(oldLast, uChunk); + } + lastUnalignedHeapChunk = uChunk; + if (firstUnalignedHeapChunk.isNull()) { + firstUnalignedHeapChunk = uChunk; + } + accounting.noteUnalignedHeapChunk(uChunk); } @Uninterruptible(reason = "Must not interact with garbage collections.") - private void extractAlignedHeapChunkUninterruptibly(AlignedHeapChunk.AlignedHeader aChunk) { + private void extractAlignedHeapChunk(AlignedHeapChunk.AlignedHeader aChunk) { + assert VMOperation.isGCInProgress(); + AlignedHeapChunk.AlignedHeader chunkNext = HeapChunk.getNext(aChunk); AlignedHeapChunk.AlignedHeader chunkPrev = HeapChunk.getPrevious(aChunk); if (chunkPrev.isNonNull()) { HeapChunk.setNext(chunkPrev, chunkNext); } else { - setFirstAlignedHeapChunk(chunkNext); + firstAlignedHeapChunk = chunkNext; } if (chunkNext.isNonNull()) { HeapChunk.setPrevious(chunkNext, chunkPrev); } else { - setLastAlignedHeapChunk(chunkPrev); + lastAlignedHeapChunk = chunkPrev; } HeapChunk.setNext(aChunk, WordFactory.nullPointer()); HeapChunk.setPrevious(aChunk, WordFactory.nullPointer()); HeapChunk.setSpace(aChunk, null); - } - - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - void appendUnalignedHeapChunk(UnalignedHeapChunk.UnalignedHeader uChunk) { - /* - * This method is used from {@link PosixJavaThreads#detachThread(VMThread)}, so it can not - * guarantee that it is inside a VMOperation, only that there is some mutual exclusion. 
- */ - if (SubstrateOptions.MultiThreaded.getValue()) { - VMThreads.guaranteeOwnsThreadMutex("Trying to append an unaligned chunk but no mutual exclusion.", true); - } - appendUnalignedHeapChunkUninterruptibly(uChunk); - accounting.noteUnalignedHeapChunk(uChunk); + accounting.unnoteAlignedHeapChunk(); } @Uninterruptible(reason = "Must not interact with garbage collections.") - private void appendUnalignedHeapChunkUninterruptibly(UnalignedHeapChunk.UnalignedHeader uChunk) { - UnalignedHeapChunk.UnalignedHeader oldLast = getLastUnalignedHeapChunk(); - HeapChunk.setSpace(uChunk, this); - HeapChunk.setPrevious(uChunk, oldLast); - HeapChunk.setNext(uChunk, WordFactory.nullPointer()); - if (oldLast.isNonNull()) { - HeapChunk.setNext(oldLast, uChunk); - } - setLastUnalignedHeapChunk(uChunk); - if (getFirstUnalignedHeapChunk().isNull()) { - setFirstUnalignedHeapChunk(uChunk); - } - } - - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - void extractUnalignedHeapChunk(UnalignedHeapChunk.UnalignedHeader uChunk) { - assert VMOperation.isGCInProgress() : "Trying to extract an unaligned chunk but not in a VMOperation."; - extractUnalignedHeapChunkUninterruptibly(uChunk); - accounting.unnoteUnalignedHeapChunk(uChunk); - } + private void extractUnalignedHeapChunk(UnalignedHeapChunk.UnalignedHeader uChunk) { + assert VMOperation.isGCInProgress(); - @Uninterruptible(reason = "Must not interact with garbage collections.") - private void extractUnalignedHeapChunkUninterruptibly(UnalignedHeapChunk.UnalignedHeader uChunk) { UnalignedHeapChunk.UnalignedHeader chunkNext = HeapChunk.getNext(uChunk); UnalignedHeapChunk.UnalignedHeader chunkPrev = HeapChunk.getPrevious(uChunk); if (chunkPrev.isNonNull()) { HeapChunk.setNext(chunkPrev, chunkNext); } else { - setFirstUnalignedHeapChunk(chunkNext); + firstUnalignedHeapChunk = chunkNext; } if (chunkNext.isNonNull()) { HeapChunk.setPrevious(chunkNext, chunkPrev); } else { - 
setLastUnalignedHeapChunk(chunkPrev); + lastUnalignedHeapChunk = chunkPrev; } /* Reset the fields that the result chunk keeps for Space. */ HeapChunk.setNext(uChunk, WordFactory.nullPointer()); HeapChunk.setPrevious(uChunk, WordFactory.nullPointer()); HeapChunk.setSpace(uChunk, null); + accounting.unnoteUnalignedHeapChunk(uChunk); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public AlignedHeapChunk.AlignedHeader getFirstAlignedHeapChunk() { - return firstAlignedHeapChunk; + private static boolean verifyMutualExclusionForAppendChunk() { + return !SubstrateOptions.MultiThreaded.getValue() || + VMThreads.ownsThreadMutex(true) || + ParallelGC.isEnabled() && VMOperation.isGCInProgress() && ParallelGC.singleton().isInParallelPhase() && ParallelGC.singleton().getMutex().isOwner(true); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - private void setFirstAlignedHeapChunk(AlignedHeapChunk.AlignedHeader chunk) { - firstAlignedHeapChunk = chunk; + public AlignedHeapChunk.AlignedHeader getFirstAlignedHeapChunk() { + return firstAlignedHeapChunk; } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) @@ -334,31 +377,16 @@ AlignedHeapChunk.AlignedHeader getLastAlignedHeapChunk() { return lastAlignedHeapChunk; } - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - private void setLastAlignedHeapChunk(AlignedHeapChunk.AlignedHeader chunk) { - lastAlignedHeapChunk = chunk; - } - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public UnalignedHeapChunk.UnalignedHeader getFirstUnalignedHeapChunk() { return firstUnalignedHeapChunk; } - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - private void setFirstUnalignedHeapChunk(UnalignedHeapChunk.UnalignedHeader chunk) { - this.firstUnalignedHeapChunk = chunk; - } - @Uninterruptible(reason = "Called from uninterruptible 
code.", mayBeInlined = true) UnalignedHeapChunk.UnalignedHeader getLastUnalignedHeapChunk() { return lastUnalignedHeapChunk; } - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - private void setLastUnalignedHeapChunk(UnalignedHeapChunk.UnalignedHeader chunk) { - lastUnalignedHeapChunk = chunk; - } - /** Promote an aligned Object to this Space. */ @AlwaysInline("GC performance") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) @@ -366,16 +394,15 @@ Object promoteAlignedObject(Object original, Space originalSpace) { assert ObjectHeaderImpl.isAlignedObject(original); assert this != originalSpace && originalSpace.isFromSpace(); - Object copy = copyAlignedObject(original); - if (copy != null) { - ObjectHeaderImpl.getObjectHeaderImpl().installForwardingPointer(original, copy); + if (ParallelGC.isEnabled() && ParallelGC.singleton().isInParallelPhase()) { + return copyAlignedObjectParallel(original); } - return copy; + return copyAlignedObjectSerial(original); } @AlwaysInline("GC performance") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - private Object copyAlignedObject(Object originalObj) { + private Object copyAlignedObjectSerial(Object originalObj) { assert VMOperation.isGCInProgress(); assert ObjectHeaderImpl.isAlignedObject(originalObj); @@ -400,17 +427,104 @@ private Object copyAlignedObject(Object originalObj) { * references. That's okay, because all references in the copy are visited and overwritten * later on anyways (the card table is also updated at that point if necessary). 
*/ - Pointer originalMemory = Word.objectToUntrackedPointer(originalObj); + Word originalMemory = Word.objectToUntrackedPointer(originalObj); UnmanagedMemoryUtil.copyLongsForward(originalMemory, copyMemory, originalSize); Object copy = copyMemory.toObject(); if (probability(SLOW_PATH_PROBABILITY, addIdentityHashField)) { // Must do first: ensures correct object size below and in other places - int value = IdentityHashCodeSupport.computeHashCodeFromAddress(originalObj); + AlignedHeapChunk.AlignedHeader originalChunk = AlignedHeapChunk.getEnclosingChunkFromObjectPointer(originalMemory); + int value = IdentityHashCodeSupport.computeHashCodeFromAddress(originalMemory, HeapChunk.getIdentityHashSalt(originalChunk)); int offset = LayoutEncoding.getOptionalIdentityHashOffset(copy); ObjectAccess.writeInt(copy, offset, value, IdentityHashCodeSupport.IDENTITY_HASHCODE_LOCATION); ObjectHeaderImpl.getObjectHeaderImpl().setIdentityHashInField(copy); } + if (isOldSpace()) { + // If the object was promoted to the old gen, we need to take care of the remembered + // set bit and the first object table (even when promoting from old to old). + AlignedHeapChunk.AlignedHeader copyChunk = AlignedHeapChunk.getEnclosingChunk(copy); + RememberedSet.get().enableRememberedSetForObject(copyChunk, copy); + } + + ObjectHeaderImpl.getObjectHeaderImpl().installForwardingPointer(originalObj, copy); + return copy; + } + + @AlwaysInline("GC performance") + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + private Object copyAlignedObjectParallel(Object original) { + assert VMOperation.isGCInProgress(); + + /* + * The GC worker thread doesn't own the object yet, so the 8 bytes starting at the hub + * offset can be changed at any time (if another GC worker thread forwards the object). Note + * that those bytes may also include data such as the array length. 
+ * + * So, we read the 8 bytes at the hub offset once and then extract all necessary data from + * those bytes. This is necessary to avoid races. + */ + Word originalMemory = Word.objectToUntrackedPointer(original); + int hubOffset = ObjectHeader.getHubOffset(); + long eightHeaderBytes = originalMemory.readLong(hubOffset); + Word originalHeader = ObjectHeaderImpl.hasShift() ? WordFactory.unsigned(eightHeaderBytes & 0xFFFFFFFFL) : WordFactory.unsigned(eightHeaderBytes); + assert ObjectHeaderImpl.isAlignedHeader(originalHeader); + + ObjectHeaderImpl ohi = ObjectHeaderImpl.getObjectHeaderImpl(); + if (ObjectHeaderImpl.isForwardedHeader(originalHeader)) { + return ohi.getForwardedObject(originalMemory, originalHeader); + } + + /* + * We need the forwarding pointer to point somewhere, so we speculatively allocate memory + * here. If another thread copies the object first, we retract the allocation later. + */ + UnsignedWord originalSize = LayoutEncoding.getSizeFromHeader(original, originalHeader, eightHeaderBytes, false); + UnsignedWord copySize = originalSize; + boolean addIdentityHashField = false; + if (!ConfigurationValues.getObjectLayout().hasFixedIdentityHashField()) { + if (probability(SLOW_PATH_PROBABILITY, ObjectHeaderImpl.hasIdentityHashFromAddressInline(originalHeader))) { + addIdentityHashField = true; + copySize = LayoutEncoding.getSizeFromHeader(original, originalHeader, eightHeaderBytes, true); + } + } + + assert copySize.aboveThan(0); + Pointer copyMemory = allocateMemoryParallel(copySize); + if (probability(VERY_SLOW_PATH_PROBABILITY, copyMemory.isNull())) { + return null; + } + + /* + * It's important that we set the RS bit before everything else because + * YoungGeneration.contains() checks it. + */ + long copyHeaderBytes = isOldSpace() ? ObjectHeaderImpl.setRememberedSetBit(eightHeaderBytes) : eightHeaderBytes; + copyMemory.writeLong(hubOffset, copyHeaderBytes); + + /* Install forwarding pointer into the original header. 
*/ + Object copy = copyMemory.toObject(); + Object forward = ohi.installForwardingPointerParallel(original, eightHeaderBytes, copy); + if (forward != copy) { + /* We lost the race. Retract speculatively allocated memory. */ + retractAllocationParallel(copySize); + return forward; + } + + /* We have won the race. Copy the rest of the object. */ + if (hubOffset > 0) { + UnmanagedMemoryUtil.copyLongsForward(originalMemory, copyMemory, WordFactory.unsigned(hubOffset)); + } + int offset = hubOffset + Long.BYTES; + UnmanagedMemoryUtil.copyLongsForward(originalMemory.add(offset), copyMemory.add(offset), originalSize.subtract(offset)); + + if (probability(SLOW_PATH_PROBABILITY, addIdentityHashField)) { + AlignedHeapChunk.AlignedHeader originalChunk = AlignedHeapChunk.getEnclosingChunkFromObjectPointer(originalMemory); + int value = IdentityHashCodeSupport.computeHashCodeFromAddress(originalMemory, HeapChunk.getIdentityHashSalt(originalChunk)); + offset = LayoutEncoding.getOptionalIdentityHashOffset(copy); + ObjectAccess.writeInt(copy, offset, value, IdentityHashCodeSupport.IDENTITY_HASHCODE_LOCATION); + ObjectHeaderImpl.getObjectHeaderImpl().setIdentityHashInField(copy); + } + if (isOldSpace()) { // If the object was promoted to the old gen, we need to take care of the remembered // set bit and the first object table (even when promoting from old to old). @@ -422,12 +536,14 @@ private Object copyAlignedObject(Object originalObj) { /** Promote an AlignedHeapChunk by moving it to this space. 
*/ @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - void promoteAlignedHeapChunk(AlignedHeapChunk.AlignedHeader chunk, Space originalSpace) { - assert this != originalSpace && originalSpace.isFromSpace(); + void promoteAlignedHeapChunk(AlignedHeapChunk.AlignedHeader chunk) { + assert !(ParallelGC.isEnabled() && ParallelGC.singleton().isInParallelPhase()); - originalSpace.extractAlignedHeapChunk(chunk); - appendAlignedHeapChunk(chunk); + Space originalSpace = HeapChunk.getSpace(chunk); + assert originalSpace.isFromSpace(); + assert !this.isFromSpace(); + appendAlignedHeapChunk(chunk, originalSpace); if (this.isOldSpace()) { if (originalSpace.isYoungSpace()) { RememberedSet.get().enableRememberedSetForChunk(chunk); @@ -436,16 +552,51 @@ void promoteAlignedHeapChunk(AlignedHeapChunk.AlignedHeader chunk, Space origina RememberedSet.get().clearRememberedSet(chunk); } } + + if (ParallelGC.isEnabled() && GCImpl.getGCImpl().isCompleteCollection()) { + ParallelGC.singleton().push(chunk); + } } /** Promote an UnalignedHeapChunk by moving it to this Space. 
*/ @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - void promoteUnalignedHeapChunk(UnalignedHeapChunk.UnalignedHeader chunk, Space originalSpace) { - assert this != originalSpace && originalSpace.isFromSpace(); + void promoteUnalignedHeapChunk(UnalignedHeapChunk.UnalignedHeader chunk) { + if (ParallelGC.isEnabled() && ParallelGC.singleton().isInParallelPhase()) { + promoteUnalignedHeapChunkParallel(chunk); + } else { + promoteUnalignedHeapChunkSerial(chunk); + } + } - originalSpace.extractUnalignedHeapChunk(chunk); - appendUnalignedHeapChunk(chunk); + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + private void promoteUnalignedHeapChunkSerial(UnalignedHeapChunk.UnalignedHeader chunk) { + Space originalSpace = HeapChunk.getSpace(chunk); + promoteUnalignedHeapChunk0(chunk, originalSpace); + if (ParallelGC.isEnabled() && GCImpl.getGCImpl().isCompleteCollection()) { + ParallelGC.singleton().push(chunk); + } + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + private void promoteUnalignedHeapChunkParallel(UnalignedHeapChunk.UnalignedHeader chunk) { + ParallelGC.singleton().getMutex().lockNoTransitionUnspecifiedOwner(); + try { + Space originalSpace = HeapChunk.getSpace(chunk); + if (!originalSpace.isFromSpace()) { + /* The chunk was already promoted in the meanwhile. 
*/ + return; + } + promoteUnalignedHeapChunk0(chunk, originalSpace); + ParallelGC.singleton().push(chunk); + } finally { + ParallelGC.singleton().getMutex().unlockNoTransitionUnspecifiedOwner(); + } + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + private void promoteUnalignedHeapChunk0(UnalignedHeapChunk.UnalignedHeader chunk, Space originalSpace) { + assert originalSpace.isFromSpace(); if (this.isOldSpace()) { if (originalSpace.isYoungSpace()) { RememberedSet.get().enableRememberedSetForChunk(chunk); @@ -454,6 +605,7 @@ void promoteUnalignedHeapChunk(UnalignedHeapChunk.UnalignedHeader chunk, Space o RememberedSet.get().clearRememberedSet(chunk); } } + appendUnalignedHeapChunk(chunk, originalSpace); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) @@ -466,7 +618,7 @@ private AlignedHeapChunk.AlignedHeader requestAlignedHeapChunk() { chunk = HeapImpl.getHeapImpl().getOldGeneration().requestAlignedChunk(); } if (chunk.isNonNull()) { - appendAlignedHeapChunk(chunk); + appendAlignedHeapChunk(chunk, null); } return chunk; } @@ -480,27 +632,25 @@ void absorb(Space src) { AlignedHeapChunk.AlignedHeader aChunk = src.getFirstAlignedHeapChunk(); while (aChunk.isNonNull()) { AlignedHeapChunk.AlignedHeader next = HeapChunk.getNext(aChunk); - src.extractAlignedHeapChunk(aChunk); - appendAlignedHeapChunk(aChunk); + appendAlignedHeapChunk(aChunk, src); aChunk = next; } UnalignedHeapChunk.UnalignedHeader uChunk = src.getFirstUnalignedHeapChunk(); while (uChunk.isNonNull()) { UnalignedHeapChunk.UnalignedHeader next = HeapChunk.getNext(uChunk); - src.extractUnalignedHeapChunk(uChunk); - appendUnalignedHeapChunk(uChunk); + appendUnalignedHeapChunk(uChunk, src); uChunk = next; } } boolean walkHeapChunks(MemoryWalker.Visitor visitor) { boolean continueVisiting = true; - AlignedHeapChunk.AlignedHeader aChunk = getFirstAlignedHeapChunk(); + AlignedHeapChunk.AlignedHeader aChunk = firstAlignedHeapChunk; 
while (continueVisiting && aChunk.isNonNull()) { continueVisiting = visitor.visitHeapChunk(aChunk, AlignedHeapChunk.getMemoryWalkerAccess()); aChunk = HeapChunk.getNext(aChunk); } - UnalignedHeapChunk.UnalignedHeader uChunk = getFirstUnalignedHeapChunk(); + UnalignedHeapChunk.UnalignedHeader uChunk = firstUnalignedHeapChunk; while (continueVisiting && uChunk.isNonNull()) { continueVisiting = visitor.visitHeapChunk(uChunk, UnalignedHeapChunk.getMemoryWalkerAccess()); uChunk = HeapChunk.getNext(uChunk); @@ -530,7 +680,7 @@ UnsignedWord computeObjectBytes() { private UnsignedWord computeAlignedObjectBytes() { UnsignedWord result = WordFactory.zero(); - AlignedHeapChunk.AlignedHeader aChunk = getFirstAlignedHeapChunk(); + AlignedHeapChunk.AlignedHeader aChunk = firstAlignedHeapChunk; while (aChunk.isNonNull()) { UnsignedWord allocatedBytes = HeapChunk.getTopOffset(aChunk).subtract(AlignedHeapChunk.getObjectsStartOffset()); result = result.add(allocatedBytes); @@ -541,7 +691,7 @@ private UnsignedWord computeAlignedObjectBytes() { private UnsignedWord computeUnalignedObjectBytes() { UnsignedWord result = WordFactory.zero(); - UnalignedHeapChunk.UnalignedHeader uChunk = getFirstUnalignedHeapChunk(); + UnalignedHeapChunk.UnalignedHeader uChunk = firstUnalignedHeapChunk; while (uChunk.isNonNull()) { UnsignedWord allocatedBytes = HeapChunk.getTopOffset(uChunk).subtract(UnalignedHeapChunk.getObjectStartOffset()); result = result.add(allocatedBytes); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java index e42ad9153ec2..15293ed0ea39 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java @@ -475,14 +475,14 @@ 
private static void retireTlabToEden(IsolateThread thread) { while (alignedChunk.isNonNull()) { AlignedHeader next = HeapChunk.getNext(alignedChunk); HeapChunk.setNext(alignedChunk, WordFactory.nullPointer()); - eden.appendAlignedHeapChunk(alignedChunk); + eden.appendAlignedHeapChunk(alignedChunk, null); alignedChunk = next; } while (unalignedChunk.isNonNull()) { UnalignedHeader next = HeapChunk.getNext(unalignedChunk); HeapChunk.setNext(unalignedChunk, WordFactory.nullPointer()); - eden.appendUnalignedHeapChunk(unalignedChunk); + eden.appendUnalignedHeapChunk(unalignedChunk, null); unalignedChunk = next; } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/UnalignedHeapChunk.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/UnalignedHeapChunk.java index 8a1ef2eaa98f..8911d44cda00 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/UnalignedHeapChunk.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/UnalignedHeapChunk.java @@ -176,10 +176,5 @@ static final class MemoryWalkerAccessImpl extends HeapChunk.MemoryWalkerAccessIm public boolean isAligned(UnalignedHeapChunk.UnalignedHeader heapChunk) { return false; } - - @Override - public UnsignedWord getAllocationStart(UnalignedHeapChunk.UnalignedHeader heapChunk) { - return getObjectStart(heapChunk); - } } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/UseSerialOrEpsilonGC.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/UseSerialOrEpsilonGC.java index 245874b4f0ef..48be7c373ede 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/UseSerialOrEpsilonGC.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/UseSerialOrEpsilonGC.java @@ -32,9 +32,10 @@ import 
com.oracle.svm.core.SubstrateOptions; @Platforms(Platform.HOSTED_ONLY.class) +// TODO (chaeubl): rename public class UseSerialOrEpsilonGC implements BooleanSupplier { @Override public boolean getAsBoolean() { - return SubstrateOptions.UseSerialGC.getValue() || SubstrateOptions.UseEpsilonGC.getValue(); + return SubstrateOptions.useSerialOrParallelOrEpsilonGC(); } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java index d7f337a6a0b4..e378e2e60c6f 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java @@ -253,10 +253,9 @@ public boolean contains(Object object) { @AlwaysInline("GC performance") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - @Override - protected Object promoteAlignedObject(Object original, AlignedHeapChunk.AlignedHeader originalChunk, Space originalSpace) { - assert originalSpace.isFromSpace(); + Object promoteAlignedObject(Object original, Space originalSpace) { assert ObjectHeaderImpl.isAlignedObject(original); + assert originalSpace.isFromSpace(); assert originalSpace.getAge() < maxSurvivorSpaces; // The object might fit in an existing chunk in the survivor space. 
If it doesn't, we get @@ -269,8 +268,7 @@ protected Object promoteAlignedObject(Object original, AlignedHeapChunk.AlignedH @AlwaysInline("GC performance") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - @Override - protected Object promoteUnalignedObject(Object original, UnalignedHeapChunk.UnalignedHeader originalChunk, Space originalSpace) { + Object promoteUnalignedObject(Object original, UnalignedHeapChunk.UnalignedHeader originalChunk, Space originalSpace) { assert originalSpace.isFromSpace(); assert originalSpace.getAge() < maxSurvivorSpaces; if (!unalignedChunkFitsInSurvivors(originalChunk)) { @@ -279,13 +277,12 @@ protected Object promoteUnalignedObject(Object original, UnalignedHeapChunk.Unal int age = originalSpace.getNextAgeForPromotion(); Space toSpace = getSurvivorToSpaceAt(age - 1); - toSpace.promoteUnalignedHeapChunk(originalChunk, originalSpace); + toSpace.promoteUnalignedHeapChunk(originalChunk); return original; } - @Override @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - protected boolean promoteChunk(HeapChunk.Header originalChunk, boolean isAligned, Space originalSpace) { + boolean promoteChunk(HeapChunk.Header originalChunk, boolean isAligned, Space originalSpace) { assert originalSpace.isFromSpace(); assert originalSpace.getAge() < maxSurvivorSpaces; if (!fitsInSurvivors(originalChunk, isAligned)) { @@ -295,9 +292,9 @@ protected boolean promoteChunk(HeapChunk.Header originalChunk, boolean isAlig int age = originalSpace.getNextAgeForPromotion(); Space toSpace = getSurvivorToSpaceAt(age - 1); if (isAligned) { - toSpace.promoteAlignedHeapChunk((AlignedHeapChunk.AlignedHeader) originalChunk, originalSpace); + toSpace.promoteAlignedHeapChunk((AlignedHeapChunk.AlignedHeader) originalChunk); } else { - toSpace.promoteUnalignedHeapChunk((UnalignedHeapChunk.UnalignedHeader) originalChunk, originalSpace); + toSpace.promoteUnalignedHeapChunk((UnalignedHeapChunk.UnalignedHeader) 
originalChunk); } return true; } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/BarrierSnippets.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/BarrierSnippets.java index dae405b49556..90eee97f3691 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/BarrierSnippets.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/BarrierSnippets.java @@ -205,7 +205,7 @@ class BarrierSnippetCounters { class BarrierSnippetCountersFeature implements InternalFeature { @Override public boolean isInConfiguration(IsInConfigurationAccess access) { - return SubstrateOptions.UseSerialGC.getValue() && SubstrateOptions.useRememberedSet(); + return SubstrateOptions.useSerialOrParallelGC() && SubstrateOptions.useRememberedSet(); } @Override diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeGCFeature.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeGCFeature.java index 8216f42ead84..df035266567e 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeGCFeature.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeGCFeature.java @@ -46,6 +46,7 @@ import com.oracle.svm.core.genscavenge.ImageHeapInfo; import com.oracle.svm.core.genscavenge.IncrementalGarbageCollectorMXBean; import com.oracle.svm.core.genscavenge.LinearImageHeapLayouter; +import com.oracle.svm.core.genscavenge.UseSerialOrEpsilonGC; import com.oracle.svm.core.genscavenge.jvmstat.EpsilonGCPerfData; import com.oracle.svm.core.genscavenge.jvmstat.SerialGCPerfData; import com.oracle.svm.core.genscavenge.remset.CardTableBasedRememberedSet; @@ -71,7 +72,7 @@ class GenScavengeGCFeature implements 
InternalFeature { @Override public boolean isInConfiguration(IsInConfigurationAccess access) { - return new com.oracle.svm.core.genscavenge.UseSerialOrEpsilonGC().getAsBoolean(); + return new UseSerialOrEpsilonGC().getAsBoolean(); } @Override @@ -160,7 +161,7 @@ private static RememberedSet createRememberedSet() { } private static PerfDataHolder createPerfData() { - if (SubstrateOptions.UseSerialGC.getValue()) { + if (SubstrateOptions.useSerialOrParallelGC()) { return new SerialGCPerfData(); } else { assert SubstrateOptions.UseEpsilonGC.getValue(); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/jvmstat/SerialGCPerfData.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/jvmstat/SerialGCPerfData.java index 4fe4410401eb..b9e72d73cc38 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/jvmstat/SerialGCPerfData.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/jvmstat/SerialGCPerfData.java @@ -34,6 +34,7 @@ import com.oracle.svm.core.genscavenge.HeapAccounting; import com.oracle.svm.core.genscavenge.HeapImpl; import com.oracle.svm.core.genscavenge.HeapParameters; +import com.oracle.svm.core.genscavenge.parallel.ParallelGC; import com.oracle.svm.core.jvmstat.PerfDataHolder; import com.oracle.svm.core.jvmstat.PerfLongConstant; import com.oracle.svm.core.jvmstat.PerfLongCounter; @@ -45,6 +46,7 @@ /** * Performance data for our serial GC. */ +// TODO (chaeubl): rename this class public class SerialGCPerfData implements PerfDataHolder { private final PerfDataGCPolicy gcPolicy; private final PerfDataCollector youngCollector; @@ -78,7 +80,9 @@ public void allocate() { gcPolicy.allocate(); youngCollector.allocate("Serial young collection pauses"); - oldCollector.allocate("Serial full collection pauses"); + + String oldCollectorName = ParallelGC.isEnabled() ? 
"Parallel" : "Serial"; + oldCollector.allocate(oldCollectorName + " full collection pauses"); youngGen.allocate("young"); youngGen.spaces[0].allocate("eden"); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/parallel/ChunkQueue.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/parallel/ChunkQueue.java new file mode 100644 index 000000000000..b0c76e4d3c0c --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/parallel/ChunkQueue.java @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2022, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2022, BELLSOFT. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package com.oracle.svm.core.genscavenge.parallel; + +import org.graalvm.compiler.api.replacements.Fold; +import org.graalvm.nativeimage.ImageSingletons; +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; +import org.graalvm.nativeimage.impl.UnmanagedMemorySupport; +import org.graalvm.word.Pointer; +import org.graalvm.word.WordFactory; + +import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.config.ConfigurationValues; +import com.oracle.svm.core.thread.VMThreads; +import com.oracle.svm.core.util.VMError; + +/** + * A queue that stores pointers into "grey" heap chunks that need to be scanned. Note that the + * pointers don't necessarily point to the beginning of a chunk. GC workers threads may only access + * the queue if they hold {@link ParallelGC#getMutex()}. + */ +public class ChunkQueue { + private static final int INITIAL_SIZE = 1024 * wordSize(); + + private Pointer buffer; + private int size; + private int top; + + @Fold + static int wordSize() { + return ConfigurationValues.getTarget().wordSize; + } + + @Platforms(Platform.HOSTED_ONLY.class) + ChunkQueue() { + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public void initialize() { + assert top == 0 && size == 0 && buffer.isNull(); + top = 0; + size = INITIAL_SIZE; + buffer = ImageSingletons.lookup(UnmanagedMemorySupport.class).malloc(WordFactory.unsigned(size)); + VMError.guarantee(buffer.isNonNull(), "Failed to allocate native memory for the ChunkBuffer."); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + void push(Pointer ptr) { + assert !ParallelGC.singleton().isInParallelPhase() && VMThreads.ownsThreadMutex() || ParallelGC.singleton().isInParallelPhase() && ParallelGC.singleton().getMutex().isOwner(true); + if (top >= size) { + size *= 2; + assert top < size; + buffer = ImageSingletons.lookup(UnmanagedMemorySupport.class).realloc(buffer, 
WordFactory.unsigned(size)); + VMError.guarantee(buffer.isNonNull(), "Failed to allocate native memory for the ChunkBuffer."); + } + buffer.writeWord(top, ptr); + top += wordSize(); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + Pointer pop() { + assert ParallelGC.singleton().isInParallelPhase() && ParallelGC.singleton().getMutex().isOwner(true); + if (top > 0) { + top -= wordSize(); + return buffer.readWord(top); + } + return WordFactory.nullPointer(); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + boolean isEmpty() { + assert !ParallelGC.singleton().isInParallelPhase(); + return top == 0; + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + void teardown() { + ImageSingletons.lookup(UnmanagedMemorySupport.class).free(buffer); + buffer = WordFactory.nullPointer(); + size = 0; + top = 0; + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/parallel/ParallelGC.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/parallel/ParallelGC.java new file mode 100644 index 000000000000..63531f206db9 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/parallel/ParallelGC.java @@ -0,0 +1,500 @@ +/* + * Copyright (c) 2022, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2022, BELLSOFT. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package com.oracle.svm.core.genscavenge.parallel; + +import java.util.function.BooleanSupplier; + +import org.graalvm.compiler.api.replacements.Fold; +import org.graalvm.nativeimage.CurrentIsolate; +import org.graalvm.nativeimage.ImageSingletons; +import org.graalvm.nativeimage.Isolate; +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; +import org.graalvm.nativeimage.c.function.CEntryPoint; +import org.graalvm.nativeimage.c.function.CEntryPointLiteral; +import org.graalvm.nativeimage.c.function.CFunctionPointer; +import org.graalvm.nativeimage.c.struct.RawField; +import org.graalvm.nativeimage.c.struct.RawStructure; +import org.graalvm.nativeimage.c.struct.SizeOf; +import org.graalvm.nativeimage.impl.UnmanagedMemorySupport; +import org.graalvm.word.Pointer; +import org.graalvm.word.PointerBase; +import org.graalvm.word.UnsignedWord; +import org.graalvm.word.WordFactory; + +import com.oracle.svm.core.NeverInline; +import com.oracle.svm.core.SubstrateGCOptions; +import com.oracle.svm.core.SubstrateOptions; +import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.c.function.CEntryPointOptions; +import com.oracle.svm.core.feature.AutomaticallyRegisteredFeature; +import 
com.oracle.svm.core.feature.InternalFeature; +import com.oracle.svm.core.genscavenge.AlignedHeapChunk; +import com.oracle.svm.core.genscavenge.GCImpl; +import com.oracle.svm.core.genscavenge.HeapChunk; +import com.oracle.svm.core.genscavenge.UnalignedHeapChunk; +import com.oracle.svm.core.graal.nodes.WriteCurrentVMThreadNode; +import com.oracle.svm.core.graal.snippets.CEntryPointSnippets; +import com.oracle.svm.core.jdk.Jvm; +import com.oracle.svm.core.jdk.UninterruptibleUtils; +import com.oracle.svm.core.locks.VMCondition; +import com.oracle.svm.core.locks.VMMutex; +import com.oracle.svm.core.log.Log; +import com.oracle.svm.core.option.SubstrateOptionKey; +import com.oracle.svm.core.thread.PlatformThreads; +import com.oracle.svm.core.thread.PlatformThreads.OSThreadHandle; +import com.oracle.svm.core.thread.PlatformThreads.OSThreadHandlePointer; +import com.oracle.svm.core.thread.PlatformThreads.ThreadLocalKey; +import com.oracle.svm.core.util.UserError; +import com.oracle.svm.core.util.VMError; + +/** + * A garbage collector that tries to shorten GC pauses by using multiple worker threads. Currently, + * the only phase supported is scanning grey objects during a full GC. The number of worker threads + * can be set with a runtime option (see {@link SubstrateOptions#ParallelGCThreads}). + *

+ * The GC worker threads are unattached threads that are started lazily and that call AOT-compiled + * code. So, they don't have an {@link org.graalvm.nativeimage.IsolateThread} data structure and + * don't participate in the safepoint handling. + *

+ * Worker threads use heap chunks as the unit of work. Chunks to be scanned are stored in the + * {@link ChunkQueue}. Worker threads pop chunks from the queue and scan them for references to live + * objects to be promoted. When promoting an aligned chunk object, they speculatively allocate + * memory for its copy in the to-space, then compete to install a forwarding pointer in the original + * object. The winning thread proceeds to copy the object data, while losing threads retract the speculatively + * allocated memory. + *

+ * Each worker thread allocates memory in its own thread local allocation chunk for speed. As + * allocation chunks become filled up, they are pushed to {@link ChunkQueue}. This pop-scan-push + * cycle continues until the chunk buffer becomes empty. At this point, worker threads are parked + * and the GC routine continues on the main GC thread. + */ +public class ParallelGC { + private static final int UNALIGNED_BIT = 0b01; + private static final int MAX_WORKER_THREADS = 8; + + private final VMMutex mutex = new VMMutex("parallelGC"); + private final VMCondition seqPhase = new VMCondition(mutex); + private final VMCondition parPhase = new VMCondition(mutex); + private final ChunkQueue chunkQueue = new ChunkQueue(); + private final CEntryPointLiteral gcWorkerRunFunc = CEntryPointLiteral.create(ParallelGC.class, "gcWorkerRun", GCWorkerThreadState.class); + + private boolean initialized; + private ThreadLocalKey workerStateTL; + private GCWorkerThreadState workerStates; + private OSThreadHandlePointer workerThreads; + private int numWorkerThreads; + private int busyWorkerThreads; + private volatile boolean inParallelPhase; + private volatile boolean shutdown; + + @Platforms(Platform.HOSTED_ONLY.class) + public ParallelGC() { + } + + @Fold + public static ParallelGC singleton() { + return ImageSingletons.lookup(ParallelGC.class); + } + + @Fold + public static boolean isEnabled() { + return SubstrateOptions.UseParallelGC.getValue(); + } + + @Uninterruptible(reason = "Called from a GC worker thread.") + public boolean isInParallelPhase() { + return inParallelPhase; + } + + @Uninterruptible(reason = "Called from a GC worker thread.") + public VMMutex getMutex() { + return mutex; + } + + @Uninterruptible(reason = "Called from a GC worker thread.") + public AlignedHeapChunk.AlignedHeader getAllocationChunk() { + GCWorkerThreadState state = getWorkerThreadState(); + return state.getAllocChunk(); + } + + @Uninterruptible(reason = "Called from a GC worker thread.") + public 
void setAllocationChunk(AlignedHeapChunk.AlignedHeader chunk) { + GCWorkerThreadState state = getWorkerThreadState(); + assert state.getAllocChunk().isNull() && state.getAllocChunkScanOffset().equal(0); + + state.setAllocChunk(chunk); + state.setAllocChunkScanOffset(AlignedHeapChunk.getObjectsStartOffset()); + } + + @Uninterruptible(reason = "Called from a GC worker thread.") + public void push(AlignedHeapChunk.AlignedHeader aChunk) { + push(AlignedHeapChunk.getObjectsStart(aChunk)); + } + + @Uninterruptible(reason = "Called from a GC worker thread.") + public void push(UnalignedHeapChunk.UnalignedHeader uChunk) { + push(HeapChunk.asPointer(uChunk).or(ParallelGC.UNALIGNED_BIT)); + } + + @Uninterruptible(reason = "Called from a GC worker thread.") + private void push(Pointer ptr) { + assert ptr.isNonNull(); + chunkQueue.push(ptr); + if (inParallelPhase) { + assert mutex.isOwner(true); + parPhase.signal(); + } + } + + @Uninterruptible(reason = "Called from a GC worker thread.") + public void pushAllocChunk() { + assert GCImpl.getGCImpl().isCompleteCollection(); + + /* + * Scanning (and therefore enqueueing) is only necessary if there are any not yet scanned + * objects in the chunk. 
+ */ + GCWorkerThreadState state = getWorkerThreadState(); + AlignedHeapChunk.AlignedHeader chunk = state.getAllocChunk(); + if (chunk.isNonNull() && !chunk.equal(state.getScannedChunk())) { + UnsignedWord scanOffset = state.getAllocChunkScanOffset(); + assert scanOffset.aboveOrEqual(AlignedHeapChunk.getObjectsStartOffset()); + if (chunk.getTopOffset().aboveThan(scanOffset)) { + Pointer ptrIntoChunk = HeapChunk.asPointer(chunk).add(scanOffset); + push(ptrIntoChunk); + } + } + + state.setAllocChunk(WordFactory.nullPointer()); + state.setAllocChunkScanOffset(WordFactory.zero()); + } + + @Uninterruptible(reason = "Called from a GC worker thread.") + private GCWorkerThreadState getWorkerThreadState() { + if (CurrentIsolate.getCurrentThread().isNull()) { + return PlatformThreads.singleton().getUnmanagedThreadLocalValue(workerStateTL); + } + return workerStates.addressOf(numWorkerThreads); + } + + public void initialize() { + if (initialized) { + return; + } + + initialized = true; + inParallelPhase = true; + + chunkQueue.initialize(); + workerStateTL = PlatformThreads.singleton().createUnmanagedThreadLocal(); + numWorkerThreads = getWorkerCount(); + busyWorkerThreads = numWorkerThreads; + + /* Allocate one struct per worker thread and one struct for the main GC thread. */ + int numWorkerStates = numWorkerThreads + 1; + workerStates = ImageSingletons.lookup(UnmanagedMemorySupport.class).calloc(SizeOf.unsigned(GCWorkerThreadState.class).multiply(numWorkerStates)); + VMError.guarantee(workerStates.isNonNull()); + for (int i = 0; i < numWorkerStates; i++) { + workerStates.addressOf(i).setIsolate(CurrentIsolate.getIsolate()); + } + + /* Start the worker threads and wait until they are in a well-defined state. 
*/ + workerThreads = ImageSingletons.lookup(UnmanagedMemorySupport.class).malloc(SizeOf.unsigned(OSThreadHandlePointer.class).multiply(numWorkerThreads)); + VMError.guarantee(workerThreads.isNonNull()); + for (int i = 0; i < numWorkerThreads; i++) { + OSThreadHandle thread = PlatformThreads.singleton().startThreadUnmanaged(gcWorkerRunFunc.getFunctionPointer(), workerStates.addressOf(i), 0); + workerThreads.write(i, thread); + } + + waitUntilWorkerThreadsFinish(); + } + + @Uninterruptible(reason = "Tear-down in progress.") + public void tearDown() { + if (!initialized) { + return; + } + + initialized = false; + + chunkQueue.teardown(); + + /* Signal the worker threads so that they can shut down. */ + inParallelPhase = true; + shutdown = true; + parPhase.broadcast(); + + for (int i = 0; i < numWorkerThreads; i++) { + OSThreadHandle thread = workerThreads.read(i); + PlatformThreads.singleton().joinThreadUnmanaged(thread); + } + inParallelPhase = false; + busyWorkerThreads = 0; + + ImageSingletons.lookup(UnmanagedMemorySupport.class).free(workerThreads); + workerThreads = WordFactory.nullPointer(); + + PlatformThreads.singleton().deleteUnmanagedThreadLocal(workerStateTL); + workerStateTL = WordFactory.nullPointer(); + + numWorkerThreads = 0; + } + + private static int getWorkerCount() { + int setting = SubstrateOptions.ParallelGCThreads.getValue(); + int workerCount = setting > 0 ? setting : getDefaultWorkerCount(); + verboseGCLog().string("[Number of ParallelGC threads: ").unsigned(workerCount).string("]").newline(); + return workerCount; + } + + private static int getDefaultWorkerCount() { + /* This does not take the container support into account. 
*/ + int cpus = Jvm.JVM_ActiveProcessorCount(); + return UninterruptibleUtils.Math.min(cpus, MAX_WORKER_THREADS); + } + + @Uninterruptible(reason = "Heap base is not set up yet.") + @CEntryPoint(include = UseParallelGC.class, publishAs = CEntryPoint.Publish.NotPublished) + @CEntryPointOptions(prologue = GCWorkerThreadPrologue.class, epilogue = CEntryPointOptions.NoEpilogue.class) + private static void gcWorkerRun(GCWorkerThreadState state) { + try { + ParallelGC.singleton().work(state); + } catch (Throwable e) { + throw VMError.shouldNotReachHere(e); + } + } + + @NeverInline("Prevent reads from floating up.") + @Uninterruptible(reason = "Called from a GC worker thread.") + private void work(GCWorkerThreadState state) { + PlatformThreads.singleton().setUnmanagedThreadLocalValue(workerStateTL, state); + try { + work0(state); + } catch (Throwable e) { + VMError.shouldNotReachHere(e); + } + } + + @Uninterruptible(reason = "Called from a GC worker thread.") + private void work0(GCWorkerThreadState state) { + while (!shutdown) { + Pointer ptr; + mutex.lockNoTransitionUnspecifiedOwner(); + try { + ptr = chunkQueue.pop(); + /* Block if there is no local/global work. 
*/ + if (ptr.isNull() && !allocChunkNeedsScanning(state)) { + decrementBusyWorkers(); + do { + parPhase.blockNoTransitionUnspecifiedOwner(); + } while (!inParallelPhase); + incrementBusyWorkers(); + } + } finally { + mutex.unlockNoTransitionUnspecifiedOwner(); + } + + if (ptr.isNonNull()) { + scanChunk(ptr); + } else { + scanAllocChunk(state); + } + } + } + + @Uninterruptible(reason = "Called from a GC worker thread.") + private static void scanChunk(Pointer ptr) { + if (ptr.and(UNALIGNED_BIT).notEqual(0)) { + UnalignedHeapChunk.walkObjectsInline((UnalignedHeapChunk.UnalignedHeader) ptr.and(~UNALIGNED_BIT), GCImpl.getGCImpl().getGreyToBlackObjectVisitor()); + } else { + AlignedHeapChunk.AlignedHeader chunk = AlignedHeapChunk.getEnclosingChunkFromObjectPointer(ptr); + HeapChunk.walkObjectsFromInline(chunk, ptr, GCImpl.getGCImpl().getGreyToBlackObjectVisitor()); + } + } + + @Uninterruptible(reason = "Called from a GC worker thread.") + private static void scanAllocChunk(GCWorkerThreadState state) { + if (!allocChunkNeedsScanning(state)) { + return; + } + + AlignedHeapChunk.AlignedHeader allocChunk = state.getAllocChunk(); + UnsignedWord scanOffset = state.getAllocChunkScanOffset(); + assert scanOffset.aboveOrEqual(AlignedHeapChunk.getObjectsStartOffset()); + Pointer scanPointer = HeapChunk.asPointer(allocChunk).add(scanOffset); + state.setScannedChunk(allocChunk); + HeapChunk.walkObjectsFromInline(allocChunk, scanPointer, GCImpl.getGCImpl().getGreyToBlackObjectVisitor()); + state.setScannedChunk(WordFactory.nullPointer()); + if (allocChunk.equal(state.getAllocChunk())) { + /* Remember top offset so that we don't scan the same objects again. 
*/ + state.setAllocChunkScanOffset(allocChunk.getTopOffset()); + } + } + + @Uninterruptible(reason = "Called from a GC worker thread.") + private static boolean allocChunkNeedsScanning(GCWorkerThreadState state) { + AlignedHeapChunk.AlignedHeader allocChunk = state.getAllocChunk(); + return allocChunk.isNonNull() && allocChunk.getTopOffset().aboveThan(state.getAllocChunkScanOffset()); + } + + @Uninterruptible(reason = "Called from a GC worker thread.") + private void incrementBusyWorkers() { + assert mutex.isOwner(true); + ++busyWorkerThreads; + } + + @Uninterruptible(reason = "Called from a GC worker thread.") + private void decrementBusyWorkers() { + assert mutex.isOwner(true); + if (--busyWorkerThreads == 0) { + inParallelPhase = false; + seqPhase.signal(); + } + } + + /** + * Start parallel phase and wait until all chunks have been processed. + */ + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public void waitForIdle() { + pushAllocChunk(); + + if (!chunkQueue.isEmpty()) { + mutex.lockNoTransitionUnspecifiedOwner(); + try { + /* Let worker threads run. */ + inParallelPhase = true; + parPhase.broadcast(); + + waitUntilWorkerThreadsFinish0(); + } finally { + mutex.unlockNoTransitionUnspecifiedOwner(); + } + } + + assert chunkQueue.isEmpty(); + assert !inParallelPhase; + assert busyWorkerThreads == 0; + + /* Reset all thread local states. 
*/ + for (int i = 0; i < numWorkerThreads + 1; i++) { + GCWorkerThreadState state = workerStates.addressOf(i); + state.setAllocChunk(WordFactory.nullPointer()); + state.setAllocChunkScanOffset(WordFactory.zero()); + assert state.getScannedChunk().isNull(); + } + } + + @Uninterruptible(reason = "Called from a GC worker thread.") + private void waitUntilWorkerThreadsFinish() { + mutex.lockNoTransitionUnspecifiedOwner(); + try { + waitUntilWorkerThreadsFinish0(); + } finally { + mutex.unlockNoTransitionUnspecifiedOwner(); + } + } + + @Uninterruptible(reason = "Called from a GC worker thread.") + private void waitUntilWorkerThreadsFinish0() { + while (inParallelPhase) { + seqPhase.blockNoTransitionUnspecifiedOwner(); + } + } + + private static Log verboseGCLog() { + return SubstrateGCOptions.VerboseGC.getValue() ? Log.log() : Log.noopLog(); + } + + @RawStructure + private interface GCWorkerThreadState extends PointerBase { + @RawField + Isolate getIsolate(); + + @RawField + void setIsolate(Isolate value); + + @RawField + AlignedHeapChunk.AlignedHeader getAllocChunk(); + + @RawField + void setAllocChunk(AlignedHeapChunk.AlignedHeader value); + + @RawField + AlignedHeapChunk.AlignedHeader getScannedChunk(); + + @RawField + void setScannedChunk(AlignedHeapChunk.AlignedHeader value); + + @RawField + UnsignedWord getAllocChunkScanOffset(); + + @RawField + void setAllocChunkScanOffset(UnsignedWord value); + + GCWorkerThreadState addressOf(int index); + } + + private static class GCWorkerThreadPrologue implements CEntryPointOptions.Prologue { + @Uninterruptible(reason = "prologue") + @SuppressWarnings("unused") + public static void enter(GCWorkerThreadState state) { + CEntryPointSnippets.setHeapBase(state.getIsolate()); + WriteCurrentVMThreadNode.writeCurrentVMThread(WordFactory.nullPointer()); + } + } + + private static class UseParallelGC implements BooleanSupplier { + @Override + public boolean getAsBoolean() { + return ParallelGC.isEnabled(); + } + } +} + 
+@Platforms(Platform.HOSTED_ONLY.class) +@AutomaticallyRegisteredFeature() +@SuppressWarnings("unused") +class ParallelGCFeature implements InternalFeature { + @Override + public boolean isInConfiguration(IsInConfigurationAccess access) { + return ParallelGC.isEnabled(); + } + + @Override + public void afterRegistration(AfterRegistrationAccess access) { + verifyOptionEnabled(SubstrateOptions.SpawnIsolates); + + ImageSingletons.add(ParallelGC.class, new ParallelGC()); + } + + private static void verifyOptionEnabled(SubstrateOptionKey option) { + String optionMustBeEnabledFmt = "When using the parallel garbage collector ('--gc=parallel'), please note that option '%s' must be enabled."; + UserError.guarantee(option.getValue(), optionMustBeEnabledFmt, option.getName()); + } +} diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/MemoryWalker.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/MemoryWalker.java index 85104becd0a4..980c7db17c4c 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/MemoryWalker.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/MemoryWalker.java @@ -80,21 +80,6 @@ public interface HeapChunkAccess { /** Return the size of the heap chunk. */ UnsignedWord getSize(T heapChunk); - /** Return the address where allocation starts within the heap chunk. */ - UnsignedWord getAllocationStart(T heapChunk); - - /** - * Return the address where allocation has ended within the heap chunk. This is the first - * address past the end of allocated space within the heap chunk. - */ - UnsignedWord getAllocationEnd(T heapChunk); - - /** - * Return the name of the region that contains the heap chunk. E.g., "young", "old", "free", - * etc. - */ - String getRegion(T heapChunk); - /** Return true if the heap chunk is an aligned heap chunk, else false. 
*/ boolean isAligned(T heapChunk); } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateDiagnostics.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateDiagnostics.java index ddf5ca33a06f..debcdf01c923 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateDiagnostics.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateDiagnostics.java @@ -124,7 +124,7 @@ public static boolean isFatalErrorHandlingInProgress() { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static boolean isFatalErrorHandlingThread() { - return fatalErrorState().diagnosticThread.get() == CurrentIsolate.getCurrentThread(); + return CurrentIsolate.getCurrentThread().isNonNull() && fatalErrorState().diagnosticThread.get() == CurrentIsolate.getCurrentThread(); } public static int maxInvocations() { diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java index 77adb65b3665..e6ee9ee47cc2 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java @@ -65,6 +65,7 @@ import com.oracle.svm.core.option.RuntimeOptionKey; import com.oracle.svm.core.option.SubstrateOptionsParser; import com.oracle.svm.core.thread.VMOperationControl; +import com.oracle.svm.core.util.InterruptImageBuilding; import com.oracle.svm.core.util.UserError; import com.oracle.svm.util.LogUtils; import com.oracle.svm.util.ModuleSupport; @@ -293,6 +294,7 @@ public String helpText() { @Override protected void onValueUpdate(EconomicMap, Object> values, Boolean oldValue, Boolean newValue) { if (newValue) { + SubstrateOptions.UseParallelGC.update(values, false); SubstrateOptions.UseEpsilonGC.update(values, false); } } @@ -305,10 +307,44 @@ protected 
void onValueUpdate(EconomicMap, Object> values, Boolean o protected void onValueUpdate(EconomicMap, Object> values, Boolean oldValue, Boolean newValue) { if (newValue) { SubstrateOptions.UseSerialGC.update(values, false); + SubstrateOptions.UseParallelGC.update(values, false); } } }; + @APIOption(name = "parallel", group = GCGroup.class, customHelp = "Parallel garbage collector")// + @Option(help = "Use a parallel GC")// + public static final HostedOptionKey UseParallelGC = new HostedOptionKey<>(false, SubstrateOptions::requireMultiThreading) { + @Override + protected void onValueUpdate(EconomicMap, Object> values, Boolean oldValue, Boolean newValue) { + if (newValue) { + SubstrateOptions.UseSerialGC.update(values, false); + SubstrateOptions.UseEpsilonGC.update(values, false); + } + } + }; + + @Option(help = "Number of GC worker threads. Parallel and G1 GC only.", type = OptionType.User)// + public static final RuntimeOptionKey ParallelGCThreads = new RuntimeOptionKey<>(0, Immutable); + + private static void requireMultiThreading(HostedOptionKey optionKey) { + if (optionKey.getValue() && !MultiThreaded.getValue()) { + throw new InterruptImageBuilding(String.format("The option %s requires the option %s to be set.", + SubstrateOptionsParser.commandArgument(optionKey, "+"), + SubstrateOptionsParser.commandArgument(MultiThreaded, "+"))); + } + } + + @Fold + public static boolean useSerialOrParallelGC() { + return UseSerialGC.getValue() || UseParallelGC.getValue(); + } + + @Fold + public static boolean useSerialOrParallelOrEpsilonGC() { + return UseSerialGC.getValue() || UseParallelGC.getValue() || UseEpsilonGC.getValue(); + } + @Option(help = "The size of each thread stack at run-time, in bytes.", type = OptionType.User)// public static final RuntimeOptionKey StackSize = new RuntimeOptionKey<>(0L); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/ObjectHeader.java 
b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/ObjectHeader.java index d74bd5435bf0..71bdd77b278e 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/ObjectHeader.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/ObjectHeader.java @@ -150,7 +150,7 @@ protected static int getCompressionShift() { } @Fold - protected static int getHubOffset() { + public static int getHubOffset() { return ConfigurationValues.getObjectLayout().getHubOffset(); } } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/ObjectVisitor.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/ObjectVisitor.java index ac706669506e..7882d214733c 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/ObjectVisitor.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/ObjectVisitor.java @@ -38,7 +38,9 @@ public interface ObjectVisitor { @RestrictHeapAccess(access = RestrictHeapAccess.Access.NO_ALLOCATION, reason = "Must not allocate while visiting the heap.") boolean visitObject(Object o); - /** Like visitObject(Object), but inlined for performance. */ + /** + * Like visitObject(Object), but inlined for performance. 
+ */ @RestrictHeapAccess(access = RestrictHeapAccess.Access.NO_ALLOCATION, reason = "Must not allocate while visiting the heap.") default boolean visitObjectInline(Object o) { return visitObject(o); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/OutOfMemoryUtil.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/OutOfMemoryUtil.java index 2daabb05d6ec..2cc8ae34abc9 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/OutOfMemoryUtil.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/OutOfMemoryUtil.java @@ -29,6 +29,7 @@ import com.oracle.svm.core.headers.LibC; import com.oracle.svm.core.jdk.JDKUtils; import com.oracle.svm.core.log.Log; +import com.oracle.svm.core.thread.VMOperation; import com.oracle.svm.core.util.VMError; public class OutOfMemoryUtil { @@ -39,9 +40,18 @@ public static OutOfMemoryError heapSizeExceeded() { return reportOutOfMemoryError(OUT_OF_MEMORY_ERROR); } - @Uninterruptible(reason = "Not uninterruptible but it doesn't matter for the callers.", calleeMustBe = false) + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) @RestrictHeapAccess(access = RestrictHeapAccess.Access.NO_ALLOCATION, reason = "Can't allocate while out of memory.") public static OutOfMemoryError reportOutOfMemoryError(OutOfMemoryError error) { + if (VMOperation.isGCInProgress()) { + /* An OutOfMemoryError during a GC is always a fatal error. 
*/ + throw VMError.shouldNotReachHere(error); + } + throw reportOutOfMemoryError0(error); + } + + @Uninterruptible(reason = "Not uninterruptible but it doesn't matter for the callers.", calleeMustBe = false) + private static OutOfMemoryError reportOutOfMemoryError0(OutOfMemoryError error) { if (SubstrateGCOptions.ExitOnOutOfMemoryError.getValue()) { if (LibC.isSupported()) { Log.log().string("Terminating due to java.lang.OutOfMemoryError: ").string(JDKUtils.getRawMessage(error)).newline(); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/Target_java_lang_ref_Reference.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/Target_java_lang_ref_Reference.java index c3d61c9ab4e5..30c820d71901 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/Target_java_lang_ref_Reference.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/Target_java_lang_ref_Reference.java @@ -98,7 +98,7 @@ public final class Target_java_lang_ref_Reference { @SuppressWarnings("unused") // @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.Reset) // - @ExcludeFromReferenceMap(reason = "Some GCs process this field manually.", onlyIf = NotSerialNotEpsilonGC.class) // + @ExcludeFromReferenceMap(reason = "Some GCs process this field manually.", onlyIf = NotSerialNotParallelNotEpsilonGC.class) // transient Target_java_lang_ref_Reference discovered; @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.Custom, declClass = ComputeQueueValue.class) // @@ -226,9 +226,9 @@ public Object transform(Object receiver, Object originalValue) { } @Platforms(Platform.HOSTED_ONLY.class) -class NotSerialNotEpsilonGC implements BooleanSupplier { +class NotSerialNotParallelNotEpsilonGC implements BooleanSupplier { @Override public boolean getAsBoolean() { - return !SubstrateOptions.UseSerialGC.getValue() && !SubstrateOptions.UseEpsilonGC.getValue(); + return !SubstrateOptions.useSerialOrParallelOrEpsilonGC(); } 
} diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/LayoutEncoding.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/LayoutEncoding.java index b1f30bc5a7d8..bf723046aa21 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/LayoutEncoding.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/LayoutEncoding.java @@ -348,11 +348,50 @@ public static UnsignedWord getSizeFromObjectInlineInGC(Object obj, boolean addOp return getSizeFromObjectInline(obj, withOptionalIdHashField); } + @AlwaysInline("GC performance") + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public static UnsignedWord getSizeFromHeader(Object obj, Word header, long eightHeaderBytes, boolean addOptionalIdHashField) { + ObjectHeader oh = Heap.getHeap().getObjectHeader(); + DynamicHub hub = oh.dynamicHubFromObjectHeader(header); + int encoding = hub.getLayoutEncoding(); + boolean withOptionalIdHashField = addOptionalIdHashField || + (!ConfigurationValues.getObjectLayout().hasFixedIdentityHashField() && oh.hasOptionalIdentityHashField(header)); + + if (isArrayLike(encoding)) { + int arrayLength = getArrayLengthFromHeader(obj, eightHeaderBytes); + return getArraySize(encoding, arrayLength, withOptionalIdHashField); + } else { + return getPureInstanceSize(hub, withOptionalIdHashField); + } + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + private static int getArrayLengthFromHeader(Object obj, long eightHeaderBytes) { + ObjectLayout ol = ConfigurationValues.getObjectLayout(); + assert ol.getArrayLengthOffset() >= 4; + if (ol.getArrayLengthOffset() == 4) { + /* + * If the array length is located within the first 8 bytes, then we need to extract it + * from the already read header data. 
+ */ + int result = (int) (eightHeaderBytes >>> 32); + assert result >= 0; + return result; + } + return ArrayLengthNode.arrayLength(obj); + } + @AlwaysInline("Actual inlining decided by callers.") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) private static UnsignedWord getSizeFromObjectInline(Object obj, boolean withOptionalIdHashField) { DynamicHub hub = KnownIntrinsics.readHub(obj); int encoding = hub.getLayoutEncoding(); + return getSizeFromEncoding(obj, hub, encoding, withOptionalIdHashField); + } + + @AlwaysInline("GC performance") + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + private static UnsignedWord getSizeFromEncoding(Object obj, DynamicHub hub, int encoding, boolean withOptionalIdHashField) { if (isArrayLike(encoding)) { return getArraySize(encoding, ArrayLengthNode.arrayLength(obj), withOptionalIdHashField); } else { diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/identityhashcode/IdentityHashCodeSupport.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/identityhashcode/IdentityHashCodeSupport.java index 32ffd9585564..edebb2f74d13 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/identityhashcode/IdentityHashCodeSupport.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/identityhashcode/IdentityHashCodeSupport.java @@ -85,8 +85,12 @@ public static int generateIdentityHashCode(Object obj) { @Uninterruptible(reason = "Prevent a GC interfering with the object's identity hash state.") public static int computeHashCodeFromAddress(Object obj) { - Word address = Word.objectToUntrackedPointer(obj); long salt = Heap.getHeap().getIdentityHashSalt(obj); + return computeHashCodeFromAddress(Word.objectToUntrackedPointer(obj), salt); + } + + @Uninterruptible(reason = "Prevent a GC interfering with the object's identity hash state.") + public static int computeHashCodeFromAddress(Word address, long 
salt) { SignedWord salted = WordFactory.signed(salt).xor(address); int hash = mix32(salted.rawValue()) >>> 1; // shift: ensure positive, same as on HotSpot return (hash == 0) ? 1 : hash; // ensure nonzero diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/VMErrorSubstitutions.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/VMErrorSubstitutions.java index bd7b2bb953a2..7a0391c20f85 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/VMErrorSubstitutions.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/VMErrorSubstitutions.java @@ -27,7 +27,9 @@ import static com.oracle.svm.core.heap.RestrictHeapAccess.Access.NO_ALLOCATION; import org.graalvm.compiler.nodes.UnreachableNode; +import org.graalvm.nativeimage.CurrentIsolate; import org.graalvm.nativeimage.ImageSingletons; +import org.graalvm.nativeimage.Isolate; import org.graalvm.nativeimage.LogHandler; import org.graalvm.nativeimage.Platforms; import org.graalvm.nativeimage.c.function.CodePointer; @@ -35,9 +37,11 @@ import com.oracle.svm.core.NeverInline; import com.oracle.svm.core.SubstrateDiagnostics; +import com.oracle.svm.core.SubstrateOptions; import com.oracle.svm.core.Uninterruptible; import com.oracle.svm.core.annotate.Substitute; import com.oracle.svm.core.annotate.TargetClass; +import com.oracle.svm.core.c.function.CEntryPointActions; import com.oracle.svm.core.heap.RestrictHeapAccess; import com.oracle.svm.core.log.Log; import com.oracle.svm.core.snippets.KnownIntrinsics; @@ -135,6 +139,10 @@ public class VMErrorSubstitutions { static RuntimeException shouldNotReachHere(CodePointer callerIP, String msg, Throwable ex) { ThreadStackPrinter.printBacktrace(); + if (SubstrateOptions.SpawnIsolates.getValue() && CurrentIsolate.getCurrentThread().isNull()) { + CEntryPointActions.enterAttachThreadFromCrashHandler((Isolate) KnownIntrinsics.heapBase()); + } + SafepointBehavior.preventSafepoints(); 
StackOverflowCheck.singleton().disableStackOverflowChecksForFatalError(); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/PlatformThreads.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/PlatformThreads.java index c30108b99dec..fbe0e8acda86 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/PlatformThreads.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/PlatformThreads.java @@ -556,6 +556,11 @@ public OSThreadHandle startThreadUnmanaged(CFunctionPointer threadRoutine, Point throw VMError.shouldNotReachHere("Shouldn't call PlatformThreads.startThreadUnmanaged directly."); } + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public boolean joinThreadUnmanaged(OSThreadHandle threadHandle) { + return joinThreadUnmanaged(threadHandle, WordFactory.nullPointer()); + } + @SuppressWarnings("unused") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public boolean joinThreadUnmanaged(OSThreadHandle threadHandle, WordPointer threadExitStatus) { diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/VMThreads.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/VMThreads.java index e60fa5f26367..098e925c92a4 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/VMThreads.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/VMThreads.java @@ -622,6 +622,16 @@ public static void guaranteeOwnsThreadMutex(String message, boolean allowUnspeci THREAD_MUTEX.guaranteeIsOwner(message, allowUnspecifiedOwner); } + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public static boolean ownsThreadMutex() { + return THREAD_MUTEX.isOwner(); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public static boolean ownsThreadMutex(boolean 
allowUnspecifiedOwner) { + return THREAD_MUTEX.isOwner(allowUnspecifiedOwner); + } + public static boolean printLocationInfo(Log log, UnsignedWord value, boolean allowUnsafeOperations) { for (IsolateThread thread = firstThreadUnsafe(); thread.isNonNull(); thread = nextThread(thread)) { if (thread.equal(value)) {