diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AbstractCollectionPolicy.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AbstractCollectionPolicy.java new file mode 100644 index 000000000000..eca546000dee --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AbstractCollectionPolicy.java @@ -0,0 +1,388 @@ +/* + * Copyright (c) 2021, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.genscavenge; + +import java.util.concurrent.locks.ReentrantLock; + +import org.graalvm.compiler.api.replacements.Fold; +import org.graalvm.word.UnsignedWord; +import org.graalvm.word.WordFactory; + +import com.oracle.svm.core.SubstrateGCOptions; +import com.oracle.svm.core.annotate.Uninterruptible; +import com.oracle.svm.core.heap.PhysicalMemory; +import com.oracle.svm.core.heap.ReferenceAccess; +import com.oracle.svm.core.jdk.UninterruptibleUtils; +import com.oracle.svm.core.option.RuntimeOptionValues; +import com.oracle.svm.core.thread.VMOperation; +import com.oracle.svm.core.util.UnsignedUtils; +import com.oracle.svm.core.util.VMError; + +abstract class AbstractCollectionPolicy implements CollectionPolicy { + + protected static final int MAX_TENURING_THRESHOLD = 15; + + static int getMaxSurvivorSpaces(Integer userValue) { + assert userValue == null || userValue >= 0; + return (userValue != null) ? userValue : AbstractCollectionPolicy.MAX_TENURING_THRESHOLD; + } + + /* + * Constants that can be made options if desirable. These are -XX options in HotSpot, refer to + * their descriptions for details. The values are HotSpot defaults unless labeled otherwise. + * + * Don't change these values individually without carefully going over their occurrences in + * HotSpot source code, there are dependencies between them that are not handled in our code. + */ + protected static final int INITIAL_SURVIVOR_RATIO = 8; + protected static final int MIN_SURVIVOR_RATIO = 3; + protected static final int DEFAULT_TIME_WEIGHT = 25; // -XX:AdaptiveTimeWeight + + /* Constants to compute defaults for values which can be set through existing options. */ + /** HotSpot: -XX:MaxHeapSize default without ergonomics. 
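+ * Also used below as the fallback initial heap size while the physical memory size is not
+ * yet known.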
*/ + protected static final UnsignedWord SMALL_HEAP_SIZE = WordFactory.unsigned(96 * 1024 * 1024); + protected static final int NEW_RATIO = 2; // HotSpot: -XX:NewRatio + protected static final int LARGE_MEMORY_MAX_HEAP_PERCENT = 25; // -XX:MaxRAMPercentage + protected static final int SMALL_MEMORY_MAX_HEAP_PERCENT = 50; // -XX:MinRAMPercentage + protected static final double INITIAL_HEAP_MEMORY_PERCENT = 1.5625; // -XX:InitialRAMPercentage + + protected final AdaptiveWeightedAverage avgYoungGenAlignedChunkFraction = new AdaptiveWeightedAverage(DEFAULT_TIME_WEIGHT); + + protected UnsignedWord survivorSize; + protected UnsignedWord edenSize; + protected UnsignedWord promoSize; + protected UnsignedWord oldSize; + protected int tenuringThreshold; + + protected volatile SizeParameters sizes; + private final ReentrantLock sizesUpdateLock = new ReentrantLock(); + + protected AbstractCollectionPolicy(int initialTenuringThreshold) { + tenuringThreshold = UninterruptibleUtils.Math.clamp(initialTenuringThreshold, 1, HeapParameters.getMaxSurvivorSpaces() + 1); + } + + @Override + public boolean shouldCollectOnAllocation() { + if (sizes == null) { + return false; // updateSizeParameters() has never been called + } + UnsignedWord edenUsed = HeapImpl.getHeapImpl().getAccounting().getEdenUsedBytes(); + return edenUsed.aboveOrEqual(edenSize); + } + + @Fold + static UnsignedWord minSpaceSize() { + return HeapParameters.getAlignedHeapChunkSize(); + } + + @Uninterruptible(reason = "Used in uninterruptible code.", mayBeInlined = true) + static UnsignedWord alignUp(UnsignedWord size) { + return UnsignedUtils.roundUp(size, minSpaceSize()); + } + + @Uninterruptible(reason = "Used in uninterruptible code.", mayBeInlined = true) + static UnsignedWord alignDown(UnsignedWord size) { + return UnsignedUtils.roundDown(size, minSpaceSize()); + } + + @Uninterruptible(reason = "Used in uninterruptible code.", mayBeInlined = true) + static boolean isAligned(UnsignedWord size) { + return UnsignedUtils.isAMultiple(size, minSpaceSize()); + } + + @Uninterruptible(reason = "Used in uninterruptible code.", mayBeInlined = true) + static UnsignedWord minSpaceSize(UnsignedWord size) { + return UnsignedUtils.max(size, minSpaceSize()); + } + + @Override + public void ensureSizeParametersInitialized() { + if (sizes == null) { + updateSizeParameters(); + } + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + protected void guaranteeSizeParametersInitialized() { + VMError.guarantee(sizes != null); + } + + @Override + public void updateSizeParameters() { + PhysicalMemory.tryInitialize(); + + SizeParameters params = computeSizeParameters(); + SizeParameters previous = sizes; + if (previous != null && params.equal(previous)) { + return; // nothing to do + } + sizesUpdateLock.lock(); + try { + updateSizeParametersLocked(params, previous); + } finally { + sizesUpdateLock.unlock(); + } + guaranteeSizeParametersInitialized(); // sanity + } + + @Uninterruptible(reason = "Must be atomic with regard to garbage collection.") + private void updateSizeParametersLocked(SizeParameters params, SizeParameters previous) { + if (sizes != previous) { + // Some other thread beat us and we cannot tell if our values or their values are newer, + // so back off -- any newer values will be applied eventually. 
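+ // (This check is reliable because sizes is only ever written while sizesUpdateLock is held.)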
+ return;
+ }
+ sizes = params;
+
+ if (previous == null || gcCount() == 0) {
+ survivorSize = params.initialSurvivorSize;
+ edenSize = params.initialEdenSize;
+ oldSize = params.initialOldSize();
+ promoSize = UnsignedUtils.min(edenSize, oldSize);
+ }
+
+ /*
+ * NOTE: heap limits can change when options are updated at runtime or once the physical
+ * memory size becomes known. This means that we start off with sizes that can cause higher
+ * GC costs initially, and that, when shrinking the heap, previously computed values such as
+ * GC costs, intervals, and survived/promoted object sizes are likely no longer representative.
+ *
+ * We assume that such changes happen very early on and that values then adapt reasonably
+ * quickly, but we must still ensure that computations can handle it (for example, no
+ * overflows).
+ */
+ survivorSize = UnsignedUtils.min(survivorSize, params.maxSurvivorSize());
+ edenSize = UnsignedUtils.min(edenSize, maxEdenSize());
+ oldSize = UnsignedUtils.min(oldSize, params.maxOldSize());
+ promoSize = UnsignedUtils.min(promoSize, params.maxOldSize());
+ }
+
+ @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true)
+ protected UnsignedWord maxEdenSize() {
+ return alignDown(sizes.maxYoungSize.subtract(survivorSize.multiply(2)));
+ }
+
+ @Override
+ public UnsignedWord getMaximumHeapSize() {
+ guaranteeSizeParametersInitialized();
+ return sizes.maxHeapSize;
+ }
+
+ @Override
+ public UnsignedWord getMaximumYoungGenerationSize() {
+ guaranteeSizeParametersInitialized();
+ return sizes.maxYoungSize;
+ }
+
+ @Override
+ public UnsignedWord getCurrentHeapCapacity() {
+ assert VMOperation.isGCInProgress() : "use only during GC";
+ guaranteeSizeParametersInitialized();
+ return edenSize.add(survivorSize.multiply(2)).add(oldSize);
+ }
+
+ @Override
+ public UnsignedWord getSurvivorSpacesCapacity() {
+ assert VMOperation.isGCInProgress() : "use only during GC";
+ guaranteeSizeParametersInitialized();
+ return survivorSize;
+ }
+
+ @Override
+ public UnsignedWord getMaximumFreeAlignedChunksSize() {
+ assert VMOperation.isGCInProgress() : "use only during GC";
+ guaranteeSizeParametersInitialized();
+ /*
+ * Keep chunks ready for allocations in eden and for the survivor to-spaces during young
+ * collections (although we might keep too many aligned chunks when large objects in
+ * unaligned chunks are also allocated). We could alternatively return
+ * getCurrentHeapCapacity() to have chunks ready during full GCs as well.
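+ * The calculation below scales that eden plus survivor capacity by the recently sampled
+ * fraction of young generation chunk bytes that were held in aligned chunks.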
+ */ + UnsignedWord total = edenSize.add(survivorSize); + double alignedFraction = Math.min(1, Math.max(0, avgYoungGenAlignedChunkFraction.getAverage())); + return UnsignedUtils.fromDouble(UnsignedUtils.toDouble(total) * alignedFraction); + } + + @Override + public int getTenuringAge() { + assert VMOperation.isGCInProgress() : "use only during GC"; + return tenuringThreshold; + } + + @Override + public UnsignedWord getMinimumHeapSize() { + return sizes.minHeapSize; + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + protected abstract long gcCount(); + + protected SizeParameters computeSizeParameters() { + UnsignedWord addressSpaceSize = ReferenceAccess.singleton().getAddressSpaceSize(); + UnsignedWord minAllSpaces = minSpaceSize().multiply(2); // eden, old + if (HeapParameters.getMaxSurvivorSpaces() > 0) { + minAllSpaces = minAllSpaces.add(minSpaceSize().multiply(2)); // survivor from and to + } + + UnsignedWord maxHeap; + long optionMax = SubstrateGCOptions.MaxHeapSize.getValue(); + if (optionMax > 0L) { + maxHeap = WordFactory.unsigned(optionMax); + } else if (!PhysicalMemory.isInitialized()) { + maxHeap = addressSpaceSize; + } else { + UnsignedWord physicalMemorySize = PhysicalMemory.getCachedSize(); + if (HeapParameters.Options.MaximumHeapSizePercent.hasBeenSet(RuntimeOptionValues.singleton())) { + maxHeap = physicalMemorySize.unsignedDivide(100).multiply(HeapParameters.getMaximumHeapSizePercent()); + } else { + UnsignedWord reasonableMax = physicalMemorySize.unsignedDivide(100).multiply(AbstractCollectionPolicy.LARGE_MEMORY_MAX_HEAP_PERCENT); + UnsignedWord reasonableMin = physicalMemorySize.unsignedDivide(100).multiply(AbstractCollectionPolicy.SMALL_MEMORY_MAX_HEAP_PERCENT); + if (reasonableMin.belowThan(AbstractCollectionPolicy.SMALL_HEAP_SIZE)) { + // small physical memory, use a small fraction for the heap + reasonableMax = reasonableMin; + } else { + reasonableMax = UnsignedUtils.max(reasonableMax, AbstractCollectionPolicy.SMALL_HEAP_SIZE); + } + maxHeap = reasonableMax; + } + } + maxHeap = UnsignedUtils.clamp(alignDown(maxHeap), minAllSpaces, alignDown(addressSpaceSize)); + + UnsignedWord maxYoung; + long optionMaxYoung = SubstrateGCOptions.MaxNewSize.getValue(); + if (optionMaxYoung > 0L) { + maxYoung = WordFactory.unsigned(optionMaxYoung); + } else if (HeapParameters.Options.MaximumYoungGenerationSizePercent.hasBeenSet(RuntimeOptionValues.singleton())) { + maxYoung = maxHeap.unsignedDivide(100).multiply(HeapParameters.getMaximumYoungGenerationSizePercent()); + } else { + maxYoung = maxHeap.unsignedDivide(AbstractCollectionPolicy.NEW_RATIO + 1); + } + maxYoung = UnsignedUtils.clamp(alignUp(maxYoung), minSpaceSize(), maxHeap); + + UnsignedWord maxOld = maxHeap.subtract(maxYoung); + maxOld = minSpaceSize(alignUp(maxOld)); + maxHeap = maxYoung.add(maxOld); + if (maxHeap.aboveThan(addressSpaceSize)) { + maxYoung = alignDown(maxYoung.subtract(minSpaceSize())); + maxHeap = maxYoung.add(maxOld); + VMError.guarantee(maxHeap.belowOrEqual(addressSpaceSize) && maxYoung.aboveOrEqual(minSpaceSize())); + } + + UnsignedWord minHeap = WordFactory.zero(); + long optionMin = SubstrateGCOptions.MinHeapSize.getValue(); + if (optionMin > 0L) { + minHeap = WordFactory.unsigned(optionMin); + } + minHeap = UnsignedUtils.clamp(alignUp(minHeap), minAllSpaces, maxHeap); + + UnsignedWord initialHeap; + if (PhysicalMemory.isInitialized()) { + initialHeap = UnsignedUtils.fromDouble(UnsignedUtils.toDouble(PhysicalMemory.getCachedSize()) / 100 * 
AbstractCollectionPolicy.INITIAL_HEAP_MEMORY_PERCENT);
+ } else {
+ initialHeap = AbstractCollectionPolicy.SMALL_HEAP_SIZE;
+ }
+ initialHeap = UnsignedUtils.clamp(alignUp(initialHeap), minHeap, maxHeap);
+
+ UnsignedWord initialYoung;
+ if (initialHeap.equal(maxHeap)) {
+ initialYoung = maxYoung;
+ } else {
+ initialYoung = UnsignedUtils.clamp(alignUp(initialHeap.unsignedDivide(AbstractCollectionPolicy.NEW_RATIO + 1)), minSpaceSize(), maxYoung);
+ }
+ UnsignedWord initialSurvivor = WordFactory.zero();
+ if (HeapParameters.getMaxSurvivorSpaces() > 0) {
+ /*
+ * In HotSpot, this is the reserved capacity of each of the survivor From and To spaces,
+ * i.e., together they occupy 2x this size. Our chunked heap doesn't reserve memory, so
+ * we never occupy more than 1x this size for survivors except during collections.
+ * However, this is inconsistent with how we interpret the maximum size of the old
+ * generation, whose (current) size we can exceed while copying during collections.
+ */
+ initialSurvivor = minSpaceSize(alignUp(initialYoung.unsignedDivide(AbstractCollectionPolicy.INITIAL_SURVIVOR_RATIO)));
+ }
+ UnsignedWord initialEden = minSpaceSize(alignUp(initialYoung.subtract(initialSurvivor.multiply(2))));
+
+ return new SizeParameters(maxHeap, maxYoung, initialHeap, initialEden, initialSurvivor, minHeap);
+ }
+
+ protected static final class SizeParameters {
+ final UnsignedWord maxHeapSize;
+ final UnsignedWord maxYoungSize;
+ final UnsignedWord initialHeapSize;
+ final UnsignedWord initialEdenSize;
+ final UnsignedWord initialSurvivorSize;
+ final UnsignedWord minHeapSize;
+
+ SizeParameters(UnsignedWord maxHeapSize, UnsignedWord maxYoungSize, UnsignedWord initialHeapSize,
+ UnsignedWord initialEdenSize, UnsignedWord initialSurvivorSize, UnsignedWord minHeapSize) {
+ this.maxHeapSize = maxHeapSize;
+ this.maxYoungSize = maxYoungSize;
+ this.initialHeapSize = initialHeapSize;
+ this.initialEdenSize = initialEdenSize;
+ this.initialSurvivorSize = initialSurvivorSize;
+ this.minHeapSize = minHeapSize;
+
+ assert isAligned(maxHeapSize) && isAligned(maxYoungSize) && isAligned(initialHeapSize) && isAligned(initialEdenSize) && isAligned(initialSurvivorSize);
+ assert isAligned(maxSurvivorSize()) && isAligned(initialYoungSize()) && isAligned(initialOldSize()) && isAligned(maxOldSize());
+
+ assert initialHeapSize.belowOrEqual(maxHeapSize);
+ assert maxSurvivorSize().belowThan(maxYoungSize);
+ assert maxYoungSize.add(maxOldSize()).equal(maxHeapSize);
+ assert maxHeapSize.belowOrEqual(ReferenceAccess.singleton().getAddressSpaceSize());
+ assert initialEdenSize.add(initialSurvivorSize.multiply(2)).equal(initialYoungSize());
+ assert initialYoungSize().add(initialOldSize()).equal(initialHeapSize);
+ }
+
+ @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true)
+ UnsignedWord maxSurvivorSize() {
+ if (HeapParameters.getMaxSurvivorSpaces() == 0) {
+ return WordFactory.zero();
+ }
+ UnsignedWord size = maxYoungSize.unsignedDivide(MIN_SURVIVOR_RATIO);
+ return minSpaceSize(alignDown(size));
+ }
+
+ @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true)
+ UnsignedWord initialYoungSize() {
+ return initialEdenSize.add(initialSurvivorSize.multiply(2));
+ }
+
+ @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true)
+ UnsignedWord initialOldSize() {
+ return initialHeapSize.subtract(initialYoungSize());
+ }
+
+ @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true)
+
UnsignedWord maxOldSize() { + return maxHeapSize.subtract(maxYoungSize); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + boolean equal(SizeParameters other) { + return maxHeapSize.equal(other.maxHeapSize) && maxYoungSize.equal(other.maxYoungSize) && initialHeapSize.equal(other.initialHeapSize) && + initialEdenSize.equal(other.initialEdenSize) && initialSurvivorSize.equal(other.initialSurvivorSize) && minHeapSize.equal(other.minHeapSize); + } + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveCollectionPolicy.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveCollectionPolicy.java new file mode 100644 index 000000000000..a2746b0f0616 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveCollectionPolicy.java @@ -0,0 +1,557 @@ +/* + * Copyright (c) 2021, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.genscavenge; + +import org.graalvm.word.UnsignedWord; +import org.graalvm.word.WordFactory; + +import com.oracle.svm.core.annotate.Uninterruptible; +import com.oracle.svm.core.heap.GCCause; +import com.oracle.svm.core.util.TimeUtils; +import com.oracle.svm.core.util.UnsignedUtils; + +/** + * A garbage collection policy that balances throughput and memory footprint. + * + * Much of this is based on HotSpot's ParallelGC adaptive size policy, but without the pause time + * goals. Many methods in this class have been adapted from classes {@code PSAdaptiveSizePolicy} and + * its base class {@code AdaptiveSizePolicy}. Method and variable names have been kept mostly the + * same for comparability. + */ +final class AdaptiveCollectionPolicy extends AbstractCollectionPolicy { + + /* + * Constants that can be made options if desirable. These are -XX options in HotSpot, refer to + * their descriptions for details. The values are HotSpot defaults unless labeled otherwise. + * + * Don't change these values individually without carefully going over their occurrences in + * HotSpot source code, there are dependencies between them that are not handled in our code. 
+ */ + private static final int ADAPTIVE_TIME_WEIGHT = DEFAULT_TIME_WEIGHT; + private static final int ADAPTIVE_SIZE_POLICY_READY_THRESHOLD = 5; + private static final int ADAPTIVE_SIZE_DECREMENT_SCALE_FACTOR = 4; + private static final int ADAPTIVE_SIZE_POLICY_WEIGHT = 10; + private static final boolean USE_ADAPTIVE_SIZE_POLICY_WITH_SYSTEM_GC = false; + private static final boolean USE_ADAPTIVE_SIZE_DECAY_MAJOR_GC_COST = true; + private static final double ADAPTIVE_SIZE_MAJOR_GC_DECAY_TIME_SCALE = 10; + private static final boolean USE_ADAPTIVE_SIZE_POLICY_FOOTPRINT_GOAL = true; + private static final int THRESHOLD_TOLERANCE = 10; + private static final int SURVIVOR_PADDING = 3; + private static final int INITIAL_TENURING_THRESHOLD = 7; + private static final int PROMOTED_PADDING = 3; + private static final int TENURED_GENERATION_SIZE_SUPPLEMENT_DECAY = 2; + private static final int YOUNG_GENERATION_SIZE_SUPPLEMENT_DECAY = 8; + private static final int PAUSE_PADDING = 1; + /** + * Ratio of mutator wall-clock time to GC wall-clock time. HotSpot's default is 99, i.e. + * spending 1% of time in GC. We set it to 19, i.e. 5%, to prefer a small footprint. + */ + private static final int GC_TIME_RATIO = 19; + /** + * Maximum size increment step percentages. We reduce them from HotSpot's default of 20 to avoid + * growing the heap too eagerly, and to enable {@linkplain #ADAPTIVE_SIZE_USE_COST_ESTIMATORS + * cost estimators} to resize the heap in smaller steps which might yield improved throughput + * when larger steps do not. + */ + private static final int YOUNG_GENERATION_SIZE_INCREMENT = 10; + private static final int TENURED_GENERATION_SIZE_INCREMENT = 10; + /* + * Supplements to accelerate the expansion of the heap at startup. We do not use them in favor + * of a small footprint. + */ + private static final int YOUNG_GENERATION_SIZE_SUPPLEMENT = 0; // HotSpot default: 80 + private static final int TENURED_GENERATION_SIZE_SUPPLEMENT = 0; // HotSpot default: 80 + /** + * Use least square fitting to estimate if increasing heap sizes will significantly improve + * throughput. This is intended to limit memory usage once throughput cannot be increased much + * more, for example when the application is heavily multi-threaded and our single-threaded + * collector cannot reach the throughput goal. We use a reciprocal function with exponential + * discounting of old data points, unlike HotSpot's AdaptiveSizeThroughPutPolicy option + * (disabled by default) which uses linear least-square fitting without discounting. + */ + private static final boolean ADAPTIVE_SIZE_USE_COST_ESTIMATORS = true; + private static final int ADAPTIVE_SIZE_POLICY_INITIALIZING_STEPS = ADAPTIVE_SIZE_POLICY_READY_THRESHOLD; + /** The minimum increase in throughput in percent for expanding a space by 1% of its size. */ + private static final double ADAPTIVE_SIZE_ESTIMATOR_MIN_SIZE_THROUGHPUT_TRADEOFF = 0.8; + /** The effective number of most recent data points used by estimator (exponential decay). */ + private static final int ADAPTIVE_SIZE_COST_ESTIMATORS_HISTORY_LENGTH = ADAPTIVE_TIME_WEIGHT; + /** Threshold for triggering a complete collection after repeated minor collections. */ + private static final int CONSECUTIVE_MINOR_TO_MAJOR_COLLECTION_PAUSE_TIME_RATIO = 2; + + /* Constants derived from other constants. 
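+ * For example, GC_TIME_RATIO = 19 yields THROUGHPUT_GOAL = 1 - 1/(1 + 19) = 0.95, i.e. the
+ * mutator should get at least 95% of wall-clock time, and THRESHOLD_TOLERANCE = 10 yields
+ * THRESHOLD_TOLERANCE_PERCENT = 1.1.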
*/ + private static final double THROUGHPUT_GOAL = 1.0 - 1.0 / (1.0 + GC_TIME_RATIO); + private static final double THRESHOLD_TOLERANCE_PERCENT = 1.0 + THRESHOLD_TOLERANCE / 100.0; + + private final Timer minorTimer = new Timer("minor/between minor"); + private final AdaptiveWeightedAverage avgMinorGcCost = new AdaptiveWeightedAverage(ADAPTIVE_TIME_WEIGHT); + private final AdaptivePaddedAverage avgMinorPause = new AdaptivePaddedAverage(ADAPTIVE_TIME_WEIGHT, PAUSE_PADDING); + private final AdaptivePaddedAverage avgSurvived = new AdaptivePaddedAverage(ADAPTIVE_SIZE_POLICY_WEIGHT, SURVIVOR_PADDING); + private final AdaptivePaddedAverage avgPromoted = new AdaptivePaddedAverage(ADAPTIVE_SIZE_POLICY_WEIGHT, PROMOTED_PADDING, true); + private final ReciprocalLeastSquareFit minorCostEstimator = new ReciprocalLeastSquareFit(ADAPTIVE_SIZE_COST_ESTIMATORS_HISTORY_LENGTH); + private long minorCount; + private long latestMinorMutatorIntervalSeconds; + private boolean youngGenPolicyIsReady; + private UnsignedWord youngGenSizeIncrementSupplement = WordFactory.unsigned(YOUNG_GENERATION_SIZE_SUPPLEMENT); + private long youngGenChangeForMinorThroughput; + private int minorCountSinceMajorCollection; + + private final Timer majorTimer = new Timer("major/between major"); + private final AdaptiveWeightedAverage avgMajorGcCost = new AdaptiveWeightedAverage(ADAPTIVE_TIME_WEIGHT); + private final AdaptivePaddedAverage avgMajorPause = new AdaptivePaddedAverage(ADAPTIVE_TIME_WEIGHT, PAUSE_PADDING); + private final AdaptiveWeightedAverage avgMajorIntervalSeconds = new AdaptiveWeightedAverage(ADAPTIVE_TIME_WEIGHT); + private final AdaptiveWeightedAverage avgOldLive = new AdaptiveWeightedAverage(ADAPTIVE_SIZE_POLICY_WEIGHT); + private final ReciprocalLeastSquareFit majorCostEstimator = new ReciprocalLeastSquareFit(ADAPTIVE_SIZE_COST_ESTIMATORS_HISTORY_LENGTH); + private long majorCount; + private UnsignedWord oldGenSizeIncrementSupplement = WordFactory.unsigned(TENURED_GENERATION_SIZE_SUPPLEMENT); + private long latestMajorMutatorIntervalSeconds; + private boolean oldSizeExceededInPreviousCollection; + private long oldGenChangeForMajorThroughput; + + AdaptiveCollectionPolicy() { + super(INITIAL_TENURING_THRESHOLD); + } + + @Override + public String getName() { + return "adaptive"; + } + + @Override + public boolean shouldCollectCompletely(boolean followingIncrementalCollection) { // should_{attempt_scavenge,full_GC} + guaranteeSizeParametersInitialized(); + + if (!followingIncrementalCollection) { + /* + * Always do an incremental collection first because we expect most of the objects in + * the young generation to be garbage, and we can reuse their leftover chunks for + * copying the live objects in the old generation with fewer allocations. + */ + return false; + } + if (oldSizeExceededInPreviousCollection) { + /* + * In the preceding incremental collection, we promoted objects to the old generation + * beyond its current capacity to avoid a promotion failure, but due to the chunked + * nature of our heap, we should still be within the maximum heap size. Follow up with a + * full collection during which we reclaim enough space or expand the old generation. 
+ */ + return true; + } + if (minorCountSinceMajorCollection * avgMinorPause.getAverage() >= CONSECUTIVE_MINOR_TO_MAJOR_COLLECTION_PAUSE_TIME_RATIO * avgMajorPause.getPaddedAverage()) { + /* + * When we do many incremental collections in a row because they reclaim sufficient + * space, still trigger a complete collection when reaching a cumulative pause time + * threshold so that garbage in the old generation can also be reclaimed. + */ + return true; + } + + UnsignedWord youngUsed = HeapImpl.getHeapImpl().getYoungGeneration().getChunkBytes(); + UnsignedWord oldUsed = HeapImpl.getHeapImpl().getOldGeneration().getChunkBytes(); + + /* + * If the remaining free space in the old generation is less than what is expected to be + * needed by the next collection, do a full collection now. + */ + UnsignedWord averagePromoted = UnsignedUtils.fromDouble(avgPromoted.getPaddedAverage()); + UnsignedWord promotionEstimate = UnsignedUtils.min(averagePromoted, youngUsed); + UnsignedWord oldFree = oldSize.subtract(oldUsed); + return promotionEstimate.aboveThan(oldFree); + } + + private void updateAverages(UnsignedWord survivedChunkBytes, UnsignedWord survivorOverflowObjectBytes, UnsignedWord promotedObjectBytes) { + /* + * Adding the object bytes of overflowed survivor objects does not consider the overhead of + * partially filled chunks in the many survivor spaces, so it underestimates the necessary + * survivors capacity. However, this should self-correct as we expand the survivor space and + * reduce the tenuring age to avoid overflowing survivor objects in the first place. + */ + avgSurvived.sample(survivedChunkBytes.add(survivorOverflowObjectBytes)); + + avgPromoted.sample(promotedObjectBytes); + } + + private void computeSurvivorSpaceSizeAndThreshold(boolean isSurvivorOverflow, UnsignedWord survivorLimit) { + if (!youngGenPolicyIsReady) { + return; + } + + boolean incrTenuringThreshold = false; + boolean decrTenuringThreshold = false; + if (!isSurvivorOverflow) { + /* + * We use the tenuring threshold to equalize the cost of major and minor collections. + * + * THRESHOLD_TOLERANCE_PERCENT is used to indicate how sensitive the tenuring threshold + * is to differences in cost between the collection types. 
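+ * For example, with THRESHOLD_TOLERANCE = 10, the minor GC cost must exceed the major GC
+ * cost by more than 10% before the threshold is decremented, and vice versa for
+ * incrementing it.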
+ */
+ if (minorGcCost() > majorGcCost() * THRESHOLD_TOLERANCE_PERCENT) {
+ decrTenuringThreshold = true;
+ } else if (majorGcCost() > minorGcCost() * THRESHOLD_TOLERANCE_PERCENT) {
+ incrTenuringThreshold = true;
+ }
+ } else {
+ decrTenuringThreshold = true;
+ }
+
+ UnsignedWord targetSize = minSpaceSize(alignUp(UnsignedUtils.fromDouble(avgSurvived.getPaddedAverage())));
+ if (targetSize.aboveThan(survivorLimit)) {
+ targetSize = survivorLimit;
+ decrTenuringThreshold = true;
+ }
+ survivorSize = targetSize;
+
+ if (decrTenuringThreshold) {
+ tenuringThreshold = Math.max(tenuringThreshold - 1, 1);
+ } else if (incrTenuringThreshold) {
+ tenuringThreshold = Math.min(tenuringThreshold + 1, HeapParameters.getMaxSurvivorSpaces() + 1);
+ }
+ }
+
+ private void computeEdenSpaceSize() {
+ boolean expansionReducesCost = true; // general assumption
+ boolean useEstimator = ADAPTIVE_SIZE_USE_COST_ESTIMATORS && youngGenChangeForMinorThroughput > ADAPTIVE_SIZE_POLICY_INITIALIZING_STEPS;
+ if (useEstimator) {
+ expansionReducesCost = minorCostEstimator.getSlope(UnsignedUtils.toDouble(edenSize)) <= 0;
+ }
+
+ UnsignedWord desiredEdenSize = edenSize;
+ if (expansionReducesCost && adjustedMutatorCost() < THROUGHPUT_GOAL && gcCost() > 0) {
+ // from adjust_eden_for_throughput():
+ UnsignedWord edenHeapDelta = edenIncrementWithSupplementAlignedUp(edenSize);
+ double scaleByRatio = minorGcCost() / gcCost();
+ assert scaleByRatio >= 0 && scaleByRatio <= 1;
+ UnsignedWord scaledEdenHeapDelta = UnsignedUtils.fromDouble(scaleByRatio * UnsignedUtils.toDouble(edenHeapDelta));
+
+ expansionReducesCost = !useEstimator || expansionSignificantlyReducesCost(minorCostEstimator, edenSize, scaledEdenHeapDelta);
+ if (expansionReducesCost) {
+ desiredEdenSize = alignUp(desiredEdenSize.add(scaledEdenHeapDelta));
+ desiredEdenSize = UnsignedUtils.max(desiredEdenSize, edenSize);
+ youngGenChangeForMinorThroughput++;
+ }
+ /*
+ * If the estimator says expanding by delta does not lead to a significant improvement,
+ * shrink so as not to get stuck in a supposed optimum and to keep collecting data points.
+ */
+ }
+ if (!expansionReducesCost || (USE_ADAPTIVE_SIZE_POLICY_FOOTPRINT_GOAL && youngGenPolicyIsReady && adjustedMutatorCost() >= THROUGHPUT_GOAL)) {
+ UnsignedWord desiredSum = edenSize.add(promoSize);
+ desiredEdenSize = adjustEdenForFootprint(edenSize, desiredSum);
+ }
+ assert isAligned(desiredEdenSize);
+ desiredEdenSize = minSpaceSize(desiredEdenSize);
+
+ UnsignedWord edenLimit = maxEdenSize();
+ if (desiredEdenSize.aboveThan(edenLimit)) {
+ /*
+ * If the policy says to get a larger eden but is hitting the limit, don't decrease
+ * eden. This can lead to a general drifting down of the eden size. Let the tenuring
+ * calculation push more into the old gen.
+ */ + desiredEdenSize = UnsignedUtils.max(edenLimit, edenSize); + } + edenSize = desiredEdenSize; + } + + private static boolean expansionSignificantlyReducesCost(ReciprocalLeastSquareFit estimator, UnsignedWord size, UnsignedWord delta) { + double x0 = UnsignedUtils.toDouble(size); + double x0Throughput = 1 - estimator.estimate(x0); + if (x0 == 0 || x0Throughput == 0) { // division by zero below + return false; + } + double x1 = x0 + UnsignedUtils.toDouble(delta); + double x1Throughput = 1 - estimator.estimate(x1); + if (x0 >= x1 || x0Throughput >= x1Throughput) { + return false; + } + double min = (x1 / x0 - 1) * ADAPTIVE_SIZE_ESTIMATOR_MIN_SIZE_THROUGHPUT_TRADEOFF; + double estimated = x1Throughput / x0Throughput - 1; + return (estimated >= min); + } + + private static UnsignedWord adjustEdenForFootprint(UnsignedWord curEden, UnsignedWord desiredSum) { + assert curEden.belowOrEqual(desiredSum); + + UnsignedWord change = edenDecrement(curEden); + change = scaleDown(change, curEden, desiredSum); + + UnsignedWord reducedSize = curEden.subtract(change); + assert reducedSize.belowOrEqual(curEden); + return alignUp(reducedSize); + } + + private static UnsignedWord scaleDown(UnsignedWord change, UnsignedWord part, UnsignedWord total) { + assert part.belowOrEqual(total); + UnsignedWord reducedChange = change; + if (total.aboveThan(0)) { + double fraction = UnsignedUtils.toDouble(part) / UnsignedUtils.toDouble(total); + reducedChange = UnsignedUtils.fromDouble(fraction * UnsignedUtils.toDouble(change)); + } + assert reducedChange.belowOrEqual(change); + return reducedChange; + } + + private static UnsignedWord edenDecrement(UnsignedWord curEden) { + return spaceIncrement(curEden, WordFactory.unsigned(YOUNG_GENERATION_SIZE_INCREMENT)) + .unsignedDivide(ADAPTIVE_SIZE_DECREMENT_SCALE_FACTOR); + } + + private double adjustedMutatorCost() { + double cost = 1 - decayingGcCost(); + assert cost >= 0; + return cost; + } + + private double decayingGcCost() { // decaying_gc_cost and decaying_major_gc_cost + double decayedMajorGcCost = majorGcCost(); + double avgMajorInterval = avgMajorIntervalSeconds.getAverage(); + if (USE_ADAPTIVE_SIZE_DECAY_MAJOR_GC_COST && ADAPTIVE_SIZE_MAJOR_GC_DECAY_TIME_SCALE > 0 && avgMajorInterval > 0) { + double secondsSinceMajor = secondsSinceMajorGc(); + if (secondsSinceMajor > 0 && secondsSinceMajor > ADAPTIVE_SIZE_MAJOR_GC_DECAY_TIME_SCALE * avgMajorInterval) { + double decayed = decayedMajorGcCost * (ADAPTIVE_SIZE_MAJOR_GC_DECAY_TIME_SCALE * avgMajorInterval) / secondsSinceMajor; + decayedMajorGcCost = Math.min(decayedMajorGcCost, decayed); + } + } + return Math.min(1, decayedMajorGcCost + minorGcCost()); + } + + private double minorGcCost() { + return Math.max(0, avgMinorGcCost.getAverage()); + } + + private double majorGcCost() { + return Math.max(0, avgMajorGcCost.getAverage()); + } + + private double gcCost() { + double cost = Math.min(1, minorGcCost() + majorGcCost()); + assert cost >= 0 : "Both minor and major costs are non-negative"; + return cost; + } + + private UnsignedWord edenIncrementWithSupplementAlignedUp(UnsignedWord curEden) { + return alignUp(spaceIncrement(curEden, youngGenSizeIncrementSupplement.add(YOUNG_GENERATION_SIZE_INCREMENT))); + } + + private static UnsignedWord spaceIncrement(UnsignedWord curSize, UnsignedWord percentChange) { // {eden,promo}_increment + return curSize.unsignedDivide(100).multiply(percentChange); + } + + private double secondsSinceMajorGc() { // time_since_major_gc + majorTimer.close(); + try { + return 
TimeUtils.nanosToSecondsDouble(majorTimer.getMeasuredNanos()); + } finally { + majorTimer.open(); + } + } + + @Override + public void onCollectionBegin(boolean completeCollection) { // {major,minor}_collection_begin + Timer timer = completeCollection ? majorTimer : minorTimer; + timer.close(); + if (completeCollection) { + latestMajorMutatorIntervalSeconds = timer.getMeasuredNanos(); + } else { + latestMinorMutatorIntervalSeconds = timer.getMeasuredNanos(); + } + + // Capture the fraction of bytes in aligned chunks at the start to include all allocated + // (also dead) objects, because we use it to reserve aligned chunks for future allocations + UnsignedWord youngChunkBytes = GCImpl.getGCImpl().getAccounting().getYoungChunkBytesBefore(); + if (youngChunkBytes.notEqual(0)) { + UnsignedWord youngAlignedChunkBytes = HeapImpl.getHeapImpl().getYoungGeneration().getAlignedChunkBytes(); + avgYoungGenAlignedChunkFraction.sample(UnsignedUtils.toDouble(youngAlignedChunkBytes) / UnsignedUtils.toDouble(youngChunkBytes)); + } + + timer.reset(); + timer.open(); // measure collection pause + } + + @Override + public void onCollectionEnd(boolean completeCollection, GCCause cause) { // {major,minor}_collection_end + Timer timer = completeCollection ? majorTimer : minorTimer; + timer.close(); + + if (completeCollection) { + updateCollectionEndAverages(avgMajorGcCost, avgMajorPause, majorCostEstimator, avgMajorIntervalSeconds, + cause, latestMajorMutatorIntervalSeconds, timer.getMeasuredNanos(), promoSize); + majorCount++; + minorCountSinceMajorCollection = 0; + + } else { + updateCollectionEndAverages(avgMinorGcCost, avgMinorPause, minorCostEstimator, null, + cause, latestMinorMutatorIntervalSeconds, timer.getMeasuredNanos(), edenSize); + minorCount++; + minorCountSinceMajorCollection++; + + if (minorCount >= ADAPTIVE_SIZE_POLICY_READY_THRESHOLD) { + youngGenPolicyIsReady = true; + } + } + + timer.reset(); + timer.open(); + + GCAccounting accounting = GCImpl.getGCImpl().getAccounting(); + UnsignedWord oldLive = accounting.getOldGenerationAfterChunkBytes(); + oldSizeExceededInPreviousCollection = oldLive.aboveThan(oldSize); + + /* + * Update the averages that survivor space and tenured space sizes are derived from. Note + * that we use chunk bytes (not object bytes) for the survivors. This is because they are + * kept in many spaces (one for each age), which potentially results in significant overhead + * from chunks that may only be partially filled, especially when the heap is small. Using + * chunk bytes here ensures that the needed survivor capacity is not underestimated. 
+ */
+ UnsignedWord survivedChunkBytes = HeapImpl.getHeapImpl().getYoungGeneration().getSurvivorChunkBytes();
+ UnsignedWord survivorOverflowObjectBytes = accounting.getSurvivorOverflowObjectBytes();
+ UnsignedWord tenuredObjBytes = accounting.getTenuredObjectBytes(); // includes overflowed
+ updateAverages(survivedChunkBytes, survivorOverflowObjectBytes, tenuredObjBytes);
+
+ computeSurvivorSpaceSizeAndThreshold(survivorOverflowObjectBytes.aboveThan(0), sizes.maxSurvivorSize());
+ computeEdenSpaceSize();
+ if (completeCollection) {
+ computeOldGenSpaceSize(oldLive);
+ }
+ decaySupplementalGrowth(completeCollection);
+ }
+
+ private void computeOldGenSpaceSize(UnsignedWord oldLive) { // compute_old_gen_free_space
+ avgOldLive.sample(oldLive);
+
+ // NOTE: if maxOldSize shrank and the difference is negative, unsigned conversion results in 0
+ UnsignedWord promoLimit = UnsignedUtils.fromDouble(UnsignedUtils.toDouble(sizes.maxOldSize()) - avgOldLive.getAverage());
+ promoLimit = alignDown(UnsignedUtils.max(promoSize, promoLimit));
+
+ boolean expansionReducesCost = true; // general assumption
+ boolean useEstimator = ADAPTIVE_SIZE_USE_COST_ESTIMATORS && oldGenChangeForMajorThroughput > ADAPTIVE_SIZE_POLICY_INITIALIZING_STEPS;
+ if (useEstimator) {
+ expansionReducesCost = majorCostEstimator.getSlope(UnsignedUtils.toDouble(promoSize)) <= 0;
+ }
+
+ UnsignedWord desiredPromoSize = promoSize;
+ if (expansionReducesCost && adjustedMutatorCost() < THROUGHPUT_GOAL && gcCost() > 0) {
+ // from adjust_promo_for_throughput():
+ UnsignedWord promoHeapDelta = promoIncrementWithSupplementAlignedUp(promoSize);
+ double scaleByRatio = majorGcCost() / gcCost();
+ assert scaleByRatio >= 0 && scaleByRatio <= 1;
+ UnsignedWord scaledPromoHeapDelta = UnsignedUtils.fromDouble(scaleByRatio * UnsignedUtils.toDouble(promoHeapDelta));
+
+ expansionReducesCost = !useEstimator || expansionSignificantlyReducesCost(majorCostEstimator, promoSize, scaledPromoHeapDelta);
+ if (expansionReducesCost) {
+ desiredPromoSize = alignUp(promoSize.add(scaledPromoHeapDelta));
+ desiredPromoSize = UnsignedUtils.max(desiredPromoSize, promoSize);
+ oldGenChangeForMajorThroughput++;
+ }
+ /*
+ * If the estimator says expanding by delta does not lead to a significant improvement,
+ * shrink so as not to get stuck in a supposed optimum and to keep collecting data points.
+ */ + } + if (!expansionReducesCost || (USE_ADAPTIVE_SIZE_POLICY_FOOTPRINT_GOAL && youngGenPolicyIsReady && adjustedMutatorCost() >= THROUGHPUT_GOAL)) { + UnsignedWord desiredSum = edenSize.add(promoSize); + desiredPromoSize = adjustPromoForFootprint(promoSize, desiredSum); + } + assert isAligned(desiredPromoSize); + desiredPromoSize = minSpaceSize(desiredPromoSize); + + desiredPromoSize = UnsignedUtils.min(desiredPromoSize, promoLimit); + promoSize = desiredPromoSize; + + // from PSOldGen::resize + UnsignedWord desiredFreeSpace = calculatedOldFreeSizeInBytes(); + UnsignedWord desiredOldSize = alignUp(oldLive.add(desiredFreeSpace)); + oldSize = UnsignedUtils.clamp(desiredOldSize, minSpaceSize(), sizes.maxOldSize()); + } + + UnsignedWord calculatedOldFreeSizeInBytes() { + return UnsignedUtils.fromDouble(UnsignedUtils.toDouble(promoSize) + avgPromoted.getPaddedAverage()); + } + + private static UnsignedWord adjustPromoForFootprint(UnsignedWord curPromo, UnsignedWord desiredSum) { + assert curPromo.belowOrEqual(desiredSum); + + UnsignedWord change = promoDecrement(curPromo); + change = scaleDown(change, curPromo, desiredSum); + + UnsignedWord reducedSize = curPromo.subtract(change); + assert reducedSize.belowOrEqual(curPromo); + return alignUp(reducedSize); + } + + private static UnsignedWord promoDecrement(UnsignedWord curPromo) { + return promoIncrement(curPromo).unsignedDivide(ADAPTIVE_SIZE_DECREMENT_SCALE_FACTOR); + } + + private static UnsignedWord promoIncrement(UnsignedWord curPromo) { + return spaceIncrement(curPromo, WordFactory.unsigned(TENURED_GENERATION_SIZE_INCREMENT)); + } + + private UnsignedWord promoIncrementWithSupplementAlignedUp(UnsignedWord curPromo) { + return alignUp(spaceIncrement(curPromo, oldGenSizeIncrementSupplement.add(TENURED_GENERATION_SIZE_INCREMENT))); + } + + private void decaySupplementalGrowth(boolean completeCollection) { + // Decay the supplement growth factor even if it is not used. It is only meant to give a + // boost to the initial growth and if it is not used, then it was not needed. + if (completeCollection) { + // Don't wait for the threshold value for the major collections. If here, the + // supplemental growth term was used and should decay. 
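+ // With TENURED_GENERATION_SIZE_SUPPLEMENT_DECAY = 2, the supplement is halved on every
+ // second major collection.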
+ if (majorCount % TENURED_GENERATION_SIZE_SUPPLEMENT_DECAY == 0) { + oldGenSizeIncrementSupplement = oldGenSizeIncrementSupplement.unsignedShiftRight(1); + } + } else { + if (minorCount >= ADAPTIVE_SIZE_POLICY_READY_THRESHOLD && minorCount % YOUNG_GENERATION_SIZE_SUPPLEMENT_DECAY == 0) { + youngGenSizeIncrementSupplement = youngGenSizeIncrementSupplement.unsignedShiftRight(1); + } + } + } + + private static void updateCollectionEndAverages(AdaptiveWeightedAverage costAverage, AdaptivePaddedAverage pauseAverage, ReciprocalLeastSquareFit costEstimator, + AdaptiveWeightedAverage intervalSeconds, GCCause cause, long mutatorNanos, long pauseNanos, UnsignedWord sizeBytes) { + if (cause == GenScavengeGCCause.OnAllocation || USE_ADAPTIVE_SIZE_POLICY_WITH_SYSTEM_GC) { + double cost = 0; + double mutatorInSeconds = TimeUtils.nanosToSecondsDouble(mutatorNanos); + double pauseInSeconds = TimeUtils.nanosToSecondsDouble(pauseNanos); + pauseAverage.sample(pauseInSeconds); + if (mutatorInSeconds > 0 && pauseInSeconds > 0) { + double intervalInSeconds = mutatorInSeconds + pauseInSeconds; + cost = pauseInSeconds / intervalInSeconds; + costAverage.sample(cost); + if (intervalSeconds != null) { + intervalSeconds.sample(intervalInSeconds); + } + } + costEstimator.sample(UnsignedUtils.toDouble(sizeBytes), cost); + } + } + + @Override + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + protected long gcCount() { + return minorCount + majorCount; + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveWeightedAverage.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveWeightedAverage.java new file mode 100644 index 000000000000..b4796cc84061 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveWeightedAverage.java @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2021, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.genscavenge; + +import org.graalvm.word.UnsignedWord; + +import com.oracle.svm.core.util.UnsignedUtils; + +/** + * A weighted average maintains a running, weighted average of some floating-point value. 
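+ * For example, once enough samples have been taken, a weight of 25 makes each new sample
+ * enter the average as {@code average = 0.75 * average + 0.25 * sample}.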
+ * + * The average is adaptive in that we smooth it for the initial samples; we don't use the weight + * until we have enough samples for it to be meaningful. + * + * This serves as our best estimate of a future unknown. + */ +class AdaptiveWeightedAverage { + static final int OLD_THRESHOLD = 100; + + private final int weight; + + private double average; + private long sampleCount; + private boolean isOld; + + AdaptiveWeightedAverage(int weight) { + this(weight, 0); + } + + AdaptiveWeightedAverage(int weight, double avg) { + this.weight = weight; + this.average = avg; + } + + public double getAverage() { + return average; + } + + public void sample(double value) { + sampleCount++; + if (!isOld && sampleCount > OLD_THRESHOLD) { + isOld = true; + } + average = computeAdaptiveAverage(value, average); + } + + public final void sample(UnsignedWord value) { + sample(UnsignedUtils.toDouble(value)); + } + + protected double computeAdaptiveAverage(double sample, double avg) { + /* + * We smoothen the samples by not using weight directly until we've had enough data to make + * it meaningful. We'd like the first weight used to be 1, the second to be 1/2, etc until + * we have OLD_THRESHOLD/weight samples. + */ + long countWeight = 0; + if (!isOld) { // avoid division by zero if the counter wraps + countWeight = OLD_THRESHOLD / sampleCount; + } + long adaptiveWeight = Math.max(weight, countWeight); + return expAvg(avg, sample, adaptiveWeight); + } + + private static double expAvg(double avg, double sample, long adaptiveWeight) { + assert adaptiveWeight <= 100 : "weight must be a percentage"; + return (100.0 - adaptiveWeight) * avg / 100.0 + adaptiveWeight * sample / 100.0; + } +} + +/** + * A weighted average that includes a deviation from the average, some multiple of which is added to + * the average. + * + * This serves as our best estimate of an upper bound on a future unknown. + */ +class AdaptivePaddedAverage extends AdaptiveWeightedAverage { + private final int padding; + private final boolean noZeroDeviations; + + private double paddedAverage; + private double deviation; + + AdaptivePaddedAverage(int weight, int padding) { + this(weight, padding, false); + } + + /** + * @param noZeroDeviations do not update deviations when a sample is zero. The average is + * allowed to change. This is to prevent zero samples from drastically changing the + * padded average. 
+ */ + AdaptivePaddedAverage(int weight, int padding, boolean noZeroDeviations) { + super(weight); + this.padding = padding; + this.noZeroDeviations = noZeroDeviations; + } + + @Override + public void sample(double value) { + super.sample(value); + double average = super.getAverage(); + if (value != 0 || !noZeroDeviations) { + deviation = computeAdaptiveAverage(Math.abs(value - average), deviation); + } + paddedAverage = average + padding * deviation; + } + + public double getPaddedAverage() { + return paddedAverage; + } + + public double getDeviation() { + return deviation; + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AlignedHeapChunk.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AlignedHeapChunk.java index 09b15b87976f..b84544c24e01 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AlignedHeapChunk.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AlignedHeapChunk.java @@ -124,7 +124,7 @@ public static AlignedHeader getEnclosingChunk(Object obj) { } public static AlignedHeader getEnclosingChunkFromObjectPointer(Pointer ptr) { - return (AlignedHeader) PointerUtils.roundDown(ptr, HeapPolicy.getAlignedHeapChunkAlignment()); + return (AlignedHeader) PointerUtils.roundDown(ptr, HeapParameters.getAlignedHeapChunkAlignment()); } /** Return the offset of an object within the objects part of a chunk. */ diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/BasicCollectionPolicies.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/BasicCollectionPolicies.java new file mode 100644 index 000000000000..5145441da672 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/BasicCollectionPolicies.java @@ -0,0 +1,264 @@ +/* + * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+package com.oracle.svm.core.genscavenge;
+
+import static com.oracle.svm.core.genscavenge.BasicCollectionPolicies.Options.AllocationBeforePhysicalMemorySize;
+import static com.oracle.svm.core.genscavenge.BasicCollectionPolicies.Options.PercentTimeInIncrementalCollection;
+
+import org.graalvm.compiler.options.Option;
+import org.graalvm.word.UnsignedWord;
+import org.graalvm.word.WordFactory;
+
+import com.oracle.svm.core.SubstrateGCOptions;
+import com.oracle.svm.core.heap.GCCause;
+import com.oracle.svm.core.heap.PhysicalMemory;
+import com.oracle.svm.core.heap.ReferenceAccess;
+import com.oracle.svm.core.option.HostedOptionKey;
+import com.oracle.svm.core.option.RuntimeOptionKey;
+import com.oracle.svm.core.util.TimeUtils;
+import com.oracle.svm.core.util.VMError;
+
+/** Basic/legacy garbage collection policies. */
+final class BasicCollectionPolicies {
+ public static class Options {
+ @Option(help = "Percentage of total collection time that should be spent on young generation collections.")//
+ public static final RuntimeOptionKey<Integer> PercentTimeInIncrementalCollection = new RuntimeOptionKey<>(50);
+
+ @Option(help = "Bytes that can be allocated before (re-)querying the physical memory size") //
+ public static final HostedOptionKey<Long> AllocationBeforePhysicalMemorySize = new HostedOptionKey<>(1L * 1024L * 1024L);
+ }
+
+ static int getMaxSurvivorSpaces(Integer userValue) {
+ assert userValue == null || userValue >= 0;
+ return 0; // override option (if set): survivor spaces not supported
+ }
+
+ private BasicCollectionPolicies() {
+ }
+
+ abstract static class BasicPolicy implements CollectionPolicy {
+ protected static UnsignedWord m(long bytes) {
+ assert 0 <= bytes;
+ return WordFactory.unsigned(bytes).multiply(1024).multiply(1024);
+ }
+
+ @Override
+ public boolean shouldCollectOnAllocation() {
+ UnsignedWord youngUsed = HeapImpl.getHeapImpl().getAccounting().getYoungUsedBytes();
+ return youngUsed.aboveOrEqual(getMaximumYoungGenerationSize());
+ }
+
+ @Override
+ public UnsignedWord getCurrentHeapCapacity() {
+ return getMaximumHeapSize();
+ }
+
+ @Override
+ public void ensureSizeParametersInitialized() {
+ // Size parameters are recomputed from current values whenever they are queried
+ }
+
+ @Override
+ public void updateSizeParameters() {
+ // Sample the physical memory size, before the first GC but after some allocation.
+ UnsignedWord allocationBeforeUpdate = WordFactory.unsigned(AllocationBeforePhysicalMemorySize.getValue());
+ if (GCImpl.getGCImpl().getCollectionEpoch().equal(WordFactory.zero()) &&
+ HeapImpl.getHeapImpl().getAccounting().getYoungUsedBytes().aboveOrEqual(allocationBeforeUpdate)) {
+ PhysicalMemory.tryInitialize();
+ }
+ // Size parameters are recomputed from current values whenever they are queried
+ }
+
+ @Override
+ public final UnsignedWord getMaximumHeapSize() {
+ long runtimeValue = SubstrateGCOptions.MaxHeapSize.getValue();
+ if (runtimeValue != 0L) {
+ return WordFactory.unsigned(runtimeValue);
+ }
+
+ /*
+ * If the physical memory size is known, the maximum size of the heap is a fraction of
+ * the size of the physical memory.
+ */
+ UnsignedWord addressSpaceSize = ReferenceAccess.singleton().getAddressSpaceSize();
+ if (PhysicalMemory.isInitialized()) {
+ UnsignedWord physicalMemorySize = PhysicalMemory.getCachedSize();
+ int maximumHeapSizePercent = HeapParameters.getMaximumHeapSizePercent();
+ /* Do not cache because `-Xmx` option parsing may not have happened yet.
*/ + UnsignedWord result = physicalMemorySize.unsignedDivide(100).multiply(maximumHeapSizePercent); + if (result.belowThan(addressSpaceSize)) { + return result; + } + } + return addressSpaceSize; + } + + @Override + public final UnsignedWord getMaximumYoungGenerationSize() { + long runtimeValue = SubstrateGCOptions.MaxNewSize.getValue(); + if (runtimeValue != 0L) { + return WordFactory.unsigned(runtimeValue); + } + + /* If no value is set, use a fraction of the maximum heap size. */ + UnsignedWord maxHeapSize = getMaximumHeapSize(); + UnsignedWord youngSizeAsFraction = maxHeapSize.unsignedDivide(100).multiply(HeapParameters.getMaximumYoungGenerationSizePercent()); + /* But not more than 256MB. */ + UnsignedWord maxSize = m(256); + UnsignedWord youngSize = (youngSizeAsFraction.belowOrEqual(maxSize) ? youngSizeAsFraction : maxSize); + /* But do not cache the result as it is based on values that might change. */ + return youngSize; + } + + @Override + public final UnsignedWord getMinimumHeapSize() { + long runtimeValue = SubstrateGCOptions.MinHeapSize.getValue(); + if (runtimeValue != 0L) { + /* If `-Xms` has been parsed from the command line, use that value. */ + return WordFactory.unsigned(runtimeValue); + } + + /* A default value chosen to delay the first full collection. */ + UnsignedWord result = getMaximumYoungGenerationSize().multiply(2); + /* But not larger than -Xmx. */ + if (result.aboveThan(getMaximumHeapSize())) { + result = getMaximumHeapSize(); + } + /* But do not cache the result as it is based on values that might change. */ + return result; + } + + @Override + public UnsignedWord getSurvivorSpacesCapacity() { + return WordFactory.zero(); + } + + @Override + public final UnsignedWord getMaximumFreeAlignedChunksSize() { + return getMaximumYoungGenerationSize(); + } + + @Override + public int getTenuringAge() { + return 1; + } + + @Override + public void onCollectionBegin(boolean completeCollection) { + } + + @Override + public void onCollectionEnd(boolean completeCollection, GCCause cause) { + } + } + + public static final class OnlyIncrementally extends BasicPolicy { + + @Override + public boolean shouldCollectCompletely(boolean followingIncrementalCollection) { + return false; + } + + @Override + public String getName() { + return "only incrementally"; + } + } + + public static final class OnlyCompletely extends BasicPolicy { + + @Override + public boolean shouldCollectCompletely(boolean followingIncrementalCollection) { + return true; + } + + @Override + public String getName() { + return "only completely"; + } + } + + public static final class NeverCollect extends BasicPolicy { + + @Override + public boolean shouldCollectOnAllocation() { + throw VMError.shouldNotReachHere("Caller is supposed to be aware of never-collect policy"); + } + + @Override + public boolean shouldCollectCompletely(boolean followingIncrementalCollection) { + throw VMError.shouldNotReachHere("Collection must not be initiated in the first place"); + } + + @Override + public String getName() { + return "never collect"; + } + } + + /** + * A collection policy that delays complete collections until the heap has at least `-Xms` space + * in it, and then tries to balance time in incremental and complete collections. 
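+ * Roughly: with the default PercentTimeInIncrementalCollection of 50, a complete collection
+ * happens once the used chunk bytes exceed the minimum heap size and incremental collections
+ * have consumed more than half of the total collection time, or when the estimated heap
+ * usage at the next incremental collection would exceed the maximum heap size.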
+ */ + public static final class BySpaceAndTime extends BasicPolicy { + + @Override + public boolean shouldCollectCompletely(boolean followingIncrementalCollection) { + if (followingIncrementalCollection && !HeapParameters.Options.CollectYoungGenerationSeparately.getValue()) { + return false; + } + return estimateUsedHeapAtNextIncrementalCollection().aboveThan(getMaximumHeapSize()) || + GCImpl.getChunkBytes().aboveThan(getMinimumHeapSize()) && enoughTimeSpentOnIncrementalGCs(); + } + + /** + * Estimates the heap size at the next incremental collection assuming that the whole + * current young generation gets promoted. + */ + private UnsignedWord estimateUsedHeapAtNextIncrementalCollection() { + UnsignedWord currentYoungBytes = HeapImpl.getHeapImpl().getYoungGeneration().getChunkBytes(); + UnsignedWord maxYoungBytes = getMaximumYoungGenerationSize(); + UnsignedWord oldBytes = GCImpl.getGCImpl().getAccounting().getOldGenerationAfterChunkBytes(); + return currentYoungBytes.add(maxYoungBytes).add(oldBytes); + } + + private static boolean enoughTimeSpentOnIncrementalGCs() { + int incrementalWeight = PercentTimeInIncrementalCollection.getValue(); + assert incrementalWeight >= 0 && incrementalWeight <= 100 : "BySpaceAndTimePercentTimeInIncrementalCollection should be in the range [0..100]."; + + GCAccounting accounting = GCImpl.getGCImpl().getAccounting(); + long actualIncrementalNanos = accounting.getIncrementalCollectionTotalNanos(); + long completeNanos = accounting.getCompleteCollectionTotalNanos(); + long totalNanos = actualIncrementalNanos + completeNanos; + long expectedIncrementalNanos = TimeUtils.weightedNanos(incrementalWeight, totalNanos); + return TimeUtils.nanoTimeLessThan(expectedIncrementalNanos, actualIncrementalNanos); + } + + @Override + public String getName() { + return "by space and time"; + } + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ChunkedImageHeapAllocator.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ChunkedImageHeapAllocator.java index 87cdf9be247f..6af9e4b41cd4 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ChunkedImageHeapAllocator.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ChunkedImageHeapAllocator.java @@ -171,8 +171,8 @@ public long getUnallocatedBytes() { ChunkedImageHeapAllocator(ImageHeap imageHeap, long position) { this.imageHeap = imageHeap; - this.alignedChunkSize = UnsignedUtils.safeToInt(HeapPolicy.getAlignedHeapChunkSize()); - this.alignedChunkAlignment = UnsignedUtils.safeToInt(HeapPolicy.getAlignedHeapChunkAlignment()); + this.alignedChunkSize = UnsignedUtils.safeToInt(HeapParameters.getAlignedHeapChunkSize()); + this.alignedChunkAlignment = UnsignedUtils.safeToInt(HeapParameters.getAlignedHeapChunkAlignment()); this.alignedChunkObjectsOffset = UnsignedUtils.safeToInt(AlignedHeapChunk.getObjectsStartOffset()); this.unalignedChunkObjectsOffset = UnsignedUtils.safeToInt(UnalignedHeapChunk.getObjectStartOffset()); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ChunkedImageHeapLayouter.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ChunkedImageHeapLayouter.java index 3381048fed3e..96e4d171d7e8 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ChunkedImageHeapLayouter.java +++ 
b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ChunkedImageHeapLayouter.java @@ -49,7 +49,7 @@ public ChunkedImageHeapLayouter(ImageHeapInfo heapInfo, long startOffset, int nu this.heapInfo = heapInfo; this.startOffset = startOffset; this.nullRegionSize = nullRegionSize; - this.hugeObjectThreshold = HeapPolicy.getLargeArrayThreshold().rawValue(); + this.hugeObjectThreshold = HeapParameters.getLargeArrayThreshold().rawValue(); } @Override diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ChunksAccounting.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ChunksAccounting.java new file mode 100644 index 000000000000..e5253deb51e3 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ChunksAccounting.java @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.genscavenge; + +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; +import org.graalvm.word.UnsignedWord; +import org.graalvm.word.WordFactory; + +import com.oracle.svm.core.annotate.Uninterruptible; +import com.oracle.svm.core.log.Log; + +/** + * Accounting for a {@link Space} or {@link Generation}. For the eden space, the values are + * inaccurate outside of a GC (see {@link HeapAccounting#getYoungUsedBytes()} and + * {@link HeapAccounting#getEdenUsedBytes()}).
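+ * + * Accountings can be chained through an optional parent: noting or un-noting a chunk on a + * space's accounting then also updates the totals of the enclosing generation.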
+ */ +final class ChunksAccounting { + private final ChunksAccounting parent; + private long alignedCount; + private long unalignedCount; + private UnsignedWord unalignedChunkBytes; + + @Platforms(Platform.HOSTED_ONLY.class) + ChunksAccounting() { + this(null); + } + + @Platforms(Platform.HOSTED_ONLY.class) + ChunksAccounting(ChunksAccounting parent) { + this.parent = parent; + reset(); + } + + public void reset() { + alignedCount = 0L; + unalignedCount = 0L; + unalignedChunkBytes = WordFactory.zero(); + } + + public UnsignedWord getChunkBytes() { + return getAlignedChunkBytes().add(getUnalignedChunkBytes()); + } + + public long getAlignedChunkCount() { + return alignedCount; + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public UnsignedWord getAlignedChunkBytes() { + return WordFactory.unsigned(alignedCount).multiply(HeapParameters.getAlignedHeapChunkSize()); + } + + public long getUnalignedChunkCount() { + return unalignedCount; + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public UnsignedWord getUnalignedChunkBytes() { + return unalignedChunkBytes; + } + + void report(Log reportLog) { + reportLog.string("aligned: ").unsigned(getAlignedChunkBytes()).string("/").unsigned(alignedCount); + reportLog.string(" "); + reportLog.string("unaligned: ").unsigned(unalignedChunkBytes).string("/").unsigned(unalignedCount); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + void noteAlignedHeapChunk() { + alignedCount++; + if (parent != null) { + parent.noteAlignedHeapChunk(); + } + } + + void unnoteAlignedHeapChunk() { + alignedCount--; + if (parent != null) { + parent.unnoteAlignedHeapChunk(); + } + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + void noteUnalignedHeapChunk(UnalignedHeapChunk.UnalignedHeader chunk) { + noteUnaligned(UnalignedHeapChunk.getCommittedObjectMemory(chunk)); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + private void noteUnaligned(UnsignedWord size) { + unalignedCount++; + unalignedChunkBytes = unalignedChunkBytes.add(size); + if (parent != null) { + parent.noteUnaligned(size); + } + } + + void unnoteUnalignedHeapChunk(UnalignedHeapChunk.UnalignedHeader chunk) { + unnoteUnaligned(UnalignedHeapChunk.getCommittedObjectMemory(chunk)); + } + + private void unnoteUnaligned(UnsignedWord size) { + unalignedCount--; + unalignedChunkBytes = unalignedChunkBytes.subtract(size); + if (parent != null) { + parent.unnoteUnaligned(size); + } + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CollectionPolicy.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CollectionPolicy.java index c3cff962102c..38970e76c7b8 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CollectionPolicy.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CollectionPolicy.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,195 +24,149 @@ */ package com.oracle.svm.core.genscavenge; -import static com.oracle.svm.core.genscavenge.CollectionPolicy.Options.PercentTimeInIncrementalCollection; -import static com.oracle.svm.core.genscavenge.HeapPolicy.getMaximumHeapSize; -import static com.oracle.svm.core.genscavenge.HeapPolicy.getMinimumHeapSize; - import org.graalvm.compiler.options.Option; import org.graalvm.nativeimage.Platform; import org.graalvm.nativeimage.Platforms; -import org.graalvm.nativeimage.hosted.Feature.FeatureAccess; import org.graalvm.word.UnsignedWord; import com.oracle.svm.core.SubstrateOptions; -import com.oracle.svm.core.log.Log; +import com.oracle.svm.core.heap.GCCause; +import com.oracle.svm.core.heap.PhysicalMemory; import com.oracle.svm.core.option.HostedOptionKey; -import com.oracle.svm.core.option.RuntimeOptionKey; -import com.oracle.svm.core.util.TimeUtils; import com.oracle.svm.core.util.UserError; -/** A collection policy decides when to collect incrementally or completely. */ -public abstract class CollectionPolicy { - public static class Options { - @Option(help = "The initial garbage collection policy, as a fully-qualified class name (might require quotes or escaping).")// - public static final HostedOptionKey<String> InitialCollectionPolicy = new HostedOptionKey<>(BySpaceAndTime.class.getName()); - - @Option(help = "Percentage of total collection time that should be spent on young generation collections.")// - public static final RuntimeOptionKey<Integer> PercentTimeInIncrementalCollection = new RuntimeOptionKey<>(50); +/** The interface for a garbage collection policy. All sizes are in bytes. */ +public interface CollectionPolicy { + final class Options { + @Option(help = "The garbage collection policy, either Adaptive or BySpaceAndTime (default).")// + public static final HostedOptionKey<String> InitialCollectionPolicy = new HostedOptionKey<>("BySpaceAndTime"); } @Platforms(Platform.HOSTED_ONLY.class) - static CollectionPolicy getInitialPolicy(FeatureAccess access) { + static String getInitialPolicyName() { if (SubstrateOptions.UseEpsilonGC.getValue()) { - return new NeverCollect(); + return "NeverCollect"; } else if (!SubstrateOptions.useRememberedSet()) { - return new OnlyCompletely(); - } else { - // Use whatever policy the user specified. - return instantiatePolicy(access, CollectionPolicy.class, Options.InitialCollectionPolicy.getValue()); - } - } - - @Platforms(Platform.HOSTED_ONLY.class) - private static <T> T instantiatePolicy(FeatureAccess access, Class<T> policyClass, String className) { - Class<?> policy = access.findClassByName(className); - if (policy == null) { - throw UserError.abort("Policy %s does not exist. It must be a fully qualified class name.", className); + return "OnlyCompletely"; } - Object result; - try { - result = policy.getDeclaredConstructor().newInstance(); - } catch (Exception ex) { - throw UserError.abort("Policy %s cannot be instantiated.", className); + String name = Options.InitialCollectionPolicy.getValue(); + String legacyPrefix = "com.oracle.svm.core.genscavenge.CollectionPolicy$"; + if (name.startsWith(legacyPrefix)) { + return name.substring(legacyPrefix.length()); } - if (!policyClass.isInstance(result)) { - throw UserError.abort("Policy %s does not extend %s.", className, policyClass.getTypeName()); - } - return policyClass.cast(result); + return name; } - /** Return {@code true} if the current collection should entail an incremental collection.
*/ - public abstract boolean collectIncrementally(); - - /** Return {@code true} if the current collection should entail a complete collection. */ - public abstract boolean collectCompletely(); - - CollectionPolicy() { - } - - public abstract void nameToLog(Log log); - - public abstract String getName(); - - static GCAccounting getAccounting() { - return GCImpl.getGCImpl().getAccounting(); + @Platforms(Platform.HOSTED_ONLY.class) + static CollectionPolicy getInitialPolicy() { + String name = getInitialPolicyName(); + switch (name) { + case "Adaptive": + return new AdaptiveCollectionPolicy(); + case "Proportionate": + return new ProportionateSpacesPolicy(); + case "BySpaceAndTime": + return new BasicCollectionPolicies.BySpaceAndTime(); + case "OnlyCompletely": + return new BasicCollectionPolicies.OnlyCompletely(); + case "OnlyIncrementally": + return new BasicCollectionPolicies.OnlyIncrementally(); + case "NeverCollect": + return new BasicCollectionPolicies.NeverCollect(); + } + throw UserError.abort("Policy %s does not exist.", name); } - public static class OnlyIncrementally extends CollectionPolicy { - - @Override - public boolean collectIncrementally() { - return true; - } - - @Override - public boolean collectCompletely() { - return false; - } - - @Override - public void nameToLog(Log log) { - log.string(getName()); - } - - @Override - public String getName() { - return "only incrementally"; + @Platforms(Platform.HOSTED_ONLY.class) + static int getMaxSurvivorSpaces(Integer userValue) { + String name = getInitialPolicyName(); + if ("Adaptive".equals(name) || "Proportionate".equals(name)) { + return AbstractCollectionPolicy.getMaxSurvivorSpaces(userValue); } + return BasicCollectionPolicies.getMaxSurvivorSpaces(userValue); } - public static class OnlyCompletely extends CollectionPolicy { + String getName(); - @Override - public boolean collectIncrementally() { - return false; - } - - @Override - public boolean collectCompletely() { - return true; - } + /** + * Ensures that size parameters have been computed and methods like {@link #getMaximumHeapSize} + * provide reasonable values, but do not force a recomputation of the size parameters like + * {@link #updateSizeParameters}. + */ + void ensureSizeParametersInitialized(); - @Override - public void nameToLog(Log log) { - log.string(getName()); - } + /** + * (Re)computes minimum/maximum/initial sizes of space based on the available + * {@linkplain PhysicalMemory physical memory} and current runtime option values. This method is + * called after slow-path allocation (of a TLAB or a large object) and so allocation is allowed, + * but can trigger a collection. + */ + void updateSizeParameters(); - @Override - public String getName() { - return "only completely"; - } - } + /** + * During a slow-path allocation, determines whether to trigger a collection. Returning + * {@code true} will initiate a safepoint during which {@link #shouldCollectCompletely} will be + * called followed by the collection. + */ + boolean shouldCollectOnAllocation(); - public static class NeverCollect extends CollectionPolicy { + /** + * At a safepoint, decides whether to do a complete collection (returning {@code true}) or an + * incremental collection (returning {@code false}). + * + * @param followingIncrementalCollection whether an incremental collection has just finished in + * the same safepoint. Implementations would typically decide whether to follow up + * with a full collection based on whether enough memory was reclaimed. 
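+ * + * A GC safepoint may therefore query this method twice: first with {@code false} to choose + * between an incremental and a complete collection, and, if an incremental collection ran, + * again with {@code true} to decide whether a complete collection should follow.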
+ */ + boolean shouldCollectCompletely(boolean followingIncrementalCollection); - @Override - public boolean collectIncrementally() { - return false; - } + /** + * The current limit for the size of the entire heap, which is less than or equal to + * {@link #getMaximumHeapSize}. + * + * NOTE: this can currently be exceeded during a collection while copying objects in the old + * generation. + */ + UnsignedWord getCurrentHeapCapacity(); - @Override - public boolean collectCompletely() { - return false; - } + /** + * The hard limit for the size of the entire heap. Exceeding this limit triggers an + * {@link OutOfMemoryError}. + * + * NOTE: this can currently be exceeded during a collection while copying objects in the old + * generation. + */ + UnsignedWord getMaximumHeapSize(); - @Override - public void nameToLog(Log log) { - log.string(getName()); - } + /** The maximum capacity of the young generation, comprising eden and survivor spaces. */ + UnsignedWord getMaximumYoungGenerationSize(); - @Override - public String getName() { - return "never collect"; - } - } + /** The minimum heap size, for inclusion in diagnostic output. */ + UnsignedWord getMinimumHeapSize(); /** - * A collection policy that delays complete collections until the heap has at least `-Xms` space - * in it, and then tries to balance time in incremental and complete collections. + * The total capacity of all survivor-from spaces of all ages, equal to the size of all + * survivor-to spaces of all ages. In other words, when copying during a collection, up to 2x + * this amount can be used for surviving objects. */ - public static class BySpaceAndTime extends CollectionPolicy { - @Override - public boolean collectIncrementally() { - return true; - } - - @Override - public boolean collectCompletely() { - return estimateUsedHeapAtNextIncrementalCollection().aboveThan(getMaximumHeapSize()) || - GCImpl.getChunkBytes().aboveThan(getMinimumHeapSize()) && enoughTimeSpentOnIncrementalGCs(); - } + UnsignedWord getSurvivorSpacesCapacity(); - /** - * Estimates the heap size at the next incremental collection assuming that the whole - * current young generation gets promoted. - */ - private static UnsignedWord estimateUsedHeapAtNextIncrementalCollection() { - UnsignedWord currentYoungBytes = HeapImpl.getHeapImpl().getYoungGeneration().getChunkBytes(); - UnsignedWord maxYoungBytes = HeapPolicy.getMaximumYoungGenerationSize(); - UnsignedWord oldBytes = getAccounting().getOldGenerationAfterChunkBytes(); - return currentYoungBytes.add(maxYoungBytes).add(oldBytes); - } - - private static boolean enoughTimeSpentOnIncrementalGCs() { - int incrementalWeight = PercentTimeInIncrementalCollection.getValue(); - assert incrementalWeight >= 0 && incrementalWeight <= 100 : "BySpaceAndTimePercentTimeInIncrementalCollection should be in the range [0..100]."; + /** + * The maximum number of bytes that should be kept readily available for allocation or copying + * during collections. 
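+ * + * The chunk provider consults this value after a collection to decide how many aligned chunks + * to retain on its free list rather than returning their memory to the operating system.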
+ */ + UnsignedWord getMaximumFreeAlignedChunksSize(); - long actualIncrementalNanos = getAccounting().getIncrementalCollectionTotalNanos(); - long completeNanos = getAccounting().getCompleteCollectionTotalNanos(); - long totalNanos = actualIncrementalNanos + completeNanos; - long expectedIncrementalNanos = TimeUtils.weightedNanos(incrementalWeight, totalNanos); - return TimeUtils.nanoTimeLessThan(expectedIncrementalNanos, actualIncrementalNanos); - } + /** + * The age at which objects should currently be promoted to the old generation, which is between + * 1 (straight from eden) and the {@linkplain HeapParameters#getMaxSurvivorSpaces() number of + * survivor spaces + 1}. + */ + int getTenuringAge(); - @Override - public void nameToLog(Log log) { - log.string(getName()).string(": ").signed(Options.PercentTimeInIncrementalCollection.getValue()).string("% in incremental collections"); - } + /** Called at the beginning of a collection, in the safepoint operation. */ + void onCollectionBegin(boolean completeCollection); - @Override - public String getName() { - return "by space and time"; - } - } + /** Called before the end of a collection, in the safepoint operation. */ + void onCollectionEnd(boolean completeCollection, GCCause cause); } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCAccounting.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCAccounting.java index 38fa743378e2..0533458c1cf6 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCAccounting.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCAccounting.java @@ -29,6 +29,8 @@ import org.graalvm.word.UnsignedWord; import org.graalvm.word.WordFactory; +import com.oracle.svm.core.annotate.AlwaysInline; +import com.oracle.svm.core.hub.LayoutEncoding; import com.oracle.svm.core.log.Log; /** @@ -45,15 +47,14 @@ public final class GCAccounting { private long completeCollectionTotalNanos = 0; private UnsignedWord collectedTotalChunkBytes = WordFactory.zero(); private UnsignedWord allocatedChunkBytes = WordFactory.zero(); - private UnsignedWord promotedTotalChunkBytes = WordFactory.zero(); - private UnsignedWord copiedTotalChunkBytes = WordFactory.zero(); + private UnsignedWord tenuredObjectBytes = WordFactory.zero(); + private UnsignedWord survivorOverflowObjectBytes = WordFactory.zero(); /* Before and after measures. */ private UnsignedWord youngChunkBytesBefore = WordFactory.zero(); private UnsignedWord youngChunkBytesAfter = WordFactory.zero(); private UnsignedWord oldChunkBytesBefore = WordFactory.zero(); private UnsignedWord oldChunkBytesAfter = WordFactory.zero(); - private UnsignedWord lastCollectionPromotedChunkBytes = WordFactory.zero(); /* * Bytes allocated in Objects, as opposed to bytes of chunks. 
These are only maintained if @@ -104,16 +105,20 @@ public UnsignedWord getOldGenerationAfterChunkBytes() { return oldChunkBytesAfter; } + UnsignedWord getYoungChunkBytesBefore() { + return youngChunkBytesBefore; + } + UnsignedWord getYoungChunkBytesAfter() { return youngChunkBytesAfter; } - public static UnsignedWord getSurvivorSpaceAfterChunkBytes(int survivorIndex) { - return HeapImpl.getHeapImpl().getYoungGeneration().getSurvivorFromSpaceAt(survivorIndex).getChunkBytes(); + UnsignedWord getTenuredObjectBytes() { + return tenuredObjectBytes; } - UnsignedWord getLastCollectionPromotedChunkBytes() { - return lastCollectionPromotedChunkBytes; + UnsignedWord getSurvivorOverflowObjectBytes() { + return survivorOverflowObjectBytes; } void beforeCollection() { @@ -126,17 +131,30 @@ void beforeCollection() { Space oldSpace = heap.getOldGeneration().getFromSpace(); oldChunkBytesBefore = oldSpace.getChunkBytes(); /* Objects are allocated in the young generation. */ - allocatedChunkBytes = allocatedChunkBytes.add(youngChunkBytesBefore); + allocatedChunkBytes = allocatedChunkBytes.add(youngGen.getEden().getChunkBytes()); if (HeapOptions.PrintGCSummary.getValue()) { - youngObjectBytesBefore = youngGen.computeObjectBytes(); + UnsignedWord edenObjectBytesBefore = youngGen.getEden().computeObjectBytes(); + youngObjectBytesBefore = edenObjectBytesBefore.add(youngGen.computeSurvivorObjectBytes()); oldObjectBytesBefore = oldSpace.computeObjectBytes(); - allocatedObjectBytes = allocatedObjectBytes.add(youngObjectBytesBefore); + allocatedObjectBytes = allocatedObjectBytes.add(edenObjectBytesBefore); } + tenuredObjectBytes = WordFactory.zero(); + survivorOverflowObjectBytes = WordFactory.zero(); trace.string(" youngChunkBytesBefore: ").unsigned(youngChunkBytesBefore) .string(" oldChunkBytesBefore: ").unsigned(oldChunkBytesBefore); trace.string("]").newline(); } + /** Called after an object has been promoted from the young generation to the old generation. */ + @AlwaysInline("GC performance") + void onObjectTenured(Object result, boolean survivorOverflow) { + UnsignedWord size = LayoutEncoding.getSizeFromObject(result); + tenuredObjectBytes = tenuredObjectBytes.add(size); + if (survivorOverflow) { + survivorOverflowObjectBytes = survivorOverflowObjectBytes.add(size); + } + } + void afterCollection(boolean completeCollection, Timer collectionTimer) { if (completeCollection) { afterCompleteCollection(collectionTimer); @@ -154,14 +172,10 @@ private void afterIncrementalCollection(Timer collectionTimer) { */ incrementalCollectionCount += 1; afterCollectionCommon(); - /* Incremental collections only promote. */ - lastCollectionPromotedChunkBytes = oldChunkBytesAfter.subtract(oldChunkBytesBefore); - promotedTotalChunkBytes = promotedTotalChunkBytes.add(lastCollectionPromotedChunkBytes); incrementalCollectionTotalNanos += collectionTimer.getMeasuredNanos(); trace.string(" incrementalCollectionCount: ").signed(incrementalCollectionCount) .string(" oldChunkBytesAfter: ").unsigned(oldChunkBytesAfter) - .string(" oldChunkBytesBefore: ").unsigned(oldChunkBytesBefore) - .string(" promotedChunkBytes: ").unsigned(lastCollectionPromotedChunkBytes); + .string(" oldChunkBytesBefore: ").unsigned(oldChunkBytesBefore); trace.string("]").newline(); } @@ -169,15 +183,13 @@ private void afterCompleteCollection(Timer collectionTimer) { Log trace = Log.noopLog().string("[GCImpl.Accounting.afterCompleteCollection:"); completeCollectionCount += 1; afterCollectionCommon(); - /* Complete collections only copy, and they copy everything. 
*/ - copiedTotalChunkBytes = copiedTotalChunkBytes.add(oldChunkBytesAfter); completeCollectionTotalNanos += collectionTimer.getMeasuredNanos(); trace.string(" completeCollectionCount: ").signed(completeCollectionCount) .string(" oldChunkBytesAfter: ").unsigned(oldChunkBytesAfter); trace.string("]").newline(); } - void afterCollectionCommon() { + private void afterCollectionCommon() { HeapImpl heap = HeapImpl.getHeapImpl(); // This is called after the collection, after the space flip, so OldSpace is FromSpace. YoungGeneration youngGen = heap.getYoungGeneration(); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java index 0a9dcadbf46c..74042cb35638 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java @@ -39,7 +39,6 @@ import org.graalvm.nativeimage.c.struct.RawField; import org.graalvm.nativeimage.c.struct.RawStructure; import org.graalvm.nativeimage.c.struct.SizeOf; -import org.graalvm.nativeimage.hosted.Feature.FeatureAccess; import org.graalvm.word.Pointer; import org.graalvm.word.UnsignedWord; import org.graalvm.word.WordFactory; @@ -49,6 +48,7 @@ import com.oracle.svm.core.SubstrateGCOptions; import com.oracle.svm.core.SubstrateOptions; import com.oracle.svm.core.UnmanagedMemoryUtil; +import com.oracle.svm.core.annotate.AlwaysInline; import com.oracle.svm.core.annotate.NeverInline; import com.oracle.svm.core.annotate.RestrictHeapAccess; import com.oracle.svm.core.annotate.Uninterruptible; @@ -63,7 +63,7 @@ import com.oracle.svm.core.deopt.DeoptimizedFrame; import com.oracle.svm.core.deopt.Deoptimizer; import com.oracle.svm.core.genscavenge.AlignedHeapChunk.AlignedHeader; -import com.oracle.svm.core.genscavenge.CollectionPolicy.NeverCollect; +import com.oracle.svm.core.genscavenge.BasicCollectionPolicies.NeverCollect; import com.oracle.svm.core.genscavenge.HeapChunk.Header; import com.oracle.svm.core.genscavenge.UnalignedHeapChunk.UnalignedHeader; import com.oracle.svm.core.genscavenge.remset.RememberedSet; @@ -94,6 +94,8 @@ * Garbage collector (incremental or complete) for {@link HeapImpl}. 
*/ public final class GCImpl implements GC { + private static final OutOfMemoryError OUT_OF_MEMORY_ERROR = new OutOfMemoryError("Garbage-collected heap size exceeded."); + private final GreyToBlackObjRefVisitor greyToBlackObjRefVisitor = new GreyToBlackObjRefVisitor(); private final GreyToBlackObjectVisitor greyToBlackObjectVisitor = new GreyToBlackObjectVisitor(greyToBlackObjRefVisitor); private final BlackenImageHeapRootsVisitor blackenImageHeapRootsVisitor = new BlackenImageHeapRootsVisitor(); @@ -114,8 +116,8 @@ public final class GCImpl implements GC { private UnsignedWord collectionEpoch = WordFactory.zero(); @Platforms(Platform.HOSTED_ONLY.class) - GCImpl(FeatureAccess access) { - this.policy = CollectionPolicy.getInitialPolicy(access); + GCImpl() { + this.policy = CollectionPolicy.getInitialPolicy(); RuntimeSupport.getRuntimeSupport().addShutdownHook(this::printGCSummary); } @@ -124,12 +126,32 @@ public void collect(GCCause cause) { collect(cause, false); } + public void maybeCollectOnAllocation() { + boolean outOfMemory = false; + if (hasNeverCollectPolicy()) { + UnsignedWord edenUsed = HeapImpl.getHeapImpl().getAccounting().getEdenUsedBytes(); + outOfMemory = edenUsed.aboveThan(GCImpl.getPolicy().getMaximumHeapSize()); + } else if (getPolicy().shouldCollectOnAllocation()) { + outOfMemory = collectWithoutAllocating(GenScavengeGCCause.OnAllocation, false); + } + if (outOfMemory) { + throw OUT_OF_MEMORY_ERROR; + } + } + + @SuppressWarnings("static-method") + public void maybeCauseUserRequestedCollection() { + if (!SubstrateGCOptions.DisableExplicitGC.getValue()) { + HeapImpl.getHeapImpl().getGC().collectCompletely(GCCause.JavaLangSystemGC); + } + } + private void collect(GCCause cause, boolean forceFullGC) { if (!hasNeverCollectPolicy()) { UnsignedWord requestingEpoch = possibleCollectionPrologue(); boolean outOfMemory = collectWithoutAllocating(cause, forceFullGC); if (outOfMemory) { - throw HeapPolicy.OUT_OF_MEMORY_ERROR; + throw OUT_OF_MEMORY_ERROR; } possibleCollectionEpilogue(requestingEpoch); } @@ -171,8 +193,8 @@ private boolean collectOperation(GCCause cause, UnsignedWord requestingEpoch, bo ThreadLocalAllocation.disableAndFlushForAllThreads(); printGCBefore(cause.getName()); - boolean outOfMemory = collectImpl(forceFullGC); - HeapPolicy.setEdenAndYoungGenBytes(WordFactory.unsigned(0), accounting.getYoungChunkBytesAfter()); + boolean outOfMemory = collectImpl(cause, forceFullGC); + HeapImpl.getHeapImpl().getAccounting().setEdenAndYoungGenBytes(WordFactory.unsigned(0), accounting.getYoungChunkBytesAfter()); printGCAfter(cause.getName()); finishCollection(); @@ -181,7 +203,7 @@ private boolean collectOperation(GCCause cause, UnsignedWord requestingEpoch, bo return outOfMemory; } - private boolean collectImpl(boolean forceFullGC) { + private boolean collectImpl(GCCause cause, boolean forceFullGC) { boolean outOfMemory; precondition(); @@ -189,12 +211,12 @@ private boolean collectImpl(boolean forceFullGC) { NoAllocationVerifier nav = noAllocationVerifier.open(); try { - outOfMemory = doCollectImpl(forceFullGC); + outOfMemory = doCollectImpl(cause, forceFullGC); if (outOfMemory) { // Avoid running out of memory with a full GC that reclaims softly reachable objects ReferenceObjectProcessing.setSoftReferencesAreWeak(true); try { - outOfMemory = doCollectImpl(true); + outOfMemory = doCollectImpl(cause, true); } finally { ReferenceObjectProcessing.setSoftReferencesAreWeak(false); } @@ -208,37 +230,49 @@ private boolean collectImpl(boolean forceFullGC) { return outOfMemory; } - 
private boolean doCollectImpl(boolean forceFullGC) { + private boolean doCollectImpl(GCCause cause, boolean forceFullGC) { CommittedMemoryProvider.get().beforeGarbageCollection(); + boolean incremental = HeapParameters.Options.CollectYoungGenerationSeparately.getValue() || + (!forceFullGC && !policy.shouldCollectCompletely(false)); + boolean outOfMemory = false; + if (incremental) { + outOfMemory = doCollectOnce(cause, false, false); + } + if (!incremental || outOfMemory || forceFullGC || policy.shouldCollectCompletely(true)) { + if (incremental) { // uncommit unaligned chunks + CommittedMemoryProvider.get().afterGarbageCollection(); + } + outOfMemory = doCollectOnce(cause, true, incremental); + } + + HeapImpl.getChunkProvider().freeExcessAlignedChunks(); + CommittedMemoryProvider.get().afterGarbageCollection(); + return outOfMemory; + } + + private boolean doCollectOnce(GCCause cause, boolean complete, boolean followsIncremental) { + assert !followsIncremental || complete : "An incremental collection cannot be followed by another incremental collection"; + completeCollection = complete; + accounting.beforeCollection(); + policy.onCollectionBegin(completeCollection); Timer collectionTimer = timers.collection.open(); try { - completeCollection = forceFullGC || policy.collectCompletely(); - if (completeCollection) { - if (HeapPolicyOptions.CollectYoungGenerationSeparately.getValue()) { - scavenge(true); - } - scavenge(false); - } else if (policy.collectIncrementally()) { - scavenge(true); - } else { - VMError.shouldNotReachHere("A safepoint for a GC was triggered, so why did the GC policy decide not to do a GC?"); - } + scavenge(!complete, followsIncremental); } finally { collectionTimer.close(); } - CommittedMemoryProvider.get().afterGarbageCollection(completeCollection); - accounting.afterCollection(completeCollection, timers.collection); - UnsignedWord maxBytes = HeapPolicy.getMaximumHeapSize(); - UnsignedWord usedBytes = getChunkBytes(); - boolean outOfMemory = usedBytes.aboveThan(maxBytes); + accounting.afterCollection(completeCollection, collectionTimer); + policy.onCollectionEnd(completeCollection, cause); - ReferenceObjectProcessing.afterCollection(usedBytes, maxBytes); + UnsignedWord usedBytes = getChunkBytes(); + UnsignedWord freeBytes = policy.getCurrentHeapCapacity().subtract(usedBytes); + ReferenceObjectProcessing.afterCollection(freeBytes); - return outOfMemory; + return usedBytes.aboveThan(policy.getMaximumHeapSize()); // out of memory? } private void verifyBeforeGC() { @@ -299,11 +333,11 @@ private void printGCBefore(String cause) { sizeBefore = ((SubstrateGCOptions.PrintGC.getValue() || HeapOptions.PrintHeapShape.getValue()) ? 
getChunkBytes() : WordFactory.zero()); if (SubstrateGCOptions.VerboseGC.getValue() && getCollectionEpoch().equal(1)) { verboseGCLog.string("[Heap policy parameters: ").newline(); - verboseGCLog.string(" YoungGenerationSize: ").unsigned(HeapPolicy.getMaximumYoungGenerationSize()).newline(); - verboseGCLog.string(" MaximumHeapSize: ").unsigned(HeapPolicy.getMaximumHeapSize()).newline(); - verboseGCLog.string(" MinimumHeapSize: ").unsigned(HeapPolicy.getMinimumHeapSize()).newline(); - verboseGCLog.string(" AlignedChunkSize: ").unsigned(HeapPolicy.getAlignedHeapChunkSize()).newline(); - verboseGCLog.string(" LargeArrayThreshold: ").unsigned(HeapPolicy.getLargeArrayThreshold()).string("]").newline(); + verboseGCLog.string(" YoungGenerationSize: ").unsigned(getPolicy().getMaximumYoungGenerationSize()).newline(); + verboseGCLog.string(" MaximumHeapSize: ").unsigned(getPolicy().getMaximumHeapSize()).newline(); + verboseGCLog.string(" MinimumHeapSize: ").unsigned(getPolicy().getMinimumHeapSize()).newline(); + verboseGCLog.string(" AlignedChunkSize: ").unsigned(HeapParameters.getAlignedHeapChunkSize()).newline(); + verboseGCLog.string(" LargeArrayThreshold: ").unsigned(HeapParameters.getLargeArrayThreshold()).string("]").newline(); if (HeapOptions.PrintHeapShape.getValue()) { HeapImpl.getHeapImpl().logImageHeapPartitionBoundaries(verboseGCLog); } @@ -409,7 +443,7 @@ private static void verbosePostCondition() { youngGen.getEden().report(log, true).newline(); log.string("]").newline(); } - for (int i = 0; i < HeapPolicy.getMaxSurvivorSpaces(); i++) { + for (int i = 0; i < HeapParameters.getMaxSurvivorSpaces(); i++) { if ((!youngGen.getSurvivorToSpaceAt(i).isEmpty()) || forceForTesting) { log.string("[GCImpl.postcondition: Survivor toSpace should be empty after a collection.").newline(); /* Print raw fields before trying to walk the chunk lists. */ @@ -459,15 +493,15 @@ public boolean isCompleteCollection() { } /** Scavenge, either from dirty roots or from all roots, and process discovered references. */ - private void scavenge(boolean fromDirtyRoots) { + private void scavenge(boolean incremental, boolean followingIncremental) { GreyToBlackObjRefVisitor.Counters counters = greyToBlackObjRefVisitor.openCounters(); try { Timer rootScanTimer = timers.rootScan.open(); try { - if (fromDirtyRoots) { + if (incremental) { cheneyScanFromDirtyRoots(); } else { - cheneyScanFromRoots(); + cheneyScanFromRoots(followingIncremental); } } finally { rootScanTimer.close(); @@ -499,7 +533,14 @@ private void scavenge(boolean fromDirtyRoots) { try { assert chunkReleaser.isEmpty(); releaseSpaces(); - chunkReleaser.release(); + + /* + * Do not uncommit any aligned chunks yet if we just did an incremental GC so if we + * decide to do a full GC next, we can reuse the chunks for copying live old objects + * with fewer chunk allocations. In either case, excess chunks are released later. + */ + boolean keepAllAlignedChunks = incremental; + chunkReleaser.release(keepAllAlignedChunks); } finally { releaseSpacesTimer.close(); } @@ -533,7 +574,7 @@ private void cleanRuntimeCodeCache() { } } - private void cheneyScanFromRoots() { + private void cheneyScanFromRoots(boolean followingIncremental) { Timer cheneyScanFromRootsTimer = timers.cheneyScanFromRoots.open(); try { /* Take a snapshot of the heap so that I can visit all the promoted Objects. */ @@ -542,7 +583,23 @@ private void cheneyScanFromRoots() { * Objects into each of the blackening methods, or even put them around individual * Object reference visits. 
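* (With this snapshot-at-the-beginning scheme, objects promoted after the snapshot are still * visited: scanning continues until no grey objects remain.)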
*/ - prepareForPromotion(false); + prepareForPromotion(); + + if (followingIncremental) { + /* + * We just finished an incremental collection, so we will not be able to reclaim any + * young objects and do not need to copy them (and do not want to age or tenure them + * in the process). We still need to scan them for roots into the old generation. + * + * There is potential trouble with this: if objects in the young generation are + * reachable only from garbage objects in the old generation, the young objects are + * not reclaimed during this collection. If there is a cycle in which the young + * objects in turn keep the old objects alive, none of the objects can be reclaimed + * until the young objects are eventually tenured, or until a single complete + * collection is done before we would run out of memory. + */ + HeapImpl.getHeapImpl().getYoungGeneration().emptyFromSpacesIntoToSpaces(); + } /* * Make sure all chunks with pinned objects are in toSpace, and any formerly pinned @@ -566,14 +623,14 @@ private void cheneyScanFromRoots() { blackenImageHeapRoots(); /* Visit all the Objects promoted since the snapshot. */ - scanGreyObjects(false); + scanGreyObjects(); if (DeoptimizationSupport.enabled()) { /* Visit the runtime compiled code, now that we know all the reachable objects. */ walkRuntimeCodeCache(); /* Visit all objects that became reachable because of the compiled code. */ - scanGreyObjects(false); + scanGreyObjects(); } greyToBlackObjectVisitor.reset(); @@ -599,7 +656,7 @@ private void cheneyScanFromDirtyRoots() { * Objects into each of the blackening methods, or even put them around individual * Object reference visits. */ - prepareForPromotion(true); + prepareForPromotion(); /* * Make sure any released objects are in toSpace (because this is an incremental @@ -632,14 +689,14 @@ private void cheneyScanFromDirtyRoots() { blackenDirtyImageHeapRoots(); /* Visit all the Objects promoted since the snapshot, transitively. */ - scanGreyObjects(true); + scanGreyObjects(); if (DeoptimizationSupport.enabled()) { /* Visit the runtime compiled code, now that we know all the reachable objects. */ walkRuntimeCodeCache(); /* Visit all objects that became reachable because of the compiled code. 
*/ - scanGreyObjects(true); + scanGreyObjects(); } greyToBlackObjectVisitor.reset(); @@ -887,52 +944,88 @@ private void blackenDirtyCardRoots() { } } - private static void prepareForPromotion(boolean isIncremental) { + private static void prepareForPromotion() { HeapImpl heap = HeapImpl.getHeapImpl(); - OldGeneration oldGen = heap.getOldGeneration(); - oldGen.prepareForPromotion(); - if (isIncremental) { - heap.getYoungGeneration().prepareForPromotion(); - } + heap.getOldGeneration().prepareForPromotion(); + heap.getYoungGeneration().prepareForPromotion(); } - private void scanGreyObjects(boolean isIncremental) { + private void scanGreyObjects() { HeapImpl heap = HeapImpl.getHeapImpl(); + YoungGeneration youngGen = heap.getYoungGeneration(); OldGeneration oldGen = heap.getOldGeneration(); Timer scanGreyObjectsTimer = timers.scanGreyObjects.open(); try { - if (isIncremental) { - scanGreyObjectsLoop(); - } else { - oldGen.scanGreyObjects(); - } + boolean hasGrey; + do { + hasGrey = youngGen.scanGreyObjects(); + hasGrey |= oldGen.scanGreyObjects(); + } while (hasGrey); } finally { scanGreyObjectsTimer.close(); } } - private static void scanGreyObjectsLoop() { + @AlwaysInline("GC performance") + @SuppressWarnings("static-method") + Object promoteObject(Object original, UnsignedWord header) { HeapImpl heap = HeapImpl.getHeapImpl(); - YoungGeneration youngGen = heap.getYoungGeneration(); - OldGeneration oldGen = heap.getOldGeneration(); - boolean hasGrey = true; - while (hasGrey) { - hasGrey = youngGen.scanGreyObjects(); - hasGrey |= oldGen.scanGreyObjects(); + boolean isAligned = ObjectHeaderImpl.isAlignedHeader(header); + Header originalChunk = getChunk(original, isAligned); + Space originalSpace = HeapChunk.getSpace(originalChunk); + if (!originalSpace.isFromSpace()) { + return original; } + + Object result = null; + boolean survivorOverflow = false; + if (originalSpace.getNextAgeForPromotion() < policy.getTenuringAge()) { + if (isAligned) { + result = heap.getYoungGeneration().promoteAlignedObject(original, (AlignedHeader) originalChunk, originalSpace); + } else { + result = heap.getYoungGeneration().promoteUnalignedObject(original, (UnalignedHeader) originalChunk, originalSpace); + } + survivorOverflow = (result == null); + } + if (result == null) { // tenuring age reached or survivor space full + if (isAligned) { + result = heap.getOldGeneration().promoteAlignedObject(original, (AlignedHeader) originalChunk, originalSpace); + } else { + result = heap.getOldGeneration().promoteUnalignedObject(original, (UnalignedHeader) originalChunk, originalSpace); + } + assert result != null : "promotion failure in old generation must have been handled"; + if (result != original) { + accounting.onObjectTenured(result, survivorOverflow); + } + } + + return result; + } + + private static Header getChunk(Object obj, boolean isAligned) { + if (isAligned) { + return AlignedHeapChunk.getEnclosingChunk(obj); + } + assert ObjectHeaderImpl.isUnalignedObject(obj); + return UnalignedHeapChunk.getEnclosingChunk(obj); } - private static void promotePinnedObject(PinnedObjectImpl pinned) { + private void promotePinnedObject(PinnedObjectImpl pinned) { HeapImpl heap = HeapImpl.getHeapImpl(); - OldGeneration oldGen = heap.getOldGeneration(); - /* Find the chunk the object is in, and if necessary, move it to To space. 
*/ Object referent = pinned.getObject(); if (referent != null && !heap.isInImageHeap(referent)) { - /* - * The referent doesn't move, so I can ignore the result of the promotion because I - * don't have to update any pointers to it. - */ - oldGen.promoteObjectChunk(referent); + boolean isAligned = ObjectHeaderImpl.isAlignedObject(referent); + Header originalChunk = getChunk(referent, isAligned); + Space originalSpace = HeapChunk.getSpace(originalChunk); + if (originalSpace.isFromSpace()) { + boolean promoted = false; + if (originalSpace.getNextAgeForPromotion() < policy.getTenuringAge()) { + promoted = heap.getYoungGeneration().promoteChunk(originalChunk, isAligned, originalSpace); + } + if (!promoted) { + heap.getOldGeneration().promoteChunk(originalChunk, isAligned, originalSpace); + } + } } } @@ -1162,9 +1255,9 @@ public void add(UnalignedHeader chunks) { } } - void release() { + void release(boolean keepAllAlignedChunks) { if (firstAligned.isNonNull()) { - HeapImpl.getChunkProvider().consumeAlignedChunks(firstAligned); + HeapImpl.getChunkProvider().consumeAlignedChunks(firstAligned, keepAllAlignedChunks); firstAligned = WordFactory.nullPointer(); } if (firstUnaligned.isNonNull()) { @@ -1192,10 +1285,10 @@ private void printGCSummary() { Log log = Log.log(); final String prefix = "PrintGCSummary: "; - log.string(prefix).string("YoungGenerationSize: ").unsigned(HeapPolicy.getMaximumYoungGenerationSize()).newline(); - log.string(prefix).string("MinimumHeapSize: ").unsigned(HeapPolicy.getMinimumHeapSize()).newline(); - log.string(prefix).string("MaximumHeapSize: ").unsigned(HeapPolicy.getMaximumHeapSize()).newline(); - log.string(prefix).string("AlignedChunkSize: ").unsigned(HeapPolicy.getAlignedHeapChunkSize()).newline(); + log.string(prefix).string("MaximumYoungGenerationSize: ").unsigned(getPolicy().getMaximumYoungGenerationSize()).newline(); + log.string(prefix).string("MinimumHeapSize: ").unsigned(getPolicy().getMinimumHeapSize()).newline(); + log.string(prefix).string("MaximumHeapSize: ").unsigned(getPolicy().getMaximumHeapSize()).newline(); + log.string(prefix).string("AlignedChunkSize: ").unsigned(HeapParameters.getAlignedHeapChunkSize()).newline(); JavaVMOperation.enqueueBlockingSafepoint("PrintGCSummaryShutdownHook", ThreadLocalAllocation::disableAndFlushForAllThreads); HeapImpl heap = HeapImpl.getHeapImpl(); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Generation.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Generation.java index fafae6783bbd..35b694891c19 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Generation.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Generation.java @@ -26,7 +26,6 @@ import org.graalvm.nativeimage.Platform; import org.graalvm.nativeimage.Platforms; -import org.graalvm.word.UnsignedWord; import com.oracle.svm.core.heap.ObjectVisitor; import com.oracle.svm.core.log.Log; @@ -56,16 +55,40 @@ public String getName() { public abstract Log report(Log log, boolean traceHeapChunks); /** - * Promote an Object to this Generation, either by HeapChunk motion or copying. If the original - * is copied, a forwarding pointer to the new Object is left in place of the original Object. + * Promote an Object to this Generation, typically by copying and leaving a forwarding pointer + * to the new Object in place of the original Object. 
If the object cannot be promoted due to + * insufficient capacity, returns {@code null}. * * This turns an Object from white to grey: the object is in this Generation, but has not yet * had its interior pointers visited. * - * @param original The original Object to be promoted. - * @param header The header of the object that should be promoted. - * @return The promoted Object, either the original if promotion was done by HeapChunk motion, - * or a new Object if promotion was done by copying. + * @return a reference to the promoted object, which is different to the original reference if + * promotion was done by copying, or {@code null} if there was insufficient capacity in + * this generation. */ - protected abstract Object promoteObject(Object original, UnsignedWord header); + protected abstract Object promoteAlignedObject(Object original, AlignedHeapChunk.AlignedHeader originalChunk, Space originalSpace); + + /** + * Promote an Object to this Generation, typically by HeapChunk motion. If the object cannot be + * promoted due to insufficient capacity, returns {@code null}. + * + * This turns an Object from white to grey: the object is in this Generation, but has not yet + * had its interior pointers visited. + * + * @return a reference to the promoted object, which is the same as the original if the object + * was promoted through HeapChunk motion, or {@code null} if there was insufficient + * capacity in this generation. + */ + protected abstract Object promoteUnalignedObject(Object original, UnalignedHeapChunk.UnalignedHeader originalChunk, Space originalSpace); + + /** + * Promote a HeapChunk from its original space to the appropriate space in this generation if + * there is sufficient capacity. + * + * This turns all the Objects in the chunk from white to grey: the objects are in the target + * Space, but have not yet had their interior pointers visited. + * + * @return true on success, false if the there was insufficient capacity. 
+ */ + protected abstract boolean promoteChunk(HeapChunk.Header originalChunk, boolean isAligned, Space originalSpace); } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GreyToBlackObjRefVisitor.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GreyToBlackObjRefVisitor.java index bfe1c9d655fb..c5cc8d9dab36 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GreyToBlackObjRefVisitor.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GreyToBlackObjRefVisitor.java @@ -61,22 +61,10 @@ final class GreyToBlackObjRefVisitor implements ObjectReferenceVisitor { } @Override - public boolean visitObjectReference(Pointer objRef, boolean compressed) { - return visitObjectReferenceInline(objRef, 0, compressed, null); - } - - @Override - @AlwaysInline("GC performance") - public boolean visitObjectReferenceInline(Pointer objRef, boolean compressed, Object holderObject) { + public boolean visitObjectReference(Pointer objRef, boolean compressed, Object holderObject) { return visitObjectReferenceInline(objRef, 0, compressed, holderObject); } - @Override - @AlwaysInline("GC performance") - public boolean visitObjectReferenceInline(Pointer objRef, int innerOffset, boolean compressed) { - return visitObjectReferenceInline(objRef, innerOffset, compressed, null); - } - @Override @AlwaysInline("GC performance") public boolean visitObjectReferenceInline(Pointer objRef, int innerOffset, boolean compressed, Object holderObject) { @@ -116,7 +104,7 @@ public boolean visitObjectReferenceInline(Pointer objRef, int innerOffset, boole // Promote the Object if necessary, making it at least grey, and ... Object obj = p.toObject(); assert innerOffset < LayoutEncoding.getSizeFromObject(obj).rawValue(); - Object copy = HeapImpl.getHeapImpl().promoteObject(obj, header); + Object copy = GCImpl.getGCImpl().promoteObject(obj, header); if (copy != obj) { // ... update the reference to point to the copy, making the reference black. counters.noteCopiedReferent(); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapAccounting.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapAccounting.java new file mode 100644 index 000000000000..70c630d7d825 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapAccounting.java @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.genscavenge; + +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; +import org.graalvm.word.UnsignedWord; + +import com.oracle.svm.core.annotate.Uninterruptible; +import com.oracle.svm.core.jdk.UninterruptibleUtils; +import com.oracle.svm.core.thread.VMOperation; + +/** + * @see GCAccounting + * @see ChunksAccounting + */ +public final class HeapAccounting { + private final UninterruptibleUtils.AtomicUnsigned edenUsedBytes = new UninterruptibleUtils.AtomicUnsigned(); + private final UninterruptibleUtils.AtomicUnsigned youngUsedBytes = new UninterruptibleUtils.AtomicUnsigned(); + + @Platforms(Platform.HOSTED_ONLY.class) + HeapAccounting() { + } + + public void setEdenAndYoungGenBytes(UnsignedWord edenBytes, UnsignedWord youngBytes) { + assert VMOperation.isGCInProgress() : "would cause races otherwise"; + youngUsedBytes.set(youngBytes); + edenUsedBytes.set(edenBytes); + } + + @Uninterruptible(reason = "Must be done during TLAB registration to not race with a potential collection.", callerMustBe = true) + public void increaseEdenUsedBytes(UnsignedWord value) { + youngUsedBytes.addAndGet(value); + edenUsedBytes.addAndGet(value); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public UnsignedWord getYoungUsedBytes() { + assert !VMOperation.isGCInProgress() : "value is incorrect during a GC"; + return youngUsedBytes.get(); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public UnsignedWord getEdenUsedBytes() { + assert !VMOperation.isGCInProgress() : "value is incorrect during a GC"; + return edenUsedBytes.get(); + } + + @SuppressWarnings("static-method") + public UnsignedWord getSurvivorSpaceAfterChunkBytes(int survivorIndex) { + return HeapImpl.getHeapImpl().getYoungGeneration().getSurvivorFromSpaceAt(survivorIndex).getChunkBytes(); + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunkProvider.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunkProvider.java index 8b6a25d35fe8..2f887e646ccb 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunkProvider.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunkProvider.java @@ -43,15 +43,17 @@ import com.oracle.svm.core.jdk.UninterruptibleUtils.AtomicUnsigned; import com.oracle.svm.core.log.Log; import com.oracle.svm.core.os.CommittedMemoryProvider; +import com.oracle.svm.core.thread.VMOperation; import com.oracle.svm.core.thread.VMThreads; +import com.oracle.svm.core.util.UnsignedUtils; /** * Allocates and frees the memory for aligned and unaligned heap chunks. The methods are * thread-safe, so no locking is necessary when calling them. * - * Memory for aligned chunks is not immediately released to the OS. Up to - * {@link HeapPolicy#getMinimumHeapSize()} chunks are saved in an unused chunk list. Memory for - * unaligned chunks is released immediately. 
+ * Memory for aligned chunks is not immediately released to the OS. Chunks with a total of up to + * {@link CollectionPolicy#getMaximumFreeAlignedChunksSize()} bytes are saved in an unused chunk + * list. Memory for unaligned chunks is released immediately. */ final class HeapChunkProvider { /** @@ -97,7 +99,7 @@ private static Log log() { /** Acquire a new AlignedHeapChunk, either from the free list or from the operating system. */ AlignedHeader produceAlignedChunk() { - UnsignedWord chunkSize = HeapPolicy.getAlignedHeapChunkSize(); + UnsignedWord chunkSize = HeapParameters.getAlignedHeapChunkSize(); log().string("[HeapChunkProvider.produceAlignedChunk chunk size: ").unsigned(chunkSize).newline(); AlignedHeader result = popUnusedAlignedChunk(); @@ -106,7 +108,7 @@ AlignedHeader produceAlignedChunk() { if (result.isNull()) { /* Unused list was empty, need to allocate memory. */ noteFirstAllocationTime(); - result = (AlignedHeader) CommittedMemoryProvider.get().allocate(chunkSize, HeapPolicy.getAlignedHeapChunkAlignment(), false); + result = (AlignedHeader) CommittedMemoryProvider.get().allocate(chunkSize, HeapParameters.getAlignedHeapChunkAlignment(), false); if (result.isNull()) { throw ALIGNED_OUT_OF_MEMORY_ERROR; } @@ -117,44 +119,59 @@ AlignedHeader produceAlignedChunk() { assert HeapChunk.getTopOffset(result).equal(AlignedHeapChunk.getObjectsStartOffset()); assert HeapChunk.getEndOffset(result).equal(chunkSize); - if (HeapPolicy.getZapProducedHeapChunks()) { - zap(result, HeapPolicy.getProducedHeapChunkZapWord()); + if (HeapParameters.getZapProducedHeapChunks()) { + zap(result, HeapParameters.getProducedHeapChunkZapWord()); } - HeapPolicy.increaseEdenUsedBytes(chunkSize); - log().string(" result chunk: ").zhex(result).string(" ]").newline(); return result; } + void freeExcessAlignedChunks() { + consumeAlignedChunks(WordFactory.nullPointer(), false); + } + /** * Releases a list of AlignedHeapChunks, either to the free list or back to the operating * system. This method may only be called after the chunks were already removed from the spaces. 
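* For illustration (numbers assumed for this example): with 1 MiB aligned chunks, a policy * reserve of 32 MiB and 8 MiB already in the free list, up to (32 - 8) / 1 = 24 of the released * chunks are kept; had the free list held 40 MiB instead, 8 unused chunks would be freed to the * operating system.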
*/ - void consumeAlignedChunks(AlignedHeader firstChunk) { - assert HeapChunk.getPrevious(firstChunk).isNull() : "prev must be null"; - AlignedHeader cur = firstChunk; + void consumeAlignedChunks(AlignedHeader firstChunk, boolean keepAll) { + assert firstChunk.isNull() || HeapChunk.getPrevious(firstChunk).isNull() : "prev must be null"; - UnsignedWord minimumHeapSize = HeapPolicy.getMinimumHeapSize(); - UnsignedWord committedBytesAfterGC = GCImpl.getChunkBytes().add(getBytesInUnusedChunks()); - if (minimumHeapSize.aboveThan(committedBytesAfterGC)) { - UnsignedWord chunksToKeep = minimumHeapSize.subtract(committedBytesAfterGC).unsignedDivide(HeapPolicy.getAlignedHeapChunkSize()); - while (cur.isNonNull() && chunksToKeep.aboveThan(0)) { - AlignedHeader next = HeapChunk.getNext(cur); - cleanAlignedChunk(cur); - pushUnusedAlignedChunk(cur); - chunksToKeep = chunksToKeep.subtract(1); - cur = next; + UnsignedWord maxChunksToKeep = WordFactory.zero(); + UnsignedWord unusedChunksToFree = WordFactory.zero(); + if (keepAll) { + maxChunksToKeep = UnsignedUtils.MAX_VALUE; + } else { + UnsignedWord freeListBytes = getBytesInUnusedChunks(); + UnsignedWord reserveBytes = GCImpl.getPolicy().getMaximumFreeAlignedChunksSize(); + if (freeListBytes.belowThan(reserveBytes)) { + maxChunksToKeep = reserveBytes.subtract(freeListBytes).unsignedDivide(HeapParameters.getAlignedHeapChunkSize()); + } else { + unusedChunksToFree = freeListBytes.subtract(reserveBytes).unsignedDivide(HeapParameters.getAlignedHeapChunkSize()); } } + // Potentially keep some chunks in the free list for quicker allocation, free the rest + AlignedHeader cur = firstChunk; + while (cur.isNonNull() && maxChunksToKeep.aboveThan(0)) { + AlignedHeader next = HeapChunk.getNext(cur); + cleanAlignedChunk(cur); + pushUnusedAlignedChunk(cur); + + maxChunksToKeep = maxChunksToKeep.subtract(1); + cur = next; + } freeAlignedChunkList(cur); + + // Release chunks from the free list to the operating system when spaces shrink + freeUnusedAlignedChunksAtSafepoint(unusedChunksToFree); } private static void cleanAlignedChunk(AlignedHeader alignedChunk) { AlignedHeapChunk.reset(alignedChunk); - if (HeapPolicy.getZapConsumedHeapChunks()) { - zap(alignedChunk, HeapPolicy.getConsumedHeapChunkZapWord()); + if (HeapParameters.getZapConsumedHeapChunks()) { + zap(alignedChunk, HeapParameters.getConsumedHeapChunkZapWord()); } } @@ -178,7 +195,7 @@ private void pushUnusedAlignedChunk(AlignedHeader chunk) { HeapChunk.setNext(chunk, unusedAlignedChunks.get()); unusedAlignedChunks.set(chunk); - bytesInUnusedAlignedChunks.addAndGet(HeapPolicy.getAlignedHeapChunkSize()); + bytesInUnusedAlignedChunks.addAndGet(HeapParameters.getAlignedHeapChunkSize()); log().string(" new list top: ").zhex(unusedAlignedChunks.get()).string(" list bytes ").signed(bytesInUnusedAlignedChunks.get()).newline(); } @@ -199,7 +216,7 @@ private AlignedHeader popUnusedAlignedChunk() { if (result.isNull()) { return WordFactory.nullPointer(); } else { - bytesInUnusedAlignedChunks.subtractAndGet(HeapPolicy.getAlignedHeapChunkSize()); + bytesInUnusedAlignedChunks.subtractAndGet(HeapParameters.getAlignedHeapChunkSize()); log().string(" new list top: ").zhex(unusedAlignedChunks.get()).string(" list bytes ").signed(bytesInUnusedAlignedChunks.get()).newline(); return result; } @@ -221,6 +238,24 @@ private AlignedHeader popUnusedAlignedChunkUninterruptibly() { } } + private void freeUnusedAlignedChunksAtSafepoint(UnsignedWord count) { + VMOperation.guaranteeInProgressAtSafepoint("Removing non-atomically from the 
unused chunk list."); + if (count.equal(0)) { + return; + } + + AlignedHeader chunk = unusedAlignedChunks.get(); + UnsignedWord released = WordFactory.zero(); + while (chunk.isNonNull() && released.belowThan(count)) { + AlignedHeader next = HeapChunk.getNext(chunk); + freeAlignedChunk(chunk); + chunk = next; + released = released.add(1); + } + unusedAlignedChunks.set(chunk); + bytesInUnusedAlignedChunks.subtractAndGet(released.multiply(HeapParameters.getAlignedHeapChunkSize())); + } + /** Acquire an UnalignedHeapChunk from the operating system. */ UnalignedHeader produceUnalignedChunk(UnsignedWord objectSize) { UnsignedWord chunkSize = UnalignedHeapChunk.getChunkSizeForObject(objectSize); @@ -235,12 +270,10 @@ UnalignedHeader produceUnalignedChunk(UnsignedWord objectSize) { UnalignedHeapChunk.initialize(result, chunkSize); assert objectSize.belowOrEqual(HeapChunk.availableObjectMemory(result)) : "UnalignedHeapChunk insufficient for requested object"; - if (HeapPolicy.getZapProducedHeapChunks()) { - zap(result, HeapPolicy.getProducedHeapChunkZapWord()); + if (HeapParameters.getZapProducedHeapChunks()) { + zap(result, HeapParameters.getProducedHeapChunkZapWord()); } - HeapPolicy.increaseEdenUsedBytes(chunkSize); - log().string(" returns ").zhex(result).string(" ]").newline(); return result; } @@ -266,7 +299,7 @@ Log report(Log log, boolean traceHeapChunks) { log.string("Unused:").indent(true); log.string("aligned: ").signed(bytesInUnusedAlignedChunks.get()) .string("/") - .signed(bytesInUnusedAlignedChunks.get().unsignedDivide(HeapPolicy.getAlignedHeapChunkSize())); + .signed(bytesInUnusedAlignedChunks.get().unsignedDivide(HeapParameters.getAlignedHeapChunkSize())); if (traceHeapChunks) { if (unusedAlignedChunks.get().isNonNull()) { log.newline().string("aligned chunks:").redent(true); @@ -324,7 +357,7 @@ static void freeUnalignedChunkList(UnalignedHeader first) { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) private static void freeAlignedChunk(AlignedHeader chunk) { - CommittedMemoryProvider.get().free(chunk, HeapPolicy.getAlignedHeapChunkSize(), HeapPolicy.getAlignedHeapChunkAlignment(), false); + CommittedMemoryProvider.get().free(chunk, HeapParameters.getAlignedHeapChunkSize(), HeapParameters.getAlignedHeapChunkAlignment(), false); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java index 09903ec31919..ffeff4cc4233 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java @@ -37,7 +37,6 @@ import org.graalvm.nativeimage.IsolateThread; import org.graalvm.nativeimage.Platform; import org.graalvm.nativeimage.Platforms; -import org.graalvm.nativeimage.hosted.Feature.FeatureAccess; import org.graalvm.word.Pointer; import org.graalvm.word.UnsignedWord; @@ -45,7 +44,6 @@ import com.oracle.svm.core.SubstrateDiagnostics; import com.oracle.svm.core.SubstrateDiagnostics.DiagnosticThunk; import com.oracle.svm.core.SubstrateOptions; -import com.oracle.svm.core.annotate.AlwaysInline; import com.oracle.svm.core.annotate.NeverInline; import com.oracle.svm.core.annotate.RestrictHeapAccess; import com.oracle.svm.core.annotate.Substitute; @@ -97,8 +95,8 @@ public final 
class HeapImpl extends Heap { private final ObjectHeaderImpl objectHeaderImpl = new ObjectHeaderImpl(); private final GCImpl gcImpl; private final RuntimeCodeInfoGCSupportImpl runtimeCodeInfoGcSupport; - private final HeapPolicy heapPolicy; private final ImageHeapInfo imageHeapInfo = new ImageHeapInfo(); + private final HeapAccounting accounting = new HeapAccounting(); /** Head of the linked list of currently pending (ready to be enqueued) {@link Reference}s. */ private Reference<?> refPendingList; @@ -114,11 +112,11 @@ public final class HeapImpl extends Heap { private List<Class<?>> classList; @Platforms(Platform.HOSTED_ONLY.class) - public HeapImpl(FeatureAccess access, int pageSize) { + public HeapImpl(int pageSize) { this.pageSize = pageSize; - this.gcImpl = new GCImpl(access); + this.gcImpl = new GCImpl(); this.runtimeCodeInfoGcSupport = new RuntimeCodeInfoGCSupportImpl(); - this.heapPolicy = new HeapPolicy(); + HeapParameters.initialize(); SubstrateDiagnostics.DiagnosticThunkRegister.getSingleton().register(new DumpHeapSettingsAndStatistics()); SubstrateDiagnostics.DiagnosticThunkRegister.getSingleton().register(new DumpChunkInformation()); } @@ -224,6 +222,11 @@ public RuntimeCodeInfoGCSupport getRuntimeCodeInfoGCSupport() { return runtimeCodeInfoGcSupport; } + @Fold + public HeapAccounting getAccounting() { + return accounting; + } + GCImpl getGCImpl() { return gcImpl; } @@ -241,26 +244,6 @@ static void exitIfAllocationDisallowed(String callSite, String typeName) { } } - @AlwaysInline("GC performance") - Object promoteObject(Object original, UnsignedWord header) { - Log trace = Log.noopLog().string("[HeapImpl.promoteObject:").string(" original: ").object(original); - - Object result; - if (HeapPolicy.getMaxSurvivorSpaces() > 0 && !getGCImpl().isCompleteCollection()) { - result = getYoungGeneration().promoteObject(original, header); - } else { - result = getOldGeneration().promoteObject(original, header); - } - - trace.string(" result: ").object(result).string("]").newline(); - return result; - } - - @Fold - public HeapPolicy getHeapPolicy() { - return heapPolicy; - } - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public YoungGeneration getYoungGeneration() { return youngGeneration; } @@ -277,7 +260,7 @@ AtomicReference<PinnedObjectImpl> getPinHead() { @Uninterruptible(reason = "Necessary to return a reasonably consistent value (a GC can change the queried values).") public UnsignedWord getUsedBytes() { - return getOldGeneration().getChunkBytes().add(HeapPolicy.getYoungUsedBytes()); + return getOldGeneration().getChunkBytes().add(getHeapImpl().getAccounting().getYoungUsedBytes()); } @Uninterruptible(reason = "Necessary to return a reasonably consistent value (a GC can change the queried values).") @@ -286,7 +269,7 @@ public UnsignedWord getCommittedBytes() { } void report(Log log) { - report(log, HeapPolicyOptions.TraceHeapChunks.getValue()); + report(log, HeapParameters.Options.TraceHeapChunks.getValue()); } void report(Log log, boolean traceHeapChunks) { @@ -304,32 +287,32 @@ void logImageHeapPartitionBoundaries(Log log) { /** Log the zap values to make it easier to search for them. */ static Log zapValuesToLog(Log log) { - if (HeapPolicy.getZapProducedHeapChunks() || HeapPolicy.getZapConsumedHeapChunks()) { + if (HeapParameters.getZapProducedHeapChunks() || HeapParameters.getZapConsumedHeapChunks()) { log.string("[Heap Chunk zap values: ").indent(true); /* Padded with spaces so the columns line up between the int and word variants.
*/ // @formatter:off - if (HeapPolicy.getZapProducedHeapChunks()) { + if (HeapParameters.getZapProducedHeapChunks()) { log.string(" producedHeapChunkZapInt: ") - .string(" hex: ").spaces(8).hex(HeapPolicy.getProducedHeapChunkZapInt()) - .string(" signed: ").spaces(9).signed(HeapPolicy.getProducedHeapChunkZapInt()) - .string(" unsigned: ").spaces(10).unsigned(HeapPolicy.getProducedHeapChunkZapInt()).newline(); + .string(" hex: ").spaces(8).hex(HeapParameters.getProducedHeapChunkZapInt()) + .string(" signed: ").spaces(9).signed(HeapParameters.getProducedHeapChunkZapInt()) + .string(" unsigned: ").spaces(10).unsigned(HeapParameters.getProducedHeapChunkZapInt()).newline(); log.string(" producedHeapChunkZapWord:") - .string(" hex: ").hex(HeapPolicy.getProducedHeapChunkZapWord()) - .string(" signed: ").signed(HeapPolicy.getProducedHeapChunkZapWord()) - .string(" unsigned: ").unsigned(HeapPolicy.getProducedHeapChunkZapWord()); - if (HeapPolicy.getZapConsumedHeapChunks()) { + .string(" hex: ").hex(HeapParameters.getProducedHeapChunkZapWord()) + .string(" signed: ").signed(HeapParameters.getProducedHeapChunkZapWord()) + .string(" unsigned: ").unsigned(HeapParameters.getProducedHeapChunkZapWord()); + if (HeapParameters.getZapConsumedHeapChunks()) { log.newline(); } } - if (HeapPolicy.getZapConsumedHeapChunks()) { + if (HeapParameters.getZapConsumedHeapChunks()) { log.string(" consumedHeapChunkZapInt: ") - .string(" hex: ").spaces(8).hex(HeapPolicy.getConsumedHeapChunkZapInt()) - .string(" signed: ").spaces(10).signed(HeapPolicy.getConsumedHeapChunkZapInt()) - .string(" unsigned: ").spaces(10).unsigned(HeapPolicy.getConsumedHeapChunkZapInt()).newline(); + .string(" hex: ").spaces(8).hex(HeapParameters.getConsumedHeapChunkZapInt()) + .string(" signed: ").spaces(10).signed(HeapParameters.getConsumedHeapChunkZapInt()) + .string(" unsigned: ").spaces(10).unsigned(HeapParameters.getConsumedHeapChunkZapInt()).newline(); log.string(" consumedHeapChunkZapWord:") - .string(" hex: ").hex(HeapPolicy.getConsumedHeapChunkZapWord()) - .string(" signed: ").signed(HeapPolicy.getConsumedHeapChunkZapWord()) - .string(" unsigned: ").unsigned(HeapPolicy.getConsumedHeapChunkZapWord()); + .string(" hex: ").hex(HeapParameters.getConsumedHeapChunkZapWord()) + .string(" signed: ").signed(HeapParameters.getConsumedHeapChunkZapWord()) + .string(" unsigned: ").unsigned(HeapParameters.getConsumedHeapChunkZapWord()); } log.redent(false).string("]"); // @formatter:on @@ -425,7 +408,7 @@ public static boolean usesImageHeapCardMarking() { @Fold @Override public int getPreferredAddressSpaceAlignment() { - return UnsignedUtils.safeToInt(HeapPolicy.getAlignedHeapChunkAlignment()); + return UnsignedUtils.safeToInt(HeapParameters.getAlignedHeapChunkAlignment()); } @Fold @@ -437,7 +420,7 @@ public int getImageHeapOffsetInAddressSpace() { * the heap base and the start of the image heap. The gap won't need any memory in the * native image file. 
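A small worked example of the offset computed below, assuming the default 1 MB aligned chunk size (the option is hosted-configurable, so the concrete number is illustrative only):

```java
// Illustration only: with the default AlignedHeapChunkSize of 1 MB, the image
// heap starts one aligned chunk past the heap base, and the gap in between
// occupies no memory in the native image file.
public class ImageHeapOffsetExample {
    public static void main(String[] args) {
        int alignedChunkSize = 1024 * 1024;              // default AlignedHeapChunkSize
        int imageHeapOffsetInAddressSpace = alignedChunkSize;
        System.out.println("image heap offset: " + imageHeapOffsetInAddressSpace); // 1048576
    }
}
```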
*/ - return NumUtil.safeToInt(HeapPolicyOptions.AlignedHeapChunkSize.getValue()); + return NumUtil.safeToInt(HeapParameters.Options.AlignedHeapChunkSize.getValue()); } return 0; } @@ -800,7 +783,7 @@ public void printDiagnostics(Log log, int invocationCount) { log.string("Heap base: ").zhex(KnownIntrinsics.heapBase()).newline(); } log.string("Object reference size: ").signed(ConfigurationValues.getObjectLayout().getReferenceSize()).newline(); - log.string("Aligned chunk size: ").unsigned(HeapPolicy.getAlignedHeapChunkSize()).newline(); + log.string("Aligned chunk size: ").unsigned(HeapParameters.getAlignedHeapChunkSize()).newline(); GCAccounting accounting = gc.getAccounting(); log.string("Incremental collections: ").unsigned(accounting.getIncrementalCollectionCount()).newline(); @@ -842,13 +825,13 @@ private long totalMemory() { @Substitute private long maxMemory() { - // Query the physical memory size, so it gets set correctly instead of being estimated. - PhysicalMemory.size(); - return HeapPolicy.getMaximumHeapSize().rawValue(); + PhysicalMemory.size(); // ensure physical memory size is set correctly and not estimated + GCImpl.getPolicy().updateSizeParameters(); + return GCImpl.getPolicy().getMaximumHeapSize().rawValue(); } @Substitute private void gc() { - HeapPolicy.maybeCauseUserRequestedCollection(); + GCImpl.getGCImpl().maybeCauseUserRequestedCollection(); } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapParameters.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapParameters.java new file mode 100644 index 000000000000..145cf91a88eb --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapParameters.java @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
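The maxMemory() substitution above now refreshes the collection policy's size parameters before answering. A hedged usage sketch with plain JDK API; the behavior description follows the diff above, not an independent specification:

```java
// Usage sketch: Runtime.maxMemory() on a Substrate VM native image goes
// through the substitution above, so it reports the policy's current maximum
// heap size (after -Xmx parsing and physical-memory detection) rather than a
// stale estimate.
public class MaxMemoryProbe {
    public static void main(String[] args) {
        long maxBytes = Runtime.getRuntime().maxMemory();
        System.out.println("max heap: " + (maxBytes / (1024 * 1024)) + " MB");
    }
}
```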
+ */ +package com.oracle.svm.core.genscavenge; + +import org.graalvm.collections.EconomicMap; +import org.graalvm.collections.UnmodifiableEconomicMap; +import org.graalvm.compiler.api.replacements.Fold; +import org.graalvm.compiler.options.Option; +import org.graalvm.compiler.options.OptionKey; +import org.graalvm.compiler.options.OptionValues; +import org.graalvm.compiler.word.Word; +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; +import org.graalvm.word.UnsignedWord; +import org.graalvm.word.WordFactory; + +import com.oracle.svm.core.SubstrateGCOptions; +import com.oracle.svm.core.SubstrateUtil; +import com.oracle.svm.core.annotate.Uninterruptible; +import com.oracle.svm.core.option.HostedOptionKey; +import com.oracle.svm.core.option.RuntimeOptionKey; +import com.oracle.svm.core.option.RuntimeOptionValues; +import com.oracle.svm.core.util.UserError; +import com.oracle.svm.core.util.VMError; + +/** Constants and variables for the size and layout of the heap and behavior of the collector. */ +public final class HeapParameters { + public static final class Options { + @Option(help = "The maximum heap size as percent of physical memory") // + public static final RuntimeOptionKey<Integer> MaximumHeapSizePercent = new RuntimeOptionKey<>(80); + + @Option(help = "The maximum size of the young generation as a percentage of the maximum heap size") // + public static final RuntimeOptionKey<Integer> MaximumYoungGenerationSizePercent = new RuntimeOptionKey<>(10); + + @Option(help = "The size of an aligned chunk.") // + public static final HostedOptionKey<Long> AlignedHeapChunkSize = new HostedOptionKey<Long>(1L * 1024L * 1024L) { + @Override + protected void onValueUpdate(EconomicMap<OptionKey<?>, Object> values, Long oldValue, Long newValue) { + int multiple = 4096; + UserError.guarantee(newValue > 0 && newValue % multiple == 0, "%s value must be a multiple of %d.", getName(), multiple); + } + }; + + /* + * This should be a fraction of the size of an aligned chunk, else large small arrays will + * not fit in an aligned chunk. + */ + @Option(help = "The size at or above which an array will be allocated in its own unaligned chunk.
0 implies (AlignedHeapChunkSize / 8).") // + public static final HostedOptionKey<Long> LargeArrayThreshold = new HostedOptionKey<>(LARGE_ARRAY_THRESHOLD_SENTINEL_VALUE); + + @Option(help = "Fill unused memory chunks with a sentinel value.") // + public static final HostedOptionKey<Boolean> ZapChunks = new HostedOptionKey<>(false); + + @Option(help = "Before use, fill memory chunks with a sentinel value.") // + public static final HostedOptionKey<Boolean> ZapProducedHeapChunks = new HostedOptionKey<>(false); + + @Option(help = "After use, fill memory chunks with a sentinel value.") // + public static final HostedOptionKey<Boolean> ZapConsumedHeapChunks = new HostedOptionKey<>(false); + + @Option(help = "Trace heap chunks during collections, if +VerboseGC and +PrintHeapShape.") // + public static final RuntimeOptionKey<Boolean> TraceHeapChunks = new RuntimeOptionKey<>(false); + + @Option(help = "Maximum number of survivor spaces.") // + public static final HostedOptionKey<Integer> MaxSurvivorSpaces = new HostedOptionKey<Integer>(null) { + @Override + public Integer getValueOrDefault(UnmodifiableEconomicMap<OptionKey<?>, Object> values) { + Integer value = (Integer) values.get(this); + UserError.guarantee(value == null || value >= 0, "%s value must be greater than or equal to 0", getName()); + return CollectionPolicy.getMaxSurvivorSpaces(value); + } + + @Override + public Integer getValue(OptionValues values) { + assert checkDescriptorExists(); + return getValueOrDefault(values.getMap()); + } + }; + + @Option(help = "Determines if a full GC collects the young generation separately or together with the old generation.") // + public static final RuntimeOptionKey<Boolean> CollectYoungGenerationSeparately = new RuntimeOptionKey<>(false); + + private Options() { + } + } + + private static final long LARGE_ARRAY_THRESHOLD_SENTINEL_VALUE = 0; + private static final int ALIGNED_HEAP_CHUNK_FRACTION_FOR_LARGE_ARRAY_THRESHOLD = 8; + + @Platforms(Platform.HOSTED_ONLY.class) + static void initialize() { + if (!SubstrateUtil.isPowerOf2(getAlignedHeapChunkSize().rawValue())) { + throw UserError.abort("AlignedHeapChunkSize (%d) should be a power of 2.", getAlignedHeapChunkSize().rawValue()); + } + if (!getLargeArrayThreshold().belowOrEqual(getAlignedHeapChunkSize())) { + throw UserError.abort("LargeArrayThreshold (%d) should be below or equal to AlignedHeapChunkSize (%d).", + getLargeArrayThreshold().rawValue(), getAlignedHeapChunkSize().rawValue()); + } + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public static Word getProducedHeapChunkZapWord() { + return (Word) producedHeapChunkZapWord; + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public static int getProducedHeapChunkZapInt() { + return (int) producedHeapChunkZapInt.rawValue(); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public static Word getConsumedHeapChunkZapWord() { + return (Word) consumedHeapChunkZapWord; + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public static int getConsumedHeapChunkZapInt() { + return (int) consumedHeapChunkZapInt.rawValue(); + } + + @Fold + public static int getMaxSurvivorSpaces() { + return Options.MaxSurvivorSpaces.getValue(); + } + + /* + * Memory configuration + */ + + public static void setMaximumHeapSize(UnsignedWord value) { + RuntimeOptionValues.singleton().update(SubstrateGCOptions.MaxHeapSize, value.rawValue()); + } + + public static void setMinimumHeapSize(UnsignedWord value) { +
RuntimeOptionValues.singleton().update(SubstrateGCOptions.MinHeapSize, value.rawValue()); + } + + static int getMaximumYoungGenerationSizePercent() { + int result = Options.MaximumYoungGenerationSizePercent.getValue(); + VMError.guarantee((result >= 0) && (result <= 100), "MaximumYoungGenerationSizePercent should be in [0 ..100]"); + return result; + } + + static int getMaximumHeapSizePercent() { + int result = Options.MaximumHeapSizePercent.getValue(); + VMError.guarantee((result >= 0) && (result <= 100), "MaximumHeapSizePercent should be in [0 ..100]"); + return result; + } + + @Fold + public static UnsignedWord getAlignedHeapChunkSize() { + return WordFactory.unsigned(Options.AlignedHeapChunkSize.getValue()); + } + + @Fold + static UnsignedWord getAlignedHeapChunkAlignment() { + return getAlignedHeapChunkSize(); + } + + @Fold + public static UnsignedWord getLargeArrayThreshold() { + long largeArrayThreshold = Options.LargeArrayThreshold.getValue(); + if (LARGE_ARRAY_THRESHOLD_SENTINEL_VALUE == largeArrayThreshold) { + return getAlignedHeapChunkSize().unsignedDivide(ALIGNED_HEAP_CHUNK_FRACTION_FOR_LARGE_ARRAY_THRESHOLD); + } else { + return WordFactory.unsigned(Options.LargeArrayThreshold.getValue()); + } + } + + /* + * Zapping + */ + + public static boolean getZapProducedHeapChunks() { + return Options.ZapChunks.getValue() || Options.ZapProducedHeapChunks.getValue(); + } + + public static boolean getZapConsumedHeapChunks() { + return Options.ZapChunks.getValue() || Options.ZapConsumedHeapChunks.getValue(); + } + + static { + Word.ensureInitialized(); + } + + private static final UnsignedWord producedHeapChunkZapInt = WordFactory.unsigned(0xbaadbabe); + private static final UnsignedWord producedHeapChunkZapWord = producedHeapChunkZapInt.shiftLeft(32).or(producedHeapChunkZapInt); + + private static final UnsignedWord consumedHeapChunkZapInt = WordFactory.unsigned(0xdeadbeef); + private static final UnsignedWord consumedHeapChunkZapWord = consumedHeapChunkZapInt.shiftLeft(32).or(consumedHeapChunkZapInt); + + public static final class TestingBackDoor { + private TestingBackDoor() { + } + + /** The size, in bytes, of what qualifies as a "large" array. */ + public static long getUnalignedObjectSize() { + return HeapParameters.getLargeArrayThreshold().rawValue(); + } + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapPolicy.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapPolicy.java index d0ef184a491d..6e6f97503b87 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapPolicy.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapPolicy.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,275 +25,39 @@ package com.oracle.svm.core.genscavenge; import org.graalvm.compiler.api.replacements.Fold; -import org.graalvm.compiler.word.Word; -import org.graalvm.nativeimage.Platform; -import org.graalvm.nativeimage.Platforms; import org.graalvm.word.UnsignedWord; -import org.graalvm.word.WordFactory; -import com.oracle.svm.core.SubstrateGCOptions; -import com.oracle.svm.core.SubstrateUtil; -import com.oracle.svm.core.annotate.Uninterruptible; -import com.oracle.svm.core.heap.GCCause; -import com.oracle.svm.core.heap.PhysicalMemory; -import com.oracle.svm.core.heap.ReferenceAccess; -import com.oracle.svm.core.jdk.UninterruptibleUtils; -import com.oracle.svm.core.option.RuntimeOptionValues; -import com.oracle.svm.core.thread.VMOperation; -import com.oracle.svm.core.util.UserError; -import com.oracle.svm.core.util.VMError; - -/** HeapPolicy contains policies for the parameters and behaviors of the heap and collector. */ +/** + * Only for compatibility with legacy code, replaced by {@link CollectionPolicy} and + * {@link HeapParameters}. + */ public final class HeapPolicy { - public static final OutOfMemoryError OUT_OF_MEMORY_ERROR = new OutOfMemoryError("Garbage-collected heap size exceeded."); - - static final long LARGE_ARRAY_THRESHOLD_SENTINEL_VALUE = 0; - static final int ALIGNED_HEAP_CHUNK_FRACTION_FOR_LARGE_ARRAY_THRESHOLD = 8; - - @Platforms(Platform.HOSTED_ONLY.class) - HeapPolicy() { - if (!SubstrateUtil.isPowerOf2(getAlignedHeapChunkSize().rawValue())) { - throw UserError.abort("AlignedHeapChunkSize (%d) should be a power of 2.", getAlignedHeapChunkSize().rawValue()); - } - if (!getLargeArrayThreshold().belowOrEqual(getAlignedHeapChunkSize())) { - throw UserError.abort("LargeArrayThreshold (%d) should be below or equal to AlignedHeapChunkSize (%d).", - getLargeArrayThreshold().rawValue(), getAlignedHeapChunkSize().rawValue()); - } - } - - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public static Word getProducedHeapChunkZapWord() { - return (Word) producedHeapChunkZapWord; - } - - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public static int getProducedHeapChunkZapInt() { - return (int) producedHeapChunkZapInt.rawValue(); - } - - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public static Word getConsumedHeapChunkZapWord() { - return (Word) consumedHeapChunkZapWord; - } - - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public static int getConsumedHeapChunkZapInt() { - return (int) consumedHeapChunkZapInt.rawValue(); - } - - public static UnsignedWord m(long bytes) { - assert 0 <= bytes; - return WordFactory.unsigned(bytes).multiply(1024).multiply(1024); - } - - @Fold - public static int getMaxSurvivorSpaces() { - return HeapPolicyOptions.MaxSurvivorSpaces.getValue(); - } - - /* - * Memory configuration - */ - - public static UnsignedWord getMaximumYoungGenerationSize() { - long runtimeValue = SubstrateGCOptions.MaxNewSize.getValue(); - if (runtimeValue != 0L) { - return WordFactory.unsigned(runtimeValue); - } - - /* If no value is set, use a fraction of the maximum heap size. */ - UnsignedWord maxHeapSize = getMaximumHeapSize(); - UnsignedWord youngSizeAsFraction = maxHeapSize.unsignedDivide(100).multiply(getMaximumYoungGenerationSizePercent()); - /* But not more than 256MB. 
*/ - UnsignedWord maxSize = m(256); - UnsignedWord youngSize = (youngSizeAsFraction.belowOrEqual(maxSize) ? youngSizeAsFraction : maxSize); - /* But do not cache the result as it is based on values that might change. */ - return youngSize; - } - - private static int getMaximumYoungGenerationSizePercent() { - int result = HeapPolicyOptions.MaximumYoungGenerationSizePercent.getValue(); - VMError.guarantee((result >= 0) && (result <= 100), "MaximumYoungGenerationSizePercent should be in [0 ..100]"); - return result; - } - public static UnsignedWord getMaximumHeapSize() { - long runtimeValue = SubstrateGCOptions.MaxHeapSize.getValue(); - if (runtimeValue != 0L) { - return WordFactory.unsigned(runtimeValue); - } - - /* - * If the physical size is known yet, the maximum size of the heap is a fraction of the size - * of the physical memory. - */ - UnsignedWord addressSpaceSize = ReferenceAccess.singleton().getAddressSpaceSize(); - if (PhysicalMemory.isInitialized()) { - UnsignedWord physicalMemorySize = PhysicalMemory.getCachedSize(); - int maximumHeapSizePercent = getMaximumHeapSizePercent(); - /* Do not cache because `-Xmx` option parsing may not have happened yet. */ - UnsignedWord result = physicalMemorySize.unsignedDivide(100).multiply(maximumHeapSizePercent); - if (result.belowThan(addressSpaceSize)) { - return result; - } - } - return addressSpaceSize; - } - - private static int getMaximumHeapSizePercent() { - int result = HeapPolicyOptions.MaximumHeapSizePercent.getValue(); - VMError.guarantee((result >= 0) && (result <= 100), "MaximumHeapSizePercent should be in [0 ..100]"); - return result; + return GCImpl.getPolicy().getMaximumHeapSize(); } public static UnsignedWord getMinimumHeapSize() { - long runtimeValue = SubstrateGCOptions.MinHeapSize.getValue(); - if (runtimeValue != 0L) { - /* If `-Xms` has been parsed from the command line, use that value. */ - return WordFactory.unsigned(runtimeValue); - } - - /* A default value chosen to delay the first full collection. */ - UnsignedWord result = getMaximumYoungGenerationSize().multiply(2); - /* But not larger than -Xmx. */ - if (result.aboveThan(getMaximumHeapSize())) { - result = getMaximumHeapSize(); - } - /* But do not cache the result as it is based on values that might change. 
*/ - return result; + return GCImpl.getPolicy().getMinimumHeapSize(); } public static void setMaximumHeapSize(UnsignedWord value) { - RuntimeOptionValues.singleton().update(SubstrateGCOptions.MaxHeapSize, value.rawValue()); + HeapParameters.setMaximumHeapSize(value); } public static void setMinimumHeapSize(UnsignedWord value) { - RuntimeOptionValues.singleton().update(SubstrateGCOptions.MinHeapSize, value.rawValue()); + HeapParameters.setMinimumHeapSize(value); } @Fold public static UnsignedWord getAlignedHeapChunkSize() { - return WordFactory.unsigned(HeapPolicyOptions.AlignedHeapChunkSize.getValue()); - } - - @Fold - static UnsignedWord getAlignedHeapChunkAlignment() { - return getAlignedHeapChunkSize(); + return HeapParameters.getAlignedHeapChunkSize(); } @Fold public static UnsignedWord getLargeArrayThreshold() { - long largeArrayThreshold = HeapPolicyOptions.LargeArrayThreshold.getValue(); - if (LARGE_ARRAY_THRESHOLD_SENTINEL_VALUE == largeArrayThreshold) { - return getAlignedHeapChunkSize().unsignedDivide(ALIGNED_HEAP_CHUNK_FRACTION_FOR_LARGE_ARRAY_THRESHOLD); - } else { - return WordFactory.unsigned(HeapPolicyOptions.LargeArrayThreshold.getValue()); - } + return HeapParameters.getLargeArrayThreshold(); } - /* - * Zapping - */ - - public static boolean getZapProducedHeapChunks() { - return HeapPolicyOptions.ZapChunks.getValue() || HeapPolicyOptions.ZapProducedHeapChunks.getValue(); - } - - public static boolean getZapConsumedHeapChunks() { - return HeapPolicyOptions.ZapChunks.getValue() || HeapPolicyOptions.ZapConsumedHeapChunks.getValue(); - } - - static { - Word.ensureInitialized(); - } - - private static final UnsignedWord producedHeapChunkZapInt = WordFactory.unsigned(0xbaadbabe); - private static final UnsignedWord producedHeapChunkZapWord = producedHeapChunkZapInt.shiftLeft(32).or(producedHeapChunkZapInt); - - private static final UnsignedWord consumedHeapChunkZapInt = WordFactory.unsigned(0xdeadbeef); - private static final UnsignedWord consumedHeapChunkZapWord = consumedHeapChunkZapInt.shiftLeft(32).or(consumedHeapChunkZapInt); - - /* - * Collection-triggering Policies - */ - - private static final UninterruptibleUtils.AtomicUnsigned edenUsedBytes = new UninterruptibleUtils.AtomicUnsigned(); - private static final UninterruptibleUtils.AtomicUnsigned youngUsedBytes = new UninterruptibleUtils.AtomicUnsigned(); - - public static void setEdenAndYoungGenBytes(UnsignedWord edenBytes, UnsignedWord youngBytes) { - assert VMOperation.isGCInProgress() : "would cause races otherwise"; - youngUsedBytes.set(youngBytes); - edenUsedBytes.set(edenBytes); - } - - public static void increaseEdenUsedBytes(UnsignedWord value) { - youngUsedBytes.addAndGet(value); - edenUsedBytes.addAndGet(value); - } - - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public static UnsignedWord getYoungUsedBytes() { - assert !VMOperation.isGCInProgress() : "value is incorrect during a GC"; - return youngUsedBytes.get(); - } - - public static UnsignedWord getEdenUsedBytes() { - assert !VMOperation.isGCInProgress() : "value is incorrect during a GC"; - return edenUsedBytes.get(); - } - - private static UnsignedWord getAllocationBeforePhysicalMemorySize() { - return WordFactory.unsigned(HeapPolicyOptions.AllocationBeforePhysicalMemorySize.getValue()); - } - - public static void maybeCollectOnAllocation() { - if (GCImpl.hasNeverCollectPolicy()) { - // Don't initiate a safepoint if we won't do a collection anyways.
- if (HeapPolicy.getEdenUsedBytes().aboveThan(HeapPolicy.getMaximumHeapSize())) { - throw OUT_OF_MEMORY_ERROR; - } - } else { - UnsignedWord maxYoungSize = getMaximumYoungGenerationSize(); - boolean outOfMemory = maybeCollectOnAllocation(maxYoungSize); - if (outOfMemory) { - throw OUT_OF_MEMORY_ERROR; - } - } - } - - @Uninterruptible(reason = "Avoid races with other threads that also try to trigger a GC") - private static boolean maybeCollectOnAllocation(UnsignedWord maxYoungSize) { - if (youngUsedBytes.get().aboveOrEqual(maxYoungSize)) { - return GCImpl.getGCImpl().collectWithoutAllocating(GenScavengeGCCause.OnAllocation, false); - } - return false; - } - - public static void maybeCauseUserRequestedCollection() { - if (!SubstrateGCOptions.DisableExplicitGC.getValue()) { - HeapImpl.getHeapImpl().getGC().collectCompletely(GCCause.JavaLangSystemGC); - } - } - - public static final class TestingBackDoor { - private TestingBackDoor() { - } - - /** The size, in bytes, of what qualifies as a "large" array. */ - public static long getUnalignedObjectSize() { - return HeapPolicy.getLargeArrayThreshold().rawValue(); - } - } - - /* - * Periodic tasks - */ - - /** Sample the physical memory size, before the first collection but after some allocation. */ - static void samplePhysicalMemorySize() { - if (HeapImpl.getHeapImpl().getGCImpl().getCollectionEpoch().equal(WordFactory.zero()) && - getYoungUsedBytes().aboveThan(getAllocationBeforePhysicalMemorySize())) { - PhysicalMemory.tryInitialize(); - } + private HeapPolicy() { } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapPolicyOptions.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapPolicyOptions.java index 56f824ca4135..8801d8d1cf23 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapPolicyOptions.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapPolicyOptions.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,57 +24,13 @@ */ package com.oracle.svm.core.genscavenge; -import org.graalvm.collections.EconomicMap; -import org.graalvm.compiler.options.Option; -import org.graalvm.compiler.options.OptionKey; - import com.oracle.svm.core.option.HostedOptionKey; -import com.oracle.svm.core.option.RuntimeOptionKey; -import com.oracle.svm.core.util.UserError; +/** + * Only for compatibility with legacy code, replaced by {@link HeapParameters.Options}. 
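The facade pattern used by the trimmed-down HeapPolicy keeps old call sites source-compatible while the logic moves into the new classes. A hypothetical legacy call site, for illustration only:

```java
// Hypothetical legacy call site: it compiles unchanged because the HeapPolicy
// facade forwards to GCImpl.getPolicy() and HeapParameters internally.
import org.graalvm.word.UnsignedWord;

final class LegacyHeapQueries {
    static UnsignedWord maxHeapBytes() {
        return HeapPolicy.getMaximumHeapSize(); // forwards to the collection policy
    }

    static UnsignedWord chunkBytes() {
        return HeapPolicy.getAlignedHeapChunkSize(); // forwards to HeapParameters
    }
}
```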
+ */ public final class HeapPolicyOptions { - @Option(help = "The maximum heap size as percent of physical memory") // - public static final RuntimeOptionKey<Integer> MaximumHeapSizePercent = new RuntimeOptionKey<>(80); - - @Option(help = "The maximum size of the young generation as a percentage of the maximum heap size") // - public static final RuntimeOptionKey<Integer> MaximumYoungGenerationSizePercent = new RuntimeOptionKey<>(10); - - @Option(help = "Bytes that can be allocated before (re-)querying the physical memory size") // - public static final HostedOptionKey<Long> AllocationBeforePhysicalMemorySize = new HostedOptionKey<>(1L * 1024L * 1024L); - - @Option(help = "The size of an aligned chunk.") // - public static final HostedOptionKey<Long> AlignedHeapChunkSize = new HostedOptionKey<Long>(1L * 1024L * 1024L) { - @Override - protected void onValueUpdate(EconomicMap<OptionKey<?>, Object> values, Long oldValue, Long newValue) { - int multiple = 4096; - UserError.guarantee(newValue > 0 && newValue % multiple == 0, "%s value must be a multiple of %d.", getName(), multiple); - } - }; - - /* - * This should be a fraction of the size of an aligned chunk, else large small arrays will not - * fit in an aligned chunk. - */ - @Option(help = "The size at or above which an array will be allocated in its own unaligned chunk. 0 implies (AlignedHeapChunkSize / 8).") // - public static final HostedOptionKey<Long> LargeArrayThreshold = new HostedOptionKey<>(HeapPolicy.LARGE_ARRAY_THRESHOLD_SENTINEL_VALUE); - - @Option(help = "Fill unused memory chunks with a sentinel value.") // - public static final HostedOptionKey<Boolean> ZapChunks = new HostedOptionKey<>(false); - - @Option(help = "Before use, fill memory chunks with a sentinel value.") // - public static final HostedOptionKey<Boolean> ZapProducedHeapChunks = new HostedOptionKey<>(false); - - @Option(help = "After use, Fill memory chunks with a sentinel value.") // - public static final HostedOptionKey<Boolean> ZapConsumedHeapChunks = new HostedOptionKey<>(false); - - @Option(help = "Trace heap chunks during collections, if +VerboseGC and +PrintHeapShape.") // - public static final RuntimeOptionKey<Boolean> TraceHeapChunks = new RuntimeOptionKey<>(false); - - @Option(help = "Maximum number of survivor spaces.") // - public static final HostedOptionKey<Integer> MaxSurvivorSpaces = new HostedOptionKey<>(0); - - @Option(help = "Determines if a full GC collects the young generation separately or together with the old generation.") // - public static final RuntimeOptionKey<Boolean> CollectYoungGenerationSeparately = new RuntimeOptionKey<>(false); + public static final HostedOptionKey<Long> AlignedHeapChunkSize = HeapParameters.Options.AlignedHeapChunkSize; private HeapPolicyOptions() { } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapVerifier.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapVerifier.java index 9790e7f69fe4..279319c09629 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapVerifier.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapVerifier.java @@ -450,7 +450,7 @@ public void initialize(Object parentObject) { } @Override - public boolean visitObjectReference(Pointer objRef, boolean compressed) { + public boolean visitObjectReference(Pointer objRef, boolean compressed, Object holderObject) { result &= verifyReference(parentObject, objRef, compressed); return true; } diff --git
a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ImageHeapWalker.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ImageHeapWalker.java index bbc330ff68b4..cb1e2d91ce15 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ImageHeapWalker.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ImageHeapWalker.java @@ -95,7 +95,7 @@ private static boolean walkPartitionInline(Object firstObject, Object lastObject base = HeapImpl.getImageHeapStart(); } Pointer offset = current.subtract(base); - UnsignedWord chunkOffset = alignedChunks ? UnsignedUtils.roundDown(offset, HeapPolicy.getAlignedHeapChunkAlignment()) + UnsignedWord chunkOffset = alignedChunks ? UnsignedUtils.roundDown(offset, HeapParameters.getAlignedHeapChunkAlignment()) : offset.subtract(UnalignedHeapChunk.getObjectStartOffset()); currentChunk = (HeapChunk.Header) chunkOffset.add(base); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ObjectHeaderImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ObjectHeaderImpl.java index 41d4dcbd8e9a..36fa275c4cb6 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ObjectHeaderImpl.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ObjectHeaderImpl.java @@ -229,18 +229,18 @@ public static UnsignedWord clearBits(UnsignedWord header) { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static boolean isProducedHeapChunkZapped(UnsignedWord header) { if (getReferenceSize() == Integer.BYTES) { - return header.equal(HeapPolicy.getProducedHeapChunkZapInt()); + return header.equal(HeapParameters.getProducedHeapChunkZapInt()); } else { - return header.equal(HeapPolicy.getProducedHeapChunkZapWord()); + return header.equal(HeapParameters.getProducedHeapChunkZapWord()); } } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static boolean isConsumedHeapChunkZapped(UnsignedWord header) { if (getReferenceSize() == Integer.BYTES) { - return header.equal(HeapPolicy.getConsumedHeapChunkZapInt()); + return header.equal(HeapParameters.getConsumedHeapChunkZapInt()); } else { - return header.equal(HeapPolicy.getConsumedHeapChunkZapWord()); + return header.equal(HeapParameters.getConsumedHeapChunkZapWord()); } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/OldGeneration.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/OldGeneration.java index 82220acd97e0..8b3e042e34f1 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/OldGeneration.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/OldGeneration.java @@ -24,6 +24,9 @@ */ package com.oracle.svm.core.genscavenge; +import static org.graalvm.compiler.nodes.extended.BranchProbabilityNode.EXTREMELY_SLOW_PATH_PROBABILITY; +import static org.graalvm.compiler.nodes.extended.BranchProbabilityNode.probability; + import org.graalvm.nativeimage.Platform; import org.graalvm.nativeimage.Platforms; import org.graalvm.word.UnsignedWord; @@ -32,8 +35,11 @@ import com.oracle.svm.core.annotate.AlwaysInline; import com.oracle.svm.core.annotate.Uninterruptible; import 
com.oracle.svm.core.genscavenge.GCImpl.ChunkReleaser; +import com.oracle.svm.core.genscavenge.remset.RememberedSet; import com.oracle.svm.core.heap.ObjectVisitor; import com.oracle.svm.core.log.Log; +import com.oracle.svm.core.thread.VMOperation; +import com.oracle.svm.core.util.VMError; /** * An OldGeneration has two Spaces, {@link #fromSpace} for existing objects, and {@link #toSpace} @@ -49,7 +55,7 @@ public final class OldGeneration extends Generation { @Platforms(Platform.HOSTED_ONLY.class) OldGeneration(String name) { super(name); - int age = HeapPolicy.getMaxSurvivorSpaces() + 1; + int age = HeapParameters.getMaxSurvivorSpaces() + 1; this.fromSpace = new Space("oldFromSpace", true, age); this.toSpace = new Space("oldToSpace", false, age); } @@ -68,36 +74,28 @@ public boolean walkObjects(ObjectVisitor visitor) { /** Promote an Object to ToSpace if it is not already in ToSpace. */ @AlwaysInline("GC performance") @Override - public Object promoteObject(Object original, UnsignedWord header) { - if (ObjectHeaderImpl.isAlignedHeader(header)) { - AlignedHeapChunk.AlignedHeader chunk = AlignedHeapChunk.getEnclosingChunk(original); - Space originalSpace = HeapChunk.getSpace(chunk); - if (originalSpace.isFromSpace()) { - return promoteAlignedObject(original, originalSpace); - } - } else { - assert ObjectHeaderImpl.isUnalignedHeader(header); - UnalignedHeapChunk.UnalignedHeader chunk = UnalignedHeapChunk.getEnclosingChunk(original); - Space originalSpace = HeapChunk.getSpace(chunk); - if (originalSpace.isFromSpace()) { - promoteUnalignedChunk(chunk, originalSpace); - } - } - return original; - } - - @AlwaysInline("GC performance") - public Object promoteAlignedObject(Object original, Space originalSpace) { + public Object promoteAlignedObject(Object original, AlignedHeapChunk.AlignedHeader originalChunk, Space originalSpace) { + assert originalSpace.isFromSpace(); return getToSpace().promoteAlignedObject(original, originalSpace); } @AlwaysInline("GC performance") - public void promoteUnalignedChunk(UnalignedHeapChunk.UnalignedHeader chunk, Space originalSpace) { - getToSpace().promoteUnalignedHeapChunk(chunk, originalSpace); + @Override + protected Object promoteUnalignedObject(Object original, UnalignedHeapChunk.UnalignedHeader originalChunk, Space originalSpace) { + assert originalSpace.isFromSpace(); + getToSpace().promoteUnalignedHeapChunk(originalChunk, originalSpace); + return original; } - public void promoteObjectChunk(Object obj) { - getToSpace().promoteObjectChunk(obj); + @Override + protected boolean promoteChunk(HeapChunk.Header originalChunk, boolean isAligned, Space originalSpace) { + assert originalSpace.isFromSpace(); + if (isAligned) { + getToSpace().promoteAlignedHeapChunk((AlignedHeapChunk.AlignedHeader) originalChunk, originalSpace); + } else { + getToSpace().promoteUnalignedHeapChunk((UnalignedHeapChunk.UnalignedHeader) originalChunk, originalSpace); + } + return true; } void releaseSpaces(ChunkReleaser chunkReleaser) { @@ -159,4 +157,16 @@ UnsignedWord getChunkBytes() { UnsignedWord toBytes = getToSpace().getChunkBytes(); return fromBytes.add(toBytes); } + + @SuppressWarnings("static-method") + AlignedHeapChunk.AlignedHeader requestAlignedChunk() { + assert VMOperation.isGCInProgress() : "Should only be called from the collector."; + AlignedHeapChunk.AlignedHeader chunk = HeapImpl.getChunkProvider().produceAlignedChunk(); + if (probability(EXTREMELY_SLOW_PATH_PROBABILITY, chunk.isNull())) { + Log.log().string("[! 
OldGeneration.requestAlignedChunk: failure to allocate aligned chunk!]"); + throw VMError.shouldNotReachHere("Promotion failure"); + } + RememberedSet.get().enableRememberedSetForChunk(chunk); + return chunk; + } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/PathExhibitor.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/PathExhibitor.java index 2f8f1f26cb08..4b95f0050f7f 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/PathExhibitor.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/PathExhibitor.java @@ -293,7 +293,7 @@ void initialize(CodePointer ipArg, DeoptimizedFrame deoptFrameArg, TargetMatcher } @Override - public boolean visitObjectReference(Pointer stackSlot, boolean compressed) { + public boolean visitObjectReference(Pointer stackSlot, boolean compressed, Object holderObject) { Log trace = Log.noopLog(); if (stackSlot.isNull()) { return true; @@ -320,7 +320,7 @@ void initialize(Object containerObj, TargetMatcher targetMatcher, PathEdge resul } @Override - public boolean visitObjectReference(Pointer objRef, boolean compressed) { + public boolean visitObjectReference(Pointer objRef, boolean compressed, Object holderObject) { if (objRef.isNull()) { return true; } @@ -363,7 +363,7 @@ public void initialize(Pointer container, TargetMatcher targetMatcher, PathEdge } @Override - public boolean visitObjectReference(Pointer objRef, boolean compressed) { + public boolean visitObjectReference(Pointer objRef, boolean compressed, Object holderObject) { if (objRef.isNull()) { return true; } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ProportionateSpacesPolicy.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ProportionateSpacesPolicy.java new file mode 100644 index 000000000000..76dfc1ecfe7b --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ProportionateSpacesPolicy.java @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2021, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
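The visitObjectReference signature updated throughout this diff gains a holderObject parameter identifying the object that contains the visited reference (null for non-heap locations such as stack slots). A hedged sketch of a visitor against the new shape; the counting logic is invented for illustration:

```java
// Sketch of an ObjectReferenceVisitor implementation using the new parameter.
// holderObject lets implementations maintain per-object metadata (for example
// the remembered set, as noted later in this diff); this counter is purely
// illustrative.
import org.graalvm.word.Pointer;

import com.oracle.svm.core.heap.ObjectReferenceVisitor;

final class RefOriginCounter implements ObjectReferenceVisitor {
    long refsInObjects;  // references found inside heap objects
    long refsElsewhere;  // stack slots, code metadata, and other non-heap locations

    @Override
    public boolean visitObjectReference(Pointer objRef, boolean compressed, Object holderObject) {
        if (holderObject != null) {
            refsInObjects++;
        } else {
            refsElsewhere++;
        }
        return true; // continue visiting
    }
}
```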
+ */ +package com.oracle.svm.core.genscavenge; + +import org.graalvm.word.UnsignedWord; +import org.graalvm.word.WordFactory; + +import com.oracle.svm.core.annotate.Uninterruptible; +import com.oracle.svm.core.heap.GCCause; +import com.oracle.svm.core.util.UnsignedUtils; + +/** A port of HotSpot's SerialGC size policy. */ +final class ProportionateSpacesPolicy extends AbstractCollectionPolicy { + + /* + * Constants that can be made options if desirable. These are -XX options in HotSpot, refer to + * their descriptions for details. The values are HotSpot defaults unless labeled otherwise. + * + * Don't change these values individually without carefully going over their occurrences in + * HotSpot source code, there are dependencies between them that are not handled in our code. + */ + static final int MIN_HEAP_FREE_RATIO = 40; + static final int MAX_HEAP_FREE_RATIO = 70; + static final boolean SHRINK_HEAP_IN_STEPS = true; + static final int SURVIVOR_RATIO = 8; + static final int MAX_TENURING_THRESHOLD = 15; + static final int TARGET_SURVIVOR_RATIO = 50; + + private int totalCollections; + private boolean oldSizeExceededInPreviousCollection; + private int shrinkFactor; + + ProportionateSpacesPolicy() { + super(MAX_TENURING_THRESHOLD); + } + + @Override + public String getName() { + return "proportionate"; + } + + @Override + public boolean shouldCollectCompletely(boolean followingIncrementalCollection) { + guaranteeSizeParametersInitialized(); + + if (followingIncrementalCollection && oldSizeExceededInPreviousCollection) { + /* + * We promoted objects to the old generation beyond its current capacity to avoid a + * promotion failure, but due to the chunked nature of our heap, we should still be + * within the maximum heap size. Follow up with a full collection during which we either + * reclaim enough space or expand the old generation. 
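How shouldCollectCompletely() is meant to be consulted can be sketched independently of GCImpl. The driver below is hypothetical and only mirrors the contract explained above: an incremental collection that promoted beyond the old generation's capacity is followed by a complete collection.

```java
// Hypothetical GC driver sketch (not GCImpl's actual control flow).
final class CollectionDriverSketch {
    interface Policy {
        boolean shouldCollectCompletely(boolean followingIncrementalCollection);
    }

    static void collect(Policy policy, Runnable incrementalGC, Runnable completeGC) {
        if (policy.shouldCollectCompletely(false)) {
            completeGC.run();
            return;
        }
        incrementalGC.run();
        if (policy.shouldCollectCompletely(true)) {
            completeGC.run(); // reclaim enough space or expand the old generation
        }
    }
}
```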
+ */ + return true; + } + return false; + } + + @Override + public void onCollectionBegin(boolean completeCollection) { + } + + @Override + public void onCollectionEnd(boolean completeCollection, GCCause cause) { + UnsignedWord oldLive = GCImpl.getGCImpl().getAccounting().getOldGenerationAfterChunkBytes(); + oldSizeExceededInPreviousCollection = oldLive.aboveThan(oldSize); + + boolean resizeOldOnlyForPromotions = !completeCollection; + computeNewOldGenSize(resizeOldOnlyForPromotions); + computeNewYoungGenSize(); + adjustDesiredTenuringThreshold(); + + totalCollections++; + } + + private void adjustDesiredTenuringThreshold() { // DefNewGeneration::adjust_desired_tenuring_threshold + // Set the desired survivor size to half the real survivor space + UnsignedWord desiredSurvivorSize = UnsignedUtils.fromDouble(UnsignedUtils.toDouble(survivorSize) * TARGET_SURVIVOR_RATIO / 100); + + // AgeTable::compute_tenuring_threshold + YoungGeneration youngGen = HeapImpl.getHeapImpl().getYoungGeneration(); + UnsignedWord total = WordFactory.zero(); + int i; + for (i = 0; i < HeapParameters.getMaxSurvivorSpaces(); i++) { + Space space = youngGen.getSurvivorFromSpaceAt(i); + total = total.add(space.getChunkBytes()); + if (total.aboveThan(desiredSurvivorSize)) { + break; + } + } + + tenuringThreshold = Math.min(i + 1, MAX_TENURING_THRESHOLD); + } + + private void computeNewOldGenSize(boolean resizeOnlyForPromotions) { // CardGeneration::compute_new_size + UnsignedWord capacityAtPrologue = oldSize; + UnsignedWord usedAfterGc = GCImpl.getGCImpl().getAccounting().getOldGenerationAfterChunkBytes(); + if (oldSize.belowThan(usedAfterGc)) { + oldSize = usedAfterGc; + } + if (resizeOnlyForPromotions) { + return; + } + + int currentShrinkFactor = shrinkFactor; + shrinkFactor = 0; + + double minimumFreePercentage = MIN_HEAP_FREE_RATIO / 100.0; + double maximumUsedPercentage = 1 - minimumFreePercentage; + + UnsignedWord minimumDesiredCapacity = UnsignedUtils.fromDouble(UnsignedUtils.toDouble(usedAfterGc) / maximumUsedPercentage); + minimumDesiredCapacity = UnsignedUtils.max(minimumDesiredCapacity, sizes.initialOldSize()); + + if (oldSize.belowThan(minimumDesiredCapacity)) { + oldSize = alignUp(minimumDesiredCapacity); + return; + } + + UnsignedWord maxShrinkBytes = oldSize.subtract(minimumDesiredCapacity); + UnsignedWord shrinkBytes = WordFactory.zero(); + if (MAX_HEAP_FREE_RATIO < 100) { + double maximumFreePercentage = MAX_HEAP_FREE_RATIO / 100.0; + double minimumUsedPercentage = 1 - maximumFreePercentage; + UnsignedWord maximumDesiredCapacity = UnsignedUtils.fromDouble(UnsignedUtils.toDouble(usedAfterGc) / minimumUsedPercentage); + maximumDesiredCapacity = UnsignedUtils.max(maximumDesiredCapacity, sizes.initialOldSize()); + assert minimumDesiredCapacity.belowOrEqual(maximumDesiredCapacity); + + if (oldSize.aboveThan(maximumDesiredCapacity)) { + shrinkBytes = oldSize.subtract(maximumDesiredCapacity); + if (SHRINK_HEAP_IN_STEPS) { + /* + * We don't want to shrink all the way back to initSize if people call + * System.gc(), because some programs do that between "phases" and then we'd + * just have to grow the heap up again for the next phase. So we damp the + * shrinking: 0% on the first call, 10% on the second call, 40% on the third + * call, and 100% by the fourth call. But if we recompute size without + * shrinking, it goes back to 0%.
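The damping schedule spelled out in the comment above is easy to verify numerically. A standalone illustration that replays the shrinkFactor update rule from the code that follows:

```java
// Illustration only: the fraction of the computed shrink actually applied
// grows 0% -> 10% -> 40% -> 100% across consecutive shrinking recomputations,
// per shrinkFactor = (previous == 0) ? 10 : min(previous * 4, 100).
public class ShrinkDampingExample {
    public static void main(String[] args) {
        int shrinkFactor = 0;
        for (int call = 1; call <= 4; call++) {
            System.out.println("call " + call + ": apply " + shrinkFactor + "% of shrinkBytes");
            shrinkFactor = (shrinkFactor == 0) ? 10 : Math.min(shrinkFactor * 4, 100);
        }
        // Prints 0%, 10%, 40%, 100%.
    }
}
```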
+ */ + shrinkBytes = shrinkBytes.unsignedDivide(100).multiply(currentShrinkFactor); + if (currentShrinkFactor == 0) { + shrinkFactor = 10; + } else { + shrinkFactor = Math.min(currentShrinkFactor * 4, 100); + } + } + assert shrinkBytes.belowOrEqual(maxShrinkBytes); + } + } + + if (oldSize.aboveThan(capacityAtPrologue)) { + /* + * We might have expanded for promotions, in which case we might want to take back that + * expansion if there's room after GC. That keeps us from stretching the heap with + * promotions when there's plenty of room. + */ + UnsignedWord expansionForPromotion = oldSize.subtract(capacityAtPrologue); + expansionForPromotion = UnsignedUtils.min(expansionForPromotion, maxShrinkBytes); + shrinkBytes = UnsignedUtils.max(shrinkBytes, expansionForPromotion); + } + + if (shrinkBytes.aboveThan(0)) { + oldSize = oldSize.subtract(shrinkBytes); + } + } + + private void computeNewYoungGenSize() { // DefNewGeneration::compute_new_size + UnsignedWord desiredNewSize = oldSize.unsignedDivide(NEW_RATIO); + desiredNewSize = UnsignedUtils.clamp(desiredNewSize, sizes.initialYoungSize(), sizes.maxYoungSize); + + // DefNewGeneration::compute_space_boundaries, DefNewGeneration::compute_survivor_size + survivorSize = minSpaceSize(alignDown(desiredNewSize.unsignedDivide(SURVIVOR_RATIO))); + UnsignedWord desiredEdenSize = WordFactory.zero(); + if (desiredNewSize.aboveThan(survivorSize.multiply(2))) { + desiredEdenSize = desiredNewSize.subtract(survivorSize.multiply(2)); + } + edenSize = minSpaceSize(alignDown(desiredEdenSize)); + assert edenSize.aboveThan(0) && survivorSize.belowOrEqual(edenSize); + } + + @Override + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + protected long gcCount() { + return totalCollections; + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReciprocalLeastSquareFit.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReciprocalLeastSquareFit.java new file mode 100644 index 000000000000..0b2f3efb8ab8 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReciprocalLeastSquareFit.java @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2021, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
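The eden/survivor split computed in computeNewYoungGenSize() above can be followed with concrete numbers. A standalone worked example, ignoring chunk alignment and the clamp against the configured bounds:

```java
// Illustration of the young-generation split: with NEW_RATIO = 2 and
// SURVIVOR_RATIO = 8, a 512 MB old generation yields a 256 MB young
// generation, split into two 32 MB survivor spaces and a 192 MB eden.
public class YoungGenSplitExample {
    public static void main(String[] args) {
        long mb = 1024 * 1024;
        long oldSize = 512 * mb;
        long desiredNewSize = oldSize / 2;        // NEW_RATIO
        long survivorSize = desiredNewSize / 8;   // SURVIVOR_RATIO
        long edenSize = desiredNewSize - 2 * survivorSize;
        System.out.println("young=" + desiredNewSize / mb + "MB survivor="
                        + survivorSize / mb + "MB eden=" + edenSize / mb + "MB");
        // young=256MB survivor=32MB eden=192MB
    }
}
```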
+ */
+package com.oracle.svm.core.genscavenge;
+
+/**
+ * Least squares fitting on a data set to generate an equation y = a + b / x. Uses exponential decay
+ * to assign a higher weight to newly added data points and effectively drop old data points without
+ * keeping a history.
+ *
+ * Henrik J Blok, Discounted Least Squares Curve Fitting, 1997.
+ *
+ * Press, W.H. et al, Numerical Recipes in C: The Art of Scientific Computing, Second Edition, 1992.
+ */
+final class ReciprocalLeastSquareFit {
+    private final double discount;
+
+    private double sumY;
+    private double sumXReciprocal;
+    private double sumYdivX;
+    private double sumXSquareReciprocal;
+    private double count;
+
+    private double a;
+    private double b;
+
+    ReciprocalLeastSquareFit(int effectiveHistoryLength) {
+        this.discount = (effectiveHistoryLength - 1.0) / effectiveHistoryLength;
+    }
+
+    public void sample(double x, double y) {
+        assert x != 0 : "division by zero";
+
+        sumY = y + discount * sumY;
+        sumXReciprocal = 1 / x + discount * sumXReciprocal;
+        sumYdivX = y / x + discount * sumYdivX;
+        sumXSquareReciprocal = 1 / (x * x) + discount * sumXSquareReciprocal;
+        count = 1 + discount * count;
+
+        double denominator = count * sumXSquareReciprocal - sumXReciprocal * sumXReciprocal;
+        if (denominator != 0) {
+            b = (count * sumYdivX - sumXReciprocal * sumY) / denominator;
+            a = (sumY - b * sumXReciprocal) / count;
+        }
+    }
+
+    public double estimate(double x) {
+        return a + b / x;
+    }
+
+    public double getSlope(double x) {
+        return -b / (x * x);
+    }
+}
diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReferenceObjectProcessing.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReferenceObjectProcessing.java
index 67c2f410793b..b4bc60f18ae1 100644
--- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReferenceObjectProcessing.java
+++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReferenceObjectProcessing.java
@@ -110,7 +110,7 @@ private static void discover(Object obj, ObjectReferenceVisitor refVisitor) {
         }
         if (maybeUpdateForwardedReference(dr, referentAddr)) {
             // Some other object had a strong reference to the referent, so the referent was already
-            // promoted. The called above updated the reference so that it now points to the
+            // promoted. The call above updated the reference object so that it now points to the
             // promoted object.
             return;
         }
@@ -129,7 +129,9 @@ private static void discover(Object obj, ObjectReferenceVisitor refVisitor) {
             }
             UnsignedWord elapsed = WordFactory.unsigned(clock - timestamp);
             if (elapsed.belowThan(maxSoftRefAccessIntervalMs)) {
-                refVisitor.visitObjectReference(ReferenceInternals.getReferentFieldAddress(dr), true);
+                // Important: we need to pass the reference object as holder so that the remembered
+                // set can be updated accordingly!
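+                // (For instance, a reference object in the old generation whose referent stays in
+                // the young generation requires dirtying the holder's card; see the
+                // CardTableBasedRememberedSet.dirtyCardIfNecessary change later in this diff.)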
+ refVisitor.visitObjectReference(ReferenceInternals.getReferentFieldAddress(dr), true, dr); return; // referent will survive and referent field has been updated } } @@ -177,10 +179,10 @@ static Reference processRememberedReferences() { return pendingHead; } - static void afterCollection(UnsignedWord usedBytes, UnsignedWord maxBytes) { + static void afterCollection(UnsignedWord freeBytes) { assert rememberedRefsList == null; - UnsignedWord unusedMbytes = maxBytes.subtract(usedBytes).unsignedDivide(1024 * 1024 /* MB */); - maxSoftRefAccessIntervalMs = unusedMbytes.multiply(HeapOptions.SoftRefLRUPolicyMSPerMB.getValue()); + UnsignedWord unused = freeBytes.unsignedDivide(1024 * 1024 /* MB */); + maxSoftRefAccessIntervalMs = unused.multiply(HeapOptions.SoftRefLRUPolicyMSPerMB.getValue()); ReferenceInternals.updateSoftReferenceClock(); if (initialSoftRefClock == 0) { initialSoftRefClock = ReferenceInternals.getSoftReferenceClock(); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/RuntimeCodeCacheReachabilityAnalyzer.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/RuntimeCodeCacheReachabilityAnalyzer.java index 412ac21e8320..2ecddc381c75 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/RuntimeCodeCacheReachabilityAnalyzer.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/RuntimeCodeCacheReachabilityAnalyzer.java @@ -52,7 +52,7 @@ public boolean hasUnreachableObjects() { } @Override - public boolean visitObjectReference(Pointer ptrPtrToObject, boolean compressed) { + public boolean visitObjectReference(Pointer ptrPtrToObject, boolean compressed, Object holderObject) { assert !unreachableObjects; Pointer ptrToObj = ReferenceAccess.singleton().readObjectAsUntrackedPointer(ptrPtrToObject, compressed); if (ptrToObj.isNonNull() && !isReachable(ptrToObj)) { diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java index 0c0a8da445af..d47b6d0bd869 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java @@ -24,7 +24,7 @@ */ package com.oracle.svm.core.genscavenge; -import static org.graalvm.compiler.nodes.extended.BranchProbabilityNode.EXTREMELY_SLOW_PATH_PROBABILITY; +import static org.graalvm.compiler.nodes.extended.BranchProbabilityNode.VERY_SLOW_PATH_PROBABILITY; import static org.graalvm.compiler.nodes.extended.BranchProbabilityNode.probability; import org.graalvm.compiler.word.Word; @@ -45,23 +45,18 @@ import com.oracle.svm.core.log.Log; import com.oracle.svm.core.thread.VMOperation; import com.oracle.svm.core.thread.VMThreads; -import com.oracle.svm.core.util.VMError; /** * A Space is a collection of HeapChunks. * * Each Space keeps two collections: one of {@link AlignedHeapChunk} and one of * {@link UnalignedHeapChunk}. - * - * The Space for the YoungGeneration is special because it keeps Pointers to the "top" and "end" of - * the current aligned allocation chunk for fast-path allocation without any indirections. The - * complication is the "top" pointer has to be flushed back to the chunk to make the heap parsable. 
*/ public final class Space { private final String name; private final boolean isFromSpace; private final int age; - private final SpaceAccounting accounting; + private final ChunksAccounting accounting; /* Heads and tails of the HeapChunk lists. */ private AlignedHeapChunk.AlignedHeader firstAlignedHeapChunk; @@ -76,11 +71,16 @@ public final class Space { */ @Platforms(Platform.HOSTED_ONLY.class) Space(String name, boolean isFromSpace, int age) { - this.name = name; + this(name, isFromSpace, age, null); + } + + @Platforms(Platform.HOSTED_ONLY.class) + Space(String name, boolean isFromSpace, int age, ChunksAccounting accounting) { assert name != null : "Space name should not be null."; + this.name = name; this.isFromSpace = isFromSpace; this.age = age; - this.accounting = new SpaceAccounting(); + this.accounting = new ChunksAccounting(accounting); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) @@ -104,16 +104,16 @@ boolean isEdenSpace() { } public boolean isYoungSpace() { - return age <= HeapPolicy.getMaxSurvivorSpaces(); + return age <= HeapParameters.getMaxSurvivorSpaces(); } boolean isSurvivorSpace() { - return age > 0 && age <= HeapPolicy.getMaxSurvivorSpaces(); + return age > 0 && age <= HeapParameters.getMaxSurvivorSpaces(); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public boolean isOldSpace() { - return age == (HeapPolicy.getMaxSurvivorSpaces() + 1); + return age == (HeapParameters.getMaxSurvivorSpaces() + 1); } int getAge() { @@ -193,29 +193,6 @@ private Pointer allocateMemory(UnsignedWord objectSize) { return result; } - /** - * Promote the HeapChunk containing an Object from its original space to this Space. - * - * This turns all the Objects in the chunk from white to grey: the objects are in this Space, - * but have not yet had their interior pointers visited. 
- */ - void promoteObjectChunk(Object original) { - if (ObjectHeaderImpl.isAlignedObject(original)) { - AlignedHeapChunk.AlignedHeader aChunk = AlignedHeapChunk.getEnclosingChunk(original); - Space originalSpace = HeapChunk.getSpace(aChunk); - if (originalSpace.isFromSpace()) { - promoteAlignedHeapChunk(aChunk, originalSpace); - } - } else { - assert ObjectHeaderImpl.isUnalignedObject(original); - UnalignedHeapChunk.UnalignedHeader uChunk = UnalignedHeapChunk.getEnclosingChunk(original); - Space originalSpace = HeapChunk.getSpace(uChunk); - if (originalSpace.isFromSpace()) { - promoteUnalignedHeapChunk(uChunk, originalSpace); - } - } - } - public void releaseChunks(ChunkReleaser chunkReleaser) { chunkReleaser.add(firstAlignedHeapChunk); chunkReleaser.add(firstUnalignedHeapChunk); @@ -237,7 +214,7 @@ void appendAlignedHeapChunk(AlignedHeapChunk.AlignedHeader aChunk) { VMThreads.guaranteeOwnsThreadMutex("Trying to append an aligned heap chunk but no mutual exclusion."); } appendAlignedHeapChunkUninterruptibly(aChunk); - accounting.noteAlignedHeapChunk(aChunk); + accounting.noteAlignedHeapChunk(); } @Uninterruptible(reason = "Must not interact with garbage collections.") @@ -258,7 +235,7 @@ private void appendAlignedHeapChunkUninterruptibly(AlignedHeapChunk.AlignedHeade void extractAlignedHeapChunk(AlignedHeapChunk.AlignedHeader aChunk) { assert VMOperation.isGCInProgress() : "Should only be called by the collector."; extractAlignedHeapChunkUninterruptibly(aChunk); - accounting.unnoteAlignedHeapChunk(aChunk); + accounting.unnoteAlignedHeapChunk(); } @Uninterruptible(reason = "Must not interact with garbage collections.") @@ -380,7 +357,9 @@ Object promoteAlignedObject(Object original, Space originalSpace) { assert this != originalSpace && originalSpace.isFromSpace(); Object copy = copyAlignedObject(original); - ObjectHeaderImpl.installForwardingPointer(original, copy); + if (copy != null) { + ObjectHeaderImpl.installForwardingPointer(original, copy); + } return copy; } @@ -390,12 +369,8 @@ private Object copyAlignedObject(Object originalObj) { UnsignedWord size = LayoutEncoding.getSizeFromObject(originalObj); Pointer copyMemory = allocateMemory(size); - if (probability(EXTREMELY_SLOW_PATH_PROBABILITY, copyMemory.isNull())) { - Log failureLog = Log.log().string("[! Space.copyAlignedObject:").indent(true); - failureLog.string(" failure to allocate ").unsigned(size).string(" bytes").newline(); - failureLog.string(" object to be promoted: ").object(originalObj).string(" header ").hex(ObjectHeaderImpl.readHeaderFromObject(originalObj)).newline(); - failureLog.string(" !]").indent(false); - throw VMError.shouldNotReachHere("Promotion failure"); + if (probability(VERY_SLOW_PATH_PROBABILITY, copyMemory.isNull())) { + return null; } /* @@ -417,7 +392,7 @@ private Object copyAlignedObject(Object originalObj) { } /** Promote an AlignedHeapChunk by moving it to this space. 
*/ - private void promoteAlignedHeapChunk(AlignedHeapChunk.AlignedHeader chunk, Space originalSpace) { + void promoteAlignedHeapChunk(AlignedHeapChunk.AlignedHeader chunk, Space originalSpace) { assert this != originalSpace && originalSpace.isFromSpace(); originalSpace.extractAlignedHeapChunk(chunk); @@ -451,15 +426,17 @@ void promoteUnalignedHeapChunk(UnalignedHeapChunk.UnalignedHeader chunk, Space o } private AlignedHeapChunk.AlignedHeader requestAlignedHeapChunk() { - assert VMOperation.isGCInProgress() : "Should only be called from the collector."; - AlignedHeapChunk.AlignedHeader aChunk = HeapImpl.getChunkProvider().produceAlignedChunk(); - if (aChunk.isNonNull()) { - if (this.isOldSpace()) { - RememberedSet.get().enableRememberedSetForChunk(aChunk); - } - appendAlignedHeapChunk(aChunk); + AlignedHeapChunk.AlignedHeader chunk; + if (isYoungSpace()) { + assert isSurvivorSpace(); + chunk = HeapImpl.getHeapImpl().getYoungGeneration().requestAlignedSurvivorChunk(); + } else { + chunk = HeapImpl.getHeapImpl().getOldGeneration().requestAlignedChunk(); + } + if (chunk.isNonNull()) { + appendAlignedHeapChunk(chunk); } - return aChunk; + return chunk; } void absorb(Space src) { @@ -505,7 +482,12 @@ boolean walkHeapChunks(MemoryWalker.Visitor visitor) { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) UnsignedWord getChunkBytes() { assert !isEdenSpace() || VMOperation.isGCInProgress() : "eden data is only accurate during a GC"; - return accounting.getAlignedChunkBytes().add(accounting.getUnalignedChunkBytes()); + return getAlignedChunkBytes().add(accounting.getUnalignedChunkBytes()); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + UnsignedWord getAlignedChunkBytes() { + return accounting.getAlignedChunkBytes(); } UnsignedWord computeObjectBytes() { @@ -535,80 +517,3 @@ private UnsignedWord computeUnalignedObjectBytes() { return result; } } - -/** - * Accounting for a {@link Space}. For the eden space, the values are inaccurate outside of a GC - * (see {@link HeapPolicy#getYoungUsedBytes()} and {@link HeapPolicy#getEdenUsedBytes()}. 
- */ -final class SpaceAccounting { - private long alignedCount; - private UnsignedWord alignedChunkBytes; - private long unalignedCount; - private UnsignedWord unalignedChunkBytes; - - @Platforms(Platform.HOSTED_ONLY.class) - SpaceAccounting() { - reset(); - } - - public void reset() { - alignedCount = 0L; - alignedChunkBytes = WordFactory.zero(); - unalignedCount = 0L; - unalignedChunkBytes = WordFactory.zero(); - } - - public UnsignedWord getChunkBytes() { - return getAlignedChunkBytes().add(getUnalignedChunkBytes()); - } - - public long getAlignedChunkCount() { - return alignedCount; - } - - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public UnsignedWord getAlignedChunkBytes() { - return alignedChunkBytes; - } - - public long getUnalignedChunkCount() { - return unalignedCount; - } - - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public UnsignedWord getUnalignedChunkBytes() { - return unalignedChunkBytes; - } - - void report(Log reportLog) { - reportLog.string("aligned: ").unsigned(alignedChunkBytes).string("/").unsigned(alignedCount); - reportLog.string(" "); - reportLog.string("unaligned: ").unsigned(unalignedChunkBytes).string("/").unsigned(unalignedCount); - } - - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - void noteAlignedHeapChunk(AlignedHeapChunk.AlignedHeader chunk) { - UnsignedWord size = AlignedHeapChunk.getCommittedObjectMemory(chunk); - alignedCount += 1; - alignedChunkBytes = alignedChunkBytes.add(size); - } - - void unnoteAlignedHeapChunk(AlignedHeapChunk.AlignedHeader chunk) { - UnsignedWord size = AlignedHeapChunk.getCommittedObjectMemory(chunk); - alignedCount -= 1; - alignedChunkBytes = alignedChunkBytes.subtract(size); - } - - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - void noteUnalignedHeapChunk(UnalignedHeapChunk.UnalignedHeader chunk) { - UnsignedWord size = UnalignedHeapChunk.getCommittedObjectMemory(chunk); - unalignedCount += 1; - unalignedChunkBytes = unalignedChunkBytes.add(size); - } - - void unnoteUnalignedHeapChunk(UnalignedHeapChunk.UnalignedHeader chunk) { - UnsignedWord size = UnalignedHeapChunk.getCommittedObjectMemory(chunk); - unalignedCount -= 1; - unalignedChunkBytes = unalignedChunkBytes.subtract(size); - } -} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/StackVerifier.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/StackVerifier.java index dc48435984f1..eb0802f16273 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/StackVerifier.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/StackVerifier.java @@ -103,8 +103,8 @@ public void reset() { } @Override - public boolean visitObjectReference(Pointer objRef, boolean compressed) { - result &= HeapVerifier.verifyReference(null, objRef, compressed); + public boolean visitObjectReference(Pointer objRef, boolean compressed, Object holderObject) { + result &= HeapVerifier.verifyReference(holderObject, objRef, compressed); return true; } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java index f56e5362cfce..663df156da27 100644 --- 
a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java @@ -170,7 +170,7 @@ private static Object slowPathNewInstance(Word objectHeader, UnsignedWord size) /** Use the end of slow-path allocation as a place to run periodic hook code. */ private static void runSlowPathHooks() { - HeapPolicy.samplePhysicalMemorySize(); + GCImpl.getPolicy().updateSizeParameters(); } @RestrictHeapAccess(access = RestrictHeapAccess.Access.NO_ALLOCATION, reason = "Must not allocate in the implementation of allocation.") @@ -178,7 +178,7 @@ private static Object slowPathNewInstanceWithoutAllocating(DynamicHub hub, Unsig DeoptTester.disableDeoptTesting(); try { HeapImpl.exitIfAllocationDisallowed("ThreadLocalAllocation.allocateNewInstance", DynamicHub.toClass(hub).getName()); - HeapPolicy.maybeCollectOnAllocation(); + GCImpl.getGCImpl().maybeCollectOnAllocation(); AlignedHeader newTlab = HeapImpl.getChunkProvider().produceAlignedChunk(); return allocateInstanceInNewTlab(hub, size, newTlab); @@ -206,7 +206,8 @@ private static Object slowPathNewArray(Word objectHeader, int length, int fillSt * has other objects in it, and the next collection could throw an OutOfMemoryError if * this object is allocated and survives. */ - if (size.aboveOrEqual(HeapPolicy.getMaximumHeapSize())) { + GCImpl.getPolicy().ensureSizeParametersInitialized(); + if (size.aboveOrEqual(GCImpl.getPolicy().getMaximumHeapSize())) { throw new OutOfMemoryError("Array allocation too large."); } @@ -228,17 +229,23 @@ private static Object slowPathNewArrayWithoutAllocating(DynamicHub hub, int leng DeoptTester.disableDeoptTesting(); try { HeapImpl.exitIfAllocationDisallowed("Heap.allocateNewArray", DynamicHub.toClass(hub).getName()); - HeapPolicy.maybeCollectOnAllocation(); + GCImpl.getGCImpl().maybeCollectOnAllocation(); - if (size.aboveOrEqual(HeapPolicy.getLargeArrayThreshold())) { + if (size.aboveOrEqual(HeapParameters.getLargeArrayThreshold())) { /* Large arrays go into their own unaligned chunk. */ UnalignedHeapChunk.UnalignedHeader newTlabChunk = HeapImpl.getChunkProvider().produceUnalignedChunk(size); return allocateLargeArrayInNewTlab(hub, length, size, fillStartOffset, newTlabChunk); - } else { - /* Small arrays go into the regular aligned chunk. */ + } + /* Small arrays go into the regular aligned chunk. */ + + // We might have allocated in the caller and acquired a TLAB with enough space already + // (but we need to check in an uninterruptible method to be safe) + Object array = allocateSmallArrayInCurrentTlab(hub, length, size, fillStartOffset); + if (array == null) { // We need a new chunk. 
AlignedHeader newTlabChunk = HeapImpl.getChunkProvider().produceAlignedChunk(); - return allocateSmallArrayInNewTlab(hub, length, size, fillStartOffset, newTlabChunk); + array = allocateSmallArrayInNewTlab(hub, length, size, fillStartOffset, newTlabChunk); } + return array; } finally { DeoptTester.enableDeoptTesting(); } @@ -250,6 +257,15 @@ private static Object allocateInstanceInNewTlab(DynamicHub hub, UnsignedWord siz return FormatObjectNode.formatObject(memory, DynamicHub.toClass(hub), false, true, true); } + @Uninterruptible(reason = "Holds uninitialized memory.") + private static Object allocateSmallArrayInCurrentTlab(DynamicHub hub, int length, UnsignedWord size, int fillStartOffset) { + if (size.aboveThan(availableTlabMemory(getTlab()))) { + return null; + } + Pointer memory = allocateRawMemoryInTlab(size, getTlab()); + return FormatArrayNode.formatArray(memory, DynamicHub.toClass(hub), length, false, false, true, fillStartOffset, true); + } + @Uninterruptible(reason = "Holds uninitialized memory.") private static Object allocateSmallArrayInNewTlab(DynamicHub hub, int length, UnsignedWord size, int fillStartOffset, AlignedHeader newTlabChunk) { Pointer memory = allocateRawMemoryInNewTlab(size, newTlabChunk); @@ -262,6 +278,7 @@ private static Object allocateLargeArrayInNewTlab(DynamicHub hub, int length, Un HeapChunk.setNext(newTlabChunk, tlab.getUnalignedChunk()); tlab.setUnalignedChunk(newTlabChunk); + HeapImpl.getHeapImpl().getAccounting().increaseEdenUsedBytes(size); Pointer memory = UnalignedHeapChunk.allocateMemory(newTlabChunk, size); assert memory.isNonNull(); @@ -277,9 +294,15 @@ private static Pointer allocateRawMemoryInNewTlab(UnsignedWord size, AlignedHead retireCurrentAllocationChunk(tlab); registerNewAllocationChunk(tlab, newTlabChunk); + + return allocateRawMemoryInTlab(size, tlab); + } + + @Uninterruptible(reason = "Returns uninitialized memory, modifies TLAB", callerMustBe = true) + private static Pointer allocateRawMemoryInTlab(UnsignedWord size, Descriptor tlab) { assert size.belowOrEqual(availableTlabMemory(tlab)) : "Not enough TLAB space for allocation"; - // We just registered a new chunk, so TLAB top cannot be null. + // The (uninterruptible) caller has ensured that we have a TLAB. 
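+        // Bump allocation: read the current top, advance it by the requested size, and return the
+        // old top; the caller must format the uninitialized memory before it becomes reachable.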
Pointer top = KnownIntrinsics.nonNullPointer(tlab.getAllocationTop(TLAB_TOP_IDENTITY)); tlab.setAllocationTop(top.add(size), TLAB_TOP_IDENTITY); return top; @@ -372,6 +395,7 @@ static void retireToSpace(Descriptor tlab, Space space) { private static void registerNewAllocationChunk(Descriptor tlab, AlignedHeader newChunk) { HeapChunk.setNext(newChunk, tlab.getAlignedChunk()); tlab.setAlignedChunk(newChunk); + HeapImpl.getHeapImpl().getAccounting().increaseEdenUsedBytes(HeapParameters.getAlignedHeapChunkSize()); resumeAllocationInCurrentChunk(tlab); } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java index 76ab70d84263..7d07ab5ae45f 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java @@ -35,25 +35,29 @@ import com.oracle.svm.core.genscavenge.GCImpl.ChunkReleaser; import com.oracle.svm.core.heap.ObjectVisitor; import com.oracle.svm.core.log.Log; +import com.oracle.svm.core.thread.VMOperation; public final class YoungGeneration extends Generation { private final Space eden; private final Space[] survivorFromSpaces; private final Space[] survivorToSpaces; private final GreyObjectsWalker[] survivorGreyObjectsWalkers; + private final ChunksAccounting survivorsToSpacesAccounting; private final int maxSurvivorSpaces; @Platforms(Platform.HOSTED_ONLY.class) YoungGeneration(String name) { super(name); this.eden = new Space("edenSpace", true, 0); - this.maxSurvivorSpaces = HeapPolicy.getMaxSurvivorSpaces(); + this.maxSurvivorSpaces = HeapParameters.getMaxSurvivorSpaces(); this.survivorFromSpaces = new Space[maxSurvivorSpaces]; this.survivorToSpaces = new Space[maxSurvivorSpaces]; this.survivorGreyObjectsWalkers = new GreyObjectsWalker[maxSurvivorSpaces]; + this.survivorsToSpacesAccounting = new ChunksAccounting(); for (int i = 0; i < maxSurvivorSpaces; i++) { - this.survivorFromSpaces[i] = new Space("Survivor-" + (i + 1) + " From", true, (i + 1)); - this.survivorToSpaces[i] = new Space("Survivor-" + (i + 1) + " To", false, (i + 1)); + int age = i + 1; + this.survivorFromSpaces[i] = new Space("Survivor-" + age + " From", true, age); + this.survivorToSpaces[i] = new Space("Survivor-" + age + " To", false, age, survivorsToSpacesAccounting); this.survivorGreyObjectsWalkers[i] = new GreyObjectsWalker(); } } @@ -128,42 +132,10 @@ private GreyObjectsWalker getSurvivorGreyObjectsWalker(int index) { return survivorGreyObjectsWalkers[index]; } - @AlwaysInline("GC performance") - @Override - protected Object promoteObject(Object original, UnsignedWord header) { - if (ObjectHeaderImpl.isAlignedHeader(header)) { - AlignedHeapChunk.AlignedHeader originalChunk = AlignedHeapChunk.getEnclosingChunk(original); - Space originalSpace = HeapChunk.getSpace(originalChunk); - if (originalSpace.isFromSpace()) { - return promoteAlignedObject(original, originalSpace); - } - } else { - assert ObjectHeaderImpl.isUnalignedHeader(header); - UnalignedHeapChunk.UnalignedHeader chunk = UnalignedHeapChunk.getEnclosingChunk(original); - Space originalSpace = HeapChunk.getSpace(chunk); - if (originalSpace.isFromSpace()) { - promoteUnalignedObject(chunk, originalSpace); - } - } - return original; - } - - private void releaseSurvivorSpaces(ChunkReleaser chunkReleaser, boolean isFromSpace) { - 
for (int i = 0; i < maxSurvivorSpaces; i++) { - if (isFromSpace) { - getSurvivorFromSpaceAt(i).releaseChunks(chunkReleaser); - } else { - getSurvivorToSpaceAt(i).releaseChunks(chunkReleaser); - } - } - } - void releaseSpaces(ChunkReleaser chunkReleaser) { getEden().releaseChunks(chunkReleaser); - - releaseSurvivorSpaces(chunkReleaser, true); - if (HeapImpl.getHeapImpl().getGCImpl().isCompleteCollection()) { - releaseSurvivorSpaces(chunkReleaser, false); + for (int i = 0; i < maxSurvivorSpaces; i++) { + getSurvivorFromSpaceAt(i).releaseChunks(chunkReleaser); } } @@ -173,6 +145,16 @@ void swapSpaces() { assert getSurvivorFromSpaceAt(i).getChunkBytes().equal(0) : "Chunk bytes must be 0"; getSurvivorFromSpaceAt(i).absorb(getSurvivorToSpaceAt(i)); } + assert survivorsToSpacesAccounting.getChunkBytes().equal(0); + } + + void emptyFromSpacesIntoToSpaces() { + assert getEden().isEmpty() && getEden().getChunkBytes().equal(0) : "Eden must be empty."; + assert survivorsToSpacesAccounting.getChunkBytes().equal(0) : "Survivor to-spaces must be empty"; + for (int i = 0; i < maxSurvivorSpaces; i++) { + assert getSurvivorToSpaceAt(i).isEmpty() && getSurvivorToSpaceAt(i).getChunkBytes().equal(0) : "Survivor to-space must be empty."; + getSurvivorToSpaceAt(i).absorb(getSurvivorFromSpaceAt(i)); + } } boolean walkHeapChunks(MemoryWalker.Visitor visitor) { @@ -222,7 +204,8 @@ UnsignedWord getChunkBytes() { return getEden().getChunkBytes().add(getSurvivorChunkBytes()); } - private UnsignedWord getSurvivorChunkBytes() { + /** This value is only updated during a GC, be careful: see {@link #getChunkBytes}. */ + UnsignedWord getSurvivorChunkBytes() { UnsignedWord chunkBytes = WordFactory.zero(); for (int i = 0; i < maxSurvivorSpaces; i++) { chunkBytes = chunkBytes.add(this.survivorFromSpaces[i].getChunkBytes()); @@ -231,11 +214,26 @@ private UnsignedWord getSurvivorChunkBytes() { return chunkBytes; } + /** This value is only updated during a GC, be careful: see {@link #getChunkBytes}. */ + UnsignedWord getAlignedChunkBytes() { + return getEden().getAlignedChunkBytes().add(getSurvivorAlignedChunkBytes()); + } + + /** This value is only updated during a GC, be careful: see {@link #getChunkBytes}. 
*/ + UnsignedWord getSurvivorAlignedChunkBytes() { + UnsignedWord chunkBytes = WordFactory.zero(); + for (int i = 0; i < maxSurvivorSpaces; i++) { + chunkBytes = chunkBytes.add(this.survivorFromSpaces[i].getAlignedChunkBytes()); + chunkBytes = chunkBytes.add(this.survivorToSpaces[i].getAlignedChunkBytes()); + } + return chunkBytes; + } + UnsignedWord computeObjectBytes() { return getEden().computeObjectBytes().add(computeSurvivorObjectBytes()); } - private UnsignedWord computeSurvivorObjectBytes() { + UnsignedWord computeSurvivorObjectBytes() { UnsignedWord usedObjectBytes = WordFactory.zero(); for (int i = 0; i < maxSurvivorSpaces; i++) { usedObjectBytes = usedObjectBytes.add(survivorFromSpaces[i].computeObjectBytes()); @@ -258,30 +256,76 @@ public boolean contains(Object object) { } @AlwaysInline("GC performance") - private Object promoteAlignedObject(Object original, Space originalSpace) { + @Override + protected Object promoteAlignedObject(Object original, AlignedHeapChunk.AlignedHeader originalChunk, Space originalSpace) { + assert originalSpace.isFromSpace(); assert ObjectHeaderImpl.isAlignedObject(original); - assert originalSpace.isEdenSpace() || originalSpace.isSurvivorSpace() : "Should be Eden or survivor."; - assert originalSpace.isFromSpace() : "must not be called for other objects"; + assert originalSpace.getAge() < maxSurvivorSpaces; - if (originalSpace.getAge() < maxSurvivorSpaces) { - int age = originalSpace.getNextAgeForPromotion(); - Space toSpace = getSurvivorToSpaceAt(age - 1); - return toSpace.promoteAlignedObject(original, originalSpace); - } else { - return HeapImpl.getHeapImpl().getOldGeneration().promoteAlignedObject(original, originalSpace); - } + // The object might fit in an existing chunk in the survivor space. If it doesn't, we get + // called back in requestAlignedSurvivorChunk() and decide if another chunk fits in the + // survivor space. If it does not, we return null here to tell the caller. 
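+        // (The caller is then responsible for promoting the object somewhere else, e.g. into the
+        // old generation.)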
+ int age = originalSpace.getNextAgeForPromotion(); + Space toSpace = getSurvivorToSpaceAt(age - 1); + return toSpace.promoteAlignedObject(original, originalSpace); } @AlwaysInline("GC performance") - private void promoteUnalignedObject(UnalignedHeapChunk.UnalignedHeader originalChunk, Space originalSpace) { - assert originalSpace.isFromSpace() : "must not be called for other objects"; + @Override + protected Object promoteUnalignedObject(Object original, UnalignedHeapChunk.UnalignedHeader originalChunk, Space originalSpace) { + assert originalSpace.isFromSpace(); + assert originalSpace.getAge() < maxSurvivorSpaces; + if (!unalignedChunkFitsInSurvivors(originalChunk)) { + return null; + } - if (originalSpace.getAge() < maxSurvivorSpaces) { - int age = originalSpace.getNextAgeForPromotion(); - Space toSpace = getSurvivorToSpaceAt(age - 1); - toSpace.promoteUnalignedHeapChunk(originalChunk, originalSpace); + int age = originalSpace.getNextAgeForPromotion(); + Space toSpace = getSurvivorToSpaceAt(age - 1); + toSpace.promoteUnalignedHeapChunk(originalChunk, originalSpace); + return original; + } + + @Override + protected boolean promoteChunk(HeapChunk.Header originalChunk, boolean isAligned, Space originalSpace) { + assert originalSpace.isFromSpace(); + assert originalSpace.getAge() < maxSurvivorSpaces; + if (!fitsInSurvivors(originalChunk, isAligned)) { + return false; + } + + int age = originalSpace.getNextAgeForPromotion(); + Space toSpace = getSurvivorToSpaceAt(age - 1); + if (isAligned) { + toSpace.promoteAlignedHeapChunk((AlignedHeapChunk.AlignedHeader) originalChunk, originalSpace); } else { - HeapImpl.getHeapImpl().getOldGeneration().promoteUnalignedChunk(originalChunk, originalSpace); + toSpace.promoteUnalignedHeapChunk((UnalignedHeapChunk.UnalignedHeader) originalChunk, originalSpace); + } + return true; + } + + private boolean fitsInSurvivors(HeapChunk.Header chunk, boolean isAligned) { + if (isAligned) { + return alignedChunkFitsInSurvivors(); + } + return unalignedChunkFitsInSurvivors((UnalignedHeapChunk.UnalignedHeader) chunk); + } + + private boolean alignedChunkFitsInSurvivors() { + UnsignedWord sum = survivorsToSpacesAccounting.getChunkBytes().add(HeapParameters.getAlignedHeapChunkSize()); + return sum.belowOrEqual(GCImpl.getPolicy().getSurvivorSpacesCapacity()); + } + + private boolean unalignedChunkFitsInSurvivors(UnalignedHeapChunk.UnalignedHeader chunk) { + UnsignedWord size = UnalignedHeapChunk.getCommittedObjectMemory(chunk); + UnsignedWord sum = survivorsToSpacesAccounting.getChunkBytes().add(size); + return sum.belowOrEqual(GCImpl.getPolicy().getSurvivorSpacesCapacity()); + } + + AlignedHeapChunk.AlignedHeader requestAlignedSurvivorChunk() { + assert VMOperation.isGCInProgress() : "Should only be called from the collector."; + if (!alignedChunkFitsInSurvivors()) { + return WordFactory.nullPointer(); } + return HeapImpl.getChunkProvider().produceAlignedChunk(); } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSnippets.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSnippets.java index e9a4a9340eb8..02c26ff0112b 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSnippets.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSnippets.java @@ -46,7 +46,7 @@ import 
org.graalvm.word.UnsignedWord; import org.graalvm.word.WordFactory; -import com.oracle.svm.core.genscavenge.HeapPolicy; +import com.oracle.svm.core.genscavenge.HeapParameters; import com.oracle.svm.core.genscavenge.ObjectHeaderImpl; import com.oracle.svm.core.genscavenge.ThreadLocalAllocation; import com.oracle.svm.core.genscavenge.ThreadLocalAllocation.Descriptor; @@ -116,7 +116,7 @@ public boolean useTLAB() { @Override protected boolean shouldAllocateInTLAB(UnsignedWord size, boolean isArray) { - return !isArray || size.belowThan(HeapPolicy.getLargeArrayThreshold()); + return !isArray || size.belowThan(HeapParameters.getLargeArrayThreshold()); } @Override diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/HeapFeature.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/HeapFeature.java index 8a13ae2913ee..00e4218eb80f 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/HeapFeature.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/HeapFeature.java @@ -74,7 +74,7 @@ public List> getRequiredFeatures() { @Override public void afterRegistration(AfterRegistrationAccess access) { - HeapImpl heap = new HeapImpl(access, SubstrateOptions.getPageSize()); + HeapImpl heap = new HeapImpl(SubstrateOptions.getPageSize()); ImageSingletons.add(Heap.class, heap); ImageSingletons.add(SubstrateAllocationSnippets.class, new GenScavengeAllocationSnippets()); ImageSingletons.add(RememberedSet.class, createRememberedSet()); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/AlignedChunkRememberedSet.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/AlignedChunkRememberedSet.java index 9b20da888c89..786ee8d82642 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/AlignedChunkRememberedSet.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/AlignedChunkRememberedSet.java @@ -42,7 +42,7 @@ import com.oracle.svm.core.genscavenge.AlignedHeapChunk.AlignedHeader; import com.oracle.svm.core.genscavenge.GreyToBlackObjectVisitor; import com.oracle.svm.core.genscavenge.HeapChunk; -import com.oracle.svm.core.genscavenge.HeapPolicy; +import com.oracle.svm.core.genscavenge.HeapParameters; import com.oracle.svm.core.genscavenge.ObjectHeaderImpl; import com.oracle.svm.core.hub.LayoutEncoding; import com.oracle.svm.core.image.ImageHeapObject; @@ -173,7 +173,7 @@ static UnsignedWord getStructSize() { static UnsignedWord getCardTableSize() { // We conservatively compute the size as a fraction of the size of the entire chunk. 
UnsignedWord structSize = getStructSize(); - UnsignedWord available = HeapPolicy.getAlignedHeapChunkSize().subtract(structSize); + UnsignedWord available = HeapParameters.getAlignedHeapChunkSize().subtract(structSize); UnsignedWord requiredSize = CardTable.tableSizeForMemorySize(available); UnsignedWord alignment = WordFactory.unsigned(ConfigurationValues.getObjectLayout().getAlignment()); return UnsignedUtils.roundUp(requiredSize, alignment); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/CardTable.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/CardTable.java index c26ae959cda8..5665077139ab 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/CardTable.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/CardTable.java @@ -200,7 +200,7 @@ public void initialize(Object parentObject, Pointer cardTableStart, Pointer obje @Override @SuppressFBWarnings(value = {"NS_DANGEROUS_NON_SHORT_CIRCUIT"}, justification = "Non-short circuit logic is used on purpose here.") - public boolean visitObjectReference(Pointer reference, boolean compressed) { + public boolean visitObjectReference(Pointer reference, boolean compressed, Object holderObject) { Pointer referencedObject = ReferenceAccess.singleton().readObjectAsUntrackedPointer(reference, compressed); success &= verifyReference(parentObject, cardTableStart, objectsStart, reference, referencedObject); return true; diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/CardTableBasedRememberedSet.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/CardTableBasedRememberedSet.java index b0696edf0fd3..251992239f3e 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/CardTableBasedRememberedSet.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/CardTableBasedRememberedSet.java @@ -37,7 +37,7 @@ import com.oracle.svm.core.genscavenge.GreyToBlackObjectVisitor; import com.oracle.svm.core.genscavenge.HeapChunk; import com.oracle.svm.core.genscavenge.HeapImpl; -import com.oracle.svm.core.genscavenge.HeapPolicy; +import com.oracle.svm.core.genscavenge.HeapParameters; import com.oracle.svm.core.genscavenge.ObjectHeaderImpl; import com.oracle.svm.core.genscavenge.Space; import com.oracle.svm.core.genscavenge.UnalignedHeapChunk.UnalignedHeader; @@ -135,12 +135,12 @@ public void dirtyCardIfNecessary(Object holderObject, Object object) { return; } // We dirty the cards of ... - if (HeapPolicy.getMaxSurvivorSpaces() != 0 && !GCImpl.getGCImpl().isCompleteCollection() && HeapImpl.getHeapImpl().getYoungGeneration().contains(object)) { + if (HeapParameters.getMaxSurvivorSpaces() != 0 && HeapImpl.getHeapImpl().getYoungGeneration().contains(object)) { /* - * ...references from the old generation to the young generation, unless there cannot be - * any such references if we do not use survivor spaces, or if we do but are doing a - * complete collection: in both cases, all objects are promoted to the old generation. 
- * (We avoid an extra old generation check and might remark a few image heap cards, too) + * ...references from the old generation to the young generation, unless we do not use + * survivor spaces, in which case there will be no such references because all young + * objects are promoted to the old generation. (We avoid an extra old generation check + * and might remark a few image heap cards, too) */ } else if (HeapImpl.usesImageHeapCardMarking() && GCImpl.getGCImpl().isCompleteCollection() && HeapImpl.getHeapImpl().isInImageHeap(holderObject)) { // ...references from the image heap to the runtime heap, but we clean and remark those diff --git a/substratevm/src/com.oracle.svm.core.posix/src/com/oracle/svm/core/posix/PosixVirtualMemoryProvider.java b/substratevm/src/com.oracle.svm.core.posix/src/com/oracle/svm/core/posix/PosixVirtualMemoryProvider.java index cd565eec6f87..ff20c8a7e50b 100644 --- a/substratevm/src/com.oracle.svm.core.posix/src/com/oracle/svm/core/posix/PosixVirtualMemoryProvider.java +++ b/substratevm/src/com.oracle.svm.core.posix/src/com/oracle/svm/core/posix/PosixVirtualMemoryProvider.java @@ -168,7 +168,7 @@ public int protect(PointerBase start, UnsignedWord nbytes, int access) { @Override @Uninterruptible(reason = "May be called from uninterruptible code.", mayBeInlined = true) public int uncommit(PointerBase start, UnsignedWord nbytes) { - final Pointer result = mmap(start, nbytes, PROT_NONE(), MAP_ANON() | MAP_PRIVATE() | MAP_NORESERVE(), NO_FD, NO_FD_OFFSET); + final Pointer result = mmap(start, nbytes, PROT_NONE(), MAP_FIXED() | MAP_ANON() | MAP_PRIVATE() | MAP_NORESERVE(), NO_FD, NO_FD_OFFSET); return result.notEqual(MAP_FAILED()) ? 0 : -1; } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/c/NonmovableArrays.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/c/NonmovableArrays.java index ac4287adf680..42f2b6a4734c 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/c/NonmovableArrays.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/c/NonmovableArrays.java @@ -464,7 +464,7 @@ public static boolean walkUnmanagedObjectArray(NonmovableObjectArray array, O assert refSize == (1 << readElementShift(array)); Pointer p = ((Pointer) array).add(readArrayBase(array)).add(startIndex * refSize); for (int i = 0; i < count; i++) { - if (!visitor.visitObjectReference(p, true)) { + if (!visitor.visitObjectReference(p, true, null)) { return false; } p = p.add(refSize); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/code/CodeInfoEncoder.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/code/CodeInfoEncoder.java index cf7e1bea6483..bc4d7e60c59b 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/code/CodeInfoEncoder.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/code/CodeInfoEncoder.java @@ -623,12 +623,12 @@ class CollectingObjectReferenceVisitor implements ObjectReferenceVisitor { protected final SubstrateReferenceMap result = new SubstrateReferenceMap(); @Override - public boolean visitObjectReference(Pointer objRef, boolean compressed) { - return visitObjectReferenceInline(objRef, 0, compressed); + public boolean visitObjectReference(Pointer objRef, boolean compressed, Object holderObject) { + return visitObjectReferenceInline(objRef, 0, compressed, holderObject); } @Override - public boolean visitObjectReferenceInline(Pointer objRef, int innerOffset, boolean compressed) { + public boolean 
visitObjectReferenceInline(Pointer objRef, int innerOffset, boolean compressed, Object holderObject) { int derivedOffset = NumUtil.safeToInt(objRef.rawValue()); result.markReferenceAtOffset(derivedOffset, derivedOffset - innerOffset, compressed); return true; diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/CodeReferenceMapDecoder.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/CodeReferenceMapDecoder.java index c02cda8a987b..adf30aad2aa1 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/CodeReferenceMapDecoder.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/CodeReferenceMapDecoder.java @@ -125,7 +125,7 @@ public static boolean walkOffsetsFromPointer(PointerBase baseAddress, Nonmovable */ Pointer basePtr = baseAddress.isNull() ? objRef : objRef.readWord(0); - final boolean visitResult = visitor.visitObjectReferenceInline(objRef, 0, compressed); + final boolean visitResult = visitor.visitObjectReferenceInline(objRef, 0, compressed, null); if (!visitResult) { return false; } @@ -157,7 +157,7 @@ public static boolean walkOffsetsFromPointer(PointerBase baseAddress, Nonmovable Pointer derivedPtr = baseAddress.isNull() ? derivedRef : derivedRef.readWord(0); int innerOffset = NumUtil.safeToInt(derivedPtr.subtract(basePtr).rawValue()); - final boolean derivedVisitResult = visitor.visitObjectReferenceInline(derivedRef, innerOffset, compressed); + final boolean derivedVisitResult = visitor.visitObjectReferenceInline(derivedRef, innerOffset, compressed, null); if (!derivedVisitResult) { return false; } @@ -165,7 +165,7 @@ public static boolean walkOffsetsFromPointer(PointerBase baseAddress, Nonmovable objRef = objRef.add(refSize); } else { for (long c = 0; c < count; c += 1) { - final boolean visitResult = visitor.visitObjectReferenceInline(objRef, 0, compressed); + final boolean visitResult = visitor.visitObjectReferenceInline(objRef, 0, compressed, null); if (!visitResult) { return false; } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/ObjectReferenceVisitor.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/ObjectReferenceVisitor.java index d2738d803989..79e55b36b506 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/ObjectReferenceVisitor.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/ObjectReferenceVisitor.java @@ -26,44 +26,33 @@ import org.graalvm.word.Pointer; +import com.oracle.svm.core.annotate.AlwaysInline; import com.oracle.svm.core.annotate.RestrictHeapAccess; +import com.oracle.svm.core.util.VMError; -/** - * Visit an object reference. The visitObjectReference method takes a Pointer as a parameter, but - * that Pointer is *not* a pointer to an Object, but a Pointer to an object reference. - */ +/** Visitor for object references. */ public interface ObjectReferenceVisitor { /** - * Visit an Object reference. - * - * To get the corresponding Object reference.readObject can be used. + * Visit an object reference. * - * @param objRef The Object reference to be visited. + * @param objRef Address of object reference to visit (not address of the referenced object). * @param compressed True if the reference is in compressed form, false otherwise. - * @return True if visiting should continue, false if visiting should stop. + * @param holderObject The object containing the reference, or {@code null} if the reference is + * not part of an object. 
+ * @return {@code true} if visiting should continue, {@code false} if visiting should stop. */ @RestrictHeapAccess(access = RestrictHeapAccess.Access.UNRESTRICTED, overridesCallers = true, reason = "Some implementations allocate.") - boolean visitObjectReference(Pointer objRef, boolean compressed); - - /** Like visitObjectReference(Pointer), but always inlined for performance. */ - @RestrictHeapAccess(access = RestrictHeapAccess.Access.UNRESTRICTED, overridesCallers = true, reason = "Some implementations allocate.") - default boolean visitObjectReferenceInline(Pointer objRef, boolean compressed) { - return visitObjectReference(objRef, compressed); - } - - @RestrictHeapAccess(access = RestrictHeapAccess.Access.UNRESTRICTED, overridesCallers = true, reason = "Some implementations allocate.") - default boolean visitObjectReferenceInline(Pointer objRef, @SuppressWarnings("unused") int innerOffset, boolean compressed) { - return visitObjectReference(objRef, compressed); - } - - /** Like visitObjectReference(Pointer), but always inlined for performance. */ - @RestrictHeapAccess(access = RestrictHeapAccess.Access.UNRESTRICTED, overridesCallers = true, reason = "Some implementations allocate.") - default boolean visitObjectReferenceInline(Pointer objRef, boolean compressed, @SuppressWarnings("unused") Object holderObject) { - return visitObjectReferenceInline(objRef, compressed); - } + boolean visitObjectReference(Pointer objRef, boolean compressed, Object holderObject); + /** + * @param innerOffset If the reference is a {@linkplain CodeReferenceMapDecoder derived + * reference}, a positive integer that must be subtracted from the address to which + * the object reference points in order to get the start of the referenced object. + */ + @AlwaysInline("GC performance") @RestrictHeapAccess(access = RestrictHeapAccess.Access.UNRESTRICTED, overridesCallers = true, reason = "Some implementations allocate.") - default boolean visitObjectReferenceInline(Pointer objRef, @SuppressWarnings("unused") int innerOffset, boolean compressed, @SuppressWarnings("unused") Object holderObject) { - return visitObjectReferenceInline(objRef, innerOffset, compressed); + default boolean visitObjectReferenceInline(Pointer objRef, int innerOffset, boolean compressed, Object holderObject) { + VMError.guarantee(innerOffset == 0, "visitor does not support derived references"); + return visitObjectReference(objRef, compressed, holderObject); } } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/InteriorObjRefWalker.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/InteriorObjRefWalker.java index 74b15111e6f5..9522479008f7 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/InteriorObjRefWalker.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/InteriorObjRefWalker.java @@ -73,7 +73,7 @@ public static boolean walkObjectInline(final Object obj, final ObjectReferenceVi Pointer pos = objPointer.add(LayoutEncoding.getArrayBaseOffset(layoutEncoding)); Pointer end = pos.add(WordFactory.unsigned(referenceSize).multiply(length)); while (pos.belowThan(end)) { - final boolean visitResult = visitor.visitObjectReferenceInline(pos, isCompressed, obj); + final boolean visitResult = visitor.visitObjectReferenceInline(pos, 0, isCompressed, obj); if (!visitResult) { return false; } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/UninterruptibleUtils.java 
b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/UninterruptibleUtils.java index df9bc3d15c07..de2cd802b210 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/UninterruptibleUtils.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/UninterruptibleUtils.java @@ -376,6 +376,11 @@ public static long max(long a, long b) { return (a >= b) ? a : b; } + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public static int clamp(int value, int min, int max) { + return min(max(value, min), max); + } + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static long abs(long a) { return (a < 0) ? -a : a; diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/os/CommittedMemoryProvider.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/os/CommittedMemoryProvider.java index e9526a53fe1b..5c982b45b59b 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/os/CommittedMemoryProvider.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/os/CommittedMemoryProvider.java @@ -124,10 +124,8 @@ default void beforeGarbageCollection() { /** * Called by the garbage collector after a collection has ended, as an opportunity to perform * lazy operations, sanity checks or clean-ups. - * - * @param completeCollection Whether the garbage collector has performed a full collection. */ - default void afterGarbageCollection(boolean completeCollection) { + default void afterGarbageCollection() { } enum Access { diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/util/TimeUtils.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/util/TimeUtils.java index 9c19564f4c21..ca6a338a4e80 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/util/TimeUtils.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/util/TimeUtils.java @@ -101,6 +101,10 @@ public static long divideNanosToSeconds(long nanos) { return (nanos / nanosPerSecond); } + public static double nanosToSecondsDouble(long nanos) { + return (nanos / (double) nanosPerSecond); + } + /** Return the nanoseconds remaining after taking out all the seconds. */ @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static long remainderNanosToSeconds(long nanos) { diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/util/UnsignedUtils.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/util/UnsignedUtils.java index 6e9ce38b61bf..0cd33267c675 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/util/UnsignedUtils.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/util/UnsignedUtils.java @@ -96,6 +96,7 @@ public static UnsignedWord min(UnsignedWord x, UnsignedWord y) { * @param y Another Unsigned. * @return The whichever Unsigned is larger. */ + @Uninterruptible(reason = "Used in uninterruptible code.", mayBeInlined = true) public static UnsignedWord max(UnsignedWord x, UnsignedWord y) { return (x.aboveOrEqual(y)) ? 
x : y;
     }
 
@@ -109,4 +110,42 @@ public static int safeToInt(UnsignedWord w) {
         assert l >= 0 && l == (int) l;
         return (int) l;
     }
+
+    @Uninterruptible(reason = "Used in uninterruptible code.", mayBeInlined = true)
+    public static UnsignedWord clamp(UnsignedWord value, UnsignedWord min, UnsignedWord max) {
+        return min(max(value, min), max);
+    }
+
+    public static double toDouble(UnsignedWord u) {
+        long l = u.rawValue();
+        if (l >= 0) {
+            return l;
+        }
+        /*
+         * The shift does not lose precision because the double's mantissa has fewer bits than long
+         * anyway. The bitwise or of the LSB rounds to nearest as required by JLS 5.1.2.
+         */
+        return ((l >>> 1) | (l & 1)) * 2.0;
+    }
+
+    public static UnsignedWord fromDouble(double d) { // follows JLS 5.1.3
+        long l = (long) d;
+        if (Double.isNaN(d) || l <= 0) { // includes -inf
+            return WordFactory.zero();
+        }
+        if (l < Long.MAX_VALUE) {
+            return WordFactory.unsigned(l);
+        }
+        /*
+         * This division does not lose precision with these large numbers because the double's
+         * mantissa has fewer bits than long does. For the same reason, it also doesn't matter that
+         * we could not distinguish UnsignedUtils.MAX_VALUE - 1 from +inf or "too large to fit" --
+         * it simply does not have an exact representation as a double.
+         */
+        l = (long) (d / 2.0);
+        if (l == Long.MAX_VALUE) { // too large or +inf
+            return UnsignedUtils.MAX_VALUE;
+        }
+        return WordFactory.unsigned(l).shiftLeft(1);
+    }
 }
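
Two illustrative sketches follow (plain Java, not part of the diff above; the demo class names are invented for this excerpt).

First, the discounted least-squares fit: ReciprocalLeastSquareFit fits y = a + b / x with exponentially decaying sample weights. On data that exactly matches the model, the parameters are recovered exactly once two distinct x values have been sampled, regardless of the decay, because the weighted normal equations then have a zero-residual solution.

    package com.oracle.svm.core.genscavenge;

    // Hypothetical demo class, assumed to live in the same package so it can use the
    // package-private ReciprocalLeastSquareFit added by this diff.
    final class ReciprocalFitDemo {
        public static void main(String[] args) {
            ReciprocalLeastSquareFit fit = new ReciprocalLeastSquareFit(100);
            fit.sample(1.0, 8.0);   // exact samples of y = 5 + 3/x: 5 + 3/1
            fit.sample(2.0, 6.5);   // 5 + 3/2
            fit.sample(4.0, 5.75);  // 5 + 3/4
            System.out.println(fit.estimate(10.0)); // ~5.3, since a ~= 5 and b ~= 3
            System.out.println(fit.getSlope(1.0));  // ~-3.0, since dy/dx = -b/x^2
        }
    }

Second, the UnsignedUtils conversions: toDouble halves a value whose sign bit is set so that it fits the non-negative long range, keeping the lowest bit so the result still rounds to nearest, and multiplies by 2.0 afterwards; fromDouble reverses this, saturating at the unsigned maximum. A standalone model on plain longs, usable for checking the edge cases:

    // Illustrative model of the conversions added to UnsignedUtils above; the class
    // name is invented, and longs stand in for UnsignedWord raw values.
    public final class UnsignedDoubleSketch {
        static double toDouble(long unsigned) {
            if (unsigned >= 0) {
                return unsigned;
            }
            // Unsigned halving plus the kept LSB, undone by * 2.0: no precision is lost
            // because the double mantissa (53 bits) cannot hold 64 bits anyway.
            return ((unsigned >>> 1) | (unsigned & 1)) * 2.0;
        }

        static long fromDouble(double d) { // saturating, mirrors JLS 5.1.3 behavior
            long l = (long) d; // NaN -> 0, too large -> Long.MAX_VALUE
            if (Double.isNaN(d) || l <= 0) {
                return 0L;
            }
            if (l < Long.MAX_VALUE) {
                return l;
            }
            long half = (long) (d / 2.0);
            if (half == Long.MAX_VALUE) { // still too large, or +infinity
                return -1L; // all 64 bits set: the unsigned maximum
            }
            return half << 1;
        }

        public static void main(String[] args) {
            System.out.println(toDouble(-1L));                       // ~1.8446744073709552E19 (2^64 - 1)
            System.out.println(fromDouble(123.9));                   // 123
            System.out.println(fromDouble(Double.MAX_VALUE) == -1L); // true: saturates at unsigned max
        }
    }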