From f32f2ee9ad89019658e2d7251b231c7974be6a1c Mon Sep 17 00:00:00 2001 From: Thomas Schrott Date: Mon, 19 May 2025 10:37:43 +0200 Subject: [PATCH 1/9] Register options for getting parsed by IsolateArgumentParser --- .../svm/core/IsolateArgumentParser.java | 61 +++++++++---------- .../oracle/svm/core/SubstrateGCOptions.java | 9 +-- .../com/oracle/svm/core/SubstrateOptions.java | 10 +-- .../svm/core/option/RuntimeOptionKey.java | 18 +++++- .../oracle/svm/core/util/ImageHeapList.java | 5 ++ .../hosted/SubstrateDiagnosticFeature.java | 13 +--- .../hosted/option/RuntimeOptionFeature.java | 37 +++++++++++ 7 files changed, 98 insertions(+), 55 deletions(-) diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/IsolateArgumentParser.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/IsolateArgumentParser.java index f103edbb59a2..98085dd7cdd6 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/IsolateArgumentParser.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/IsolateArgumentParser.java @@ -34,8 +34,8 @@ import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.charset.StandardCharsets; +import java.util.List; -import jdk.graal.compiler.word.Word; import org.graalvm.nativeimage.ImageSingletons; import org.graalvm.nativeimage.Platform; import org.graalvm.nativeimage.Platforms; @@ -55,9 +55,11 @@ import com.oracle.svm.core.headers.LibC; import com.oracle.svm.core.memory.UntrackedNullableNativeMemory; import com.oracle.svm.core.option.RuntimeOptionKey; +import com.oracle.svm.core.util.ImageHeapList; import com.oracle.svm.core.util.VMError; import jdk.graal.compiler.api.replacements.Fold; +import jdk.graal.compiler.word.Word; /** * Parses a small subset of the runtime arguments before the image heap is mapped and before the @@ -68,16 +70,8 @@ */ @AutomaticallyRegisteredImageSingleton public class IsolateArgumentParser { - private static final RuntimeOptionKey[] OPTIONS = { - SubstrateGCOptions.MinHeapSize, - SubstrateGCOptions.MaxHeapSize, - SubstrateGCOptions.MaxNewSize, - SubstrateGCOptions.ReservedAddressSpaceSize, - SubstrateOptions.ActiveProcessorCount, - SubstrateOptions.ConcealedOptions.AutomaticReferenceHandling, - SubstrateOptions.ConcealedOptions.UsePerfData, - SubstrateOptions.ConcealedOptions.MaxRAM - }; + @SuppressWarnings("unchecked")// + private final List> options = (List>) ImageHeapList.createGeneric(RuntimeOptionKey.class); private static final CGlobalData OPTION_NAMES = CGlobalDataFactory.createBytes(IsolateArgumentParser::createOptionNames); private static final CGlobalData OPTION_NAME_POSITIONS = CGlobalDataFactory.createBytes(IsolateArgumentParser::createOptionNamePosition); private static final CGlobalData OPTION_TYPES = CGlobalDataFactory.createBytes(IsolateArgumentParser::createOptionTypes); @@ -86,11 +80,8 @@ public class IsolateArgumentParser { /** * All values (regardless of their type) are stored as 8 byte values. See * {@link IsolateArguments#setParsedArgs(CLongPointer)} for more information. - * - * For directly initializing this array, no method depending on - * {@link IsolateArgumentParser#singleton()} may be used. 
*/ - private final long[] parsedOptionValues = new long[getOptions0().length]; + private long[] parsedOptionValues; private static final long K = 1024; private static final long M = K * K; @@ -108,11 +99,24 @@ public static IsolateArgumentParser singleton() { return ImageSingletons.lookup(IsolateArgumentParser.class); } + @Platforms(Platform.HOSTED_ONLY.class) + public synchronized void register(RuntimeOptionKey optionKey) { + assert optionKey != null; + assert singleton().parsedOptionValues == null; + + singleton().options.add(optionKey); + } + + @Platforms(Platform.HOSTED_ONLY.class) + public synchronized void sealOptions() { + singleton().parsedOptionValues = new long[getOptionCount()]; + } + @Platforms(Platform.HOSTED_ONLY.class) private static byte[] createOptionNames() { StringBuilder optionNames = new StringBuilder(); for (int i = 0; i < getOptionCount(); i++) { - optionNames.append(getOptions()[i].getName()); + optionNames.append(getOptions().get(i).getName()); optionNames.append("\0"); } return optionNames.toString().getBytes(StandardCharsets.ISO_8859_1); @@ -138,7 +142,7 @@ private static byte[] createOptionTypes() { byte[] result = new byte[Byte.BYTES * getOptionCount()]; ByteBuffer buffer = ByteBuffer.wrap(result).order(ByteOrder.nativeOrder()); for (int i = 0; i < getOptionCount(); i++) { - Class optionValueType = getOptions()[i].getDescriptor().getOptionValueType(); + Class optionValueType = getOptions().get(i).getDescriptor().getOptionValueType(); buffer.put(OptionValueType.fromClass(optionValueType)); } return result; @@ -149,7 +153,7 @@ private static byte[] createDefaultValues() { byte[] result = new byte[Long.BYTES * getOptionCount()]; ByteBuffer buffer = ByteBuffer.wrap(result).order(ByteOrder.nativeOrder()); for (int i = 0; i < getOptionCount(); i++) { - RuntimeOptionKey option = getOptions()[i]; + RuntimeOptionKey option = getOptions().get(i); VMError.guarantee(option.isIsolateCreationOnly(), "Options parsed by IsolateArgumentParser should all have the IsolateCreationOnly flag. 
%s doesn't", option); long value = toLong(option.getHostedValue(), option.getDescriptor().getOptionValueType()); buffer.putLong(value); @@ -173,18 +177,13 @@ private static long toLong(Object value, Class clazz) { } @Fold - protected static RuntimeOptionKey[] getOptions() { - return singleton().getOptions0(); - } - - @Fold - protected RuntimeOptionKey[] getOptions0() { - return OPTIONS; + protected static List> getOptions() { + return singleton().options; } @Fold protected static int getOptionCount() { - return getOptions().length; + return getOptions().size(); } @Uninterruptible(reason = "Still being initialized.") @@ -296,7 +295,7 @@ public void persistOptions(IsolateArguments arguments) { public void verifyOptionValues() { for (int i = 0; i < getOptionCount(); i++) { - RuntimeOptionKey option = getOptions()[i]; + RuntimeOptionKey option = getOptions().get(i); if (shouldValidate(option)) { validate(option, getOptionValue(i)); } @@ -336,7 +335,7 @@ protected CCharPointer getCCharPointerOptionValue(int index) { } protected Object getOptionValue(int index) { - Class optionValueType = getOptions()[index].getDescriptor().getOptionValueType(); + Class optionValueType = getOptions().get(index).getDescriptor().getOptionValueType(); long value = parsedOptionValues[index]; if (optionValueType == Boolean.class) { assert value == 0L || value == 1L : value; @@ -553,9 +552,9 @@ private static CCharPointer startsWith(CCharPointer input, CCharPointer prefix) @Fold public static int getOptionIndex(RuntimeOptionKey key) { - RuntimeOptionKey[] options = getOptions(); - for (int i = 0; i < options.length; i++) { - if (options[i] == key) { + List> options = getOptions(); + for (int i = 0; i < options.size(); i++) { + if (options.get(i) == key) { return i; } } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateGCOptions.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateGCOptions.java index 982ded5cea10..e926ef55a45d 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateGCOptions.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateGCOptions.java @@ -26,6 +26,7 @@ import static com.oracle.svm.core.option.RuntimeOptionKey.RuntimeOptionKeyFlag.Immutable; import static com.oracle.svm.core.option.RuntimeOptionKey.RuntimeOptionKeyFlag.IsolateCreationOnly; +import static com.oracle.svm.core.option.RuntimeOptionKey.RuntimeOptionKeyFlag.RegisterForIsolateArgumentParser; import jdk.graal.compiler.word.Word; import org.graalvm.collections.EconomicMap; @@ -47,7 +48,7 @@ @DuplicatedInNativeCode public class SubstrateGCOptions { @Option(help = "The minimum heap size at run-time, in bytes.", type = OptionType.User)// - public static final RuntimeOptionKey MinHeapSize = new NotifyGCRuntimeOptionKey<>(0L, IsolateCreationOnly) { + public static final RuntimeOptionKey MinHeapSize = new NotifyGCRuntimeOptionKey<>(0L, RegisterForIsolateArgumentParser) { @Override protected void onValueUpdate(EconomicMap, Object> values, Long oldValue, Long newValue) { if (!SubstrateUtil.HOSTED) { @@ -58,7 +59,7 @@ protected void onValueUpdate(EconomicMap, Object> values, Long oldV }; @Option(help = "The maximum heap size at run-time, in bytes.", type = OptionType.User)// - public static final RuntimeOptionKey MaxHeapSize = new NotifyGCRuntimeOptionKey<>(0L, IsolateCreationOnly) { + public static final RuntimeOptionKey MaxHeapSize = new NotifyGCRuntimeOptionKey<>(0L, RegisterForIsolateArgumentParser) { @Override protected void 
onValueUpdate(EconomicMap, Object> values, Long oldValue, Long newValue) { if (!SubstrateUtil.HOSTED) { @@ -69,7 +70,7 @@ protected void onValueUpdate(EconomicMap, Object> values, Long oldV }; @Option(help = "The maximum size of the young generation at run-time, in bytes", type = OptionType.User)// - public static final RuntimeOptionKey MaxNewSize = new NotifyGCRuntimeOptionKey<>(0L, IsolateCreationOnly) { + public static final RuntimeOptionKey MaxNewSize = new NotifyGCRuntimeOptionKey<>(0L, RegisterForIsolateArgumentParser) { @Override protected void onValueUpdate(EconomicMap, Object> values, Long oldValue, Long newValue) { if (!SubstrateUtil.HOSTED) { @@ -80,7 +81,7 @@ protected void onValueUpdate(EconomicMap, Object> values, Long oldV }; @Option(help = "The number of bytes that should be reserved for the heap address space.", type = OptionType.Expert)// - public static final RuntimeOptionKey ReservedAddressSpaceSize = new RuntimeOptionKey<>(0L, IsolateCreationOnly); + public static final RuntimeOptionKey ReservedAddressSpaceSize = new RuntimeOptionKey<>(0L, RegisterForIsolateArgumentParser); @Option(help = "Exit on the first occurrence of an out-of-memory error that is thrown because the Java heap is out of memory.", type = OptionType.Expert)// public static final RuntimeOptionKey ExitOnOutOfMemoryError = new RuntimeOptionKey<>(false, Immutable); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java index 73d2aa17ab04..7d96b7094577 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java @@ -25,7 +25,7 @@ package com.oracle.svm.core; import static com.oracle.svm.core.option.RuntimeOptionKey.RuntimeOptionKeyFlag.Immutable; -import static com.oracle.svm.core.option.RuntimeOptionKey.RuntimeOptionKeyFlag.IsolateCreationOnly; +import static com.oracle.svm.core.option.RuntimeOptionKey.RuntimeOptionKeyFlag.RegisterForIsolateArgumentParser; import static com.oracle.svm.core.option.RuntimeOptionKey.RuntimeOptionKeyFlag.RelevantForCompilationIsolates; import static jdk.graal.compiler.core.common.SpectrePHTMitigations.None; import static jdk.graal.compiler.core.common.SpectrePHTMitigations.Options.SpectrePHTBarriers; @@ -1121,11 +1121,11 @@ public Boolean getValue(OptionValues values) { /** Use {@link ReferenceHandler#isExecutedManually()} instead. */ @Option(help = "Determines if the reference handling is executed automatically or manually.", type = OptionType.Expert) // - public static final RuntimeOptionKey AutomaticReferenceHandling = new RuntimeOptionKey<>(true, IsolateCreationOnly); + public static final RuntimeOptionKey AutomaticReferenceHandling = new RuntimeOptionKey<>(true, RegisterForIsolateArgumentParser); /** Use {@link com.oracle.svm.core.jvmstat.PerfManager#usePerfData()} instead. */ @Option(help = "Flag to disable jvmstat instrumentation for performance testing.")// - public static final RuntimeOptionKey UsePerfData = new RuntimeOptionKey<>(true, IsolateCreationOnly); + public static final RuntimeOptionKey UsePerfData = new RuntimeOptionKey<>(true, RegisterForIsolateArgumentParser); /** Use {@link SubstrateOptions#maxJavaStackTraceDepth()} instead. 
*/ @Option(help = "The maximum number of lines in the stack trace for Java exceptions (0 means all)", type = OptionType.User)// @@ -1147,7 +1147,7 @@ protected void onValueUpdate(EconomicMap, Object> values, Integer o protected static final HostedOptionKey InstallExitHandlers = new HostedOptionKey<>(false); @Option(help = "Physical memory size (in bytes). By default, the value is queried from the OS/container during VM startup.", type = OptionType.Expert)// - public static final RuntimeOptionKey MaxRAM = new RuntimeOptionKey<>(0L, IsolateCreationOnly); + public static final RuntimeOptionKey MaxRAM = new RuntimeOptionKey<>(0L, RegisterForIsolateArgumentParser); /** Use {@link SubstrateOptions#getAllocatePrefetchStyle()} instead. */ @Option(help = "Generated code style for prefetch instructions: for 0 or less no prefetch instructions are generated and for 1 or more prefetch instructions are introduced after each allocation.")// @@ -1176,7 +1176,7 @@ public static boolean needsExitHandlers() { } @Option(help = "Overwrites the available number of processors provided by the OS. Any value <= 0 means using the processor count from the OS.")// - public static final RuntimeOptionKey ActiveProcessorCount = new RuntimeOptionKey<>(-1, IsolateCreationOnly, RelevantForCompilationIsolates); + public static final RuntimeOptionKey ActiveProcessorCount = new RuntimeOptionKey<>(-1, RegisterForIsolateArgumentParser, RelevantForCompilationIsolates); @Option(help = "For internal purposes only. Disables type id result verification even when running with assertions enabled.", stability = OptionStability.EXPERIMENTAL, type = OptionType.Debug)// public static final HostedOptionKey DisableTypeIdResultVerification = new HostedOptionKey<>(true); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/option/RuntimeOptionKey.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/option/RuntimeOptionKey.java index 2312bf14feab..63ca336e6832 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/option/RuntimeOptionKey.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/option/RuntimeOptionKey.java @@ -112,11 +112,16 @@ public boolean shouldCopyToCompilationIsolate() { } public boolean isImmutable() { - return EnumBitmask.hasBit(flags, RuntimeOptionKeyFlag.Immutable) || EnumBitmask.hasBit(flags, RuntimeOptionKeyFlag.IsolateCreationOnly); + return EnumBitmask.hasBit(flags, RuntimeOptionKeyFlag.Immutable) || EnumBitmask.hasBit(flags, RuntimeOptionKeyFlag.IsolateCreationOnly) || + EnumBitmask.hasBit(flags, RuntimeOptionKeyFlag.RegisterForIsolateArgumentParser); } public boolean isIsolateCreationOnly() { - return EnumBitmask.hasBit(flags, RuntimeOptionKeyFlag.IsolateCreationOnly); + return EnumBitmask.hasBit(flags, RuntimeOptionKeyFlag.IsolateCreationOnly) || EnumBitmask.hasBit(flags, RuntimeOptionKeyFlag.RegisterForIsolateArgumentParser); + } + + public boolean shouldRegisterForIsolateArgumentParser() { + return EnumBitmask.hasBit(flags, RuntimeOptionKeyFlag.RegisterForIsolateArgumentParser); } @Fold @@ -137,10 +142,17 @@ public enum RuntimeOptionKeyFlag { /** * If this flag is set, then the option is parsed during isolate creation and its value can * typically only be set during isolate creation. This implies {@link #Immutable}. + */ + IsolateCreationOnly, + /** + * If this flag is set, then the option is always included in the image. 
The option is also + * registered to be parsed by {@link IsolateArgumentParser} and its value can typically + * only be set during isolate creation. This implies {@link #Immutable} and + * {@link #IsolateCreationOnly}. *

* See {@link IsolateArgumentParser#verifyOptionValues()} for the validation that these * options are not changed after isolate creation and potential exceptions to the rule. */ - IsolateCreationOnly + RegisterForIsolateArgumentParser, } } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/util/ImageHeapList.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/util/ImageHeapList.java index 2500eafc09b9..f42b7b4e7541 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/util/ImageHeapList.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/util/ImageHeapList.java @@ -57,6 +57,11 @@ public static List create(Class elementClass) { return create(elementClass, null); } + @Platforms(Platform.HOSTED_ONLY.class) // + public static List createGeneric(Class elementClass) { + return create(elementClass, null); + } + @Platforms(Platform.HOSTED_ONLY.class) // public static List create(Class elementClass, Comparator comparator) { VMError.guarantee(!BuildPhaseProvider.isAnalysisFinished(), "Trying to create an ImageHeapList after analysis."); diff --git a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/SubstrateDiagnosticFeature.java b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/SubstrateDiagnosticFeature.java index 71d71c77a119..139a1ebcb8c7 100644 --- a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/SubstrateDiagnosticFeature.java +++ b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/SubstrateDiagnosticFeature.java @@ -24,19 +24,17 @@ */ package com.oracle.svm.hosted; -import java.lang.reflect.Field; +import static com.oracle.svm.hosted.option.RuntimeOptionFeature.registerOptionAsRead; import org.graalvm.nativeimage.ImageSingletons; import org.graalvm.nativeimage.hosted.Feature; -import com.oracle.graal.pointsto.meta.AnalysisField; import com.oracle.svm.core.SubstrateDiagnostics; import com.oracle.svm.core.SubstrateDiagnostics.DiagnosticThunkRegistry; import com.oracle.svm.core.SubstrateDiagnostics.FatalErrorState; import com.oracle.svm.core.SubstrateOptions; import com.oracle.svm.core.feature.AutomaticallyRegisteredFeature; import com.oracle.svm.core.feature.InternalFeature; -import com.oracle.svm.core.util.VMError; import com.oracle.svm.hosted.FeatureImpl.BeforeAnalysisAccessImpl; @AutomaticallyRegisteredFeature @@ -58,13 +56,4 @@ public void beforeAnalysis(Feature.BeforeAnalysisAccess access) { registerOptionAsRead(accessImpl, SubstrateDiagnostics.Options.class, SubstrateDiagnostics.Options.ImplicitExceptionWithoutStacktraceIsFatal.getName()); } - private static void registerOptionAsRead(BeforeAnalysisAccessImpl accessImpl, Class clazz, String fieldName) { - try { - Field javaField = clazz.getField(fieldName); - AnalysisField analysisField = accessImpl.getMetaAccess().lookupJavaField(javaField); - accessImpl.registerAsRead(analysisField, "it is a runtime option field"); - } catch (NoSuchFieldException | SecurityException e) { - throw VMError.shouldNotReachHere(e); - } - } } diff --git a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/option/RuntimeOptionFeature.java b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/option/RuntimeOptionFeature.java index c694221f47eb..f6c37112611d 100644 --- a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/option/RuntimeOptionFeature.java +++ b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/option/RuntimeOptionFeature.java @@ -24,12 +24,19 @@ */ package 
com.oracle.svm.hosted.option; +import java.lang.reflect.Field; + +import org.graalvm.collections.UnmodifiableEconomicMap; import org.graalvm.nativeimage.ImageSingletons; import com.oracle.graal.pointsto.ObjectScanner; +import com.oracle.graal.pointsto.meta.AnalysisField; +import com.oracle.svm.core.IsolateArgumentParser; import com.oracle.svm.core.feature.AutomaticallyRegisteredFeature; import com.oracle.svm.core.feature.InternalFeature; import com.oracle.svm.core.option.HostedOptionKey; +import com.oracle.svm.core.option.HostedOptionValues; +import com.oracle.svm.core.option.RuntimeOptionKey; import com.oracle.svm.core.option.RuntimeOptionParser; import com.oracle.svm.core.util.VMError; import com.oracle.svm.hosted.FeatureImpl; @@ -54,6 +61,26 @@ public void duringSetup(DuringSetupAccess a) { access.registerObjectReachableCallback(OptionKey.class, this::collectOptionKeys); } + @Override + public void beforeAnalysis(BeforeAnalysisAccess access) { + FeatureImpl.BeforeAnalysisAccessImpl accessImpl = (FeatureImpl.BeforeAnalysisAccessImpl) access; + + UnmodifiableEconomicMap, Object> map = HostedOptionValues.singleton().getMap(); + for (OptionKey key : map.getKeys()) { + if (key instanceof RuntimeOptionKey runtimeOptionKey && runtimeOptionKey.shouldRegisterForIsolateArgumentParser()) { + /* + * The list of options IsolateArgumentParser has to parse, is built dynamically, to + * include only options of the current configuration. Here, all options that should + * get parsed by the IsolateArgumentParser are added to this list. + */ + IsolateArgumentParser.singleton().register(runtimeOptionKey); + registerOptionAsRead(accessImpl, runtimeOptionKey.getDescriptor().getDeclaringClass(), runtimeOptionKey.getName()); + } + } + + IsolateArgumentParser.singleton().sealOptions(); + } + @SuppressWarnings("unused") private void collectOptionKeys(DuringAnalysisAccess access, OptionKey optionKey, ObjectScanner.ScanReason reason) { if (optionKey instanceof HostedOptionKey) { @@ -70,4 +97,14 @@ private void collectOptionKeys(DuringAnalysisAccess access, OptionKey optionK runtimeOptionParser.addDescriptor(optionDescriptor); } } + + public static void registerOptionAsRead(FeatureImpl.BeforeAnalysisAccessImpl accessImpl, Class clazz, String fieldName) { + try { + Field javaField = clazz.getField(fieldName); + AnalysisField analysisField = accessImpl.getMetaAccess().lookupJavaField(javaField); + accessImpl.registerAsRead(analysisField, "it is a runtime option field"); + } catch (NoSuchFieldException | SecurityException e) { + throw VMError.shouldNotReachHere(e); + } + } } From 1703686da08c6da972fb85e3226b077ffcc8e2fd Mon Sep 17 00:00:00 2001 From: Thomas Schrott Date: Fri, 8 Nov 2024 14:19:57 +0100 Subject: [PATCH 2/9] Move TLAB options --- .../SerialAndEpsilonGCOptions.java | 9 ++++++++ .../graal/GenScavengeAllocationSupport.java | 3 ++- .../oracle/svm/core/SubstrateGCOptions.java | 21 ++++++++++++++++++- 3 files changed, 31 insertions(+), 2 deletions(-) diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/SerialAndEpsilonGCOptions.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/SerialAndEpsilonGCOptions.java index 417f10071ceb..a09f1b06db0c 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/SerialAndEpsilonGCOptions.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/SerialAndEpsilonGCOptions.java @@ -24,11 +24,14 @@ */ package 
com.oracle.svm.core.genscavenge; +import static com.oracle.svm.core.option.RuntimeOptionKey.RuntimeOptionKeyFlag.RegisterForIsolateArgumentParser; + import com.oracle.svm.core.SubstrateOptions; import com.oracle.svm.core.option.HostedOptionKey; import com.oracle.svm.core.option.NotifyGCRuntimeOptionKey; import com.oracle.svm.core.option.RuntimeOptionKey; import com.oracle.svm.core.util.UserError; + import jdk.graal.compiler.options.Option; import jdk.graal.compiler.options.OptionType; @@ -62,6 +65,12 @@ public final class SerialAndEpsilonGCOptions { @Option(help = "Number of bytes at the beginning of each heap chunk that are not used for payload data, i.e., can be freely used as metadata by the heap chunk provider. Serial and epsilon GC only.", type = OptionType.Debug) // public static final HostedOptionKey HeapChunkHeaderPadding = new HostedOptionKey<>(0, SerialAndEpsilonGCOptions::validateSerialOrEpsilonHostedOption); + @Option(help = "Starting TLAB size (in bytes); zero means set ergonomically.", type = OptionType.Expert)// + public static final RuntimeOptionKey InitialTLABSize = new RuntimeOptionKey<>(8 * 1024L, SerialAndEpsilonGCOptions::validateSerialOrEpsilonRuntimeOption, RegisterForIsolateArgumentParser); + + @Option(help = "Print information about TLABs. Printed when The TLABs are retired before a GC, and during the resizing of the TLABs. Serial and epsilon GC only.", type = OptionType.Expert)// + public static final HostedOptionKey PrintTLAB = new HostedOptionKey<>(false, SerialAndEpsilonGCOptions::validateSerialOrEpsilonHostedOption); + private SerialAndEpsilonGCOptions() { } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSupport.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSupport.java index b935cef578f3..bbd305f6e723 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSupport.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSupport.java @@ -30,6 +30,7 @@ import com.oracle.svm.core.Uninterruptible; import com.oracle.svm.core.genscavenge.HeapImpl; +import com.oracle.svm.core.SubstrateGCOptions; import com.oracle.svm.core.genscavenge.HeapParameters; import com.oracle.svm.core.genscavenge.ThreadLocalAllocation; import com.oracle.svm.core.graal.meta.SubstrateForeignCallsProvider; @@ -99,7 +100,7 @@ public SubstrateForeignCallDescriptor getNewDynamicHub() { @Override public boolean useTLAB() { - return true; + return SubstrateGCOptions.TlabOptions.UseTLAB.getValue(); } @Override diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateGCOptions.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateGCOptions.java index e926ef55a45d..2280e8b53958 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateGCOptions.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateGCOptions.java @@ -28,7 +28,6 @@ import static com.oracle.svm.core.option.RuntimeOptionKey.RuntimeOptionKeyFlag.IsolateCreationOnly; import static com.oracle.svm.core.option.RuntimeOptionKey.RuntimeOptionKeyFlag.RegisterForIsolateArgumentParser; -import jdk.graal.compiler.word.Word; import org.graalvm.collections.EconomicMap; import com.oracle.svm.core.heap.HeapSizeVerifier; @@ -40,6 +39,7 @@ import jdk.graal.compiler.options.Option; 
import jdk.graal.compiler.options.OptionKey; import jdk.graal.compiler.options.OptionType; +import jdk.graal.compiler.word.Word; /** * Garbage collection-specific options that are supported by all garbage collectors. Some of these @@ -47,6 +47,8 @@ */ @DuplicatedInNativeCode public class SubstrateGCOptions { + private static final int K = 1024; + @Option(help = "The minimum heap size at run-time, in bytes.", type = OptionType.User)// public static final RuntimeOptionKey MinHeapSize = new NotifyGCRuntimeOptionKey<>(0L, RegisterForIsolateArgumentParser) { @Override @@ -103,4 +105,21 @@ protected void onValueUpdate(EconomicMap, Object> values, Long oldV @Option(help = "Determines if references from runtime-compiled code to Java heap objects should be treated as strong or weak.", type = OptionType.Debug)// public static final HostedOptionKey TreatRuntimeCodeInfoReferencesAsWeak = new HostedOptionKey<>(true); + + @DuplicatedInNativeCode + public static class TlabOptions { + @Option(help = "Use thread-local object allocation.", type = OptionType.Expert)// + public static final HostedOptionKey UseTLAB = new HostedOptionKey<>(true); + + @Option(help = "Dynamically resize TLAB size for threads.", type = OptionType.Expert)// + public static final RuntimeOptionKey ResizeTLAB = new RuntimeOptionKey<>(true, IsolateCreationOnly); + + @Option(help = "Minimum allowed TLAB size (in bytes).", type = OptionType.Expert)// + public static final RuntimeOptionKey MinTLABSize = new RuntimeOptionKey<>(2L * K, RegisterForIsolateArgumentParser); + + @Option(help = "Starting TLAB size (in bytes); zero means set ergonomically.", type = OptionType.Expert)// + public static final RuntimeOptionKey TLABSize = new RuntimeOptionKey<>(0L, RegisterForIsolateArgumentParser); + + } + } From 88981d57baa5e4d357aed04701f91f8e050d8fd1 Mon Sep 17 00:00:00 2001 From: Thomas Schrott Date: Wed, 18 Dec 2024 13:04:43 +0100 Subject: [PATCH 3/9] Add support for runtime option validation --- .../oracle/svm/core/jdk/RuntimeSupport.java | 3 + .../svm/core/option/RuntimeOptionKey.java | 11 +-- .../RuntimeOptionValidationSupport.java | 84 +++++++++++++++++++ 3 files changed, 93 insertions(+), 5 deletions(-) create mode 100644 substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/option/RuntimeOptionValidationSupport.java diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/RuntimeSupport.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/RuntimeSupport.java index a07ad24ac362..4f5c9121b63a 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/RuntimeSupport.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/RuntimeSupport.java @@ -38,6 +38,7 @@ import com.oracle.svm.core.Isolates; import com.oracle.svm.core.feature.AutomaticallyRegisteredImageSingleton; import com.oracle.svm.core.heap.HeapSizeVerifier; +import com.oracle.svm.core.option.RuntimeOptionValidationSupport; import com.oracle.svm.core.util.VMError; import jdk.graal.compiler.api.replacements.Fold; @@ -95,6 +96,8 @@ public boolean isUninitialized() { public void initialize() { boolean shouldInitialize = initializationState.compareAndSet(InitializationState.Uninitialized, InitializationState.InProgress); if (shouldInitialize) { + RuntimeOptionValidationSupport.singleton().validate(); + IsolateArgumentParser.singleton().verifyOptionValues(); HeapSizeVerifier.verifyHeapOptions(); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/option/RuntimeOptionKey.java 
b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/option/RuntimeOptionKey.java index 63ca336e6832..c5d2f9ec7939 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/option/RuntimeOptionKey.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/option/RuntimeOptionKey.java @@ -47,7 +47,8 @@ * @see com.oracle.svm.core.option */ public class RuntimeOptionKey extends OptionKey implements SubstrateOptionKey { - private final Consumer> validation; + @Platforms(Platform.HOSTED_ONLY.class)// + private final Consumer> buildTimeValidation; private final int flags; @Platforms(Platform.HOSTED_ONLY.class) @@ -56,9 +57,9 @@ public RuntimeOptionKey(T defaultValue, RuntimeOptionKeyFlag... flags) { } @Platforms(Platform.HOSTED_ONLY.class) - public RuntimeOptionKey(T defaultValue, Consumer> validation, RuntimeOptionKeyFlag... flags) { + public RuntimeOptionKey(T defaultValue, Consumer> buildTimeValidation, RuntimeOptionKeyFlag... flags) { super(defaultValue); - this.validation = validation; + this.buildTimeValidation = buildTimeValidation; this.flags = EnumBitmask.computeBitmask(flags); } @@ -102,8 +103,8 @@ public boolean hasBeenSet() { @Override @Platforms(Platform.HOSTED_ONLY.class) public void validate() { - if (validation != null) { - validation.accept(this); + if (buildTimeValidation != null) { + buildTimeValidation.accept(this); } } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/option/RuntimeOptionValidationSupport.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/option/RuntimeOptionValidationSupport.java new file mode 100644 index 000000000000..5b9d1926c411 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/option/RuntimeOptionValidationSupport.java @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.option; + +import java.util.List; +import java.util.function.Consumer; + +import org.graalvm.nativeimage.ImageSingletons; +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; + +import com.oracle.svm.core.feature.AutomaticallyRegisteredImageSingleton; +import com.oracle.svm.core.util.ImageHeapList; + +import jdk.graal.compiler.api.replacements.Fold; + +/** + * Allows to register commands for validating runtime options at run time. 
+ */ +@AutomaticallyRegisteredImageSingleton +public class RuntimeOptionValidationSupport { + @SuppressWarnings("unchecked")// + private final List> validations = (List>) ImageHeapList.createGeneric(RuntimeOptionValidation.class); + + @Platforms(Platform.HOSTED_ONLY.class) + public RuntimeOptionValidationSupport() { + } + + @Fold + public static RuntimeOptionValidationSupport singleton() { + return ImageSingletons.lookup(RuntimeOptionValidationSupport.class); + } + + @Platforms(Platform.HOSTED_ONLY.class) + public synchronized void register(RuntimeOptionValidation validation) { + assert validation != null; + validations.add(validation); + } + + public void validate() { + for (RuntimeOptionValidation validation : validations) { + validation.validate(); + } + } + + public static class RuntimeOptionValidation { + + private final Consumer> validation; + private final RuntimeOptionKey optionKey; + + public RuntimeOptionValidation(Consumer> validation, RuntimeOptionKey optionKey) { + this.validation = validation; + this.optionKey = optionKey; + } + + void validate() { + validation.accept(optionKey); + } + + } + +} From 9c7c1c5f1f2f005e59abed2f05aa8d6409ffc393 Mon Sep 17 00:00:00 2001 From: Thomas Schrott Date: Wed, 18 Dec 2024 13:04:31 +0100 Subject: [PATCH 4/9] Cache and validate TLAB options --- .../svm/core/genscavenge/TlabOptionCache.java | 174 ++++++++++++++++++ .../graal/GenScavengeGCFeature.java | 4 + 2 files changed, 178 insertions(+) create mode 100644 substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/TlabOptionCache.java diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/TlabOptionCache.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/TlabOptionCache.java new file mode 100644 index 000000000000..0a853ab3b301 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/TlabOptionCache.java @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.svm.core.genscavenge; + +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; +import static com.oracle.svm.core.genscavenge.TlabSupport.maxSize; + +import org.graalvm.nativeimage.ImageSingletons; +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; + +import com.oracle.svm.core.IsolateArgumentParser; +import com.oracle.svm.core.SubstrateGCOptions; +import com.oracle.svm.core.SubstrateOptions; +import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.config.ConfigurationValues; +import com.oracle.svm.core.jdk.UninterruptibleUtils; +import com.oracle.svm.core.option.RuntimeOptionValidationSupport; + +import jdk.graal.compiler.api.replacements.Fold; +import jdk.graal.compiler.core.common.NumUtil; + +/** + * Sanitize and cache TLAB option values. Unfortunately, proper error reporting is impossible during + * early VM startup. So, we need to ensure that the used values are good enough so that the VM + * startup can finish. Once the VM reaches a point where it can execute Java code, it validates the + * options and reports errors (see {@link #registerOptionValidations}). + */ +public class TlabOptionCache { + + private long minTlabSize; + private long tlabSize; + private long initialTLABSize; + + @Platforms(Platform.HOSTED_ONLY.class) + public TlabOptionCache() { + minTlabSize = getAbsoluteMinTlabSize(); + tlabSize = SubstrateGCOptions.TlabOptions.TLABSize.getHostedValue(); + initialTLABSize = SerialAndEpsilonGCOptions.InitialTLABSize.getHostedValue(); + } + + @Fold + public static TlabOptionCache singleton() { + return ImageSingletons.lookup(TlabOptionCache.class); + } + + /* + * The minimum size a TLAB must always have. A smaller TLAB may lead to a VM crash. 
+ */ + @Fold + static long getAbsoluteMinTlabSize() { + int additionalHeaderBytes = SubstrateOptions.AdditionalHeaderBytes.getValue(); + long absoluteMinTlabSize = 2 * 1024L + additionalHeaderBytes; + return NumUtil.roundUp(absoluteMinTlabSize, ConfigurationValues.getObjectLayout().getAlignment()); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public long getMinTlabSize() { + return minTlabSize; + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public long getTlabSize() { + return tlabSize; + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public long getInitialTLABSize() { + return initialTLABSize; + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public void cacheOptionValues() { + int minTlabSizeIdx = IsolateArgumentParser.getOptionIndex(SubstrateGCOptions.TlabOptions.MinTLABSize); + long minTlabSizeValue = IsolateArgumentParser.singleton().getLongOptionValue(minTlabSizeIdx); + cacheMinTlabSize(minTlabSizeValue); + + int tlabSizeIdx = IsolateArgumentParser.getOptionIndex(SubstrateGCOptions.TlabOptions.TLABSize); + long tlabSizeValue = IsolateArgumentParser.singleton().getLongOptionValue(tlabSizeIdx); + cacheTlabSize(tlabSizeValue); + + int initialTlabSizeIdx = IsolateArgumentParser.getOptionIndex(SerialAndEpsilonGCOptions.InitialTLABSize); + long initialTlabSizeValue = IsolateArgumentParser.singleton().getLongOptionValue(initialTlabSizeIdx); + cacheInitialTlabSize(initialTlabSizeValue, initialTLABSize != initialTlabSizeValue); + } + + public static void registerOptionValidations() { + + long maxSize = maxSize().rawValue(); + + RuntimeOptionValidationSupport validationSupport = RuntimeOptionValidationSupport.singleton(); + + validationSupport.register(new RuntimeOptionValidationSupport.RuntimeOptionValidation<>(optionKey -> { + long minTlabSizeValue = optionKey.getValue(); + + if (optionKey.hasBeenSet() && minTlabSizeValue < getAbsoluteMinTlabSize()) { + throw new IllegalArgumentException(String.format("MinTLABSize (%d) must be greater than or equal to reserved area in TLAB (%d).", minTlabSizeValue, getAbsoluteMinTlabSize())); + } + if (minTlabSizeValue > maxSize) { + throw new IllegalArgumentException(String.format("MinTLABSize (%d) must be less than or equal to ergonomic TLAB maximum (%d).", minTlabSizeValue, maxSize)); + } + }, SubstrateGCOptions.TlabOptions.MinTLABSize)); + + validationSupport.register(new RuntimeOptionValidationSupport.RuntimeOptionValidation<>(optionKey -> { + // Check that TLABSize is still the default value or size >= abs min && size <= abs max. 
+ long tlabSizeValue = optionKey.getValue(); + if (optionKey.hasBeenSet() && tlabSizeValue < SubstrateGCOptions.TlabOptions.MinTLABSize.getValue()) { + throw new IllegalArgumentException( + String.format("TLABSize (%d) must be greater than or equal to MinTLABSize (%d).", tlabSizeValue, SubstrateGCOptions.TlabOptions.MinTLABSize.getValue())); + } + if (tlabSizeValue > maxSize) { + throw new IllegalArgumentException(String.format("TLABSize (%d) must be less than or equal to ergonomic TLAB maximum size (%d).", tlabSizeValue, maxSize)); + } + }, SubstrateGCOptions.TlabOptions.TLABSize)); + + validationSupport.register(new RuntimeOptionValidationSupport.RuntimeOptionValidation<>(optionKey -> { + long initialTlabSizeValue = optionKey.getValue(); + if (initialTlabSizeValue < SubstrateGCOptions.TlabOptions.MinTLABSize.getValue()) { + throw new IllegalArgumentException( + String.format("InitialTLABSize (%d) must be greater than or equal to MinTLABSize (%d).", initialTlabSizeValue, SubstrateGCOptions.TlabOptions.MinTLABSize.getValue())); + } + if (initialTlabSizeValue > maxSize) { + throw new IllegalArgumentException(String.format("TLABSize (%d) must be less than or equal to ergonomic TLAB maximum size (%d).", initialTlabSizeValue, maxSize)); + } + }, SerialAndEpsilonGCOptions.InitialTLABSize)); + + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + private void cacheMinTlabSize(long optionValue) { + if (getAbsoluteMinTlabSize() <= optionValue && optionValue <= maxSize().rawValue()) { + minTlabSize = optionValue; + } + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + private void cacheTlabSize(long optionValue) { + if (getAbsoluteMinTlabSize() <= optionValue && optionValue <= maxSize().rawValue()) { + tlabSize = optionValue; + } + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + private void cacheInitialTlabSize(long optionValue, boolean hasBeenSet) { + if (!hasBeenSet && minTlabSize > initialTLABSize) { + initialTLABSize = minTlabSize; + } else if (getAbsoluteMinTlabSize() <= optionValue && optionValue <= maxSize().rawValue()) { + initialTLABSize = UninterruptibleUtils.Math.max(minTlabSize, optionValue); + } + } + +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeGCFeature.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeGCFeature.java index 0ad525eaf17f..b73da342d527 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeGCFeature.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeGCFeature.java @@ -45,6 +45,7 @@ import com.oracle.svm.core.genscavenge.ImageHeapInfo; import com.oracle.svm.core.genscavenge.PinnedObjectSupportImpl; import com.oracle.svm.core.genscavenge.SerialGCOptions; +import com.oracle.svm.core.genscavenge.TlabOptionCache; import com.oracle.svm.core.genscavenge.jvmstat.EpsilonGCPerfData; import com.oracle.svm.core.genscavenge.jvmstat.SerialGCPerfData; import com.oracle.svm.core.genscavenge.remset.CardTableBasedRememberedSet; @@ -101,6 +102,7 @@ public void duringSetup(DuringSetupAccess access) { ImageSingletons.add(Heap.class, new HeapImpl()); ImageSingletons.add(ImageHeapInfo.class, new ImageHeapInfo()); ImageSingletons.add(GCAllocationSupport.class, new GenScavengeAllocationSupport()); + 
ImageSingletons.add(TlabOptionCache.class, new TlabOptionCache()); if (ImageLayerBuildingSupport.firstImageBuild()) { ImageSingletons.add(PinnedObjectSupport.class, new PinnedObjectSupportImpl()); } @@ -141,6 +143,8 @@ public void beforeAnalysis(BeforeAnalysisAccess access) { // Needed for the barrier set. access.registerAsUsed(Object[].class); + + TlabOptionCache.registerOptionValidations(); } private static ImageHeapInfo getCurrentLayerImageHeapInfo() { From e742187f23ae747195bf85acad73261e15ddc4ee Mon Sep 17 00:00:00 2001 From: Thomas Schrott Date: Mon, 11 Nov 2024 09:26:39 +0100 Subject: [PATCH 5/9] Add heap allocation --- .../oracle/svm/core/genscavenge/GCImpl.java | 3 +- .../svm/core/genscavenge/HeapAllocation.java | 308 ++++++++++++++++++ .../svm/core/genscavenge/HeapChunk.java | 26 +- .../oracle/svm/core/genscavenge/HeapImpl.java | 24 +- .../RuntimeImageHeapChunkWriter.java | 19 +- .../svm/core/genscavenge/YoungGeneration.java | 19 +- .../compacting/PlanningVisitor.java | 5 +- 7 files changed, 377 insertions(+), 27 deletions(-) create mode 100644 substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapAllocation.java diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java index 15793380c44e..717fced5a6a8 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java @@ -245,7 +245,8 @@ assert getCollectionEpoch().equal(data.getRequestingEpoch()) || Timer collectionTimer = timers.collection.start(); try { - ThreadLocalAllocation.disableAndFlushForAllThreads(); + HeapImpl.getHeapImpl().makeParseable(); + GenScavengeMemoryPoolMXBeans.singleton().notifyBeforeCollection(); HeapImpl.getAccounting().notifyBeforeCollection(); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapAllocation.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapAllocation.java new file mode 100644 index 000000000000..7e8efcf7aed5 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapAllocation.java @@ -0,0 +1,308 @@ +/* + * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.genscavenge; + +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; +import static com.oracle.svm.core.genscavenge.AlignedHeapChunk.AlignedHeader; +import static com.oracle.svm.core.genscavenge.HeapChunk.CHUNK_HEADER_TOP_IDENTITY; +import static jdk.graal.compiler.nodes.extended.MembarNode.FenceKind; + +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; +import org.graalvm.nativeimage.StackValue; +import org.graalvm.nativeimage.c.type.WordPointer; +import org.graalvm.word.Pointer; +import org.graalvm.word.UnsignedWord; + +import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.config.ConfigurationValues; +import com.oracle.svm.core.config.ObjectLayout; +import com.oracle.svm.core.log.Log; +import com.oracle.svm.core.thread.JavaSpinLockUtils; +import com.oracle.svm.core.thread.VMOperation; +import com.oracle.svm.core.util.BasedOnJDKFile; +import com.oracle.svm.core.util.UnsignedUtils; + +import jdk.graal.compiler.nodes.extended.MembarNode; +import jdk.graal.compiler.word.Word; +import jdk.internal.misc.Unsafe; + +/** + * Per-isolate bump-pointer allocation inside {@link AlignedHeapChunk}. First the allocation is + * tried within {@link HeapAllocation#retainedChunk}. If this fails the allocation is tried within + * {@link HeapAllocation#currentChunk}. If this also fails a new chunk is requested. + * + * Both chunk fields may only be written if {@link HeapAllocation#lock} is locked with + * {@link JavaSpinLockUtils} or during a safepoint. + */ +public final class HeapAllocation { + + private static final Unsafe UNSAFE = Unsafe.getUnsafe(); + private static final long LOCK_OFFSET = UNSAFE.objectFieldOffset(HeapAllocation.class, "lock"); + + @SuppressWarnings("unused") private volatile int lock; + + /** + * Current allocation chunk, and also the head of the list of aligned chunks that were allocated + * since the last collection. + */ + private AlignedHeader currentChunk; + + /** + * Retained allocation chunk. Used to lower the waste generated during mutation by having two + * active chunks if the free space in a chunk about to be retired still could fit a TLAB. 
+ */ + private AlignedHeader retainedChunk; + + @Platforms(Platform.HOSTED_ONLY.class) + public HeapAllocation() { + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/g1/g1CollectedHeap.cpp#L383-L390") + @Uninterruptible(reason = "Returns uninitialized memory.", callerMustBe = true) + public Pointer allocateNewTlab(UnsignedWord minSize, UnsignedWord requestedSize, WordPointer actualSize) { + assert fitsInAlignedChunk(requestedSize) : "We do not allow TLABs larger than an aligned chunk."; + return attemptAllocation(minSize, requestedSize, actualSize); + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/g1/g1CollectedHeap.cpp#L392-L402") + @Uninterruptible(reason = "Returns uninitialized memory.", callerMustBe = true) + public Pointer allocateOutsideTlab(UnsignedWord size) { + assert fitsInAlignedChunk(size) : "Must not be called for allocation requests that require an unaligned chunk."; + WordPointer actualSize = StackValue.get(WordPointer.class); + Pointer result = attemptAllocation(size, size, actualSize); + assert actualSize.read() == size; + return result; + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+15/src/hotspot/share/gc/g1/g1Allocator.inline.hpp#L52-L62") + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/g1/g1AllocRegion.inline.hpp#L89-L95") + @Uninterruptible(reason = "Returns uninitialized memory and acquires a lock without a thread state transition.", callerMustBe = true) + private Pointer attemptAllocation(UnsignedWord minSize, UnsignedWord requestedSize, WordPointer actualSize) { + Pointer result = attemptAllocationParallel(retainedChunk, minSize, requestedSize, actualSize); + if (result.isNonNull()) { + return result; + } + + result = attemptAllocationParallel(currentChunk, minSize, requestedSize, actualSize); + if (result.isNonNull()) { + return result; + } + + JavaSpinLockUtils.lockNoTransition(this, LOCK_OFFSET); + try { + // Another thread might already have allocated a new chunk. 
+ result = attemptAllocationParallel(retainedChunk, minSize, requestedSize, actualSize); + if (result.isNonNull()) { + return result; + } + result = attemptAllocationParallel(currentChunk, minSize, requestedSize, actualSize); + if (result.isNonNull()) { + return result; + } + return attemptAllocationInNewChunk(requestedSize, actualSize); + } finally { + JavaSpinLockUtils.unlock(this, LOCK_OFFSET); + } + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+4/src/hotspot/share/gc/g1/g1AllocRegion.inline.hpp#L51-L65") + @Uninterruptible(reason = "Returns uninitialized memory.", callerMustBe = true) + private static Pointer attemptAllocationParallel(AlignedHeader chunk, UnsignedWord minSize, UnsignedWord requestedSize, WordPointer actualSize) { + if (chunk.isNonNull()) { + return allocateParallel(chunk, minSize, requestedSize, actualSize); + } + return Word.nullPointer(); + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/g1/g1AllocRegion.inline.hpp#L97-L109") + @Uninterruptible(reason = "Returns uninitialized memory.", callerMustBe = true) + private Pointer attemptAllocationInNewChunk(UnsignedWord requestedSize, WordPointer actualSize) { + assert JavaSpinLockUtils.isLocked(this, LOCK_OFFSET); + + retainAllocChunk(); + Pointer result = newAllocChunkAndAllocate(requestedSize); + actualSize.write(requestedSize); + return result; + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/g1/g1AllocRegion.cpp#L289-L311") + @Uninterruptible(reason = "Modifies allocation chunks.") + private void retainAllocChunk() { + assert JavaSpinLockUtils.isLocked(this, LOCK_OFFSET); + + if (currentChunk.isNonNull()) { + /* + * Retain the current chunk if it fits a TLAB and has more free space than the currently + * retained chunk. + */ + if (shouldRetain()) { + retainedChunk = currentChunk; + } + } + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/g1/g1AllocRegion.cpp#L275-L287") + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + private boolean shouldRetain() { + assert JavaSpinLockUtils.isLocked(this, LOCK_OFFSET); + + UnsignedWord freeBytes = HeapChunk.availableObjectMemory(currentChunk); + UnsignedWord minTlabSize = Word.unsigned(TlabOptionCache.singleton().getMinTlabSize()); + if (freeBytes.belowThan(minTlabSize)) { + return false; + } + + return retainedChunk.isNull() || freeBytes.aboveOrEqual(HeapChunk.availableObjectMemory(retainedChunk)); + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+4/src/hotspot/share/gc/g1/g1AllocRegion.cpp#L130-L154") + @Uninterruptible(reason = "Returns uninitialized memory.", callerMustBe = true) + private Pointer newAllocChunkAndAllocate(UnsignedWord requestedSize) { + assert JavaSpinLockUtils.isLocked(this, LOCK_OFFSET); + + AlignedHeader newChunk = requestNewAlignedChunk(); + if (newChunk.isNonNull()) { + Pointer result = AlignedHeapChunk.allocateMemory(newChunk, requestedSize); + assert result.isNonNull(); + + HeapChunk.setNext(newChunk, currentChunk); + + /* Publish the new chunk (other threads need to see a fully initialized chunk). 
*/ + MembarNode.memoryBarrier(FenceKind.STORE_STORE); + currentChunk = newChunk; + return result; + } else { + return Word.nullPointer(); + } + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+4/src/hotspot/share/gc/g1/g1HeapRegion.inline.hpp#L186-L208") + @Uninterruptible(reason = "Returns uninitialized memory, modifies alloc chunk.", callerMustBe = true) + private static Pointer allocateParallel(AlignedHeader chunk, UnsignedWord minSize, UnsignedWord requestedSize, WordPointer actualSize) { + + do { + Pointer top = (Pointer) chunk.getTopOffset(CHUNK_HEADER_TOP_IDENTITY); + + UnsignedWord available = chunk.getEndOffset().subtract(top); + UnsignedWord wantToAllocate = UnsignedUtils.min(available, requestedSize); + if (wantToAllocate.belowThan(minSize)) { + return Word.nullPointer(); + } + + UnsignedWord newTop = top.add(wantToAllocate); + ObjectLayout ol = ConfigurationValues.getObjectLayout(); + assert ol.isAligned(top.rawValue()) && ol.isAligned(newTop.rawValue()); + if (((Pointer) chunk).logicCompareAndSwapWord(HeapChunk.Header.offsetOfTopOffset(), top, newTop, CHUNK_HEADER_TOP_IDENTITY)) { + actualSize.write(wantToAllocate); + return HeapChunk.asPointer(chunk).add(top); + } + } while (true); + + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + private static AlignedHeader requestNewAlignedChunk() { + AlignedHeader newChunk = HeapImpl.getChunkProvider().produceAlignedChunk(); + HeapImpl.getAccounting().increaseEdenUsedBytes(HeapParameters.getAlignedHeapChunkSize()); + return newChunk; + } + + public void retireChunksToEden() { + VMOperation.guaranteeInProgressAtSafepoint("HeapAllocation.retireChunksToEden"); + + AlignedHeader chunk = currentChunk; + currentChunk = Word.nullPointer(); + retainedChunk = Word.nullPointer(); + + Space eden = HeapImpl.getHeapImpl().getYoungGeneration().getEden(); + while (chunk.isNonNull()) { + AlignedHeader next = HeapChunk.getNext(chunk); + HeapChunk.setNext(chunk, Word.nullPointer()); + eden.appendAlignedHeapChunk(chunk); + chunk = next; + } + + } + + /** + * Return the remaining space in the current alloc chunk, but not less than the min. TLAB size. + * + * Also, this value can be at most the size available for objects within an aligned chunk, as + * bigger TLABs are not possible. + */ + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/g1/g1Allocator.cpp#L184-L203") + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public UnsignedWord unsafeMaxTlabAllocSize() { + UnsignedWord maxTlabSize = AlignedHeapChunk.getUsableSizeForObjects(); + UnsignedWord minTlabSize = Word.unsigned(TlabOptionCache.singleton().getMinTlabSize()); + if (currentChunk.isNull() || HeapChunk.availableObjectMemory(currentChunk).belowThan(minTlabSize)) { + /* + * The next TLAB allocation will most probably happen in a new chunk, therefore we can + * attempt to allocate the maximum allowed TLAB size. + */ + return maxTlabSize; + } + + return UnsignedUtils.min(HeapChunk.availableObjectMemory(currentChunk), maxTlabSize); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + private static boolean fitsInAlignedChunk(UnsignedWord size) { + return size.belowOrEqual(AlignedHeapChunk.getUsableSizeForObjects()); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public void tearDown() { + // This implicitly frees retainedChunk as well. 
+ HeapChunkProvider.freeAlignedChunkList(currentChunk); + currentChunk = Word.nullPointer(); + retainedChunk = Word.nullPointer(); + } + + boolean printLocationInfo(Log log, Pointer ptr) { + AlignedHeader chunk = currentChunk; + while (chunk.isNonNull()) { + if (HeapChunk.asPointer(chunk).belowOrEqual(ptr) && ptr.belowThan(HeapChunk.getEndPointer(chunk))) { + boolean unusablePart = ptr.aboveOrEqual(HeapChunk.getTopPointer(chunk)); + printChunkInfo(log, chunk, unusablePart); + return true; + } + + chunk = HeapChunk.getNext(chunk); + } + return false; + } + + private static void printChunkInfo(Log log, AlignedHeader chunk, boolean unusablePart) { + String unusable = unusablePart ? "unusable part of " : ""; + log.string("points into ").string(unusable).string("heap allocation aligned chunk").spaces(1).zhex(chunk).spaces(1); + } + + void logChunks(Log log, String spaceName) { + HeapChunkLogging.logChunks(log, currentChunk, spaceName, false); + } + +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunk.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunk.java index e2f0bd078926..74d46b80bdaf 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunk.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunk.java @@ -27,6 +27,7 @@ import java.util.function.IntUnaryOperator; import org.graalvm.nativeimage.c.struct.RawField; +import org.graalvm.nativeimage.c.struct.RawFieldOffset; import org.graalvm.nativeimage.c.struct.RawFieldAddress; import org.graalvm.nativeimage.c.struct.RawStructure; import org.graalvm.nativeimage.c.struct.UniqueLocationIdentity; @@ -44,8 +45,10 @@ import com.oracle.svm.core.heap.ObjectVisitor; import com.oracle.svm.core.hub.LayoutEncoding; import com.oracle.svm.core.identityhashcode.IdentityHashCodeSupport; +import com.oracle.svm.core.util.VMError; import jdk.graal.compiler.api.directives.GraalDirectives; +import jdk.graal.compiler.nodes.NamedLocationIdentity; import jdk.graal.compiler.word.Word; /** @@ -78,6 +81,9 @@ * allocated within the HeapChunk are examined by the collector. */ public final class HeapChunk { + + public static final LocationIdentity CHUNK_HEADER_TOP_IDENTITY = NamedLocationIdentity.mutable("ChunkHeader.top"); + private HeapChunk() { // all static } @@ -106,12 +112,16 @@ public interface Header> extends HeaderPadding { * in the chunk. */ @RawField - @UniqueLocationIdentity - UnsignedWord getTopOffset(); + UnsignedWord getTopOffset(LocationIdentity topIdentity); @RawField - @UniqueLocationIdentity - void setTopOffset(UnsignedWord newTop); + void setTopOffset(UnsignedWord newTop, LocationIdentity topIdentity); + + @RawFieldOffset + static int offsetOfTopOffset() { + // replaced + throw VMError.shouldNotReachHereAtRuntime(); // ExcludeFromJacocoGeneratedReport + } /** Offset of the limit of memory available for allocation. 
*/ @RawField @@ -194,18 +204,18 @@ public static void initialize(Header chunk, Pointer objectsStart, UnsignedWor @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static UnsignedWord getTopOffset(Header that) { assert getTopPointer(that).isNonNull() : "Not safe: top currently points to NULL."; - return that.getTopOffset(); + return that.getTopOffset(CHUNK_HEADER_TOP_IDENTITY); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static Pointer getTopPointer(Header that) { - return asPointer(that).add(that.getTopOffset()); + return asPointer(that).add(that.getTopOffset(CHUNK_HEADER_TOP_IDENTITY)); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static void setTopPointer(Header that, Pointer newTop) { // Note that the address arithmetic also works for newTop == NULL, e.g. in TLAB allocation - that.setTopOffset(newTop.subtract(asPointer(that))); + that.setTopOffset(newTop.subtract(asPointer(that)), CHUNK_HEADER_TOP_IDENTITY); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) @@ -324,7 +334,7 @@ private static void callVisitor(ObjectVisitor visitor, Object obj) { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static UnsignedWord availableObjectMemory(Header that) { - return that.getEndOffset().subtract(that.getTopOffset()); + return that.getEndOffset().subtract(that.getTopOffset(CHUNK_HEADER_TOP_IDENTITY)); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java index a8d3171daae1..80f80153766d 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java @@ -502,7 +502,7 @@ public void walkImageHeapObjects(ObjectVisitor visitor) { @Override public void walkCollectedHeapObjects(ObjectVisitor visitor) { VMOperation.guaranteeInProgressAtSafepoint("Must only be called at a safepoint"); - ThreadLocalAllocation.disableAndFlushForAllThreads(); + makeParseable(); getYoungGeneration().walkObjects(visitor); getOldGeneration().walkObjects(visitor); } @@ -514,6 +514,10 @@ public void doReferenceHandling() { } } + void makeParseable() { + youngGeneration.makeParseable(); + } + @SuppressFBWarnings(value = "VO_VOLATILE_INCREMENT", justification = "Only the GC increments the volatile field 'refListOfferCounter'.") void addToReferencePendingList(Reference list) { assert VMOperation.isGCInProgress(); @@ -797,10 +801,22 @@ private boolean printLocationInfo(Log log, Pointer ptr, boolean allowJavaHeapAcc } if (allowUnsafeOperations || VMOperation.isInProgressAtSafepoint()) { - // If we are not at a safepoint, then it is unsafe to access thread locals of another - // thread as the IsolateThread could be freed at any time. - return printTlabInfo(log, ptr); + /* + * If we are not at a safepoint, then it is unsafe to access thread locals of another + * thread as the IsolateThread could be freed at any time. + */ + if (printTlabInfo(log, ptr)) { + return true; + } } + + if (allowJavaHeapAccess) { + // Accessing chunks is safe if we prevent a GC. 
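+ // Check whether ptr points into one of the aligned chunks currently held by HeapAllocation.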
+ if (YoungGeneration.getHeapAllocation().printLocationInfo(log, ptr)) { + return true; + } + } + return false; } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/RuntimeImageHeapChunkWriter.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/RuntimeImageHeapChunkWriter.java index 505dfee7cf32..2799f00272ce 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/RuntimeImageHeapChunkWriter.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/RuntimeImageHeapChunkWriter.java @@ -53,26 +53,25 @@ private Pointer getChunkPointerInBuffer(int chunkPosition) { @Override public void initializeAlignedChunk(int chunkPosition, long topOffset, long endOffset, long offsetToPreviousChunk, long offsetToNextChunk) { - AlignedHeapChunk.AlignedHeader header = (AlignedHeapChunk.AlignedHeader) getChunkPointerInBuffer(chunkPosition); - header.setTopOffset(Word.unsigned(topOffset)); - header.setEndOffset(Word.unsigned(endOffset)); - header.setSpace(null); - header.setOffsetToPreviousChunk(Word.unsigned(offsetToPreviousChunk)); - header.setOffsetToNextChunk(Word.unsigned(offsetToNextChunk)); - header.setIdentityHashSalt(Word.zero(), IdentityHashCodeSupport.IDENTITY_HASHCODE_SALT_LOCATION); + initializeChunk(chunkPosition, topOffset, endOffset, offsetToPreviousChunk, offsetToNextChunk); } @Override public void initializeUnalignedChunk(int chunkPosition, long topOffset, long endOffset, long offsetToPreviousChunk, long offsetToNextChunk, long objectSize) { + initializeChunk(chunkPosition, topOffset, endOffset, offsetToPreviousChunk, offsetToNextChunk); + UnalignedHeapChunk.UnalignedHeader header = (UnalignedHeapChunk.UnalignedHeader) getChunkPointerInBuffer(chunkPosition); - header.setTopOffset(Word.unsigned(topOffset)); + UnalignedHeapChunk.initializeObjectStartOffset(header, Word.unsigned(objectSize)); + } + + private void initializeChunk(int chunkPosition, long topOffset, long endOffset, long offsetToPreviousChunk, long offsetToNextChunk) { + HeapChunk.Header header = (HeapChunk.Header) getChunkPointerInBuffer(chunkPosition); + header.setTopOffset(Word.unsigned(topOffset), HeapChunk.CHUNK_HEADER_TOP_IDENTITY); header.setEndOffset(Word.unsigned(endOffset)); header.setSpace(null); header.setOffsetToPreviousChunk(Word.unsigned(offsetToPreviousChunk)); header.setOffsetToNextChunk(Word.unsigned(offsetToNextChunk)); header.setIdentityHashSalt(Word.zero(), IdentityHashCodeSupport.IDENTITY_HASHCODE_SALT_LOCATION); - - UnalignedHeapChunk.initializeObjectStartOffset(header, Word.unsigned(objectSize)); } @Override diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java index e8f74da76ef7..66b1cfed1a6f 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java @@ -40,6 +40,7 @@ import com.oracle.svm.core.thread.VMOperation; import com.oracle.svm.core.thread.VMThreads; +import jdk.graal.compiler.api.replacements.Fold; import jdk.graal.compiler.word.Word; public final class YoungGeneration extends Generation { @@ -49,6 +50,7 @@ public final class YoungGeneration extends Generation { private final GreyObjectsWalker[] 
survivorGreyObjectsWalkers; private final ChunksAccounting survivorsToSpacesAccounting; private final int maxSurvivorSpaces; + private final HeapAllocation heapAllocation = new HeapAllocation(); @Platforms(Platform.HOSTED_ONLY.class) YoungGeneration(String name) { @@ -67,6 +69,11 @@ public final class YoungGeneration extends Generation { } } + @Fold + public static HeapAllocation getHeapAllocation() { + return HeapImpl.getHeapImpl().getYoungGeneration().heapAllocation; + } + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public int getMaxSurvivorSpaces() { return maxSurvivorSpaces; @@ -74,6 +81,7 @@ public int getMaxSurvivorSpaces() { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) void tearDown() { + heapAllocation.tearDown(); ThreadLocalAllocation.tearDown(); eden.tearDown(); for (int i = 0; i < maxSurvivorSpaces; i++) { @@ -84,9 +92,6 @@ void tearDown() { @Override public void walkObjects(ObjectVisitor visitor) { - /* Flush the thread-local allocation data. */ - ThreadLocalAllocation.disableAndFlushForAllThreads(); - getEden().walkObjects(visitor); for (int i = 0; i < maxSurvivorSpaces; i++) { survivorFromSpaces[i].walkObjects(visitor); @@ -105,6 +110,8 @@ public void logUsage(Log log) { public void logChunks(Log log, boolean allowUnsafe) { if (allowUnsafe) { + heapAllocation.logChunks(log, eden.getShortName()); + for (IsolateThread thread = VMThreads.firstThreadUnsafe(); thread.isNonNull(); thread = VMThreads.nextThread(thread)) { logTlabChunks(log, thread); } @@ -378,4 +385,10 @@ public boolean printLocationInfo(Log log, Pointer ptr) { } return false; } + + void makeParseable() { + ThreadLocalAllocation.disableAndFlushForAllThreads(); + heapAllocation.retireChunksToEden(); + } + } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/PlanningVisitor.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/PlanningVisitor.java index cfc32de4cb38..df3cb9f95f6d 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/PlanningVisitor.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/PlanningVisitor.java @@ -24,6 +24,8 @@ */ package com.oracle.svm.core.genscavenge.compacting; +import static com.oracle.svm.core.genscavenge.HeapChunk.CHUNK_HEADER_TOP_IDENTITY; + import org.graalvm.nativeimage.Platform; import org.graalvm.nativeimage.Platforms; import org.graalvm.word.Pointer; @@ -141,7 +143,8 @@ public boolean visitChunk(AlignedHeapChunk.AlignedHeader chunk) { assert gapSize.equal(0) || objSeqSize.equal(0); if (gapSize.notEqual(0)) { // truncate gap at chunk end - chunk.setTopOffset(chunk.getTopOffset().subtract(gapSize)); + UnsignedWord newTopOffset = chunk.getTopOffset(CHUNK_HEADER_TOP_IDENTITY).subtract(gapSize); + chunk.setTopOffset(newTopOffset, CHUNK_HEADER_TOP_IDENTITY); } else if (objSeqSize.notEqual(0)) { Pointer newAddress = sweeping ? 
objSeq : allocate(objSeqSize); ObjectMoveInfo.setNewAddress(objSeq, newAddress); From 1181773517609dc103fc8303a0c597e04c1fb652 Mon Sep 17 00:00:00 2001 From: Thomas Schrott Date: Mon, 11 Nov 2024 09:29:33 +0100 Subject: [PATCH 6/9] Add util class for filler objects --- .../core/genscavenge/FillerObjectUtil.java | 78 +++++++++++++++++++ .../compacting/SweepingVisitor.java | 40 +--------- .../oracle/svm/core/config/ObjectLayout.java | 2 + 3 files changed, 82 insertions(+), 38 deletions(-) create mode 100644 substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/FillerObjectUtil.java diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/FillerObjectUtil.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/FillerObjectUtil.java new file mode 100644 index 000000000000..ffbfe5c3c750 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/FillerObjectUtil.java @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.svm.core.genscavenge; + +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; +import static jdk.graal.compiler.replacements.AllocationSnippets.FillContent.WITH_GARBAGE_IF_ASSERTIONS_ENABLED; + +import org.graalvm.word.Pointer; +import org.graalvm.word.UnsignedWord; + +import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.config.ConfigurationValues; +import com.oracle.svm.core.genscavenge.graal.nodes.FormatArrayNode; +import com.oracle.svm.core.genscavenge.graal.nodes.FormatObjectNode; +import com.oracle.svm.core.heap.FillerArray; +import com.oracle.svm.core.heap.FillerObject; +import com.oracle.svm.core.hub.LayoutEncoding; +import com.oracle.svm.core.util.UnsignedUtils; + +import jdk.graal.compiler.api.replacements.Fold; +import jdk.graal.compiler.core.common.NumUtil; +import jdk.graal.compiler.word.Word; +import jdk.vm.ci.meta.JavaKind; + +public class FillerObjectUtil { + private static final Class ARRAY_CLASS = FillerArray.class; + private static final JavaKind ARRAY_ELEMENT_KIND = JavaKind.Int; + private static final int ARRAY_ELEMENT_SIZE = ARRAY_ELEMENT_KIND.getByteCount(); + + @Fold + public static UnsignedWord objectMinSize() { + return Word.unsigned(ConfigurationValues.getObjectLayout().getMinImageHeapObjectSize()); + } + + @Fold + static int arrayMinSize() { + return NumUtil.safeToInt(ConfigurationValues.getObjectLayout().getArraySize(ARRAY_ELEMENT_KIND, 0, false)); + } + + @Fold + static int arrayBaseOffset() { + return ConfigurationValues.getObjectLayout().getArrayBaseOffset(ARRAY_ELEMENT_KIND); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public static void writeFillerObjectAt(Pointer p, UnsignedWord size) { + assert size.aboveThan(0); + if (size.aboveOrEqual(arrayMinSize())) { + int length = UnsignedUtils.safeToInt(size.subtract(arrayBaseOffset()).unsignedDivide(ARRAY_ELEMENT_SIZE)); + FormatArrayNode.formatArray(p, ARRAY_CLASS, length, true, false, WITH_GARBAGE_IF_ASSERTIONS_ENABLED, false); + } else { + FormatObjectNode.formatObject(p, FillerObject.class, true, WITH_GARBAGE_IF_ASSERTIONS_ENABLED, false); + } + assert LayoutEncoding.getSizeFromObjectInGC(p.toObject()).equal(size); + } +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/SweepingVisitor.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/SweepingVisitor.java index 4244e706095e..16b999e37cdc 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/SweepingVisitor.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/SweepingVisitor.java @@ -24,43 +24,18 @@ */ package com.oracle.svm.core.genscavenge.compacting; -import static jdk.graal.compiler.replacements.AllocationSnippets.FillContent.WITH_GARBAGE_IF_ASSERTIONS_ENABLED; - import org.graalvm.word.Pointer; import org.graalvm.word.UnsignedWord; -import com.oracle.svm.core.config.ConfigurationValues; import com.oracle.svm.core.genscavenge.AlignedHeapChunk; +import com.oracle.svm.core.genscavenge.FillerObjectUtil; import com.oracle.svm.core.genscavenge.HeapChunk; -import com.oracle.svm.core.genscavenge.graal.nodes.FormatArrayNode; -import com.oracle.svm.core.genscavenge.graal.nodes.FormatObjectNode; -import com.oracle.svm.core.heap.FillerArray; -import com.oracle.svm.core.heap.FillerObject; -import com.oracle.svm.core.hub.LayoutEncoding; 
-import com.oracle.svm.core.util.UnsignedUtils; - -import jdk.graal.compiler.api.replacements.Fold; -import jdk.graal.compiler.core.common.NumUtil; -import jdk.vm.ci.meta.JavaKind; /** * Overwrites dead objects with filler objects so that heap walks or scans that use card tables * cannot encounter them (and their broken references). */ public final class SweepingVisitor implements ObjectMoveInfo.Visitor { - private static final Class ARRAY_CLASS = FillerArray.class; - private static final JavaKind ARRAY_ELEMENT_KIND = JavaKind.Int; - private static final int ARRAY_ELEMENT_SIZE = ARRAY_ELEMENT_KIND.getByteCount(); - - @Fold - static int arrayMinSize() { - return NumUtil.safeToInt(ConfigurationValues.getObjectLayout().getArraySize(ARRAY_ELEMENT_KIND, 0, false)); - } - - @Fold - static int arrayBaseOffset() { - return ConfigurationValues.getObjectLayout().getArrayBaseOffset(ARRAY_ELEMENT_KIND); - } @Override public boolean visit(Pointer objSeq, UnsignedWord size, Pointer newAddress, Pointer nextObjSeq) { @@ -68,7 +43,7 @@ public boolean visit(Pointer objSeq, UnsignedWord size, Pointer newAddress, Poin if (nextObjSeq.isNonNull()) { Pointer gapStart = objSeq.add(size); assert gapStart.belowThan(nextObjSeq); - writeFillerObjectAt(gapStart, nextObjSeq.subtract(gapStart)); + FillerObjectUtil.writeFillerObjectAt(gapStart, nextObjSeq.subtract(gapStart)); // Note that we have already added first object table entries for fillers during fixup. } else { AlignedHeapChunk.AlignedHeader chunk = AlignedHeapChunk.getEnclosingChunkFromObjectPointer(objSeq); @@ -76,15 +51,4 @@ public boolean visit(Pointer objSeq, UnsignedWord size, Pointer newAddress, Poin } return true; } - - private static void writeFillerObjectAt(Pointer p, UnsignedWord size) { - assert size.aboveThan(0); - if (size.aboveOrEqual(arrayMinSize())) { - int length = UnsignedUtils.safeToInt(size.subtract(arrayBaseOffset()).unsignedDivide(ARRAY_ELEMENT_SIZE)); - FormatArrayNode.formatArray(p, ARRAY_CLASS, length, true, false, WITH_GARBAGE_IF_ASSERTIONS_ENABLED, false); - } else { - FormatObjectNode.formatObject(p, FillerObject.class, true, WITH_GARBAGE_IF_ASSERTIONS_ENABLED, false); - } - assert LayoutEncoding.getSizeFromObjectInGC(p.toObject()).equal(size); - } } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/config/ObjectLayout.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/config/ObjectLayout.java index e3c1b99ad306..58686d32899a 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/config/ObjectLayout.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/config/ObjectLayout.java @@ -44,6 +44,7 @@ import com.oracle.svm.core.util.VMError; import jdk.graal.compiler.api.directives.GraalDirectives; +import jdk.graal.compiler.api.replacements.Fold; import jdk.graal.compiler.core.common.NumUtil; import jdk.graal.compiler.replacements.ReplacementsUtil; import jdk.vm.ci.code.CodeUtil; @@ -305,6 +306,7 @@ public int getMinImageHeapArraySize() { return NumUtil.safeToInt(getArraySize(JavaKind.Byte, 0, true)); } + @Fold public int getMinImageHeapObjectSize() { return Math.min(getMinImageHeapArraySize(), getMinImageHeapInstanceSize()); } From ba375694fc71d725ce4596a4050baf8cddf931b5 Mon Sep 17 00:00:00 2001 From: Thomas Schrott Date: Wed, 13 Nov 2024 15:20:03 +0100 Subject: [PATCH 7/9] Add raw struct variant of AdaptiveWeightedAverage --- .../genscavenge/AdaptiveWeightedAverage.java | 6 +- .../AdaptiveWeightedAverageStruct.java | 119 ++++++++++++++++++ 
.../svm/core/jdk/UninterruptibleUtils.java | 12 ++ 3 files changed, 136 insertions(+), 1 deletion(-) create mode 100644 substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveWeightedAverageStruct.java diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveWeightedAverage.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveWeightedAverage.java index 5e60f8f6c52b..2dd421c60262 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveWeightedAverage.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveWeightedAverage.java @@ -24,8 +24,11 @@ */ package com.oracle.svm.core.genscavenge; +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; + import org.graalvm.word.UnsignedWord; +import com.oracle.svm.core.Uninterruptible; import com.oracle.svm.core.util.UnsignedUtils; /** @@ -101,7 +104,8 @@ protected double computeAdaptiveAverage(double sample, double avg) { return expAvg(avg, sample, adaptiveWeight); } - private static double expAvg(double avg, double sample, double adaptiveWeight) { + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + static double expAvg(double avg, double sample, double adaptiveWeight) { assert adaptiveWeight > 0 && adaptiveWeight <= 100 : "weight must be a percentage"; return (100.0 - adaptiveWeight) * avg / 100.0 + adaptiveWeight * sample / 100.0; } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveWeightedAverageStruct.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveWeightedAverageStruct.java new file mode 100644 index 000000000000..0829a53bdf6e --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveWeightedAverageStruct.java @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.svm.core.genscavenge; + +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; +import static com.oracle.svm.core.genscavenge.AdaptiveWeightedAverage.OLD_THRESHOLD; + +import org.graalvm.nativeimage.c.struct.RawField; +import org.graalvm.nativeimage.c.struct.RawStructure; +import org.graalvm.word.PointerBase; +import org.graalvm.word.UnsignedWord; + +import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.jdk.UninterruptibleUtils; +import com.oracle.svm.core.util.UnsignedUtils; + +/** + * This class provides a raw structure implementation of {@link AdaptiveWeightedAverage}. For + * further information see {@link AdaptiveWeightedAverage}. + */ +class AdaptiveWeightedAverageStruct { + + @RawStructure + interface Data extends PointerBase { + + @RawField + void setWeight(double weight); + + @RawField + double getWeight(); + + @RawField + void setAverage(double average); + + @RawField + double getAverage(); + + @RawField + void setSampleCount(long sampleCount); + + @RawField + long getSampleCount(); + + @RawField + void setIsOld(boolean isOld); + + @RawField + boolean getIsOld(); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + static void initialize(Data data, double weight) { + initialize(data, weight, 0); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + static void initialize(Data data, double weight, double avg) { + assert weight > 0 && weight <= 100; + data.setWeight(weight); + data.setAverage(avg); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public static double getAverage(Data data) { + return data.getAverage(); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public static void sample(Data data, double value) { + data.setSampleCount(data.getSampleCount() + 1); + if (!data.getIsOld() && data.getSampleCount() > OLD_THRESHOLD) { + data.setIsOld(true); + } + data.setAverage(computeAdaptiveAverage(data, value, data.getAverage())); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public static void sample(Data data, UnsignedWord value) { + sample(data, UnsignedUtils.toDouble(value)); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + protected static double computeAdaptiveAverage(Data data, double sample, double avg) { + /* + * We smoothen the samples by not using weight directly until we've had enough data to make + * it meaningful. We'd like the first weight used to be 1, the second to be 1/2, etc until + * we have OLD_THRESHOLD/weight samples. 
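+ * For example, assuming OLD_THRESHOLD is 100 and the configured weight is 25: the first sample
+ * is taken with an effective weight of 100, the second with 50, the third with ~33; from the
+ * fourth sample on, the configured weight of 25 is used.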
+ */ + double countWeight = 0; + if (!data.getIsOld()) { // avoid division by zero if the counter wraps + countWeight = OLD_THRESHOLD / (double) data.getSampleCount(); + } + double adaptiveWeight = UninterruptibleUtils.Math.max(data.getWeight(), countWeight); + return AdaptiveWeightedAverage.expAvg(avg, sample, adaptiveWeight); + } + +} diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/UninterruptibleUtils.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/UninterruptibleUtils.java index 6e744b290c7c..ba84f084c31d 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/UninterruptibleUtils.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/UninterruptibleUtils.java @@ -427,6 +427,18 @@ public static long max(long a, long b) { return (a >= b) ? a : b; } + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public static double max(double a, double b) { + if (a != a) { + return a; // a is NaN + } + if ((a == 0.0d) && (b == 0.0d) && (Double.doubleToRawLongBits(a) == Double.doubleToRawLongBits(-0.0d))) { + // Raw conversion ok since NaN can't map to -0.0. + return b; + } + return (a >= b) ? a : b; + } + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static int clamp(int value, int min, int max) { assert min <= max; From 4370d98edd9e2db8b2743dae48d1fe14ee11689f Mon Sep 17 00:00:00 2001 From: Thomas Schrott Date: Wed, 13 Nov 2024 15:20:12 +0100 Subject: [PATCH 8/9] Allocate TLAB as part of chunks and size them dynamically --- .../genscavenge/BasicCollectionPolicies.java | 5 +- .../oracle/svm/core/genscavenge/GCImpl.java | 11 +- .../oracle/svm/core/genscavenge/HeapImpl.java | 65 +-- .../oracle/svm/core/genscavenge/Space.java | 3 +- .../genscavenge/ThreadLocalAllocation.java | 281 +++++----- .../svm/core/genscavenge/TlabSupport.java | 523 ++++++++++++++++++ .../svm/core/genscavenge/YoungGeneration.java | 15 +- .../snippets/SubstrateAllocationSnippets.java | 5 +- .../svm/core/jdk/UninterruptibleUtils.java | 18 + 9 files changed, 697 insertions(+), 229 deletions(-) create mode 100644 substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/TlabSupport.java diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/BasicCollectionPolicies.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/BasicCollectionPolicies.java index 3a33256131b2..8023715ea030 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/BasicCollectionPolicies.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/BasicCollectionPolicies.java @@ -221,10 +221,7 @@ public static final class OnlyCompletely extends BasicPolicy { @Override public boolean shouldCollectCompletely(boolean followingIncrementalCollection) { - if (!followingIncrementalCollection && shouldCollectYoungGenSeparately(false)) { - return false; - } - return true; + return followingIncrementalCollection || !shouldCollectYoungGenSeparately(false); } @Override diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java index 717fced5a6a8..cb50e02fcd29 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java +++ 
b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java @@ -260,6 +260,7 @@ assert getCollectionEpoch().equal(data.getRequestingEpoch()) || collectionTimer.stop(); } + resizeAllTlabs(); accounting.updateCollectionCountAndTime(completeCollection, collectionTimer.totalNanos()); HeapImpl.getAccounting().notifyAfterCollection(); GenScavengeMemoryPoolMXBeans.singleton().notifyAfterCollection(); @@ -402,6 +403,14 @@ public static UnsignedWord getChunkBytes() { return youngBytes.add(oldBytes); } + private static void resizeAllTlabs() { + if (SubstrateGCOptions.TlabOptions.ResizeTLAB.getValue()) { + for (IsolateThread thread = VMThreads.firstThread(); thread.isNonNull(); thread = VMThreads.nextThread(thread)) { + TlabSupport.resize(thread); + } + } + } + private void printGCBefore(GCCause cause) { if (!SubstrateGCOptions.VerboseGC.getValue()) { return; @@ -1317,7 +1326,7 @@ protected PrintGCSummaryOperation() { @Override protected void operate() { - ThreadLocalAllocation.disableAndFlushForAllThreads(); + HeapImpl.getHeapImpl().makeParseable(); Log log = Log.log(); log.string("GC summary").indent(true); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java index 80f80153766d..987fc9e56321 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java @@ -50,8 +50,6 @@ import com.oracle.svm.core.annotate.Substitute; import com.oracle.svm.core.annotate.TargetClass; import com.oracle.svm.core.genscavenge.AlignedHeapChunk.AlignedHeader; -import com.oracle.svm.core.genscavenge.ThreadLocalAllocation.Descriptor; -import com.oracle.svm.core.genscavenge.UnalignedHeapChunk.UnalignedHeader; import com.oracle.svm.core.genscavenge.graal.nodes.FormatArrayNode; import com.oracle.svm.core.genscavenge.remset.RememberedSet; import com.oracle.svm.core.graal.snippets.SubstrateAllocationSnippets; @@ -196,7 +194,7 @@ public boolean isInPrimaryImageHeap(Pointer objPointer) { @Override @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public void suspendAllocation() { - ThreadLocalAllocation.suspendInCurrentThread(); + TlabSupport.suspendAllocationInCurrentThread(); } @Override @@ -415,13 +413,14 @@ public void endSafepoint() { @Uninterruptible(reason = "Called during startup.") @Override public void attachThread(IsolateThread isolateThread) { - // nothing to do + TlabSupport.startupInitialization(); + TlabSupport.initialize(isolateThread); } @Override @Uninterruptible(reason = "Thread is detaching and holds the THREAD_MUTEX.") public void detachThread(IsolateThread isolateThread) { - ThreadLocalAllocation.disableAndFlushForThread(isolateThread); + TlabSupport.disableAndFlushForThread(isolateThread); } @Fold @@ -707,7 +706,7 @@ public void optionValueChanged(RuntimeOptionKey key) { @Override @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public long getThreadAllocatedMemory(IsolateThread thread) { - UnsignedWord allocatedBytes = ThreadLocalAllocation.allocatedBytes.getVolatile(thread); + UnsignedWord allocatedBytes = ThreadLocalAllocation.getAllocatedBytes(thread); /* * The current aligned chunk in the TLAB is only partially filled and therefore not yet @@ -717,9 +716,8 @@ public long 
getThreadAllocatedMemory(IsolateThread thread) { * can cause that the memory in the current aligned TLAB chunk is counted twice. */ ThreadLocalAllocation.Descriptor tlab = ThreadLocalAllocation.getTlab(thread); - AlignedHeader alignedTlab = tlab.getAlignedChunk(); + Pointer start = tlab.getAlignedAllocationStart(SubstrateAllocationSnippets.TLAB_START_IDENTITY); Pointer top = tlab.getAllocationTop(SubstrateAllocationSnippets.TLAB_TOP_IDENTITY); - Pointer start = AlignedHeapChunk.getObjectsStart(alignedTlab); if (top.aboveThan(start)) { UnsignedWord usedTlabSize = top.subtract(start); @@ -787,7 +785,7 @@ private boolean printLocationInfo(Log log, Pointer ptr, boolean allowJavaHeapAcc if (AuxiliaryImageHeap.isPresent() && AuxiliaryImageHeap.singleton().containsObject(ptr)) { log.string("points into the auxiliary image heap"); return true; - } else if (printTlabInfo(log, ptr, CurrentIsolate.getCurrentThread())) { + } else if (TlabSupport.printTlabInfo(log, ptr, CurrentIsolate.getCurrentThread())) { return true; } @@ -805,7 +803,7 @@ private boolean printLocationInfo(Log log, Pointer ptr, boolean allowJavaHeapAcc * If we are not at a safepoint, then it is unsafe to access thread locals of another * thread as the IsolateThread could be freed at any time. */ - if (printTlabInfo(log, ptr)) { + if (TlabSupport.printTlabInfo(log, ptr)) { return true; } } @@ -824,53 +822,6 @@ boolean isInHeap(Pointer ptr) { return isInImageHeap(ptr) || youngGeneration.isInSpace(ptr) || oldGeneration.isInSpace(ptr); } - private static boolean printTlabInfo(Log log, Pointer ptr) { - for (IsolateThread thread = VMThreads.firstThreadUnsafe(); thread.isNonNull(); thread = VMThreads.nextThread(thread)) { - if (printTlabInfo(log, ptr, thread)) { - return true; - } - } - return false; - } - - private static boolean printTlabInfo(Log log, Pointer ptr, IsolateThread thread) { - ThreadLocalAllocation.Descriptor tlab = getTlabUnsafe(thread); - AlignedHeader aChunk = tlab.getAlignedChunk(); - while (aChunk.isNonNull()) { - if (HeapChunk.asPointer(aChunk).belowOrEqual(ptr) && ptr.belowThan(HeapChunk.getEndPointer(aChunk))) { - /* top may be null for a thread's current aligned allocation chunk. */ - boolean unusablePart = HeapChunk.getTopPointer(aChunk).isNonNull() && ptr.aboveOrEqual(HeapChunk.getTopPointer(aChunk)); - printTlabChunkInfo(log, thread, aChunk, "aligned", unusablePart); - return true; - } - aChunk = HeapChunk.getNext(aChunk); - } - - UnalignedHeader uChunk = tlab.getUnalignedChunk(); - while (uChunk.isNonNull()) { - if (HeapChunk.asPointer(uChunk).belowOrEqual(ptr) && ptr.belowThan(HeapChunk.getEndPointer(uChunk))) { - boolean unusablePart = ptr.aboveOrEqual(HeapChunk.getTopPointer(uChunk)); - printTlabChunkInfo(log, thread, uChunk, "unaligned", unusablePart); - return true; - } - uChunk = HeapChunk.getNext(uChunk); - } - - return false; - } - - private static void printTlabChunkInfo(Log log, IsolateThread thread, HeapChunk.Header chunk, String chunkType, boolean unusablePart) { - String unusable = unusablePart ? 
"unusable part of " : ""; - log.string("points into ").string(unusable).string(chunkType).string(" chunk ").zhex(chunk).spaces(1); - log.string("(TLAB of thread ").zhex(thread).string(")"); - } - - @Uninterruptible(reason = "This whole method is unsafe, so it is only uninterruptible to satisfy the checks.") - static Descriptor getTlabUnsafe(IsolateThread thread) { - assert SubstrateDiagnostics.isFatalErrorHandlingThread() : "can cause crashes, so it may only be used while printing diagnostics"; - return ThreadLocalAllocation.getTlab(thread); - } - @Override @Uninterruptible(reason = "Called during early startup.") public boolean verifyImageHeapMapping() { diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java index 86cda02b8a30..5c7adca1a575 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java @@ -40,6 +40,7 @@ import com.oracle.svm.core.config.ConfigurationValues; import com.oracle.svm.core.genscavenge.GCImpl.ChunkReleaser; import com.oracle.svm.core.genscavenge.remset.RememberedSet; +import com.oracle.svm.core.graal.snippets.SubstrateAllocationSnippets; import com.oracle.svm.core.heap.Heap; import com.oracle.svm.core.heap.ObjectHeader; import com.oracle.svm.core.heap.ObjectVisitor; @@ -549,7 +550,7 @@ private static boolean areEdenBytesCorrect() { /* Verify that there are no threads that have a TLAB. */ for (IsolateThread thread = VMThreads.firstThread(); thread.isNonNull(); thread = VMThreads.nextThread(thread)) { ThreadLocalAllocation.Descriptor tlab = ThreadLocalAllocation.getTlab(thread); - if (tlab.getAlignedChunk().isNonNull() || tlab.getUnalignedChunk().isNonNull()) { + if (tlab.getAlignedAllocationStart(SubstrateAllocationSnippets.TLAB_START_IDENTITY).isNonNull() || tlab.getUnalignedChunk().isNonNull()) { return false; } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java index b4ffafc007d4..c05194fd24b6 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java @@ -24,18 +24,27 @@ */ package com.oracle.svm.core.genscavenge; +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; +import static com.oracle.svm.core.genscavenge.TlabSupport.computeMinSizeOfNewTlab; +import static com.oracle.svm.core.genscavenge.TlabSupport.computeSizeOfNewTlab; +import static com.oracle.svm.core.genscavenge.TlabSupport.fillTlab; +import static com.oracle.svm.core.genscavenge.TlabSupport.recordSlowAllocation; +import static com.oracle.svm.core.genscavenge.TlabSupport.retireTlabBeforeAllocation; +import static com.oracle.svm.core.genscavenge.TlabSupport.shouldRetainTlab; import static com.oracle.svm.core.graal.snippets.SubstrateAllocationSnippets.TLAB_END_IDENTITY; +import static com.oracle.svm.core.graal.snippets.SubstrateAllocationSnippets.TLAB_START_IDENTITY; import static com.oracle.svm.core.graal.snippets.SubstrateAllocationSnippets.TLAB_TOP_IDENTITY; -import org.graalvm.nativeimage.CurrentIsolate; import 
org.graalvm.nativeimage.IsolateThread; import org.graalvm.nativeimage.Platform; import org.graalvm.nativeimage.Platforms; +import org.graalvm.nativeimage.StackValue; import org.graalvm.nativeimage.c.struct.RawField; import org.graalvm.nativeimage.c.struct.RawFieldOffset; import org.graalvm.nativeimage.c.struct.RawStructure; import org.graalvm.nativeimage.c.struct.SizeOf; import org.graalvm.nativeimage.c.struct.UniqueLocationIdentity; +import org.graalvm.nativeimage.c.type.WordPointer; import org.graalvm.word.LocationIdentity; import org.graalvm.word.Pointer; import org.graalvm.word.PointerBase; @@ -44,7 +53,6 @@ import com.oracle.svm.core.SubstrateGCOptions; import com.oracle.svm.core.Uninterruptible; import com.oracle.svm.core.config.ConfigurationValues; -import com.oracle.svm.core.genscavenge.AlignedHeapChunk.AlignedHeader; import com.oracle.svm.core.genscavenge.UnalignedHeapChunk.UnalignedHeader; import com.oracle.svm.core.genscavenge.graal.GenScavengeAllocationSupport; import com.oracle.svm.core.genscavenge.graal.nodes.FormatArrayNode; @@ -62,25 +70,22 @@ import com.oracle.svm.core.jfr.JfrTicks; import com.oracle.svm.core.jfr.SubstrateJVM; import com.oracle.svm.core.jfr.events.JfrAllocationEvents; -import com.oracle.svm.core.log.Log; import com.oracle.svm.core.snippets.KnownIntrinsics; import com.oracle.svm.core.thread.ContinuationSupport; -import com.oracle.svm.core.thread.VMOperation; -import com.oracle.svm.core.thread.VMThreads; import com.oracle.svm.core.threadlocal.FastThreadLocal; import com.oracle.svm.core.threadlocal.FastThreadLocalBytes; import com.oracle.svm.core.threadlocal.FastThreadLocalFactory; import com.oracle.svm.core.threadlocal.FastThreadLocalWord; +import com.oracle.svm.core.util.BasedOnJDKFile; import com.oracle.svm.core.util.UnsignedUtils; import com.oracle.svm.core.util.VMError; -import jdk.graal.compiler.api.replacements.Fold; import jdk.graal.compiler.replacements.AllocationSnippets.FillContent; import jdk.graal.compiler.word.Word; /** * Bump-pointer allocation from thread-local top and end Pointers. Many of these methods are called - * from allocation snippets, so they can not do anything fancy. q It happens that prefetch + * from allocation snippets, so they can not do anything fancy. It happens that prefetch * instructions access memory outside the TLAB. At the moment, this is not an issue as we only * support architectures where the prefetch instructions never cause a segfault, even if they try to * access memory that is not accessible. @@ -88,17 +93,12 @@ public final class ThreadLocalAllocation { @RawStructure public interface Descriptor extends PointerBase { - /** - * Current allocation chunk, and also the head of the list of aligned chunks that were - * allocated by the current thread (since the last collection, typically). - */ + @RawField - @UniqueLocationIdentity - AlignedHeader getAlignedChunk(); + Word getAlignedAllocationStart(LocationIdentity topIdentity); @RawField - @UniqueLocationIdentity - void setAlignedChunk(AlignedHeader chunk); + void setAlignedAllocationStart(Pointer start, LocationIdentity topIdentity); /** * List of unaligned chunks which have been allocated by the current thread (since the last @@ -138,11 +138,16 @@ static int offsetOfAllocationEnd() { } /* - * Stores the number of bytes that this thread allocated in the past on the Java heap. This - * excludes allocations that were done in the latest not yet retired {@link AlignedHeapChunk} of - * the TLAB. 
+ * Stores the number of bytes that this thread allocated in the past in aligned chunks on the + * Java heap. This excludes allocations that were done in the latest not yet retired TLAB. */ - public static final FastThreadLocalWord allocatedBytes = FastThreadLocalFactory.createWord("ThreadLocalAllocation.allocatedBytes"); + static final FastThreadLocalWord allocatedAlignedBytes = FastThreadLocalFactory.createWord("ThreadLocalAllocation.allocatedAlignedBytes"); + + /* + * Stores the number of bytes that this thread allocated in unaligned chunks in the past on the + * Java heap. + */ + private static final FastThreadLocalWord allocatedUnalignedBytes = FastThreadLocalFactory.createWord("ThreadLocalAllocation.allocatedUnalignedBytes"); /** * Don't read this value directly, use the {@link Uninterruptible} accessor methods instead. @@ -151,14 +156,10 @@ static int offsetOfAllocationEnd() { private static final FastThreadLocalBytes regularTLAB = FastThreadLocalFactory.createBytes(ThreadLocalAllocation::getTlabDescriptorSize, "ThreadLocalAllocation.regularTLAB") .setMaxOffset(FastThreadLocal.BYTE_OFFSET); + @Platforms(Platform.HOSTED_ONLY.class) private ThreadLocalAllocation() { } - @Fold - static Log log() { - return Log.noopLog(); - } - @Platforms(Platform.HOSTED_ONLY.class) private static int getTlabDescriptorSize() { return SizeOf.get(Descriptor.class); @@ -174,10 +175,20 @@ public static Descriptor getTlab(IsolateThread vmThread) { } @Uninterruptible(reason = "Accesses TLAB", callerMustBe = true) - private static Descriptor getTlab() { + static Descriptor getTlab() { return regularTLAB.getAddress(); } + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + static UnsignedWord getAllocatedBytes(IsolateThread thread) { + return allocatedAlignedBytes.getVolatile(thread).add(allocatedUnalignedBytes.getVolatile(thread)); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + static UnsignedWord getAlignedAllocatedBytes(IsolateThread thread) { + return allocatedAlignedBytes.getVolatile(thread); + } + /** * NOTE: Multiple threads may execute this method concurrently. All code that is transitively * reachable from this method may get executed as a side effect of an allocation slow path. 
To @@ -231,10 +242,9 @@ private static Object slowPathNewInstanceWithoutAllocating(DynamicHub hub, Unsig HeapImpl.exitIfAllocationDisallowed("ThreadLocalAllocation.slowPathNewInstanceWithoutAllocating", DynamicHub.toClass(hub).getName()); GCImpl.getGCImpl().maybeCollectOnAllocation(size); - AlignedHeader newTlab = HeapImpl.getChunkProvider().produceAlignedChunk(); - return allocateInstanceInNewTlab(hub, size, newTlab); + return allocateInstanceSlow(hub, size); } finally { - JfrAllocationEvents.emit(startTicks, hub, size, HeapParameters.getAlignedHeapChunkSize()); + JfrAllocationEvents.emit(startTicks, hub, size, getTlabSize()); DeoptTester.enableDeoptTesting(); } } @@ -269,7 +279,7 @@ public static Object slowPathNewArrayLikeObject(Word objectHeader, int length, b private static Object slowPathNewArrayLikeObject0(DynamicHub hub, int length, UnsignedWord size, byte[] podReferenceMap) { DeoptTester.disableDeoptTesting(); long startTicks = JfrTicks.elapsedTicks(); - UnsignedWord tlabSize = HeapParameters.getAlignedHeapChunkSize(); + UnsignedWord tlabSize = Word.zero(); try { HeapImpl.exitIfAllocationDisallowed("ThreadLocalAllocation.slowPathNewArrayOrPodWithoutAllocating", DynamicHub.toClass(hub).getName()); GCImpl.getGCImpl().maybeCollectOnAllocation(size); @@ -294,10 +304,10 @@ private static Object slowPathNewArrayLikeObject0(DynamicHub hub, int length, Un * uninterruptible method to be safe). */ Object array = allocateSmallArrayLikeObjectInCurrentTlab(hub, length, size, podReferenceMap); - if (array == null) { // We need a new chunk. - AlignedHeader newTlabChunk = HeapImpl.getChunkProvider().produceAlignedChunk(); - array = allocateSmallArrayLikeObjectInNewTlab(hub, length, size, newTlabChunk, podReferenceMap); + if (array == null) { + array = allocateArraySlow(hub, length, size, podReferenceMap); } + tlabSize = getTlabSize(); return array; } finally { JfrAllocationEvents.emit(startTicks, hub, size, tlabSize); @@ -307,7 +317,7 @@ private static Object slowPathNewArrayLikeObject0(DynamicHub hub, int length, Un @Uninterruptible(reason = "Holds uninitialized memory.") private static Object allocateInstanceInCurrentTlab(DynamicHub hub, UnsignedWord size) { - if (size.aboveThan(availableTlabMemory(getTlab()))) { + if (!fitsInTlab(getTlab(), size)) { return null; } assert size.equal(LayoutEncoding.getPureInstanceAllocationSize(hub.getLayoutEncoding())); @@ -316,15 +326,15 @@ private static Object allocateInstanceInCurrentTlab(DynamicHub hub, UnsignedWord } @Uninterruptible(reason = "Holds uninitialized memory.") - private static Object allocateInstanceInNewTlab(DynamicHub hub, UnsignedWord size, AlignedHeader newTlabChunk) { + private static Object allocateInstanceSlow(DynamicHub hub, UnsignedWord size) { assert size.equal(LayoutEncoding.getPureInstanceAllocationSize(hub.getLayoutEncoding())); - Pointer memory = allocateRawMemoryInNewTlab(size, newTlabChunk); + Pointer memory = allocateRawMemory(size); return FormatObjectNode.formatObject(memory, DynamicHub.toClass(hub), false, FillContent.WITH_ZEROES, true); } @Uninterruptible(reason = "Holds uninitialized memory.") private static Object allocateSmallArrayLikeObjectInCurrentTlab(DynamicHub hub, int length, UnsignedWord size, byte[] podReferenceMap) { - if (size.aboveThan(availableTlabMemory(getTlab()))) { + if (!fitsInTlab(getTlab(), size)) { return null; } Pointer memory = allocateRawMemoryInTlab(size, getTlab()); @@ -332,11 +342,74 @@ private static Object allocateSmallArrayLikeObjectInCurrentTlab(DynamicHub hub, } @Uninterruptible(reason = 
"Holds uninitialized memory.") - private static Object allocateSmallArrayLikeObjectInNewTlab(DynamicHub hub, int length, UnsignedWord size, AlignedHeader newTlabChunk, byte[] podReferenceMap) { - Pointer memory = allocateRawMemoryInNewTlab(size, newTlabChunk); + private static Object allocateArraySlow(DynamicHub hub, int length, UnsignedWord size, byte[] podReferenceMap) { + Pointer memory = allocateRawMemory(size); return formatArrayLikeObject(memory, hub, length, false, FillContent.WITH_ZEROES, podReferenceMap); } + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/memAllocator.cpp#L333-L341") + @Uninterruptible(reason = "Holds uninitialized memory.") + private static Pointer allocateRawMemory(UnsignedWord size) { + Pointer memory = allocateRawMemoryInTlabSlow(size); + if (memory.isNonNull()) { + return memory; + } + return allocateRawMemoryOutsideTlab(size); + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+8/src/hotspot/share/gc/shared/memAllocator.cpp#L256-L318") + @Uninterruptible(reason = "Holds uninitialized memory.") + private static Pointer allocateRawMemoryInTlabSlow(UnsignedWord size) { + ThreadLocalAllocation.Descriptor tlab = getTlab(); + + /* + * Retain tlab and allocate object as an heap allocation if the amount free in the tlab is + * too large to discard. + */ + if (shouldRetainTlab(tlab)) { + recordSlowAllocation(); + return Word.nullPointer(); + } + + /* + * Discard tlab and allocate a new one. To minimize fragmentation, the last tlab may be + * smaller than the rest. + */ + UnsignedWord newTlabSize = computeSizeOfNewTlab(size); + + retireTlabBeforeAllocation(); + + if (newTlabSize.equal(0)) { + return Word.nullPointer(); + } + + /* + * Allocate a new TLAB requesting newTlabSize. Any size between minimal and newTlabSize is + * accepted. 
+ */ + + UnsignedWord computedMinSize = computeMinSizeOfNewTlab(size); + + WordPointer allocatedTlabSize = StackValue.get(WordPointer.class); + Pointer memory = YoungGeneration.getHeapAllocation().allocateNewTlab(computedMinSize, newTlabSize, allocatedTlabSize); + if (memory.isNull()) { + assert Word.unsigned(0).equal(allocatedTlabSize.read()) : "Allocation failed, but actual size was updated."; + return Word.nullPointer(); + } + assert Word.unsigned(0).notEqual(allocatedTlabSize.read()) : "Allocation succeeded but actual size not updated."; + + fillTlab(memory, memory.add(size), allocatedTlabSize); + return memory; + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/memAllocator.cpp#L240-L251") + @Uninterruptible(reason = "Holds uninitialized memory.") + private static Pointer allocateRawMemoryOutsideTlab(UnsignedWord size) { + Pointer memory = YoungGeneration.getHeapAllocation().allocateOutsideTlab(size); + allocatedAlignedBytes.set(allocatedAlignedBytes.get().add(size)); + return memory; + } + @Uninterruptible(reason = "Holds uninitialized memory, modifies TLAB") private static Object allocateLargeArrayLikeObjectInNewTlab(DynamicHub hub, int length, UnsignedWord size, UnalignedHeader newTlabChunk, boolean needsZeroing, byte[] podReferenceMap) { ThreadLocalAllocation.Descriptor tlab = getTlab(); @@ -344,7 +417,7 @@ private static Object allocateLargeArrayLikeObjectInNewTlab(DynamicHub hub, int HeapChunk.setNext(newTlabChunk, tlab.getUnalignedChunk()); tlab.setUnalignedChunk(newTlabChunk); - allocatedBytes.set(allocatedBytes.get().add(size)); + allocatedUnalignedBytes.set(allocatedUnalignedBytes.get().add(size)); HeapImpl.getAccounting().increaseEdenUsedBytes(size); Pointer memory = UnalignedHeapChunk.allocateMemory(newTlabChunk, size); @@ -374,19 +447,9 @@ private static Object formatArrayLikeObject(Pointer memory, DynamicHub hub, int return FormatArrayNode.formatArray(memory, clazz, length, false, unaligned, fillContent, true); } - @Uninterruptible(reason = "Returns uninitialized memory, modifies TLAB", callerMustBe = true) - private static Pointer allocateRawMemoryInNewTlab(UnsignedWord size, AlignedHeader newTlabChunk) { - assert DeoptTester.enabled() || availableTlabMemory(getTlab()).belowThan(size) : "Slowpath allocation was used even though TLAB had sufficient space"; - - Descriptor tlab = retireCurrentAllocationChunk(CurrentIsolate.getCurrentThread()); - registerNewAllocationChunk(tlab, newTlabChunk); - - return allocateRawMemoryInTlab(size, tlab); - } - @Uninterruptible(reason = "Returns uninitialized memory, modifies TLAB", callerMustBe = true) private static Pointer allocateRawMemoryInTlab(UnsignedWord size, Descriptor tlab) { - assert size.belowOrEqual(availableTlabMemory(tlab)) : "Not enough TLAB space for allocation"; + assert fitsInTlab(tlab, size) : "Not enough TLAB space for allocation"; // The (uninterruptible) caller has ensured that we have a TLAB. 
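+ // Bump-pointer allocation: reserve [top, top + size) by advancing the TLAB's top pointer.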
Pointer top = KnownIntrinsics.nonNullPointer(tlab.getAllocationTop(TLAB_TOP_IDENTITY)); @@ -395,15 +458,13 @@ private static Pointer allocateRawMemoryInTlab(UnsignedWord size, Descriptor tla } @Uninterruptible(reason = "Accesses TLAB") - private static UnsignedWord availableTlabMemory(Descriptor allocator) { - Pointer top = allocator.getAllocationTop(TLAB_TOP_IDENTITY); - Pointer end = allocator.getAllocationEnd(TLAB_END_IDENTITY); + private static boolean fitsInTlab(Descriptor tlab, UnsignedWord size) { + Pointer top = tlab.getAllocationTop(TLAB_TOP_IDENTITY); + Pointer end = tlab.getAllocationEnd(TLAB_END_IDENTITY); assert top.belowOrEqual(end); - if (top.isNull() || end.isNull()) { - return Word.unsigned(0); - } - return end.subtract(top); + Pointer newTop = top.add(size); + return newTop.belowOrEqual(end); } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) @@ -420,106 +481,22 @@ private static void guaranteeZeroed(Pointer memory, UnsignedWord size) { } } - static void disableAndFlushForAllThreads() { - VMOperation.guaranteeInProgressAtSafepoint("ThreadLocalAllocation.disableAndFlushForAllThreads"); - - for (IsolateThread vmThread = VMThreads.firstThread(); vmThread.isNonNull(); vmThread = VMThreads.nextThread(vmThread)) { - disableAndFlushForThread(vmThread); + private static void sampleSlowPathAllocation(Object obj, UnsignedWord allocatedSize, int arrayLength) { + if (HasJfrSupport.get()) { + SubstrateJVM.getOldObjectProfiler().sample(obj, allocatedSize, arrayLength); } } @Uninterruptible(reason = "Accesses TLAB") - static void disableAndFlushForThread(IsolateThread vmThread) { - retireTlabToEden(vmThread); - } - - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - static void tearDown() { - // no other thread is alive, so it is always safe to access the first thread - IsolateThread thread = VMThreads.firstThreadUnsafe(); - VMError.guarantee(VMThreads.nextThread(thread).isNull(), "Other isolate threads are still active"); - freeHeapChunks(getTlab(thread)); - } - - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - private static void freeHeapChunks(Descriptor tlab) { - HeapChunkProvider.freeAlignedChunkList(tlab.getAlignedChunk()); - HeapChunkProvider.freeUnalignedChunkList(tlab.getUnalignedChunk()); - } - - @Uninterruptible(reason = "Accesses TLAB") - static void suspendInCurrentThread() { - retireCurrentAllocationChunk(CurrentIsolate.getCurrentThread()); - } - - @Uninterruptible(reason = "Accesses TLAB") - private static void retireTlabToEden(IsolateThread thread) { - VMThreads.guaranteeOwnsThreadMutex("Otherwise, we wouldn't be allowed to access the space.", true); - - Descriptor tlab = retireCurrentAllocationChunk(thread); - AlignedHeader alignedChunk = tlab.getAlignedChunk(); - UnalignedHeader unalignedChunk = tlab.getUnalignedChunk(); - tlab.setAlignedChunk(Word.nullPointer()); - tlab.setUnalignedChunk(Word.nullPointer()); - - Space eden = HeapImpl.getHeapImpl().getYoungGeneration().getEden(); - while (alignedChunk.isNonNull()) { - AlignedHeader next = HeapChunk.getNext(alignedChunk); - HeapChunk.setNext(alignedChunk, Word.nullPointer()); - eden.appendAlignedHeapChunk(alignedChunk); - alignedChunk = next; - } - - while (unalignedChunk.isNonNull()) { - UnalignedHeader next = HeapChunk.getNext(unalignedChunk); - HeapChunk.setNext(unalignedChunk, Word.nullPointer()); - eden.appendUnalignedHeapChunk(unalignedChunk); - unalignedChunk = next; - } - } - - @Uninterruptible(reason = 
"Modifies TLAB") - private static void registerNewAllocationChunk(Descriptor tlab, AlignedHeader newChunk) { - assert tlab.getAllocationTop(TLAB_TOP_IDENTITY).isNull(); - assert tlab.getAllocationEnd(TLAB_END_IDENTITY).isNull(); - - HeapChunk.setNext(newChunk, tlab.getAlignedChunk()); - tlab.setAlignedChunk(newChunk); - HeapImpl.getAccounting().increaseEdenUsedBytes(HeapParameters.getAlignedHeapChunkSize()); - - tlab.setAllocationTop(HeapChunk.getTopPointer(newChunk), TLAB_TOP_IDENTITY); - tlab.setAllocationEnd(HeapChunk.getEndPointer(newChunk), TLAB_END_IDENTITY); - HeapChunk.setTopPointer(newChunk, Word.nullPointer()); - } - - @Uninterruptible(reason = "Modifies and returns TLAB", callerMustBe = true) - private static Descriptor retireCurrentAllocationChunk(IsolateThread thread) { - Descriptor tlab = getTlab(thread); - Pointer allocationTop = tlab.getAllocationTop(TLAB_TOP_IDENTITY); - if (allocationTop.isNonNull()) { - AlignedHeader alignedChunk = tlab.getAlignedChunk(); + private static UnsignedWord getTlabSize() { + Descriptor tlab = getTlab(); + UnsignedWord allocationEnd = tlab.getAllocationEnd(TLAB_END_IDENTITY); + UnsignedWord allocationStart = tlab.getAlignedAllocationStart(TLAB_START_IDENTITY); - assert HeapChunk.getTopPointer(alignedChunk).isNull(); - assert HeapChunk.getEndPointer(alignedChunk).equal(tlab.getAllocationEnd(TLAB_END_IDENTITY)); + assert allocationStart.belowThan(allocationEnd) || (allocationStart.equal(0) && allocationEnd.equal(0)); + UnsignedWord tlabSize = allocationEnd.subtract(allocationStart); - /* - * While the aligned chunk is the allocation chunk its top value is always 'null' and it - * doesn't reflect the upper limit of allocated memory. The 'top' is stored in the TLAB - * and only set in the top aligned chunk when it is retired. - */ - HeapChunk.setTopPointer(alignedChunk, allocationTop); - tlab.setAllocationTop(Word.nullPointer(), TLAB_TOP_IDENTITY); - tlab.setAllocationEnd(Word.nullPointer(), TLAB_END_IDENTITY); - - UnsignedWord usedTlabSize = HeapChunk.getTopPointer(alignedChunk).subtract(AlignedHeapChunk.getObjectsStart(alignedChunk)); - allocatedBytes.set(thread, allocatedBytes.get(thread).add(usedTlabSize)); - } - return tlab; - } - - private static void sampleSlowPathAllocation(Object obj, UnsignedWord allocatedSize, int arrayLength) { - if (HasJfrSupport.get()) { - SubstrateJVM.getOldObjectProfiler().sample(obj, allocatedSize, arrayLength); - } + assert UnsignedUtils.isAMultiple(tlabSize, Word.unsigned(ConfigurationValues.getObjectLayout().getAlignment())); + return tlabSize; } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/TlabSupport.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/TlabSupport.java new file mode 100644 index 000000000000..dfc79749e3bb --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/TlabSupport.java @@ -0,0 +1,523 @@ +/* + * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.genscavenge; + +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; +import static com.oracle.svm.core.genscavenge.ThreadLocalAllocation.Descriptor; +import static com.oracle.svm.core.genscavenge.ThreadLocalAllocation.allocatedAlignedBytes; +import static com.oracle.svm.core.genscavenge.ThreadLocalAllocation.getTlab; +import static com.oracle.svm.core.graal.snippets.SubstrateAllocationSnippets.TLAB_END_IDENTITY; +import static com.oracle.svm.core.graal.snippets.SubstrateAllocationSnippets.TLAB_START_IDENTITY; +import static com.oracle.svm.core.graal.snippets.SubstrateAllocationSnippets.TLAB_TOP_IDENTITY; + +import org.graalvm.nativeimage.CurrentIsolate; +import org.graalvm.nativeimage.IsolateThread; +import org.graalvm.nativeimage.c.struct.SizeOf; +import org.graalvm.nativeimage.c.type.WordPointer; +import org.graalvm.word.Pointer; +import org.graalvm.word.UnsignedWord; + +import com.oracle.svm.core.SubstrateDiagnostics; +import com.oracle.svm.core.SubstrateGCOptions; +import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.config.ConfigurationValues; +import com.oracle.svm.core.config.ObjectLayout; +import com.oracle.svm.core.graal.snippets.SubstrateAllocationSnippets; +import com.oracle.svm.core.jdk.UninterruptibleUtils; +import com.oracle.svm.core.log.Log; +import com.oracle.svm.core.thread.VMOperation; +import com.oracle.svm.core.thread.VMThreads; +import com.oracle.svm.core.threadlocal.FastThreadLocalBytes; +import com.oracle.svm.core.threadlocal.FastThreadLocalFactory; +import com.oracle.svm.core.threadlocal.FastThreadLocalInt; +import com.oracle.svm.core.threadlocal.FastThreadLocalWord; +import com.oracle.svm.core.util.BasedOnJDKFile; +import com.oracle.svm.core.util.UnsignedUtils; +import com.oracle.svm.core.util.VMError; + +import jdk.graal.compiler.api.replacements.Fold; +import jdk.graal.compiler.word.Word; + +/** + * Provides methods for initializing, calculating the size and retiring TLABs used in + * {@link ThreadLocalAllocation}. Additionally, it provides the functionality to resize the TLAB + * after a GC, based on {@link #allocatedBytesAvg}, the average a thread allocated in TLABs. + * Therefore, different threads may have TLABs of different size. + *

+ * Below is an example of the dynamic resizing of the TLAB. One thread allocates lots of objects + * and the other thread allocates nothing. Between GC 23 and GC 24, the allocation behaviour of these + * two threads switches. The allocation average and the TLAB size adapt to the new allocation + * behaviour. + * + *

+ * +-----+---------------------------------------++---------------------------------------+
+ * | #GC | Thread 1                              || Thread 2                              |
+ * |     | alloc. bytes | alloc. avg | TLAB size || alloc. bytes | alloc. avg | TLAB size |
+ * +-----+--------------+------------+-----------++--------------+------------+-----------+
+ * |  22 |       3,54MB |     3,25MB |   66,66kB ||           0B |   402,35kB |    8,05kB |
+ * |  23 |       3,55MB |     3,36MB |   68,77kB ||           0B |   261,53kB |    5,23kB |
+ * +-----+--------------+------------+-----------++--------------+------------+-----------+ <-- allocation behaviour
+ * |  24 |       0,27MB |     2,28MB |   46,62kB ||       3,20MB |     1,29MB |   26,37kB |     switched
+ * |  25 |           0B |     1,48MB |   30,30kB ||       3,51MB |     2,06MB |   42,29kB |
+ * |  26 |           0B |   984,75kB |   19,70kB ||       3,52MB |     2,58MB |   52,75kB |
+ * |  27 |           0B |   640,09kB |   12,80kB ||       3,54MB |     2,91MB |   59,64kB |
+ * |  28 |           0B |   416,06kB |    8,32kB ||       3,54MB |     3,13MB |   64,16kB |
+ * |  29 |           0B |   270,44kB |    5,41kB ||       3,55MB |     3,28MB |   67,14kB |
+ * +-----+--------------+------------+-----------++--------------+------------+-----------+
+ * 
+ * + * A thread allocating a very large amount of memory will also have a high + * {@link #allocatedBytesAvg}. If such a thread later changes its allocation behaviour and only + * allocates a small amount of memory the {@link #allocatedBytesAvg} starts decreasing with the next + * GC. But the {@link #desiredSize} will only change after the {@link #allocatedBytesAvg} decreased + * enough, which may take a few GCs. + */ +public class TlabSupport { + + /* + * Constants for tuning the resizing of TLABs. These constants match certain option values in + * HotSpot. + */ + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/tlab_globals.hpp#L65-L67")// + private static final long TLAB_ALLOCATION_WEIGHT = 35L; + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/tlab_globals.hpp#L69-L76")// + private static final long TLAB_WASTE_TARGET_PERCENT = 1L; + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/tlab_globals.hpp#L78-L80")// + private static final long TLAB_REFILL_WASTE_FRACTION = 64L; + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/tlab_globals.hpp#L82-L85")// + private static final long TLAB_WASTE_INCREMENT = 4; + + // The desired size of the TLAB, including the reserve for filling the unused memory. + private static final FastThreadLocalWord desiredSize = FastThreadLocalFactory.createWord("TlabSupport.desiredSize"); + + private static final FastThreadLocalWord tlabAllocatedAlignedBytesBeforeLastGC = FastThreadLocalFactory.createWord("TlabSupport.tlabAllocatedAlignedBytesBeforeLastGC"); + + private static final FastThreadLocalInt numberOfRefills = FastThreadLocalFactory.createInt("TlabSupport.numberOfRefills"); + private static final FastThreadLocalInt refillWaste = FastThreadLocalFactory.createInt("TlabSupport.refillWaste"); + private static final FastThreadLocalInt gcWaste = FastThreadLocalFactory.createInt("TlabSupport.gcWaste"); + + // Average of allocated bytes in TLABs of this thread. + private static final FastThreadLocalBytes allocatedBytesAvg = FastThreadLocalFactory + .createBytes(() -> SizeOf.get(AdaptiveWeightedAverageStruct.Data.class), "TlabSupport.allocatedBytesAvg"); + + // Hold onto the TLAB if availableTlabMemory() is larger than this. + private static final FastThreadLocalWord refillWasteLimit = FastThreadLocalFactory.createWord("TlabSupport.refillWasteLimit"); + + private static final FastThreadLocalInt slowAllocations = FastThreadLocalFactory.createInt("TlabSupport.slowAllocations"); + + // Expected number of refills between GCs. + private static UnsignedWord targetRefills = Word.unsigned(1); + + private static boolean initialized; + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+8/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp#L226-L267") + @Uninterruptible(reason = "Accesses TLAB") + public static void startupInitialization() { + if (!initialized) { + TlabOptionCache.singleton().cacheOptionValues(); + + // Assuming each thread's active tlab is, on average, 1/2 full at a GC. + targetRefills = Word.unsigned(100 / (2 * TLAB_WASTE_TARGET_PERCENT)); + // The value has to be at least one as it is used in a division. 
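To make the sizing policy described above concrete: at each GC the bytes a thread allocated in TLABs since the previous GC are sampled into an exponentially weighted average (weight 35%, per TLAB_ALLOCATION_WEIGHT), and the desired TLAB size is that average divided by the expected number of refills per GC (50, i.e. 100 / (2 * TLAB_WASTE_TARGET_PERCENT)), clamped to the configured minimum and maximum. A minimal plain-Java sketch with hypothetical names (alignment and the FastThreadLocal/UnsignedWord plumbing are omitted):

final class TlabSizingSketch {
    static final double TLAB_ALLOCATION_WEIGHT = 35.0;   // percent, as in the patch
    static final long TLAB_WASTE_TARGET_PERCENT = 1;
    static final long TARGET_REFILLS = Math.max(1, 100 / (2 * TLAB_WASTE_TARGET_PERCENT)); // = 50

    double allocatedBytesAvg; // weighted average of TLAB bytes allocated per GC cycle

    /** Called once per GC with the bytes this thread allocated in TLABs since the last GC. */
    void sample(long allocatedBytesSinceLastGc) {
        allocatedBytesAvg += (TLAB_ALLOCATION_WEIGHT / 100.0) * (allocatedBytesSinceLastGc - allocatedBytesAvg);
    }

    /** Desired TLAB size: spread the average allocation over the expected number of refills. */
    long desiredSize(long minTlabSize, long maxTlabSize) {
        long newSize = (long) (allocatedBytesAvg / TARGET_REFILLS);
        return Math.max(minTlabSize, Math.min(newSize, maxTlabSize));
    }
}

The choice of 50 target refills follows from the half-full assumption in startupInitialization: if the active TLAB is on average half full when a GC happens, the wasted space per GC is roughly half of one TLAB, which with 50 refills per GC amounts to about 1% of the allocated bytes, matching TLAB_WASTE_TARGET_PERCENT.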
+ targetRefills = UnsignedUtils.max(targetRefills, Word.unsigned(1)); + + initialized = true; + } + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp#L208-L225") + @Uninterruptible(reason = "Accesses TLAB") + public static void initialize(IsolateThread thread) { + initialize(getTlab(thread), Word.nullPointer(), Word.nullPointer(), Word.nullPointer()); + desiredSize.set(thread, initialDesiredSize()); + + AdaptiveWeightedAverageStruct.initialize(allocatedBytesAvg.getAddress(thread), TLAB_ALLOCATION_WEIGHT); + + refillWasteLimit.set(initialRefillWasteLimit()); + + resetStatistics(thread); + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp#L183-L195") + @Uninterruptible(reason = "Accesses TLAB") + static void fillTlab(Pointer start, Pointer top, WordPointer newSize) { + numberOfRefills.set(numberOfRefills.get() + 1); + + Pointer hardEnd = start.add(newSize.read()); + Pointer end = hardEnd.subtract(getFillerObjectSize()); + + assert top.belowOrEqual(end) : "size too small"; + + initialize(getTlab(), start, top, end); + + // Reset amount of internal fragmentation + refillWasteLimit.set(initialRefillWasteLimit()); + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp#L150-L153") + @Uninterruptible(reason = "Accesses TLAB") + static void retireTlabBeforeAllocation() { + long availableTlabMemory = availableTlabMemory(getTlab()).rawValue(); + refillWaste.set(refillWaste.get() + UninterruptibleUtils.NumUtil.safeToInt(availableTlabMemory)); + retireCurrentTlab(CurrentIsolate.getCurrentThread(), false); + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp#L137-L148") + @Uninterruptible(reason = "Accesses TLAB") + private static void retireCurrentTlab(IsolateThread thread, boolean calculateStats) { + ThreadLocalAllocation.Descriptor tlab = getTlab(thread); + + if (tlab.getAllocationEnd(TLAB_END_IDENTITY).isNonNull()) { + assert checkInvariants(tlab); + + UnsignedWord usedTlabSize = getUsedTlabSize(tlab); + allocatedAlignedBytes.set(thread, allocatedAlignedBytes.get(thread).add(usedTlabSize)); + insertFiller(tlab); + initialize(tlab, Word.nullPointer(), Word.nullPointer(), Word.nullPointer()); + } + + /* + * Collect statistics after the TLAB has been retired. Otherwise, the current TLAB is + * excluded from the statistics. 
+ */ + if (calculateStats) { + accumulateAndResetStatistics(thread); + } + } + + @Uninterruptible(reason = "Accesses TLAB") + private static UnsignedWord getUsedTlabSize(Descriptor tlab) { + UnsignedWord start = tlab.getAlignedAllocationStart(TLAB_START_IDENTITY); + UnsignedWord top = tlab.getAllocationTop(TLAB_TOP_IDENTITY); + + assert top.aboveOrEqual(start); + return top.subtract(start); + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp#L197-L206") + @Uninterruptible(reason = "Accesses TLAB") + private static void initialize(ThreadLocalAllocation.Descriptor tlab, Pointer start, Pointer top, Pointer end) { + VMError.guarantee(top.belowOrEqual(end), "top greater end during initialization"); + + tlab.setAlignedAllocationStart(start, TLAB_START_IDENTITY); + tlab.setAllocationTop(top, TLAB_TOP_IDENTITY); + tlab.setAllocationEnd(end, TLAB_END_IDENTITY); + + assert checkInvariants(tlab); + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp#L90") + @Uninterruptible(reason = "Accesses TLAB") + private static boolean checkInvariants(Descriptor tlab) { + return tlab.getAllocationTop(TLAB_TOP_IDENTITY).aboveOrEqual(tlab.getAlignedAllocationStart(TLAB_START_IDENTITY)) && + tlab.getAllocationTop(TLAB_TOP_IDENTITY).belowOrEqual(tlab.getAllocationEnd(TLAB_END_IDENTITY)); + } + + @Uninterruptible(reason = "Accesses TLAB") + static void suspendAllocationInCurrentThread() { + /* The statistics for this thread will be updated later. */ + retireCurrentTlab(CurrentIsolate.getCurrentThread(), false); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + static void tearDown() { + // no other thread is alive, so it is always safe to access the first thread + IsolateThread thread = VMThreads.firstThreadUnsafe(); + VMError.guarantee(VMThreads.nextThread(thread).isNull(), "Other isolate threads are still active"); + + // Aligned chunks are handled in HeapAllocation. 
+ HeapChunkProvider.freeUnalignedChunkList(getTlab(thread).getUnalignedChunk()); + } + + static void disableAndFlushForAllThreads() { + VMOperation.guaranteeInProgressAtSafepoint("TlabSupport.disableAndFlushForAllThreads"); + + for (IsolateThread vmThread = VMThreads.firstThread(); vmThread.isNonNull(); vmThread = VMThreads.nextThread(vmThread)) { + disableAndFlushForThread(vmThread); + } + } + + @Uninterruptible(reason = "Accesses TLAB") + static void disableAndFlushForThread(IsolateThread vmThread) { + retireTlabToEden(vmThread); + } + + @Uninterruptible(reason = "Accesses TLAB") + private static void retireTlabToEden(IsolateThread thread) { + VMThreads.guaranteeOwnsThreadMutex("Otherwise, we wouldn't be allowed to access the space.", true); + + retireCurrentTlab(thread, true); + + Descriptor tlab = getTlab(thread); + UnalignedHeapChunk.UnalignedHeader unalignedChunk = tlab.getUnalignedChunk(); + tlab.setUnalignedChunk(Word.nullPointer()); + + Space eden = HeapImpl.getHeapImpl().getYoungGeneration().getEden(); + + while (unalignedChunk.isNonNull()) { + UnalignedHeapChunk.UnalignedHeader next = HeapChunk.getNext(unalignedChunk); + HeapChunk.setNext(unalignedChunk, Word.nullPointer()); + eden.appendUnalignedHeapChunk(unalignedChunk); + unalignedChunk = next; + } + } + + @Uninterruptible(reason = "Accesses TLAB") + static UnsignedWord availableTlabMemory(Descriptor tlab) { + Pointer top = tlab.getAllocationTop(TLAB_TOP_IDENTITY); + Pointer end = tlab.getAllocationEnd(TLAB_END_IDENTITY); + assert top.belowOrEqual(end); + + if (top.isNull() || end.isNull()) { + return Word.unsigned(0); + } + return end.subtract(top); + } + + /** + * If the minimum object size is greater than {@link ObjectLayout#getAlignment()}, we can end up + * with a shard at the end of the buffer that's smaller than the smallest object (see + * {@link com.oracle.svm.core.heap.FillerObject}). We can't allow that because the buffer must + * look like it's full of objects when we retire it, so we make sure we have enough space for a + * {@link com.oracle.svm.core.heap.FillerArray}) object. + */ + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/collectedHeap.cpp#L253-L259") + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + private static UnsignedWord getFillerObjectSize() { + UnsignedWord minSize = FillerObjectUtil.objectMinSize(); + return minSize.aboveThan(ConfigurationValues.getObjectLayout().getAlignment()) ? 
minSize : Word.zero(); + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp#L119-L124") + @Uninterruptible(reason = "Accesses TLAB") + private static void insertFiller(ThreadLocalAllocation.Descriptor tlab) { + assert tlab.getAllocationTop(TLAB_TOP_IDENTITY).isNonNull() : "Must not be retired"; + assert tlab.getAllocationEnd(TLAB_END_IDENTITY).isNonNull() : "Must not be retired"; + + Pointer top = tlab.getAllocationTop(TLAB_TOP_IDENTITY); + UnsignedWord hardEnd = tlab.getAllocationEnd(TLAB_END_IDENTITY).add(getFillerObjectSize()); + UnsignedWord size = hardEnd.subtract(top); + + if (top.belowThan(hardEnd)) { + FillerObjectUtil.writeFillerObjectAt(top, size); + } + + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp#L175-L181") + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + private static void resetStatistics(IsolateThread thread) { + numberOfRefills.set(thread, 0); + refillWaste.set(thread, 0); + gcWaste.set(thread, 0); + slowAllocations.set(thread, 0); + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp#L270-L289") + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + private static UnsignedWord initialDesiredSize() { + UnsignedWord initSize; + + if (TlabOptionCache.singleton().getTlabSize() > 0) { + long tlabSize = TlabOptionCache.singleton().getTlabSize(); + initSize = Word.unsigned(ConfigurationValues.getObjectLayout().alignUp(tlabSize)); + } else { + long initialTLABSize = TlabOptionCache.singleton().getInitialTLABSize(); + initSize = Word.unsigned(ConfigurationValues.getObjectLayout().alignUp(initialTLABSize)); + } + long minTlabSize = TlabOptionCache.singleton().getMinTlabSize(); + return UnsignedUtils.clamp(initSize, Word.unsigned(minTlabSize), maxSize()); + } + + /** + * Compute the next tlab size using expected allocation amount. 
+ */ + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+11/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp#L154-L172") + public static void resize(IsolateThread thread) { + assert SubstrateGCOptions.TlabOptions.ResizeTLAB.getValue(); + assert VMOperation.isGCInProgress(); + + UnsignedWord allocatedAvg = Word.unsigned((long) AdaptiveWeightedAverageStruct.getAverage(allocatedBytesAvg.getAddress(thread))); + UnsignedWord newSize = allocatedAvg.unsignedDivide(targetRefills); + + long minTlabSize = TlabOptionCache.singleton().getMinTlabSize(); + newSize = UnsignedUtils.clamp(newSize, Word.unsigned(minTlabSize), maxSize()); + UnsignedWord alignedNewSize = Word.unsigned(ConfigurationValues.getObjectLayout().alignUp(newSize.rawValue())); + + if (SerialAndEpsilonGCOptions.PrintTLAB.getValue()) { + Log.log().string("TLAB new size: thread ").zhex(thread) + .string(" target refills: ").unsigned(targetRefills) + .string(" alloc avg.: ").unsigned(allocatedAvg) + .string(" desired size: ").hex(desiredSize.get(thread)) + .string(" -> ").hex(alignedNewSize).newline(); + } + + desiredSize.set(thread, alignedNewSize); + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp#L64") + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + private static UnsignedWord initialRefillWasteLimit() { + return desiredSize.get().unsignedDivide(Word.unsigned(TLAB_REFILL_WASTE_FRACTION)); + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+8/src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp#L54-L71") + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + static UnsignedWord computeSizeOfNewTlab(UnsignedWord allocationSize) { + assert UnsignedUtils.isAMultiple(allocationSize, Word.unsigned(ConfigurationValues.getObjectLayout().getAlignment())); + + /* + * Compute the size for the new TLAB. The "last" TLAB may be smaller to reduce + * fragmentation. unsafeMaxTlabAlloc is just a hint. + */ + UnsignedWord availableSize = YoungGeneration.getHeapAllocation().unsafeMaxTlabAllocSize(); + UnsignedWord newTlabSize = UnsignedUtils.min(UnsignedUtils.min(availableSize, desiredSize.get().add(allocationSize)), maxSize()); + + if (newTlabSize.belowThan(computeMinSizeOfNewTlab(allocationSize))) { + // If there isn't enough room for the allocation, return failure. 
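The refill size computation in computeSizeOfNewTlab/computeMinSizeOfNewTlab boils down to: take the smallest of the currently available space, the desired size plus the triggering allocation, and the maximum TLAB size, and give up (return zero) if that is below the minimum viable TLAB, i.e. the aligned allocation plus the filler-object reserve, but at least MinTlabSize. A plain-Java sketch with hypothetical parameter names:

// Returns the size for the new TLAB, or 0 if the refill should be skipped
// (in which case the caller allocates the object outside the TLAB).
static long computeNewTlabSize(long allocationSize, long availableSize, long desiredSize,
                long maxTlabSize, long minTlabSize, long fillerReserve) {
    long newTlabSize = Math.min(Math.min(availableSize, desiredSize + allocationSize), maxTlabSize);
    long minNewTlabSize = Math.max(allocationSize + fillerReserve, minTlabSize);
    if (newTlabSize < minNewTlabSize) {
        return 0;
    }
    return newTlabSize;
}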
+ return Word.zero(); + } + return newTlabSize; + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp#L73-L77") + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + static UnsignedWord computeMinSizeOfNewTlab(UnsignedWord allocationSize) { + UnsignedWord alignedSize = Word.unsigned(ConfigurationValues.getObjectLayout().alignUp(allocationSize.rawValue())); + UnsignedWord sizeWithReserve = alignedSize.add(getFillerObjectSize()); + long minTlabSize = TlabOptionCache.singleton().getMinTlabSize(); + + return UnsignedUtils.max(sizeWithReserve, Word.unsigned(minTlabSize)); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + static boolean shouldRetainTlab(Descriptor tlab) { + return availableTlabMemory(tlab).aboveThan(refillWasteLimit.get()); + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+11/src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp#L79-L94") + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + static void recordSlowAllocation() { + /* + * Raise size required to bypass TLAB next time. Else there's a risk that a thread that + * repeatedly allocates objects of one size will get stuck on this slow path. + */ + refillWasteLimit.set(refillWasteLimit.get().add(Word.unsigned(TLAB_WASTE_INCREMENT))); + slowAllocations.set(slowAllocations.get() + 1); + } + + @Fold + static UnsignedWord maxSize() { + return AlignedHeapChunk.getUsableSizeForObjects(); + } + + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp#L76-L117") + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + private static void accumulateAndResetStatistics(IsolateThread thread) { + gcWaste.set(thread, gcWaste.get() + UninterruptibleUtils.NumUtil.safeToInt(availableTlabMemory(getTlab(thread)).rawValue())); + UnsignedWord totalAlignedAllocated = ThreadLocalAllocation.getAlignedAllocatedBytes(thread); + UnsignedWord allocatedAlignedSinceLastGC = totalAlignedAllocated.subtract(tlabAllocatedAlignedBytesBeforeLastGC.get(thread)); + tlabAllocatedAlignedBytesBeforeLastGC.set(thread, totalAlignedAllocated); + + AdaptiveWeightedAverageStruct.sample(allocatedBytesAvg.getAddress(thread), allocatedAlignedSinceLastGC.rawValue()); + + printStats(thread, allocatedAlignedSinceLastGC); + + resetStatistics(thread); + } + + @Uninterruptible(reason = "Bridge between uninterruptible and interruptible code", calleeMustBe = false) + private static void printStats(IsolateThread thread, UnsignedWord allocatedBytesSinceLastGC) { + if (!SerialAndEpsilonGCOptions.PrintTLAB.getValue() || !VMOperation.isGCInProgress()) { + return; + } + + long waste = gcWaste.get(thread) + refillWaste.get(thread); + Log.log().string("TLAB: thread: ").zhex(thread) + .string(" slow allocs: ").unsigned(slowAllocations.get(thread)) + .string(" refills: ").unsigned(numberOfRefills.get(thread)) + .string(" alloc bytes: ").unsigned(allocatedBytesSinceLastGC) + .string(" alloc avg.: ").unsigned((long) allocatedBytesAvg.getAddress(thread).getAverage()) + .string(" waste bytes: ").zhex(waste) + .string(" GC waste: ").unsigned(gcWaste.get(thread)) + .string(" refill waste: ").unsigned(refillWaste.get(thread)).newline(); + } + + static void logTlabChunks(Log log, IsolateThread thread, String shortSpaceName) { + ThreadLocalAllocation.Descriptor tlab = getTlabUnsafe(thread); + + // 
Aligned chunks are handled in HeapAllocation. + + UnalignedHeapChunk.UnalignedHeader uChunk = tlab.getUnalignedChunk(); + HeapChunkLogging.logChunks(log, uChunk, shortSpaceName, false); + } + + static boolean printTlabInfo(Log log, Pointer ptr) { + for (IsolateThread thread = VMThreads.firstThreadUnsafe(); thread.isNonNull(); thread = VMThreads.nextThread(thread)) { + if (printTlabInfo(log, ptr, thread)) { + return true; + } + } + return false; + } + + static boolean printTlabInfo(Log log, Pointer ptr, IsolateThread thread) { + ThreadLocalAllocation.Descriptor tlab = getTlabUnsafe(thread); + Pointer start = tlab.getAlignedAllocationStart(SubstrateAllocationSnippets.TLAB_START_IDENTITY); + Pointer end = tlab.getAllocationEnd(SubstrateAllocationSnippets.TLAB_END_IDENTITY); + if (start.belowOrEqual(ptr) && ptr.belowOrEqual(end)) { + /* top may be null for a thread's current TLAB. */ + Pointer top = tlab.getAllocationTop(SubstrateAllocationSnippets.TLAB_TOP_IDENTITY); + boolean unusablePart = top.isNonNull() && ptr.aboveOrEqual(top); + printTlabMemoryInfo(log, thread, start, "aligned TLAB", unusablePart); + return true; + } + + UnalignedHeapChunk.UnalignedHeader uChunk = tlab.getUnalignedChunk(); + while (uChunk.isNonNull()) { + if (HeapChunk.asPointer(uChunk).belowOrEqual(ptr) && ptr.belowThan(HeapChunk.getEndPointer(uChunk))) { + boolean unusablePart = ptr.aboveOrEqual(HeapChunk.getTopPointer(uChunk)); + printTlabMemoryInfo(log, thread, HeapChunk.asPointer(uChunk), "unaligned chunk", unusablePart); + return true; + } + uChunk = HeapChunk.getNext(uChunk); + } + + return false; + } + + private static void printTlabMemoryInfo(Log log, IsolateThread thread, Pointer start, String memoryType, boolean unusablePart) { + String unusable = unusablePart ? "unusable part of " : ""; + log.string("points into ").string(unusable).string(memoryType).spaces(1).zhex(start).spaces(1); + log.string("(TLAB of thread ").zhex(thread).string(")"); + } + + @Uninterruptible(reason = "This whole method is unsafe, so it is only uninterruptible to satisfy the checks.") + private static Descriptor getTlabUnsafe(IsolateThread thread) { + assert SubstrateDiagnostics.isFatalErrorHandlingThread() : "can cause crashes, so it may only be used while printing diagnostics"; + return ThreadLocalAllocation.getTlab(thread); + } + +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java index 66b1cfed1a6f..0a1a47726a67 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/YoungGeneration.java @@ -82,7 +82,7 @@ public int getMaxSurvivorSpaces() { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) void tearDown() { heapAllocation.tearDown(); - ThreadLocalAllocation.tearDown(); + TlabSupport.tearDown(); eden.tearDown(); for (int i = 0; i < maxSurvivorSpaces; i++) { survivorFromSpaces[i].tearDown(); @@ -113,7 +113,7 @@ public void logChunks(Log log, boolean allowUnsafe) { heapAllocation.logChunks(log, eden.getShortName()); for (IsolateThread thread = VMThreads.firstThreadUnsafe(); thread.isNonNull(); thread = VMThreads.nextThread(thread)) { - logTlabChunks(log, thread); + TlabSupport.logTlabChunks(log, thread, eden.getShortName()); } } @@ -124,15 +124,6 @@ public void logChunks(Log log, 
boolean allowUnsafe) { } } - private void logTlabChunks(Log log, IsolateThread thread) { - ThreadLocalAllocation.Descriptor tlab = HeapImpl.getTlabUnsafe(thread); - AlignedHeapChunk.AlignedHeader aChunk = tlab.getAlignedChunk(); - HeapChunkLogging.logChunks(log, aChunk, eden.getShortName(), false); - - UnalignedHeapChunk.UnalignedHeader uChunk = tlab.getUnalignedChunk(); - HeapChunkLogging.logChunks(log, uChunk, eden.getShortName(), false); - } - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) Space getEden() { return eden; @@ -387,7 +378,7 @@ public boolean printLocationInfo(Log log, Pointer ptr) { } void makeParseable() { - ThreadLocalAllocation.disableAndFlushForAllThreads(); + TlabSupport.disableAndFlushForAllThreads(); heapAllocation.retireChunksToEden(); } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/snippets/SubstrateAllocationSnippets.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/snippets/SubstrateAllocationSnippets.java index 01b5981adae9..77e0f4915152 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/snippets/SubstrateAllocationSnippets.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/snippets/SubstrateAllocationSnippets.java @@ -118,11 +118,12 @@ import jdk.vm.ci.meta.ResolvedJavaType; public class SubstrateAllocationSnippets extends AllocationSnippets { + public static final LocationIdentity TLAB_START_IDENTITY = NamedLocationIdentity.mutable("TLAB.start"); public static final LocationIdentity TLAB_TOP_IDENTITY = NamedLocationIdentity.mutable("TLAB.top"); public static final LocationIdentity TLAB_END_IDENTITY = NamedLocationIdentity.mutable("TLAB.end"); - public static final Object[] ALLOCATION_LOCATIONS = new Object[]{TLAB_TOP_IDENTITY, TLAB_END_IDENTITY, IdentityHashCodeSupport.IDENTITY_HASHCODE_SALT_LOCATION, + public static final Object[] ALLOCATION_LOCATIONS = new Object[]{TLAB_START_IDENTITY, TLAB_TOP_IDENTITY, TLAB_END_IDENTITY, IdentityHashCodeSupport.IDENTITY_HASHCODE_SALT_LOCATION, AllocationCounter.COUNT_FIELD, AllocationCounter.SIZE_FIELD}; - public static final LocationIdentity[] GC_LOCATIONS = new LocationIdentity[]{TLAB_TOP_IDENTITY, TLAB_END_IDENTITY, IdentityHashCodeSupport.IDENTITY_HASHCODE_SALT_LOCATION}; + public static final LocationIdentity[] GC_LOCATIONS = new LocationIdentity[]{TLAB_START_IDENTITY, TLAB_TOP_IDENTITY, TLAB_END_IDENTITY, IdentityHashCodeSupport.IDENTITY_HASHCODE_SALT_LOCATION}; private static final SubstrateForeignCallDescriptor NEW_MULTI_ARRAY = SnippetRuntime.findForeignCall(SubstrateAllocationSnippets.class, "newMultiArrayStub", NO_SIDE_EFFECT); private static final SubstrateForeignCallDescriptor SLOW_PATH_HUB_OR_UNSAFE_INSTANTIATE_ERROR = SnippetRuntime.findForeignCall(SubstrateAllocationSnippets.class, diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/UninterruptibleUtils.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/UninterruptibleUtils.java index ba84f084c31d..7fecb50887ca 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/UninterruptibleUtils.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/UninterruptibleUtils.java @@ -474,6 +474,24 @@ public static long ceilToLong(double a) { } } + public static class NumUtil { + + /** + * Determines if a given {@code long} value is the range of signed int values. 
+ */ + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public static boolean isInt(long l) { + return (int) l == l; + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public static int safeToInt(long v) { + assert isInt(v); + return (int) v; + } + + } + public static class Byte { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) @SuppressWarnings("cast") From ef188762bd84c59229aab8917d143399da8dd307 Mon Sep 17 00:00:00 2001 From: Thomas Schrott Date: Wed, 15 Jan 2025 14:00:04 +0100 Subject: [PATCH 9/9] Add JFR event for object allocation outside a TLAB --- .../AddressRangeCommittedMemoryProvider.java | 1 + .../core/genscavenge/HeapChunkProvider.java | 4 + .../genscavenge/ThreadLocalAllocation.java | 63 +++++---- .../core/genscavenge/UnalignedHeapChunk.java | 4 + .../graal/GenScavengeAllocationSupport.java | 4 +- .../remset/CardTableBasedRememberedSet.java | 2 + .../genscavenge/remset/NoRememberedSet.java | 2 + .../genscavenge/remset/RememberedSet.java | 2 + .../remset/UnalignedChunkRememberedSet.java | 3 + .../src/com/oracle/svm/core/jfr/JfrEvent.java | 1 + .../core/jfr/events/JfrAllocationEvents.java | 30 ++++- .../os/ChunkBasedCommittedMemoryProvider.java | 4 + .../TestObjectAllocationInNewTLABEvent.java | 11 +- .../TestObjectAllocationOutsideTLABEvent.java | 123 ++++++++++++++++++ .../jfr/TestObjectAllocationSampleEvent.java | 4 +- 15 files changed, 224 insertions(+), 34 deletions(-) create mode 100644 substratevm/src/com.oracle.svm.test/src/com/oracle/svm/test/jfr/TestObjectAllocationOutsideTLABEvent.java diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AddressRangeCommittedMemoryProvider.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AddressRangeCommittedMemoryProvider.java index 0a8002f1b3a6..ed7c7c4b9c47 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AddressRangeCommittedMemoryProvider.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AddressRangeCommittedMemoryProvider.java @@ -351,6 +351,7 @@ protected OutOfMemoryError reportAlignedChunkAllocationFailed(int error) { } @Override + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) public Pointer allocateUnalignedChunk(UnsignedWord nbytes) { WordPointer allocOut = UnsafeStackValue.get(WordPointer.class); int error = allocateInHeapAddressSpace(nbytes, getAlignmentForUnalignedChunks(), allocOut); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunkProvider.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunkProvider.java index 8d28931ae8f9..3b78479b2c8d 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunkProvider.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapChunkProvider.java @@ -24,6 +24,8 @@ */ package com.oracle.svm.core.genscavenge; +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; + import org.graalvm.nativeimage.Platform; import org.graalvm.nativeimage.Platforms; import org.graalvm.word.Pointer; @@ -229,6 +231,7 @@ private void freeUnusedAlignedChunksAtSafepoint(UnsignedWord count) { /** Acquire an UnalignedHeapChunk from the operating system. 
*/ @SuppressWarnings("static-method") + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) UnalignedHeader produceUnalignedChunk(UnsignedWord objectSize) { UnsignedWord chunkSize = UnalignedHeapChunk.getChunkSizeForObject(objectSize); @@ -243,6 +246,7 @@ UnalignedHeader produceUnalignedChunk(UnsignedWord objectSize) { return result; } + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) public static boolean areUnalignedChunksZeroed() { return ChunkBasedCommittedMemoryProvider.get().areUnalignedChunksZeroed(); } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java index c05194fd24b6..5d6e2d96ed05 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java @@ -52,6 +52,7 @@ import com.oracle.svm.core.SubstrateGCOptions; import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.c.BooleanPointer; import com.oracle.svm.core.config.ConfigurationValues; import com.oracle.svm.core.genscavenge.UnalignedHeapChunk.UnalignedHeader; import com.oracle.svm.core.genscavenge.graal.GenScavengeAllocationSupport; @@ -59,7 +60,6 @@ import com.oracle.svm.core.genscavenge.graal.nodes.FormatObjectNode; import com.oracle.svm.core.genscavenge.graal.nodes.FormatPodNode; import com.oracle.svm.core.genscavenge.graal.nodes.FormatStoredContinuationNode; -import com.oracle.svm.core.graal.snippets.DeoptTester; import com.oracle.svm.core.heap.OutOfMemoryUtil; import com.oracle.svm.core.heap.Pod; import com.oracle.svm.core.heap.RestrictHeapAccess; @@ -236,16 +236,23 @@ public static Object slowPathNewInstance(Word objectHeader) { @RestrictHeapAccess(access = RestrictHeapAccess.Access.NO_ALLOCATION, reason = "Must not allocate in the implementation of allocation.") private static Object slowPathNewInstanceWithoutAllocating(DynamicHub hub, UnsignedWord size) { - DeoptTester.disableDeoptTesting(); + HeapImpl.exitIfAllocationDisallowed("ThreadLocalAllocation.slowPathNewInstanceWithoutAllocating", DynamicHub.toClass(hub).getName()); + GCImpl.getGCImpl().maybeCollectOnAllocation(size); + + return slowPathNewInstanceWithoutAllocation0(hub, size); + } + + @Uninterruptible(reason = "Possible use of StackValue in virtual thread.") + private static Object slowPathNewInstanceWithoutAllocation0(DynamicHub hub, UnsignedWord size) { long startTicks = JfrTicks.elapsedTicks(); - try { - HeapImpl.exitIfAllocationDisallowed("ThreadLocalAllocation.slowPathNewInstanceWithoutAllocating", DynamicHub.toClass(hub).getName()); - GCImpl.getGCImpl().maybeCollectOnAllocation(size); - return allocateInstanceSlow(hub, size); + BooleanPointer allocatedOutsideTlab = StackValue.get(BooleanPointer.class); + allocatedOutsideTlab.write(false); + + try { + return allocateInstanceSlow(hub, size, allocatedOutsideTlab); } finally { - JfrAllocationEvents.emit(startTicks, hub, size, getTlabSize()); - DeoptTester.enableDeoptTesting(); + JfrAllocationEvents.emit(startTicks, hub, size, getTlabSize(), allocatedOutsideTlab.read()); } } @@ -267,7 +274,7 @@ public static Object slowPathNewArrayLikeObject(Word objectHeader, int length, b throw OutOfMemoryUtil.reportOutOfMemoryError(outOfMemoryError); } - Object result = 
slowPathNewArrayLikeObject0(hub, length, size, podReferenceMap); + Object result = slowPathNewArrayLikeObjectWithoutAllocating(hub, length, size, podReferenceMap); runSlowPathHooks(); sampleSlowPathAllocation(result, size, length); @@ -276,14 +283,22 @@ public static Object slowPathNewArrayLikeObject(Word objectHeader, int length, b } @RestrictHeapAccess(access = RestrictHeapAccess.Access.NO_ALLOCATION, reason = "Must not allocate in the implementation of allocation.") - private static Object slowPathNewArrayLikeObject0(DynamicHub hub, int length, UnsignedWord size, byte[] podReferenceMap) { - DeoptTester.disableDeoptTesting(); + private static Object slowPathNewArrayLikeObjectWithoutAllocating(DynamicHub hub, int length, UnsignedWord size, byte[] podReferenceMap) { + HeapImpl.exitIfAllocationDisallowed("ThreadLocalAllocation.slowPathNewArrayLikeObjectWithoutAllocating", DynamicHub.toClass(hub).getName()); + GCImpl.getGCImpl().maybeCollectOnAllocation(size); + + return slowPathNewArrayLikeObjectWithoutAllocation0(hub, length, size, podReferenceMap); + } + + @Uninterruptible(reason = "Possible use of StackValue in virtual thread.") + private static Object slowPathNewArrayLikeObjectWithoutAllocation0(DynamicHub hub, int length, UnsignedWord size, byte[] podReferenceMap) { long startTicks = JfrTicks.elapsedTicks(); UnsignedWord tlabSize = Word.zero(); - try { - HeapImpl.exitIfAllocationDisallowed("ThreadLocalAllocation.slowPathNewArrayOrPodWithoutAllocating", DynamicHub.toClass(hub).getName()); - GCImpl.getGCImpl().maybeCollectOnAllocation(size); + BooleanPointer allocatedOutsideTlab = StackValue.get(BooleanPointer.class); + allocatedOutsideTlab.write(false); + + try { if (!GenScavengeAllocationSupport.arrayAllocatedInAlignedChunk(size)) { /* * Large arrays go into their own unaligned chunk. 
Only arrays and stored @@ -305,13 +320,12 @@ private static Object slowPathNewArrayLikeObject0(DynamicHub hub, int length, Un */ Object array = allocateSmallArrayLikeObjectInCurrentTlab(hub, length, size, podReferenceMap); if (array == null) { - array = allocateArraySlow(hub, length, size, podReferenceMap); + array = allocateArraySlow(hub, length, size, podReferenceMap, allocatedOutsideTlab); } tlabSize = getTlabSize(); return array; } finally { - JfrAllocationEvents.emit(startTicks, hub, size, tlabSize); - DeoptTester.enableDeoptTesting(); + JfrAllocationEvents.emit(startTicks, hub, size, tlabSize, allocatedOutsideTlab.read()); } } @@ -326,9 +340,9 @@ private static Object allocateInstanceInCurrentTlab(DynamicHub hub, UnsignedWord } @Uninterruptible(reason = "Holds uninitialized memory.") - private static Object allocateInstanceSlow(DynamicHub hub, UnsignedWord size) { + private static Object allocateInstanceSlow(DynamicHub hub, UnsignedWord size, BooleanPointer allocatedOutsideTlab) { assert size.equal(LayoutEncoding.getPureInstanceAllocationSize(hub.getLayoutEncoding())); - Pointer memory = allocateRawMemory(size); + Pointer memory = allocateRawMemory(size, allocatedOutsideTlab); return FormatObjectNode.formatObject(memory, DynamicHub.toClass(hub), false, FillContent.WITH_ZEROES, true); } @@ -342,19 +356,19 @@ private static Object allocateSmallArrayLikeObjectInCurrentTlab(DynamicHub hub, } @Uninterruptible(reason = "Holds uninitialized memory.") - private static Object allocateArraySlow(DynamicHub hub, int length, UnsignedWord size, byte[] podReferenceMap) { - Pointer memory = allocateRawMemory(size); + private static Object allocateArraySlow(DynamicHub hub, int length, UnsignedWord size, byte[] podReferenceMap, BooleanPointer allocatedOutsideTlab) { + Pointer memory = allocateRawMemory(size, allocatedOutsideTlab); return formatArrayLikeObject(memory, hub, length, false, FillContent.WITH_ZEROES, podReferenceMap); } @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/memAllocator.cpp#L333-L341") @Uninterruptible(reason = "Holds uninitialized memory.") - private static Pointer allocateRawMemory(UnsignedWord size) { + private static Pointer allocateRawMemory(UnsignedWord size, BooleanPointer allocatedOutsideTlab) { Pointer memory = allocateRawMemoryInTlabSlow(size); if (memory.isNonNull()) { return memory; } - return allocateRawMemoryOutsideTlab(size); + return allocateRawMemoryOutsideTlab(size, allocatedOutsideTlab); } @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-25+8/src/hotspot/share/gc/shared/memAllocator.cpp#L256-L318") @@ -404,7 +418,8 @@ private static Pointer allocateRawMemoryInTlabSlow(UnsignedWord size) { @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/memAllocator.cpp#L240-L251") @Uninterruptible(reason = "Holds uninitialized memory.") - private static Pointer allocateRawMemoryOutsideTlab(UnsignedWord size) { + private static Pointer allocateRawMemoryOutsideTlab(UnsignedWord size, BooleanPointer allocatedOutsideTlab) { + allocatedOutsideTlab.write(true); Pointer memory = YoungGeneration.getHeapAllocation().allocateOutsideTlab(size); allocatedAlignedBytes.set(allocatedAlignedBytes.get().add(size)); return memory; diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/UnalignedHeapChunk.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/UnalignedHeapChunk.java index 58738b2f3dd5..cfc23a4594fb 100644 --- 
a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/UnalignedHeapChunk.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/UnalignedHeapChunk.java @@ -100,6 +100,7 @@ public static void initialize(HostedByteBufferPointer chunk, UnsignedWord object RememberedSet.get().setObjectStartOffsetOfUnalignedChunk(chunk, objectStartOffset); } + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) public static void initialize(UnalignedHeader chunk, UnsignedWord chunkSize, UnsignedWord objectSize) { assert chunk.isNonNull(); UnsignedWord objectStartOffset = calculateObjectStartOffset(objectSize); @@ -116,6 +117,7 @@ public static Pointer getObjectEnd(UnalignedHeader that) { return HeapChunk.getEndPointer(that); } + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) static UnsignedWord getChunkSizeForObject(UnsignedWord objectSize) { UnsignedWord objectStart = RememberedSet.get().getHeaderSizeOfUnalignedChunk(objectSize); UnsignedWord alignment = Word.unsigned(ConfigurationValues.getObjectLayout().getAlignment()); @@ -155,10 +157,12 @@ public static void initializeObjectStartOffset(UnalignedHeader that, UnsignedWor setObjectStartOffset(that, objectStartOffset); } + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) public static UnsignedWord calculateObjectStartOffset(UnsignedWord objectSize) { return RememberedSet.get().getHeaderSizeOfUnalignedChunk(objectSize); } + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) public static void setObjectStartOffset(UnalignedHeader that, UnsignedWord objectStartOffset) { RememberedSet.get().setObjectStartOffsetOfUnalignedChunk(that, objectStartOffset); } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSupport.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSupport.java index bbd305f6e723..e28c8f812c72 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSupport.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSupport.java @@ -24,13 +24,14 @@ */ package com.oracle.svm.core.genscavenge.graal; +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; import static jdk.graal.compiler.core.common.spi.ForeignCallDescriptor.CallSideEffect.NO_SIDE_EFFECT; import org.graalvm.word.UnsignedWord; +import com.oracle.svm.core.SubstrateGCOptions; import com.oracle.svm.core.Uninterruptible; import com.oracle.svm.core.genscavenge.HeapImpl; -import com.oracle.svm.core.SubstrateGCOptions; import com.oracle.svm.core.genscavenge.HeapParameters; import com.oracle.svm.core.genscavenge.ThreadLocalAllocation; import com.oracle.svm.core.graal.meta.SubstrateForeignCallsProvider; @@ -123,6 +124,7 @@ public int tlabEndOffset() { return ThreadLocalAllocation.Descriptor.offsetOfAllocationEnd(); } + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) public static boolean arrayAllocatedInAlignedChunk(UnsignedWord objectSize) { return objectSize.belowThan(HeapParameters.getLargeArrayThreshold()); } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/CardTableBasedRememberedSet.java 
b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/CardTableBasedRememberedSet.java index 7f85fc4f4009..82f3f81639b6 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/CardTableBasedRememberedSet.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/CardTableBasedRememberedSet.java @@ -82,6 +82,7 @@ public UnsignedWord getHeaderSizeOfAlignedChunk() { } @Override + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) public UnsignedWord getHeaderSizeOfUnalignedChunk(UnsignedWord objectSize) { return UnalignedChunkRememberedSet.getHeaderSize(objectSize); } @@ -93,6 +94,7 @@ public void setObjectStartOffsetOfUnalignedChunk(HostedByteBufferPointer chunk, } @Override + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) public void setObjectStartOffsetOfUnalignedChunk(UnalignedHeader chunk, UnsignedWord objectStartOffset) { UnalignedChunkRememberedSet.setObjectStartOffset(chunk, objectStartOffset); } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/NoRememberedSet.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/NoRememberedSet.java index 1159f8bebe11..66b071b08884 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/NoRememberedSet.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/NoRememberedSet.java @@ -70,6 +70,7 @@ public UnsignedWord getHeaderSizeOfAlignedChunk() { } @Override + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) public UnsignedWord getHeaderSizeOfUnalignedChunk(UnsignedWord objectSize) { return getHeaderSizeOfUnalignedChunk(); } @@ -88,6 +89,7 @@ public void setObjectStartOffsetOfUnalignedChunk(HostedByteBufferPointer chunk, } @Override + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) public void setObjectStartOffsetOfUnalignedChunk(UnalignedHeader chunk, UnsignedWord objectStartOffset) { assert objectStartOffset.equal(getHeaderSizeOfUnalignedChunk()); } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/RememberedSet.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/RememberedSet.java index 0a0b84290752..3f197d36fc37 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/RememberedSet.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/RememberedSet.java @@ -61,6 +61,7 @@ static RememberedSet get() { UnsignedWord getHeaderSizeOfAlignedChunk(); /** Returns the header size of an unaligned chunk for a given object size. */ + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) UnsignedWord getHeaderSizeOfUnalignedChunk(UnsignedWord objectSize); /** Sets the object start offset in the unaligned chunk. */ @@ -68,6 +69,7 @@ static RememberedSet get() { void setObjectStartOffsetOfUnalignedChunk(HostedByteBufferPointer chunk, UnsignedWord objectStartOffset); /** Sets the object start offset in the unaligned chunk. 
*/ + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) void setObjectStartOffsetOfUnalignedChunk(UnalignedHeader chunk, UnsignedWord objectStartOffset); /** diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/UnalignedChunkRememberedSet.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/UnalignedChunkRememberedSet.java index 967d45f67d46..105ce62170db 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/UnalignedChunkRememberedSet.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/UnalignedChunkRememberedSet.java @@ -65,6 +65,7 @@ final class UnalignedChunkRememberedSet { private UnalignedChunkRememberedSet() { } + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) public static UnsignedWord getHeaderSize(UnsignedWord objectSize) { UnsignedWord headerSize = getCardTableLimitOffset(objectSize); headerSize = headerSize.add(sizeOfObjectStartOffsetField()); @@ -78,6 +79,7 @@ public static void setObjectStartOffset(HostedByteBufferPointer chunk, UnsignedW chunk.writeWord(objectStartOffset.subtract(sizeOfObjectStartOffsetField()), objectStartOffset); } + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) public static void setObjectStartOffset(UnalignedHeader chunk, UnsignedWord objectStartOffset) { HeapChunk.asPointer(chunk).writeWord(objectStartOffset.subtract(sizeOfObjectStartOffsetField()), objectStartOffset); assert getObjectStartOffset(chunk).equal(objectStartOffset); @@ -385,6 +387,7 @@ private static UnsignedWord getCardTableSize(Pointer obj) { return getOffsetForObject(obj).subtract(sizeOfObjectStartOffsetField()).subtract(getCardTableStartOffset()); } + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) private static UnsignedWord getCardTableLimitOffset(UnsignedWord objectSize) { UnsignedWord tableStart = getCardTableStartOffset(); UnsignedWord tableSize = getCardTableSize(objectSize); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/JfrEvent.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/JfrEvent.java index fff97e050cd6..a1cde362cb98 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/JfrEvent.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/JfrEvent.java @@ -65,6 +65,7 @@ public final class JfrEvent { public static final JfrEvent JavaMonitorWait = create("jdk.JavaMonitorWait", 5, JfrEventFlags.HasDuration); public static final JfrEvent JavaMonitorInflate = create("jdk.JavaMonitorInflate", 5, JfrEventFlags.HasDuration); public static final JfrEvent ObjectAllocationInNewTLAB = create("jdk.ObjectAllocationInNewTLAB", 5); + public static final JfrEvent ObjectAllocationOutsideTLAB = create("jdk.ObjectAllocationOutsideTLAB", 5); public static final JfrEvent GCHeapSummary = create("jdk.GCHeapSummary"); public static final JfrEvent ThreadAllocationStatistics = create("jdk.ThreadAllocationStatistics"); public static final JfrEvent SystemGC = create("jdk.SystemGC", 5, JfrEventFlags.HasDuration); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/events/JfrAllocationEvents.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/events/JfrAllocationEvents.java index 5f0a23bb3af5..a57cd05cff20 100644 --- 
a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/events/JfrAllocationEvents.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/events/JfrAllocationEvents.java @@ -26,6 +26,8 @@ package com.oracle.svm.core.jfr.events; +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; + import org.graalvm.nativeimage.IsolateThread; import org.graalvm.nativeimage.StackValue; import org.graalvm.word.UnsignedWord; @@ -42,6 +44,7 @@ import com.oracle.svm.core.thread.VMThreads; import com.oracle.svm.core.threadlocal.FastThreadLocalFactory; import com.oracle.svm.core.threadlocal.FastThreadLocalLong; +import com.oracle.svm.core.util.BasedOnJDKFile; public class JfrAllocationEvents { private static final FastThreadLocalLong lastThreadAllocatedBytes = FastThreadLocalFactory.createLong("JfrAllocationEvents.lastThreadAllocatedBytes"); @@ -52,9 +55,16 @@ public static void reset() { } } - public static void emit(long startTicks, DynamicHub hub, UnsignedWord allocationSize, UnsignedWord tlabSize) { + @BasedOnJDKFile("https://github.com/openjdk/jdk/blob/jdk-23-ga/src/hotspot/share/gc/shared/memAllocator.cpp#L209-L220") + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public static void emit(long startTicks, DynamicHub hub, UnsignedWord allocationSize, UnsignedWord tlabSize, boolean allocatedOutsideTlab) { if (HasJfrSupport.get()) { - emitObjectAllocationInNewTLAB(startTicks, hub, allocationSize, tlabSize); + + if (allocatedOutsideTlab) { + emitObjectAllocationOutsideTLAB(startTicks, hub, allocationSize); + } else if (tlabSize.notEqual(0)) { + emitObjectAllocationInNewTLAB(startTicks, hub, allocationSize, tlabSize); + } emitObjectAllocationSample(startTicks, hub); } } @@ -76,6 +86,22 @@ private static void emitObjectAllocationInNewTLAB(long startTicks, DynamicHub hu } } + @Uninterruptible(reason = "Accesses a JFR buffer.") + private static void emitObjectAllocationOutsideTLAB(long startTicks, DynamicHub hub, UnsignedWord allocationSize) { + if (JfrEvent.ObjectAllocationOutsideTLAB.shouldEmit()) { + JfrNativeEventWriterData data = StackValue.get(JfrNativeEventWriterData.class); + JfrNativeEventWriterDataAccess.initializeThreadLocalNativeBuffer(data); + + JfrNativeEventWriter.beginSmallEvent(data, JfrEvent.ObjectAllocationOutsideTLAB); + JfrNativeEventWriter.putLong(data, startTicks); + JfrNativeEventWriter.putEventThread(data); + JfrNativeEventWriter.putLong(data, SubstrateJVM.get().getStackTraceId(JfrEvent.ObjectAllocationOutsideTLAB)); + JfrNativeEventWriter.putClass(data, DynamicHub.toClass(hub)); + JfrNativeEventWriter.putLong(data, allocationSize.rawValue()); + JfrNativeEventWriter.endSmallEvent(data); + } + } + @Uninterruptible(reason = "Accesses a JFR buffer.") private static void emitObjectAllocationSample(long startTicks, DynamicHub hub) { if (JfrEvent.ObjectAllocationSample.shouldEmit()) { diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/os/ChunkBasedCommittedMemoryProvider.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/os/ChunkBasedCommittedMemoryProvider.java index 369d027a1ccc..128f0c8775b7 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/os/ChunkBasedCommittedMemoryProvider.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/os/ChunkBasedCommittedMemoryProvider.java @@ -24,6 +24,8 @@ */ package com.oracle.svm.core.os; +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; + import 
 import org.graalvm.word.Pointer;
 import org.graalvm.word.PointerBase;
@@ -60,6 +62,7 @@ public Pointer allocateAlignedChunk(UnsignedWord nbytes, UnsignedWord alignment)
     }
 
     /** Returns a non-null value or throws a pre-allocated exception. */
+    @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true)
     public Pointer allocateUnalignedChunk(UnsignedWord nbytes) {
         Pointer result = allocate(nbytes, getAlignmentForUnalignedChunks(), false, NmtCategory.JavaHeap);
         if (result.isNull()) {
@@ -72,6 +75,7 @@ public Pointer allocateUnalignedChunk(UnsignedWord nbytes) {
      * This method returns {@code true} if the memory returned by {@link #allocateUnalignedChunk} is
      * guaranteed to be zeroed.
      */
+    @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true)
     public boolean areUnalignedChunksZeroed() {
         return false;
     }
diff --git a/substratevm/src/com.oracle.svm.test/src/com/oracle/svm/test/jfr/TestObjectAllocationInNewTLABEvent.java b/substratevm/src/com.oracle.svm.test/src/com/oracle/svm/test/jfr/TestObjectAllocationInNewTLABEvent.java
index 3be0c4f80650..8d67e6caf918 100644
--- a/substratevm/src/com.oracle.svm.test/src/com/oracle/svm/test/jfr/TestObjectAllocationInNewTLABEvent.java
+++ b/substratevm/src/com.oracle.svm.test/src/com/oracle/svm/test/jfr/TestObjectAllocationInNewTLABEvent.java
@@ -33,6 +33,7 @@
 import org.junit.Test;
 
 import com.oracle.svm.core.NeverInline;
+import com.oracle.svm.core.SubstrateGCOptions;
 import com.oracle.svm.core.genscavenge.HeapParameters;
 import com.oracle.svm.core.jfr.JfrEvent;
 import com.oracle.svm.core.util.UnsignedUtils;
@@ -95,13 +96,13 @@ private static void validateEvents(List events) {
                 } else if (className.equals(byte[].class.getName())) {
                     foundBigByteArray = true;
                 }
-                checkTopStackFrame(event, "slowPathNewArrayLikeObject0");
-            } else if (allocationSize >= K && tlabSize == alignedHeapChunkSize && className.equals(byte[].class.getName())) {
+                checkTopStackFrame(event, "slowPathNewArrayLikeObjectWithoutAllocation0");
+            } else if (allocationSize >= K && tlabSize >= SubstrateGCOptions.TlabOptions.MinTLABSize.getValue() && tlabSize <= alignedHeapChunkSize && className.equals(byte[].class.getName())) {
                 foundSmallByteArray = true;
-                checkTopStackFrame(event, "slowPathNewArrayLikeObject0");
-            } else if (tlabSize == alignedHeapChunkSize && className.equals(Helper.class.getName())) {
+                checkTopStackFrame(event, "slowPathNewArrayLikeObjectWithoutAllocation0");
+            } else if (tlabSize >= SubstrateGCOptions.TlabOptions.MinTLABSize.getValue() && tlabSize <= alignedHeapChunkSize && className.equals(Helper.class.getName())) {
                 foundInstance = true;
-                checkTopStackFrame(event, "slowPathNewInstanceWithoutAllocating");
+                checkTopStackFrame(event, "slowPathNewInstanceWithoutAllocation0");
             }
         }
 
diff --git a/substratevm/src/com.oracle.svm.test/src/com/oracle/svm/test/jfr/TestObjectAllocationOutsideTLABEvent.java b/substratevm/src/com.oracle.svm.test/src/com/oracle/svm/test/jfr/TestObjectAllocationOutsideTLABEvent.java
new file mode 100644
index 000000000000..e08dc3fc31c7
--- /dev/null
+++ b/substratevm/src/com.oracle.svm.test/src/com/oracle/svm/test/jfr/TestObjectAllocationOutsideTLABEvent.java
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2025, Red Hat Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.svm.test.jfr;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.junit.Test;
+
+import com.oracle.svm.core.NeverInline;
+import com.oracle.svm.core.genscavenge.SerialAndEpsilonGCOptions;
+import com.oracle.svm.core.jfr.JfrEvent;
+
+import jdk.graal.compiler.api.directives.GraalDirectives;
+import jdk.graal.compiler.core.common.NumUtil;
+import jdk.jfr.Recording;
+import jdk.jfr.consumer.RecordedClass;
+import jdk.jfr.consumer.RecordedEvent;
+import jdk.jfr.consumer.RecordedThread;
+
+/**
+ * Objects are allocated outside the TLAB if they don't fit into the current TLAB and the remaining
+ * space in the current TLAB is larger than the refill waste limit. Additionally, the objects must
+ * be smaller than the {@link SerialAndEpsilonGCOptions#LargeArrayThreshold}.
+ */
+public class TestObjectAllocationOutsideTLABEvent extends JfrRecordingTest {
+
+    private static final String TEST_THREAD_NAME = "eventTestThread";
+
+    @Test
+    public void test() throws Throwable {
+        String[] events = new String[]{JfrEvent.ObjectAllocationOutsideTLAB.getName()};
+        Recording recording = startRecording(events);
+
+        /*
+         * Use a separate thread for allocating the objects, to have better control over the TLAB
+         * and to make sure the objects are actually allocated outside the TLAB.
+         */
+        Thread testThread = new Thread(() -> {
+            final long largeObjectThreshold = SerialAndEpsilonGCOptions.LargeArrayThreshold.getValue();
+            final int arraySize = NumUtil.safeToInt(largeObjectThreshold - 1024);
+
+            // Allocate a small object to make sure we have a TLAB.
+            Object o = new Object();
+            GraalDirectives.blackhole(o);
+
+            allocateByteArray(arraySize / Byte.BYTES);
+            allocateCharArray(arraySize / Character.BYTES);
+        }, TEST_THREAD_NAME);
+
+        testThread.start();
+        testThread.join();
+
+        stopRecording(recording, TestObjectAllocationOutsideTLABEvent::validateEvents);
+
+    }
+
+    @NeverInline("Prevent escape analysis.")
+    private static byte[] allocateByteArray(int length) {
+        return new byte[length];
+    }
+
+    @NeverInline("Prevent escape analysis.")
+    private static char[] allocateCharArray(int length) {
+        return new char[length];
+    }
+
+    private static void validateEvents(List<RecordedEvent> events) {
+        final long largeObjectThreshold = SerialAndEpsilonGCOptions.LargeArrayThreshold.getValue();
+        final int arrayLength = NumUtil.safeToInt(largeObjectThreshold - 1024);
+
+        boolean foundByteArray = false;
+        boolean foundCharArray = false;
+
+        for (RecordedEvent event : events) {
+            String eventThread = event.<RecordedThread> getValue("eventThread").getJavaName();
+            if (!eventThread.equals(TEST_THREAD_NAME)) {
+                continue;
+            }
+
+            long allocationSize = event.<Long> getValue("allocationSize");
+            String className = event.<RecordedClass> getValue("objectClass").getName();
+
+            if (allocationSize >= arrayLength) {
+                if (className.equals(char[].class.getName())) {
+                    foundCharArray = true;
+                } else if (className.equals(byte[].class.getName())) {
+                    foundByteArray = true;
+                }
+                checkTopStackFrame(event, "slowPathNewArrayLikeObjectWithoutAllocation0");
+            }
+
+        }
+
+        assertTrue(foundByteArray);
+        assertTrue(foundCharArray);
+    }
+
+}
diff --git a/substratevm/src/com.oracle.svm.test/src/com/oracle/svm/test/jfr/TestObjectAllocationSampleEvent.java b/substratevm/src/com.oracle.svm.test/src/com/oracle/svm/test/jfr/TestObjectAllocationSampleEvent.java
index b279d1e06a0d..58e3060dbe98 100644
--- a/substratevm/src/com.oracle.svm.test/src/com/oracle/svm/test/jfr/TestObjectAllocationSampleEvent.java
+++ b/substratevm/src/com.oracle.svm.test/src/com/oracle/svm/test/jfr/TestObjectAllocationSampleEvent.java
@@ -77,10 +77,10 @@ private static void validateEvents(List events) {
             // verify previous owner
             if (className.equals(char[].class.getName())) {
                 foundCharArray = true;
-                checkTopStackFrame(event, "slowPathNewArrayLikeObject0");
+                checkTopStackFrame(event, "slowPathNewArrayLikeObjectWithoutAllocation0");
             } else if (className.equals(byte[].class.getName())) {
                 foundByteArray = true;
-                checkTopStackFrame(event, "slowPathNewArrayLikeObject0");
+                checkTopStackFrame(event, "slowPathNewArrayLikeObjectWithoutAllocation0");
             }
         }
     }