diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java
index cb1b06eb987d..7ae3eafc89e6 100644
--- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java
+++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/GCImpl.java
@@ -31,7 +31,6 @@
 import java.lang.ref.Reference;
 
-import jdk.graal.compiler.word.Word;
 import org.graalvm.nativeimage.CurrentIsolate;
 import org.graalvm.nativeimage.IsolateThread;
 import org.graalvm.nativeimage.Platform;
@@ -80,6 +79,7 @@
 import com.oracle.svm.core.heap.ReferenceMapIndex;
 import com.oracle.svm.core.heap.RestrictHeapAccess;
 import com.oracle.svm.core.heap.RuntimeCodeCacheCleaner;
+import com.oracle.svm.core.heap.SuspendSerialGCMaxHeapSize;
 import com.oracle.svm.core.heap.VMOperationInfos;
 import com.oracle.svm.core.interpreter.InterpreterSupport;
 import com.oracle.svm.core.jdk.RuntimeSupport;
@@ -105,6 +105,7 @@
 import com.oracle.svm.core.util.VMError;
 
 import jdk.graal.compiler.api.replacements.Fold;
+import jdk.graal.compiler.word.Word;
 
 /**
  * Garbage collector (incremental or complete) for {@link HeapImpl}.
@@ -208,7 +209,7 @@ boolean collectWithoutAllocating(GCCause cause, boolean forceFullGC) {
 
     @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true)
     public static boolean shouldIgnoreOutOfMemory() {
-        return SerialGCOptions.IgnoreMaxHeapSizeWhileInVMInternalCode.getValue() && inVMInternalCode();
+        return SerialGCOptions.IgnoreMaxHeapSizeWhileInVMInternalCode.getValue() && (inVMInternalCode() || SuspendSerialGCMaxHeapSize.isSuspended());
     }
 
     @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true)
diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/deopt/Deoptimizer.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/deopt/Deoptimizer.java
index 4e991ff355c1..741532a612a0 100644
--- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/deopt/Deoptimizer.java
+++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/deopt/Deoptimizer.java
@@ -61,6 +61,7 @@
 import com.oracle.svm.core.heap.GCCause;
 import com.oracle.svm.core.heap.Heap;
 import com.oracle.svm.core.heap.ReferenceAccess;
+import com.oracle.svm.core.heap.SuspendSerialGCMaxHeapSize;
 import com.oracle.svm.core.heap.VMOperationInfos;
 import com.oracle.svm.core.log.Log;
 import com.oracle.svm.core.log.StringBuilderLog;
@@ -85,6 +86,7 @@
 import jdk.graal.compiler.api.replacements.Fold;
 import jdk.graal.compiler.core.common.NumUtil;
 import jdk.graal.compiler.core.common.util.TypeConversion;
+import jdk.graal.compiler.nodes.UnreachableNode;
 import jdk.graal.compiler.options.Option;
 import jdk.graal.compiler.word.BarrieredAccess;
 import jdk.graal.compiler.word.Word;
@@ -798,53 +800,51 @@ public static UnsignedWord lazyDeoptStubPrimitiveReturn(Pointer framePointer, Un
     @Uninterruptible(reason = "frame will hold objects in unmanaged storage")
     private static UnsignedWord lazyDeoptStubCore(Pointer framePointer, UnsignedWord gpReturnValue, UnsignedWord fpReturnValue, boolean hasException, Object gpReturnValueObject) {
         DeoptimizedFrame deoptFrame;
-        Pointer newSp;
-        StackOverflowCheck.singleton().makeYellowZoneAvailable();
-        try {
-            /* The original return address is at offset 0 from the stack pointer */
-            CodePointer originalReturnAddress = framePointer.readWord(0);
-            assert originalReturnAddress.isNonNull();
+        /* The original return address is at offset 0 from the stack pointer */
+        CodePointer originalReturnAddress = framePointer.readWord(0);
+        VMError.guarantee(originalReturnAddress.isNonNull());
+
+        /* Clear the deoptimization slot. */
+        framePointer.writeWord(0, Word.nullPointer());
 
-            /* Clear the deoptimization slot. */
-            framePointer.writeWord(0, Word.nullPointer());
+        /*
+         * Write the old return address to the return address slot, so that stack walks see a
+         * consistent stack.
+         */
+        FrameAccess.singleton().writeReturnAddress(CurrentIsolate.getCurrentThread(), framePointer, originalReturnAddress);
 
+        try {
+            deoptFrame = constructLazilyDeoptimizedFrameInterruptibly(framePointer, originalReturnAddress, hasException);
+        } catch (OutOfMemoryError ex) {
             /*
-             * Write the old return address to the return address slot, so that stack walks see a
-             * consistent stack.
+             * If an OutOfMemoryError occurs during lazy deoptimization, we cannot let the frame
+             * being deoptimized handle the exception, because it might have been invalidated due to
+             * incorrect assumptions. Note that since unwindExceptionSkippingCaller does not return,
+             * this try...catch must not have a finally block, as it will not be executed.
              */
-            FrameAccess.singleton().writeReturnAddress(CurrentIsolate.getCurrentThread(), framePointer, originalReturnAddress);
+            ExceptionUnwind.unwindExceptionSkippingCaller(ex, framePointer);
+            throw UnreachableNode.unreachable();
+        }
 
-            UntetheredCodeInfo untetheredInfo = CodeInfoTable.lookupCodeInfo(originalReturnAddress);
-            Object tether = CodeInfoAccess.acquireTether(untetheredInfo);
-            try {
-                CodeInfo info = CodeInfoAccess.convert(untetheredInfo, tether);
-                deoptFrame = constructLazilyDeoptimizedFrameInterruptibly(framePointer, info, originalReturnAddress, hasException);
-            } finally {
-                CodeInfoAccess.releaseTether(untetheredInfo, tether);
-            }
+        DeoptimizationCounters.counters().deoptCount.inc();
+        VMError.guarantee(deoptFrame != null, "was not able to lazily construct a deoptimized frame");
 
-            DeoptimizationCounters.counters().deoptCount.inc();
-            assert deoptFrame != null : "was not able to lazily construct a deoptimized frame";
+        Pointer newSp = computeNewFramePointer(framePointer, deoptFrame);
 
-            newSp = computeNewFramePointer(framePointer, deoptFrame);
+        /* Build the content of the deopt target stack frames. */
+        deoptFrame.buildContent(newSp);
 
-            /* Build the content of the deopt target stack frames. */
-            deoptFrame.buildContent(newSp);
+        /*
+         * We fail fatally if eager deoptimization is invoked when the lazy deopt stub is executing,
+         * because eager deoptimization should only be invoked through stack introspection, which
+         * can only be called from the current thread. Thus, there is no use case for eager
+         * deoptimization to happen if the current thread is executing the lazy deopt stub.
+         */
+        VMError.guarantee(framePointer.readWord(0) == Word.nullPointer(), "Eager deoptimization should not occur when lazy deoptimization is in progress");
 
-            /*
-             * We fail fatally if eager deoptimization is invoked when the lazy deopt stub is
-             * executing, because eager deoptimization should only be invoked through stack
-             * introspection, which can only be called from the current thread. Thus, there is no
-             * use case for eager deoptimization to happen if the current thread is executing the
-             * lazy deopt stub.
-             */
-            VMError.guarantee(framePointer.readWord(0) == Word.nullPointer(), "Eager deoptimization should not occur when lazy deoptimization is in progress");
+        recentDeoptimizationEvents.append(deoptFrame.getCompletedMessage());
 
-            recentDeoptimizationEvents.append(deoptFrame.getCompletedMessage());
-        } finally {
-            StackOverflowCheck.singleton().protectYellowZone();
-        }
         // From this point on, only uninterruptible code may be executed.
         UnsignedWord updatedGpReturnValue = gpReturnValue;
         if (gpReturnValueObject != null) {
@@ -856,8 +856,23 @@ private static UnsignedWord lazyDeoptStubCore(Pointer framePointer, UnsignedWord
     }
 
     @Uninterruptible(reason = "Wrapper to call interruptible methods", calleeMustBe = false)
-    private static DeoptimizedFrame constructLazilyDeoptimizedFrameInterruptibly(Pointer sourceSp, CodeInfo info, CodePointer ip, boolean hasException) {
-        return constructLazilyDeoptimizedFrameInterruptibly0(sourceSp, info, ip, hasException);
+    private static DeoptimizedFrame constructLazilyDeoptimizedFrameInterruptibly(Pointer sourceSp, CodePointer ip, boolean hasException) {
+        StackOverflowCheck.singleton().makeYellowZoneAvailable();
+        SuspendSerialGCMaxHeapSize.suspendInCurrentThread();
+
+        try {
+            UntetheredCodeInfo untetheredInfo = CodeInfoTable.lookupCodeInfo(ip);
+            Object tether = CodeInfoAccess.acquireTether(untetheredInfo);
+            try {
+                CodeInfo info = CodeInfoAccess.convert(untetheredInfo, tether);
+                return constructLazilyDeoptimizedFrameInterruptibly0(sourceSp, info, ip, hasException);
+            } finally {
+                CodeInfoAccess.releaseTether(untetheredInfo, tether);
+            }
+        } finally {
+            SuspendSerialGCMaxHeapSize.resumeInCurrentThread();
+            StackOverflowCheck.singleton().protectYellowZone();
+        }
     }
 
     private static DeoptimizedFrame constructLazilyDeoptimizedFrameInterruptibly0(Pointer sourceSp, CodeInfo info, CodePointer ip, boolean hasException) {
diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/SuspendSerialGCMaxHeapSize.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/SuspendSerialGCMaxHeapSize.java
new file mode 100644
index 000000000000..e5b727a6c192
--- /dev/null
+++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/SuspendSerialGCMaxHeapSize.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2025, 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.svm.core.heap;
+
+import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE;
+
+import com.oracle.svm.core.Uninterruptible;
+import com.oracle.svm.core.threadlocal.FastThreadLocalFactory;
+import com.oracle.svm.core.threadlocal.FastThreadLocalInt;
+
+/**
+ * Allows the max heap size restriction to be temporarily suspended, in order to avoid running out
+ * of memory during critical VM operations. Note that calling {@link #suspendInCurrentThread()}
+ * only affects the current thread, so other threads may still attempt to allocate and throw an
+ * {@link OutOfMemoryError}.
+ *
+ * Suspending the heap limit only takes effect if the serial GC is used and the option
+ * SerialGCOptions.IgnoreMaxHeapSizeWhileInVMInternalCode is enabled.
+ */
+public class SuspendSerialGCMaxHeapSize {
+    private static final FastThreadLocalInt nestingDepth = FastThreadLocalFactory.createInt("SuspendSerialGCMaxHeapSize.nestingDepth");
+
+    /**
+     * Temporarily suspends the heap limit for the current thread. Must be paired with a call to
+     * {@link #resumeInCurrentThread}, best placed in a {@code finally} block. This method may be
+     * called multiple times in a nested fashion.
+     */
+    @Uninterruptible(reason = "Called from code that must not allocate before suspending the heap limit.", callerMustBe = true)
+    public static void suspendInCurrentThread() {
+        int oldValue = nestingDepth.get();
+        int newValue = oldValue + 1;
+        assert oldValue >= 0;
+        nestingDepth.set(newValue);
+    }
+
+    /**
+     * Undoes one call to {@link #suspendInCurrentThread}; the heap limit for the current thread is
+     * enforced again once the outermost suspension has been undone. This may only be called after
+     * a call to {@link #suspendInCurrentThread}.
+     */
+    @Uninterruptible(reason = "Called from code that must not allocate after resuming the heap limit.", callerMustBe = true)
+    public static void resumeInCurrentThread() {
+        int oldValue = nestingDepth.get();
+        int newValue = oldValue - 1;
+        assert newValue >= 0;
+        nestingDepth.set(newValue);
+    }
+
+    /**
+     * Returns true if the heap limit is currently suspended for the current thread.
+     */
+    @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true)
+    public static boolean isSuspended() {
+        return nestingDepth.get() > 0;
+    }
+}
diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/snippets/ExceptionUnwind.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/snippets/ExceptionUnwind.java
index b6565be5a656..63b1120608dc 100644
--- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/snippets/ExceptionUnwind.java
+++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/snippets/ExceptionUnwind.java
@@ -27,14 +27,11 @@
 import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE;
 import static jdk.graal.compiler.core.common.spi.ForeignCallDescriptor.CallSideEffect.NO_SIDE_EFFECT;
 
-import com.oracle.svm.core.threadlocal.FastThreadLocalBytes;
-import jdk.graal.compiler.word.Word;
 import org.graalvm.nativeimage.CurrentIsolate;
 import org.graalvm.nativeimage.ImageSingletons;
 import org.graalvm.nativeimage.IsolateThread;
 import org.graalvm.nativeimage.StackValue;
 import org.graalvm.nativeimage.c.function.CodePointer;
-import com.oracle.svm.core.c.BooleanPointer;
 import org.graalvm.nativeimage.c.struct.SizeOf;
 import org.graalvm.nativeimage.hosted.Feature;
 import org.graalvm.word.LocationIdentity;
@@ -42,6 +39,7 @@
 import org.graalvm.word.UnsignedWord;
 
 import com.oracle.svm.core.Uninterruptible;
+import com.oracle.svm.core.c.BooleanPointer;
 import com.oracle.svm.core.code.CodeInfoQueryResult;
 import com.oracle.svm.core.deopt.DeoptimizationSupport;
 import com.oracle.svm.core.deopt.DeoptimizedFrame;
@@ -55,11 +53,13 @@
 import com.oracle.svm.core.stack.JavaStackWalker;
 import com.oracle.svm.core.stack.StackOverflowCheck;
 import com.oracle.svm.core.thread.VMThreads;
+import com.oracle.svm.core.threadlocal.FastThreadLocalBytes;
 import com.oracle.svm.core.threadlocal.FastThreadLocalFactory;
 import com.oracle.svm.core.threadlocal.FastThreadLocalObject;
 import com.oracle.svm.core.util.VMError;
 
 import jdk.graal.compiler.nodes.UnreachableNode;
+import jdk.graal.compiler.word.Word;
 
 public abstract class ExceptionUnwind {
 
@@ -109,7 +109,7 @@ private static void unwindExceptionWithoutCalleeSavedRegisters(Throwable excepti
          */
         StackOverflowCheck.singleton().makeYellowZoneAvailable();
 
-        unwindExceptionInterruptible(exception, callerSP, false);
+        unwindExceptionInterruptible(exception, callerSP, false, false);
     }
 
     /** Foreign call: {@link #UNWIND_EXCEPTION_WITH_CALLEE_SAVED_REGISTERS}. */
@@ -119,7 +127,15 @@ private static void unwindExceptionWithoutCalleeSavedRegisters(Throwable excepti
     private static void unwindExceptionWithCalleeSavedRegisters(Throwable exception, Pointer callerSP) {
         StackOverflowCheck.singleton().makeYellowZoneAvailable();
 
-        unwindExceptionInterruptible(exception, callerSP, true);
+        unwindExceptionInterruptible(exception, callerSP, true, false);
+    }
+
+    @Uninterruptible(reason = "Must not execute recurring callbacks or a stack overflow check.", calleeMustBe = false)
+    @RestrictHeapAccess(access = RestrictHeapAccess.Access.NO_ALLOCATION, reason = "Must not allocate when unwinding the stack.")
+    public static void unwindExceptionSkippingCaller(Throwable exception, Pointer callerSP) {
+        StackOverflowCheck.singleton().makeYellowZoneAvailable();
+
+        unwindExceptionInterruptible(exception, callerSP, true, true);
     }
 
     /*
@@ -127,7 +135,7 @@ private static void unwindExceptionWithCalleeSavedRegisters(Throwable exception,
      * can use them simultaneously. All state must be in separate VMThreadLocals.
      */
     @RestrictHeapAccess(access = RestrictHeapAccess.Access.NO_ALLOCATION, reason = "Must not allocate when unwinding the stack.")
-    private static void unwindExceptionInterruptible(Throwable exception, Pointer callerSP, boolean fromMethodWithCalleeSavedRegisters) {
+    private static void unwindExceptionInterruptible(Throwable exception, Pointer callerSP, boolean fromMethodWithCalleeSavedRegisters, boolean skipCaller) {
         if (currentException.get() != null) {
             reportRecursiveUnwind(exception);
             return; /* Unreachable code. */
@@ -140,9 +148,10 @@ private static void unwindExceptionInterruptible(Throwable exception, Pointer ca
         }
 
         if (ImageSingletons.contains(ExceptionUnwind.class)) {
+            VMError.guarantee(!skipCaller, "Skipping the caller frame is not supported with custom exception unwind");
             ImageSingletons.lookup(ExceptionUnwind.class).customUnwindException(callerSP);
         } else {
-            defaultUnwindException(callerSP, fromMethodWithCalleeSavedRegisters);
+            defaultUnwindException(callerSP, fromMethodWithCalleeSavedRegisters, skipCaller);
         }
 
         /*
@@ -193,15 +202,24 @@ private static void reportUnhandledException(Throwable exception) {
     /** Hook to allow a {@link Feature} to install custom exception unwind code. */
     protected abstract void customUnwindException(Pointer callerSP);
 
+    /**
+     * Unwinds the stack to find an exception handler.
+     *
+     * @param startSP The SP from which to start the stack unwinding.
+     * @param fromMethodWithCalleeSavedRegisters Whether the first frame (identified by startSP)
+     *            has callee saved registers.
+     * @param skipCaller Whether the first (caller) frame should be skipped. If this is true, then
+     *            the value of fromMethodWithCalleeSavedRegisters will be ignored.
+     */
     @Uninterruptible(reason = "Prevent deoptimization apart from the few places explicitly considered safe for deoptimization")
-    private static void defaultUnwindException(Pointer startSP, boolean fromMethodWithCalleeSavedRegisters) {
+    private static void defaultUnwindException(Pointer startSP, boolean fromMethodWithCalleeSavedRegisters, boolean skipCaller) {
         IsolateThread thread = CurrentIsolate.getCurrentThread();
         boolean hasCalleeSavedRegisters = fromMethodWithCalleeSavedRegisters;
+        boolean skipFrame = skipCaller;
 
         /*
          * callerSP identifies the caller of the frame that wants to unwind an exception. So we can
-         * start looking for the exception handler immediately in that frame, without skipping any
-         * frames in between.
+         * start looking for the exception handler in that frame, possibly skipping one frame.
         */
         JavaStackWalk walk = StackValue.get(JavaStackWalker.sizeOfJavaStackWalk());
         JavaStackWalker.initialize(walk, thread, startSP);
@@ -210,37 +228,41 @@ private static void defaultUnwindException(Pointer startSP, boolean fromMethodWi
             JavaFrame frame = JavaStackWalker.getCurrentFrame(walk);
             VMError.guarantee(!JavaFrames.isUnknownFrame(frame), "Exception unwinding must not encounter unknown frame");
 
-            Pointer sp = frame.getSP();
-            if (DeoptimizationSupport.enabled()) {
-                DeoptimizedFrame deoptFrame = Deoptimizer.checkEagerDeoptimized(frame);
-                if (deoptFrame != null) {
-                    /* Deoptimization entry points always have an exception handler. */
-                    deoptTakeExceptionInterruptible(deoptFrame);
-                    jumpToHandler(sp, DeoptimizationSupport.getEagerDeoptStubPointer(), hasCalleeSavedRegisters);
-                    UnreachableNode.unreachable();
-                    return; /* Unreachable */
-                } else if (Deoptimizer.checkLazyDeoptimized(frame)) {
-                    long exceptionOffset = frame.getExceptionOffset();
-                    if (exceptionOffset != CodeInfoQueryResult.NO_EXCEPTION_OFFSET) {
-                        setLazyDeoptStubShouldReturnToExceptionHandler(true);
-                        /*
-                         * When handling exceptions, we always jump to the "object return" lazy
-                         * deopt stub, because the Exception object is always passed as the return
-                         * value.
-                         */
-                        jumpToHandler(sp, DeoptimizationSupport.getLazyDeoptStubObjectReturnPointer(), hasCalleeSavedRegisters);
+            if (!skipFrame) {
+                Pointer sp = frame.getSP();
+                if (DeoptimizationSupport.enabled()) {
+                    DeoptimizedFrame deoptFrame = Deoptimizer.checkEagerDeoptimized(frame);
+                    if (deoptFrame != null) {
+                        /* Deoptimization entry points always have an exception handler. */
+                        deoptTakeExceptionInterruptible(deoptFrame);
+                        jumpToHandler(sp, DeoptimizationSupport.getEagerDeoptStubPointer(), hasCalleeSavedRegisters);
                         UnreachableNode.unreachable();
                         return; /* Unreachable */
+                    } else if (Deoptimizer.checkLazyDeoptimized(frame)) {
+                        long exceptionOffset = frame.getExceptionOffset();
+                        if (exceptionOffset != CodeInfoQueryResult.NO_EXCEPTION_OFFSET) {
+                            setLazyDeoptStubShouldReturnToExceptionHandler(true);
+                            /*
+                             * When handling exceptions, we always jump to the "object return" lazy
+                             * deopt stub, because the Exception object is always passed as the
+                             * return value.
+                             */
+                            jumpToHandler(sp, DeoptimizationSupport.getLazyDeoptStubObjectReturnPointer(), hasCalleeSavedRegisters);
+                            UnreachableNode.unreachable();
+                            return; /* Unreachable */
+                        }
                     }
                 }
-            }
 
-            long exceptionOffset = frame.getExceptionOffset();
-            if (exceptionOffset != CodeInfoQueryResult.NO_EXCEPTION_OFFSET) {
-                CodePointer handlerIP = (CodePointer) ((UnsignedWord) frame.getIP()).add(Word.signed(exceptionOffset));
-                jumpToHandler(sp, handlerIP, hasCalleeSavedRegisters);
-                UnreachableNode.unreachable();
-                return; /* Unreachable */
+                long exceptionOffset = frame.getExceptionOffset();
+                if (exceptionOffset != CodeInfoQueryResult.NO_EXCEPTION_OFFSET) {
+                    CodePointer handlerIP = (CodePointer) ((UnsignedWord) frame.getIP()).add(Word.signed(exceptionOffset));
+                    jumpToHandler(sp, handlerIP, hasCalleeSavedRegisters);
+                    UnreachableNode.unreachable();
+                    return; /* Unreachable */
+                }
+            } else {
+                skipFrame = false;
             }
 
             /* No handler found in this frame, walk to caller frame. */
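
For reference, the pairing that the new SuspendSerialGCMaxHeapSize API expects from callers is the same suspend/resume-in-finally pattern used by constructLazilyDeoptimizedFrameInterruptibly in this patch. A minimal sketch of another caller could look like the following; the class, method names, and reason strings are illustrative placeholders and not part of this change.

    import com.oracle.svm.core.Uninterruptible;
    import com.oracle.svm.core.heap.SuspendSerialGCMaxHeapSize;

    final class SuspendHeapLimitExample {
        /* Uninterruptible wrapper, mirroring the structure of constructLazilyDeoptimizedFrameInterruptibly. */
        @Uninterruptible(reason = "Wrapper to call interruptible code with the heap limit suspended.", calleeMustBe = false)
        static void withSuspendedHeapLimit() {
            SuspendSerialGCMaxHeapSize.suspendInCurrentThread();
            try {
                criticalVmWork();
            } finally {
                /* Always resume; the thread-local nesting counter makes nested pairs safe. */
                SuspendSerialGCMaxHeapSize.resumeInCurrentThread();
            }
        }

        /*
         * Interruptible code that may allocate. While the limit is suspended on this thread,
         * allocations are not rejected because of the configured maximum heap size.
         */
        private static void criticalVmWork() {
        }
    }

As the class Javadoc states, the suspension only takes effect when the serial GC is used and SerialGCOptions.IgnoreMaxHeapSizeWhileInVMInternalCode is enabled.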