From e1f97206e1e4be2640a7031e08457f656624fbe9 Mon Sep 17 00:00:00 2001 From: Josef Eisl Date: Wed, 10 Apr 2024 12:09:23 +0200 Subject: [PATCH 1/8] sdk: add missing aarch64 entry for MUSL_CMAKE_TOOLCHAIN --- sdk/mx.sdk/suite.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/sdk/mx.sdk/suite.py b/sdk/mx.sdk/suite.py index bee3de300737..3977afe5778f 100644 --- a/sdk/mx.sdk/suite.py +++ b/sdk/mx.sdk/suite.py @@ -1382,10 +1382,24 @@ class UniversalDetector { "toolchain.cmake" : { "source_type": "string", "value": ''' -set(CMAKE_SYSTEM_NAME Linux) set(CMAKE_C_COMPILER /x86_64-linux-musl-native/bin/gcc) set(CMAKE_CXX_COMPILER /x86_64-linux-musl-native/bin/g++) set(CMAKE_AR /x86_64-linux-musl-native/bin/ar) +''' + }, + }, + "dependencies": [ + "MUSL_GCC_TOOLCHAIN", + ], + }, + "aarch64": { + "layout" : { + "toolchain.cmake" : { + "source_type": "string", + "value": ''' +set(CMAKE_C_COMPILER /aarch64-linux-musl-native/bin/gcc) +set(CMAKE_CXX_COMPILER /aarch64-linux-musl-native/bin/g++) +set(CMAKE_AR /aarch64-linux-musl-native/bin/ar) ''' }, }, From a9732ca83397b94d90bfaa7deba4076158f42019 Mon Sep 17 00:00:00 2001 From: Josef Eisl Date: Tue, 9 Apr 2024 16:02:57 +0200 Subject: [PATCH 2/8] svm/ci: svm gate should use the svm default dependencies --- substratevm/ci/ci.jsonnet | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substratevm/ci/ci.jsonnet b/substratevm/ci/ci.jsonnet index af0d9f734dea..254de446aa84 100644 --- a/substratevm/ci/ci.jsonnet +++ b/substratevm/ci/ci.jsonnet @@ -16,7 +16,7 @@ local t(limit) = task_spec({timelimit: limit}), // mx gate build config - local mxgate(tags) = os_arch_jdk_mixin + sg.mxgate(tags, suite="substratevm", suite_short="svm"), + local mxgate(tags) = os_arch_jdk_mixin + sg.mxgate(tags, suite="substratevm", suite_short="svm") + task_spec(common.deps.svm), local eclipse = task_spec(common.deps.eclipse), local jdt = task_spec(common.deps.jdt), From bcfd9cd29a70d52ca5cbdf128efbca67e91b460f 
Mon Sep 17 00:00:00 2001 From: Christian Haeubl Date: Tue, 2 Apr 2024 18:17:49 +0200 Subject: [PATCH 3/8] svm: add native CGroups implementation (libsvm_contatiner) --- substratevm/mx.substratevm/suite.py | 45 + .../oracle/svm/core/genscavenge/HeapImpl.java | 2 - .../svm/core/genscavenge/HeapParameters.java | 12 +- ...a => DarwinPhysicalMemorySupportImpl.java} | 2 +- .../src/com/oracle/svm/core/Containers.java | 131 - .../src/com/oracle/svm/core/Processor.java | 63 - .../oracle/svm/core/SubstrateDiagnostics.java | 81 +- .../com/oracle/svm/core/SubstrateOptions.java | 4 +- .../oracle/svm/core/container/Container.java | 190 + .../svm/core/container/ContainerLibrary.java | 106 + .../svm/core/container/OperatingSystem.java | 80 + .../oracle/svm/core/heap/PhysicalMemory.java | 41 +- .../svm/core/jdk/JavaLangSubstitutions.java | 14 +- .../svm/core/jdk/Target_java_nio_Bits.java | 5 +- .../core/jdk/Target_jdk_internal_misc_VM.java | 12 - ...t_jdk_internal_platform_CgroupMetrics.java | 7 +- .../core/jfr/Target_jdk_jfr_internal_JVM.java | 15 +- .../Target_jdk_jfr_internal_JVM_JDK21.java | 4 +- .../README.md | 71 + .../configure.py | 163 + .../ninja.template | 48 + .../src/hotspot/jni.h | 2001 ++++++ .../src/hotspot/jni_md.h | 66 + .../os/linux/cgroupSubsystem_linux.cpp | 577 ++ .../os/linux/cgroupSubsystem_linux.hpp | 355 ++ .../os/linux/cgroupV1Subsystem_linux.cpp | 351 ++ .../os/linux/cgroupV1Subsystem_linux.hpp | 138 + .../os/linux/cgroupV2Subsystem_linux.cpp | 279 + .../os/linux/cgroupV2Subsystem_linux.hpp | 102 + .../hotspot/os/linux/osContainer_linux.cpp | 166 + .../hotspot/os/linux/osContainer_linux.hpp | 82 + .../src/hotspot/os/linux/os_linux.cpp | 5533 +++++++++++++++++ .../src/hotspot/os/linux/os_linux.hpp | 443 ++ .../src/hotspot/os/linux/os_linux.inline.hpp | 62 + .../src/hotspot/os/posix/include/jvm_md.h | 94 + .../src/hotspot/os/posix/os_posix.cpp | 2042 ++++++ .../src/hotspot/os/posix/os_posix.hpp | 109 + .../src/hotspot/os/posix/os_posix.inline.hpp | 71 + 
.../src/hotspot/share/memory/allStatic.hpp | 38 + .../src/hotspot/share/memory/allocation.hpp | 649 ++ .../share/memory/allocation.inline.hpp | 117 + .../src/hotspot/share/runtime/os.cpp | 2410 +++++++ .../src/hotspot/share/runtime/os.hpp | 1111 ++++ .../src/hotspot/share/runtime/os.inline.hpp | 65 + .../share/utilities/attributeNoreturn.hpp | 51 + .../hotspot/share/utilities/checkedCast.hpp | 47 + .../share/utilities/compilerWarnings.hpp | 107 + .../share/utilities/compilerWarnings_gcc.hpp | 99 + .../share/utilities/globalDefinitions.hpp | 1365 ++++ .../share/utilities/globalDefinitions_gcc.hpp | 164 + .../src/hotspot/share/utilities/macros.hpp | 639 ++ .../src/hotspot/share/utilities/ostream.cpp | 1148 ++++ .../src/hotspot/share/utilities/ostream.hpp | 350 ++ .../src/hotspot/svm/share/logging/log.hpp | 71 + .../hotspot/svm/share/memory/allocation.cpp | 48 + .../src/hotspot/svm/share/runtime/globals.hpp | 35 + .../src/hotspot/svm/share/utilities/debug.cpp | 61 + .../src/hotspot/svm/share/utilities/debug.hpp | 73 + .../src/hotspot/svm/svm_container.cpp | 113 + .../src/hotspot/svm/svm_container.hpp | 56 + .../svm/test/jfr/TestContainerEvent.java | 4 +- 61 files changed, 22106 insertions(+), 282 deletions(-) rename substratevm/src/com.oracle.svm.core.posix/src/com/oracle/svm/core/posix/darwin/{PhysicalMemorySupportImpl.java => DarwinPhysicalMemorySupportImpl.java} (97%) delete mode 100644 substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/Containers.java delete mode 100644 substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/Processor.java create mode 100644 substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/Container.java create mode 100644 substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/ContainerLibrary.java create mode 100644 substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/OperatingSystem.java create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/README.md create mode 
100644 substratevm/src/com.oracle.svm.native.libcontainer/configure.py create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/ninja.template create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/jni.h create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/jni_md.h create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupSubsystem_linux.cpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupSubsystem_linux.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/osContainer_linux.cpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/osContainer_linux.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/os_linux.cpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/os_linux.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/os_linux.inline.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/posix/include/jvm_md.h create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/posix/os_posix.cpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/posix/os_posix.hpp create mode 100644 
substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/posix/os_posix.inline.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/memory/allStatic.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/memory/allocation.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/memory/allocation.inline.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/runtime/os.cpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/runtime/os.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/runtime/os.inline.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/attributeNoreturn.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/checkedCast.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/compilerWarnings.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/compilerWarnings_gcc.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/globalDefinitions.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/globalDefinitions_gcc.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/macros.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/ostream.cpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/ostream.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/logging/log.hpp create mode 100644 
substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/memory/allocation.cpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/runtime/globals.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/utilities/debug.cpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/utilities/debug.hpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/svm_container.cpp create mode 100644 substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/svm_container.hpp diff --git a/substratevm/mx.substratevm/suite.py b/substratevm/mx.substratevm/suite.py index d67637fc8af6..fbf8dc72dfba 100644 --- a/substratevm/mx.substratevm/suite.py +++ b/substratevm/mx.substratevm/suite.py @@ -850,6 +850,50 @@ "jacoco" : "exclude", }, + "com.oracle.svm.native.libcontainer": { + "subDir": "src", + "native": "static_lib", + "multitarget": { + "libc": ["glibc", "musl", "default"], + }, + "deliverable" : "svm_container", + "os_arch": { + "linux": { + "": { + "cflags": ["-O2", "-fno-rtti", "-fno-exceptions", "-fvisibility=hidden", "-fPIC", + # defines + "-DNATIVE_IMAGE", "-DLINUX", "-DINCLUDE_SUFFIX_COMPILER=_gcc", + # uncomment to enable debugging + # Note: -O0 might run into linker error because it does not purge unused symbols, + # e.g., '__cxa_pure_virtual'. -O1 or higher avoids the problem. 
+ # "-DASSERT", "-DPRINT_WARNINGS", "-g", "-O1", "-DLOG_LEVEL=6", + # include dirs + "-I/src/hotspot", + "-I/src/hotspot/share", + "-I/src/hotspot/svm", + "-I/src/hotspot/svm/share", + "-I/src/hotspot/os/linux", + "-I/src/hotspot/os/posix", + "-I/src/hotspot/os/posix/include", + # HotSpot standard flags + # See https://github.com/openjdk/jdk/blob/master/make/autoconf/flags-cflags.m4 + # C++ standard + "-std=c++14", + # Always enable optional macros + "-D__STDC_FORMAT_MACROS", "-D__STDC_LIMIT_MACROS", "-D__STDC_CONSTANT_MACROS", + ], + "ldflags": ["-Wl,-z,noexecstack"], + }, + }, + "": { + "": { + "ignore": "only needed on linux", + }, + }, + }, + "jacoco" : "exclude", + }, + "svm-jvmfuncs-fallback-builder": { "class" : "SubstrateJvmFuncsFallbacksBuilder", }, @@ -1798,6 +1842,7 @@ "./": [ "dependency:com.oracle.svm.native.libchelper/*", "dependency:com.oracle.svm.native.jvm.posix/*", + "dependency:com.oracle.svm.native.libcontainer/*", ], }, }, diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java index ade6b3c53d96..5032e2664dc4 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java @@ -59,7 +59,6 @@ import com.oracle.svm.core.heap.NoAllocationVerifier; import com.oracle.svm.core.heap.ObjectHeader; import com.oracle.svm.core.heap.ObjectVisitor; -import com.oracle.svm.core.heap.PhysicalMemory; import com.oracle.svm.core.heap.ReferenceHandler; import com.oracle.svm.core.heap.ReferenceHandlerThread; import com.oracle.svm.core.heap.ReferenceInternals; @@ -904,7 +903,6 @@ private long totalMemory() { @Substitute private long maxMemory() { - PhysicalMemory.size(); // ensure physical memory size is set correctly and not estimated 
GCImpl.getPolicy().updateSizeParameters(); return GCImpl.getPolicy().getMaximumHeapSize().rawValue(); } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapParameters.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapParameters.java index 665f5f0aca59..6ac97f272626 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapParameters.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapParameters.java @@ -24,8 +24,6 @@ */ package com.oracle.svm.core.genscavenge; -import jdk.graal.compiler.api.replacements.Fold; -import jdk.graal.compiler.word.Word; import org.graalvm.nativeimage.Platform; import org.graalvm.nativeimage.Platforms; import org.graalvm.word.UnsignedWord; @@ -38,6 +36,9 @@ import com.oracle.svm.core.util.UserError; import com.oracle.svm.core.util.VMError; +import jdk.graal.compiler.api.replacements.Fold; +import jdk.graal.compiler.word.Word; + /** Constants and variables for the size and layout of the heap and behavior of the collector. */ public final class HeapParameters { @Platforms(Platform.HOSTED_ONLY.class) @@ -87,6 +88,13 @@ public static int getMaxSurvivorSpaces() { * Memory configuration */ + /** + * Sets the {@link SubstrateGCOptions#MaxHeapSize} option value. + * + * Note that the value is used during VM initialization and stored in various places in the JDK + * in direct or derived form. These usages will not be updated! Thus, changing + * the maximum heap size at runtime is not recommended. 
+ */ public static void setMaximumHeapSize(UnsignedWord value) { SubstrateGCOptions.MaxHeapSize.update(value.rawValue()); } diff --git a/substratevm/src/com.oracle.svm.core.posix/src/com/oracle/svm/core/posix/darwin/PhysicalMemorySupportImpl.java b/substratevm/src/com.oracle.svm.core.posix/src/com/oracle/svm/core/posix/darwin/DarwinPhysicalMemorySupportImpl.java similarity index 97% rename from substratevm/src/com.oracle.svm.core.posix/src/com/oracle/svm/core/posix/darwin/PhysicalMemorySupportImpl.java rename to substratevm/src/com.oracle.svm.core.posix/src/com/oracle/svm/core/posix/darwin/DarwinPhysicalMemorySupportImpl.java index f7424c81e5c2..efc0ef928943 100644 --- a/substratevm/src/com.oracle.svm.core.posix/src/com/oracle/svm/core/posix/darwin/PhysicalMemorySupportImpl.java +++ b/substratevm/src/com.oracle.svm.core.posix/src/com/oracle/svm/core/posix/darwin/DarwinPhysicalMemorySupportImpl.java @@ -40,7 +40,7 @@ import com.oracle.svm.core.util.VMError; @AutomaticallyRegisteredImageSingleton(PhysicalMemorySupport.class) -class PhysicalMemorySupportImpl implements PhysicalMemorySupport { +class DarwinPhysicalMemorySupportImpl implements PhysicalMemorySupport { @Override public UnsignedWord size() { diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/Containers.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/Containers.java deleted file mode 100644 index c4491fae652a..000000000000 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/Containers.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright (c) 2020, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.svm.core; - -import static com.oracle.svm.core.Containers.Options.UseContainerSupport; - -import jdk.graal.compiler.options.Option; -import org.graalvm.nativeimage.Platform; - -import com.oracle.svm.core.jdk.Jvm; -import com.oracle.svm.core.option.HostedOptionKey; -import com.oracle.svm.core.util.VMError; - -import jdk.internal.platform.Container; -import jdk.internal.platform.Metrics; - -/** - * Provides container awareness to the rest of the VM. - * - * The implementation is based on the Container Metrics API from JDK 17. - */ -public class Containers { - - public static class Options { - @Option(help = "Enable detection and runtime container configuration support.")// - public static final HostedOptionKey UseContainerSupport = new HostedOptionKey<>(true); - } - - /** - * Calculates an appropriate number of active processors for the VM to use. The calculation is - * based on these two inputs: - *
    - *
  • cpu affinity - *
  • cpu quota & cpu period - *
- * - * @return number of CPUs - */ - public static int activeProcessorCount() { - /*- - * Algorithm (adapted from `src/hotspot/os/linux/cgroupSubsystem_linux.cpp`): - * - * Determine the number of available CPUs from sched_getaffinity. - * - * If user specified a quota (quota != -1), calculate the number of - * required CPUs by dividing quota by period. - * - * All results of division are rounded up to the next whole number. - * - * If quotas have not been specified, return the - * number of active processors in the system. - * - * If quotas have been specified, the resulting number - * returned will never exceed the number of active processors. - */ - int cpuCount = Jvm.JVM_ActiveProcessorCount(); - - int limitCount = cpuCount; - if (UseContainerSupport.getValue() && Platform.includedIn(Platform.LINUX.class)) { - Metrics metrics = Container.metrics(); - if (metrics != null) { - long quota = metrics.getCpuQuota(); - long period = metrics.getCpuPeriod(); - - int quotaCount = 0; - if (quota > -1 && period > 0) { - quotaCount = (int) Math.ceil(((double) quota) / period); - } - - /* Use quotas. */ - if (quotaCount != 0) { - limitCount = quotaCount; - } - } - } - - return Math.min(cpuCount, limitCount); - } - - /** - * Returns {@code true} if containerized execution was detected. - */ - public static boolean isContainerized() { - if (UseContainerSupport.getValue() && Platform.includedIn(Platform.LINUX.class)) { - return Container.metrics() != null; - } - return false; - } - - /** - * Returns the limit of available memory for this process. 
- * - * @return memory limit in bytes or -1 for unlimited - */ - public static long memoryLimitInBytes() { - if (UseContainerSupport.getValue() && Platform.includedIn(Platform.LINUX.class)) { - Metrics metrics; - try { - metrics = Container.metrics(); - } catch (StackOverflowError e) { - throw VMError.shouldNotReachHere("Could not get container metrics, likely due to using NIO in the container code of the JDK (JDK-8309191).", e); - } - if (metrics != null) { - return metrics.getMemoryLimit(); - } - } - return -1; - } -} diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/Processor.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/Processor.java deleted file mode 100644 index 430cb6ef7900..000000000000 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/Processor.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2023, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.svm.core; - -import org.graalvm.nativeimage.ImageSingletons; - -import com.oracle.svm.core.feature.AutomaticallyRegisteredImageSingleton; -import com.oracle.svm.core.util.VMError; - -import jdk.graal.compiler.api.replacements.Fold; - -@AutomaticallyRegisteredImageSingleton -public class Processor { - private int lastQueriedActiveProcessorCount = -1; - - @Fold - public static Processor singleton() { - return ImageSingletons.lookup(Processor.class); - } - - public int getActiveProcessorCount() { - VMError.guarantee(!SubstrateUtil.HOSTED, "must not be executed during the image build"); - - int result = getActiveProcessorCount0(); - lastQueriedActiveProcessorCount = result; - return result; - } - - private static int getActiveProcessorCount0() { - int optionValue = SubstrateOptions.ActiveProcessorCount.getValue(); - if (optionValue > 0) { - return optionValue; - } - - return Containers.activeProcessorCount(); - } - - public int getLastQueriedActiveProcessorCount() { - return lastQueriedActiveProcessorCount; - } -} diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateDiagnostics.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateDiagnostics.java index 822e8fa90410..a416839fac73 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateDiagnostics.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateDiagnostics.java @@ -30,6 +30,7 @@ import java.util.ArrayList; import java.util.Arrays; +import com.oracle.svm.core.container.OperatingSystem; import org.graalvm.collections.EconomicMap; import org.graalvm.nativeimage.CurrentIsolate; import org.graalvm.nativeimage.ImageSingletons; @@ -55,17 +56,16 @@ import com.oracle.svm.core.code.RuntimeCodeInfoHistory; import 
com.oracle.svm.core.code.RuntimeCodeInfoMemory; import com.oracle.svm.core.config.ConfigurationValues; +import com.oracle.svm.core.container.Container; import com.oracle.svm.core.deopt.DeoptimizationSupport; import com.oracle.svm.core.deopt.Deoptimizer; import com.oracle.svm.core.feature.AutomaticallyRegisteredImageSingleton; import com.oracle.svm.core.graal.RuntimeCompilation; import com.oracle.svm.core.graal.stackvalue.UnsafeStackValue; import com.oracle.svm.core.heap.Heap; -import com.oracle.svm.core.heap.PhysicalMemory; import com.oracle.svm.core.heap.RestrictHeapAccess; import com.oracle.svm.core.hub.DynamicHub; import com.oracle.svm.core.hub.LayoutEncoding; -import com.oracle.svm.core.jdk.Jvm; import com.oracle.svm.core.jdk.UninterruptibleUtils; import com.oracle.svm.core.jdk.UninterruptibleUtils.AtomicWord; import com.oracle.svm.core.locks.VMLockSupport; @@ -506,6 +506,11 @@ private static boolean pointsIntoNativeImageCode(CodePointer possibleIp) { return CodeInfoTable.lookupCodeInfo(possibleIp).isNonNull(); } + private static boolean isContainerized() { + boolean allowInit = !SubstrateOptions.AsyncSignalSafeDiagnostics.getValue(); + return Container.singleton().isContainerized(allowInit); + } + public static class FatalErrorState { AtomicWord diagnosticThread; volatile int diagnosticThunkIndex; @@ -855,7 +860,7 @@ public void printDiagnostics(Log log, ErrorContext context, int maxDiagnosticLev Platform platform = ImageSingletons.lookup(Platform.class); log.string("Platform: ").string(platform.getOS()).string("/").string(platform.getArchitecture()).newline(); log.string("Page size: ").unsigned(SubstrateOptions.getPageSize()).newline(); - log.string("Container support: ").bool(Containers.Options.UseContainerSupport.getValue()).newline(); + log.string("Containerized: ").bool(isContainerized()).newline(); log.string("CPU features used for AOT compiled code: ").string(getBuildTimeCpuFeatures()).newline(); log.indent(false); } @@ -877,24 +882,38 @@ public int 
maxInvocationCount() { public void printDiagnostics(Log log, ErrorContext context, int maxDiagnosticLevel, int invocationCount) { log.string("Runtime information:").indent(true); - int activeProcessorCount = Processor.singleton().getLastQueriedActiveProcessorCount(); - log.string("CPU cores (container): "); - if (activeProcessorCount > 0) { - log.signed(activeProcessorCount).newline(); - } else { - log.string("unknown").newline(); + if (isContainerized()) { + log.string("CPU cores (container): "); + int processorCount = getContainerActiveProcessorCount(); + if (processorCount > 0) { + log.signed(processorCount).newline(); + } else { + log.string("unknown").newline(); + } } log.string("CPU cores (OS): "); - if (!SubstrateOptions.AsyncSignalSafeDiagnostics.getValue() && SubstrateOptions.JNI.getValue()) { - log.signed(Jvm.JVM_ActiveProcessorCount()).newline(); + int processorCount = getOsProcessorCount(); + if (processorCount > 0) { + log.signed(processorCount).newline(); } else { log.string("unknown").newline(); } - log.string("Memory: "); - if (PhysicalMemory.isInitialized()) { - log.rational(PhysicalMemory.getCachedSize(), 1024 * 1024, 0).string("M").newline(); + if (isContainerized()) { + log.string("Memory (container): "); + UnsignedWord memory = getContainerPhysicalMemory(); + if (memory.aboveThan(0)) { + log.rational(memory, 1024 * 1024, 0).string("M").newline(); + } else { + log.string("unknown").newline(); + } + } + + log.string("Memory (OS): "); + UnsignedWord memory = getOsPhysicalMemorySize(); + if (memory.aboveThan(0)) { + log.rational(memory, 1024 * 1024, 0).string("M").newline(); } else { log.string("unknown").newline(); } @@ -916,6 +935,40 @@ public void printDiagnostics(Log log, ErrorContext context, int maxDiagnosticLev log.indent(false); } + + private static int getOsProcessorCount() { + if (SubstrateOptions.AsyncSignalSafeDiagnostics.getValue()) { + return OperatingSystem.singleton().getCachedActiveProcessorCount(); + } else if 
(SubstrateOptions.JNI.getValue()) { + return OperatingSystem.singleton().getActiveProcessorCount(); + } else { + return -1; + } + } + + private static UnsignedWord getOsPhysicalMemorySize() { + if (SubstrateOptions.AsyncSignalSafeDiagnostics.getValue()) { + return OperatingSystem.singleton().getCachedPhysicalMemorySize(); + } else { + return OperatingSystem.singleton().getPhysicalMemorySize(); + } + } + + private static int getContainerActiveProcessorCount() { + if (SubstrateOptions.AsyncSignalSafeDiagnostics.getValue()) { + return Container.singleton().getCachedActiveProcessorCount(); + } else { + return Container.singleton().getActiveProcessorCount(); + } + } + + private static UnsignedWord getContainerPhysicalMemory() { + if (SubstrateOptions.AsyncSignalSafeDiagnostics.getValue()) { + return Container.singleton().getCachedPhysicalMemory(); + } else { + return Container.singleton().getPhysicalMemory(); + } + } } private static class DumpCounters extends DiagnosticThunk { diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java index 17b06f2bc1e7..35d6ea4eaaed 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateOptions.java @@ -24,7 +24,6 @@ */ package com.oracle.svm.core; -import static com.oracle.svm.core.Containers.Options.UseContainerSupport; import static com.oracle.svm.core.option.RuntimeOptionKey.RuntimeOptionKeyFlag.Immutable; import static com.oracle.svm.core.option.RuntimeOptionKey.RuntimeOptionKeyFlag.RelevantForCompilationIsolates; import static jdk.graal.compiler.core.common.SpectrePHTMitigations.None; @@ -530,6 +529,9 @@ protected void onValueUpdate(EconomicMap, Object> values, Boolean o @Option(help = "Physical memory size (in bytes). 
By default, the value is queried from the OS/container during VM startup.", type = OptionType.Expert)// public static final RuntimeOptionKey MaxRAM = new RuntimeOptionKey<>(0L, Immutable); + @Option(help = "Enable detection and runtime container configuration support.")// + public static final HostedOptionKey UseContainerSupport = new HostedOptionKey<>(true); + @Option(help = "The size of each thread stack at run-time, in bytes.", type = OptionType.User)// public static final RuntimeOptionKey StackSize = new RuntimeOptionKey<>(0L); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/Container.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/Container.java new file mode 100644 index 000000000000..15d2b0c74f96 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/Container.java @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2024, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.container; + +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; + +import org.graalvm.nativeimage.ImageSingletons; +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; +import org.graalvm.word.LocationIdentity; +import org.graalvm.word.Pointer; +import org.graalvm.word.UnsignedWord; +import org.graalvm.word.WordFactory; + +import com.oracle.svm.core.SubstrateOptions; +import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.c.CGlobalData; +import com.oracle.svm.core.c.CGlobalDataFactory; +import com.oracle.svm.core.feature.AutomaticallyRegisteredImageSingleton; +import com.oracle.svm.core.util.VMError; + +import jdk.graal.compiler.api.replacements.Fold; +import jdk.graal.compiler.nodes.PauseNode; + +/** Provides container awareness to the rest of the VM. */ +@AutomaticallyRegisteredImageSingleton +public class Container { + /* The C++ library is shared between multiple isolates. 
*/ + private static final CGlobalData STATE = CGlobalDataFactory.createWord(); + private static final int CACHE_MS = 20; + + private long activeProcessorCountTimeoutMs; + private int cachedActiveProcessorCount; + + private long physicalMemoryTimeoutMs; + private UnsignedWord cachedPhysicalMemorySize; + private long memoryLimitInBytesTimeoutMs; + private long cachedMemoryLimitInBytes; + + @Platforms(Platform.HOSTED_ONLY.class) + Container() { + } + + @Fold + public static boolean isSupported() { + return SubstrateOptions.UseContainerSupport.getValue() && Platform.includedIn(Platform.LINUX.class); + } + + @Fold + public static Container singleton() { + return ImageSingletons.lookup(Container.class); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public boolean isContainerized() { + return isContainerized(true); + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public boolean isContainerized(boolean allowInit) { + if (!isSupported()) { + return false; + } + + UnsignedWord value = STATE.get().readWord(0); + if (allowInit && value == State.UNINITIALIZED) { + value = initialize(); + } + + assert value == State.CONTAINERIZED || value == State.NOT_CONTAINERIZED || !allowInit; + return value == State.CONTAINERIZED; + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + private static UnsignedWord initialize() { + Pointer statePtr = STATE.get(); + UnsignedWord value = statePtr.compareAndSwapWord(0, State.UNINITIALIZED, State.INITIALIZING, LocationIdentity.ANY_LOCATION); + if (value == State.UNINITIALIZED) { + value = switch (ContainerLibrary.initialize(ContainerLibrary.VERSION)) { + case ContainerLibrary.SUCCESS_IS_NOT_CONTAINERIZED: + yield State.NOT_CONTAINERIZED; + case ContainerLibrary.SUCCESS_IS_CONTAINERIZED: + yield State.CONTAINERIZED; + case ContainerLibrary.ERROR_LIBCONTAINER_TOO_OLD: + yield State.ERROR_LIBCONTAINER_TOO_OLD; + case 
ContainerLibrary.ERROR_LIBCONTAINER_TOO_NEW: + yield State.ERROR_LIBCONTAINER_TOO_NEW; + default: + yield State.ERROR_UNKNOWN; + }; + // write + statePtr.writeWordVolatile(0, value); + } else { + while (value == State.INITIALIZING) { + PauseNode.pause(); + value = statePtr.readWordVolatile(0, LocationIdentity.ANY_LOCATION); + } + } + VMError.guarantee(value != State.ERROR_LIBCONTAINER_TOO_OLD, "native-image tries to use a libsvm_container version that is too old"); + VMError.guarantee(value != State.ERROR_LIBCONTAINER_TOO_NEW, "native-image tries to use a libsvm_container version that is too new"); + VMError.guarantee(value == State.CONTAINERIZED || value == State.NOT_CONTAINERIZED, "unexpected libsvm_container initialize result"); + return value; + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public int getActiveProcessorCount() { + VMError.guarantee(isContainerized(false)); + + long currentMs = System.currentTimeMillis(); + if (currentMs > activeProcessorCountTimeoutMs) { + cachedActiveProcessorCount = ContainerLibrary.getActiveProcessorCount(); + activeProcessorCountTimeoutMs = currentMs + CACHE_MS; + } + return cachedActiveProcessorCount; + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public int getCachedActiveProcessorCount() { + VMError.guarantee(isContainerized(false)); + return cachedActiveProcessorCount; + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public UnsignedWord getPhysicalMemory() { + VMError.guarantee(isContainerized(false)); + + long currentMs = System.currentTimeMillis(); + if (currentMs > physicalMemoryTimeoutMs) { + cachedPhysicalMemorySize = ContainerLibrary.physicalMemory(); + physicalMemoryTimeoutMs = currentMs + CACHE_MS; + } + return cachedPhysicalMemorySize; + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public UnsignedWord getCachedPhysicalMemory() { + 
VMError.guarantee(isContainerized(false)); + return cachedPhysicalMemorySize; + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public long getMemoryLimitInBytes() { + VMError.guarantee(isContainerized(false)); + + long currentMs = System.currentTimeMillis(); + if (currentMs > memoryLimitInBytesTimeoutMs) { + cachedMemoryLimitInBytes = ContainerLibrary.getMemoryLimitInBytes(); + memoryLimitInBytesTimeoutMs = currentMs + CACHE_MS; + } + return cachedMemoryLimitInBytes; + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public long getCachedMemoryLimitInBytes() { + VMError.guarantee(isContainerized(false)); + return cachedMemoryLimitInBytes; + } + + private static class State { + static final UnsignedWord UNINITIALIZED = WordFactory.unsigned(0); + static final UnsignedWord INITIALIZING = WordFactory.unsigned(1); + static final UnsignedWord NOT_CONTAINERIZED = WordFactory.unsigned(2); + static final UnsignedWord CONTAINERIZED = WordFactory.unsigned(3); + static final UnsignedWord ERROR_LIBCONTAINER_TOO_OLD = WordFactory.unsigned(4); + static final UnsignedWord ERROR_LIBCONTAINER_TOO_NEW = WordFactory.unsigned(5); + static final UnsignedWord ERROR_UNKNOWN = WordFactory.unsigned(6); + } +} diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/ContainerLibrary.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/ContainerLibrary.java new file mode 100644 index 000000000000..b08c0b42582a --- /dev/null +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/ContainerLibrary.java @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2024, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.container; + +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; +import org.graalvm.nativeimage.c.CContext; +import org.graalvm.nativeimage.c.function.CFunction; +import org.graalvm.nativeimage.c.function.CFunction.Transition; +import org.graalvm.nativeimage.c.function.CLibrary; +import org.graalvm.word.UnsignedWord; + +import com.oracle.svm.core.SubstrateOptions; + +@CContext(ContainerLibraryDirectives.class) +@CLibrary(value = "svm_container", requireStatic = true, dependsOn = "m") +class ContainerLibrary { + static final int VERSION = 240100; + + // keep in sync with svm_container.hpp + static final int SUCCESS_IS_NOT_CONTAINERIZED = 0; + static final int SUCCESS_IS_CONTAINERIZED = 1; + static final int ERROR_LIBCONTAINER_TOO_OLD = 2; + static final int ERROR_LIBCONTAINER_TOO_NEW = 3; + + /** + * Initializes the native container library. 
+ * + * @param version should always be called with {@link #VERSION} + * @return {@link #SUCCESS_IS_CONTAINERIZED}, if native image runs in a container, + * {@link #SUCCESS_IS_NOT_CONTAINERIZED} if not. If the native library version does not + * match {@link #VERSION}, either {@link #ERROR_LIBCONTAINER_TOO_OLD} or + * {@link #ERROR_LIBCONTAINER_TOO_NEW} is returned. + */ + @CFunction(value = "svm_container_initialize", transition = Transition.NO_TRANSITION) + public static native int initialize(int version); + + @CFunction(value = "svm_container_physical_memory", transition = Transition.NO_TRANSITION) + public static native UnsignedWord physicalMemory(); + + @CFunction(value = "svm_container_memory_limit_in_bytes", transition = Transition.NO_TRANSITION) + public static native long getMemoryLimitInBytes(); + + @CFunction(value = "svm_container_memory_and_swap_limit_in_bytes", transition = Transition.NO_TRANSITION) + public static native long getMemoryAndSwapLimitInBytes(); + + @CFunction(value = "svm_container_memory_soft_limit_in_bytes", transition = Transition.NO_TRANSITION) + public static native long getMemorySoftLimitInBytes(); + + @CFunction(value = "svm_container_memory_usage_in_bytes", transition = Transition.NO_TRANSITION) + public static native long getMemoryUsageInBytes(); + + @CFunction(value = "svm_container_memory_max_usage_in_bytes", transition = Transition.NO_TRANSITION) + public static native long getMemoryMaxUsageInBytes(); + + @CFunction(value = "svm_container_rss_usage_in_bytes", transition = Transition.NO_TRANSITION) + public static native long getRssUsageInBytes(); + + @CFunction(value = "svm_container_cache_usage_in_bytes", transition = Transition.NO_TRANSITION) + public static native long getCacheUsageInBytes(); + + @CFunction(value = "svm_container_active_processor_count", transition = Transition.NO_TRANSITION) + public static native int getActiveProcessorCount(); +} + +@Platforms(Platform.HOSTED_ONLY.class) +class ContainerLibraryDirectives 
implements CContext.Directives { + /** + * True if the {@link ContainerLibrary} should be linked or not. + * + * Note that although this method returns {@code true} only if + * {@linkplain SubstrateOptions#useSerialGC() serial GC} or + * {@linkplain SubstrateOptions#useEpsilonGC() epsilon GC} is enabled, the {@link CFunction}s + * defined in {@link ContainerLibrary} are always registered and can be called even if this + * method returns {@code false}. Other GCs can provide alternative implementations themselves, + * or manually link against the native {@code + * svm_container} library, e.g., by calling + * {@code com.oracle.svm.hosted.c.NativeLibraries#addStaticJniLibrary}. + */ + @Override + public boolean isInConfiguration() { + return Container.isSupported() && (SubstrateOptions.useSerialGC() || SubstrateOptions.useEpsilonGC()); + } +} diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/OperatingSystem.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/OperatingSystem.java new file mode 100644 index 000000000000..e7c871de5799 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/OperatingSystem.java @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2024, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.container; + +import com.oracle.svm.core.Uninterruptible; +import org.graalvm.nativeimage.ImageSingletons; +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; +import org.graalvm.word.UnsignedWord; + +import com.oracle.svm.core.feature.AutomaticallyRegisteredImageSingleton; +import com.oracle.svm.core.heap.PhysicalMemory; +import com.oracle.svm.core.jdk.Jvm; + +import jdk.graal.compiler.api.replacements.Fold; + +import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; + +/** + * Query information about the machine that executes the native executable. This class bypasses the + * container support (see {@link Container}) and directly queries the information from the operating + * system. 
+ */ +@AutomaticallyRegisteredImageSingleton +public class OperatingSystem { + private int cachedActiveProcessorCount; + private UnsignedWord cachedPhysicalMemorySize; + + @Platforms(Platform.HOSTED_ONLY.class) + OperatingSystem() { + } + + @Fold + public static OperatingSystem singleton() { + return ImageSingletons.lookup(OperatingSystem.class); + } + + public int getActiveProcessorCount() { + int value = Jvm.JVM_ActiveProcessorCount(); + cachedActiveProcessorCount = value; + return value; + } + + public int getCachedActiveProcessorCount() { + return cachedActiveProcessorCount; + } + + public UnsignedWord getPhysicalMemorySize() { + UnsignedWord value = ImageSingletons.lookup(PhysicalMemory.PhysicalMemorySupport.class).size(); + cachedPhysicalMemorySize = value; + return value; + } + + @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) + public UnsignedWord getCachedPhysicalMemorySize() { + return cachedPhysicalMemorySize; + } +} diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/PhysicalMemory.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/PhysicalMemory.java index 4797e5cff21f..020fb0b35d67 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/PhysicalMemory.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/PhysicalMemory.java @@ -31,20 +31,16 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.locks.ReentrantLock; -import org.graalvm.nativeimage.ImageSingletons; import org.graalvm.nativeimage.Platform; import org.graalvm.word.UnsignedWord; import org.graalvm.word.WordFactory; -import com.oracle.svm.core.Containers; import com.oracle.svm.core.SubstrateOptions; import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.container.Container; +import com.oracle.svm.core.container.OperatingSystem; import 
com.oracle.svm.core.layeredimagesingleton.RuntimeOnlyImageSingleton; -import com.oracle.svm.core.stack.StackOverflowCheck; -import com.oracle.svm.core.thread.PlatformThreads; -import com.oracle.svm.core.thread.VMOperation; import com.oracle.svm.core.util.UnsignedUtils; import com.oracle.svm.core.util.VMError; import com.sun.management.OperatingSystemMXBean; @@ -62,7 +58,6 @@ public interface PhysicalMemorySupport extends RuntimeOnlyImageSingleton { private static final long K = 1024; - private static final ReentrantLock LOCK = new ReentrantLock(); private static final UnsignedWord UNSET_SENTINEL = UnsignedUtils.MAX_VALUE; private static UnsignedWord cachedSize = UNSET_SENTINEL; @@ -71,10 +66,6 @@ public static boolean isInitialized() { return cachedSize != UNSET_SENTINEL; } - public static boolean isInitializationInProgress() { - return LOCK.isHeldByCurrentThread(); - } - @Uninterruptible(reason = "May only be called during early startup.") public static void setSize(UnsignedWord value) { VMError.guarantee(!isInitialized(), "PhysicalMemorySize must not be initialized yet."); @@ -89,30 +80,14 @@ public static void setSize(UnsignedWord value) { * a VMOperation or during early stages of a thread or isolate. */ public static UnsignedWord size() { - if (isInitializationDisallowed()) { - /* - * Note that we want to have this safety check even when the cache is already - * initialized, so that we always detect wrong usages that could lead to problems. 
- */ - throw VMError.shouldNotReachHere("Accessing the physical memory size may require allocation and synchronization"); - } - if (!isInitialized()) { long memoryLimit = SubstrateOptions.MaxRAM.getValue(); if (memoryLimit > 0) { cachedSize = WordFactory.unsigned(memoryLimit); + } else if (Container.singleton().isContainerized()) { + cachedSize = Container.singleton().getPhysicalMemory(); } else { - LOCK.lock(); - try { - if (!isInitialized()) { - memoryLimit = Containers.memoryLimitInBytes(); - cachedSize = memoryLimit > 0 - ? WordFactory.unsigned(memoryLimit) - : ImageSingletons.lookup(PhysicalMemorySupport.class).size(); - } - } finally { - LOCK.unlock(); - } + cachedSize = OperatingSystem.singleton().getPhysicalMemorySize(); } } @@ -124,7 +99,7 @@ public static long usedSize() { // Windows, macOS, and containerized Linux use the OS bean. if (Platform.includedIn(Platform.WINDOWS.class) || Platform.includedIn(Platform.MACOS.class) || - (Containers.isContainerized() && Containers.memoryLimitInBytes() > 0)) { + (Container.singleton().isContainerized() && Container.singleton().getMemoryLimitInBytes() > 0)) { OperatingSystemMXBean osBean = (com.sun.management.OperatingSystemMXBean) ManagementFactory.getOperatingSystemMXBean(); return osBean.getTotalMemorySize() - osBean.getFreeMemorySize(); } @@ -195,8 +170,4 @@ public static UnsignedWord getCachedSize() { VMError.guarantee(isInitialized(), "Cached physical memory size is not available"); return cachedSize; } - - private static boolean isInitializationDisallowed() { - return Heap.getHeap().isAllocationDisallowed() || VMOperation.isInProgress() || !PlatformThreads.isCurrentAssigned() || StackOverflowCheck.singleton().isYellowZoneAvailable(); - } } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/JavaLangSubstitutions.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/JavaLangSubstitutions.java index ee160e3b8b03..511e8fb2f5b1 100644 --- 
a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/JavaLangSubstitutions.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/JavaLangSubstitutions.java @@ -50,7 +50,7 @@ import org.graalvm.nativeimage.impl.InternalPlatform; import com.oracle.svm.core.NeverInline; -import com.oracle.svm.core.Processor; +import com.oracle.svm.core.SubstrateOptions; import com.oracle.svm.core.SubstrateUtil; import com.oracle.svm.core.Uninterruptible; import com.oracle.svm.core.annotate.Alias; @@ -62,6 +62,8 @@ import com.oracle.svm.core.annotate.Substitute; import com.oracle.svm.core.annotate.TargetClass; import com.oracle.svm.core.annotate.TargetElement; +import com.oracle.svm.core.container.Container; +import com.oracle.svm.core.container.OperatingSystem; import com.oracle.svm.core.fieldvaluetransformer.FieldValueTransformerWithAvailability; import com.oracle.svm.core.hub.ClassForNameSupport; import com.oracle.svm.core.hub.DynamicHub; @@ -339,7 +341,15 @@ public void runFinalization() { @Substitute @Platforms(InternalPlatform.PLATFORM_JNI.class) private int availableProcessors() { - return Processor.singleton().getActiveProcessorCount(); + int optionValue = SubstrateOptions.ActiveProcessorCount.getValue(); + if (optionValue > 0) { + return optionValue; + } + + if (Container.singleton().isContainerized()) { + return Container.singleton().getActiveProcessorCount(); + } + return OperatingSystem.singleton().getActiveProcessorCount(); } } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/Target_java_nio_Bits.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/Target_java_nio_Bits.java index 8f7ef8445213..9cfc3b9fbb85 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/Target_java_nio_Bits.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/Target_java_nio_Bits.java @@ -31,7 +31,6 @@ import com.oracle.svm.core.annotate.RecomputeFieldValue; import 
com.oracle.svm.core.annotate.RecomputeFieldValue.Kind; import com.oracle.svm.core.annotate.TargetClass; -import com.oracle.svm.core.heap.PhysicalMemory; @TargetClass(className = "java.nio.Bits") final class Target_java_nio_Bits { @@ -59,8 +58,8 @@ final class Target_java_nio_Bits { /** * {@code java.nio.Bits} caches the max. direct memory size in the field {@code MAX_MEMORY}. We * disable this cache and always call {@link DirectMemoryAccessors#getDirectMemory()} instead, which - * uses our own cache. Otherwise, it could happen that {@code MAX_MEMORY} caches a temporary value - * that is used during early VM startup, before {@link PhysicalMemory} is fully initialized. + * uses our own caching logic. Otherwise, it could happen that {@code MAX_MEMORY} caches an outdated + * value (with the serial GC, the max. heap size can change at run-time). */ final class MaxMemoryAccessor { // Checkstyle: stop diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/Target_jdk_internal_misc_VM.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/Target_jdk_internal_misc_VM.java index fa2e5e45ba14..ae7b5fee71f8 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/Target_jdk_internal_misc_VM.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/Target_jdk_internal_misc_VM.java @@ -35,7 +35,6 @@ import com.oracle.svm.core.annotate.RecomputeFieldValue.Kind; import com.oracle.svm.core.annotate.Substitute; import com.oracle.svm.core.annotate.TargetClass; -import com.oracle.svm.core.heap.PhysicalMemory; import com.oracle.svm.core.snippets.KnownIntrinsics; import jdk.internal.misc.Unsafe; @@ -73,8 +72,6 @@ public static ClassLoader latestUserDefinedLoader0() { } final class DirectMemoryAccessors { - private static final long DIRECT_MEMORY_DURING_INITIALIZATION = 25 * 1024 * 1024; - /* * Not volatile to avoid a memory barrier when reading the values. 
Instead, an explicit barrier * is inserted when writing the values. @@ -102,15 +99,6 @@ private static long tryInitialize() { * No value explicitly specified. The default in the JDK in this case is the maximum * heap size. */ - if (PhysicalMemory.isInitializationInProgress()) { - /* - * When initializing PhysicalMemory, we use NIO/cgroups code that calls - * VM.getDirectMemory(). When this initialization is in progress, we need to prevent - * that Runtime.maxMemory() is called below because it would trigger a recursive - * initialization of PhysicalMemory. So, we return a temporary value. - */ - return DIRECT_MEMORY_DURING_INITIALIZATION; - } newDirectMemory = Runtime.getRuntime().maxMemory(); } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/Target_jdk_internal_platform_CgroupMetrics.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/Target_jdk_internal_platform_CgroupMetrics.java index 134d4b8a8eb5..fe9e2184a5a1 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/Target_jdk_internal_platform_CgroupMetrics.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jdk/Target_jdk_internal_platform_CgroupMetrics.java @@ -24,12 +24,9 @@ */ package com.oracle.svm.core.jdk; -import static com.oracle.svm.core.Containers.Options.UseContainerSupport; - -import org.graalvm.nativeimage.Platform; - import com.oracle.svm.core.annotate.Substitute; import com.oracle.svm.core.annotate.TargetClass; +import com.oracle.svm.core.container.Container; @TargetClass(className = "jdk.internal.platform.CgroupMetrics", onlyWith = PlatformHasClass.class) final class Target_jdk_internal_platform_CgroupMetrics { @@ -40,6 +37,6 @@ public static boolean isUseContainerSupport() { * i.e., only relies on hosted options and other conditions that are constant. Inlining * before analysis ensures that the constant is propagated out to call sites. 
*/ - return UseContainerSupport.getValue() && Platform.includedIn(Platform.LINUX.class); + return Container.isSupported(); } } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/Target_jdk_jfr_internal_JVM.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/Target_jdk_jfr_internal_JVM.java index 6834024ea13d..eae3988f906a 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/Target_jdk_jfr_internal_JVM.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/Target_jdk_jfr_internal_JVM.java @@ -27,19 +27,18 @@ import java.util.List; import java.util.function.BooleanSupplier; -import org.graalvm.nativeimage.ImageSingletons; import org.graalvm.nativeimage.Platform; import org.graalvm.nativeimage.Platforms; import org.graalvm.nativeimage.ProcessProperties; -import com.oracle.svm.core.Containers; import com.oracle.svm.core.Uninterruptible; import com.oracle.svm.core.annotate.Alias; import com.oracle.svm.core.annotate.RecomputeFieldValue; import com.oracle.svm.core.annotate.Substitute; import com.oracle.svm.core.annotate.TargetClass; import com.oracle.svm.core.annotate.TargetElement; -import com.oracle.svm.core.heap.PhysicalMemory.PhysicalMemorySupport; +import com.oracle.svm.core.container.Container; +import com.oracle.svm.core.container.OperatingSystem; import com.oracle.svm.core.jdk.JDKLatest; import com.oracle.svm.core.jfr.traceid.JfrTraceId; import com.oracle.svm.core.util.PlatformTimeUtils; @@ -530,15 +529,17 @@ public static long getTypeId(String name) { @Substitute @TargetElement(onlyWith = JDKLatest.class) // public static boolean isContainerized() { - return Containers.isContainerized(); + return Container.singleton().isContainerized(); } @Substitute @TargetElement(onlyWith = JDKLatest.class) // public static long hostTotalMemory() { - // This is intentionally using PhysicalMemorySupport since we are - // interested in the host values (and not the containerized values). 
- return ImageSingletons.lookup(PhysicalMemorySupport.class).size().rawValue(); + /* + * This is unconditionally using OperatingSystem#getPhysicalMemorySize since we are interested in + * the host values (and not the containerized values). + */ + return OperatingSystem.singleton().getPhysicalMemorySize().rawValue(); } @Substitute diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/Target_jdk_jfr_internal_JVM_JDK21.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/Target_jdk_jfr_internal_JVM_JDK21.java index 585a31f45409..f7f47ef66c78 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/Target_jdk_jfr_internal_JVM_JDK21.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/Target_jdk_jfr_internal_JVM_JDK21.java @@ -30,12 +30,12 @@ import org.graalvm.nativeimage.ImageSingletons; import org.graalvm.nativeimage.ProcessProperties; -import com.oracle.svm.core.Containers; import com.oracle.svm.core.Uninterruptible; import com.oracle.svm.core.annotate.Alias; import com.oracle.svm.core.annotate.RecomputeFieldValue; import com.oracle.svm.core.annotate.Substitute; import com.oracle.svm.core.annotate.TargetClass; +import com.oracle.svm.core.container.Container; import com.oracle.svm.core.jdk.JDK21OrEarlier; import com.oracle.svm.core.jfr.traceid.JfrTraceId; @@ -358,7 +358,7 @@ public long getTypeId(String name) { @Substitute public boolean isContainerized() { - return Containers.isContainerized(); + return Container.singleton().isContainerized(); } @Substitute diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/README.md b/substratevm/src/com.oracle.svm.native.libcontainer/README.md new file mode 100644 index 000000000000..824c436f2c68 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/README.md @@ -0,0 +1,71 @@ +# Native cgroup support for SVM + +This contains `libsvm_container`, the native cgroup support for SVM. 
+The C code is ported from the OpenJDK and currently based on: +https://github.com/openjdk/jdk/tree/9049402a1b9394095b04287eef1f2d46c4da60e9/src/hotspot + +## Building + +The code is built via `mx build`, so no special actions are required. There is +[`configure.py`](./configure.py), which generates a `build.ninja` that can help with local debugging, +but it is not used by `mx build` and there is no guarantee that it works correctly. Use at your own +risk. The default `mx build` command only uses settings from `suite.py`, so values in +`build.ninja` or those generated by `configure.py` might be outdated. + +## Dependencies + +For building `libsvm_container`, a C++14 compiler is required. Some parts of `libsvm_container` depend on `libm`. +At image build time, `native-image` will automatically link statically against `libsvm_container` and `libm`. +No further dependencies are needed at image build or image run time. +Specifically, `libsvm_container` does _not_ depend on any code from the C++ standard library. + +## Code Layout + +The file structure is inherited from the OpenJDK. The files in [`src/hotspot`](./src/hotspot) are +mostly unmodified except for sections guarded with `#ifdef NATIVE_IMAGE`. The files in +[`src/svm`](./src/svm) are replacements for files that exist in the OpenJDK, but are completely +custom. They only provide the minimal required functionality and are specific to SVM. + +## Updating + +While the code in here is completely independent and does not need to be in sync with the OpenJDK, +it should be updated regularly to profit from upstream fixes and improvements. To do so, replace +the files in [`src/hotspot`](./src/hotspot) with those from the OpenJDK. Then reapply all the +changes (`#ifdef` guards) using the diff tool of your choice. Finally, adapt the files in +[`src/svm`](./src/svm) to provide new functionality, if needed. Don't forget to update the import +revision mentioned in this file. 
+ +## Local Testing + +There are various ways for running applications like native images in cgroups. +Also note that many containerization tools such as Docker use cgroups for resource constraints. + +### Using CGroup commands + +The most basic way of running a program in a cgroup is to create them manually. For example: + +```bash +# create cgroup +cgcreate -a -t -g memory,cpu:testgroup +ls -l /sys/fs/cgroup/cpu/testgroup +ls -l /sys/fs/cgroup/memory/testgroup + +# set limits +echo 10000000 > /sys/fs/cgroup/memory/testgroup/memory.limit_in_bytes +echo 12000000 > /sys/fs/cgroup/memory/testgroup/memory.soft_limit_in_bytes +echo 100 > /sys/fs/cgroup/cpu/testgroup/cpu.shares + +# run image in cgroup +cgexec -g memory,cpu:testgroup ./test + +# delete cgroup +cgdelete -g memory,cpu:testgroup +``` + +### Using systemd + +Another possibility is to run a program in a transient systemd scope with resource constraints: + +```bash +systemd-run --scope -p MemoryMax=2G --user ./test +``` diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/configure.py b/substratevm/src/com.oracle.svm.native.libcontainer/configure.py new file mode 100644 index 000000000000..4c9d52e8bddb --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/configure.py @@ -0,0 +1,163 @@ +# +# Copyright (c) 2024, 2024, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +import os +import pathlib +import shutil + +from pathlib import Path + +ROOTS = ['src'] +SOURCE_EXTENSIONS = ['*.c', '*.cpp'] +LIB_NAME = 'libcontainer' +CONFIGS = ['product', 'fastdebug', 'debug'] + +# Find all source files. +cur_dir = os.getcwd() +source_files = [] +for root in ROOTS: + for ext in SOURCE_EXTENSIONS: + pattern = f'**{os.sep}{ext}' + source_files += Path(root).glob(pattern) + +# Determine which libc variants are supported. +libcs = [("glibc", "g++", "ar")] +if "MUSL_TOOLCHAIN" in os.environ: + musl_gcc = os.environ["MUSL_TOOLCHAIN"] + "/bin/x86_64-linux-musl-c++" + musl_ar = os.environ["MUSL_TOOLCHAIN"] + "/bin/x86_64-linux-musl-gcc-ar" + libcs += [("musl", musl_gcc, musl_ar)] + +# Generate rules for all configs. +rules = "" +for config in CONFIGS: + # Generate rules for all libc variants. + for libc, gcc, ar in libcs: + # Generate cxx. + rules += os.linesep + rules += f"cxx_{config}_{libc} = {gcc}{os.linesep}" + + # Generate cxx rule. + for file_type in ["a", "so"]: + rules += os.linesep + rules += f"rule cxx_{config}_{file_type}_{libc}{os.linesep}" + rules += f" command = $cxx_{config}_{libc} -MMD -MT $out -MF $out.d $cflags_linux_{config}_{file_type} -c $in -o $out{os.linesep}" + rules += f" description = CXX $out{os.linesep}" + rules += f" depfile = $out.d{os.linesep}" + rules += f" deps = gcc{os.linesep}" + + # Generate ar rule. 
+ rules += os.linesep + rules += f"ar_{config}_{libc} = {ar}{os.linesep}" + + rules += os.linesep + rules += f"rule ar_{config}_{libc}{os.linesep}" + rules += f" command = rm -f $out && $ar_{config}_{libc} crs $out $in{os.linesep}" + rules += f" description = AR $out{os.linesep}" + + # Generate rule to build source files into object files for static lib. + rules += os.linesep + rules += "# Build source files into object files." + rules += os.linesep + + for path in source_files: + rules += f'build $builddir{os.sep}{config}{os.sep}a{os.sep}{libc}{os.sep}{path.parent}{os.sep}{path.stem}.o: cxx_{config}_a_{libc} $root{os.sep}{path}{os.linesep}' + + # Generate rule to create a static lib from all the object files. + rules += os.linesep + rules += "# Create a static lib from all the object files." + rules += os.linesep + rules += f'build $builddir{os.sep}{LIB_NAME}_{config}_{libc}.a: ar_{config}_{libc}' + + for path in source_files: + rules += f' ${os.linesep}' + rules += f' $builddir{os.sep}{config}{os.sep}a{os.sep}{libc}{os.sep}{path.parent}{os.sep}{path.stem}.o' + + rules += os.linesep + + # Generate a build target for the static lib. + rules += os.linesep + rules += f'build build_{config}_{libc}_a: phony $builddir/{LIB_NAME}_{config}_{libc}.a{os.linesep}' + + # Generate link. + rules += os.linesep + rules += f"link_{config}_{libc} = {gcc}{os.linesep}" + + # Generate link rule. + rules += os.linesep + rules += f"rule link_{config}_{libc}{os.linesep}" + rules += f" command = $link_{config}_{libc} $ldflags_linux_{config} -o $out $in{os.linesep}" + rules += f" description = LINK $out{os.linesep}" + + # Generate rule to build source files into object files for shared lib. + rules += os.linesep + rules += "# Build source files into object files for shared library." 
+ rules += os.linesep + + for path in source_files: + rules += f'build $builddir{os.sep}{config}{os.sep}so{os.sep}{libc}{os.sep}{path.parent}{os.sep}{path.stem}.o: cxx_{config}_so_{libc} $root{os.sep}{path}{os.linesep}' + + # Generate rule to create a shared lib from all the object files. + rules += os.linesep + rules += "# Create a shared lib from all the object files." + rules += os.linesep + rules += f'build $builddir{os.sep}{LIB_NAME}_{config}_{libc}.so: link_{config}_{libc}' + + for path in source_files: + rules += f' ${os.linesep}' + rules += f' $builddir{os.sep}{config}{os.sep}so{os.sep}{libc}{os.sep}{path.parent}{os.sep}{path.stem}.o' + + rules += os.linesep + + # Generate a build target for the shared lib. + rules += os.linesep + rules += f'build build_{config}_{libc}_so: phony $builddir{os.sep}{LIB_NAME}_{config}_{libc}.so{os.linesep}' + + # Generate config-specific build targets. + rules += os.linesep + rules += "# Build targets." + rules += os.linesep + for file_type in ["a", "so"]: + rules += f"build build_{config}_{file_type}: phony" + for libc, _, _ in libcs: + rules += f" build_{config}_{libc}_{file_type}" + rules += os.linesep + + rules += f"build build_{config}: phony" + for file_type in ["a", "so"]: + rules += f" build_{config}_{file_type}" + rules += os.linesep + +# Generate target that builds all libs. +rules += os.linesep +rules += f"build build_all: phony" +for config in CONFIGS: + rules += f" build_{config}" +rules += os.linesep +rules += os.linesep +rules += f"default build_all{os.linesep}" + +# Append the rules to the ninja template. 
+shutil.copyfile('ninja.template', 'build.ninja') +with open('build.ninja', 'a') as file: + file.write(rules) diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/ninja.template b/substratevm/src/com.oracle.svm.native.libcontainer/ninja.template new file mode 100644 index 000000000000..386a984d6437 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/ninja.template @@ -0,0 +1,48 @@ +# Copyright (c) 2024, 2024, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# + +root = . 
+builddir = build + +includes = -Isrc/hotspot -Isrc/hotspot/share -Isrc/hotspot/svm -Isrc/hotspot/svm/share +includes_linux = $includes -Isrc/hotspot/os/linux -Isrc/hotspot/os/posix -Isrc/hotspot/os/posix/include +defines_linux = -DNATIVE_IMAGE -DLINUX -DINCLUDE_SUFFIX_COMPILER=_gcc -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS + +cflags_linux = $defines_linux -std=c++14 $includes_linux -fno-rtti -fno-exceptions -fvisibility=hidden -fPIC +cflags_linux_product = $cflags_linux -O2 +cflags_linux_fastdebug = $cflags_linux -O2 -g -DASSERT -DPRINT_WARNINGS +cflags_linux_debug = $cflags_linux -O0 -g -DASSERT -DPRINT_WARNINGS + +cflags_linux_product_a = $cflags_linux_product +cflags_linux_fastdebug_a = $cflags_linux_fastdebug +cflags_linux_debug_a = $cflags_linux_debug + +cflags_linux_product_so = $cflags_linux_product +cflags_linux_fastdebug_so = $cflags_linux_fastdebug +cflags_linux_debug_so = $cflags_linux_debug + +ldflags_linux = -shared -Wl,-z,noexecstack +ldflags_linux_product = $ldflags_linux +ldflags_linux_fastdebug = $ldflags_linux -g +ldflags_linux_debug = $ldflags_linux -g diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/jni.h b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/jni.h new file mode 100644 index 000000000000..c85da1bc67f2 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/jni.h @@ -0,0 +1,2001 @@ +/* + * Copyright (c) 1996, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * We used part of Netscape's Java Runtime Interface (JRI) as the starting + * point of our design and implementation. + */ + +/****************************************************************************** + * Java Runtime Interface + * Copyright (c) 1996 Netscape Communications Corporation. All rights reserved. 
+ *****************************************************************************/ + +#ifndef _JAVASOFT_JNI_H_ +#define _JAVASOFT_JNI_H_ + +#include +#include + +/* jni_md.h contains the machine-dependent typedefs for jbyte, jint + and jlong */ + +#include "jni_md.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * JNI Types + */ + +#ifndef JNI_TYPES_ALREADY_DEFINED_IN_JNI_MD_H + +typedef unsigned char jboolean; +typedef unsigned short jchar; +typedef short jshort; +typedef float jfloat; +typedef double jdouble; + +typedef jint jsize; + +#ifdef __cplusplus + +class _jobject {}; +class _jclass : public _jobject {}; +class _jthrowable : public _jobject {}; +class _jstring : public _jobject {}; +class _jarray : public _jobject {}; +class _jbooleanArray : public _jarray {}; +class _jbyteArray : public _jarray {}; +class _jcharArray : public _jarray {}; +class _jshortArray : public _jarray {}; +class _jintArray : public _jarray {}; +class _jlongArray : public _jarray {}; +class _jfloatArray : public _jarray {}; +class _jdoubleArray : public _jarray {}; +class _jobjectArray : public _jarray {}; + +typedef _jobject *jobject; +typedef _jclass *jclass; +typedef _jthrowable *jthrowable; +typedef _jstring *jstring; +typedef _jarray *jarray; +typedef _jbooleanArray *jbooleanArray; +typedef _jbyteArray *jbyteArray; +typedef _jcharArray *jcharArray; +typedef _jshortArray *jshortArray; +typedef _jintArray *jintArray; +typedef _jlongArray *jlongArray; +typedef _jfloatArray *jfloatArray; +typedef _jdoubleArray *jdoubleArray; +typedef _jobjectArray *jobjectArray; + +#else + +struct _jobject; + +typedef struct _jobject *jobject; +typedef jobject jclass; +typedef jobject jthrowable; +typedef jobject jstring; +typedef jobject jarray; +typedef jarray jbooleanArray; +typedef jarray jbyteArray; +typedef jarray jcharArray; +typedef jarray jshortArray; +typedef jarray jintArray; +typedef jarray jlongArray; +typedef jarray jfloatArray; +typedef jarray jdoubleArray; +typedef jarray 
jobjectArray; + +#endif + +typedef jobject jweak; + +typedef union jvalue { + jboolean z; + jbyte b; + jchar c; + jshort s; + jint i; + jlong j; + jfloat f; + jdouble d; + jobject l; +} jvalue; + +struct _jfieldID; +typedef struct _jfieldID *jfieldID; + +struct _jmethodID; +typedef struct _jmethodID *jmethodID; + +/* Return values from jobjectRefType */ +typedef enum _jobjectType { + JNIInvalidRefType = 0, + JNILocalRefType = 1, + JNIGlobalRefType = 2, + JNIWeakGlobalRefType = 3 +} jobjectRefType; + + +#endif /* JNI_TYPES_ALREADY_DEFINED_IN_JNI_MD_H */ + +/* + * jboolean constants + */ + +#define JNI_FALSE 0 +#define JNI_TRUE 1 + +/* + * possible return values for JNI functions. + */ + +#define JNI_OK 0 /* success */ +#define JNI_ERR (-1) /* unknown error */ +#define JNI_EDETACHED (-2) /* thread detached from the VM */ +#define JNI_EVERSION (-3) /* JNI version error */ +#define JNI_ENOMEM (-4) /* not enough memory */ +#define JNI_EEXIST (-5) /* VM already created */ +#define JNI_EINVAL (-6) /* invalid arguments */ + +/* + * used in ReleaseScalarArrayElements + */ + +#define JNI_COMMIT 1 +#define JNI_ABORT 2 + +/* + * used in RegisterNatives to describe native method name, signature, + * and function pointer. + */ + +typedef struct { + char *name; + char *signature; + void *fnPtr; +} JNINativeMethod; + +/* + * JNI Native Method Interface. + */ + +struct JNINativeInterface_; + +struct JNIEnv_; + +#ifdef __cplusplus +typedef JNIEnv_ JNIEnv; +#else +typedef const struct JNINativeInterface_ *JNIEnv; +#endif + +/* + * JNI Invocation Interface. 
+ */ + +struct JNIInvokeInterface_; + +struct JavaVM_; + +#ifdef __cplusplus +typedef JavaVM_ JavaVM; +#else +typedef const struct JNIInvokeInterface_ *JavaVM; +#endif + +struct JNINativeInterface_ { + void *reserved0; + void *reserved1; + void *reserved2; + + void *reserved3; + jint (JNICALL *GetVersion)(JNIEnv *env); + + jclass (JNICALL *DefineClass) + (JNIEnv *env, const char *name, jobject loader, const jbyte *buf, + jsize len); + jclass (JNICALL *FindClass) + (JNIEnv *env, const char *name); + + jmethodID (JNICALL *FromReflectedMethod) + (JNIEnv *env, jobject method); + jfieldID (JNICALL *FromReflectedField) + (JNIEnv *env, jobject field); + + jobject (JNICALL *ToReflectedMethod) + (JNIEnv *env, jclass cls, jmethodID methodID, jboolean isStatic); + + jclass (JNICALL *GetSuperclass) + (JNIEnv *env, jclass sub); + jboolean (JNICALL *IsAssignableFrom) + (JNIEnv *env, jclass sub, jclass sup); + + jobject (JNICALL *ToReflectedField) + (JNIEnv *env, jclass cls, jfieldID fieldID, jboolean isStatic); + + jint (JNICALL *Throw) + (JNIEnv *env, jthrowable obj); + jint (JNICALL *ThrowNew) + (JNIEnv *env, jclass clazz, const char *msg); + jthrowable (JNICALL *ExceptionOccurred) + (JNIEnv *env); + void (JNICALL *ExceptionDescribe) + (JNIEnv *env); + void (JNICALL *ExceptionClear) + (JNIEnv *env); + void (JNICALL *FatalError) + (JNIEnv *env, const char *msg); + + jint (JNICALL *PushLocalFrame) + (JNIEnv *env, jint capacity); + jobject (JNICALL *PopLocalFrame) + (JNIEnv *env, jobject result); + + jobject (JNICALL *NewGlobalRef) + (JNIEnv *env, jobject lobj); + void (JNICALL *DeleteGlobalRef) + (JNIEnv *env, jobject gref); + void (JNICALL *DeleteLocalRef) + (JNIEnv *env, jobject obj); + jboolean (JNICALL *IsSameObject) + (JNIEnv *env, jobject obj1, jobject obj2); + jobject (JNICALL *NewLocalRef) + (JNIEnv *env, jobject ref); + jint (JNICALL *EnsureLocalCapacity) + (JNIEnv *env, jint capacity); + + jobject (JNICALL *AllocObject) + (JNIEnv *env, jclass clazz); + jobject (JNICALL 
*NewObject) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jobject (JNICALL *NewObjectV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jobject (JNICALL *NewObjectA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + jclass (JNICALL *GetObjectClass) + (JNIEnv *env, jobject obj); + jboolean (JNICALL *IsInstanceOf) + (JNIEnv *env, jobject obj, jclass clazz); + + jmethodID (JNICALL *GetMethodID) + (JNIEnv *env, jclass clazz, const char *name, const char *sig); + + jobject (JNICALL *CallObjectMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + jobject (JNICALL *CallObjectMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jobject (JNICALL *CallObjectMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue * args); + + jboolean (JNICALL *CallBooleanMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + jboolean (JNICALL *CallBooleanMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jboolean (JNICALL *CallBooleanMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue * args); + + jbyte (JNICALL *CallByteMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + jbyte (JNICALL *CallByteMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jbyte (JNICALL *CallByteMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args); + + jchar (JNICALL *CallCharMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + jchar (JNICALL *CallCharMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jchar (JNICALL *CallCharMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args); + + jshort (JNICALL *CallShortMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + jshort (JNICALL *CallShortMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jshort (JNICALL *CallShortMethodA) + (JNIEnv *env, jobject obj, jmethodID 
methodID, const jvalue *args); + + jint (JNICALL *CallIntMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + jint (JNICALL *CallIntMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jint (JNICALL *CallIntMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args); + + jlong (JNICALL *CallLongMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + jlong (JNICALL *CallLongMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jlong (JNICALL *CallLongMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args); + + jfloat (JNICALL *CallFloatMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + jfloat (JNICALL *CallFloatMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jfloat (JNICALL *CallFloatMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args); + + jdouble (JNICALL *CallDoubleMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + jdouble (JNICALL *CallDoubleMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jdouble (JNICALL *CallDoubleMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args); + + void (JNICALL *CallVoidMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + void (JNICALL *CallVoidMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + void (JNICALL *CallVoidMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue * args); + + jobject (JNICALL *CallNonvirtualObjectMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + jobject (JNICALL *CallNonvirtualObjectMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + jobject (JNICALL *CallNonvirtualObjectMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue * args); + + jboolean (JNICALL *CallNonvirtualBooleanMethod) + (JNIEnv *env, jobject obj, jclass clazz, 
jmethodID methodID, ...); + jboolean (JNICALL *CallNonvirtualBooleanMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + jboolean (JNICALL *CallNonvirtualBooleanMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue * args); + + jbyte (JNICALL *CallNonvirtualByteMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + jbyte (JNICALL *CallNonvirtualByteMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + jbyte (JNICALL *CallNonvirtualByteMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue *args); + + jchar (JNICALL *CallNonvirtualCharMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + jchar (JNICALL *CallNonvirtualCharMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + jchar (JNICALL *CallNonvirtualCharMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue *args); + + jshort (JNICALL *CallNonvirtualShortMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + jshort (JNICALL *CallNonvirtualShortMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + jshort (JNICALL *CallNonvirtualShortMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue *args); + + jint (JNICALL *CallNonvirtualIntMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + jint (JNICALL *CallNonvirtualIntMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + jint (JNICALL *CallNonvirtualIntMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue *args); + + jlong (JNICALL *CallNonvirtualLongMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + jlong (JNICALL *CallNonvirtualLongMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID 
methodID, + va_list args); + jlong (JNICALL *CallNonvirtualLongMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue *args); + + jfloat (JNICALL *CallNonvirtualFloatMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + jfloat (JNICALL *CallNonvirtualFloatMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + jfloat (JNICALL *CallNonvirtualFloatMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue *args); + + jdouble (JNICALL *CallNonvirtualDoubleMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + jdouble (JNICALL *CallNonvirtualDoubleMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + jdouble (JNICALL *CallNonvirtualDoubleMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue *args); + + void (JNICALL *CallNonvirtualVoidMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + void (JNICALL *CallNonvirtualVoidMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + void (JNICALL *CallNonvirtualVoidMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue * args); + + jfieldID (JNICALL *GetFieldID) + (JNIEnv *env, jclass clazz, const char *name, const char *sig); + + jobject (JNICALL *GetObjectField) + (JNIEnv *env, jobject obj, jfieldID fieldID); + jboolean (JNICALL *GetBooleanField) + (JNIEnv *env, jobject obj, jfieldID fieldID); + jbyte (JNICALL *GetByteField) + (JNIEnv *env, jobject obj, jfieldID fieldID); + jchar (JNICALL *GetCharField) + (JNIEnv *env, jobject obj, jfieldID fieldID); + jshort (JNICALL *GetShortField) + (JNIEnv *env, jobject obj, jfieldID fieldID); + jint (JNICALL *GetIntField) + (JNIEnv *env, jobject obj, jfieldID fieldID); + jlong (JNICALL *GetLongField) + (JNIEnv *env, jobject obj, jfieldID fieldID); + jfloat (JNICALL *GetFloatField) + 
(JNIEnv *env, jobject obj, jfieldID fieldID); + jdouble (JNICALL *GetDoubleField) + (JNIEnv *env, jobject obj, jfieldID fieldID); + + void (JNICALL *SetObjectField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jobject val); + void (JNICALL *SetBooleanField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jboolean val); + void (JNICALL *SetByteField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jbyte val); + void (JNICALL *SetCharField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jchar val); + void (JNICALL *SetShortField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jshort val); + void (JNICALL *SetIntField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jint val); + void (JNICALL *SetLongField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jlong val); + void (JNICALL *SetFloatField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jfloat val); + void (JNICALL *SetDoubleField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jdouble val); + + jmethodID (JNICALL *GetStaticMethodID) + (JNIEnv *env, jclass clazz, const char *name, const char *sig); + + jobject (JNICALL *CallStaticObjectMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jobject (JNICALL *CallStaticObjectMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jobject (JNICALL *CallStaticObjectMethodA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + jboolean (JNICALL *CallStaticBooleanMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jboolean (JNICALL *CallStaticBooleanMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jboolean (JNICALL *CallStaticBooleanMethodA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + jbyte (JNICALL *CallStaticByteMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jbyte (JNICALL *CallStaticByteMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jbyte (JNICALL *CallStaticByteMethodA) + (JNIEnv *env, jclass 
clazz, jmethodID methodID, const jvalue *args); + + jchar (JNICALL *CallStaticCharMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jchar (JNICALL *CallStaticCharMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jchar (JNICALL *CallStaticCharMethodA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + jshort (JNICALL *CallStaticShortMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jshort (JNICALL *CallStaticShortMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jshort (JNICALL *CallStaticShortMethodA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + jint (JNICALL *CallStaticIntMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jint (JNICALL *CallStaticIntMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jint (JNICALL *CallStaticIntMethodA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + jlong (JNICALL *CallStaticLongMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jlong (JNICALL *CallStaticLongMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jlong (JNICALL *CallStaticLongMethodA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + jfloat (JNICALL *CallStaticFloatMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jfloat (JNICALL *CallStaticFloatMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jfloat (JNICALL *CallStaticFloatMethodA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + jdouble (JNICALL *CallStaticDoubleMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jdouble (JNICALL *CallStaticDoubleMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jdouble (JNICALL *CallStaticDoubleMethodA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + void (JNICALL 
*CallStaticVoidMethod) + (JNIEnv *env, jclass cls, jmethodID methodID, ...); + void (JNICALL *CallStaticVoidMethodV) + (JNIEnv *env, jclass cls, jmethodID methodID, va_list args); + void (JNICALL *CallStaticVoidMethodA) + (JNIEnv *env, jclass cls, jmethodID methodID, const jvalue * args); + + jfieldID (JNICALL *GetStaticFieldID) + (JNIEnv *env, jclass clazz, const char *name, const char *sig); + jobject (JNICALL *GetStaticObjectField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + jboolean (JNICALL *GetStaticBooleanField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + jbyte (JNICALL *GetStaticByteField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + jchar (JNICALL *GetStaticCharField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + jshort (JNICALL *GetStaticShortField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + jint (JNICALL *GetStaticIntField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + jlong (JNICALL *GetStaticLongField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + jfloat (JNICALL *GetStaticFloatField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + jdouble (JNICALL *GetStaticDoubleField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + + void (JNICALL *SetStaticObjectField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jobject value); + void (JNICALL *SetStaticBooleanField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jboolean value); + void (JNICALL *SetStaticByteField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jbyte value); + void (JNICALL *SetStaticCharField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jchar value); + void (JNICALL *SetStaticShortField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jshort value); + void (JNICALL *SetStaticIntField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jint value); + void (JNICALL *SetStaticLongField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jlong value); + void (JNICALL *SetStaticFloatField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jfloat 
value); + void (JNICALL *SetStaticDoubleField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jdouble value); + + jstring (JNICALL *NewString) + (JNIEnv *env, const jchar *unicode, jsize len); + jsize (JNICALL *GetStringLength) + (JNIEnv *env, jstring str); + const jchar *(JNICALL *GetStringChars) + (JNIEnv *env, jstring str, jboolean *isCopy); + void (JNICALL *ReleaseStringChars) + (JNIEnv *env, jstring str, const jchar *chars); + + jstring (JNICALL *NewStringUTF) + (JNIEnv *env, const char *utf); + jsize (JNICALL *GetStringUTFLength) + (JNIEnv *env, jstring str); + const char* (JNICALL *GetStringUTFChars) + (JNIEnv *env, jstring str, jboolean *isCopy); + void (JNICALL *ReleaseStringUTFChars) + (JNIEnv *env, jstring str, const char* chars); + + + jsize (JNICALL *GetArrayLength) + (JNIEnv *env, jarray array); + + jobjectArray (JNICALL *NewObjectArray) + (JNIEnv *env, jsize len, jclass clazz, jobject init); + jobject (JNICALL *GetObjectArrayElement) + (JNIEnv *env, jobjectArray array, jsize index); + void (JNICALL *SetObjectArrayElement) + (JNIEnv *env, jobjectArray array, jsize index, jobject val); + + jbooleanArray (JNICALL *NewBooleanArray) + (JNIEnv *env, jsize len); + jbyteArray (JNICALL *NewByteArray) + (JNIEnv *env, jsize len); + jcharArray (JNICALL *NewCharArray) + (JNIEnv *env, jsize len); + jshortArray (JNICALL *NewShortArray) + (JNIEnv *env, jsize len); + jintArray (JNICALL *NewIntArray) + (JNIEnv *env, jsize len); + jlongArray (JNICALL *NewLongArray) + (JNIEnv *env, jsize len); + jfloatArray (JNICALL *NewFloatArray) + (JNIEnv *env, jsize len); + jdoubleArray (JNICALL *NewDoubleArray) + (JNIEnv *env, jsize len); + + jboolean * (JNICALL *GetBooleanArrayElements) + (JNIEnv *env, jbooleanArray array, jboolean *isCopy); + jbyte * (JNICALL *GetByteArrayElements) + (JNIEnv *env, jbyteArray array, jboolean *isCopy); + jchar * (JNICALL *GetCharArrayElements) + (JNIEnv *env, jcharArray array, jboolean *isCopy); + jshort * (JNICALL *GetShortArrayElements) + 
(JNIEnv *env, jshortArray array, jboolean *isCopy); + jint * (JNICALL *GetIntArrayElements) + (JNIEnv *env, jintArray array, jboolean *isCopy); + jlong * (JNICALL *GetLongArrayElements) + (JNIEnv *env, jlongArray array, jboolean *isCopy); + jfloat * (JNICALL *GetFloatArrayElements) + (JNIEnv *env, jfloatArray array, jboolean *isCopy); + jdouble * (JNICALL *GetDoubleArrayElements) + (JNIEnv *env, jdoubleArray array, jboolean *isCopy); + + void (JNICALL *ReleaseBooleanArrayElements) + (JNIEnv *env, jbooleanArray array, jboolean *elems, jint mode); + void (JNICALL *ReleaseByteArrayElements) + (JNIEnv *env, jbyteArray array, jbyte *elems, jint mode); + void (JNICALL *ReleaseCharArrayElements) + (JNIEnv *env, jcharArray array, jchar *elems, jint mode); + void (JNICALL *ReleaseShortArrayElements) + (JNIEnv *env, jshortArray array, jshort *elems, jint mode); + void (JNICALL *ReleaseIntArrayElements) + (JNIEnv *env, jintArray array, jint *elems, jint mode); + void (JNICALL *ReleaseLongArrayElements) + (JNIEnv *env, jlongArray array, jlong *elems, jint mode); + void (JNICALL *ReleaseFloatArrayElements) + (JNIEnv *env, jfloatArray array, jfloat *elems, jint mode); + void (JNICALL *ReleaseDoubleArrayElements) + (JNIEnv *env, jdoubleArray array, jdouble *elems, jint mode); + + void (JNICALL *GetBooleanArrayRegion) + (JNIEnv *env, jbooleanArray array, jsize start, jsize l, jboolean *buf); + void (JNICALL *GetByteArrayRegion) + (JNIEnv *env, jbyteArray array, jsize start, jsize len, jbyte *buf); + void (JNICALL *GetCharArrayRegion) + (JNIEnv *env, jcharArray array, jsize start, jsize len, jchar *buf); + void (JNICALL *GetShortArrayRegion) + (JNIEnv *env, jshortArray array, jsize start, jsize len, jshort *buf); + void (JNICALL *GetIntArrayRegion) + (JNIEnv *env, jintArray array, jsize start, jsize len, jint *buf); + void (JNICALL *GetLongArrayRegion) + (JNIEnv *env, jlongArray array, jsize start, jsize len, jlong *buf); + void (JNICALL *GetFloatArrayRegion) + (JNIEnv *env, 
jfloatArray array, jsize start, jsize len, jfloat *buf); + void (JNICALL *GetDoubleArrayRegion) + (JNIEnv *env, jdoubleArray array, jsize start, jsize len, jdouble *buf); + + void (JNICALL *SetBooleanArrayRegion) + (JNIEnv *env, jbooleanArray array, jsize start, jsize l, const jboolean *buf); + void (JNICALL *SetByteArrayRegion) + (JNIEnv *env, jbyteArray array, jsize start, jsize len, const jbyte *buf); + void (JNICALL *SetCharArrayRegion) + (JNIEnv *env, jcharArray array, jsize start, jsize len, const jchar *buf); + void (JNICALL *SetShortArrayRegion) + (JNIEnv *env, jshortArray array, jsize start, jsize len, const jshort *buf); + void (JNICALL *SetIntArrayRegion) + (JNIEnv *env, jintArray array, jsize start, jsize len, const jint *buf); + void (JNICALL *SetLongArrayRegion) + (JNIEnv *env, jlongArray array, jsize start, jsize len, const jlong *buf); + void (JNICALL *SetFloatArrayRegion) + (JNIEnv *env, jfloatArray array, jsize start, jsize len, const jfloat *buf); + void (JNICALL *SetDoubleArrayRegion) + (JNIEnv *env, jdoubleArray array, jsize start, jsize len, const jdouble *buf); + + jint (JNICALL *RegisterNatives) + (JNIEnv *env, jclass clazz, const JNINativeMethod *methods, + jint nMethods); + jint (JNICALL *UnregisterNatives) + (JNIEnv *env, jclass clazz); + + jint (JNICALL *MonitorEnter) + (JNIEnv *env, jobject obj); + jint (JNICALL *MonitorExit) + (JNIEnv *env, jobject obj); + + jint (JNICALL *GetJavaVM) + (JNIEnv *env, JavaVM **vm); + + void (JNICALL *GetStringRegion) + (JNIEnv *env, jstring str, jsize start, jsize len, jchar *buf); + void (JNICALL *GetStringUTFRegion) + (JNIEnv *env, jstring str, jsize start, jsize len, char *buf); + + void * (JNICALL *GetPrimitiveArrayCritical) + (JNIEnv *env, jarray array, jboolean *isCopy); + void (JNICALL *ReleasePrimitiveArrayCritical) + (JNIEnv *env, jarray array, void *carray, jint mode); + + const jchar * (JNICALL *GetStringCritical) + (JNIEnv *env, jstring string, jboolean *isCopy); + void (JNICALL 
*ReleaseStringCritical) + (JNIEnv *env, jstring string, const jchar *cstring); + + jweak (JNICALL *NewWeakGlobalRef) + (JNIEnv *env, jobject obj); + void (JNICALL *DeleteWeakGlobalRef) + (JNIEnv *env, jweak ref); + + jboolean (JNICALL *ExceptionCheck) + (JNIEnv *env); + + jobject (JNICALL *NewDirectByteBuffer) + (JNIEnv* env, void* address, jlong capacity); + void* (JNICALL *GetDirectBufferAddress) + (JNIEnv* env, jobject buf); + jlong (JNICALL *GetDirectBufferCapacity) + (JNIEnv* env, jobject buf); + + /* New JNI 1.6 Features */ + + jobjectRefType (JNICALL *GetObjectRefType) + (JNIEnv* env, jobject obj); + + /* Module Features */ + + jobject (JNICALL *GetModule) + (JNIEnv* env, jclass clazz); + + /* Virtual threads */ + + jboolean (JNICALL *IsVirtualThread) + (JNIEnv* env, jobject obj); +}; + +/* + * We use inlined functions for C++ so that programmers can write: + * + * env->FindClass("java/lang/String") + * + * in C++ rather than: + * + * (*env)->FindClass(env, "java/lang/String") + * + * in C. 
+ */ + +struct JNIEnv_ { + const struct JNINativeInterface_ *functions; +#ifdef __cplusplus + + jint GetVersion() { + return functions->GetVersion(this); + } + jclass DefineClass(const char *name, jobject loader, const jbyte *buf, + jsize len) { + return functions->DefineClass(this, name, loader, buf, len); + } + jclass FindClass(const char *name) { + return functions->FindClass(this, name); + } + jmethodID FromReflectedMethod(jobject method) { + return functions->FromReflectedMethod(this,method); + } + jfieldID FromReflectedField(jobject field) { + return functions->FromReflectedField(this,field); + } + + jobject ToReflectedMethod(jclass cls, jmethodID methodID, jboolean isStatic) { + return functions->ToReflectedMethod(this, cls, methodID, isStatic); + } + + jclass GetSuperclass(jclass sub) { + return functions->GetSuperclass(this, sub); + } + jboolean IsAssignableFrom(jclass sub, jclass sup) { + return functions->IsAssignableFrom(this, sub, sup); + } + + jobject ToReflectedField(jclass cls, jfieldID fieldID, jboolean isStatic) { + return functions->ToReflectedField(this,cls,fieldID,isStatic); + } + + jint Throw(jthrowable obj) { + return functions->Throw(this, obj); + } + jint ThrowNew(jclass clazz, const char *msg) { + return functions->ThrowNew(this, clazz, msg); + } + jthrowable ExceptionOccurred() { + return functions->ExceptionOccurred(this); + } + void ExceptionDescribe() { + functions->ExceptionDescribe(this); + } + void ExceptionClear() { + functions->ExceptionClear(this); + } + void FatalError(const char *msg) { + functions->FatalError(this, msg); + } + + jint PushLocalFrame(jint capacity) { + return functions->PushLocalFrame(this,capacity); + } + jobject PopLocalFrame(jobject result) { + return functions->PopLocalFrame(this,result); + } + + jobject NewGlobalRef(jobject lobj) { + return functions->NewGlobalRef(this,lobj); + } + void DeleteGlobalRef(jobject gref) { + functions->DeleteGlobalRef(this,gref); + } + void DeleteLocalRef(jobject obj) { + 
functions->DeleteLocalRef(this, obj); + } + + jboolean IsSameObject(jobject obj1, jobject obj2) { + return functions->IsSameObject(this,obj1,obj2); + } + + jobject NewLocalRef(jobject ref) { + return functions->NewLocalRef(this,ref); + } + jint EnsureLocalCapacity(jint capacity) { + return functions->EnsureLocalCapacity(this,capacity); + } + + jobject AllocObject(jclass clazz) { + return functions->AllocObject(this,clazz); + } + jobject NewObject(jclass clazz, jmethodID methodID, ...) { + va_list args; + jobject result; + va_start(args, methodID); + result = functions->NewObjectV(this,clazz,methodID,args); + va_end(args); + return result; + } + jobject NewObjectV(jclass clazz, jmethodID methodID, + va_list args) { + return functions->NewObjectV(this,clazz,methodID,args); + } + jobject NewObjectA(jclass clazz, jmethodID methodID, + const jvalue *args) { + return functions->NewObjectA(this,clazz,methodID,args); + } + + jclass GetObjectClass(jobject obj) { + return functions->GetObjectClass(this,obj); + } + jboolean IsInstanceOf(jobject obj, jclass clazz) { + return functions->IsInstanceOf(this,obj,clazz); + } + + jmethodID GetMethodID(jclass clazz, const char *name, + const char *sig) { + return functions->GetMethodID(this,clazz,name,sig); + } + + jobject CallObjectMethod(jobject obj, jmethodID methodID, ...) { + va_list args; + jobject result; + va_start(args,methodID); + result = functions->CallObjectMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jobject CallObjectMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallObjectMethodV(this,obj,methodID,args); + } + jobject CallObjectMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallObjectMethodA(this,obj,methodID,args); + } + + jboolean CallBooleanMethod(jobject obj, + jmethodID methodID, ...) 
{ + va_list args; + jboolean result; + va_start(args,methodID); + result = functions->CallBooleanMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jboolean CallBooleanMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallBooleanMethodV(this,obj,methodID,args); + } + jboolean CallBooleanMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallBooleanMethodA(this,obj,methodID, args); + } + + jbyte CallByteMethod(jobject obj, jmethodID methodID, ...) { + va_list args; + jbyte result; + va_start(args,methodID); + result = functions->CallByteMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jbyte CallByteMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallByteMethodV(this,obj,methodID,args); + } + jbyte CallByteMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallByteMethodA(this,obj,methodID,args); + } + + jchar CallCharMethod(jobject obj, jmethodID methodID, ...) { + va_list args; + jchar result; + va_start(args,methodID); + result = functions->CallCharMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jchar CallCharMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallCharMethodV(this,obj,methodID,args); + } + jchar CallCharMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallCharMethodA(this,obj,methodID,args); + } + + jshort CallShortMethod(jobject obj, jmethodID methodID, ...) 
{ + va_list args; + jshort result; + va_start(args,methodID); + result = functions->CallShortMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jshort CallShortMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallShortMethodV(this,obj,methodID,args); + } + jshort CallShortMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallShortMethodA(this,obj,methodID,args); + } + + jint CallIntMethod(jobject obj, jmethodID methodID, ...) { + va_list args; + jint result; + va_start(args,methodID); + result = functions->CallIntMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jint CallIntMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallIntMethodV(this,obj,methodID,args); + } + jint CallIntMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallIntMethodA(this,obj,methodID,args); + } + + jlong CallLongMethod(jobject obj, jmethodID methodID, ...) { + va_list args; + jlong result; + va_start(args,methodID); + result = functions->CallLongMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jlong CallLongMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallLongMethodV(this,obj,methodID,args); + } + jlong CallLongMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallLongMethodA(this,obj,methodID,args); + } + + jfloat CallFloatMethod(jobject obj, jmethodID methodID, ...) 
{ + va_list args; + jfloat result; + va_start(args,methodID); + result = functions->CallFloatMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jfloat CallFloatMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallFloatMethodV(this,obj,methodID,args); + } + jfloat CallFloatMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallFloatMethodA(this,obj,methodID,args); + } + + jdouble CallDoubleMethod(jobject obj, jmethodID methodID, ...) { + va_list args; + jdouble result; + va_start(args,methodID); + result = functions->CallDoubleMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jdouble CallDoubleMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallDoubleMethodV(this,obj,methodID,args); + } + jdouble CallDoubleMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallDoubleMethodA(this,obj,methodID,args); + } + + void CallVoidMethod(jobject obj, jmethodID methodID, ...) { + va_list args; + va_start(args,methodID); + functions->CallVoidMethodV(this,obj,methodID,args); + va_end(args); + } + void CallVoidMethodV(jobject obj, jmethodID methodID, + va_list args) { + functions->CallVoidMethodV(this,obj,methodID,args); + } + void CallVoidMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + functions->CallVoidMethodA(this,obj,methodID,args); + } + + jobject CallNonvirtualObjectMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) 
{ + va_list args; + jobject result; + va_start(args,methodID); + result = functions->CallNonvirtualObjectMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jobject CallNonvirtualObjectMethodV(jobject obj, jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallNonvirtualObjectMethodV(this,obj,clazz, + methodID,args); + } + jobject CallNonvirtualObjectMethodA(jobject obj, jclass clazz, + jmethodID methodID, const jvalue * args) { + return functions->CallNonvirtualObjectMethodA(this,obj,clazz, + methodID,args); + } + + jboolean CallNonvirtualBooleanMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) { + va_list args; + jboolean result; + va_start(args,methodID); + result = functions->CallNonvirtualBooleanMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jboolean CallNonvirtualBooleanMethodV(jobject obj, jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallNonvirtualBooleanMethodV(this,obj,clazz, + methodID,args); + } + jboolean CallNonvirtualBooleanMethodA(jobject obj, jclass clazz, + jmethodID methodID, const jvalue * args) { + return functions->CallNonvirtualBooleanMethodA(this,obj,clazz, + methodID, args); + } + + jbyte CallNonvirtualByteMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) { + va_list args; + jbyte result; + va_start(args,methodID); + result = functions->CallNonvirtualByteMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jbyte CallNonvirtualByteMethodV(jobject obj, jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallNonvirtualByteMethodV(this,obj,clazz, + methodID,args); + } + jbyte CallNonvirtualByteMethodA(jobject obj, jclass clazz, + jmethodID methodID, const jvalue * args) { + return functions->CallNonvirtualByteMethodA(this,obj,clazz, + methodID,args); + } + + jchar CallNonvirtualCharMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) 
{ + va_list args; + jchar result; + va_start(args,methodID); + result = functions->CallNonvirtualCharMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jchar CallNonvirtualCharMethodV(jobject obj, jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallNonvirtualCharMethodV(this,obj,clazz, + methodID,args); + } + jchar CallNonvirtualCharMethodA(jobject obj, jclass clazz, + jmethodID methodID, const jvalue * args) { + return functions->CallNonvirtualCharMethodA(this,obj,clazz, + methodID,args); + } + + jshort CallNonvirtualShortMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) { + va_list args; + jshort result; + va_start(args,methodID); + result = functions->CallNonvirtualShortMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jshort CallNonvirtualShortMethodV(jobject obj, jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallNonvirtualShortMethodV(this,obj,clazz, + methodID,args); + } + jshort CallNonvirtualShortMethodA(jobject obj, jclass clazz, + jmethodID methodID, const jvalue * args) { + return functions->CallNonvirtualShortMethodA(this,obj,clazz, + methodID,args); + } + + jint CallNonvirtualIntMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) { + va_list args; + jint result; + va_start(args,methodID); + result = functions->CallNonvirtualIntMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jint CallNonvirtualIntMethodV(jobject obj, jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallNonvirtualIntMethodV(this,obj,clazz, + methodID,args); + } + jint CallNonvirtualIntMethodA(jobject obj, jclass clazz, + jmethodID methodID, const jvalue * args) { + return functions->CallNonvirtualIntMethodA(this,obj,clazz, + methodID,args); + } + + jlong CallNonvirtualLongMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) 
{ + va_list args; + jlong result; + va_start(args,methodID); + result = functions->CallNonvirtualLongMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jlong CallNonvirtualLongMethodV(jobject obj, jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallNonvirtualLongMethodV(this,obj,clazz, + methodID,args); + } + jlong CallNonvirtualLongMethodA(jobject obj, jclass clazz, + jmethodID methodID, const jvalue * args) { + return functions->CallNonvirtualLongMethodA(this,obj,clazz, + methodID,args); + } + + jfloat CallNonvirtualFloatMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) { + va_list args; + jfloat result; + va_start(args,methodID); + result = functions->CallNonvirtualFloatMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jfloat CallNonvirtualFloatMethodV(jobject obj, jclass clazz, + jmethodID methodID, + va_list args) { + return functions->CallNonvirtualFloatMethodV(this,obj,clazz, + methodID,args); + } + jfloat CallNonvirtualFloatMethodA(jobject obj, jclass clazz, + jmethodID methodID, + const jvalue * args) { + return functions->CallNonvirtualFloatMethodA(this,obj,clazz, + methodID,args); + } + + jdouble CallNonvirtualDoubleMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) { + va_list args; + jdouble result; + va_start(args,methodID); + result = functions->CallNonvirtualDoubleMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jdouble CallNonvirtualDoubleMethodV(jobject obj, jclass clazz, + jmethodID methodID, + va_list args) { + return functions->CallNonvirtualDoubleMethodV(this,obj,clazz, + methodID,args); + } + jdouble CallNonvirtualDoubleMethodA(jobject obj, jclass clazz, + jmethodID methodID, + const jvalue * args) { + return functions->CallNonvirtualDoubleMethodA(this,obj,clazz, + methodID,args); + } + + void CallNonvirtualVoidMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) 
{ + va_list args; + va_start(args,methodID); + functions->CallNonvirtualVoidMethodV(this,obj,clazz,methodID,args); + va_end(args); + } + void CallNonvirtualVoidMethodV(jobject obj, jclass clazz, + jmethodID methodID, + va_list args) { + functions->CallNonvirtualVoidMethodV(this,obj,clazz,methodID,args); + } + void CallNonvirtualVoidMethodA(jobject obj, jclass clazz, + jmethodID methodID, + const jvalue * args) { + functions->CallNonvirtualVoidMethodA(this,obj,clazz,methodID,args); + } + + jfieldID GetFieldID(jclass clazz, const char *name, + const char *sig) { + return functions->GetFieldID(this,clazz,name,sig); + } + + jobject GetObjectField(jobject obj, jfieldID fieldID) { + return functions->GetObjectField(this,obj,fieldID); + } + jboolean GetBooleanField(jobject obj, jfieldID fieldID) { + return functions->GetBooleanField(this,obj,fieldID); + } + jbyte GetByteField(jobject obj, jfieldID fieldID) { + return functions->GetByteField(this,obj,fieldID); + } + jchar GetCharField(jobject obj, jfieldID fieldID) { + return functions->GetCharField(this,obj,fieldID); + } + jshort GetShortField(jobject obj, jfieldID fieldID) { + return functions->GetShortField(this,obj,fieldID); + } + jint GetIntField(jobject obj, jfieldID fieldID) { + return functions->GetIntField(this,obj,fieldID); + } + jlong GetLongField(jobject obj, jfieldID fieldID) { + return functions->GetLongField(this,obj,fieldID); + } + jfloat GetFloatField(jobject obj, jfieldID fieldID) { + return functions->GetFloatField(this,obj,fieldID); + } + jdouble GetDoubleField(jobject obj, jfieldID fieldID) { + return functions->GetDoubleField(this,obj,fieldID); + } + + void SetObjectField(jobject obj, jfieldID fieldID, jobject val) { + functions->SetObjectField(this,obj,fieldID,val); + } + void SetBooleanField(jobject obj, jfieldID fieldID, + jboolean val) { + functions->SetBooleanField(this,obj,fieldID,val); + } + void SetByteField(jobject obj, jfieldID fieldID, + jbyte val) { + 
functions->SetByteField(this,obj,fieldID,val); + } + void SetCharField(jobject obj, jfieldID fieldID, + jchar val) { + functions->SetCharField(this,obj,fieldID,val); + } + void SetShortField(jobject obj, jfieldID fieldID, + jshort val) { + functions->SetShortField(this,obj,fieldID,val); + } + void SetIntField(jobject obj, jfieldID fieldID, + jint val) { + functions->SetIntField(this,obj,fieldID,val); + } + void SetLongField(jobject obj, jfieldID fieldID, + jlong val) { + functions->SetLongField(this,obj,fieldID,val); + } + void SetFloatField(jobject obj, jfieldID fieldID, + jfloat val) { + functions->SetFloatField(this,obj,fieldID,val); + } + void SetDoubleField(jobject obj, jfieldID fieldID, + jdouble val) { + functions->SetDoubleField(this,obj,fieldID,val); + } + + jmethodID GetStaticMethodID(jclass clazz, const char *name, + const char *sig) { + return functions->GetStaticMethodID(this,clazz,name,sig); + } + + jobject CallStaticObjectMethod(jclass clazz, jmethodID methodID, + ...) { + va_list args; + jobject result; + va_start(args,methodID); + result = functions->CallStaticObjectMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jobject CallStaticObjectMethodV(jclass clazz, jmethodID methodID, + va_list args) { + return functions->CallStaticObjectMethodV(this,clazz,methodID,args); + } + jobject CallStaticObjectMethodA(jclass clazz, jmethodID methodID, + const jvalue *args) { + return functions->CallStaticObjectMethodA(this,clazz,methodID,args); + } + + jboolean CallStaticBooleanMethod(jclass clazz, + jmethodID methodID, ...) 
{ + va_list args; + jboolean result; + va_start(args,methodID); + result = functions->CallStaticBooleanMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jboolean CallStaticBooleanMethodV(jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallStaticBooleanMethodV(this,clazz,methodID,args); + } + jboolean CallStaticBooleanMethodA(jclass clazz, + jmethodID methodID, const jvalue *args) { + return functions->CallStaticBooleanMethodA(this,clazz,methodID,args); + } + + jbyte CallStaticByteMethod(jclass clazz, + jmethodID methodID, ...) { + va_list args; + jbyte result; + va_start(args,methodID); + result = functions->CallStaticByteMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jbyte CallStaticByteMethodV(jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallStaticByteMethodV(this,clazz,methodID,args); + } + jbyte CallStaticByteMethodA(jclass clazz, + jmethodID methodID, const jvalue *args) { + return functions->CallStaticByteMethodA(this,clazz,methodID,args); + } + + jchar CallStaticCharMethod(jclass clazz, + jmethodID methodID, ...) { + va_list args; + jchar result; + va_start(args,methodID); + result = functions->CallStaticCharMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jchar CallStaticCharMethodV(jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallStaticCharMethodV(this,clazz,methodID,args); + } + jchar CallStaticCharMethodA(jclass clazz, + jmethodID methodID, const jvalue *args) { + return functions->CallStaticCharMethodA(this,clazz,methodID,args); + } + + jshort CallStaticShortMethod(jclass clazz, + jmethodID methodID, ...) 
{ + va_list args; + jshort result; + va_start(args,methodID); + result = functions->CallStaticShortMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jshort CallStaticShortMethodV(jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallStaticShortMethodV(this,clazz,methodID,args); + } + jshort CallStaticShortMethodA(jclass clazz, + jmethodID methodID, const jvalue *args) { + return functions->CallStaticShortMethodA(this,clazz,methodID,args); + } + + jint CallStaticIntMethod(jclass clazz, + jmethodID methodID, ...) { + va_list args; + jint result; + va_start(args,methodID); + result = functions->CallStaticIntMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jint CallStaticIntMethodV(jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallStaticIntMethodV(this,clazz,methodID,args); + } + jint CallStaticIntMethodA(jclass clazz, + jmethodID methodID, const jvalue *args) { + return functions->CallStaticIntMethodA(this,clazz,methodID,args); + } + + jlong CallStaticLongMethod(jclass clazz, + jmethodID methodID, ...) { + va_list args; + jlong result; + va_start(args,methodID); + result = functions->CallStaticLongMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jlong CallStaticLongMethodV(jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallStaticLongMethodV(this,clazz,methodID,args); + } + jlong CallStaticLongMethodA(jclass clazz, + jmethodID methodID, const jvalue *args) { + return functions->CallStaticLongMethodA(this,clazz,methodID,args); + } + + jfloat CallStaticFloatMethod(jclass clazz, + jmethodID methodID, ...) 
{ + va_list args; + jfloat result; + va_start(args,methodID); + result = functions->CallStaticFloatMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jfloat CallStaticFloatMethodV(jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallStaticFloatMethodV(this,clazz,methodID,args); + } + jfloat CallStaticFloatMethodA(jclass clazz, + jmethodID methodID, const jvalue *args) { + return functions->CallStaticFloatMethodA(this,clazz,methodID,args); + } + + jdouble CallStaticDoubleMethod(jclass clazz, + jmethodID methodID, ...) { + va_list args; + jdouble result; + va_start(args,methodID); + result = functions->CallStaticDoubleMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jdouble CallStaticDoubleMethodV(jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallStaticDoubleMethodV(this,clazz,methodID,args); + } + jdouble CallStaticDoubleMethodA(jclass clazz, + jmethodID methodID, const jvalue *args) { + return functions->CallStaticDoubleMethodA(this,clazz,methodID,args); + } + + void CallStaticVoidMethod(jclass cls, jmethodID methodID, ...) 
{ + va_list args; + va_start(args,methodID); + functions->CallStaticVoidMethodV(this,cls,methodID,args); + va_end(args); + } + void CallStaticVoidMethodV(jclass cls, jmethodID methodID, + va_list args) { + functions->CallStaticVoidMethodV(this,cls,methodID,args); + } + void CallStaticVoidMethodA(jclass cls, jmethodID methodID, + const jvalue * args) { + functions->CallStaticVoidMethodA(this,cls,methodID,args); + } + + jfieldID GetStaticFieldID(jclass clazz, const char *name, + const char *sig) { + return functions->GetStaticFieldID(this,clazz,name,sig); + } + jobject GetStaticObjectField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticObjectField(this,clazz,fieldID); + } + jboolean GetStaticBooleanField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticBooleanField(this,clazz,fieldID); + } + jbyte GetStaticByteField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticByteField(this,clazz,fieldID); + } + jchar GetStaticCharField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticCharField(this,clazz,fieldID); + } + jshort GetStaticShortField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticShortField(this,clazz,fieldID); + } + jint GetStaticIntField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticIntField(this,clazz,fieldID); + } + jlong GetStaticLongField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticLongField(this,clazz,fieldID); + } + jfloat GetStaticFloatField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticFloatField(this,clazz,fieldID); + } + jdouble GetStaticDoubleField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticDoubleField(this,clazz,fieldID); + } + + void SetStaticObjectField(jclass clazz, jfieldID fieldID, + jobject value) { + functions->SetStaticObjectField(this,clazz,fieldID,value); + } + void SetStaticBooleanField(jclass clazz, jfieldID fieldID, + jboolean value) { + 
functions->SetStaticBooleanField(this,clazz,fieldID,value); + } + void SetStaticByteField(jclass clazz, jfieldID fieldID, + jbyte value) { + functions->SetStaticByteField(this,clazz,fieldID,value); + } + void SetStaticCharField(jclass clazz, jfieldID fieldID, + jchar value) { + functions->SetStaticCharField(this,clazz,fieldID,value); + } + void SetStaticShortField(jclass clazz, jfieldID fieldID, + jshort value) { + functions->SetStaticShortField(this,clazz,fieldID,value); + } + void SetStaticIntField(jclass clazz, jfieldID fieldID, + jint value) { + functions->SetStaticIntField(this,clazz,fieldID,value); + } + void SetStaticLongField(jclass clazz, jfieldID fieldID, + jlong value) { + functions->SetStaticLongField(this,clazz,fieldID,value); + } + void SetStaticFloatField(jclass clazz, jfieldID fieldID, + jfloat value) { + functions->SetStaticFloatField(this,clazz,fieldID,value); + } + void SetStaticDoubleField(jclass clazz, jfieldID fieldID, + jdouble value) { + functions->SetStaticDoubleField(this,clazz,fieldID,value); + } + + jstring NewString(const jchar *unicode, jsize len) { + return functions->NewString(this,unicode,len); + } + jsize GetStringLength(jstring str) { + return functions->GetStringLength(this,str); + } + const jchar *GetStringChars(jstring str, jboolean *isCopy) { + return functions->GetStringChars(this,str,isCopy); + } + void ReleaseStringChars(jstring str, const jchar *chars) { + functions->ReleaseStringChars(this,str,chars); + } + + jstring NewStringUTF(const char *utf) { + return functions->NewStringUTF(this,utf); + } + jsize GetStringUTFLength(jstring str) { + return functions->GetStringUTFLength(this,str); + } + const char* GetStringUTFChars(jstring str, jboolean *isCopy) { + return functions->GetStringUTFChars(this,str,isCopy); + } + void ReleaseStringUTFChars(jstring str, const char* chars) { + functions->ReleaseStringUTFChars(this,str,chars); + } + + jsize GetArrayLength(jarray array) { + return functions->GetArrayLength(this,array); + } + 
+ jobjectArray NewObjectArray(jsize len, jclass clazz, + jobject init) { + return functions->NewObjectArray(this,len,clazz,init); + } + jobject GetObjectArrayElement(jobjectArray array, jsize index) { + return functions->GetObjectArrayElement(this,array,index); + } + void SetObjectArrayElement(jobjectArray array, jsize index, + jobject val) { + functions->SetObjectArrayElement(this,array,index,val); + } + + jbooleanArray NewBooleanArray(jsize len) { + return functions->NewBooleanArray(this,len); + } + jbyteArray NewByteArray(jsize len) { + return functions->NewByteArray(this,len); + } + jcharArray NewCharArray(jsize len) { + return functions->NewCharArray(this,len); + } + jshortArray NewShortArray(jsize len) { + return functions->NewShortArray(this,len); + } + jintArray NewIntArray(jsize len) { + return functions->NewIntArray(this,len); + } + jlongArray NewLongArray(jsize len) { + return functions->NewLongArray(this,len); + } + jfloatArray NewFloatArray(jsize len) { + return functions->NewFloatArray(this,len); + } + jdoubleArray NewDoubleArray(jsize len) { + return functions->NewDoubleArray(this,len); + } + + jboolean * GetBooleanArrayElements(jbooleanArray array, jboolean *isCopy) { + return functions->GetBooleanArrayElements(this,array,isCopy); + } + jbyte * GetByteArrayElements(jbyteArray array, jboolean *isCopy) { + return functions->GetByteArrayElements(this,array,isCopy); + } + jchar * GetCharArrayElements(jcharArray array, jboolean *isCopy) { + return functions->GetCharArrayElements(this,array,isCopy); + } + jshort * GetShortArrayElements(jshortArray array, jboolean *isCopy) { + return functions->GetShortArrayElements(this,array,isCopy); + } + jint * GetIntArrayElements(jintArray array, jboolean *isCopy) { + return functions->GetIntArrayElements(this,array,isCopy); + } + jlong * GetLongArrayElements(jlongArray array, jboolean *isCopy) { + return functions->GetLongArrayElements(this,array,isCopy); + } + jfloat * GetFloatArrayElements(jfloatArray array, 
jboolean *isCopy) { + return functions->GetFloatArrayElements(this,array,isCopy); + } + jdouble * GetDoubleArrayElements(jdoubleArray array, jboolean *isCopy) { + return functions->GetDoubleArrayElements(this,array,isCopy); + } + + void ReleaseBooleanArrayElements(jbooleanArray array, + jboolean *elems, + jint mode) { + functions->ReleaseBooleanArrayElements(this,array,elems,mode); + } + void ReleaseByteArrayElements(jbyteArray array, + jbyte *elems, + jint mode) { + functions->ReleaseByteArrayElements(this,array,elems,mode); + } + void ReleaseCharArrayElements(jcharArray array, + jchar *elems, + jint mode) { + functions->ReleaseCharArrayElements(this,array,elems,mode); + } + void ReleaseShortArrayElements(jshortArray array, + jshort *elems, + jint mode) { + functions->ReleaseShortArrayElements(this,array,elems,mode); + } + void ReleaseIntArrayElements(jintArray array, + jint *elems, + jint mode) { + functions->ReleaseIntArrayElements(this,array,elems,mode); + } + void ReleaseLongArrayElements(jlongArray array, + jlong *elems, + jint mode) { + functions->ReleaseLongArrayElements(this,array,elems,mode); + } + void ReleaseFloatArrayElements(jfloatArray array, + jfloat *elems, + jint mode) { + functions->ReleaseFloatArrayElements(this,array,elems,mode); + } + void ReleaseDoubleArrayElements(jdoubleArray array, + jdouble *elems, + jint mode) { + functions->ReleaseDoubleArrayElements(this,array,elems,mode); + } + + void GetBooleanArrayRegion(jbooleanArray array, + jsize start, jsize len, jboolean *buf) { + functions->GetBooleanArrayRegion(this,array,start,len,buf); + } + void GetByteArrayRegion(jbyteArray array, + jsize start, jsize len, jbyte *buf) { + functions->GetByteArrayRegion(this,array,start,len,buf); + } + void GetCharArrayRegion(jcharArray array, + jsize start, jsize len, jchar *buf) { + functions->GetCharArrayRegion(this,array,start,len,buf); + } + void GetShortArrayRegion(jshortArray array, + jsize start, jsize len, jshort *buf) { + 
functions->GetShortArrayRegion(this,array,start,len,buf); + } + void GetIntArrayRegion(jintArray array, + jsize start, jsize len, jint *buf) { + functions->GetIntArrayRegion(this,array,start,len,buf); + } + void GetLongArrayRegion(jlongArray array, + jsize start, jsize len, jlong *buf) { + functions->GetLongArrayRegion(this,array,start,len,buf); + } + void GetFloatArrayRegion(jfloatArray array, + jsize start, jsize len, jfloat *buf) { + functions->GetFloatArrayRegion(this,array,start,len,buf); + } + void GetDoubleArrayRegion(jdoubleArray array, + jsize start, jsize len, jdouble *buf) { + functions->GetDoubleArrayRegion(this,array,start,len,buf); + } + + void SetBooleanArrayRegion(jbooleanArray array, jsize start, jsize len, + const jboolean *buf) { + functions->SetBooleanArrayRegion(this,array,start,len,buf); + } + void SetByteArrayRegion(jbyteArray array, jsize start, jsize len, + const jbyte *buf) { + functions->SetByteArrayRegion(this,array,start,len,buf); + } + void SetCharArrayRegion(jcharArray array, jsize start, jsize len, + const jchar *buf) { + functions->SetCharArrayRegion(this,array,start,len,buf); + } + void SetShortArrayRegion(jshortArray array, jsize start, jsize len, + const jshort *buf) { + functions->SetShortArrayRegion(this,array,start,len,buf); + } + void SetIntArrayRegion(jintArray array, jsize start, jsize len, + const jint *buf) { + functions->SetIntArrayRegion(this,array,start,len,buf); + } + void SetLongArrayRegion(jlongArray array, jsize start, jsize len, + const jlong *buf) { + functions->SetLongArrayRegion(this,array,start,len,buf); + } + void SetFloatArrayRegion(jfloatArray array, jsize start, jsize len, + const jfloat *buf) { + functions->SetFloatArrayRegion(this,array,start,len,buf); + } + void SetDoubleArrayRegion(jdoubleArray array, jsize start, jsize len, + const jdouble *buf) { + functions->SetDoubleArrayRegion(this,array,start,len,buf); + } + + jint RegisterNatives(jclass clazz, const JNINativeMethod *methods, + jint nMethods) { + 
return functions->RegisterNatives(this,clazz,methods,nMethods); + } + jint UnregisterNatives(jclass clazz) { + return functions->UnregisterNatives(this,clazz); + } + + jint MonitorEnter(jobject obj) { + return functions->MonitorEnter(this,obj); + } + jint MonitorExit(jobject obj) { + return functions->MonitorExit(this,obj); + } + + jint GetJavaVM(JavaVM **vm) { + return functions->GetJavaVM(this,vm); + } + + void GetStringRegion(jstring str, jsize start, jsize len, jchar *buf) { + functions->GetStringRegion(this,str,start,len,buf); + } + void GetStringUTFRegion(jstring str, jsize start, jsize len, char *buf) { + functions->GetStringUTFRegion(this,str,start,len,buf); + } + + void * GetPrimitiveArrayCritical(jarray array, jboolean *isCopy) { + return functions->GetPrimitiveArrayCritical(this,array,isCopy); + } + void ReleasePrimitiveArrayCritical(jarray array, void *carray, jint mode) { + functions->ReleasePrimitiveArrayCritical(this,array,carray,mode); + } + + const jchar * GetStringCritical(jstring string, jboolean *isCopy) { + return functions->GetStringCritical(this,string,isCopy); + } + void ReleaseStringCritical(jstring string, const jchar *cstring) { + functions->ReleaseStringCritical(this,string,cstring); + } + + jweak NewWeakGlobalRef(jobject obj) { + return functions->NewWeakGlobalRef(this,obj); + } + void DeleteWeakGlobalRef(jweak ref) { + functions->DeleteWeakGlobalRef(this,ref); + } + + jboolean ExceptionCheck() { + return functions->ExceptionCheck(this); + } + + jobject NewDirectByteBuffer(void* address, jlong capacity) { + return functions->NewDirectByteBuffer(this, address, capacity); + } + void* GetDirectBufferAddress(jobject buf) { + return functions->GetDirectBufferAddress(this, buf); + } + jlong GetDirectBufferCapacity(jobject buf) { + return functions->GetDirectBufferCapacity(this, buf); + } + jobjectRefType GetObjectRefType(jobject obj) { + return functions->GetObjectRefType(this, obj); + } + + /* Module Features */ + + jobject GetModule(jclass 
clazz) { + return functions->GetModule(this, clazz); + } + + /* Virtual threads */ + + jboolean IsVirtualThread(jobject obj) { + return functions->IsVirtualThread(this, obj); + } + +#endif /* __cplusplus */ +}; + +/* + * optionString may be any option accepted by the JVM, or one of the + * following: + * + * -D= Set a system property. + * -verbose[:class|gc|jni] Enable verbose output, comma-separated. E.g. + * "-verbose:class" or "-verbose:gc,class" + * Standard names include: gc, class, and jni. + * All nonstandard (VM-specific) names must begin + * with "X". + * vfprintf extraInfo is a pointer to the vfprintf hook. + * exit extraInfo is a pointer to the exit hook. + * abort extraInfo is a pointer to the abort hook. + */ +typedef struct JavaVMOption { + char *optionString; + void *extraInfo; +} JavaVMOption; + +typedef struct JavaVMInitArgs { + jint version; + + jint nOptions; + JavaVMOption *options; + jboolean ignoreUnrecognized; +} JavaVMInitArgs; + +typedef struct JavaVMAttachArgs { + jint version; + + char *name; + jobject group; +} JavaVMAttachArgs; + +/* These will be VM-specific. */ + +#define JDK1_2 +#define JDK1_4 + +/* End VM-specific. 
*/ + +struct JNIInvokeInterface_ { + void *reserved0; + void *reserved1; + void *reserved2; + + jint (JNICALL *DestroyJavaVM)(JavaVM *vm); + + jint (JNICALL *AttachCurrentThread)(JavaVM *vm, void **penv, void *args); + + jint (JNICALL *DetachCurrentThread)(JavaVM *vm); + + jint (JNICALL *GetEnv)(JavaVM *vm, void **penv, jint version); + + jint (JNICALL *AttachCurrentThreadAsDaemon)(JavaVM *vm, void **penv, void *args); +}; + +struct JavaVM_ { + const struct JNIInvokeInterface_ *functions; +#ifdef __cplusplus + + jint DestroyJavaVM() { + return functions->DestroyJavaVM(this); + } + jint AttachCurrentThread(void **penv, void *args) { + return functions->AttachCurrentThread(this, penv, args); + } + jint DetachCurrentThread() { + return functions->DetachCurrentThread(this); + } + + jint GetEnv(void **penv, jint version) { + return functions->GetEnv(this, penv, version); + } + jint AttachCurrentThreadAsDaemon(void **penv, void *args) { + return functions->AttachCurrentThreadAsDaemon(this, penv, args); + } +#endif +}; + +#ifdef _JNI_IMPLEMENTATION_ +#define _JNI_IMPORT_OR_EXPORT_ JNIEXPORT +#else +#define _JNI_IMPORT_OR_EXPORT_ JNIIMPORT +#endif +_JNI_IMPORT_OR_EXPORT_ jint JNICALL +JNI_GetDefaultJavaVMInitArgs(void *args); + +_JNI_IMPORT_OR_EXPORT_ jint JNICALL +JNI_CreateJavaVM(JavaVM **pvm, void **penv, void *args); + +_JNI_IMPORT_OR_EXPORT_ jint JNICALL +JNI_GetCreatedJavaVMs(JavaVM **, jsize, jsize *); + +/* Defined by native libraries. 
*/ +JNIEXPORT jint JNICALL +JNI_OnLoad(JavaVM *vm, void *reserved); + +JNIEXPORT void JNICALL +JNI_OnUnload(JavaVM *vm, void *reserved); + +#define JNI_VERSION_1_1 0x00010001 +#define JNI_VERSION_1_2 0x00010002 +#define JNI_VERSION_1_4 0x00010004 +#define JNI_VERSION_1_6 0x00010006 +#define JNI_VERSION_1_8 0x00010008 +#define JNI_VERSION_9 0x00090000 +#define JNI_VERSION_10 0x000a0000 +#define JNI_VERSION_19 0x00130000 +#define JNI_VERSION_20 0x00140000 +#define JNI_VERSION_21 0x00150000 + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + +#endif /* !_JAVASOFT_JNI_H_ */ diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/jni_md.h b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/jni_md.h new file mode 100644 index 000000000000..6e583da7147e --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/jni_md.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 1996, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef _JAVASOFT_JNI_MD_H_ +#define _JAVASOFT_JNI_MD_H_ + +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif + +#ifndef JNIEXPORT + #if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility) + #ifdef ARM + #define JNIEXPORT __attribute__((externally_visible,visibility("default"))) + #else + #define JNIEXPORT __attribute__((visibility("default"))) + #endif + #else + #define JNIEXPORT + #endif +#endif + +#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility) + #ifdef ARM + #define JNIIMPORT __attribute__((externally_visible,visibility("default"))) + #else + #define JNIIMPORT __attribute__((visibility("default"))) + #endif +#else + #define JNIIMPORT +#endif + +#define JNICALL + +typedef int jint; +#ifdef _LP64 +typedef long jlong; +#else +typedef long long jlong; +#endif + +typedef signed char jbyte; + +#endif /* !_JAVASOFT_JNI_MD_H_ */ diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupSubsystem_linux.cpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupSubsystem_linux.cpp new file mode 100644 index 000000000000..bc3c943bf674 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupSubsystem_linux.cpp @@ -0,0 +1,577 @@ +/* + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include +#include +#include +#include "cgroupSubsystem_linux.hpp" +#include "cgroupV1Subsystem_linux.hpp" +#include "cgroupV2Subsystem_linux.hpp" +#include "logging/log.hpp" +#include "memory/allocation.hpp" +#include "os_linux.hpp" +#include "runtime/globals.hpp" +#include "runtime/os.hpp" +#include "utilities/globalDefinitions.hpp" + +// controller names have to match the *_IDX indices +static const char* cg_controller_name[] = { "cpu", "cpuset", "cpuacct", "memory", "pids" }; + +CgroupSubsystem* CgroupSubsystemFactory::create() { + CgroupV1MemoryController* memory = nullptr; + CgroupV1Controller* cpuset = nullptr; + CgroupV1Controller* cpu = nullptr; + CgroupV1Controller* cpuacct = nullptr; + CgroupV1Controller* pids = nullptr; + CgroupInfo cg_infos[CG_INFO_LENGTH]; + u1 cg_type_flags = INVALID_CGROUPS_GENERIC; + const char* proc_cgroups = "/proc/cgroups"; + const char* proc_self_cgroup = "/proc/self/cgroup"; + const char* proc_self_mountinfo = "/proc/self/mountinfo"; + + bool valid_cgroup = determine_type(cg_infos, proc_cgroups, proc_self_cgroup, proc_self_mountinfo, &cg_type_flags); + + 
if (!valid_cgroup) { + // Could not detect cgroup type + return nullptr; + } + assert(is_valid_cgroup(&cg_type_flags), "Expected valid cgroup type"); + + if (is_cgroup_v2(&cg_type_flags)) { + // Cgroups v2 case, we have all the info we need. + // Construct the subsystem, free resources and return + // Note: any index in cg_infos will do as the path is the same for + // all controllers. + CgroupController* unified = new CgroupV2Controller(cg_infos[MEMORY_IDX]._mount_path, cg_infos[MEMORY_IDX]._cgroup_path); + log_debug(os, container)("Detected cgroups v2 unified hierarchy"); + cleanup(cg_infos); + return new CgroupV2Subsystem(unified); + } + + /* + * Cgroup v1 case: + * + * Use info gathered previously from /proc/self/cgroup + * and map host mount point to + * local one via /proc/self/mountinfo content above + * + * Docker example: + * 5:memory:/docker/6558aed8fc662b194323ceab5b964f69cf36b3e8af877a14b80256e93aecb044 + * + * Host example: + * 5:memory:/user.slice + * + * Construct a path to the process specific memory and cpuset + * cgroup directory. 
+ * + * For a container running under Docker from memory example above + * the paths would be: + * + * /sys/fs/cgroup/memory + * + * For a Host from memory example above the path would be: + * + * /sys/fs/cgroup/memory/user.slice + * + */ + assert(is_cgroup_v1(&cg_type_flags), "Cgroup v1 expected"); + for (int i = 0; i < CG_INFO_LENGTH; i++) { + CgroupInfo info = cg_infos[i]; + if (info._data_complete) { // pids controller might have incomplete data + if (strcmp(info._name, "memory") == 0) { + memory = new CgroupV1MemoryController(info._root_mount_path, info._mount_path); + memory->set_subsystem_path(info._cgroup_path); + } else if (strcmp(info._name, "cpuset") == 0) { + cpuset = new CgroupV1Controller(info._root_mount_path, info._mount_path); + cpuset->set_subsystem_path(info._cgroup_path); + } else if (strcmp(info._name, "cpu") == 0) { + cpu = new CgroupV1Controller(info._root_mount_path, info._mount_path); + cpu->set_subsystem_path(info._cgroup_path); + } else if (strcmp(info._name, "cpuacct") == 0) { + cpuacct = new CgroupV1Controller(info._root_mount_path, info._mount_path); + cpuacct->set_subsystem_path(info._cgroup_path); + } else if (strcmp(info._name, "pids") == 0) { + pids = new CgroupV1Controller(info._root_mount_path, info._mount_path); + pids->set_subsystem_path(info._cgroup_path); + } + } else { + log_debug(os, container)("CgroupInfo for %s not complete", cg_controller_name[i]); + } + } + cleanup(cg_infos); + return new CgroupV1Subsystem(cpuset, cpu, cpuacct, pids, memory); +} + +void CgroupSubsystemFactory::set_controller_paths(CgroupInfo* cg_infos, + int controller, + const char* name, + char* mount_path, + char* root_path) { + if (cg_infos[controller]._mount_path != nullptr) { + // On some systems duplicate controllers get mounted in addition to + // the main cgroup controllers most likely under /sys/fs/cgroup. In that + // case pick the one under /sys/fs/cgroup and discard others. 
+ if (strstr(cg_infos[controller]._mount_path, "/sys/fs/cgroup") != cg_infos[controller]._mount_path) { + log_debug(os, container)("Duplicate %s controllers detected. Picking %s, skipping %s.", + name, mount_path, cg_infos[controller]._mount_path); + os::free(cg_infos[controller]._mount_path); + os::free(cg_infos[controller]._root_mount_path); + cg_infos[controller]._mount_path = os::strdup(mount_path); + cg_infos[controller]._root_mount_path = os::strdup(root_path); + } else { + log_debug(os, container)("Duplicate %s controllers detected. Picking %s, skipping %s.", + name, cg_infos[controller]._mount_path, mount_path); + } + } else { + cg_infos[controller]._mount_path = os::strdup(mount_path); + cg_infos[controller]._root_mount_path = os::strdup(root_path); + } +} + +bool CgroupSubsystemFactory::determine_type(CgroupInfo* cg_infos, + const char* proc_cgroups, + const char* proc_self_cgroup, + const char* proc_self_mountinfo, + u1* flags) { + FILE *mntinfo = nullptr; + FILE *cgroups = nullptr; + FILE *cgroup = nullptr; + char buf[MAXPATHLEN+1]; + char *p; + bool is_cgroupsV2; + // true iff all required controllers, memory, cpu, cpuset, cpuacct are enabled + // at the kernel level. + // pids might not be enabled on older Linux distros (SLES 12.1, RHEL 7.1) + bool all_required_controllers_enabled; + + /* + * Read /proc/cgroups so as to be able to distinguish cgroups v2 vs cgroups v1. + * + * For cgroups v1 hierarchy (hybrid or legacy), cpu, cpuacct, cpuset, memory controllers + * must have non-zero for the hierarchy ID field and relevant controllers mounted. + * Conversely, for cgroups v2 (unified hierarchy), cpu, cpuacct, cpuset, memory + * controllers must have hierarchy ID 0 and the unified controller mounted. 
+ */ + cgroups = os::fopen(proc_cgroups, "r"); + if (cgroups == nullptr) { + log_debug(os, container)("Can't open %s, %s", proc_cgroups, os::strerror(errno)); + *flags = INVALID_CGROUPS_GENERIC; + return false; + } + + while ((p = fgets(buf, MAXPATHLEN, cgroups)) != nullptr) { + char name[MAXPATHLEN+1]; + int hierarchy_id; + int enabled; + + // Format of /proc/cgroups documented via man 7 cgroups + if (sscanf(p, "%s %d %*d %d", name, &hierarchy_id, &enabled) != 3) { + continue; + } + if (strcmp(name, "memory") == 0) { + cg_infos[MEMORY_IDX]._name = os::strdup(name); + cg_infos[MEMORY_IDX]._hierarchy_id = hierarchy_id; + cg_infos[MEMORY_IDX]._enabled = (enabled == 1); + } else if (strcmp(name, "cpuset") == 0) { + cg_infos[CPUSET_IDX]._name = os::strdup(name); + cg_infos[CPUSET_IDX]._hierarchy_id = hierarchy_id; + cg_infos[CPUSET_IDX]._enabled = (enabled == 1); + } else if (strcmp(name, "cpu") == 0) { + cg_infos[CPU_IDX]._name = os::strdup(name); + cg_infos[CPU_IDX]._hierarchy_id = hierarchy_id; + cg_infos[CPU_IDX]._enabled = (enabled == 1); + } else if (strcmp(name, "cpuacct") == 0) { + cg_infos[CPUACCT_IDX]._name = os::strdup(name); + cg_infos[CPUACCT_IDX]._hierarchy_id = hierarchy_id; + cg_infos[CPUACCT_IDX]._enabled = (enabled == 1); + } else if (strcmp(name, "pids") == 0) { + log_debug(os, container)("Detected optional pids controller entry in %s", proc_cgroups); + cg_infos[PIDS_IDX]._name = os::strdup(name); + cg_infos[PIDS_IDX]._hierarchy_id = hierarchy_id; + cg_infos[PIDS_IDX]._enabled = (enabled == 1); + } + } + fclose(cgroups); + + is_cgroupsV2 = true; + all_required_controllers_enabled = true; + for (int i = 0; i < CG_INFO_LENGTH; i++) { + // pids controller is optional. 
All other controllers are required + if (i != PIDS_IDX) { + is_cgroupsV2 = is_cgroupsV2 && cg_infos[i]._hierarchy_id == 0; + all_required_controllers_enabled = all_required_controllers_enabled && cg_infos[i]._enabled; + } + if (log_is_enabled(Debug, os, container) && !cg_infos[i]._enabled) { + log_debug(os, container)("controller %s is not enabled\n", cg_controller_name[i]); + } + } + + if (!all_required_controllers_enabled) { + // one or more required controllers disabled, disable container support + log_debug(os, container)("One or more required controllers disabled at kernel level."); + cleanup(cg_infos); + *flags = INVALID_CGROUPS_GENERIC; + return false; + } + + /* + * Read /proc/self/cgroup and determine: + * - the cgroup path for cgroups v2 or + * - on a cgroups v1 system, collect info for mapping + * the host mount point to the local one via /proc/self/mountinfo below. + */ + cgroup = os::fopen(proc_self_cgroup, "r"); + if (cgroup == nullptr) { + log_debug(os, container)("Can't open %s, %s", + proc_self_cgroup, os::strerror(errno)); + cleanup(cg_infos); + *flags = INVALID_CGROUPS_GENERIC; + return false; + } + + while ((p = fgets(buf, MAXPATHLEN, cgroup)) != nullptr) { + char *controllers; + char *token; + char *hierarchy_id_str; + int hierarchy_id; + char *cgroup_path; + + hierarchy_id_str = strsep(&p, ":"); + hierarchy_id = atoi(hierarchy_id_str); + /* Get controllers and base */ + controllers = strsep(&p, ":"); + cgroup_path = strsep(&p, "\n"); + + if (controllers == nullptr) { + continue; + } + + while (!is_cgroupsV2 && (token = strsep(&controllers, ",")) != nullptr) { + if (strcmp(token, "memory") == 0) { + assert(hierarchy_id == cg_infos[MEMORY_IDX]._hierarchy_id, "/proc/cgroups and /proc/self/cgroup hierarchy mismatch for memory"); + cg_infos[MEMORY_IDX]._cgroup_path = os::strdup(cgroup_path); + } else if (strcmp(token, "cpuset") == 0) { + assert(hierarchy_id == cg_infos[CPUSET_IDX]._hierarchy_id, "/proc/cgroups and /proc/self/cgroup hierarchy 
mismatch for cpuset"); + cg_infos[CPUSET_IDX]._cgroup_path = os::strdup(cgroup_path); + } else if (strcmp(token, "cpu") == 0) { + assert(hierarchy_id == cg_infos[CPU_IDX]._hierarchy_id, "/proc/cgroups and /proc/self/cgroup hierarchy mismatch for cpu"); + cg_infos[CPU_IDX]._cgroup_path = os::strdup(cgroup_path); + } else if (strcmp(token, "cpuacct") == 0) { + assert(hierarchy_id == cg_infos[CPUACCT_IDX]._hierarchy_id, "/proc/cgroups and /proc/self/cgroup hierarchy mismatch for cpuacc"); + cg_infos[CPUACCT_IDX]._cgroup_path = os::strdup(cgroup_path); + } else if (strcmp(token, "pids") == 0) { + assert(hierarchy_id == cg_infos[PIDS_IDX]._hierarchy_id, "/proc/cgroups (%d) and /proc/self/cgroup (%d) hierarchy mismatch for pids", + cg_infos[PIDS_IDX]._hierarchy_id, hierarchy_id); + cg_infos[PIDS_IDX]._cgroup_path = os::strdup(cgroup_path); + } + } + if (is_cgroupsV2) { + // On some systems we have mixed cgroups v1 and cgroups v2 controllers (e.g. freezer on cg1 and + // all relevant controllers on cg2). Only set the cgroup path when we see a hierarchy id of 0. 
+ if (hierarchy_id != 0) { + continue; + } + for (int i = 0; i < CG_INFO_LENGTH; i++) { + assert(cg_infos[i]._cgroup_path == nullptr, "cgroup path must only be set once"); + cg_infos[i]._cgroup_path = os::strdup(cgroup_path); + } + } + } + fclose(cgroup); + + // Find various mount points by reading /proc/self/mountinfo + // mountinfo format is documented at https://www.kernel.org/doc/Documentation/filesystems/proc.txt + mntinfo = os::fopen(proc_self_mountinfo, "r"); + if (mntinfo == nullptr) { + log_debug(os, container)("Can't open %s, %s", + proc_self_mountinfo, os::strerror(errno)); + cleanup(cg_infos); + *flags = INVALID_CGROUPS_GENERIC; + return false; + } + + bool cgroupv2_mount_point_found = false; + bool any_cgroup_mounts_found = false; + while ((p = fgets(buf, MAXPATHLEN, mntinfo)) != nullptr) { + char tmp_fs_type[MAXPATHLEN+1]; + char tmproot[MAXPATHLEN+1]; + char tmpmount[MAXPATHLEN+1]; + char tmpcgroups[MAXPATHLEN+1]; + char *cptr = tmpcgroups; + char *token; + + // Cgroup v2 relevant info. We only look for the _mount_path iff is_cgroupsV2 so + // as to avoid memory stomping of the _mount_path pointer later on in the cgroup v1 + // block in the hybrid case. + if (is_cgroupsV2 && sscanf(p, "%*d %*d %*d:%*d %s %s %*[^-]- %s %*s %*s", tmproot, tmpmount, tmp_fs_type) == 3) { + // we likely have an early match return (e.g. 
cgroup fs match), be sure we have cgroup2 as fstype + if (strcmp("cgroup2", tmp_fs_type) == 0) { + cgroupv2_mount_point_found = true; + any_cgroup_mounts_found = true; + for (int i = 0; i < CG_INFO_LENGTH; i++) { + set_controller_paths(cg_infos, i, "(cg2, unified)", tmpmount, tmproot); + } + } + } + + /* Cgroup v1 relevant info + * + * Find the cgroup mount point for memory, cpuset, cpu, cpuacct, pids + * + * Example for docker: + * 219 214 0:29 /docker/7208cebd00fa5f2e342b1094f7bed87fa25661471a4637118e65f1c995be8a34 /sys/fs/cgroup/memory ro,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory + * + * Example for host: + * 34 28 0:29 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,memory + * + * 44 31 0:39 / /sys/fs/cgroup/pids rw,nosuid,nodev,noexec,relatime shared:23 - cgroup cgroup rw,pids + */ + if (sscanf(p, "%*d %*d %*d:%*d %s %s %*[^-]- %s %*s %s", tmproot, tmpmount, tmp_fs_type, tmpcgroups) == 4) { + if (strcmp("cgroup", tmp_fs_type) != 0) { + // Skip cgroup2 fs lines on hybrid or unified hierarchy. 
+ continue; + } + while ((token = strsep(&cptr, ",")) != nullptr) { + if (strcmp(token, "memory") == 0) { + any_cgroup_mounts_found = true; + set_controller_paths(cg_infos, MEMORY_IDX, token, tmpmount, tmproot); + cg_infos[MEMORY_IDX]._data_complete = true; + } else if (strcmp(token, "cpuset") == 0) { + any_cgroup_mounts_found = true; + set_controller_paths(cg_infos, CPUSET_IDX, token, tmpmount, tmproot); + cg_infos[CPUSET_IDX]._data_complete = true; + } else if (strcmp(token, "cpu") == 0) { + any_cgroup_mounts_found = true; + set_controller_paths(cg_infos, CPU_IDX, token, tmpmount, tmproot); + cg_infos[CPU_IDX]._data_complete = true; + } else if (strcmp(token, "cpuacct") == 0) { + any_cgroup_mounts_found = true; + set_controller_paths(cg_infos, CPUACCT_IDX, token, tmpmount, tmproot); + cg_infos[CPUACCT_IDX]._data_complete = true; + } else if (strcmp(token, "pids") == 0) { + any_cgroup_mounts_found = true; + set_controller_paths(cg_infos, PIDS_IDX, token, tmpmount, tmproot); + cg_infos[PIDS_IDX]._data_complete = true; + } + } + } + } + fclose(mntinfo); + + // Neither cgroup2 nor cgroup filesystems mounted via /proc/self/mountinfo + // No point in continuing. + if (!any_cgroup_mounts_found) { + log_trace(os, container)("No relevant cgroup controllers mounted."); + cleanup(cg_infos); + *flags = INVALID_CGROUPS_NO_MOUNT; + return false; + } + + if (is_cgroupsV2) { + if (!cgroupv2_mount_point_found) { + log_trace(os, container)("Mount point for cgroupv2 not found in /proc/self/mountinfo"); + cleanup(cg_infos); + *flags = INVALID_CGROUPS_V2; + return false; + } + // Cgroups v2 case, we have all the info we need. 
+ *flags = CGROUPS_V2; + return true; + } + + // What follows is cgroups v1 + log_debug(os, container)("Detected cgroups hybrid or legacy hierarchy, using cgroups v1 controllers"); + + if (!cg_infos[MEMORY_IDX]._data_complete) { + log_debug(os, container)("Required cgroup v1 memory subsystem not found"); + cleanup(cg_infos); + *flags = INVALID_CGROUPS_V1; + return false; + } + if (!cg_infos[CPUSET_IDX]._data_complete) { + log_debug(os, container)("Required cgroup v1 cpuset subsystem not found"); + cleanup(cg_infos); + *flags = INVALID_CGROUPS_V1; + return false; + } + if (!cg_infos[CPU_IDX]._data_complete) { + log_debug(os, container)("Required cgroup v1 cpu subsystem not found"); + cleanup(cg_infos); + *flags = INVALID_CGROUPS_V1; + return false; + } + if (!cg_infos[CPUACCT_IDX]._data_complete) { + log_debug(os, container)("Required cgroup v1 cpuacct subsystem not found"); + cleanup(cg_infos); + *flags = INVALID_CGROUPS_V1; + return false; + } + if (log_is_enabled(Debug, os, container) && !cg_infos[PIDS_IDX]._data_complete) { + log_debug(os, container)("Optional cgroup v1 pids subsystem not found"); + // keep the other controller info, pids is optional + } + // Cgroups v1 case, we have all the info we need. + *flags = CGROUPS_V1; + return true; +}; + +void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) { + assert(cg_infos != nullptr, "Invariant"); + for (int i = 0; i < CG_INFO_LENGTH; i++) { + os::free(cg_infos[i]._name); + os::free(cg_infos[i]._cgroup_path); + os::free(cg_infos[i]._root_mount_path); + os::free(cg_infos[i]._mount_path); + } +} + +/* active_processor_count + * + * Calculate an appropriate number of active processors for the + * VM to use based on these three inputs. 
+ * + * cpu affinity + * cgroup cpu quota & cpu period + * cgroup cpu shares + * + * Algorithm: + * + * Determine the number of available CPUs from sched_getaffinity + * + * If user specified a quota (quota != -1), calculate the number of + * required CPUs by dividing quota by period. + * + * All results of division are rounded up to the next whole number. + * + * If quotas have not been specified, return the + * number of active processors in the system. + * + * If quotas have been specified, the resulting number + * returned will never exceed the number of active processors. + * + * return: + * number of CPUs + */ +int CgroupSubsystem::active_processor_count() { + int quota_count = 0; + int cpu_count, limit_count; + int result; + + // We use a cache with a timeout to avoid performing expensive + // computations in the event this function is called frequently. + // [See 8227006]. + CachingCgroupController* contrl = cpu_controller(); + CachedMetric* cpu_limit = contrl->metrics_cache(); + if (!cpu_limit->should_check_metric()) { + int val = (int)cpu_limit->value(); + log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", val); + return val; + } + + cpu_count = limit_count = os::Linux::active_processor_count(); + int quota = cpu_quota(); + int period = cpu_period(); + + if (quota > -1 && period > 0) { + quota_count = ceilf((float)quota / (float)period); + log_trace(os, container)("CPU Quota count based on quota/period: %d", quota_count); + } + + // Use quotas + if (quota_count != 0) { + limit_count = quota_count; + } + + result = MIN2(cpu_count, limit_count); + log_trace(os, container)("OSContainer::active_processor_count: %d", result); + + // Update cached metric to avoid re-reading container settings too often + cpu_limit->set_value(result, OSCONTAINER_CACHE_TIMEOUT); + + return result; +} + +/* memory_limit_in_bytes + * + * Return the limit of available memory for this process. 
+ * + * return: + * memory limit in bytes or + * -1 for unlimited + * OSCONTAINER_ERROR for not supported + */ +jlong CgroupSubsystem::memory_limit_in_bytes() { + CachingCgroupController* contrl = memory_controller(); + CachedMetric* memory_limit = contrl->metrics_cache(); + if (!memory_limit->should_check_metric()) { + return memory_limit->value(); + } + jlong phys_mem = os::Linux::physical_memory(); + log_trace(os, container)("total physical memory: " JLONG_FORMAT, phys_mem); + jlong mem_limit = read_memory_limit_in_bytes(); + + if (mem_limit <= 0 || mem_limit >= phys_mem) { + jlong read_mem_limit = mem_limit; + const char *reason; + if (mem_limit >= phys_mem) { + // Exceeding physical memory is treated as unlimited. Cg v1's implementation + // of read_memory_limit_in_bytes() caps this at phys_mem since Cg v1 has no + // value to represent 'max'. Cg v2 may return a value >= phys_mem if e.g. the + // container engine was started with a memory flag exceeding it. + reason = "ignored"; + mem_limit = -1; + } else if (OSCONTAINER_ERROR == mem_limit) { + reason = "failed"; + } else { + assert(mem_limit == -1, "Expected unlimited"); + reason = "unlimited"; + } + log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value " JLONG_FORMAT, + reason, read_mem_limit, phys_mem); + } + + // Update cached metric to avoid re-reading container settings too often + memory_limit->set_value(mem_limit, OSCONTAINER_CACHE_TIMEOUT); + return mem_limit; +} + +jlong CgroupSubsystem::limit_from_str(char* limit_str) { + if (limit_str == nullptr) { + return OSCONTAINER_ERROR; + } + // Unlimited memory in cgroups is the literal string 'max' for + // some controllers, for example the pids controller. 
+ if (strcmp("max", limit_str) == 0) { + os::free(limit_str); + return (jlong)-1; + } + julong limit; + if (sscanf(limit_str, JULONG_FORMAT, &limit) != 1) { + os::free(limit_str); + return OSCONTAINER_ERROR; + } + os::free(limit_str); + return (jlong)limit; +} diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupSubsystem_linux.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupSubsystem_linux.hpp new file mode 100644 index 000000000000..8827a39e8f14 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupSubsystem_linux.hpp @@ -0,0 +1,355 @@ +/* + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef CGROUP_SUBSYSTEM_LINUX_HPP +#define CGROUP_SUBSYSTEM_LINUX_HPP + +#include "memory/allocation.hpp" +#include "runtime/os.hpp" +#include "logging/log.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/macros.hpp" +#include "osContainer_linux.hpp" + +// Shared cgroups code (used by cgroup version 1 and version 2) + +/* + * PER_CPU_SHARES has been set to 1024 because CPU shares' quota + * is commonly used in cloud frameworks like Kubernetes[1], + * AWS[2] and Mesos[3] in a similar way. They spawn containers with + * --cpu-shares option values scaled by PER_CPU_SHARES. Thus, we do + * the inverse for determining the number of possible available + * CPUs to the JVM inside a container. See JDK-8216366. + * + * [1] https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu + * In particular: + * When using Docker: + * The spec.containers[].resources.requests.cpu is converted to its core value, which is potentially + * fractional, and multiplied by 1024. The greater of this number or 2 is used as the value of the + * --cpu-shares flag in the docker run command. 
+ * [2] https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html
+ * [3] https://github.com/apache/mesos/blob/3478e344fb77d931f6122980c6e94cd3913c441d/src/docker/docker.cpp#L648
+ *     https://github.com/apache/mesos/blob/3478e344fb77d931f6122980c6e94cd3913c441d/src/slave/containerizer/mesos/isolators/cgroups/constants.hpp#L30
+ */
+#define PER_CPU_SHARES 1024
+
+#define CGROUPS_V1 1
+#define CGROUPS_V2 2
+#define INVALID_CGROUPS_V2 3
+#define INVALID_CGROUPS_V1 4
+#define INVALID_CGROUPS_NO_MOUNT 5
+#define INVALID_CGROUPS_GENERIC 6
+
+// Five controllers: cpu, cpuset, cpuacct, memory, pids
+#define CG_INFO_LENGTH 5
+#define CPUSET_IDX 0
+#define CPU_IDX 1
+#define CPUACCT_IDX 2
+#define MEMORY_IDX 3
+#define PIDS_IDX 4
+
+typedef char * cptr;
+
+class CgroupController: public CHeapObj<mtInternal> {
+  public:
+    virtual char *subsystem_path() = 0;
+};
+
+PRAGMA_DIAG_PUSH
+PRAGMA_FORMAT_NONLITERAL_IGNORED
+// Parses a subsystem's file, looking for a matching line.
+// If key is null, then the first line will be matched with scan_fmt.
+// If key isn't null, then each line will be matched, looking for something that matches "$key $scan_fmt".
+// The matching value will be assigned to returnval.
+// scan_fmt uses scanf() syntax.
+// Return value: 0 on match, OSCONTAINER_ERROR on error.
+template <typename T> int subsystem_file_line_contents(CgroupController* c,
+                                              const char *filename,
+                                              const char *key,
+                                              const char *scan_fmt,
+                                              T returnval) {
+  if (c == nullptr) {
+    log_debug(os, container)("subsystem_file_line_contents: CgroupController* is null");
+    return OSCONTAINER_ERROR;
+  }
+  if (c->subsystem_path() == nullptr) {
+    log_debug(os, container)("subsystem_file_line_contents: subsystem path is null");
+    return OSCONTAINER_ERROR;
+  }
+
+  stringStream file_path;
+  file_path.print_raw(c->subsystem_path());
+  file_path.print_raw(filename);
+
+  if (file_path.size() > (MAXPATHLEN-1)) {
+    log_debug(os, container)("File path too long %s, %s", file_path.base(), filename);
+    return OSCONTAINER_ERROR;
+  }
+  const char* absolute_path = file_path.freeze();
+  log_trace(os, container)("Path to %s is %s", filename, absolute_path);
+
+  FILE* fp = os::fopen(absolute_path, "r");
+  if (fp == nullptr) {
+    log_debug(os, container)("Open of file %s failed, %s", absolute_path, os::strerror(errno));
+    return OSCONTAINER_ERROR;
+  }
+
+  const int buf_len = MAXPATHLEN+1;
+  char buf[buf_len];
+  char* line = fgets(buf, buf_len, fp);
+  if (line == nullptr) {
+    log_debug(os, container)("Empty file %s", absolute_path);
+    fclose(fp);
+    return OSCONTAINER_ERROR;
+  }
+
+  bool found_match = false;
+  if (key == nullptr) {
+    // File consists of a single line according to caller, with only a value
+    int matched = sscanf(line, scan_fmt, returnval);
+    found_match = matched == 1;
+  } else {
+    // File consists of multiple lines in a "key value"
+    // fashion, we have to find the key.
+ const int key_len = (int)strlen(key); + for (; line != nullptr; line = fgets(buf, buf_len, fp)) { + char* key_substr = strstr(line, key); + char after_key = line[key_len]; + if (key_substr == line + && isspace(after_key) != 0 + && after_key != '\n') { + // Skip key, skip space + const char* value_substr = line + key_len + 1; + int matched = sscanf(value_substr, scan_fmt, returnval); + found_match = matched == 1; + if (found_match) { + break; + } + } + } + } + fclose(fp); + if (found_match) { + return 0; + } + log_debug(os, container)("Type %s (key == %s) not found in file %s", scan_fmt, + (key == nullptr ? "null" : key), absolute_path); + return OSCONTAINER_ERROR; +} +PRAGMA_DIAG_POP + +// log_fmt can be different than scan_fmt. For example +// cpu_period() for cgv2 uses log_fmt='%d' and scan_fmt='%*s %d' +#define GET_CONTAINER_INFO(return_type, subsystem, filename, \ + logstring, log_fmt, scan_fmt, variable) \ + return_type variable; \ +{ \ + int err; \ + err = subsystem_file_line_contents(subsystem, \ + filename, \ + nullptr, \ + scan_fmt, \ + &variable); \ + if (err != 0) { \ + log_trace(os, container)(logstring "%d", OSCONTAINER_ERROR); \ + return (return_type) OSCONTAINER_ERROR; \ + } \ + \ + log_trace(os, container)(logstring log_fmt, variable); \ +} + +#define GET_CONTAINER_INFO_CPTR(return_type, subsystem, filename, \ + logstring, scan_fmt, variable, bufsize) \ + char variable[bufsize]; \ +{ \ + int err; \ + err = subsystem_file_line_contents(subsystem, \ + filename, \ + nullptr, \ + scan_fmt, \ + variable); \ + if (err != 0) \ + return (return_type) nullptr; \ + \ + log_trace(os, container)(logstring, variable); \ +} + +#define GET_CONTAINER_INFO_LINE(return_type, controller, filename, \ + matchline, logstring, scan_fmt, variable) \ + return_type variable; \ +{ \ + int err; \ + err = subsystem_file_line_contents(controller, \ + filename, \ + matchline, \ + scan_fmt, \ + &variable); \ + if (err != 0) \ + return (return_type) OSCONTAINER_ERROR; \ + \ + 
log_trace(os, container)(logstring, variable); \ +} + + +class CachedMetric : public CHeapObj{ + private: + volatile jlong _metric; + volatile jlong _next_check_counter; + public: + CachedMetric() { + _metric = -1; + _next_check_counter = min_jlong; + } + bool should_check_metric() { +#ifdef NATIVE_IMAGE + // NOTE (chaeubl): we do all caching on the Java-side instead of the C-side + return true; +#else + return os::elapsed_counter() > _next_check_counter; +#endif // NATIVE_IMAGE + } + jlong value() { return _metric; } + void set_value(jlong value, jlong timeout) { + _metric = value; +#ifndef NATIVE_IMAGE + // Metric is unlikely to change, but we want to remain + // responsive to configuration changes. A very short grace time + // between re-read avoids excessive overhead during startup without + // significantly reducing the VMs ability to promptly react to changed + // metric config + _next_check_counter = os::elapsed_counter() + timeout; +#endif // !NATIVE_IMAGE + } +}; + +class CachingCgroupController : public CHeapObj { + private: + CgroupController* _controller; + CachedMetric* _metrics_cache; + + public: + CachingCgroupController(CgroupController* cont) { + _controller = cont; + _metrics_cache = new CachedMetric(); + } + + CachedMetric* metrics_cache() { return _metrics_cache; } + CgroupController* controller() { return _controller; } +}; + +class CgroupSubsystem: public CHeapObj { + public: + jlong memory_limit_in_bytes(); + int active_processor_count(); + jlong limit_from_str(char* limit_str); + + virtual int cpu_quota() = 0; + virtual int cpu_period() = 0; + virtual int cpu_shares() = 0; + virtual jlong pids_max() = 0; + virtual jlong pids_current() = 0; + virtual jlong memory_usage_in_bytes() = 0; + virtual jlong memory_and_swap_limit_in_bytes() = 0; + virtual jlong memory_soft_limit_in_bytes() = 0; + virtual jlong memory_max_usage_in_bytes() = 0; + virtual jlong rss_usage_in_bytes() = 0; + virtual jlong cache_usage_in_bytes() = 0; + + virtual char * 
cpu_cpuset_cpus() = 0; + virtual char * cpu_cpuset_memory_nodes() = 0; + virtual jlong read_memory_limit_in_bytes() = 0; + virtual const char * container_type() = 0; + virtual CachingCgroupController* memory_controller() = 0; + virtual CachingCgroupController* cpu_controller() = 0; + +#ifndef NATIVE_IMAGE + virtual void print_version_specific_info(outputStream* st) = 0; +#endif // !NATIVE_IMAGE +}; + +// Utility class for storing info retrieved from /proc/cgroups, +// /proc/self/cgroup and /proc/self/mountinfo +// For reference see man 7 cgroups and CgroupSubsystemFactory +class CgroupInfo : public StackObj { + friend class CgroupSubsystemFactory; + friend class WhiteBox; + + private: + char* _name; + int _hierarchy_id; + bool _enabled; + bool _data_complete; // indicating cgroup v1 data is complete for this controller + char* _cgroup_path; // cgroup controller path from /proc/self/cgroup + char* _root_mount_path; // root mount path from /proc/self/mountinfo. Unused for cgroup v2 + char* _mount_path; // mount path from /proc/self/mountinfo. + + public: + CgroupInfo() { + _name = nullptr; + _hierarchy_id = -1; + _enabled = false; + _data_complete = false; + _cgroup_path = nullptr; + _root_mount_path = nullptr; + _mount_path = nullptr; + } + +}; + +class CgroupSubsystemFactory: AllStatic { + friend class WhiteBox; + + public: + static CgroupSubsystem* create(); + private: + static inline bool is_cgroup_v2(u1* flags) { + return *flags == CGROUPS_V2; + } + +#ifdef ASSERT + static inline bool is_valid_cgroup(u1* flags) { + return *flags == CGROUPS_V1 || *flags == CGROUPS_V2; + } + static inline bool is_cgroup_v1(u1* flags) { + return *flags == CGROUPS_V1; + } +#endif + + static void set_controller_paths(CgroupInfo* cg_infos, + int controller, + const char* name, + char* mount_path, + char* root_path); + // Determine the cgroup type (version 1 or version 2), given + // relevant paths to files. Sets 'flags' accordingly. 
+ static bool determine_type(CgroupInfo* cg_infos, + const char* proc_cgroups, + const char* proc_self_cgroup, + const char* proc_self_mountinfo, + u1* flags); + static void cleanup(CgroupInfo* cg_infos); +}; + +#endif // CGROUP_SUBSYSTEM_LINUX_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp new file mode 100644 index 000000000000..86f289c6dc7a --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp @@ -0,0 +1,351 @@ +/* + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include +#include +#include +#include "cgroupV1Subsystem_linux.hpp" +#include "logging/log.hpp" +#include "memory/allocation.hpp" +#include "runtime/globals.hpp" +#include "runtime/os.hpp" +#include "utilities/globalDefinitions.hpp" +#include "os_linux.hpp" + +/* + * Set directory to subsystem specific files based + * on the contents of the mountinfo and cgroup files. + */ +void CgroupV1Controller::set_subsystem_path(char *cgroup_path) { + stringStream ss; + if (_root != nullptr && cgroup_path != nullptr) { + if (strcmp(_root, "/") == 0) { + ss.print_raw(_mount_point); + if (strcmp(cgroup_path,"/") != 0) { + ss.print_raw(cgroup_path); + } + _path = os::strdup(ss.base()); + } else { + if (strcmp(_root, cgroup_path) == 0) { + ss.print_raw(_mount_point); + _path = os::strdup(ss.base()); + } else { + char *p = strstr(cgroup_path, _root); + if (p != nullptr && p == _root) { + if (strlen(cgroup_path) > strlen(_root)) { + ss.print_raw(_mount_point); + const char* cg_path_sub = cgroup_path + strlen(_root); + ss.print_raw(cg_path_sub); + _path = os::strdup(ss.base()); + } + } + } + } + } +} + +/* uses_mem_hierarchy + * + * Return whether or not hierarchical cgroup accounting is being + * done. 
+ * + * return: + * A number > 0 if true, or + * OSCONTAINER_ERROR for not supported + */ +jlong CgroupV1MemoryController::uses_mem_hierarchy() { + GET_CONTAINER_INFO(jlong, this, "/memory.use_hierarchy", + "Use Hierarchy is: ", JLONG_FORMAT, JLONG_FORMAT, use_hierarchy); + return use_hierarchy; +} + +void CgroupV1MemoryController::set_subsystem_path(char *cgroup_path) { + CgroupV1Controller::set_subsystem_path(cgroup_path); + jlong hierarchy = uses_mem_hierarchy(); + if (hierarchy > 0) { + set_hierarchical(true); + } +} + +jlong CgroupV1Subsystem::read_memory_limit_in_bytes() { + GET_CONTAINER_INFO(julong, _memory->controller(), "/memory.limit_in_bytes", + "Memory Limit is: ", JULONG_FORMAT, JULONG_FORMAT, memlimit); + + if (memlimit >= os::Linux::physical_memory()) { + log_trace(os, container)("Non-Hierarchical Memory Limit is: Unlimited"); + CgroupV1MemoryController* mem_controller = reinterpret_cast(_memory->controller()); + if (mem_controller->is_hierarchical()) { + GET_CONTAINER_INFO_LINE(julong, _memory->controller(), "/memory.stat", "hierarchical_memory_limit", + "Hierarchical Memory Limit is: " JULONG_FORMAT, JULONG_FORMAT, hier_memlimit) + if (hier_memlimit >= os::Linux::physical_memory()) { + log_trace(os, container)("Hierarchical Memory Limit is: Unlimited"); + } else { + return (jlong)hier_memlimit; + } + } + return (jlong)-1; + } + else { + return (jlong)memlimit; + } +} + +/* read_mem_swap + * + * Determine the memory and swap limit metric. Returns a positive limit value strictly + * lower than the physical memory and swap limit iff there is a limit. Otherwise a + * negative value is returned indicating the determined status. + * + * returns: + * * A number > 0 if the limit is available and lower than a physical upper bound. + * * OSCONTAINER_ERROR if the limit cannot be retrieved (i.e. 
not supported) or + * * -1 if there isn't any limit in place (note: includes values which exceed a physical + * upper bound) + */ +jlong CgroupV1Subsystem::read_mem_swap() { + julong host_total_memsw; + GET_CONTAINER_INFO(julong, _memory->controller(), "/memory.memsw.limit_in_bytes", + "Memory and Swap Limit is: ", JULONG_FORMAT, JULONG_FORMAT, memswlimit); + host_total_memsw = os::Linux::host_swap() + os::Linux::physical_memory(); + if (memswlimit >= host_total_memsw) { + log_trace(os, container)("Non-Hierarchical Memory and Swap Limit is: Unlimited"); + CgroupV1MemoryController* mem_controller = reinterpret_cast(_memory->controller()); + if (mem_controller->is_hierarchical()) { + const char* matchline = "hierarchical_memsw_limit"; + GET_CONTAINER_INFO_LINE(julong, _memory->controller(), "/memory.stat", matchline, + "Hierarchical Memory and Swap Limit is : " JULONG_FORMAT, JULONG_FORMAT, hier_memswlimit) + if (hier_memswlimit >= host_total_memsw) { + log_trace(os, container)("Hierarchical Memory and Swap Limit is: Unlimited"); + } else { + return (jlong)hier_memswlimit; + } + } + return (jlong)-1; + } else { + return (jlong)memswlimit; + } +} + +jlong CgroupV1Subsystem::memory_and_swap_limit_in_bytes() { + jlong memory_swap = read_mem_swap(); + if (memory_swap == -1) { + return memory_swap; + } + // If there is a swap limit, but swappiness == 0, reset the limit + // to the memory limit. Do the same for cases where swap isn't + // supported. 
+ jlong swappiness = read_mem_swappiness(); + if (swappiness == 0 || memory_swap == OSCONTAINER_ERROR) { + jlong memlimit = read_memory_limit_in_bytes(); + if (memory_swap == OSCONTAINER_ERROR) { + log_trace(os, container)("Memory and Swap Limit has been reset to " JLONG_FORMAT " because swap is not supported", memlimit); + } else { + log_trace(os, container)("Memory and Swap Limit has been reset to " JLONG_FORMAT " because swappiness is 0", memlimit); + } + return memlimit; + } + return memory_swap; +} + +jlong CgroupV1Subsystem::read_mem_swappiness() { + GET_CONTAINER_INFO(julong, _memory->controller(), "/memory.swappiness", + "Swappiness is: ", JULONG_FORMAT, JULONG_FORMAT, swappiness); + return swappiness; +} + +jlong CgroupV1Subsystem::memory_soft_limit_in_bytes() { + GET_CONTAINER_INFO(julong, _memory->controller(), "/memory.soft_limit_in_bytes", + "Memory Soft Limit is: ", JULONG_FORMAT, JULONG_FORMAT, memsoftlimit); + if (memsoftlimit >= os::Linux::physical_memory()) { + log_trace(os, container)("Memory Soft Limit is: Unlimited"); + return (jlong)-1; + } else { + return (jlong)memsoftlimit; + } +} + +/* memory_usage_in_bytes + * + * Return the amount of used memory for this process. + * + * return: + * memory usage in bytes or + * -1 for unlimited + * OSCONTAINER_ERROR for not supported + */ +jlong CgroupV1Subsystem::memory_usage_in_bytes() { + GET_CONTAINER_INFO(jlong, _memory->controller(), "/memory.usage_in_bytes", + "Memory Usage is: ", JLONG_FORMAT, JLONG_FORMAT, memusage); + return memusage; +} + +/* memory_max_usage_in_bytes + * + * Return the maximum amount of used memory for this process. 
+ * + * return: + * max memory usage in bytes or + * OSCONTAINER_ERROR for not supported + */ +jlong CgroupV1Subsystem::memory_max_usage_in_bytes() { + GET_CONTAINER_INFO(jlong, _memory->controller(), "/memory.max_usage_in_bytes", + "Maximum Memory Usage is: ", JLONG_FORMAT, JLONG_FORMAT, memmaxusage); + return memmaxusage; +} + +jlong CgroupV1Subsystem::rss_usage_in_bytes() { + GET_CONTAINER_INFO_LINE(julong, _memory->controller(), "/memory.stat", + "rss", JULONG_FORMAT, JULONG_FORMAT, rss); + return rss; +} + +jlong CgroupV1Subsystem::cache_usage_in_bytes() { + GET_CONTAINER_INFO_LINE(julong, _memory->controller(), "/memory.stat", + "cache", JULONG_FORMAT, JULONG_FORMAT, cache); + return cache; +} + +jlong CgroupV1Subsystem::kernel_memory_usage_in_bytes() { + GET_CONTAINER_INFO(jlong, _memory->controller(), "/memory.kmem.usage_in_bytes", + "Kernel Memory Usage is: ", JLONG_FORMAT, JLONG_FORMAT, kmem_usage); + return kmem_usage; +} + +jlong CgroupV1Subsystem::kernel_memory_limit_in_bytes() { + GET_CONTAINER_INFO(julong, _memory->controller(), "/memory.kmem.limit_in_bytes", + "Kernel Memory Limit is: ", JULONG_FORMAT, JULONG_FORMAT, kmem_limit); + if (kmem_limit >= os::Linux::physical_memory()) { + return (jlong)-1; + } + return (jlong)kmem_limit; +} + +jlong CgroupV1Subsystem::kernel_memory_max_usage_in_bytes() { + GET_CONTAINER_INFO(jlong, _memory->controller(), "/memory.kmem.max_usage_in_bytes", + "Maximum Kernel Memory Usage is: ", JLONG_FORMAT, JLONG_FORMAT, kmem_max_usage); + return kmem_max_usage; +} + +#ifndef NATIVE_IMAGE +void CgroupV1Subsystem::print_version_specific_info(outputStream* st) { + jlong kmem_usage = kernel_memory_usage_in_bytes(); + jlong kmem_limit = kernel_memory_limit_in_bytes(); + jlong kmem_max_usage = kernel_memory_max_usage_in_bytes(); + + OSContainer::print_container_helper(st, kmem_usage, "kernel_memory_usage_in_bytes"); + OSContainer::print_container_helper(st, kmem_limit, "kernel_memory_max_usage_in_bytes"); + 
OSContainer::print_container_helper(st, kmem_max_usage, "kernel_memory_limit_in_bytes"); +} +#endif // !NATIVE_IMAGE + +char * CgroupV1Subsystem::cpu_cpuset_cpus() { + GET_CONTAINER_INFO_CPTR(cptr, _cpuset, "/cpuset.cpus", + "cpuset.cpus is: %s", "%1023s", cpus, 1024); + return os::strdup(cpus); +} + +char * CgroupV1Subsystem::cpu_cpuset_memory_nodes() { + GET_CONTAINER_INFO_CPTR(cptr, _cpuset, "/cpuset.mems", + "cpuset.mems is: %s", "%1023s", mems, 1024); + return os::strdup(mems); +} + +/* cpu_quota + * + * Return the number of microseconds per period + * process is guaranteed to run. + * + * return: + * quota time in microseconds + * -1 for no quota + * OSCONTAINER_ERROR for not supported + */ +int CgroupV1Subsystem::cpu_quota() { + GET_CONTAINER_INFO(int, _cpu->controller(), "/cpu.cfs_quota_us", + "CPU Quota is: ", "%d", "%d", quota); + return quota; +} + +int CgroupV1Subsystem::cpu_period() { + GET_CONTAINER_INFO(int, _cpu->controller(), "/cpu.cfs_period_us", + "CPU Period is: ", "%d", "%d", period); + return period; +} + +/* cpu_shares + * + * Return the amount of cpu shares available to the process + * + * return: + * Share number (typically a number relative to 1024) + * (2048 typically expresses 2 CPUs worth of processing) + * -1 for no share setup + * OSCONTAINER_ERROR for not supported + */ +int CgroupV1Subsystem::cpu_shares() { + GET_CONTAINER_INFO(int, _cpu->controller(), "/cpu.shares", + "CPU Shares is: ", "%d", "%d", shares); + // Convert 1024 to no shares setup + if (shares == 1024) return -1; + + return shares; +} + + +char* CgroupV1Subsystem::pids_max_val() { + GET_CONTAINER_INFO_CPTR(cptr, _pids, "/pids.max", + "Maximum number of tasks is: %s", "%1023s", pidsmax, 1024); + return os::strdup(pidsmax); +} + +/* pids_max + * + * Return the maximum number of tasks available to the process + * + * return: + * maximum number of tasks + * -1 for unlimited + * OSCONTAINER_ERROR for not supported + */ +jlong CgroupV1Subsystem::pids_max() { + if (_pids == 
nullptr) return OSCONTAINER_ERROR; + char * pidsmax_str = pids_max_val(); + return limit_from_str(pidsmax_str); +} + +/* pids_current + * + * The number of tasks currently in the cgroup (and its descendants) of the process + * + * return: + * current number of tasks + * OSCONTAINER_ERROR for not supported + */ +jlong CgroupV1Subsystem::pids_current() { + if (_pids == nullptr) return OSCONTAINER_ERROR; + GET_CONTAINER_INFO(jlong, _pids, "/pids.current", + "Current number of tasks is: ", JLONG_FORMAT, JLONG_FORMAT, pids_current); + return pids_current; +} diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp new file mode 100644 index 000000000000..e1c57a78fdb5 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef CGROUP_V1_SUBSYSTEM_LINUX_HPP +#define CGROUP_V1_SUBSYSTEM_LINUX_HPP + +#include "runtime/os.hpp" +#include "memory/allocation.hpp" +#include "cgroupSubsystem_linux.hpp" + +// Cgroups version 1 specific implementation + +class CgroupV1Controller: public CgroupController { + private: + /* mountinfo contents */ + char *_root; + char *_mount_point; + + /* Constructed subsystem directory */ + char *_path; + + public: + CgroupV1Controller(char *root, char *mountpoint) { + _root = os::strdup(root); + _mount_point = os::strdup(mountpoint); + _path = nullptr; + } + + virtual void set_subsystem_path(char *cgroup_path); + char *subsystem_path() { return _path; } +}; + +class CgroupV1MemoryController: public CgroupV1Controller { + + public: + bool is_hierarchical() { return _uses_mem_hierarchy; } + void set_subsystem_path(char *cgroup_path); + private: + /* Some container runtimes set limits via cgroup + * hierarchy. 
If set to true consider also memory.stat + * file if everything else seems unlimited */ + bool _uses_mem_hierarchy; + jlong uses_mem_hierarchy(); + void set_hierarchical(bool value) { _uses_mem_hierarchy = value; } + + public: + CgroupV1MemoryController(char *root, char *mountpoint) : CgroupV1Controller(root, mountpoint) { + _uses_mem_hierarchy = false; + } + +}; + +class CgroupV1Subsystem: public CgroupSubsystem { + + public: + jlong read_memory_limit_in_bytes(); + jlong memory_and_swap_limit_in_bytes(); + jlong memory_soft_limit_in_bytes(); + jlong memory_usage_in_bytes(); + jlong memory_max_usage_in_bytes(); + jlong rss_usage_in_bytes(); + jlong cache_usage_in_bytes(); + + jlong kernel_memory_usage_in_bytes(); + jlong kernel_memory_limit_in_bytes(); + jlong kernel_memory_max_usage_in_bytes(); + + char * cpu_cpuset_cpus(); + char * cpu_cpuset_memory_nodes(); + + int cpu_quota(); + int cpu_period(); + + int cpu_shares(); + + jlong pids_max(); + jlong pids_current(); + +#ifndef NATIVE_IMAGE + void print_version_specific_info(outputStream* st); +#endif // !NATIVE_IMAGE + + const char * container_type() { + return "cgroupv1"; + } + CachingCgroupController * memory_controller() { return _memory; } + CachingCgroupController * cpu_controller() { return _cpu; } + + private: + /* controllers */ + CachingCgroupController* _memory = nullptr; + CgroupV1Controller* _cpuset = nullptr; + CachingCgroupController* _cpu = nullptr; + CgroupV1Controller* _cpuacct = nullptr; + CgroupV1Controller* _pids = nullptr; + + char * pids_max_val(); + + jlong read_mem_swappiness(); + jlong read_mem_swap(); + + public: + CgroupV1Subsystem(CgroupV1Controller* cpuset, + CgroupV1Controller* cpu, + CgroupV1Controller* cpuacct, + CgroupV1Controller* pids, + CgroupV1MemoryController* memory) { + _cpuset = cpuset; + _cpu = new CachingCgroupController(cpu); + _cpuacct = cpuacct; + _pids = pids; + _memory = new CachingCgroupController(memory); + } +}; + +#endif // CGROUP_V1_SUBSYSTEM_LINUX_HPP diff 
--git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp new file mode 100644 index 000000000000..196d22fd7ac8 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp @@ -0,0 +1,279 @@ +/* + * Copyright (c) 2024, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2022, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "cgroupV2Subsystem_linux.hpp" + +/* cpu_shares + * + * Return the amount of cpu shares available to the process + * + * return: + * Share number (typically a number relative to 1024) + * (2048 typically expresses 2 CPUs worth of processing) + * -1 for no share setup + * OSCONTAINER_ERROR for not supported + */ +int CgroupV2Subsystem::cpu_shares() { + GET_CONTAINER_INFO(int, _unified, "/cpu.weight", + "Raw value for CPU Shares is: ", "%d", "%d", shares); + // Convert default value of 100 to no shares setup + if (shares == 100) { + log_debug(os, container)("CPU Shares is: %d", -1); + return -1; + } + + // CPU shares (OCI) value needs to get translated into + // a proper Cgroups v2 value. See: + // https://github.com/containers/crun/blob/master/crun.1.md#cpu-controller + // + // Use the inverse of (x == OCI value, y == cgroupsv2 value): + // ((262142 * y - 1)/9999) + 2 = x + // + int x = 262142 * shares - 1; + double frac = x/9999.0; + x = ((int)frac) + 2; + log_trace(os, container)("Scaled CPU shares value is: %d", x); + // Since the scaled value is not precise, return the closest + // multiple of PER_CPU_SHARES for a more conservative mapping + if ( x <= PER_CPU_SHARES ) { + // will always map to 1 CPU + log_debug(os, container)("CPU Shares is: %d", x); + return x; + } + int f = x/PER_CPU_SHARES; + int lower_multiple = f * PER_CPU_SHARES; + int upper_multiple = (f + 1) * PER_CPU_SHARES; + int distance_lower = MAX2(lower_multiple, x) - MIN2(lower_multiple, x); + int distance_upper = MAX2(upper_multiple, x) - MIN2(upper_multiple, x); + x = distance_lower <= distance_upper ? lower_multiple : upper_multiple; + log_trace(os, container)("Closest multiple of %d of the CPU Shares value is: %d", PER_CPU_SHARES, x); + log_debug(os, container)("CPU Shares is: %d", x); + return x; +} + +/* cpu_quota + * + * Return the number of microseconds per period + * process is guaranteed to run. 
+ * + * return: + * quota time in microseconds + * -1 for no quota + * OSCONTAINER_ERROR for not supported + */ +int CgroupV2Subsystem::cpu_quota() { + char * cpu_quota_str = cpu_quota_val(); + int limit = (int)limit_from_str(cpu_quota_str); + log_trace(os, container)("CPU Quota is: %d", limit); + return limit; +} + +char * CgroupV2Subsystem::cpu_cpuset_cpus() { + GET_CONTAINER_INFO_CPTR(cptr, _unified, "/cpuset.cpus", + "cpuset.cpus is: %s", "%1023s", cpus, 1024); + return os::strdup(cpus); +} + +char* CgroupV2Subsystem::cpu_quota_val() { + GET_CONTAINER_INFO_CPTR(cptr, _unified, "/cpu.max", + "Raw value for CPU quota is: %s", "%1023s %*d", quota, 1024); + return os::strdup(quota); +} + +char * CgroupV2Subsystem::cpu_cpuset_memory_nodes() { + GET_CONTAINER_INFO_CPTR(cptr, _unified, "/cpuset.mems", + "cpuset.mems is: %s", "%1023s", mems, 1024); + return os::strdup(mems); +} + +int CgroupV2Subsystem::cpu_period() { + GET_CONTAINER_INFO(int, _unified, "/cpu.max", + "CPU Period is: ", "%d", "%*s %d", period); + return period; +} + +/* memory_usage_in_bytes + * + * Return the amount of used memory used by this cgroup and descendents + * + * return: + * memory usage in bytes or + * -1 for unlimited + * OSCONTAINER_ERROR for not supported + */ +jlong CgroupV2Subsystem::memory_usage_in_bytes() { + GET_CONTAINER_INFO(jlong, _unified, "/memory.current", + "Memory Usage is: ", JLONG_FORMAT, JLONG_FORMAT, memusage); + return memusage; +} + +jlong CgroupV2Subsystem::memory_soft_limit_in_bytes() { + char* mem_soft_limit_str = mem_soft_limit_val(); + return limit_from_str(mem_soft_limit_str); +} + +jlong CgroupV2Subsystem::memory_max_usage_in_bytes() { + // Log this string at trace level so as to make tests happy. 
+ log_trace(os, container)("Maximum Memory Usage is not supported."); + return OSCONTAINER_ERROR; // not supported +} + +jlong CgroupV2Subsystem::rss_usage_in_bytes() { + GET_CONTAINER_INFO_LINE(julong, _memory->controller(), "/memory.stat", + "anon", JULONG_FORMAT, JULONG_FORMAT, rss); + return rss; +} + +jlong CgroupV2Subsystem::cache_usage_in_bytes() { + GET_CONTAINER_INFO_LINE(julong, _memory->controller(), "/memory.stat", + "file", JULONG_FORMAT, JULONG_FORMAT, cache); + return cache; +} + +char* CgroupV2Subsystem::mem_soft_limit_val() { + GET_CONTAINER_INFO_CPTR(cptr, _unified, "/memory.low", + "Memory Soft Limit is: %s", "%1023s", mem_soft_limit_str, 1024); + return os::strdup(mem_soft_limit_str); +} + +// Note that for cgroups v2 the actual limits set for swap and +// memory live in two different files, memory.swap.max and memory.max +// respectively. In order to properly report a cgroup v1 like +// compound value we need to sum the two values. Setting a swap limit +// without also setting a memory limit is not allowed. +jlong CgroupV2Subsystem::memory_and_swap_limit_in_bytes() { + char* mem_swp_limit_str = mem_swp_limit_val(); + if (mem_swp_limit_str == nullptr) { + // Some container tests rely on this trace logging to happen. 
+ log_trace(os, container)("Memory and Swap Limit is: %d", OSCONTAINER_ERROR); + // swap disabled at kernel level, treat it as no swap + return read_memory_limit_in_bytes(); + } + jlong swap_limit = limit_from_str(mem_swp_limit_str); + if (swap_limit >= 0) { + jlong memory_limit = read_memory_limit_in_bytes(); + assert(memory_limit >= 0, "swap limit without memory limit?"); + return memory_limit + swap_limit; + } + log_trace(os, container)("Memory and Swap Limit is: " JLONG_FORMAT, swap_limit); + return swap_limit; +} + +char* CgroupV2Subsystem::mem_swp_limit_val() { + GET_CONTAINER_INFO_CPTR(cptr, _unified, "/memory.swap.max", + "Memory and Swap Limit is: %s", "%1023s", mem_swp_limit_str, 1024); + return os::strdup(mem_swp_limit_str); +} + +// memory.swap.current : total amount of swap currently used by the cgroup and its descendants +char* CgroupV2Subsystem::mem_swp_current_val() { + GET_CONTAINER_INFO_CPTR(cptr, _unified, "/memory.swap.current", + "Swap currently used is: %s", "%1023s", mem_swp_current_str, 1024); + return os::strdup(mem_swp_current_str); +} + +/* memory_limit_in_bytes + * + * Return the limit of available memory for this process. 
+ * + * return: + * memory limit in bytes or + * -1 for unlimited, OSCONTAINER_ERROR for an error + */ +jlong CgroupV2Subsystem::read_memory_limit_in_bytes() { + char * mem_limit_str = mem_limit_val(); + jlong limit = limit_from_str(mem_limit_str); + if (log_is_enabled(Trace, os, container)) { + if (limit == -1) { + log_trace(os, container)("Memory Limit is: Unlimited"); + } else { + log_trace(os, container)("Memory Limit is: " JLONG_FORMAT, limit); + } + } + return limit; +} + +char* CgroupV2Subsystem::mem_limit_val() { + GET_CONTAINER_INFO_CPTR(cptr, _unified, "/memory.max", + "Raw value for memory limit is: %s", "%1023s", mem_limit_str, 1024); + return os::strdup(mem_limit_str); +} + +#ifndef NATIVE_IMAGE +void CgroupV2Subsystem::print_version_specific_info(outputStream* st) { + char* mem_swp_current_str = mem_swp_current_val(); + jlong swap_current = limit_from_str(mem_swp_current_str); + + char* mem_swp_limit_str = mem_swp_limit_val(); + jlong swap_limit = limit_from_str(mem_swp_limit_str); + + OSContainer::print_container_helper(st, swap_current, "memory_swap_current_in_bytes"); + OSContainer::print_container_helper(st, swap_limit, "memory_swap_max_limit_in_bytes"); +} +#endif // !NATIVE_IMAGE + +char* CgroupV2Controller::construct_path(char* mount_path, char *cgroup_path) { + stringStream ss; + ss.print_raw(mount_path); + if (strcmp(cgroup_path, "/") != 0) { + ss.print_raw(cgroup_path); + } + return os::strdup(ss.base()); +} + +char* CgroupV2Subsystem::pids_max_val() { + GET_CONTAINER_INFO_CPTR(cptr, _unified, "/pids.max", + "Maximum number of tasks is: %s", "%1023s", pidsmax, 1024); + return os::strdup(pidsmax); +} + +/* pids_max + * + * Return the maximum number of tasks available to the process + * + * return: + * maximum number of tasks + * -1 for unlimited + * OSCONTAINER_ERROR for not supported + */ +jlong CgroupV2Subsystem::pids_max() { + char * pidsmax_str = pids_max_val(); + return limit_from_str(pidsmax_str); +} + +/* pids_current + * + * The 
number of tasks currently in the cgroup (and its descendants) of the process + * + * return: + * current number of tasks + * OSCONTAINER_ERROR for not supported + */ +jlong CgroupV2Subsystem::pids_current() { + GET_CONTAINER_INFO(jlong, _unified, "/pids.current", + "Current number of tasks is: ", JLONG_FORMAT, JLONG_FORMAT, pids_current); + return pids_current; +} diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp new file mode 100644 index 000000000000..c25b740b3b63 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2024, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2022, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef CGROUP_V2_SUBSYSTEM_LINUX_HPP +#define CGROUP_V2_SUBSYSTEM_LINUX_HPP + +#include "cgroupSubsystem_linux.hpp" + +class CgroupV2Controller: public CgroupController { + private: + /* the mount path of the cgroup v2 hierarchy */ + char *_mount_path; + /* The cgroup path for the controller */ + char *_cgroup_path; + + /* Constructed full path to the subsystem directory */ + char *_path; + static char* construct_path(char* mount_path, char *cgroup_path); + + public: + CgroupV2Controller(char * mount_path, char *cgroup_path) { + _mount_path = mount_path; + _cgroup_path = os::strdup(cgroup_path); + _path = construct_path(mount_path, cgroup_path); + } + + char *subsystem_path() { return _path; } +}; + +class CgroupV2Subsystem: public CgroupSubsystem { + private: + /* One unified controller */ + CgroupController* _unified = nullptr; + /* Caching wrappers for cpu/memory metrics */ + CachingCgroupController* _memory = nullptr; + CachingCgroupController* _cpu = nullptr; + + char *mem_limit_val(); + char *mem_swp_limit_val(); + char *mem_swp_current_val(); + char *mem_soft_limit_val(); + char *cpu_quota_val(); + char *pids_max_val(); + + public: + CgroupV2Subsystem(CgroupController * unified) { + _unified = unified; + _memory = new CachingCgroupController(unified); + _cpu = new CachingCgroupController(unified); + } + + jlong read_memory_limit_in_bytes(); + int cpu_quota(); + int cpu_period(); + int cpu_shares(); + jlong memory_and_swap_limit_in_bytes(); + jlong memory_soft_limit_in_bytes(); + jlong memory_usage_in_bytes(); + jlong memory_max_usage_in_bytes(); + jlong rss_usage_in_bytes(); + jlong cache_usage_in_bytes(); + + char * cpu_cpuset_cpus(); + char * cpu_cpuset_memory_nodes(); + jlong pids_max(); + jlong pids_current(); + +#ifndef NATIVE_IMAGE + void 
print_version_specific_info(outputStream* st); +#endif // !NATIVE_IMAGE + + const char * container_type() { + return "cgroupv2"; + } + CachingCgroupController * memory_controller() { return _memory; } + CachingCgroupController * cpu_controller() { return _cpu; } +}; + +#endif // CGROUP_V2_SUBSYSTEM_LINUX_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/osContainer_linux.cpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/osContainer_linux.cpp new file mode 100644 index 000000000000..d3f1aca2356b --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/osContainer_linux.cpp @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+#include <string.h>
+#include <math.h>
+#include <errno.h>
+#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
+#include "logging/log.hpp"
+#include "os_linux.hpp"
+#include "osContainer_linux.hpp"
+#include "cgroupSubsystem_linux.hpp"
+
+
+bool OSContainer::_is_initialized = false;
+bool OSContainer::_is_containerized = false;
+CgroupSubsystem* cgroup_subsystem;
+
+/* init
+ *
+ * Initialize the container support and determine if
+ * we are running under cgroup control.
+ */
+void OSContainer::init() {
+  assert(!_is_initialized, "Initializing OSContainer more than once");
+
+  _is_initialized = true;
+  _is_containerized = false;
+
+  log_trace(os, container)("OSContainer::init: Initializing Container Support");
+  if (!UseContainerSupport) {
+    log_trace(os, container)("Container Support not enabled");
+    return;
+  }
+
+  cgroup_subsystem = CgroupSubsystemFactory::create();
+  if (cgroup_subsystem == nullptr) {
+    return; // Required subsystem files not found or other error
+  }
+
+  _is_containerized = true;
+}
+
+const char * OSContainer::container_type() {
+  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
+  return cgroup_subsystem->container_type();
+}
+
+jlong OSContainer::memory_limit_in_bytes() {
+  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
+  return cgroup_subsystem->memory_limit_in_bytes();
+}
+
+jlong OSContainer::memory_and_swap_limit_in_bytes() {
+  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
+  return cgroup_subsystem->memory_and_swap_limit_in_bytes();
+}
+
+jlong OSContainer::memory_soft_limit_in_bytes() {
+  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
+  return cgroup_subsystem->memory_soft_limit_in_bytes();
+}
+
+jlong OSContainer::memory_usage_in_bytes() {
+  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
+  return cgroup_subsystem->memory_usage_in_bytes();
+}
+
+jlong OSContainer::memory_max_usage_in_bytes() {
+  assert(cgroup_subsystem != nullptr,
"cgroup subsystem not available"); + return cgroup_subsystem->memory_max_usage_in_bytes(); +} + +jlong OSContainer::rss_usage_in_bytes() { + assert(cgroup_subsystem != nullptr, "cgroup subsystem not available"); + return cgroup_subsystem->rss_usage_in_bytes(); +} + +jlong OSContainer::cache_usage_in_bytes() { + assert(cgroup_subsystem != nullptr, "cgroup subsystem not available"); + return cgroup_subsystem->cache_usage_in_bytes(); +} + +#ifndef NATIVE_IMAGE +void OSContainer::print_version_specific_info(outputStream* st) { + assert(cgroup_subsystem != nullptr, "cgroup subsystem not available"); + cgroup_subsystem->print_version_specific_info(st); +} +#endif // !NATIVE_IMAGE + +char * OSContainer::cpu_cpuset_cpus() { + assert(cgroup_subsystem != nullptr, "cgroup subsystem not available"); + return cgroup_subsystem->cpu_cpuset_cpus(); +} + +char * OSContainer::cpu_cpuset_memory_nodes() { + assert(cgroup_subsystem != nullptr, "cgroup subsystem not available"); + return cgroup_subsystem->cpu_cpuset_memory_nodes(); +} + +int OSContainer::active_processor_count() { + assert(cgroup_subsystem != nullptr, "cgroup subsystem not available"); + return cgroup_subsystem->active_processor_count(); +} + +int OSContainer::cpu_quota() { + assert(cgroup_subsystem != nullptr, "cgroup subsystem not available"); + return cgroup_subsystem->cpu_quota(); +} + +int OSContainer::cpu_period() { + assert(cgroup_subsystem != nullptr, "cgroup subsystem not available"); + return cgroup_subsystem->cpu_period(); +} + +int OSContainer::cpu_shares() { + assert(cgroup_subsystem != nullptr, "cgroup subsystem not available"); + return cgroup_subsystem->cpu_shares(); +} + +jlong OSContainer::pids_max() { + assert(cgroup_subsystem != nullptr, "cgroup subsystem not available"); + return cgroup_subsystem->pids_max(); +} + +jlong OSContainer::pids_current() { + assert(cgroup_subsystem != nullptr, "cgroup subsystem not available"); + return cgroup_subsystem->pids_current(); +} + +#ifndef NATIVE_IMAGE +void 
OSContainer::print_container_helper(outputStream* st, jlong j, const char* metrics) { + st->print("%s: ", metrics); + if (j > 0) { + if (j >= 1024) { + st->print_cr(UINT64_FORMAT " k", uint64_t(j) / K); + } else { + st->print_cr(UINT64_FORMAT, uint64_t(j)); + } + } else { + st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited"); + } +} +#endif // !NATIVE_IMAGE diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/osContainer_linux.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/osContainer_linux.hpp new file mode 100644 index 000000000000..e8ab1bd2a28c --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/osContainer_linux.hpp @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef OS_LINUX_OSCONTAINER_LINUX_HPP +#define OS_LINUX_OSCONTAINER_LINUX_HPP + +#include "utilities/globalDefinitions.hpp" +#include "utilities/macros.hpp" +#include "utilities/ostream.hpp" +#include "memory/allStatic.hpp" + +#define OSCONTAINER_ERROR (-2) + +// 20ms timeout between re-reads of memory limit and _active_processor_count. +#define OSCONTAINER_CACHE_TIMEOUT (NANOSECS_PER_SEC/50) + +class OSContainer: AllStatic { + + private: + static bool _is_initialized; + static bool _is_containerized; + static int _active_processor_count; + + public: + static void init(); +#ifndef NATIVE_IMAGE + static void print_version_specific_info(outputStream* st); + static void print_container_helper(outputStream* st, jlong j, const char* metrics); +#endif // !NATIVE_IMAGE + + static inline bool is_containerized(); + static const char * container_type(); + + static jlong memory_limit_in_bytes(); + static jlong memory_and_swap_limit_in_bytes(); + static jlong memory_soft_limit_in_bytes(); + static jlong memory_usage_in_bytes(); + static jlong memory_max_usage_in_bytes(); + static jlong rss_usage_in_bytes(); + static jlong cache_usage_in_bytes(); + + static int active_processor_count(); + + static char * cpu_cpuset_cpus(); + static char * cpu_cpuset_memory_nodes(); + + static int cpu_quota(); + static int cpu_period(); + + static int cpu_shares(); + + static jlong pids_max(); + static jlong pids_current(); +}; + +inline bool OSContainer::is_containerized() { + return _is_containerized; +} + +#endif // OS_LINUX_OSCONTAINER_LINUX_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/os_linux.cpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/os_linux.cpp new file mode 100644 index 000000000000..d6463aaa9cfa --- /dev/null +++ 
b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/os_linux.cpp @@ -0,0 +1,5533 @@ +/* + * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2024, SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef NATIVE_IMAGE +// no precompiled headers +#include "classfile/vmSymbols.hpp" +#include "code/icBuffer.hpp" +#include "code/vtableStubs.hpp" +#include "compiler/compileBroker.hpp" +#include "compiler/disassembler.hpp" +#include "hugepages.hpp" +#include "interpreter/interpreter.hpp" +#include "jvm.h" +#include "jvmtifiles/jvmti.h" +#endif // !NATIVE_IMAGE +#include "logging/log.hpp" +#ifndef NATIVE_IMAGE +#include "logging/logStream.hpp" +#include "memory/allocation.inline.hpp" +#include "nmt/memTracker.hpp" +#include "oops/oop.inline.hpp" +#endif // !NATIVE_IMAGE +#include "osContainer_linux.hpp" +#include "os_linux.inline.hpp" +#include "os_posix.inline.hpp" +#ifndef NATIVE_IMAGE +#include "prims/jniFastGetField.hpp" +#include "prims/jvm_misc.hpp" +#include "runtime/arguments.hpp" +#include "runtime/atomic.hpp" +#endif // !NATIVE_IMAGE +#include "runtime/globals.hpp" +#ifndef NATIVE_IMAGE +#include "runtime/globals_extension.hpp" +#include "runtime/init.hpp" +#include "runtime/interfaceSupport.inline.hpp" +#include "runtime/java.hpp" +#include "runtime/javaCalls.hpp" +#include "runtime/javaThread.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/objectMonitor.hpp" +#include "runtime/osInfo.hpp" +#include "runtime/osThread.hpp" +#include "runtime/perfMemory.hpp" +#include "runtime/sharedRuntime.hpp" +#include "runtime/statSampler.hpp" +#include "runtime/stubRoutines.hpp" +#include "runtime/threadCritical.hpp" +#include "runtime/threads.hpp" +#include "runtime/threadSMR.hpp" +#include "runtime/timer.hpp" +#include "runtime/vm_version.hpp" +#include "semaphore_posix.hpp" +#include "services/runtimeService.hpp" +#include "signals_posix.hpp" +#include "utilities/align.hpp" +#endif // !NATIVE_IMAGE +#include "utilities/checkedCast.hpp" +#ifndef NATIVE_IMAGE +#include "utilities/debug.hpp" +#include "utilities/decoder.hpp" +#include "utilities/defaultStream.hpp" +#include "utilities/elfFile.hpp" +#include "utilities/events.hpp" +#include 
"utilities/globalDefinitions.hpp" +#include "utilities/growableArray.hpp" +#include "utilities/macros.hpp" +#include "utilities/powerOfTwo.hpp" +#include "utilities/vmError.hpp" +#if INCLUDE_JFR +#include "jfr/jfrEvents.hpp" +#include "jfr/support/jfrNativeLibraryLoadEvent.hpp" +#endif +#endif // !NATIVE_IMAGE + +// put OS-includes here +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +#ifdef __GLIBC__ +# include +#endif + +#ifndef _GNU_SOURCE + #define _GNU_SOURCE + #include + #undef _GNU_SOURCE +#else + #include +#endif + +#ifndef NATIVE_IMAGE +// if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling +// getrusage() is prepared to handle the associated failure. +#ifndef RUSAGE_THREAD + #define RUSAGE_THREAD (1) /* only the calling thread */ +#endif + +#define MAX_PATH (2 * K) + +#define MAX_SECS 100000000 + +// for timer info max values which include all bits +#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) + +#ifdef MUSL_LIBC +// dlvsym is not a part of POSIX +// and musl libc doesn't implement it. 
+static void *dlvsym(void *handle, + const char *symbol, + const char *version) { + // load the latest version of symbol + return dlsym(handle, symbol); +} +#endif + +enum CoredumpFilterBit { + FILE_BACKED_PVT_BIT = 1 << 2, + FILE_BACKED_SHARED_BIT = 1 << 3, + LARGEPAGES_BIT = 1 << 6, + DAX_SHARED_BIT = 1 << 8 +}; +#endif // !NATIVE_IMAGE + +//////////////////////////////////////////////////////////////////////////////// +// global variables +julong os::Linux::_physical_memory = 0; + +#ifndef NATIVE_IMAGE +address os::Linux::_initial_thread_stack_bottom = nullptr; +uintptr_t os::Linux::_initial_thread_stack_size = 0; + +int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = nullptr; +int (*os::Linux::_pthread_setname_np)(pthread_t, const char*) = nullptr; +pthread_t os::Linux::_main_thread; +bool os::Linux::_supports_fast_thread_cpu_time = false; +const char * os::Linux::_libc_version = nullptr; +const char * os::Linux::_libpthread_version = nullptr; + +bool os::Linux::_thp_requested{false}; + +#ifdef __GLIBC__ +// We want to be buildable and runnable on older and newer glibcs, so resolve both +// mallinfo and mallinfo2 dynamically. 
+struct old_mallinfo { + int arena; + int ordblks; + int smblks; + int hblks; + int hblkhd; + int usmblks; + int fsmblks; + int uordblks; + int fordblks; + int keepcost; +}; +typedef struct old_mallinfo (*mallinfo_func_t)(void); +static mallinfo_func_t g_mallinfo = nullptr; + +struct new_mallinfo { + size_t arena; + size_t ordblks; + size_t smblks; + size_t hblks; + size_t hblkhd; + size_t usmblks; + size_t fsmblks; + size_t uordblks; + size_t fordblks; + size_t keepcost; +}; +typedef struct new_mallinfo (*mallinfo2_func_t)(void); +static mallinfo2_func_t g_mallinfo2 = nullptr; + +typedef int (*malloc_info_func_t)(int options, FILE *stream); +static malloc_info_func_t g_malloc_info = nullptr; +#endif // __GLIBC__ + +static int clock_tics_per_sec = 100; + +// If the VM might have been created on the primordial thread, we need to resolve the +// primordial thread stack bounds and check if the current thread might be the +// primordial thread in places. If we know that the primordial thread is never used, +// such as when the VM was created by one of the standard java launchers, we can +// avoid this +static bool suppress_primordial_thread_resolution = false; + +// utility functions + +julong os::Linux::available_memory_in_container() { + julong avail_mem = static_cast(-1L); + if (OSContainer::is_containerized()) { + jlong mem_limit = OSContainer::memory_limit_in_bytes(); + jlong mem_usage; + if (mem_limit > 0 && (mem_usage = OSContainer::memory_usage_in_bytes()) < 1) { + log_debug(os, container)("container memory usage failed: " JLONG_FORMAT ", using host value", mem_usage); + } + if (mem_limit > 0 && mem_usage > 0) { + avail_mem = mem_limit > mem_usage ? 
(julong)mem_limit - (julong)mem_usage : 0; + } + } + return avail_mem; +} + +julong os::available_memory() { + return Linux::available_memory(); +} + +julong os::Linux::available_memory() { + julong avail_mem = available_memory_in_container(); + if (avail_mem != static_cast(-1L)) { + log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem); + return avail_mem; + } + + FILE *fp = os::fopen("/proc/meminfo", "r"); + if (fp != nullptr) { + char buf[80]; + do { + if (fscanf(fp, "MemAvailable: " JULONG_FORMAT " kB", &avail_mem) == 1) { + avail_mem *= K; + break; + } + } while (fgets(buf, sizeof(buf), fp) != nullptr); + fclose(fp); + } + if (avail_mem == static_cast(-1L)) { + avail_mem = free_memory(); + } + log_trace(os)("available memory: " JULONG_FORMAT, avail_mem); + return avail_mem; +} + +julong os::free_memory() { + return Linux::free_memory(); +} + +julong os::Linux::free_memory() { + // values in struct sysinfo are "unsigned long" + struct sysinfo si; + julong free_mem = available_memory_in_container(); + if (free_mem != static_cast(-1L)) { + log_trace(os)("free container memory: " JULONG_FORMAT, free_mem); + return free_mem; + } + + sysinfo(&si); + free_mem = (julong)si.freeram * si.mem_unit; + log_trace(os)("free memory: " JULONG_FORMAT, free_mem); + return free_mem; +} +#endif // !NATIVE_IMAGE + +julong os::physical_memory() { + jlong phys_mem = 0; + if (OSContainer::is_containerized()) { + jlong mem_limit; + if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) { + log_trace(os)("total container memory: " JLONG_FORMAT, mem_limit); + return mem_limit; + } + } + + phys_mem = Linux::physical_memory(); + log_trace(os)("total system memory: " JLONG_FORMAT, phys_mem); + return phys_mem; +} + +#ifndef NATIVE_IMAGE +static uint64_t initial_total_ticks = 0; +static uint64_t initial_steal_ticks = 0; +static bool has_initial_tick_info = false; + +static void next_line(FILE *f) { + int c; + do { + c = fgetc(f); + } while (c != '\n' && c != EOF); +} + 
+bool os::Linux::get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu) { + FILE* fh; + uint64_t userTicks, niceTicks, systemTicks, idleTicks; + // since at least kernel 2.6 : iowait: time waiting for I/O to complete + // irq: time servicing interrupts; softirq: time servicing softirqs + uint64_t iowTicks = 0, irqTicks = 0, sirqTicks= 0; + // steal (since kernel 2.6.11): time spent in other OS when running in a virtualized environment + uint64_t stealTicks = 0; + // guest (since kernel 2.6.24): time spent running a virtual CPU for guest OS under the + // control of the Linux kernel + uint64_t guestNiceTicks = 0; + int logical_cpu = -1; + const int required_tickinfo_count = (which_logical_cpu == -1) ? 4 : 5; + int n; + + memset(pticks, 0, sizeof(CPUPerfTicks)); + + if ((fh = os::fopen("/proc/stat", "r")) == nullptr) { + return false; + } + + if (which_logical_cpu == -1) { + n = fscanf(fh, "cpu " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " + UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " + UINT64_FORMAT " " UINT64_FORMAT " ", + &userTicks, &niceTicks, &systemTicks, &idleTicks, + &iowTicks, &irqTicks, &sirqTicks, + &stealTicks, &guestNiceTicks); + } else { + // Move to next line + next_line(fh); + + // find the line for requested cpu faster to just iterate linefeeds? 
+ for (int i = 0; i < which_logical_cpu; i++) { + next_line(fh); + } + + n = fscanf(fh, "cpu%u " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " + UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " + UINT64_FORMAT " " UINT64_FORMAT " ", + &logical_cpu, &userTicks, &niceTicks, + &systemTicks, &idleTicks, &iowTicks, &irqTicks, &sirqTicks, + &stealTicks, &guestNiceTicks); + } + + fclose(fh); + if (n < required_tickinfo_count || logical_cpu != which_logical_cpu) { + return false; + } + pticks->used = userTicks + niceTicks; + pticks->usedKernel = systemTicks + irqTicks + sirqTicks; + pticks->total = userTicks + niceTicks + systemTicks + idleTicks + + iowTicks + irqTicks + sirqTicks + stealTicks + guestNiceTicks; + + if (n > required_tickinfo_count + 3) { + pticks->steal = stealTicks; + pticks->has_steal_ticks = true; + } else { + pticks->steal = 0; + pticks->has_steal_ticks = false; + } + + return true; +} + +#ifndef SYS_gettid +// i386: 224, ia64: 1105, amd64: 186, sparc: 143 + #ifdef __ia64__ + #define SYS_gettid 1105 + #else + #ifdef __i386__ + #define SYS_gettid 224 + #else + #ifdef __amd64__ + #define SYS_gettid 186 + #else + #ifdef __sparc__ + #define SYS_gettid 143 + #else + #error define gettid for the arch + #endif + #endif + #endif + #endif +#endif + + +// pid_t gettid() +// +// Returns the kernel thread id of the currently running thread. Kernel +// thread id is used to access /proc. +pid_t os::Linux::gettid() { + long rslt = syscall(SYS_gettid); + assert(rslt != -1, "must be."); // old linuxthreads implementation? + return (pid_t)rslt; +} +#endif // !NATIVE_IMAGE + +// Returns the amount of swap currently configured, in bytes. +// This can change at any time. +julong os::Linux::host_swap() { + struct sysinfo si; + sysinfo(&si); + return (julong)si.totalswap; +} + +#ifndef NATIVE_IMAGE +// Most versions of linux have a bug where the number of processors are +// determined by looking at the /proc file system. 
In a chroot environment, +// the system call returns 1. +static bool unsafe_chroot_detected = false; +static const char *unstable_chroot_error = "/proc file system not found.\n" + "Java may be unstable running multithreaded in a chroot " + "environment on Linux when /proc filesystem is not mounted."; +#endif // !NATIVE_IMAGE + +void os::Linux::initialize_system_info() { + set_processor_count((int)sysconf(_SC_NPROCESSORS_CONF)); +#ifndef NATIVE_IMAGE + if (processor_count() == 1) { + pid_t pid = os::Linux::gettid(); + char fname[32]; + jio_snprintf(fname, sizeof(fname), "/proc/%d", pid); + FILE *fp = os::fopen(fname, "r"); + if (fp == nullptr) { + unsafe_chroot_detected = true; + } else { + fclose(fp); + } + } +#endif // !NATIVE_IMAGE + _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE); + assert(processor_count() > 0, "linux error"); +} + +#ifndef NATIVE_IMAGE +void os::init_system_properties_values() { + // The next steps are taken in the product version: + // + // Obtain the JAVA_HOME value from the location of libjvm.so. + // This library should be located at: + // /lib/{client|server}/libjvm.so. + // + // If "/jre/lib/" appears at the right place in the path, then we + // assume libjvm.so is installed in a JDK and we use this path. + // + // Otherwise exit with message: "Could not create the Java virtual machine." + // + // The following extra steps are taken in the debugging version: + // + // If "/jre/lib/" does NOT appear at the right place in the path + // instead of exit check for $JAVA_HOME environment variable. + // + // If it is defined and we are able to locate $JAVA_HOME/jre/lib/, + // then we append a fake suffix "hotspot/libjvm.so" to this path so + // it looks like libjvm.so is installed there + // /jre/lib//hotspot/libjvm.so. + // + // Otherwise exit. + // + // Important note: if the location of libjvm.so changes this + // code needs to be changed accordingly. 
+ + // See ld(1): + // The linker uses the following search paths to locate required + // shared libraries: + // 1: ... + // ... + // 7: The default directories, normally /lib and /usr/lib. +#ifndef OVERRIDE_LIBPATH + #if defined(_LP64) + #define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib" + #else + #define DEFAULT_LIBPATH "/lib:/usr/lib" + #endif +#else + #define DEFAULT_LIBPATH OVERRIDE_LIBPATH +#endif + +// Base path of extensions installed on the system. +#define SYS_EXT_DIR "/usr/java/packages" +#define EXTENSIONS_DIR "/lib/ext" + + // Buffer that fits several snprintfs. + // Note that the space for the colon and the trailing null are provided + // by the nulls included by the sizeof operator. + const size_t bufsize = + MAX2((size_t)MAXPATHLEN, // For dll_dir & friends. + (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir + char *buf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); + + // sysclasspath, java_home, dll_dir + { + char *pslash; + os::jvm_path(buf, bufsize); + + // Found the full path to libjvm.so. + // Now cut the path to /jre if we can. + pslash = strrchr(buf, '/'); + if (pslash != nullptr) { + *pslash = '\0'; // Get rid of /libjvm.so. + } + pslash = strrchr(buf, '/'); + if (pslash != nullptr) { + *pslash = '\0'; // Get rid of /{client|server|hotspot}. + } + Arguments::set_dll_dir(buf); + + if (pslash != nullptr) { + pslash = strrchr(buf, '/'); + if (pslash != nullptr) { + *pslash = '\0'; // Get rid of /lib. + } + } + Arguments::set_java_home(buf); + if (!set_boot_path('/', ':')) { + vm_exit_during_initialization("Failed setting boot class path.", nullptr); + } + } + + // Where to look for native libraries. + // + // Note: Due to a legacy implementation, most of the library path + // is set in the launcher. This was to accommodate linking restrictions + // on legacy Linux implementations (which are no longer supported). + // Eventually, all the library path setting will be done here. 
+ // + // However, to prevent the proliferation of improperly built native + // libraries, the new path component /usr/java/packages is added here. + // Eventually, all the library path setting will be done here. + { + // Get the user setting of LD_LIBRARY_PATH, and prepended it. It + // should always exist (until the legacy problem cited above is + // addressed). + const char *v = ::getenv("LD_LIBRARY_PATH"); + const char *v_colon = ":"; + if (v == nullptr) { v = ""; v_colon = ""; } + // That's +1 for the colon and +1 for the trailing '\0'. + size_t pathsize = strlen(v) + 1 + sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(DEFAULT_LIBPATH) + 1; + char *ld_library_path = NEW_C_HEAP_ARRAY(char, pathsize, mtInternal); + os::snprintf_checked(ld_library_path, pathsize, "%s%s" SYS_EXT_DIR "/lib:" DEFAULT_LIBPATH, v, v_colon); + Arguments::set_library_path(ld_library_path); + FREE_C_HEAP_ARRAY(char, ld_library_path); + } + + // Extensions directories. + os::snprintf_checked(buf, bufsize, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home()); + Arguments::set_ext_dirs(buf); + + FREE_C_HEAP_ARRAY(char, buf); + +#undef DEFAULT_LIBPATH +#undef SYS_EXT_DIR +#undef EXTENSIONS_DIR +} + +//////////////////////////////////////////////////////////////////////////////// +// breakpoint support + +void os::breakpoint() { + BREAKPOINT; +} + +extern "C" void breakpoint() { + // use debugger to set breakpoint here +} + +////////////////////////////////////////////////////////////////////////////// +// detecting pthread library + +void os::Linux::libpthread_init() { + // Save glibc and pthread version strings. 
+#if !defined(_CS_GNU_LIBC_VERSION) || \ + !defined(_CS_GNU_LIBPTHREAD_VERSION) + #error "glibc too old (< 2.3.2)" +#endif + +#ifdef MUSL_LIBC + // confstr() from musl libc returns EINVAL for + // _CS_GNU_LIBC_VERSION and _CS_GNU_LIBPTHREAD_VERSION + os::Linux::set_libc_version("musl - unknown"); + os::Linux::set_libpthread_version("musl - unknown"); +#else + size_t n = confstr(_CS_GNU_LIBC_VERSION, nullptr, 0); + assert(n > 0, "cannot retrieve glibc version"); + char *str = (char *)malloc(n, mtInternal); + confstr(_CS_GNU_LIBC_VERSION, str, n); + os::Linux::set_libc_version(str); + + n = confstr(_CS_GNU_LIBPTHREAD_VERSION, nullptr, 0); + assert(n > 0, "cannot retrieve pthread version"); + str = (char *)malloc(n, mtInternal); + confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n); + os::Linux::set_libpthread_version(str); +#endif +} + +///////////////////////////////////////////////////////////////////////////// +// thread stack expansion + +// os::Linux::manually_expand_stack() takes care of expanding the thread +// stack. Note that this is normally not needed: pthread stacks allocate +// thread stack using mmap() without MAP_NORESERVE, so the stack is already +// committed. Therefore it is not necessary to expand the stack manually. +// +// Manually expanding the stack was historically needed on LinuxThreads +// thread stacks, which were allocated with mmap(MAP_GROWSDOWN). Nowadays +// it is kept to deal with very rare corner cases: +// +// For one, user may run the VM on an own implementation of threads +// whose stacks are - like the old LinuxThreads - implemented using +// mmap(MAP_GROWSDOWN). +// +// Also, this coding may be needed if the VM is running on the primordial +// thread. Normally we avoid running on the primordial thread; however, +// user may still invoke the VM on the primordial thread. 
+// +// The following historical comment describes the details about running +// on a thread stack allocated with mmap(MAP_GROWSDOWN): + + +// Force Linux kernel to expand current thread stack. If "bottom" is close +// to the stack guard, caller should block all signals. +// +// MAP_GROWSDOWN: +// A special mmap() flag that is used to implement thread stacks. It tells +// kernel that the memory region should extend downwards when needed. This +// allows early versions of LinuxThreads to only mmap the first few pages +// when creating a new thread. Linux kernel will automatically expand thread +// stack as needed (on page faults). +// +// However, because the memory region of a MAP_GROWSDOWN stack can grow on +// demand, if a page fault happens outside an already mapped MAP_GROWSDOWN +// region, it's hard to tell if the fault is due to a legitimate stack +// access or because of reading/writing non-exist memory (e.g. buffer +// overrun). As a rule, if the fault happens below current stack pointer, +// Linux kernel does not expand stack, instead a SIGSEGV is sent to the +// application (see Linux kernel fault.c). +// +// This Linux feature can cause SIGSEGV when VM bangs thread stack for +// stack overflow detection. +// +// Newer version of LinuxThreads (since glibc-2.2, or, RH-7.x) and NPTL do +// not use MAP_GROWSDOWN. +// +// To get around the problem and allow stack banging on Linux, we need to +// manually expand thread stack after receiving the SIGSEGV. +// +// There are two ways to expand thread stack to address "bottom", we used +// both of them in JVM before 1.5: +// 1. adjust stack pointer first so that it is below "bottom", and then +// touch "bottom" +// 2. mmap() the page in question +// +// Now alternate signal stack is gone, it's harder to use 2. For instance, +// if current sp is already near the lower end of page 101, and we need to +// call mmap() to map page 100, it is possible that part of the mmap() frame +// will be placed in page 100. 
When page 100 is mapped, it is zero-filled. +// That will destroy the mmap() frame and cause VM to crash. +// +// The following code works by adjusting sp first, then accessing the "bottom" +// page to force a page fault. Linux kernel will then automatically expand the +// stack mapping. +// +// _expand_stack_to() assumes its frame size is less than page size, which +// should always be true if the function is not inlined. + +static void NOINLINE _expand_stack_to(address bottom) { + address sp; + size_t size; + volatile char *p; + + // Adjust bottom to point to the largest address within the same page, it + // gives us a one-page buffer if alloca() allocates slightly more memory. + bottom = (address)align_down((uintptr_t)bottom, os::vm_page_size()); + bottom += os::vm_page_size() - 1; + + // sp might be slightly above current stack pointer; if that's the case, we + // will alloca() a little more space than necessary, which is OK. Don't use + // os::current_stack_pointer(), as its result can be slightly below current + // stack pointer, causing us to not alloca enough to reach "bottom". 
+ sp = (address)&sp; + + if (sp > bottom) { + size = sp - bottom; + p = (volatile char *)alloca(size); + assert(p != nullptr && p <= (volatile char *)bottom, "alloca problem?"); + p[0] = '\0'; + } +} + +void os::Linux::expand_stack_to(address bottom) { + _expand_stack_to(bottom); +} + +bool os::Linux::manually_expand_stack(JavaThread * t, address addr) { + assert(t!=nullptr, "just checking"); + assert(t->osthread()->expanding_stack(), "expand should be set"); + + if (t->is_in_usable_stack(addr)) { + sigset_t mask_all, old_sigset; + sigfillset(&mask_all); + pthread_sigmask(SIG_SETMASK, &mask_all, &old_sigset); + _expand_stack_to(addr); + pthread_sigmask(SIG_SETMASK, &old_sigset, nullptr); + return true; + } + return false; +} + +////////////////////////////////////////////////////////////////////////////// +// create new thread + +// Thread start routine for all newly created threads +static void *thread_native_entry(Thread *thread) { + + thread->record_stack_base_and_size(); + +#ifndef __GLIBC__ + // Try to randomize the cache line index of hot stack frames. + // This helps when threads of the same stack traces evict each other's + // cache lines. The threads can be either from the same JVM instance, or + // from different JVM instances. The benefit is especially true for + // processors with hyperthreading technology. + // This code is not needed anymore in glibc because it has MULTI_PAGE_ALIASING + // and we did not see any degradation in performance without `alloca()`. + static int counter = 0; + int pid = os::current_process_id(); + int random = ((pid ^ counter++) & 7) * 128; + void *stackmem = alloca(random != 0 ? random : 1); // ensure we allocate > 0 + // Ensure the alloca result is used in a way that prevents the compiler from eliding it. 
+  *(char *)stackmem = 1;
+#endif
+
+  thread->initialize_thread_current();
+
+  OSThread* osthread = thread->osthread();
+  Monitor* sync = osthread->startThread_lock();
+
+  osthread->set_thread_id(checked_cast<pid_t>(os::current_thread_id()));
+
+  if (UseNUMA) {
+    int lgrp_id = os::numa_get_group_id();
+    if (lgrp_id != -1) {
+      thread->set_lgrp_id(lgrp_id);
+    }
+  }
+  // initialize signal mask for this thread
+  PosixSignals::hotspot_sigmask(thread);
+
+  // initialize floating point control register
+  os::Linux::init_thread_fpu_state();
+
+  // handshaking with parent thread
+  {
+    MutexLocker ml(sync, Mutex::_no_safepoint_check_flag);
+
+    // notify parent thread
+    osthread->set_state(INITIALIZED);
+    sync->notify_all();
+
+    // wait until os::start_thread()
+    while (osthread->get_state() == INITIALIZED) {
+      sync->wait_without_safepoint_check();
+    }
+  }
+
+  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
+    os::current_thread_id(), (uintx) pthread_self());
+
+  assert(osthread->pthread_id() != 0, "pthread_id was not set as expected");
+
+  if (DelayThreadStartALot) {
+    os::naked_short_sleep(100);
+  }
+
+  // call one more level start routine
+  thread->call_run();
+
+  // Note: at this point the thread object may already have deleted itself.
+  // Prevent dereferencing it from here on out.
+  thread = nullptr;
+
+  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
+    os::current_thread_id(), (uintx) pthread_self());
+
+  return 0;
+}
+
+// On Linux, glibc places static TLS blocks (for __thread variables) on
+// the thread stack. This decreases the stack size actually available
+// to threads.
+//
+// For large static TLS sizes, this may cause threads to malfunction due
+// to insufficient stack space. This is a well-known issue in glibc:
+// http://sourceware.org/bugzilla/show_bug.cgi?id=11787.
+// +// As a workaround, we call a private but assumed-stable glibc function, +// __pthread_get_minstack() to obtain the minstack size and derive the +// static TLS size from it. We then increase the user requested stack +// size by this TLS size. The same function is used to determine whether +// adjustStackSizeForGuardPages() needs to be true. +// +// Due to compatibility concerns, this size adjustment is opt-in and +// controlled via AdjustStackSizeForTLS. +typedef size_t (*GetMinStack)(const pthread_attr_t *attr); + +GetMinStack _get_minstack_func = nullptr; // Initialized via os::init_2() + +// Returns the size of the static TLS area glibc puts on thread stacks. +// The value is cached on first use, which occurs when the first thread +// is created during VM initialization. +static size_t get_static_tls_area_size(const pthread_attr_t *attr) { + size_t tls_size = 0; + if (_get_minstack_func != nullptr) { + // Obtain the pthread minstack size by calling __pthread_get_minstack. + size_t minstack_size = _get_minstack_func(attr); + + // Remove non-TLS area size included in minstack size returned + // by __pthread_get_minstack() to get the static TLS size. + // If adjustStackSizeForGuardPages() is true, minstack size includes + // guard_size. Otherwise guard_size is automatically added + // to the stack size by pthread_create and is no longer included + // in minstack size. In both cases, the guard_size is taken into + // account, so there is no need to adjust the result for that. + // + // Although __pthread_get_minstack() is a private glibc function, + // it is expected to have a stable behavior across future glibc + // versions while glibc still allocates the static TLS blocks off + // the stack. 
Following is glibc 2.28 __pthread_get_minstack(): + // + // size_t + // __pthread_get_minstack (const pthread_attr_t *attr) + // { + // return GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN; + // } + // + // + // The following 'minstack_size > os::vm_page_size() + PTHREAD_STACK_MIN' + // if check is done for precaution. + if (minstack_size > os::vm_page_size() + PTHREAD_STACK_MIN) { + tls_size = minstack_size - os::vm_page_size() - PTHREAD_STACK_MIN; + } + } + + log_info(os, thread)("Stack size adjustment for TLS is " SIZE_FORMAT, + tls_size); + return tls_size; +} + +// In glibc versions prior to 2.27 the guard size mechanism +// was not implemented properly. The POSIX standard requires adding +// the size of the guard pages to the stack size, instead glibc +// took the space out of 'stacksize'. Thus we need to adapt the requested +// stack_size by the size of the guard pages to mimic proper behaviour. +// The fix in glibc 2.27 has now been backported to numerous earlier +// glibc versions so we need to do a dynamic runtime check. +static bool _adjustStackSizeForGuardPages = true; +bool os::Linux::adjustStackSizeForGuardPages() { + return _adjustStackSizeForGuardPages; +} + +#ifdef __GLIBC__ +static void init_adjust_stacksize_for_guard_pages() { + assert(_get_minstack_func == nullptr, "initialization error"); + _get_minstack_func =(GetMinStack)dlsym(RTLD_DEFAULT, "__pthread_get_minstack"); + log_info(os, thread)("Lookup of __pthread_get_minstack %s", + _get_minstack_func == nullptr ? 
"failed" : "succeeded"); + + if (_get_minstack_func != nullptr) { + pthread_attr_t attr; + pthread_attr_init(&attr); + size_t min_stack = _get_minstack_func(&attr); + size_t guard = 16 * K; // Actual value doesn't matter as it is not examined + pthread_attr_setguardsize(&attr, guard); + size_t min_stack2 = _get_minstack_func(&attr); + pthread_attr_destroy(&attr); + // If the minimum stack size changed when we added the guard page space + // then we need to perform the adjustment. + _adjustStackSizeForGuardPages = (min_stack2 != min_stack); + log_info(os)("Glibc stack size guard page adjustment is %sneeded", + _adjustStackSizeForGuardPages ? "" : "not "); + } +} +#endif // GLIBC + +bool os::create_thread(Thread* thread, ThreadType thr_type, + size_t req_stack_size) { + assert(thread->osthread() == nullptr, "caller responsible"); + + // Allocate the OSThread object + OSThread* osthread = new (std::nothrow) OSThread(); + if (osthread == nullptr) { + return false; + } + + // set the correct thread state + osthread->set_thread_type(thr_type); + + // Initial state is ALLOCATED but not INITIALIZED + osthread->set_state(ALLOCATED); + + thread->set_osthread(osthread); + + // init thread attributes + pthread_attr_t attr; + int rslt = pthread_attr_init(&attr); + if (rslt != 0) { + thread->set_osthread(nullptr); + delete osthread; + return false; + } + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + + // Calculate stack size if it's not specified by caller. + size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size); + size_t guard_size = os::Linux::default_guard_size(thr_type); + + // Configure glibc guard page. Must happen before calling + // get_static_tls_area_size(), which uses the guard_size. + pthread_attr_setguardsize(&attr, guard_size); + + // Apply stack size adjustments if needed. However, be careful not to end up + // with a size of zero due to overflow. Don't add the adjustment in that case. 
+ size_t stack_adjust_size = 0; + if (AdjustStackSizeForTLS) { + // Adjust the stack_size for on-stack TLS - see get_static_tls_area_size(). + stack_adjust_size += get_static_tls_area_size(&attr); + } else if (os::Linux::adjustStackSizeForGuardPages()) { + stack_adjust_size += guard_size; + } + + stack_adjust_size = align_up(stack_adjust_size, os::vm_page_size()); + if (stack_size <= SIZE_MAX - stack_adjust_size) { + stack_size += stack_adjust_size; + } + assert(is_aligned(stack_size, os::vm_page_size()), "stack_size not aligned"); + + if (THPStackMitigation) { + // In addition to the glibc guard page that prevents inter-thread-stack hugepage + // coalescing (see comment in os::Linux::default_guard_size()), we also make + // sure the stack size itself is not huge-page-size aligned; that makes it much + // more likely for thread stack boundaries to be unaligned as well and hence + // protects thread stacks from being targeted by khugepaged. + if (HugePages::thp_pagesize() > 0 && + is_aligned(stack_size, HugePages::thp_pagesize())) { + stack_size += os::vm_page_size(); + } + } + + int status = pthread_attr_setstacksize(&attr, stack_size); + if (status != 0) { + // pthread_attr_setstacksize() function can fail + // if the stack size exceeds a system-imposed limit. + assert_status(status == EINVAL, status, "pthread_attr_setstacksize"); + log_warning(os, thread)("The %sthread stack size specified is invalid: " SIZE_FORMAT "k", + (thr_type == compiler_thread) ? "compiler " : ((thr_type == java_thread) ? 
"" : "VM "), + stack_size / K); + thread->set_osthread(nullptr); + delete osthread; + pthread_attr_destroy(&attr); + return false; + } + + ThreadState state; + + { + ResourceMark rm; + pthread_t tid; + int ret = 0; + int limit = 3; + do { + ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread); + } while (ret == EAGAIN && limit-- > 0); + + char buf[64]; + if (ret == 0) { + log_info(os, thread)("Thread \"%s\" started (pthread id: " UINTX_FORMAT ", attributes: %s). ", + thread->name(), (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr)); + + // Print current timer slack if override is enabled and timer slack value is available. + // Avoid calling prctl otherwise for extra safety. + if (TimerSlack >= 0) { + int slack = prctl(PR_GET_TIMERSLACK); + if (slack >= 0) { + log_info(os, thread)("Thread \"%s\" (pthread id: " UINTX_FORMAT ") timer slack: %dns", + thread->name(), (uintx) tid, slack); + } + } + } else { + log_warning(os, thread)("Failed to start thread \"%s\" - pthread_create failed (%s) for attributes: %s.", + thread->name(), os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr)); + // Log some OS information which might explain why creating the thread failed. + log_info(os, thread)("Number of threads approx. 
running in the VM: %d", Threads::number_of_threads()); + LogStream st(Log(os, thread)::info()); + os::Posix::print_rlimit_info(&st); + os::print_memory_info(&st); + os::Linux::print_proc_sys_info(&st); + os::Linux::print_container_info(&st); + } + + pthread_attr_destroy(&attr); + + if (ret != 0) { + // Need to clean up stuff we've allocated so far + thread->set_osthread(nullptr); + delete osthread; + return false; + } + + // Store pthread info into the OSThread + osthread->set_pthread_id(tid); + + // Wait until child thread is either initialized or aborted + { + Monitor* sync_with_child = osthread->startThread_lock(); + MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag); + while ((state = osthread->get_state()) == ALLOCATED) { + sync_with_child->wait_without_safepoint_check(); + } + } + } + + // The thread is returned suspended (in state INITIALIZED), + // and is started higher up in the call chain + assert(state == INITIALIZED, "race condition"); + return true; +} + +///////////////////////////////////////////////////////////////////////////// +// attach existing thread + +// bootstrap the main thread +bool os::create_main_thread(JavaThread* thread) { + assert(os::Linux::_main_thread == pthread_self(), "should be called inside main thread"); + return create_attached_thread(thread); +} + +bool os::create_attached_thread(JavaThread* thread) { +#ifdef ASSERT + thread->verify_not_published(); +#endif + + // Allocate the OSThread object + OSThread* osthread = new (std::nothrow) OSThread(); + + if (osthread == nullptr) { + return false; + } + + // Store pthread info into the OSThread + osthread->set_thread_id(os::Linux::gettid()); + osthread->set_pthread_id(::pthread_self()); + + // initialize floating point control register + os::Linux::init_thread_fpu_state(); + + // Initial thread state is RUNNABLE + osthread->set_state(RUNNABLE); + + thread->set_osthread(osthread); + + if (UseNUMA) { + int lgrp_id = os::numa_get_group_id(); + if (lgrp_id != -1) { + 
thread->set_lgrp_id(lgrp_id); + } + } + + if (os::is_primordial_thread()) { + // If current thread is primordial thread, its stack is mapped on demand, + // see notes about MAP_GROWSDOWN. Here we try to force kernel to map + // the entire stack region to avoid SEGV in stack banging. + // It is also useful to get around the heap-stack-gap problem on SuSE + // kernel (see 4821821 for details). We first expand stack to the top + // of yellow zone, then enable stack yellow zone (order is significant, + // enabling yellow zone first will crash JVM on SuSE Linux), so there + // is no gap between the last two virtual memory regions. + + StackOverflow* overflow_state = thread->stack_overflow_state(); + address addr = overflow_state->stack_reserved_zone_base(); + assert(addr != nullptr, "initialization problem?"); + assert(overflow_state->stack_available(addr) > 0, "stack guard should not be enabled"); + + osthread->set_expanding_stack(); + os::Linux::manually_expand_stack(thread, addr); + osthread->clear_expanding_stack(); + } + + // initialize signal mask for this thread + // and save the caller's signal mask + PosixSignals::hotspot_sigmask(thread); + + log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT + ", stack: " PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "K) ).", + os::current_thread_id(), (uintx) pthread_self(), + p2i(thread->stack_base()), p2i(thread->stack_end()), thread->stack_size() / K); + + return true; +} + +void os::pd_start_thread(Thread* thread) { + OSThread * osthread = thread->osthread(); + assert(osthread->get_state() != INITIALIZED, "just checking"); + Monitor* sync_with_child = osthread->startThread_lock(); + MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag); + sync_with_child->notify(); +} + +// Free Linux resources related to the OSThread +void os::free_thread(OSThread* osthread) { + assert(osthread != nullptr, "osthread not set"); + + // We are told to free resources of the argument thread, + 
// but we can only really operate on the current thread.
+  assert(Thread::current()->osthread() == osthread,
+         "os::free_thread but not current thread");
+
+#ifdef ASSERT
+  sigset_t current;
+  sigemptyset(&current);
+  pthread_sigmask(SIG_SETMASK, nullptr, &current);
+  assert(!sigismember(&current, PosixSignals::SR_signum), "SR signal should not be blocked!");
+#endif
+
+  // Restore caller's signal mask
+  sigset_t sigmask = osthread->caller_sigmask();
+  pthread_sigmask(SIG_SETMASK, &sigmask, nullptr);
+
+  delete osthread;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// primordial thread
+
+// Check if current thread is the primordial thread, similar to Solaris thr_main.
+bool os::is_primordial_thread(void) {
+  if (suppress_primordial_thread_resolution) {
+    return false;
+  }
+  char dummy;
+  // If called before init complete, thread stack bottom will be null.
+  // Can be called if fatal error occurs before initialization.
+  if (os::Linux::initial_thread_stack_bottom() == nullptr) return false;
+  assert(os::Linux::initial_thread_stack_bottom() != nullptr &&
+         os::Linux::initial_thread_stack_size() != 0,
+         "os::init did not locate primordial thread's stack region");
+  if ((address)&dummy >= os::Linux::initial_thread_stack_bottom() &&
+      (address)&dummy < os::Linux::initial_thread_stack_bottom() +
+                        os::Linux::initial_thread_stack_size()) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+// Find the virtual memory area that contains addr
+static bool find_vma(address addr, address* vma_low, address* vma_high) {
+  FILE *fp = os::fopen("/proc/self/maps", "r");
+  if (fp) {
+    address low, high;
+    while (!feof(fp)) {
+      if (fscanf(fp, "%p-%p", &low, &high) == 2) {
+        if (low <= addr && addr < high) {
+          if (vma_low) *vma_low = low;
+          if (vma_high) *vma_high = high;
+          fclose(fp);
+          return true;
+        }
+      }
+      for (;;) {
+        int ch = fgetc(fp);
+        if (ch == EOF || ch == (int)'\n') break;
+      }
+    }
+    fclose(fp);
+  }
+  return false;
+}
+
+// Locate primordial thread
stack. This special handling of primordial thread stack +// is needed because pthread_getattr_np() on most (all?) Linux distros returns +// bogus value for the primordial process thread. While the launcher has created +// the VM in a new thread since JDK 6, we still have to allow for the use of the +// JNI invocation API from a primordial thread. +void os::Linux::capture_initial_stack(size_t max_size) { + + // max_size is either 0 (which means accept OS default for thread stacks) or + // a user-specified value known to be at least the minimum needed. If we + // are actually on the primordial thread we can make it appear that we have a + // smaller max_size stack by inserting the guard pages at that location. But we + // cannot do anything to emulate a larger stack than what has been provided by + // the OS or threading library. In fact if we try to use a stack greater than + // what is set by rlimit then we will crash the hosting process. + + // Maximum stack size is the easy part, get it from RLIMIT_STACK. + // If this is "unlimited" then it will be a huge value. + struct rlimit rlim; + getrlimit(RLIMIT_STACK, &rlim); + size_t stack_size = rlim.rlim_cur; + + // 6308388: a bug in ld.so will relocate its own .data section to the + // lower end of primordial stack; reduce ulimit -s value a little bit + // so we won't install guard page on ld.so's data section. + // But ensure we don't underflow the stack size - allow 1 page spare + if (stack_size >= 3 * os::vm_page_size()) { + stack_size -= 2 * os::vm_page_size(); + } + + // Try to figure out where the stack base (top) is. This is harder. + // + // When an application is started, glibc saves the initial stack pointer in + // a global variable "__libc_stack_end", which is then used by system + // libraries. __libc_stack_end should be pretty close to stack top. The + // variable is available since the very early days. However, because it is + // a private interface, it could disappear in the future. 
+
+  // Linux kernel saves start_stack information in /proc/<pid>/stat. Similar
+  // to __libc_stack_end, it is very close to stack top, but isn't the real
+  // stack top. Note that /proc may not exist if VM is running as a chroot
+  // program, so reading /proc/<pid>/stat could fail. Also the contents of
+  // /proc/<pid>/stat could change in the future (though unlikely).
+  //
+  // We try __libc_stack_end first. If that doesn't work, look for
+  // /proc/<pid>/stat. If neither of them works, we use current stack pointer
+  // as a hint, which should work well in most cases.
+
+  uintptr_t stack_start;
+
+  // try __libc_stack_end first
+  uintptr_t *p = (uintptr_t *)dlsym(RTLD_DEFAULT, "__libc_stack_end");
+  if (p && *p) {
+    stack_start = *p;
+  } else {
+    // see if we can get the start_stack field from /proc/self/stat
+    FILE *fp;
+    int pid;
+    char state;
+    int ppid;
+    int pgrp;
+    int session;
+    int nr;
+    int tpgrp;
+    unsigned long flags;
+    unsigned long minflt;
+    unsigned long cminflt;
+    unsigned long majflt;
+    unsigned long cmajflt;
+    unsigned long utime;
+    unsigned long stime;
+    long cutime;
+    long cstime;
+    long prio;
+    long nice;
+    long junk;
+    long it_real;
+    uintptr_t start;
+    uintptr_t vsize;
+    intptr_t rss;
+    uintptr_t rsslim;
+    uintptr_t scodes;
+    uintptr_t ecode;
+    int i;
+
+    // Figure what the primordial thread stack base is. Code is inspired
+    // by email from Hans Boehm. /proc/self/stat begins with current pid,
+    // followed by command name surrounded by parentheses, state, etc.
+    char stat[2048];
+    size_t statlen;
+
+    fp = os::fopen("/proc/self/stat", "r");
+    if (fp) {
+      statlen = fread(stat, 1, 2047, fp);
+      stat[statlen] = '\0';
+      fclose(fp);
+
+      // Skip pid and the command string. Note that we could be dealing with
+      // weird command names, e.g. user could decide to rename java launcher
+      // to "java 1.4.2 :)", then the stat file would look like
+      //                1234 (java 1.4.2 :)) R ... ...
+ // We don't really need to know the command string, just find the last + // occurrence of ")" and then start parsing from there. See bug 4726580. + char * s = strrchr(stat, ')'); + + i = 0; + if (s) { + // Skip blank chars + do { s++; } while (s && isspace(*s)); + +#define _UFM UINTX_FORMAT +#define _DFM INTX_FORMAT + + // 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 + // 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 + i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld " _UFM _UFM _DFM _UFM _UFM _UFM _UFM, + &state, // 3 %c + &ppid, // 4 %d + &pgrp, // 5 %d + &session, // 6 %d + &nr, // 7 %d + &tpgrp, // 8 %d + &flags, // 9 %lu + &minflt, // 10 %lu + &cminflt, // 11 %lu + &majflt, // 12 %lu + &cmajflt, // 13 %lu + &utime, // 14 %lu + &stime, // 15 %lu + &cutime, // 16 %ld + &cstime, // 17 %ld + &prio, // 18 %ld + &nice, // 19 %ld + &junk, // 20 %ld + &it_real, // 21 %ld + &start, // 22 UINTX_FORMAT + &vsize, // 23 UINTX_FORMAT + &rss, // 24 INTX_FORMAT + &rsslim, // 25 UINTX_FORMAT + &scodes, // 26 UINTX_FORMAT + &ecode, // 27 UINTX_FORMAT + &stack_start); // 28 UINTX_FORMAT + } + +#undef _UFM +#undef _DFM + + if (i != 28 - 2) { + assert(false, "Bad conversion from /proc/self/stat"); + // product mode - assume we are the primordial thread, good luck in the + // embedded case. + warning("Can't detect primordial thread stack location - bad conversion"); + stack_start = (uintptr_t) &rlim; + } + } else { + // For some reason we can't open /proc/self/stat (for example, running on + // FreeBSD with a Linux emulator, or inside chroot), this should work for + // most cases, so don't abort: + warning("Can't detect primordial thread stack location - no /proc/self/stat"); + stack_start = (uintptr_t) &rlim; + } + } + + // Now we have a pointer (stack_start) very close to the stack top, the + // next thing to do is to figure out the exact location of stack top. 
We + // can find out the virtual memory area that contains stack_start by + // reading /proc/self/maps, it should be the last vma in /proc/self/maps, + // and its upper limit is the real stack top. (again, this would fail if + // running inside chroot, because /proc may not exist.) + + uintptr_t stack_top; + address low, high; + if (find_vma((address)stack_start, &low, &high)) { + // success, "high" is the true stack top. (ignore "low", because initial + // thread stack grows on demand, its real bottom is high - RLIMIT_STACK.) + stack_top = (uintptr_t)high; + } else { + // failed, likely because /proc/self/maps does not exist + warning("Can't detect primordial thread stack location - find_vma failed"); + // best effort: stack_start is normally within a few pages below the real + // stack top, use it as stack top, and reduce stack size so we won't put + // guard page outside stack. + stack_top = stack_start; + stack_size -= 16 * os::vm_page_size(); + } + + // stack_top could be partially down the page so align it + stack_top = align_up(stack_top, os::vm_page_size()); + + // Allowed stack value is minimum of max_size and what we derived from rlimit + if (max_size > 0) { + _initial_thread_stack_size = MIN2(max_size, stack_size); + } else { + // Accept the rlimit max, but if stack is unlimited then it will be huge, so + // clamp it at 8MB as we do on Solaris + _initial_thread_stack_size = MIN2(stack_size, 8*M); + } + _initial_thread_stack_size = align_down(_initial_thread_stack_size, os::vm_page_size()); + _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size; + + assert(_initial_thread_stack_bottom < (address)stack_top, "overflow!"); + + if (log_is_enabled(Info, os, thread)) { + // See if we seem to be on primordial process thread + bool primordial = uintptr_t(&rlim) > uintptr_t(_initial_thread_stack_bottom) && + uintptr_t(&rlim) < stack_top; + + log_info(os, thread)("Capturing initial stack in %s thread: req. 
size: " SIZE_FORMAT "K, actual size: " + SIZE_FORMAT "K, top=" INTPTR_FORMAT ", bottom=" INTPTR_FORMAT, + primordial ? "primordial" : "user", max_size / K, _initial_thread_stack_size / K, + stack_top, intptr_t(_initial_thread_stack_bottom)); + } +} + +//////////////////////////////////////////////////////////////////////////////// +// time support +double os::elapsedVTime() { + struct rusage usage; + int retval = getrusage(RUSAGE_THREAD, &usage); + if (retval == 0) { + return (double) (usage.ru_utime.tv_sec + usage.ru_stime.tv_sec) + (double) (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000 * 1000); + } else { + // better than nothing, but not much + return elapsedTime(); + } +} + +void os::Linux::fast_thread_clock_init() { + if (!UseLinuxPosixThreadCPUClocks) { + return; + } + clockid_t clockid; + struct timespec tp; + int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) = + (int(*)(pthread_t, clockid_t *)) dlsym(RTLD_DEFAULT, "pthread_getcpuclockid"); + + // Switch to using fast clocks for thread cpu time if + // the clock_getres() returns 0 error code. + // Note, that some kernels may support the current thread + // clock (CLOCK_THREAD_CPUTIME_ID) but not the clocks + // returned by the pthread_getcpuclockid(). + // If the fast POSIX clocks are supported then the clock_getres() + // must return at least tp.tv_sec == 0 which means a resolution + // better than 1 sec. This is extra check for reliability. 
+ + if (pthread_getcpuclockid_func && + pthread_getcpuclockid_func(_main_thread, &clockid) == 0 && + clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) { + _supports_fast_thread_cpu_time = true; + _pthread_getcpuclockid = pthread_getcpuclockid_func; + } +} + +// thread_id is kernel thread id (similar to Solaris LWP id) +intx os::current_thread_id() { return os::Linux::gettid(); } +int os::current_process_id() { + return ::getpid(); +} + +// DLL functions + +// This must be hard coded because it's the system's temporary +// directory not the java application's temp directory, ala java.io.tmpdir. +const char* os::get_temp_directory() { return "/tmp"; } + +// check if addr is inside libjvm.so +bool os::address_is_in_vm(address addr) { + static address libjvm_base_addr; + Dl_info dlinfo; + + if (libjvm_base_addr == nullptr) { + if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) { + libjvm_base_addr = (address)dlinfo.dli_fbase; + } + assert(libjvm_base_addr !=nullptr, "Cannot obtain base address for libjvm"); + } + + if (dladdr((void *)addr, &dlinfo) != 0) { + if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true; + } + + return false; +} + +void os::prepare_native_symbols() { +} + +bool os::dll_address_to_function_name(address addr, char *buf, + int buflen, int *offset, + bool demangle) { + // buf is not optional, but offset is optional + assert(buf != nullptr, "sanity check"); + + Dl_info dlinfo; + + if (dladdr((void*)addr, &dlinfo) != 0) { + // see if we have a matching symbol + if (dlinfo.dli_saddr != nullptr && dlinfo.dli_sname != nullptr) { + if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) { + jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname); + } + if (offset != nullptr) *offset = pointer_delta_as_int(addr, (address)dlinfo.dli_saddr); + return true; + } + // no matching symbol so try for just file info + if (dlinfo.dli_fname != nullptr && dlinfo.dli_fbase != nullptr) { + if (Decoder::decode((address)(addr - 
(address)dlinfo.dli_fbase), + buf, buflen, offset, dlinfo.dli_fname, demangle)) { + return true; + } + } + } + + buf[0] = '\0'; + if (offset != nullptr) *offset = -1; + return false; +} + +bool os::dll_address_to_library_name(address addr, char* buf, + int buflen, int* offset) { + // buf is not optional, but offset is optional + assert(buf != nullptr, "sanity check"); + + Dl_info dlinfo; + if (dladdr((void*)addr, &dlinfo) != 0) { + if (dlinfo.dli_fname != nullptr) { + jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname); + } + if (dlinfo.dli_fbase != nullptr && offset != nullptr) { + *offset = pointer_delta_as_int(addr, (address)dlinfo.dli_fbase); + } + return true; + } + buf[0] = '\0'; + if (offset) *offset = -1; + return false; +} + +// Remember the stack's state. The Linux dynamic linker will change +// the stack to 'executable' at most once, so we must safepoint only once. +bool os::Linux::_stack_is_executable = false; + +// VM operation that loads a library. This is necessary if stack protection +// of the Java stacks can be lost during loading the library. If we +// do not stop the Java threads, they can stack overflow before the stacks +// are protected again. +class VM_LinuxDllLoad: public VM_Operation { + private: + const char *_filename; + char *_ebuf; + int _ebuflen; + void *_lib; + public: + VM_LinuxDllLoad(const char *fn, char *ebuf, int ebuflen) : + _filename(fn), _ebuf(ebuf), _ebuflen(ebuflen), _lib(nullptr) {} + VMOp_Type type() const { return VMOp_LinuxDllLoad; } + void doit() { + _lib = os::Linux::dll_load_in_vmthread(_filename, _ebuf, _ebuflen); + os::Linux::_stack_is_executable = true; + } + void* loaded_library() { return _lib; } +}; + +void * os::dll_load(const char *filename, char *ebuf, int ebuflen) { + void * result = nullptr; + bool load_attempted = false; + + log_info(os)("attempting shared library load of %s", filename); + + // Check whether the library to load might change execution rights + // of the stack. 
If they are changed, the protection of the stack + // guard pages will be lost. We need a safepoint to fix this. + // + // See Linux man page execstack(8) for more info. + if (os::uses_stack_guard_pages() && !os::Linux::_stack_is_executable) { + if (!ElfFile::specifies_noexecstack(filename)) { + if (!is_init_completed()) { + os::Linux::_stack_is_executable = true; + // This is OK - No Java threads have been created yet, and hence no + // stack guard pages to fix. + // + // Dynamic loader will make all stacks executable after + // this function returns, and will not do that again. + assert(Threads::number_of_threads() == 0, "no Java threads should exist yet."); + } else { + warning("You have loaded library %s which might have disabled stack guard. " + "The VM will try to fix the stack guard now.\n" + "It's highly recommended that you fix the library with " + "'execstack -c ', or link it with '-z noexecstack'.", + filename); + + JavaThread *jt = JavaThread::current(); + if (jt->thread_state() != _thread_in_native) { + // This happens when a compiler thread tries to load a hsdis-.so file + // that requires ExecStack. Cannot enter safe point. Let's give up. + warning("Unable to fix stack guard. Giving up."); + } else { + if (!LoadExecStackDllInVMThread) { + // This is for the case where the DLL has an static + // constructor function that executes JNI code. We cannot + // load such DLLs in the VMThread. 
+ result = os::Linux::dlopen_helper(filename, ebuf, ebuflen); + } + + ThreadInVMfromNative tiv(jt); + debug_only(VMNativeEntryWrapper vew;) + + VM_LinuxDllLoad op(filename, ebuf, ebuflen); + VMThread::execute(&op); + if (LoadExecStackDllInVMThread) { + result = op.loaded_library(); + } + load_attempted = true; + } + } + } + } + + if (!load_attempted) { + result = os::Linux::dlopen_helper(filename, ebuf, ebuflen); + } + + if (result != nullptr) { + // Successful loading + return result; + } + + Elf32_Ehdr elf_head; + size_t prefix_len = strlen(ebuf); + ssize_t diag_msg_max_length = ebuflen - prefix_len; + if (diag_msg_max_length <= 0) { + // No more space in ebuf for additional diagnostics message + return nullptr; + } + + char* diag_msg_buf = ebuf + prefix_len; + + int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK); + + if (file_descriptor < 0) { + // Can't open library, report dlerror() message + return nullptr; + } + + bool failed_to_read_elf_head= + (sizeof(elf_head)!= + (::read(file_descriptor, &elf_head,sizeof(elf_head)))); + + ::close(file_descriptor); + if (failed_to_read_elf_head) { + // file i/o error - report dlerror() msg + return nullptr; + } + + if (elf_head.e_ident[EI_DATA] != LITTLE_ENDIAN_ONLY(ELFDATA2LSB) BIG_ENDIAN_ONLY(ELFDATA2MSB)) { + // handle invalid/out of range endianness values + if (elf_head.e_ident[EI_DATA] == 0 || elf_head.e_ident[EI_DATA] > 2) { + return nullptr; + } + +#if defined(VM_LITTLE_ENDIAN) + // VM is LE, shared object BE + elf_head.e_machine = be16toh(elf_head.e_machine); +#else + // VM is BE, shared object LE + elf_head.e_machine = le16toh(elf_head.e_machine); +#endif + } + + typedef struct { + Elf32_Half code; // Actual value as defined in elf.h + Elf32_Half compat_class; // Compatibility of archs at VM's sense + unsigned char elf_class; // 32 or 64 bit + unsigned char endianness; // MSB or LSB + char* name; // String representation + } arch_t; + +#ifndef EM_AARCH64 + #define EM_AARCH64 183 /* ARM AARCH64 */ 
+#endif +#ifndef EM_RISCV + #define EM_RISCV 243 /* RISC-V */ +#endif +#ifndef EM_LOONGARCH + #define EM_LOONGARCH 258 /* LoongArch */ +#endif + + static const arch_t arch_array[]={ + {EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, + {EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, + {EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"}, + {EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"}, + {EM_SPARC, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, + {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, + {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"}, + {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"}, +#if defined(VM_LITTLE_ENDIAN) + {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64 LE"}, + {EM_SH, EM_SH, ELFCLASS32, ELFDATA2LSB, (char*)"SuperH"}, +#else + {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"}, + {EM_SH, EM_SH, ELFCLASS32, ELFDATA2MSB, (char*)"SuperH BE"}, +#endif + {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM"}, + // we only support 64 bit z architecture + {EM_S390, EM_S390, ELFCLASS64, ELFDATA2MSB, (char*)"IBM System/390"}, + {EM_ALPHA, EM_ALPHA, ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"}, + {EM_MIPS_RS3_LE, EM_MIPS_RS3_LE, ELFCLASS32, ELFDATA2LSB, (char*)"MIPSel"}, + {EM_MIPS, EM_MIPS, ELFCLASS32, ELFDATA2MSB, (char*)"MIPS"}, + {EM_PARISC, EM_PARISC, ELFCLASS32, ELFDATA2MSB, (char*)"PARISC"}, + {EM_68K, EM_68K, ELFCLASS32, ELFDATA2MSB, (char*)"M68k"}, + {EM_AARCH64, EM_AARCH64, ELFCLASS64, ELFDATA2LSB, (char*)"AARCH64"}, +#ifdef _LP64 + {EM_RISCV, EM_RISCV, ELFCLASS64, ELFDATA2LSB, (char*)"RISCV64"}, +#else + {EM_RISCV, EM_RISCV, ELFCLASS32, ELFDATA2LSB, (char*)"RISCV32"}, +#endif + {EM_LOONGARCH, EM_LOONGARCH, ELFCLASS64, ELFDATA2LSB, (char*)"LoongArch"}, + }; + +#if (defined IA32) + static Elf32_Half running_arch_code=EM_386; +#elif (defined AMD64) || (defined X32) + static Elf32_Half 
running_arch_code=EM_X86_64; +#elif (defined IA64) + static Elf32_Half running_arch_code=EM_IA_64; +#elif (defined __sparc) && (defined _LP64) + static Elf32_Half running_arch_code=EM_SPARCV9; +#elif (defined __sparc) && (!defined _LP64) + static Elf32_Half running_arch_code=EM_SPARC; +#elif (defined __powerpc64__) + static Elf32_Half running_arch_code=EM_PPC64; +#elif (defined __powerpc__) + static Elf32_Half running_arch_code=EM_PPC; +#elif (defined AARCH64) + static Elf32_Half running_arch_code=EM_AARCH64; +#elif (defined ARM) + static Elf32_Half running_arch_code=EM_ARM; +#elif (defined S390) + static Elf32_Half running_arch_code=EM_S390; +#elif (defined ALPHA) + static Elf32_Half running_arch_code=EM_ALPHA; +#elif (defined MIPSEL) + static Elf32_Half running_arch_code=EM_MIPS_RS3_LE; +#elif (defined PARISC) + static Elf32_Half running_arch_code=EM_PARISC; +#elif (defined MIPS) + static Elf32_Half running_arch_code=EM_MIPS; +#elif (defined M68K) + static Elf32_Half running_arch_code=EM_68K; +#elif (defined SH) + static Elf32_Half running_arch_code=EM_SH; +#elif (defined RISCV) + static Elf32_Half running_arch_code=EM_RISCV; +#elif (defined LOONGARCH64) + static Elf32_Half running_arch_code=EM_LOONGARCH; +#else + #error Method os::dll_load requires that one of following is defined:\ + AARCH64, ALPHA, ARM, AMD64, IA32, IA64, LOONGARCH64, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, RISCV, S390, SH, __sparc +#endif + + // Identify compatibility class for VM's architecture and library's architecture + // Obtain string descriptions for architectures + + arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], nullptr}; + int running_arch_index=-1; + + for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) { + if (running_arch_code == arch_array[i].code) { + running_arch_index = i; + } + if (lib_arch.code == arch_array[i].code) { + lib_arch.compat_class = arch_array[i].compat_class; + lib_arch.name = arch_array[i].name; 
+ } + } + + assert(running_arch_index != -1, + "Didn't find running architecture code (running_arch_code) in arch_array"); + if (running_arch_index == -1) { + // Even though running architecture detection failed + // we may still continue with reporting dlerror() message + return nullptr; + } + + if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) { + if (lib_arch.name != nullptr) { + ::snprintf(diag_msg_buf, diag_msg_max_length-1, + " (Possible cause: can't load %s .so on a %s platform)", + lib_arch.name, arch_array[running_arch_index].name); + } else { + ::snprintf(diag_msg_buf, diag_msg_max_length-1, + " (Possible cause: can't load this .so (machine code=0x%x) on a %s platform)", + lib_arch.code, arch_array[running_arch_index].name); + } + return nullptr; + } + + if (lib_arch.endianness != arch_array[running_arch_index].endianness) { + ::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: endianness mismatch)"); + return nullptr; + } + + // ELF file class/capacity : 0 - invalid, 1 - 32bit, 2 - 64bit + if (lib_arch.elf_class > 2 || lib_arch.elf_class < 1) { + ::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: invalid ELF file class)"); + return nullptr; + } + + if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) { + ::snprintf(diag_msg_buf, diag_msg_max_length-1, + " (Possible cause: architecture word width mismatch, can't load %d-bit .so on a %d-bit platform)", + (int) lib_arch.elf_class * 32, arch_array[running_arch_index].elf_class * 32); + return nullptr; + } + + return nullptr; +} + +void * os::Linux::dlopen_helper(const char *filename, char *ebuf, int ebuflen) { +#ifndef IA32 + bool ieee_handling = IEEE_subnormal_handling_OK(); + if (!ieee_handling) { + Events::log_dll_message(nullptr, "IEEE subnormal handling check failed before loading %s", filename); + log_info(os)("IEEE subnormal handling check failed before loading %s", filename); + if (CheckJNICalls) { + tty->print_cr("WARNING: IEEE 
subnormal handling check failed before loading %s", filename); + Thread* current = Thread::current(); + if (current->is_Java_thread()) { + JavaThread::cast(current)->print_jni_stack(); + } + } + } + + // Save and restore the floating-point environment around dlopen(). + // There are known cases where global library initialization sets + // FPU flags that affect computation accuracy, for example, enabling + // Flush-To-Zero and Denormals-Are-Zero. Do not let those libraries + // break Java arithmetic. Unfortunately, this might affect libraries + // that might depend on these FPU features for performance and/or + // numerical "accuracy", but we need to protect Java semantics first + // and foremost. See JDK-8295159. + + // This workaround is ineffective on IA32 systems because the MXCSR + // register (which controls flush-to-zero mode) is not stored in the + // legacy fenv. + + fenv_t default_fenv; + int rtn = fegetenv(&default_fenv); + assert(rtn == 0, "fegetenv must succeed"); +#endif // IA32 + + void* result; + JFR_ONLY(NativeLibraryLoadEvent load_event(filename, &result);) + result = ::dlopen(filename, RTLD_LAZY); + if (result == nullptr) { + const char* error_report = ::dlerror(); + if (error_report == nullptr) { + error_report = "dlerror returned no error description"; + } + if (ebuf != nullptr && ebuflen > 0) { + ::strncpy(ebuf, error_report, ebuflen-1); + ebuf[ebuflen-1]='\0'; + } + Events::log_dll_message(nullptr, "Loading shared library %s failed, %s", filename, error_report); + log_info(os)("shared library load of %s failed, %s", filename, error_report); + JFR_ONLY(load_event.set_error_msg(error_report);) + } else { + Events::log_dll_message(nullptr, "Loaded shared library %s", filename); + log_info(os)("shared library load of %s was successful", filename); +#ifndef IA32 + // Quickly test to make sure subnormals are correctly handled. + if (! IEEE_subnormal_handling_OK()) { + // We just dlopen()ed a library that mangled the floating-point flags. 
+ // Attempt to fix things now. + JFR_ONLY(load_event.set_fp_env_correction_attempt(true);) + int rtn = fesetenv(&default_fenv); + assert(rtn == 0, "fesetenv must succeed"); + + if (IEEE_subnormal_handling_OK()) { + Events::log_dll_message(nullptr, "IEEE subnormal handling had to be corrected after loading %s", filename); + log_info(os)("IEEE subnormal handling had to be corrected after loading %s", filename); + JFR_ONLY(load_event.set_fp_env_correction_success(true);) + } else { + Events::log_dll_message(nullptr, "IEEE subnormal handling could not be corrected after loading %s", filename); + log_info(os)("IEEE subnormal handling could not be corrected after loading %s", filename); + if (CheckJNICalls) { + tty->print_cr("WARNING: IEEE subnormal handling could not be corrected after loading %s", filename); + Thread* current = Thread::current(); + if (current->is_Java_thread()) { + JavaThread::cast(current)->print_jni_stack(); + } + } + assert(false, "fesetenv didn't work"); + } + } +#endif // IA32 + } + return result; +} + +void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf, + int ebuflen) { + void * result = nullptr; + if (LoadExecStackDllInVMThread) { + result = dlopen_helper(filename, ebuf, ebuflen); + } + + // Since 7019808, libjvm.so is linked with -noexecstack. If the VM loads a + // library that requires an executable stack, or which does not have this + // stack attribute set, dlopen changes the stack attribute to executable. The + // read protection of the guard pages gets lost. + // + // Need to check _stack_is_executable again as multiple VM_LinuxDllLoad + // may have been queued at the same time. 
+ + if (!_stack_is_executable) { + for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { + StackOverflow* overflow_state = jt->stack_overflow_state(); + if (!overflow_state->stack_guard_zone_unused() && // Stack not yet fully initialized + overflow_state->stack_guards_enabled()) { // No pending stack overflow exceptions + if (!os::guard_memory((char *)jt->stack_end(), StackOverflow::stack_guard_zone_size())) { + warning("Attempt to reguard stack yellow zone failed."); + } + } + } + } + + return result; +} + +const char* os::Linux::dll_path(void* lib) { + struct link_map *lmap; + const char* l_path = nullptr; + assert(lib != nullptr, "dll_path parameter must not be null"); + + int res_dli = ::dlinfo(lib, RTLD_DI_LINKMAP, &lmap); + if (res_dli == 0) { + l_path = lmap->l_name; + } + return l_path; +} + +static unsigned count_newlines(const char* s) { + unsigned n = 0; + for (const char* s2 = strchr(s, '\n'); + s2 != nullptr; s2 = strchr(s2 + 1, '\n')) { + n++; + } + return n; +} + +static bool _print_ascii_file(const char* filename, outputStream* st, unsigned* num_lines = nullptr, const char* hdr = nullptr) { + int fd = ::open(filename, O_RDONLY); + if (fd == -1) { + return false; + } + + if (hdr != nullptr) { + st->print_cr("%s", hdr); + } + + char buf[33]; + ssize_t bytes; + buf[32] = '\0'; + unsigned lines = 0; + while ((bytes = ::read(fd, buf, sizeof(buf)-1)) > 0) { + st->print_raw(buf, bytes); + // count newlines + if (num_lines != nullptr) { + lines += count_newlines(buf); + } + } + + if (num_lines != nullptr) { + (*num_lines) = lines; + } + + ::close(fd); + + return true; +} + +static void _print_ascii_file_h(const char* header, const char* filename, outputStream* st, bool same_line = true) { + st->print("%s:%c", header, same_line ? 
' ' : '\n'); + if (!_print_ascii_file(filename, st)) { + st->print_cr(""); + } +} + +void os::print_dll_info(outputStream *st) { + st->print_cr("Dynamic libraries:"); + + char fname[32]; + pid_t pid = os::Linux::gettid(); + + jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid); + unsigned num = 0; + if (!_print_ascii_file(fname, st, &num)) { + st->print_cr("Can not get library information for pid = %d", pid); + } else { + st->print_cr("Total number of mappings: %u", num); + } +} + +struct loaded_modules_info_param { + os::LoadedModulesCallbackFunc callback; + void *param; +}; + +static int dl_iterate_callback(struct dl_phdr_info *info, size_t size, void *data) { + if ((info->dlpi_name == nullptr) || (*info->dlpi_name == '\0')) { + return 0; + } + + struct loaded_modules_info_param *callback_param = reinterpret_cast(data); + address base = nullptr; + address top = nullptr; + for (int idx = 0; idx < info->dlpi_phnum; idx++) { + const ElfW(Phdr) *phdr = info->dlpi_phdr + idx; + if (phdr->p_type == PT_LOAD) { + address raw_phdr_base = reinterpret_cast
(info->dlpi_addr + phdr->p_vaddr); + + address phdr_base = align_down(raw_phdr_base, phdr->p_align); + if ((base == nullptr) || (base > phdr_base)) { + base = phdr_base; + } + + address phdr_top = align_up(raw_phdr_base + phdr->p_memsz, phdr->p_align); + if ((top == nullptr) || (top < phdr_top)) { + top = phdr_top; + } + } + } + + return callback_param->callback(info->dlpi_name, base, top, callback_param->param); +} + +int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) { + struct loaded_modules_info_param callback_param = {callback, param}; + return dl_iterate_phdr(&dl_iterate_callback, &callback_param); +} + +void os::print_os_info_brief(outputStream* st) { + os::Linux::print_distro_info(st); + + os::Posix::print_uname_info(st); + + os::Linux::print_libversion_info(st); + +} + +void os::print_os_info(outputStream* st) { + st->print_cr("OS:"); + + os::Linux::print_distro_info(st); + + os::Posix::print_uname_info(st); + + os::Linux::print_uptime_info(st); + + // Print warning if unsafe chroot environment detected + if (unsafe_chroot_detected) { + st->print_cr("WARNING!! %s", unstable_chroot_error); + } + + os::Linux::print_libversion_info(st); + + os::Posix::print_rlimit_info(st); + + os::Posix::print_load_average(st); + st->cr(); + + os::Linux::print_system_memory_info(st); + st->cr(); + + os::Linux::print_process_memory_info(st); + st->cr(); + + os::Linux::print_proc_sys_info(st); + st->cr(); + + if (os::Linux::print_ld_preload_file(st)) { + st->cr(); + } + + if (os::Linux::print_container_info(st)) { + st->cr(); + } + + VM_Version::print_platform_virtualization_info(st); + + os::Linux::print_steal_info(st); +} + +// Try to identify popular distros. +// Most Linux distributions have a /etc/XXX-release file, which contains +// the OS version string. Newer Linux distributions have a /etc/lsb-release +// file that also contains the OS version string. Some have more than one +// /etc/XXX-release file (e.g. 
Mandrake has both /etc/mandrake-release and +// /etc/redhat-release.), so the order is important. +// Any Linux that is based on Redhat (i.e. Oracle, Mandrake, Sun JDS...) have +// their own specific XXX-release file as well as a redhat-release file. +// Because of this the XXX-release file needs to be searched for before the +// redhat-release file. +// Since Red Hat and SuSE have an lsb-release file that is not very descriptive the +// search for redhat-release / SuSE-release needs to be before lsb-release. +// Since the lsb-release file is the new standard it needs to be searched +// before the older style release files. +// Searching system-release (Red Hat) and os-release (other Linuxes) are a +// next to last resort. The os-release file is a new standard that contains +// distribution information and the system-release file seems to be an old +// standard that has been replaced by the lsb-release and os-release files. +// Searching for the debian_version file is the last resort. It contains +// an informative string like "6.0.6" or "wheezy/sid". Because of this +// "Debian " is printed before the contents of the debian_version file. + +const char* distro_files[] = { + "/etc/oracle-release", + "/etc/mandriva-release", + "/etc/mandrake-release", + "/etc/sun-release", + "/etc/redhat-release", + "/etc/lsb-release", + "/etc/turbolinux-release", + "/etc/gentoo-release", + "/etc/ltib-release", + "/etc/angstrom-version", + "/etc/system-release", + "/etc/os-release", + "/etc/SuSE-release", // Deprecated in favor of os-release since SuSE 12 + nullptr }; + +void os::Linux::print_distro_info(outputStream* st) { + for (int i = 0;; i++) { + const char* file = distro_files[i]; + if (file == nullptr) { + break; // done + } + // If file prints, we found it. 
+ if (_print_ascii_file(file, st)) { + return; + } + } + + if (file_exists("/etc/debian_version")) { + st->print("Debian "); + _print_ascii_file("/etc/debian_version", st); + } else { + st->print_cr("Linux"); + } +} + +static void parse_os_info_helper(FILE* fp, char* distro, size_t length, bool get_first_line) { + char buf[256]; + while (fgets(buf, sizeof(buf), fp)) { + // Edit out extra stuff in expected format + if (strstr(buf, "DISTRIB_DESCRIPTION=") != nullptr || strstr(buf, "PRETTY_NAME=") != nullptr) { + char* ptr = strstr(buf, "\""); // the name is in quotes + if (ptr != nullptr) { + ptr++; // go beyond first quote + char* nl = strchr(ptr, '\"'); + if (nl != nullptr) *nl = '\0'; + strncpy(distro, ptr, length); + } else { + ptr = strstr(buf, "="); + ptr++; // go beyond equals then + char* nl = strchr(ptr, '\n'); + if (nl != nullptr) *nl = '\0'; + strncpy(distro, ptr, length); + } + return; + } else if (get_first_line) { + char* nl = strchr(buf, '\n'); + if (nl != nullptr) *nl = '\0'; + strncpy(distro, buf, length); + return; + } + } + // print last line and close + char* nl = strchr(buf, '\n'); + if (nl != nullptr) *nl = '\0'; + strncpy(distro, buf, length); +} + +static void parse_os_info(char* distro, size_t length, const char* file) { + FILE* fp = os::fopen(file, "r"); + if (fp != nullptr) { + // if suse format, print out first line + bool get_first_line = (strcmp(file, "/etc/SuSE-release") == 0); + parse_os_info_helper(fp, distro, length, get_first_line); + fclose(fp); + } +} + +void os::get_summary_os_info(char* buf, size_t buflen) { + for (int i = 0;; i++) { + const char* file = distro_files[i]; + if (file == nullptr) { + break; // ran out of distro_files + } + if (file_exists(file)) { + parse_os_info(buf, buflen, file); + return; + } + } + // special case for debian + if (file_exists("/etc/debian_version")) { + strncpy(buf, "Debian ", buflen); + if (buflen > 7) { + parse_os_info(&buf[7], buflen-7, "/etc/debian_version"); + } + } else { + strncpy(buf, 
"Linux", buflen); + } +} + +void os::Linux::print_libversion_info(outputStream* st) { + // libc, pthread + st->print("libc: "); + st->print("%s ", os::Linux::libc_version()); + st->print("%s ", os::Linux::libpthread_version()); + st->cr(); +} + +void os::Linux::print_proc_sys_info(outputStream* st) { + _print_ascii_file_h("/proc/sys/kernel/threads-max (system-wide limit on the number of threads)", + "/proc/sys/kernel/threads-max", st); + _print_ascii_file_h("/proc/sys/vm/max_map_count (maximum number of memory map areas a process may have)", + "/proc/sys/vm/max_map_count", st); + _print_ascii_file_h("/proc/sys/kernel/pid_max (system-wide limit on number of process identifiers)", + "/proc/sys/kernel/pid_max", st); +} + +void os::Linux::print_system_memory_info(outputStream* st) { + _print_ascii_file_h("/proc/meminfo", "/proc/meminfo", st, false); + st->cr(); + + // some information regarding THPs; for details see + // https://www.kernel.org/doc/Documentation/vm/transhuge.txt + _print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/enabled", + "/sys/kernel/mm/transparent_hugepage/enabled", st); + _print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", + "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", st); + _print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/shmem_enabled", + "/sys/kernel/mm/transparent_hugepage/shmem_enabled", st); + _print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/defrag (defrag/compaction efforts parameter)", + "/sys/kernel/mm/transparent_hugepage/defrag", st); +} + +bool os::Linux::query_process_memory_info(os::Linux::meminfo_t* info) { + FILE* f = os::fopen("/proc/self/status", "r"); + const int num_values = sizeof(os::Linux::meminfo_t) / sizeof(size_t); + int num_found = 0; + char buf[256]; + info->vmsize = info->vmpeak = info->vmrss = info->vmhwm = info->vmswap = + info->rssanon = info->rssfile = info->rssshmem = -1; + if (f != nullptr) { + while (::fgets(buf, sizeof(buf), f) != nullptr && num_found < 
num_values) { + if ( (info->vmsize == -1 && sscanf(buf, "VmSize: " SSIZE_FORMAT " kB", &info->vmsize) == 1) || + (info->vmpeak == -1 && sscanf(buf, "VmPeak: " SSIZE_FORMAT " kB", &info->vmpeak) == 1) || + (info->vmswap == -1 && sscanf(buf, "VmSwap: " SSIZE_FORMAT " kB", &info->vmswap) == 1) || + (info->vmhwm == -1 && sscanf(buf, "VmHWM: " SSIZE_FORMAT " kB", &info->vmhwm) == 1) || + (info->vmrss == -1 && sscanf(buf, "VmRSS: " SSIZE_FORMAT " kB", &info->vmrss) == 1) || + (info->rssanon == -1 && sscanf(buf, "RssAnon: " SSIZE_FORMAT " kB", &info->rssanon) == 1) || // Needs Linux 4.5 + (info->rssfile == -1 && sscanf(buf, "RssFile: " SSIZE_FORMAT " kB", &info->rssfile) == 1) || // Needs Linux 4.5 + (info->rssshmem == -1 && sscanf(buf, "RssShmem: " SSIZE_FORMAT " kB", &info->rssshmem) == 1) // Needs Linux 4.5 + ) + { + num_found ++; + } + } + fclose(f); + return true; + } + return false; +} + +#ifdef __GLIBC__ +// For Glibc, print a one-liner with the malloc tunables. +// Most important and popular is MALLOC_ARENA_MAX, but we are +// thorough and print them all. +static void print_glibc_malloc_tunables(outputStream* st) { + static const char* var[] = { + // the new variant + "GLIBC_TUNABLES", + // legacy variants + "MALLOC_CHECK_", "MALLOC_TOP_PAD_", "MALLOC_PERTURB_", + "MALLOC_MMAP_THRESHOLD_", "MALLOC_TRIM_THRESHOLD_", + "MALLOC_MMAP_MAX_", "MALLOC_ARENA_TEST", "MALLOC_ARENA_MAX", + nullptr}; + st->print("glibc malloc tunables: "); + bool printed = false; + for (int i = 0; var[i] != nullptr; i ++) { + const char* const val = ::getenv(var[i]); + if (val != nullptr) { + st->print("%s%s=%s", (printed ? ", " : ""), var[i], val); + printed = true; + } + } + if (!printed) { + st->print("(default)"); + } +} +#endif // __GLIBC__ + +void os::Linux::print_process_memory_info(outputStream* st) { + + st->print_cr("Process Memory:"); + + // Print virtual and resident set size; peak values; swap; and for + // rss its components if the kernel is recent enough. 
+ meminfo_t info; + if (query_process_memory_info(&info)) { + st->print_cr("Virtual Size: " SSIZE_FORMAT "K (peak: " SSIZE_FORMAT "K)", info.vmsize, info.vmpeak); + st->print("Resident Set Size: " SSIZE_FORMAT "K (peak: " SSIZE_FORMAT "K)", info.vmrss, info.vmhwm); + if (info.rssanon != -1) { // requires kernel >= 4.5 + st->print(" (anon: " SSIZE_FORMAT "K, file: " SSIZE_FORMAT "K, shmem: " SSIZE_FORMAT "K)", + info.rssanon, info.rssfile, info.rssshmem); + } + st->cr(); + if (info.vmswap != -1) { // requires kernel >= 2.6.34 + st->print_cr("Swapped out: " SSIZE_FORMAT "K", info.vmswap); + } + } else { + st->print_cr("Could not open /proc/self/status to get process memory related information"); + } + + // glibc only: + // - Print outstanding allocations using mallinfo + // - Print glibc tunables +#ifdef __GLIBC__ + size_t total_allocated = 0; + size_t free_retained = 0; + bool might_have_wrapped = false; + glibc_mallinfo mi; + os::Linux::get_mallinfo(&mi, &might_have_wrapped); + total_allocated = mi.uordblks + mi.hblkhd; + free_retained = mi.fordblks; +#ifdef _LP64 + // If legacy mallinfo(), we can still print the values if we are sure they cannot have wrapped. + might_have_wrapped = might_have_wrapped && (info.vmsize * K) > UINT_MAX; +#endif + st->print_cr("C-Heap outstanding allocations: " SIZE_FORMAT "K, retained: " SIZE_FORMAT "K%s", + total_allocated / K, free_retained / K, + might_have_wrapped ? 
" (may have wrapped)" : ""); + // Tunables + print_glibc_malloc_tunables(st); + st->cr(); +#endif +} + +bool os::Linux::print_ld_preload_file(outputStream* st) { + return _print_ascii_file("/etc/ld.so.preload", st, nullptr, "/etc/ld.so.preload:"); +} + +void os::Linux::print_uptime_info(outputStream* st) { + struct sysinfo sinfo; + int ret = sysinfo(&sinfo); + if (ret == 0) { + os::print_dhm(st, "OS uptime:", (long) sinfo.uptime); + } +} + +bool os::Linux::print_container_info(outputStream* st) { + if (!OSContainer::is_containerized()) { + st->print_cr("container information not found."); + return false; + } + + st->print_cr("container (cgroup) information:"); + + const char *p_ct = OSContainer::container_type(); + st->print_cr("container_type: %s", p_ct != nullptr ? p_ct : "not supported"); + + char *p = OSContainer::cpu_cpuset_cpus(); + st->print_cr("cpu_cpuset_cpus: %s", p != nullptr ? p : "not supported"); + free(p); + + p = OSContainer::cpu_cpuset_memory_nodes(); + st->print_cr("cpu_memory_nodes: %s", p != nullptr ? p : "not supported"); + free(p); + + int i = OSContainer::active_processor_count(); + st->print("active_processor_count: "); + if (i > 0) { + if (ActiveProcessorCount > 0) { + st->print_cr("%d, but overridden by -XX:ActiveProcessorCount %d", i, ActiveProcessorCount); + } else { + st->print_cr("%d", i); + } + } else { + st->print_cr("not supported"); + } + + i = OSContainer::cpu_quota(); + st->print("cpu_quota: "); + if (i > 0) { + st->print_cr("%d", i); + } else { + st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no quota"); + } + + i = OSContainer::cpu_period(); + st->print("cpu_period: "); + if (i > 0) { + st->print_cr("%d", i); + } else { + st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no period"); + } + + i = OSContainer::cpu_shares(); + st->print("cpu_shares: "); + if (i > 0) { + st->print_cr("%d", i); + } else { + st->print_cr("%s", i == OSCONTAINER_ERROR ? 
"not supported" : "no shares"); + } + + OSContainer::print_container_helper(st, OSContainer::memory_limit_in_bytes(), "memory_limit_in_bytes"); + OSContainer::print_container_helper(st, OSContainer::memory_and_swap_limit_in_bytes(), "memory_and_swap_limit_in_bytes"); + OSContainer::print_container_helper(st, OSContainer::memory_soft_limit_in_bytes(), "memory_soft_limit_in_bytes"); + OSContainer::print_container_helper(st, OSContainer::memory_usage_in_bytes(), "memory_usage_in_bytes"); + OSContainer::print_container_helper(st, OSContainer::memory_max_usage_in_bytes(), "memory_max_usage_in_bytes"); + OSContainer::print_container_helper(st, OSContainer::rss_usage_in_bytes(), "rss_usage_in_bytes"); + OSContainer::print_container_helper(st, OSContainer::cache_usage_in_bytes(), "cache_usage_in_bytes"); + + OSContainer::print_version_specific_info(st); + + jlong j = OSContainer::pids_max(); + st->print("maximum number of tasks: "); + if (j > 0) { + st->print_cr(JLONG_FORMAT, j); + } else { + st->print_cr("%s", j == OSCONTAINER_ERROR ? 
"not supported" : "unlimited"); + } + + j = OSContainer::pids_current(); + st->print("current number of tasks: "); + if (j > 0) { + st->print_cr(JLONG_FORMAT, j); + } else { + if (j == OSCONTAINER_ERROR) { + st->print_cr("not supported"); + } + } + + return true; +} + +void os::Linux::print_steal_info(outputStream* st) { + if (has_initial_tick_info) { + CPUPerfTicks pticks; + bool res = os::Linux::get_tick_information(&pticks, -1); + + if (res && pticks.has_steal_ticks) { + uint64_t steal_ticks_difference = pticks.steal - initial_steal_ticks; + uint64_t total_ticks_difference = pticks.total - initial_total_ticks; + double steal_ticks_perc = 0.0; + if (total_ticks_difference != 0) { + steal_ticks_perc = (double) steal_ticks_difference / (double)total_ticks_difference; + } + st->print_cr("Steal ticks since vm start: " UINT64_FORMAT, steal_ticks_difference); + st->print_cr("Steal ticks percentage since vm start:%7.3f", steal_ticks_perc); + } + } +} + +void os::print_memory_info(outputStream* st) { + + st->print("Memory:"); + st->print(" " SIZE_FORMAT "k page", os::vm_page_size()>>10); + + // values in struct sysinfo are "unsigned long" + struct sysinfo si; + sysinfo(&si); + + st->print(", physical " UINT64_FORMAT "k", + os::physical_memory() >> 10); + st->print("(" UINT64_FORMAT "k free)", + os::available_memory() >> 10); + st->print(", swap " UINT64_FORMAT "k", + ((jlong)si.totalswap * si.mem_unit) >> 10); + st->print("(" UINT64_FORMAT "k free)", + ((jlong)si.freeswap * si.mem_unit) >> 10); + st->cr(); + st->print("Page Sizes: "); + _page_sizes.print_on(st); + st->cr(); +} + +// Print the first "model name" line and the first "flags" line +// that we find and nothing more. We assume "model name" comes +// before "flags" so if we find a second "model name", then the +// "flags" field is considered missing. 
+static bool print_model_name_and_flags(outputStream* st, char* buf, size_t buflen) { +#if defined(IA32) || defined(AMD64) + // Other platforms have less repetitive cpuinfo files + FILE *fp = os::fopen("/proc/cpuinfo", "r"); + if (fp) { + bool model_name_printed = false; + while (!feof(fp)) { + if (fgets(buf, (int)buflen, fp)) { + // Assume model name comes before flags + if (strstr(buf, "model name") != nullptr) { + if (!model_name_printed) { + st->print_raw("CPU Model and flags from /proc/cpuinfo:\n"); + st->print_raw(buf); + model_name_printed = true; + } else { + // model name printed but not flags? Odd, just return + fclose(fp); + return true; + } + } + // print the flags line too + if (strstr(buf, "flags") != nullptr) { + st->print_raw(buf); + fclose(fp); + return true; + } + } + } + fclose(fp); + } +#endif // x86 platforms + return false; +} + +// additional information about CPU e.g. available frequency ranges +static void print_sys_devices_cpu_info(outputStream* st) { + _print_ascii_file_h("Online cpus", "/sys/devices/system/cpu/online", st); + _print_ascii_file_h("Offline cpus", "/sys/devices/system/cpu/offline", st); + + if (ExtensiveErrorReports) { + // cache related info (cpu 0, should be similar for other CPUs) + for (unsigned int i=0; i < 10; i++) { // handle max. 
10 cache entries + char hbuf_level[60]; + char hbuf_type[60]; + char hbuf_size[60]; + char hbuf_coherency_line_size[80]; + snprintf(hbuf_level, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/level", i); + snprintf(hbuf_type, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/type", i); + snprintf(hbuf_size, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/size", i); + snprintf(hbuf_coherency_line_size, 80, "/sys/devices/system/cpu/cpu0/cache/index%u/coherency_line_size", i); + if (os::file_exists(hbuf_level)) { + _print_ascii_file_h("cache level", hbuf_level, st); + _print_ascii_file_h("cache type", hbuf_type, st); + _print_ascii_file_h("cache size", hbuf_size, st); + _print_ascii_file_h("cache coherency line size", hbuf_coherency_line_size, st); + } + } + } + + // we miss the cpufreq entries on Power and s390x +#if defined(IA32) || defined(AMD64) + _print_ascii_file_h("BIOS frequency limitation", "/sys/devices/system/cpu/cpu0/cpufreq/bios_limit", st); + _print_ascii_file_h("Frequency switch latency (ns)", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_transition_latency", st); + _print_ascii_file_h("Available cpu frequencies", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", st); + // min and max should be in the Available range but still print them (not all info might be available for all kernels) + if (ExtensiveErrorReports) { + _print_ascii_file_h("Maximum cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", st); + _print_ascii_file_h("Minimum cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq", st); + _print_ascii_file_h("Current cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq", st); + } + // governors are power schemes, see https://wiki.archlinux.org/index.php/CPU_frequency_scaling + if (ExtensiveErrorReports) { + _print_ascii_file_h("Available governors", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors", st); + } + _print_ascii_file_h("Current governor", 
"/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", st); + // Core performance boost, see https://www.kernel.org/doc/Documentation/cpu-freq/boost.txt + // Raise operating frequency of some cores in a multi-core package if certain conditions apply, e.g. + // whole chip is not fully utilized + _print_ascii_file_h("Core performance/turbo boost", "/sys/devices/system/cpu/cpufreq/boost", st); +#endif +} + +void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) { + // Only print the model name if the platform provides this as a summary + if (!print_model_name_and_flags(st, buf, buflen)) { + _print_ascii_file_h("/proc/cpuinfo", "/proc/cpuinfo", st, false); + } + st->cr(); + print_sys_devices_cpu_info(st); +} + +#if INCLUDE_JFR + +void os::jfr_report_memory_info() { + os::Linux::meminfo_t info; + if (os::Linux::query_process_memory_info(&info)) { + // Send the RSS JFR event + EventResidentSetSize event; + event.set_size(info.vmrss * K); + event.set_peak(info.vmhwm * K); + event.commit(); + } else { + // Log a warning + static bool first_warning = true; + if (first_warning) { + log_warning(jfr)("Error fetching RSS values: query_process_memory_info failed"); + first_warning = false; + } + } +} + +#endif // INCLUDE_JFR + +#if defined(AMD64) || defined(IA32) || defined(X32) +const char* search_string = "model name"; +#elif defined(M68K) +const char* search_string = "CPU"; +#elif defined(PPC64) +const char* search_string = "cpu"; +#elif defined(S390) +const char* search_string = "machine ="; +#elif defined(SPARC) +const char* search_string = "cpu"; +#else +const char* search_string = "Processor"; +#endif + +// Parses the cpuinfo file for string representing the model name. 
+void os::get_summary_cpu_info(char* cpuinfo, size_t length) { + FILE* fp = os::fopen("/proc/cpuinfo", "r"); + if (fp != nullptr) { + while (!feof(fp)) { + char buf[256]; + if (fgets(buf, sizeof(buf), fp)) { + char* start = strstr(buf, search_string); + if (start != nullptr) { + char *ptr = start + strlen(search_string); + char *end = buf + strlen(buf); + while (ptr != end) { + // skip whitespace and colon for the rest of the name. + if (*ptr != ' ' && *ptr != '\t' && *ptr != ':') { + break; + } + ptr++; + } + if (ptr != end) { + // reasonable string, get rid of newline and keep the rest + char* nl = strchr(buf, '\n'); + if (nl != nullptr) *nl = '\0'; + strncpy(cpuinfo, ptr, length); + fclose(fp); + return; + } + } + } + } + fclose(fp); + } + // cpuinfo not found or parsing failed, just print generic string. The entire + // /proc/cpuinfo file will be printed later in the file (or enough of it for x86) +#if defined(AARCH64) + strncpy(cpuinfo, "AArch64", length); +#elif defined(AMD64) + strncpy(cpuinfo, "x86_64", length); +#elif defined(ARM) // Order wrt. AARCH64 is relevant! + strncpy(cpuinfo, "ARM", length); +#elif defined(IA32) + strncpy(cpuinfo, "x86_32", length); +#elif defined(IA64) + strncpy(cpuinfo, "IA64", length); +#elif defined(PPC) + strncpy(cpuinfo, "PPC64", length); +#elif defined(RISCV) + strncpy(cpuinfo, LP64_ONLY("RISCV64") NOT_LP64("RISCV32"), length); +#elif defined(S390) + strncpy(cpuinfo, "S390", length); +#elif defined(SPARC) + strncpy(cpuinfo, "sparcv9", length); +#elif defined(ZERO_LIBARCH) + strncpy(cpuinfo, ZERO_LIBARCH, length); +#else + strncpy(cpuinfo, "unknown", length); +#endif +} + +static char saved_jvm_path[MAXPATHLEN] = {0}; + +// Find the full path to the current module, libjvm.so +void os::jvm_path(char *buf, jint buflen) { + // Error checking. + if (buflen < MAXPATHLEN) { + assert(false, "must use a large-enough buffer"); + buf[0] = '\0'; + return; + } + // Lazy resolve the path to current module. 
+ if (saved_jvm_path[0] != 0) { + strcpy(buf, saved_jvm_path); + return; + } + + char dli_fname[MAXPATHLEN]; + dli_fname[0] = '\0'; + bool ret = dll_address_to_library_name( + CAST_FROM_FN_PTR(address, os::jvm_path), + dli_fname, sizeof(dli_fname), nullptr); + assert(ret, "cannot locate libjvm"); + char *rp = nullptr; + if (ret && dli_fname[0] != '\0') { + rp = os::Posix::realpath(dli_fname, buf, buflen); + } + if (rp == nullptr) { + return; + } + + if (Arguments::sun_java_launcher_is_altjvm()) { + // Support for the java launcher's '-XXaltjvm=' option. Typical + // value for buf is "/jre/lib//libjvm.so". + // If "/jre/lib/" appears at the right place in the string, then + // assume we are installed in a JDK and we're done. Otherwise, check + // for a JAVA_HOME environment variable and fix up the path so it + // looks like libjvm.so is installed there (append a fake suffix + // hotspot/libjvm.so). + const char *p = buf + strlen(buf) - 1; + for (int count = 0; p > buf && count < 5; ++count) { + for (--p; p > buf && *p != '/'; --p) + /* empty */ ; + } + + if (strncmp(p, "/jre/lib/", 9) != 0) { + // Look for JAVA_HOME in the environment. + char* java_home_var = ::getenv("JAVA_HOME"); + if (java_home_var != nullptr && java_home_var[0] != 0) { + char* jrelib_p; + int len; + + // Check the current module name "libjvm.so". 
+ p = strrchr(buf, '/'); + if (p == nullptr) { + return; + } + assert(strstr(p, "/libjvm") == p, "invalid library name"); + + rp = os::Posix::realpath(java_home_var, buf, buflen); + if (rp == nullptr) { + return; + } + + // determine if this is a legacy image or modules image + // modules image doesn't have "jre" subdirectory + len = checked_cast(strlen(buf)); + assert(len < buflen, "Ran out of buffer room"); + jrelib_p = buf + len; + snprintf(jrelib_p, buflen-len, "/jre/lib"); + if (0 != access(buf, F_OK)) { + snprintf(jrelib_p, buflen-len, "/lib"); + } + + if (0 == access(buf, F_OK)) { + // Use current module name "libjvm.so" + len = (int)strlen(buf); + snprintf(buf + len, buflen-len, "/hotspot/libjvm.so"); + } else { + // Go back to path of .so + rp = os::Posix::realpath(dli_fname, buf, buflen); + if (rp == nullptr) { + return; + } + } + } + } + } + + strncpy(saved_jvm_path, buf, MAXPATHLEN); + saved_jvm_path[MAXPATHLEN - 1] = '\0'; +} + +//////////////////////////////////////////////////////////////////////////////// +// Virtual Memory + +// Rationale behind this function: +// current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable +// mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get +// samples for JITted code. 
Here we create private executable mapping over the code cache +// and then we can use standard (well, almost, as mapping can change) way to provide +// info for the reporting script by storing timestamp and location of symbol +void linux_wrap_code(char* base, size_t size) { + static volatile jint cnt = 0; + + if (!UseOprofile) { + return; + } + + char buf[PATH_MAX+1]; + int num = Atomic::add(&cnt, 1); + + snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d", + os::get_temp_directory(), os::current_process_id(), num); + unlink(buf); + + int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU); + + if (fd != -1) { + off_t rv = ::lseek(fd, size-2, SEEK_SET); + if (rv != (off_t)-1) { + if (::write(fd, "", 1) == 1) { + mmap(base, size, + PROT_READ|PROT_WRITE|PROT_EXEC, + MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0); + } + } + ::close(fd); + unlink(buf); + } +} + +static bool recoverable_mmap_error(int err) { + // See if the error is one we can let the caller handle. This + // list of errno values comes from JBS-6843484. I can't find a + // Linux man page that documents this specific set of errno + // values so while this list currently matches Solaris, it may + // change as we gain experience with this failure mode. + switch (err) { + case EBADF: + case EINVAL: + case ENOTSUP: + // let the caller deal with these errors + return true; + + default: + // Any remaining errors on this OS can cause our reserved mapping + // to be lost. That can cause confusion where different data + // structures think they have the same memory mapped. The worst + // scenario is if both the VM and a library think they have the + // same memory mapped. 
+ return false; + } +} + +static void warn_fail_commit_memory(char* addr, size_t size, bool exec, + int err) { + warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT + ", %d) failed; error='%s' (errno=%d)", p2i(addr), size, exec, + os::strerror(err), err); +} + +static void warn_fail_commit_memory(char* addr, size_t size, + size_t alignment_hint, bool exec, + int err) { + warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT + ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", p2i(addr), size, + alignment_hint, exec, os::strerror(err), err); +} + +// NOTE: Linux kernel does not really reserve the pages for us. +// All it does is to check if there are enough free pages +// left at the time of mmap(). This could be a potential +// problem. +int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) { + int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; + uintptr_t res = (uintptr_t) ::mmap(addr, size, prot, + MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0); + if (res != (uintptr_t) MAP_FAILED) { + if (UseNUMAInterleaving) { + numa_make_global(addr, size); + } + return 0; + } + + int err = errno; // save errno from mmap() call above + + if (!recoverable_mmap_error(err)) { + warn_fail_commit_memory(addr, size, exec, err); + vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "committing reserved memory."); + } + + return err; +} + +bool os::pd_commit_memory(char* addr, size_t size, bool exec) { + return os::Linux::commit_memory_impl(addr, size, exec) == 0; +} + +void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, + const char* mesg) { + assert(mesg != nullptr, "mesg must be specified"); + int err = os::Linux::commit_memory_impl(addr, size, exec); + if (err != 0) { + // the caller wants all commit errors to exit with the specified mesg: + warn_fail_commit_memory(addr, size, exec, err); + vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg); + } +} + +// Define MAP_HUGETLB here so we can build HotSpot on 
old systems. +#ifndef MAP_HUGETLB + #define MAP_HUGETLB 0x40000 +#endif + +// If mmap flags are set with MAP_HUGETLB and the system supports multiple +// huge page sizes, flag bits [26:31] can be used to encode the log2 of the +// desired huge page size. Otherwise, the system's default huge page size will be used. +// See mmap(2) man page for more info (since Linux 3.8). +// https://lwn.net/Articles/533499/ +#ifndef MAP_HUGE_SHIFT + #define MAP_HUGE_SHIFT 26 +#endif + +// Define MADV_HUGEPAGE here so we can build HotSpot on old systems. +#ifndef MADV_HUGEPAGE + #define MADV_HUGEPAGE 14 +#endif + +// Note that the value for MAP_FIXED_NOREPLACE differs between architectures, but all architectures +// supported by OpenJDK share the same flag value. +#define MAP_FIXED_NOREPLACE_value 0x100000 +#ifndef MAP_FIXED_NOREPLACE + #define MAP_FIXED_NOREPLACE MAP_FIXED_NOREPLACE_value +#else + // Sanity-check our assumed default value if we build with a new enough libc. + static_assert(MAP_FIXED_NOREPLACE == MAP_FIXED_NOREPLACE_value, "MAP_FIXED_NOREPLACE != MAP_FIXED_NOREPLACE_value"); +#endif + +int os::Linux::commit_memory_impl(char* addr, size_t size, + size_t alignment_hint, bool exec) { + int err = os::Linux::commit_memory_impl(addr, size, exec); + if (err == 0) { + realign_memory(addr, size, alignment_hint); + } + return err; +} + +bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, + bool exec) { + return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0; +} + +void os::pd_commit_memory_or_exit(char* addr, size_t size, + size_t alignment_hint, bool exec, + const char* mesg) { + assert(mesg != nullptr, "mesg must be specified"); + int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec); + if (err != 0) { + // the caller wants all commit errors to exit with the specified mesg: + warn_fail_commit_memory(addr, size, alignment_hint, exec, err); + vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg); + } +} + +void 
os::Linux::madvise_transparent_huge_pages(void* addr, size_t bytes) { + // We don't check the return value: madvise(MADV_HUGEPAGE) may not + // be supported or the memory may already be backed by huge pages. + ::madvise(addr, bytes, MADV_HUGEPAGE); +} + +void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { + if (Linux::should_madvise_anonymous_thps() && alignment_hint > vm_page_size()) { + Linux::madvise_transparent_huge_pages(addr, bytes); + } +} + +void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { + // This method works by doing an mmap over an existing mmaping and effectively discarding + // the existing pages. However it won't work for SHM-based large pages that cannot be + // uncommitted at all. We don't do anything in this case to avoid creating a segment with + // small pages on top of the SHM segment. This method always works for small pages, so we + // allow that in any case. + if (alignment_hint <= os::vm_page_size() || can_commit_large_page_memory()) { + commit_memory(addr, bytes, alignment_hint, !ExecMem); + } +} + +void os::numa_make_global(char *addr, size_t bytes) { + Linux::numa_interleave_memory(addr, bytes); +} + +// Define for numa_set_bind_policy(int). Setting the argument to 0 will set the +// bind policy to MPOL_PREFERRED for the current thread. +#define USE_MPOL_PREFERRED 0 + +void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { + // To make NUMA and large pages more robust when both enabled, we need to ease + // the requirements on where the memory should be allocated. MPOL_BIND is the + // default policy and it will force memory to be allocated on the specified + // node. Changing this to MPOL_PREFERRED will prefer to allocate the memory on + // the specified node, but will not force it. Using this policy will prevent + // getting SIGBUS when trying to allocate large pages on NUMA nodes with no + // free large pages. 
+ Linux::numa_set_bind_policy(USE_MPOL_PREFERRED); + Linux::numa_tonode_memory(addr, bytes, lgrp_hint); +} + +bool os::numa_topology_changed() { return false; } + +size_t os::numa_get_groups_num() { + // Return just the number of nodes in which it's possible to allocate memory + // (in numa terminology, configured nodes). + return Linux::numa_num_configured_nodes(); +} + +int os::numa_get_group_id() { + int cpu_id = Linux::sched_getcpu(); + if (cpu_id != -1) { + int lgrp_id = Linux::get_node_by_cpu(cpu_id); + if (lgrp_id != -1) { + return lgrp_id; + } + } + return 0; +} + +int os::numa_get_group_id_for_address(const void* address) { + void** pages = const_cast(&address); + int id = -1; + + if (os::Linux::numa_move_pages(0, 1, pages, nullptr, &id, 0) == -1) { + return -1; + } + if (id < 0) { + return -1; + } + return id; +} + +bool os::numa_get_group_ids_for_range(const void** addresses, int* lgrp_ids, size_t count) { + void** pages = const_cast(addresses); + return os::Linux::numa_move_pages(0, count, pages, nullptr, lgrp_ids, 0) == 0; +} + +int os::Linux::get_existing_num_nodes() { + int node; + int highest_node_number = Linux::numa_max_node(); + int num_nodes = 0; + + // Get the total number of nodes in the system including nodes without memory. + for (node = 0; node <= highest_node_number; node++) { + if (is_node_in_existing_nodes(node)) { + num_nodes++; + } + } + return num_nodes; +} + +size_t os::numa_get_leaf_groups(uint *ids, size_t size) { + int highest_node_number = Linux::numa_max_node(); + size_t i = 0; + + // Map all node ids in which it is possible to allocate memory. Also nodes are + // not always consecutively available, i.e. available from 0 to the highest + // node number. If the nodes have been bound explicitly using numactl membind, + // then allocate memory from those nodes only. 
+ for (int node = 0; node <= highest_node_number; node++) { + if (Linux::is_node_in_bound_nodes(node)) { + ids[i++] = checked_cast(node); + } + } + return i; +} + +int os::Linux::sched_getcpu_syscall(void) { + unsigned int cpu = 0; + long retval = -1; + +#if defined(IA32) + #ifndef SYS_getcpu + #define SYS_getcpu 318 + #endif + retval = syscall(SYS_getcpu, &cpu, nullptr, nullptr); +#elif defined(AMD64) +// Unfortunately we have to bring all these macros here from vsyscall.h +// to be able to compile on old linuxes. + #define __NR_vgetcpu 2 + #define VSYSCALL_START (-10UL << 20) + #define VSYSCALL_SIZE 1024 + #define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr)) + typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache); + vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu); + retval = vgetcpu(&cpu, nullptr, nullptr); +#endif + + return (retval == -1) ? -1 : cpu; +} + +void os::Linux::sched_getcpu_init() { + // sched_getcpu() should be in libc. + set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, + dlsym(RTLD_DEFAULT, "sched_getcpu"))); + + // If it's not, try a direct syscall. + if (sched_getcpu() == -1) { + set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, + (void*)&sched_getcpu_syscall)); + } + + if (sched_getcpu() == -1) { + vm_exit_during_initialization("getcpu(2) system call not supported by kernel"); + } +} + +// Something to do with the numa-aware allocator needs these symbols +extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { } +extern "C" JNIEXPORT void numa_error(char *where) { } + +// Handle request to load libnuma symbol version 1.1 (API v1). If it fails +// load symbol from base version instead. +void* os::Linux::libnuma_dlsym(void* handle, const char *name) { + void *f = dlvsym(handle, name, "libnuma_1.1"); + if (f == nullptr) { + f = dlsym(handle, name); + } + return f; +} + +// Handle request to load libnuma symbol version 1.2 (API v2) only. 
+// Return null if the symbol is not defined in this particular version. +void* os::Linux::libnuma_v2_dlsym(void* handle, const char* name) { + return dlvsym(handle, name, "libnuma_1.2"); +} + +// Check numa dependent syscalls +static bool numa_syscall_check() { + // NUMA APIs depend on several syscalls. E.g., get_mempolicy is required for numa_get_membind and + // numa_get_interleave_mask. But these dependent syscalls can be unsupported for various reasons. + // Especially in dockers, get_mempolicy is not allowed with the default configuration. So it's necessary + // to check whether the syscalls are available. Currently, only get_mempolicy is checked since checking + // others like mbind would cause unexpected side effects. +#ifdef SYS_get_mempolicy + int dummy = 0; + if (syscall(SYS_get_mempolicy, &dummy, nullptr, 0, (void*)&dummy, 3) == -1) { + return false; + } +#endif + + return true; +} + +bool os::Linux::libnuma_init() { + // Requires sched_getcpu() and numa dependent syscalls support + if ((sched_getcpu() != -1) && numa_syscall_check()) { + void *handle = dlopen("libnuma.so.1", RTLD_LAZY); + if (handle != nullptr) { + set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t, + libnuma_dlsym(handle, "numa_node_to_cpus"))); + set_numa_node_to_cpus_v2(CAST_TO_FN_PTR(numa_node_to_cpus_v2_func_t, + libnuma_v2_dlsym(handle, "numa_node_to_cpus"))); + set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t, + libnuma_dlsym(handle, "numa_max_node"))); + set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t, + libnuma_dlsym(handle, "numa_num_configured_nodes"))); + set_numa_available(CAST_TO_FN_PTR(numa_available_func_t, + libnuma_dlsym(handle, "numa_available"))); + set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t, + libnuma_dlsym(handle, "numa_tonode_memory"))); + set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t, + libnuma_dlsym(handle, "numa_interleave_memory"))); + 
set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t, + libnuma_v2_dlsym(handle, "numa_interleave_memory"))); + set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t, + libnuma_dlsym(handle, "numa_set_bind_policy"))); + set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t, + libnuma_dlsym(handle, "numa_bitmask_isbitset"))); + set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t, + libnuma_dlsym(handle, "numa_distance"))); + set_numa_get_membind(CAST_TO_FN_PTR(numa_get_membind_func_t, + libnuma_v2_dlsym(handle, "numa_get_membind"))); + set_numa_get_interleave_mask(CAST_TO_FN_PTR(numa_get_interleave_mask_func_t, + libnuma_v2_dlsym(handle, "numa_get_interleave_mask"))); + set_numa_move_pages(CAST_TO_FN_PTR(numa_move_pages_func_t, + libnuma_dlsym(handle, "numa_move_pages"))); + set_numa_set_preferred(CAST_TO_FN_PTR(numa_set_preferred_func_t, + libnuma_dlsym(handle, "numa_set_preferred"))); + + if (numa_available() != -1) { + set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes")); + set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr")); + set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr")); + set_numa_interleave_bitmask(_numa_get_interleave_mask()); + set_numa_membind_bitmask(_numa_get_membind()); + // Create an index -> node mapping, since nodes are not always consecutive + _nindex_to_node = new (mtInternal) GrowableArray(0, mtInternal); + rebuild_nindex_to_node_map(); + // Create a cpu -> node mapping + _cpu_to_node = new (mtInternal) GrowableArray(0, mtInternal); + rebuild_cpu_to_node_map(); + return true; + } + } + } + return false; +} + +size_t os::Linux::default_guard_size(os::ThreadType thr_type) { + + if (THPStackMitigation) { + // If THPs are unconditionally enabled, the following scenario can lead to huge RSS + // - parent thread spawns, in quick succession, multiple child threads + // - child threads are slow to start + // 
- thread stacks of future child threads are adjacent and get merged into one large VMA + // by the kernel, and subsequently transformed into huge pages by khugepaged + // - child threads come up, place JVM guard pages, thus splinter the large VMA, splinter + // the huge pages into many (still paged-in) small pages. + // The result of that sequence are thread stacks that are fully paged-in even though the + // threads did not even start yet. + // We prevent that by letting the glibc allocate a guard page, which causes a VMA with different + // permission bits to separate two ajacent thread stacks and therefore prevent merging stacks + // into one VMA. + // + // Yes, this means we have two guard sections - the glibc and the JVM one - per thread. But the + // cost for that one extra protected page is dwarfed from a large win in performance and memory + // that avoiding interference by khugepaged buys us. + return os::vm_page_size(); + } + + // Creating guard page is very expensive. Java thread has HotSpot + // guard pages, only enable glibc guard page for non-Java threads. + // (Remember: compiler thread is a Java thread, too!) + return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : os::vm_page_size()); +} + +void os::Linux::rebuild_nindex_to_node_map() { + int highest_node_number = Linux::numa_max_node(); + + nindex_to_node()->clear(); + for (int node = 0; node <= highest_node_number; node++) { + if (Linux::is_node_in_existing_nodes(node)) { + nindex_to_node()->append(node); + } + } +} + +// rebuild_cpu_to_node_map() constructs a table mapping cpud id to node id. +// The table is later used in get_node_by_cpu(). 
+void os::Linux::rebuild_cpu_to_node_map() { + const int NCPUS = 32768; // Since the buffer size computation is very obscure + // in libnuma (possible values are starting from 16, + // and continuing up with every other power of 2, but less + // than the maximum number of CPUs supported by kernel), and + // is a subject to change (in libnuma version 2 the requirements + // are more reasonable) we'll just hardcode the number they use + // in the library. + constexpr int BitsPerCLong = (int)sizeof(long) * CHAR_BIT; + + int cpu_num = processor_count(); + int cpu_map_size = NCPUS / BitsPerCLong; + int cpu_map_valid_size = + MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size); + + cpu_to_node()->clear(); + cpu_to_node()->at_grow(cpu_num - 1); + + int node_num = get_existing_num_nodes(); + + int distance = 0; + int closest_distance = INT_MAX; + int closest_node = 0; + unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size, mtInternal); + for (int i = 0; i < node_num; i++) { + // Check if node is configured (not a memory-less node). If it is not, find + // the closest configured node. Check also if node is bound, i.e. it's allowed + // to allocate memory from the node. If it's not allowed, map cpus in that node + // to the closest node from which memory allocation is allowed. + if (!is_node_in_configured_nodes(nindex_to_node()->at(i)) || + !is_node_in_bound_nodes(nindex_to_node()->at(i))) { + closest_distance = INT_MAX; + // Check distance from all remaining nodes in the system. Ignore distance + // from itself, from another non-configured node, and from another non-bound + // node. + for (int m = 0; m < node_num; m++) { + if (m != i && + is_node_in_configured_nodes(nindex_to_node()->at(m)) && + is_node_in_bound_nodes(nindex_to_node()->at(m))) { + distance = numa_distance(nindex_to_node()->at(i), nindex_to_node()->at(m)); + // If a closest node is found, update. 
There is always at least one + // configured and bound node in the system so there is always at least + // one node close. + if (distance != 0 && distance < closest_distance) { + closest_distance = distance; + closest_node = nindex_to_node()->at(m); + } + } + } + } else { + // Current node is already a configured node. + closest_node = nindex_to_node()->at(i); + } + + // Get cpus from the original node and map them to the closest node. If node + // is a configured node (not a memory-less node), then original node and + // closest node are the same. + if (numa_node_to_cpus(nindex_to_node()->at(i), cpu_map, cpu_map_size * (int)sizeof(unsigned long)) != -1) { + for (int j = 0; j < cpu_map_valid_size; j++) { + if (cpu_map[j] != 0) { + for (int k = 0; k < BitsPerCLong; k++) { + if (cpu_map[j] & (1UL << k)) { + int cpu_index = j * BitsPerCLong + k; + +#ifndef PRODUCT + if (UseDebuggerErgo1 && cpu_index >= (int)cpu_num) { + // Some debuggers limit the processor count without + // intercepting the NUMA APIs. Just fake the values. 
+ cpu_index = 0; + } +#endif + + cpu_to_node()->at_put(cpu_index, closest_node); + } + } + } + } + } + } + FREE_C_HEAP_ARRAY(unsigned long, cpu_map); +} + +int os::Linux::numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) { + // use the latest version of numa_node_to_cpus if available + if (_numa_node_to_cpus_v2 != nullptr) { + + // libnuma bitmask struct + struct bitmask { + unsigned long size; /* number of bits in the map */ + unsigned long *maskp; + }; + + struct bitmask mask; + mask.maskp = (unsigned long *)buffer; + mask.size = bufferlen * 8; + return _numa_node_to_cpus_v2(node, &mask); + } else if (_numa_node_to_cpus != nullptr) { + return _numa_node_to_cpus(node, buffer, bufferlen); + } + return -1; +} + +int os::Linux::get_node_by_cpu(int cpu_id) { + if (cpu_to_node() != nullptr && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) { + return cpu_to_node()->at(cpu_id); + } + return -1; +} + +GrowableArray* os::Linux::_cpu_to_node; +GrowableArray* os::Linux::_nindex_to_node; +os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu; +os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus; +os::Linux::numa_node_to_cpus_v2_func_t os::Linux::_numa_node_to_cpus_v2; +os::Linux::numa_max_node_func_t os::Linux::_numa_max_node; +os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes; +os::Linux::numa_available_func_t os::Linux::_numa_available; +os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory; +os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory; +os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2; +os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy; +os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset; +os::Linux::numa_distance_func_t os::Linux::_numa_distance; +os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind; +os::Linux::numa_get_interleave_mask_func_t 
os::Linux::_numa_get_interleave_mask; +os::Linux::numa_move_pages_func_t os::Linux::_numa_move_pages; +os::Linux::numa_set_preferred_func_t os::Linux::_numa_set_preferred; +os::Linux::NumaAllocationPolicy os::Linux::_current_numa_policy; +unsigned long* os::Linux::_numa_all_nodes; +struct bitmask* os::Linux::_numa_all_nodes_ptr; +struct bitmask* os::Linux::_numa_nodes_ptr; +struct bitmask* os::Linux::_numa_interleave_bitmask; +struct bitmask* os::Linux::_numa_membind_bitmask; + +bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) { + uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE, + MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0); + return res != (uintptr_t) MAP_FAILED; +} + +static address get_stack_commited_bottom(address bottom, size_t size) { + address nbot = bottom; + address ntop = bottom + size; + + size_t page_sz = os::vm_page_size(); + unsigned pages = checked_cast(size / page_sz); + + unsigned char vec[1]; + unsigned imin = 1, imax = pages + 1, imid; + int mincore_return_value = 0; + + assert(imin <= imax, "Unexpected page size"); + + while (imin < imax) { + imid = (imax + imin) / 2; + nbot = ntop - (imid * page_sz); + + // Use a trick with mincore to check whether the page is mapped or not. 
+ // mincore sets vec to 1 if page resides in memory and to 0 if page + // is swapped output but if page we are asking for is unmapped + // it returns -1,ENOMEM + mincore_return_value = mincore(nbot, page_sz, vec); + + if (mincore_return_value == -1) { + // Page is not mapped go up + // to find first mapped page + if (errno != EAGAIN) { + assert(errno == ENOMEM, "Unexpected mincore errno"); + imax = imid; + } + } else { + // Page is mapped go down + // to find first not mapped page + imin = imid + 1; + } + } + + nbot = nbot + page_sz; + + // Adjust stack bottom one page up if last checked page is not mapped + if (mincore_return_value == -1) { + nbot = nbot + page_sz; + } + + return nbot; +} + +bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) { + int mincore_return_value; + const size_t stripe = 1024; // query this many pages each time + unsigned char vec[stripe + 1]; + // set a guard + vec[stripe] = 'X'; + + const size_t page_sz = os::vm_page_size(); + uintx pages = size / page_sz; + + assert(is_aligned(start, page_sz), "Start address must be page aligned"); + assert(is_aligned(size, page_sz), "Size must be page aligned"); + + committed_start = nullptr; + + int loops = checked_cast((pages + stripe - 1) / stripe); + int committed_pages = 0; + address loop_base = start; + bool found_range = false; + + for (int index = 0; index < loops && !found_range; index ++) { + assert(pages > 0, "Nothing to do"); + uintx pages_to_query = (pages >= stripe) ? stripe : pages; + pages -= pages_to_query; + + // Get stable read + while ((mincore_return_value = mincore(loop_base, pages_to_query * page_sz, vec)) == -1 && errno == EAGAIN); + + // During shutdown, some memory goes away without properly notifying NMT, + // E.g. ConcurrentGCThread/WatcherThread can exit without deleting thread object. + // Bailout and return as not committed for now. 
+ if (mincore_return_value == -1 && errno == ENOMEM) { + return false; + } + + // If mincore is not supported. + if (mincore_return_value == -1 && errno == ENOSYS) { + return false; + } + + assert(vec[stripe] == 'X', "overflow guard"); + assert(mincore_return_value == 0, "Range must be valid"); + // Process this stripe + for (uintx vecIdx = 0; vecIdx < pages_to_query; vecIdx ++) { + if ((vec[vecIdx] & 0x01) == 0) { // not committed + // End of current contiguous region + if (committed_start != nullptr) { + found_range = true; + break; + } + } else { // committed + // Start of region + if (committed_start == nullptr) { + committed_start = loop_base + page_sz * vecIdx; + } + committed_pages ++; + } + } + + loop_base += pages_to_query * page_sz; + } + + if (committed_start != nullptr) { + assert(committed_pages > 0, "Must have committed region"); + assert(committed_pages <= int(size / page_sz), "Can not commit more than it has"); + assert(committed_start >= start && committed_start < start + size, "Out of range"); + committed_size = page_sz * committed_pages; + return true; + } else { + assert(committed_pages == 0, "Should not have committed region"); + return false; + } +} + + +// Linux uses a growable mapping for the stack, and if the mapping for +// the stack guard pages is not removed when we detach a thread the +// stack cannot grow beyond the pages where the stack guard was +// mapped. If at some point later in the process the stack expands to +// that point, the Linux kernel cannot expand the stack any further +// because the guard pages are in the way, and a segfault occurs. +// +// However, it's essential not to split the stack region by unmapping +// a region (leaving a hole) that's already part of the stack mapping, +// so if the stack mapping has already grown beyond the guard pages at +// the time we create them, we have to truncate the stack mapping. +// So, we need to know the extent of the stack mapping when +// create_stack_guard_pages() is called. 
+ +// We only need this for stacks that are growable: at the time of +// writing thread stacks don't use growable mappings (i.e. those +// created with MAP_GROWSDOWN), and aren't marked "[stack]", so this +// only applies to the main thread. + +// If the (growable) stack mapping already extends beyond the point +// where we're going to put our guard pages, truncate the mapping at +// that point by munmap()ping it. This ensures that when we later +// munmap() the guard pages we don't leave a hole in the stack +// mapping. This only affects the main/primordial thread + +bool os::pd_create_stack_guard_pages(char* addr, size_t size) { + if (os::is_primordial_thread()) { + // As we manually grow stack up to bottom inside create_attached_thread(), + // it's likely that os::Linux::initial_thread_stack_bottom is mapped and + // we don't need to do anything special. + // Check it first, before calling heavy function. + uintptr_t stack_extent = (uintptr_t) os::Linux::initial_thread_stack_bottom(); + unsigned char vec[1]; + + if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) { + // Fallback to slow path on all errors, including EAGAIN + assert((uintptr_t)addr >= stack_extent, + "Sanity: addr should be larger than extent, " PTR_FORMAT " >= " PTR_FORMAT, + p2i(addr), stack_extent); + stack_extent = (uintptr_t) get_stack_commited_bottom( + os::Linux::initial_thread_stack_bottom(), + (size_t)addr - stack_extent); + } + + if (stack_extent < (uintptr_t)addr) { + ::munmap((void*)stack_extent, (uintptr_t)(addr - stack_extent)); + } + } + + return os::commit_memory(addr, size, !ExecMem); +} + +// If this is a growable mapping, remove the guard pages entirely by +// munmap()ping them. If not, just call uncommit_memory(). This only +// affects the main/primordial thread, but guard against future OS changes. +// It's safe to always unmap guard pages for primordial thread because we +// always place it right after end of the mapped region. 
+ +bool os::remove_stack_guard_pages(char* addr, size_t size) { + uintptr_t stack_extent, stack_base; + + if (os::is_primordial_thread()) { + return ::munmap(addr, size) == 0; + } + + return os::uncommit_memory(addr, size); +} + +// 'requested_addr' is only treated as a hint, the return value may or +// may not start from the requested address. Unlike Linux mmap(), this +// function returns null to indicate failure. +static char* anon_mmap(char* requested_addr, size_t bytes) { + // If a requested address was given: + // + // The POSIX-conforming way is to *omit* MAP_FIXED. This will leave existing mappings intact. + // If the requested mapping area is blocked by a pre-existing mapping, the kernel will map + // somewhere else. On Linux, that alternative address appears to have no relation to the + // requested address. + // Unfortunately, this is not what we need - if we requested a specific address, we'd want + // to map there and nowhere else. Therefore we will unmap the block again, which means we + // just executed a needless mmap->munmap cycle. + // Since Linux 4.17, the kernel offers MAP_FIXED_NOREPLACE. With this flag, if a pre- + // existing mapping exists, the kernel will not map at an alternative point but instead + // return an error. We can therefore save that unnecessary mmap-munmap cycle. + // + // Backward compatibility: Older kernels will ignore the unknown flag; so mmap will behave + // as in mode (a). + const int flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS | + ((requested_addr != nullptr) ? MAP_FIXED_NOREPLACE : 0); + + // Map reserved/uncommitted pages PROT_NONE so we fail early if we + // touch an uncommitted page. Otherwise, the read/write might + // succeed if we have enough swap space to back the physical page. + char* addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, flags, -1, 0); + + return addr == MAP_FAILED ? 
nullptr : addr; +} + +// Allocate (using mmap, NO_RESERVE, with small pages) at either a given request address +// (req_addr != nullptr) or with a given alignment. +// - bytes shall be a multiple of alignment. +// - req_addr can be null. If not null, it must be a multiple of alignment. +// - alignment sets the alignment at which memory shall be allocated. +// It must be a multiple of allocation granularity. +// Returns address of memory or null. If req_addr was not null, will only return +// req_addr or null. +static char* anon_mmap_aligned(char* req_addr, size_t bytes, size_t alignment) { + size_t extra_size = bytes; + if (req_addr == nullptr && alignment > 0) { + extra_size += alignment; + } + + char* start = anon_mmap(req_addr, extra_size); + if (start != nullptr) { + if (req_addr != nullptr) { + if (start != req_addr) { + ::munmap(start, extra_size); + start = nullptr; + } + } else { + char* const start_aligned = align_up(start, alignment); + char* const end_aligned = start_aligned + bytes; + char* const end = start + extra_size; + if (start_aligned > start) { + ::munmap(start, start_aligned - start); + } + if (end_aligned < end) { + ::munmap(end_aligned, end - end_aligned); + } + start = start_aligned; + } + } + return start; +} + +static int anon_munmap(char * addr, size_t size) { + return ::munmap(addr, size) == 0; +} + +char* os::pd_reserve_memory(size_t bytes, bool exec) { + return anon_mmap(nullptr, bytes); +} + +bool os::pd_release_memory(char* addr, size_t size) { + return anon_munmap(addr, size); +} + +#ifdef CAN_SHOW_REGISTERS_ON_ASSERT +extern char* g_assert_poison; // assertion poison page address +#endif + +static bool linux_mprotect(char* addr, size_t size, int prot) { + // Linux wants the mprotect address argument to be page aligned. + char* bottom = (char*)align_down((intptr_t)addr, os::vm_page_size()); + + // According to SUSv3, mprotect() should only be used with mappings + // established by mmap(), and mmap() always maps whole pages. 
Unaligned + // 'addr' likely indicates problem in the VM (e.g. trying to change + // protection of malloc'ed or statically allocated memory). Check the + // caller if you hit this assert. + assert(addr == bottom, "sanity check"); + + size = align_up(pointer_delta(addr, bottom, 1) + size, os::vm_page_size()); + // Don't log anything if we're executing in the poison page signal handling + // context. It can lead to reentrant use of other parts of the VM code. +#ifdef CAN_SHOW_REGISTERS_ON_ASSERT + if (addr != g_assert_poison) +#endif + Events::log(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot); + return ::mprotect(bottom, size, prot) == 0; +} + +// Set protections specified +bool os::protect_memory(char* addr, size_t bytes, ProtType prot, + bool is_committed) { + unsigned int p = 0; + switch (prot) { + case MEM_PROT_NONE: p = PROT_NONE; break; + case MEM_PROT_READ: p = PROT_READ; break; + case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break; + case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break; + default: + ShouldNotReachHere(); + } + // is_committed is unused. + return linux_mprotect(addr, bytes, p); +} + +bool os::guard_memory(char* addr, size_t size) { + return linux_mprotect(addr, size, PROT_NONE); +} + +bool os::unguard_memory(char* addr, size_t size) { + return linux_mprotect(addr, size, PROT_READ|PROT_WRITE); +} + +static int hugetlbfs_page_size_flag(size_t page_size) { + if (page_size != HugePages::default_static_hugepage_size()) { + return (exact_log2(page_size) << MAP_HUGE_SHIFT); + } + return 0; +} + +static bool hugetlbfs_sanity_check(size_t page_size) { + const os::PageSizes page_sizes = HugePages::static_info().pagesizes(); + assert(page_sizes.contains(page_size), "Invalid page sizes passed"); + + // Include the page size flag to ensure we sanity check the correct page size. 
+ int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size); + void *p = mmap(nullptr, page_size, PROT_READ|PROT_WRITE, flags, -1, 0); + + if (p != MAP_FAILED) { + // Mapping succeeded, sanity check passed. + munmap(p, page_size); + return true; + } else { + log_info(pagesize)("Large page size (" SIZE_FORMAT "%s) failed sanity check, " + "checking if smaller large page sizes are usable", + byte_size_in_exact_unit(page_size), + exact_unit_for_byte_size(page_size)); + for (size_t page_size_ = page_sizes.next_smaller(page_size); + page_size_ > os::vm_page_size(); + page_size_ = page_sizes.next_smaller(page_size_)) { + flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size_); + p = mmap(nullptr, page_size_, PROT_READ|PROT_WRITE, flags, -1, 0); + if (p != MAP_FAILED) { + // Mapping succeeded, sanity check passed. + munmap(p, page_size_); + log_info(pagesize)("Large page size (" SIZE_FORMAT "%s) passed sanity check", + byte_size_in_exact_unit(page_size_), + exact_unit_for_byte_size(page_size_)); + return true; + } + } + } + + return false; +} + +// From the coredump_filter documentation: +// +// - (bit 0) anonymous private memory +// - (bit 1) anonymous shared memory +// - (bit 2) file-backed private memory +// - (bit 3) file-backed shared memory +// - (bit 4) ELF header pages in file-backed private memory areas (it is +// effective only if the bit 2 is cleared) +// - (bit 5) hugetlb private memory +// - (bit 6) hugetlb shared memory +// - (bit 7) dax private memory +// - (bit 8) dax shared memory +// +static void set_coredump_filter(CoredumpFilterBit bit) { + FILE *f; + long cdm; + + if ((f = os::fopen("/proc/self/coredump_filter", "r+")) == nullptr) { + return; + } + + if (fscanf(f, "%lx", &cdm) != 1) { + fclose(f); + return; + } + + long saved_cdm = cdm; + rewind(f); + cdm |= bit; + + if (cdm != saved_cdm) { + fprintf(f, "%#lx", cdm); + } + + fclose(f); +} + +// Large page support + +static size_t 
_large_page_size = 0; + +static void warn_no_large_pages_configured() { + if (!FLAG_IS_DEFAULT(UseLargePages)) { + log_warning(pagesize)("UseLargePages disabled, no large pages configured and available on the system."); + } +} + +struct LargePageInitializationLoggerMark { + ~LargePageInitializationLoggerMark() { + LogTarget(Info, pagesize) lt; + if (lt.is_enabled()) { + LogStream ls(lt); + if (UseLargePages) { + ls.print_cr("UseLargePages=1, UseTransparentHugePages=%d", UseTransparentHugePages); + ls.print("Large page support enabled. Usable page sizes: "); + os::page_sizes().print_on(&ls); + ls.print_cr(". Default large page size: " EXACTFMT ".", EXACTFMTARGS(os::large_page_size())); + } else { + ls.print("Large page support %sdisabled.", uses_zgc_shmem_thp() ? "partially " : ""); + } + } + } + + static bool uses_zgc_shmem_thp() { + return UseZGC && + // If user requested THP + ((os::Linux::thp_requested() && HugePages::supports_shmem_thp()) || + // If OS forced THP + HugePages::forced_shmem_thp()); + } +}; + +static bool validate_thps_configured() { + assert(UseTransparentHugePages, "Sanity"); + assert(os::Linux::thp_requested(), "Sanity"); + + if (UseZGC) { + if (!HugePages::supports_shmem_thp()) { + log_warning(pagesize)("Shared memory transparent huge pages are not enabled in the OS. " + "Set /sys/kernel/mm/transparent_hugepage/shmem_enabled to 'advise' to enable them."); + // UseTransparentHugePages has historically been tightly coupled with + // anonymous THPs. Fall through here and let the validity be determined + // by the OS configuration for anonymous THPs. ZGC doesn't use the flag + // but instead checks os::Linux::thp_requested(). + } + } + + if (!HugePages::supports_thp()) { + log_warning(pagesize)("Anonymous transparent huge pages are not enabled in the OS. 
" + "Set /sys/kernel/mm/transparent_hugepage/enabled to 'madvise' to enable them."); + log_warning(pagesize)("UseTransparentHugePages disabled, transparent huge pages are not supported by the operating system."); + return false; + } + + return true; +} + +void os::large_page_init() { + Linux::large_page_init(); +} + +void os::Linux::large_page_init() { + LargePageInitializationLoggerMark logger; + + // Decide if the user asked for THPs before we update UseTransparentHugePages. + const bool large_pages_turned_off = !FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages; + _thp_requested = UseTransparentHugePages && !large_pages_turned_off; + + // Query OS information first. + HugePages::initialize(); + + // If THPs are unconditionally enabled (THP mode "always"), khugepaged may attempt to + // coalesce small pages in thread stacks to huge pages. That costs a lot of memory and + // is usually unwanted for thread stacks. Therefore we attempt to prevent THP formation in + // thread stacks unless the user explicitly allowed THP formation by manually disabling + // -XX:-THPStackMitigation. + if (HugePages::thp_mode() == THPMode::always) { + if (THPStackMitigation) { + log_info(pagesize)("JVM will attempt to prevent THPs in thread stacks."); + } else { + log_info(pagesize)("JVM will *not* prevent THPs in thread stacks. This may cause high RSS."); + } + } else { + FLAG_SET_ERGO(THPStackMitigation, false); // Mitigation not needed + } + + // Handle the case where we do not want to use huge pages + if (!UseLargePages && + !UseTransparentHugePages) { + // Not using large pages. + return; + } + + if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) { + // The user explicitly turned off large pages. + UseTransparentHugePages = false; + return; + } + + // Check if the OS supports THPs + if (UseTransparentHugePages && !validate_thps_configured()) { + UseLargePages = UseTransparentHugePages = false; + return; + } + + // Check if the OS supports static hugepages. 
+ if (!UseTransparentHugePages && !HugePages::supports_static_hugepages()) { + warn_no_large_pages_configured(); + UseLargePages = false; + return; + } + + if (UseTransparentHugePages) { + // In THP mode: + // - os::large_page_size() is the *THP page size* + // - os::pagesizes() has two members, the THP page size and the system page size + assert(HugePages::thp_pagesize() > 0, "Missing OS info"); + _large_page_size = HugePages::thp_pagesize(); + _page_sizes.add(_large_page_size); + _page_sizes.add(os::vm_page_size()); + // +UseTransparentHugePages implies +UseLargePages + UseLargePages = true; + + } else { + + // In static hugepage mode: + // - os::large_page_size() is the default static hugepage size (/proc/meminfo "Hugepagesize") + // - os::pagesizes() contains all hugepage sizes the kernel supports, regardless whether there + // are pages configured in the pool or not (from /sys/kernel/hugepages/hugepage-xxxx ...) + os::PageSizes all_large_pages = HugePages::static_info().pagesizes(); + const size_t default_large_page_size = HugePages::default_static_hugepage_size(); + + // 3) Consistency check and post-processing + + size_t large_page_size = 0; + + // Check LargePageSizeInBytes matches an available page size and if so set _large_page_size + // using LargePageSizeInBytes as the maximum allowed large page size. If LargePageSizeInBytes + // doesn't match an available page size set _large_page_size to default_large_page_size + // and use it as the maximum. 
+ if (FLAG_IS_DEFAULT(LargePageSizeInBytes) || + LargePageSizeInBytes == 0 || + LargePageSizeInBytes == default_large_page_size) { + large_page_size = default_large_page_size; + log_info(pagesize)("Using the default large page size: " SIZE_FORMAT "%s", + byte_size_in_exact_unit(large_page_size), + exact_unit_for_byte_size(large_page_size)); + } else { + if (all_large_pages.contains(LargePageSizeInBytes)) { + large_page_size = LargePageSizeInBytes; + log_info(pagesize)("Overriding default large page size (" SIZE_FORMAT "%s) " + "using LargePageSizeInBytes: " SIZE_FORMAT "%s", + byte_size_in_exact_unit(default_large_page_size), + exact_unit_for_byte_size(default_large_page_size), + byte_size_in_exact_unit(large_page_size), + exact_unit_for_byte_size(large_page_size)); + } else { + large_page_size = default_large_page_size; + log_info(pagesize)("LargePageSizeInBytes is not a valid large page size (" SIZE_FORMAT "%s) " + "using the default large page size: " SIZE_FORMAT "%s", + byte_size_in_exact_unit(LargePageSizeInBytes), + exact_unit_for_byte_size(LargePageSizeInBytes), + byte_size_in_exact_unit(large_page_size), + exact_unit_for_byte_size(large_page_size)); + } + } + + // Do an additional sanity check to see if we can use the desired large page size + if (!hugetlbfs_sanity_check(large_page_size)) { + warn_no_large_pages_configured(); + UseLargePages = false; + return; + } + + _large_page_size = large_page_size; + + // Populate _page_sizes with large page sizes less than or equal to + // _large_page_size. 
+ for (size_t page_size = _large_page_size; page_size != 0; + page_size = all_large_pages.next_smaller(page_size)) { + _page_sizes.add(page_size); + } + } + + set_coredump_filter(LARGEPAGES_BIT); +} + +bool os::Linux::thp_requested() { + return _thp_requested; +} + +bool os::Linux::should_madvise_anonymous_thps() { + return _thp_requested && HugePages::thp_mode() == THPMode::madvise; +} + +bool os::Linux::should_madvise_shmem_thps() { + return _thp_requested && HugePages::shmem_thp_mode() == ShmemTHPMode::advise; +} + +static void log_on_commit_special_failure(char* req_addr, size_t bytes, + size_t page_size, int error) { + assert(error == ENOMEM, "Only expect to fail if no memory is available"); + + log_info(pagesize)("Failed to reserve and commit memory with given page size. req_addr: " PTR_FORMAT + " size: " SIZE_FORMAT "%s, page size: " SIZE_FORMAT "%s, (errno = %d)", + p2i(req_addr), byte_size_in_exact_unit(bytes), exact_unit_for_byte_size(bytes), + byte_size_in_exact_unit(page_size), exact_unit_for_byte_size(page_size), error); +} + +static bool commit_memory_special(size_t bytes, + size_t page_size, + char* req_addr, + bool exec) { + assert(UseLargePages, "Should only get here for huge pages"); + assert(!UseTransparentHugePages, "Should only get here for static hugepage mode"); + assert(is_aligned(bytes, page_size), "Unaligned size"); + assert(is_aligned(req_addr, page_size), "Unaligned address"); + assert(req_addr != nullptr, "Must have a requested address for special mappings"); + + int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; + int flags = MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED; + + // For large pages additional flags are required. 
+ if (page_size > os::vm_page_size()) { + flags |= MAP_HUGETLB | hugetlbfs_page_size_flag(page_size); + } + char* addr = (char*)::mmap(req_addr, bytes, prot, flags, -1, 0); + + if (addr == MAP_FAILED) { + log_on_commit_special_failure(req_addr, bytes, page_size, errno); + return false; + } + + log_debug(pagesize)("Commit special mapping: " PTR_FORMAT ", size=" SIZE_FORMAT "%s, page size=" + SIZE_FORMAT "%s", + p2i(addr), byte_size_in_exact_unit(bytes), + exact_unit_for_byte_size(bytes), + byte_size_in_exact_unit(page_size), + exact_unit_for_byte_size(page_size)); + assert(is_aligned(addr, page_size), "Must be"); + return true; +} + +static char* reserve_memory_special_huge_tlbfs(size_t bytes, + size_t alignment, + size_t page_size, + char* req_addr, + bool exec) { + const os::PageSizes page_sizes = HugePages::static_info().pagesizes(); + assert(UseLargePages, "only for Huge TLBFS large pages"); + assert(is_aligned(req_addr, alignment), "Must be"); + assert(is_aligned(req_addr, page_size), "Must be"); + assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be"); + assert(page_sizes.contains(page_size), "Must be a valid page size"); + assert(page_size > os::vm_page_size(), "Must be a large page size"); + assert(bytes >= page_size, "Shouldn't allocate large pages for small sizes"); + + // We only end up here when at least 1 large page can be used. + // If the size is not a multiple of the large page size, we + // will mix the type of pages used, but in a descending order. + // Start off by reserving a range of the given size that is + // properly aligned. At this point no pages are committed. If + // a requested address is given it will be used and it must be + // aligned to both the large page size and the given alignment. + // The larger of the two will be used. 
+ size_t required_alignment = MAX(page_size, alignment); + char* const aligned_start = anon_mmap_aligned(req_addr, bytes, required_alignment); + if (aligned_start == nullptr) { + return nullptr; + } + + // First commit using large pages. + size_t large_bytes = align_down(bytes, page_size); + bool large_committed = commit_memory_special(large_bytes, page_size, aligned_start, exec); + + if (large_committed && bytes == large_bytes) { + // The size was large page aligned so no additional work is + // needed even if the commit failed. + return aligned_start; + } + + // The requested size requires some small pages as well. + char* small_start = aligned_start + large_bytes; + size_t small_size = bytes - large_bytes; + if (!large_committed) { + // Failed to commit large pages, so we need to unmap the + // remainder of the original reservation. + ::munmap(small_start, small_size); + return nullptr; + } + + // Commit the remaining bytes using small pages. + bool small_committed = commit_memory_special(small_size, os::vm_page_size(), small_start, exec); + if (!small_committed) { + // Failed to commit the remaining size, need to unmap + // the large pages part of the reservation. 
+ ::munmap(aligned_start, large_bytes); + return nullptr; + } + return aligned_start; +} + +char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, + char* req_addr, bool exec) { + assert(UseLargePages, "only for large pages"); + + char* const addr = reserve_memory_special_huge_tlbfs(bytes, alignment, page_size, req_addr, exec); + + if (addr != nullptr) { + if (UseNUMAInterleaving) { + numa_make_global(addr, bytes); + } + } + + return addr; +} + +bool os::pd_release_memory_special(char* base, size_t bytes) { + assert(UseLargePages, "only for large pages"); + // Plain munmap is sufficient + return pd_release_memory(base, bytes); +} + +size_t os::large_page_size() { + return _large_page_size; +} + +// static hugepages (hugetlbfs) allow application to commit large page memory +// on demand. +// However, when committing memory with hugepages fails, the region +// that was supposed to be committed will lose the old reservation +// and allow other threads to steal that memory region. Because of this +// behavior we can't commit hugetlbfs memory. Instead, we commit that +// memory at reservation. +bool os::can_commit_large_page_memory() { + return UseTransparentHugePages; +} + +char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) { + assert(file_desc >= 0, "file_desc is not valid"); + char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem); + if (result != nullptr) { + if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == nullptr) { + vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory")); + } + } + return result; +} + +// Reserve memory at an arbitrary address, only if that area is +// available (and not reserved for something else). 
+ +char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) { + // Assert only that the size is a multiple of the page size, since + // that's all that mmap requires, and since that's all we really know + // about at this low abstraction level. If we need higher alignment, + // we can either pass an alignment to this method or verify alignment + // in one of the methods further up the call chain. See bug 5044738. + assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block"); + + // Linux mmap allows caller to pass an address as hint; give it a try first, + // if kernel honors the hint then we can return immediately. + char * addr = anon_mmap(requested_addr, bytes); + if (addr == requested_addr) { + return requested_addr; + } + + if (addr != nullptr) { + // mmap() is successful but it fails to reserve at the requested address + log_trace(os, map)("Kernel rejected " PTR_FORMAT ", offered " PTR_FORMAT ".", p2i(requested_addr), p2i(addr)); + anon_munmap(addr, bytes); + } + + return nullptr; +} + +size_t os::vm_min_address() { + // Determined by sysctl vm.mmap_min_addr. It exists as a safety zone to prevent + // NULL pointer dereferences. + // Most distros set this value to 64 KB. It *can* be zero, but rarely is. Here, + // we impose a minimum value if vm.mmap_min_addr is too low, for increased protection. + static size_t value = 0; + if (value == 0) { + assert(is_aligned(_vm_min_address_default, os::vm_allocation_granularity()), "Sanity"); + FILE* f = os::fopen("/proc/sys/vm/mmap_min_addr", "r"); + if (f != nullptr) { + if (fscanf(f, "%zu", &value) != 1) { + value = _vm_min_address_default; + } + fclose(f); + } + value = MAX2(_vm_min_address_default, value); + } + return value; +} + +// Used to convert frequent JVM_Yield() to nops +bool os::dont_yield() { + return DontYieldALot; +} + +// Linux CFS scheduler (since 2.6.23) does not guarantee sched_yield(2) will +// actually give up the CPU. 
Since skip buddy (v2.6.28): +// +// * Sets the yielding task as skip buddy for current CPU's run queue. +// * Picks next from run queue, if empty, picks a skip buddy (can be the yielding task). +// * Clears skip buddies for this run queue (yielding task no longer a skip buddy). +// +// An alternative is calling os::naked_short_nanosleep with a small number to avoid +// getting re-scheduled immediately. +// +void os::naked_yield() { + sched_yield(); +} + +//////////////////////////////////////////////////////////////////////////////// +// thread priority support + +// Note: Normal Linux applications are run with SCHED_OTHER policy. SCHED_OTHER +// only supports dynamic priority, static priority must be zero. For real-time +// applications, Linux supports SCHED_RR which allows static priority (1-99). +// However, for large multi-threaded applications, SCHED_RR is not only slower +// than SCHED_OTHER, but also very unstable (my volano tests hang hard 4 out +// of 5 runs - Sep 2005). +// +// The following code actually changes the niceness of kernel-thread/LWP. It +// has an assumption that setpriority() only modifies one kernel-thread/LWP, +// not the entire user process, and user level threads are 1:1 mapped to kernel +// threads. It has always been the case, but could change in the future. For +// this reason, the code should not be used as default (ThreadPriorityPolicy=0). +// It is only used when ThreadPriorityPolicy=1 and may require system level permission +// (e.g., root privilege or CAP_SYS_NICE capability). 
+ +int os::java_to_os_priority[CriticalPriority + 1] = { + 19, // 0 Entry should never be used + + 4, // 1 MinPriority + 3, // 2 + 2, // 3 + + 1, // 4 + 0, // 5 NormPriority + -1, // 6 + + -2, // 7 + -3, // 8 + -4, // 9 NearMaxPriority + + -5, // 10 MaxPriority + + -5 // 11 CriticalPriority +}; + +static int prio_init() { + if (ThreadPriorityPolicy == 1) { + if (geteuid() != 0) { + if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy) && !FLAG_IS_JIMAGE_RESOURCE(ThreadPriorityPolicy)) { + warning("-XX:ThreadPriorityPolicy=1 may require system level permission, " \ + "e.g., being the root user. If the necessary permission is not " \ + "possessed, changes to priority will be silently ignored."); + } + } + } + if (UseCriticalJavaThreadPriority) { + os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority]; + } + return 0; +} + +OSReturn os::set_native_priority(Thread* thread, int newpri) { + if (!UseThreadPriorities || ThreadPriorityPolicy == 0) return OS_OK; + + int ret = setpriority(PRIO_PROCESS, thread->osthread()->thread_id(), newpri); + return (ret == 0) ? OS_OK : OS_ERR; +} + +OSReturn os::get_native_priority(const Thread* const thread, + int *priority_ptr) { + if (!UseThreadPriorities || ThreadPriorityPolicy == 0) { + *priority_ptr = java_to_os_priority[NormPriority]; + return OS_OK; + } + + errno = 0; + *priority_ptr = getpriority(PRIO_PROCESS, thread->osthread()->thread_id()); + return (*priority_ptr != -1 || errno == 0 ? OS_OK : OS_ERR); +} + +// This is the fastest way to get thread cpu time on Linux. +// Returns cpu time (user+sys) for any thread, not only for current. +// POSIX compliant clocks are implemented in the kernels 2.6.16+. +// It might work on 2.6.10+ with a special kernel/glibc patch. 
+// For reference, please, see IEEE Std 1003.1-2004: +// http://www.unix.org/single_unix_specification + +jlong os::Linux::fast_thread_cpu_time(clockid_t clockid) { + struct timespec tp; + int status = clock_gettime(clockid, &tp); + assert(status == 0, "clock_gettime error: %s", os::strerror(errno)); + return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec; +} + +// copy data between two file descriptor within the kernel +// the number of bytes written to out_fd is returned if transfer was successful +// otherwise, returns -1 that implies an error +jlong os::Linux::sendfile(int out_fd, int in_fd, jlong* offset, jlong count) { + return ::sendfile64(out_fd, in_fd, (off64_t*)offset, (size_t)count); +} + +// Determine if the vmid is the parent pid for a child in a PID namespace. +// Return the namespace pid if so, otherwise -1. +int os::Linux::get_namespace_pid(int vmid) { + char fname[24]; + int retpid = -1; + + snprintf(fname, sizeof(fname), "/proc/%d/status", vmid); + FILE *fp = os::fopen(fname, "r"); + + if (fp) { + int pid, nspid; + int ret; + while (!feof(fp) && !ferror(fp)) { + ret = fscanf(fp, "NSpid: %d %d", &pid, &nspid); + if (ret == 1) { + break; + } + if (ret == 2) { + retpid = nspid; + break; + } + for (;;) { + int ch = fgetc(fp); + if (ch == EOF || ch == (int)'\n') break; + } + } + fclose(fp); + } + return retpid; +} + +extern void report_error(char* file_name, int line_no, char* title, + char* format, ...); + +// Some linux distributions (notably: Alpine Linux) include the +// grsecurity in the kernel. Of particular interest from a JVM perspective +// is PaX (https://pax.grsecurity.net/), which adds some security features +// related to page attributes. Specifically, the MPROTECT PaX functionality +// (https://pax.grsecurity.net/docs/mprotect.txt) prevents dynamic +// code generation by disallowing a (previously) writable page to be +// marked as executable. 
This is, of course, exactly what HotSpot does
+// for both JIT compiled methods, as well as for stubs, adapters, etc.
+//
+// Instead of crashing "lazily" when trying to make a page executable,
+// this code probes for the presence of PaX and reports the failure
+// eagerly.
+static void check_pax(void) {
+  // Zero doesn't generate code dynamically, so no need to perform the PaX check
+#ifndef ZERO
+  size_t size = os::vm_page_size();
+
+  void* p = ::mmap(nullptr, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+  if (p == MAP_FAILED) {
+    log_debug(os)("os_linux.cpp: check_pax: mmap failed (%s)" , os::strerror(errno));
+    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "failed to allocate memory for PaX check.");
+  }
+
+  int res = ::mprotect(p, size, PROT_READ|PROT_WRITE|PROT_EXEC);
+  if (res == -1) {
+    log_debug(os)("os_linux.cpp: check_pax: mprotect failed (%s)" , os::strerror(errno));
+    vm_exit_during_initialization(
+      "Failed to mark memory page as executable - check if grsecurity/PaX is enabled");
+  }
+
+  ::munmap(p, size);
+#endif
+}
+
+// this is called _before_ most of the global arguments have been parsed
+void os::init(void) {
+  char dummy;   // used to get a guess on initial stack address
+
+  clock_tics_per_sec = checked_cast<int>(sysconf(_SC_CLK_TCK));
+  int sys_pg_size = checked_cast<int>(sysconf(_SC_PAGESIZE));
+  if (sys_pg_size < 0) {
+    fatal("os_linux.cpp: os::init: sysconf failed (%s)",
+          os::strerror(errno));
+  }
+  size_t page_size = sys_pg_size;
+  OSInfo::set_vm_page_size(page_size);
+  OSInfo::set_vm_allocation_granularity(page_size);
+  if (os::vm_page_size() == 0) {
+    fatal("os_linux.cpp: os::init: OSInfo::set_vm_page_size failed");
+  }
+  _page_sizes.add(os::vm_page_size());
+
+  Linux::initialize_system_info();
+
+#ifdef __GLIBC__
+  g_mallinfo = CAST_TO_FN_PTR(mallinfo_func_t, dlsym(RTLD_DEFAULT, "mallinfo"));
+  g_mallinfo2 = CAST_TO_FN_PTR(mallinfo2_func_t, dlsym(RTLD_DEFAULT, "mallinfo2"));
+  g_malloc_info = CAST_TO_FN_PTR(malloc_info_func_t,
dlsym(RTLD_DEFAULT, "malloc_info")); +#endif // __GLIBC__ + + os::Linux::CPUPerfTicks pticks; + bool res = os::Linux::get_tick_information(&pticks, -1); + + if (res && pticks.has_steal_ticks) { + has_initial_tick_info = true; + initial_total_ticks = pticks.total; + initial_steal_ticks = pticks.steal; + } + + // _main_thread points to the thread that created/loaded the JVM. + Linux::_main_thread = pthread_self(); + + // retrieve entry point for pthread_setname_np + Linux::_pthread_setname_np = + (int(*)(pthread_t, const char*))dlsym(RTLD_DEFAULT, "pthread_setname_np"); + + check_pax(); + + os::Posix::init(); +} + +// To install functions for atexit system call +extern "C" { + static void perfMemory_exit_helper() { + perfMemory_exit(); + } +} + +void os::pd_init_container_support() { + OSContainer::init(); +} + +void os::Linux::numa_init() { + + // Java can be invoked as + // 1. Without numactl and heap will be allocated/configured on all nodes as + // per the system policy. + // 2. With numactl --interleave: + // Use numa_get_interleave_mask(v2) API to get nodes bitmask. The same + // API for membind case bitmask is reset. + // Interleave is only hint and Kernel can fallback to other nodes if + // no memory is available on the target nodes. + // 3. With numactl --membind: + // Use numa_get_membind(v2) API to get nodes bitmask. The same API for + // interleave case returns bitmask of all nodes. + // numa_all_nodes_ptr holds bitmask of all nodes. + // numa_get_interleave_mask(v2) and numa_get_membind(v2) APIs returns correct + // bitmask when externally configured to run on all or fewer nodes. + + if (!Linux::libnuma_init()) { + FLAG_SET_ERGO(UseNUMA, false); + FLAG_SET_ERGO(UseNUMAInterleaving, false); // Also depends on libnuma. 
+ } else { + if ((Linux::numa_max_node() < 1) || Linux::is_bound_to_single_node()) { + // If there's only one node (they start from 0) or if the process + // is bound explicitly to a single node using membind, disable NUMA + UseNUMA = false; + } else { + LogTarget(Info,os) log; + LogStream ls(log); + + Linux::set_configured_numa_policy(Linux::identify_numa_policy()); + + struct bitmask* bmp = Linux::_numa_membind_bitmask; + const char* numa_mode = "membind"; + + if (Linux::is_running_in_interleave_mode()) { + bmp = Linux::_numa_interleave_bitmask; + numa_mode = "interleave"; + } + + ls.print("UseNUMA is enabled and invoked in '%s' mode." + " Heap will be configured using NUMA memory nodes:", numa_mode); + + for (int node = 0; node <= Linux::numa_max_node(); node++) { + if (Linux::_numa_bitmask_isbitset(bmp, node)) { + ls.print(" %d", node); + } + } + } + } + + // When NUMA requested, not-NUMA-aware allocations default to interleaving. + if (UseNUMA && !UseNUMAInterleaving) { + FLAG_SET_ERGO_IF_DEFAULT(UseNUMAInterleaving, true); + } + + if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) { + // With static large pages we cannot uncommit a page, so there's no way + // we can make the adaptive lgrp chunk resizing work. If the user specified both + // UseNUMA and UseLargePages on the command line - warn and disable adaptive resizing. + if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) { + warning("UseNUMA is not fully compatible with +UseLargePages, " + "disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)"); + UseAdaptiveSizePolicy = false; + UseAdaptiveNUMAChunkSizing = false; + } + } +} + +#if defined(IA32) && !defined(ZERO) +/* + * Work-around (execute code at a high address) for broken NX emulation using CS limit, + * Red Hat patch "Exec-Shield" (IA32 only). 
+ *
+ * Map and execute at a high VA to prevent CS lazy updates race with SMP MM
+ * invalidation. Further code generation by the JVM will no longer cause CS limit
+ * updates.
+ *
+ * Affects IA32: RHEL 5 & 6, Ubuntu 10.04 (LTS), 10.10, 11.04, 11.10, 12.04.
+ * @see JDK-8023956
+ */
+static void workaround_expand_exec_shield_cs_limit() {
+  assert(os::Linux::initial_thread_stack_bottom() != nullptr, "sanity");
+  size_t page_size = os::vm_page_size();
+
+  /*
+   * JDK-8197429
+   *
+   * Expand the stack mapping to the end of the initial stack before
+   * attempting to install the codebuf. This is needed because newer
+   * Linux kernels impose a distance of a megabyte between stack
+   * memory and other memory regions. If we try to install the
+   * codebuf before expanding the stack the installation will appear
+   * to succeed but we'll get a segfault later if we expand the stack
+   * in Java code.
+   *
+   */
+  if (os::is_primordial_thread()) {
+    address limit = os::Linux::initial_thread_stack_bottom();
+    if (! DisablePrimordialThreadGuardPages) {
+      limit += StackOverflow::stack_red_zone_size() +
+               StackOverflow::stack_yellow_zone_size();
+    }
+    os::Linux::expand_stack_to(limit);
+  }
+
+  /*
+   * Take the highest VA the OS will give us and exec
+   *
+   * Although using -(pagesz) as mmap hint works on newer kernel as you would
+   * think, older variants affected by this work-around don't (search forward only).
+   *
+   * On the affected distributions, we understand the memory layout to be:
+   *
+   * TASK_LIMIT= 3G, main stack base close to TASK_LIMIT.
+   *
+   * A few pages south main stack will do it.
+   *
+   * If we are embedded in an app other than launcher (initial != main stack),
+   * we don't have much control or understanding of the address space, just let it slide.
+ */ + char* hint = (char*)(os::Linux::initial_thread_stack_bottom() - + (StackOverflow::stack_guard_zone_size() + page_size)); + char* codebuf = os::attempt_reserve_memory_at(hint, page_size); + + if (codebuf == nullptr) { + // JDK-8197429: There may be a stack gap of one megabyte between + // the limit of the stack and the nearest memory region: this is a + // Linux kernel workaround for CVE-2017-1000364. If we failed to + // map our codebuf, try again at an address one megabyte lower. + hint -= 1 * M; + codebuf = os::attempt_reserve_memory_at(hint, page_size); + } + + if ((codebuf == nullptr) || (!os::commit_memory(codebuf, page_size, true))) { + return; // No matter, we tried, best effort. + } + + MemTracker::record_virtual_memory_type((address)codebuf, mtInternal); + + log_info(os)("[CS limit NX emulation work-around, exec code at: %p]", codebuf); + + // Some code to exec: the 'ret' instruction + codebuf[0] = 0xC3; + + // Call the code in the codebuf + __asm__ volatile("call *%0" : : "r"(codebuf)); + + // keep the page mapped so CS limit isn't reduced. +} +#endif // defined(IA32) && !defined(ZERO) + +// this is called _after_ the global arguments have been parsed +jint os::init_2(void) { + + // This could be set after os::Posix::init() but all platforms + // have to set it the same so we have to mirror Solaris. 
+ DEBUG_ONLY(os::set_mutex_init_done();) + + os::Posix::init_2(); + + Linux::fast_thread_clock_init(); + + if (PosixSignals::init() == JNI_ERR) { + return JNI_ERR; + } + + // Check and sets minimum stack sizes against command line options + if (set_minimum_stack_sizes() == JNI_ERR) { + return JNI_ERR; + } + +#if defined(IA32) && !defined(ZERO) + // Need to ensure we've determined the process's initial stack to + // perform the workaround + Linux::capture_initial_stack(JavaThread::stack_size_at_create()); + workaround_expand_exec_shield_cs_limit(); +#else + suppress_primordial_thread_resolution = Arguments::created_by_java_launcher(); + if (!suppress_primordial_thread_resolution) { + Linux::capture_initial_stack(JavaThread::stack_size_at_create()); + } +#endif + + Linux::libpthread_init(); + Linux::sched_getcpu_init(); + log_info(os)("HotSpot is running with %s, %s", + Linux::libc_version(), Linux::libpthread_version()); + +#ifdef __GLIBC__ + // Check if we need to adjust the stack size for glibc guard pages. + init_adjust_stacksize_for_guard_pages(); +#endif + + if (UseNUMA || UseNUMAInterleaving) { + Linux::numa_init(); + } + + if (MaxFDLimit) { + // set the number of file descriptors to max. print out error + // if getrlimit/setrlimit fails but continue regardless. + struct rlimit nbr_files; + int status = getrlimit(RLIMIT_NOFILE, &nbr_files); + if (status != 0) { + log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno)); + } else { + nbr_files.rlim_cur = nbr_files.rlim_max; + status = setrlimit(RLIMIT_NOFILE, &nbr_files); + if (status != 0) { + log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno)); + } + } + } + + // at-exit methods are called in the reverse order of their registration. + // atexit functions are called on return from main or as a result of a + // call to exit(3C). There can be only 32 of these functions registered + // and atexit() does not set errno. 
+ + if (PerfAllowAtExitRegistration) { + // only register atexit functions if PerfAllowAtExitRegistration is set. + // atexit functions can be delayed until process exit time, which + // can be problematic for embedded VM situations. Embedded VMs should + // call DestroyJavaVM() to assure that VM resources are released. + + // note: perfMemory_exit_helper atexit function may be removed in + // the future if the appropriate cleanup code can be added to the + // VM_Exit VMOperation's doit method. + if (atexit(perfMemory_exit_helper) != 0) { + warning("os::init_2 atexit(perfMemory_exit_helper) failed"); + } + } + + // initialize thread priority policy + prio_init(); + + if (!FLAG_IS_DEFAULT(AllocateHeapAt)) { + set_coredump_filter(DAX_SHARED_BIT); + } + + if (DumpPrivateMappingsInCore) { + set_coredump_filter(FILE_BACKED_PVT_BIT); + } + + if (DumpSharedMappingsInCore) { + set_coredump_filter(FILE_BACKED_SHARED_BIT); + } + + if (DumpPerfMapAtExit && FLAG_IS_DEFAULT(UseCodeCacheFlushing)) { + // Disable code cache flushing to ensure the map file written at + // exit contains all nmethods generated during execution. + FLAG_SET_DEFAULT(UseCodeCacheFlushing, false); + } + + // Override the timer slack value if needed. The adjustment for the main + // thread will establish the setting for child threads, which would be + // most threads in JDK/JVM. 
+ if (TimerSlack >= 0) { + if (prctl(PR_SET_TIMERSLACK, TimerSlack) < 0) { + vm_exit_during_initialization("Setting timer slack failed: %s", os::strerror(errno)); + } + } + + return JNI_OK; +} + +// older glibc versions don't have this macro (which expands to +// an optimized bit-counting function) so we have to roll our own +#ifndef CPU_COUNT + +static int _cpu_count(const cpu_set_t* cpus) { + int count = 0; + // only look up to the number of configured processors + for (int i = 0; i < os::processor_count(); i++) { + if (CPU_ISSET(i, cpus)) { + count++; + } + } + return count; +} + +#define CPU_COUNT(cpus) _cpu_count(cpus) + +#endif // CPU_COUNT +#endif // !NATIVE_IMAGE + +// Get the current number of available processors for this process. +// This value can change at any time during a process's lifetime. +// sched_getaffinity gives an accurate answer as it accounts for cpusets. +// If it appears there may be more than 1024 processors then we do a +// dynamic check - see 6515172 for details. +// If anything goes wrong we fallback to returning the number of online +// processors - which can be greater than the number available to the process. +static int get_active_processor_count() { + // Note: keep this function, with its CPU_xx macros, *outside* the os namespace (see JDK-8289477). + cpu_set_t cpus; // can represent at most 1024 (CPU_SETSIZE) processors + cpu_set_t* cpus_p = &cpus; + size_t cpus_size = sizeof(cpu_set_t); + + int configured_cpus = os::processor_count(); // upper bound on available cpus + int cpu_count = 0; + +// old build platforms may not support dynamic cpu sets +#ifdef CPU_ALLOC + + // To enable easy testing of the dynamic path on different platforms we + // introduce a diagnostic flag: UseCpuAllocPath + if (configured_cpus >= CPU_SETSIZE || UseCpuAllocPath) { + // kernel may use a mask bigger than cpu_set_t + log_trace(os)("active_processor_count: using dynamic path %s" + "- configured processors: %d", + UseCpuAllocPath ? 
"(forced) " : "", + configured_cpus); + cpus_p = CPU_ALLOC(configured_cpus); + if (cpus_p != nullptr) { + cpus_size = CPU_ALLOC_SIZE(configured_cpus); + // zero it just to be safe + CPU_ZERO_S(cpus_size, cpus_p); + } + else { + // failed to allocate so fallback to online cpus + int online_cpus = checked_cast(::sysconf(_SC_NPROCESSORS_ONLN)); + log_trace(os)("active_processor_count: " + "CPU_ALLOC failed (%s) - using " + "online processor count: %d", + os::strerror(errno), online_cpus); + return online_cpus; + } + } + else { + log_trace(os)("active_processor_count: using static path - configured processors: %d", + configured_cpus); + } +#else // CPU_ALLOC +// these stubs won't be executed +#define CPU_COUNT_S(size, cpus) -1 +#define CPU_FREE(cpus) + + log_trace(os)("active_processor_count: only static path available - configured processors: %d", + configured_cpus); +#endif // CPU_ALLOC + + // pid 0 means the current thread - which we have to assume represents the process + if (sched_getaffinity(0, cpus_size, cpus_p) == 0) { + if (cpus_p != &cpus) { // can only be true when CPU_ALLOC used + cpu_count = CPU_COUNT_S(cpus_size, cpus_p); + } + else { + cpu_count = CPU_COUNT(cpus_p); + } + log_trace(os)("active_processor_count: sched_getaffinity processor count: %d", cpu_count); + } + else { + cpu_count = checked_cast(::sysconf(_SC_NPROCESSORS_ONLN)); + warning("sched_getaffinity failed (%s)- using online processor count (%d) " + "which may exceed available processors", os::strerror(errno), cpu_count); + } + + if (cpus_p != &cpus) { // can only be true when CPU_ALLOC used + CPU_FREE(cpus_p); + } + + assert(cpu_count > 0 && cpu_count <= os::processor_count(), "sanity check"); + return cpu_count; +} + +int os::Linux::active_processor_count() { + return get_active_processor_count(); +} + +#ifndef NATIVE_IMAGE +// Determine the active processor count from one of +// three different sources: +// +// 1. User option -XX:ActiveProcessorCount +// 2. 
kernel os calls (sched_getaffinity or sysconf(_SC_NPROCESSORS_ONLN) +// 3. extracted from cgroup cpu subsystem (shares and quotas) +// +// Option 1, if specified, will always override. +// If the cgroup subsystem is active and configured, we +// will return the min of the cgroup and option 2 results. +// This is required since tools, such as numactl, that +// alter cpu affinity do not update cgroup subsystem +// cpuset configuration files. +int os::active_processor_count() { + // User has overridden the number of active processors + if (ActiveProcessorCount > 0) { + log_trace(os)("active_processor_count: " + "active processor count set by user : %d", + ActiveProcessorCount); + return ActiveProcessorCount; + } + + int active_cpus; + if (OSContainer::is_containerized()) { + active_cpus = OSContainer::active_processor_count(); + log_trace(os)("active_processor_count: determined by OSContainer: %d", + active_cpus); + } else { + active_cpus = os::Linux::active_processor_count(); + } + + return active_cpus; +} + +static bool should_warn_invalid_processor_id() { + if (os::processor_count() == 1) { + // Don't warn if we only have one processor + return false; + } + + static volatile int warn_once = 1; + + if (Atomic::load(&warn_once) == 0 || + Atomic::xchg(&warn_once, 0) == 0) { + // Don't warn more than once + return false; + } + + return true; +} + +uint os::processor_id() { + const int id = Linux::sched_getcpu(); + + if (id < processor_count()) { + return (uint)id; + } + + // Some environments (e.g. openvz containers and the rr debugger) incorrectly + // report a processor id that is higher than the number of processors available. + // This is problematic, for example, when implementing CPU-local data structures, + // where the processor id is used to index into an array of length processor_count(). + // If this happens we return 0 here. 
This is safe since we always have at least
+  // one processor, but it's not optimal for performance if we're actually executing
+  // in an environment with more than one processor.
+  if (should_warn_invalid_processor_id()) {
+    log_warning(os)("Invalid processor id reported by the operating system "
+                    "(got processor id %d, valid processor id range is 0-%d)",
+                    id, processor_count() - 1);
+    log_warning(os)("Falling back to assuming processor id is 0. "
+                    "This could have a negative impact on performance.");
+  }
+
+  return 0;
+}
+
+void os::set_native_thread_name(const char *name) {
+  if (Linux::_pthread_setname_np) {
+    char buf [16]; // according to glibc manpage, 16 chars incl. '/0'
+    snprintf(buf, sizeof(buf), "%s", name);
+    buf[sizeof(buf) - 1] = '\0';
+    const int rc = Linux::_pthread_setname_np(pthread_self(), buf);
+    // ERANGE should not happen; all other errors should just be ignored.
+    assert(rc != ERANGE, "pthread_setname_np failed");
+  }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// debug support
+
+bool os::find(address addr, outputStream* st) {
+  Dl_info dlinfo;
+  memset(&dlinfo, 0, sizeof(dlinfo));
+  if (dladdr(addr, &dlinfo) != 0) {
+    st->print(PTR_FORMAT ": ", p2i(addr));
+    if (dlinfo.dli_sname != nullptr && dlinfo.dli_saddr != nullptr) {
+      st->print("%s+" PTR_FORMAT, dlinfo.dli_sname,
+                p2i(addr) - p2i(dlinfo.dli_saddr));
+    } else if (dlinfo.dli_fbase != nullptr) {
+      st->print("<offset " PTR_FORMAT ">", p2i(addr) - p2i(dlinfo.dli_fbase));
+    } else {
+      st->print("<absolute address>");
+    }
+    if (dlinfo.dli_fname != nullptr) {
+      st->print(" in %s", dlinfo.dli_fname);
+    }
+    if (dlinfo.dli_fbase != nullptr) {
+      st->print(" at " PTR_FORMAT, p2i(dlinfo.dli_fbase));
+    }
+    st->cr();
+
+    if (Verbose) {
+      // decode some bytes around the PC
+      address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
+      address end = clamp_address_in_page(addr+40, addr, os::vm_page_size());
+      address lowest = (address) dlinfo.dli_sname;
+      if (!lowest) lowest 
= (address) dlinfo.dli_fbase; + if (begin < lowest) begin = lowest; + Dl_info dlinfo2; + if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr + && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) { + end = (address) dlinfo2.dli_saddr; + } + Disassembler::decode(begin, end, st); + } + return true; + } + return false; +} + +//////////////////////////////////////////////////////////////////////////////// +// misc + +// This does not do anything on Linux. This is basically a hook for being +// able to use structured exception handling (thread-local exception filters) +// on, e.g., Win32. +void +os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method, + JavaCallArguments* args, JavaThread* thread) { + f(value, method, args, thread); +} + +// This code originates from JDK's sysOpen and open64_w +// from src/solaris/hpi/src/system_md.c + +int os::open(const char *path, int oflag, int mode) { + if (strlen(path) > MAX_PATH - 1) { + errno = ENAMETOOLONG; + return -1; + } + + // All file descriptors that are opened in the Java process and not + // specifically destined for a subprocess should have the close-on-exec + // flag set. If we don't set it, then careless 3rd party native code + // might fork and exec without closing all appropriate file descriptors + // (e.g. as we do in closeDescriptors in UNIXProcess.c), and this in + // turn might: + // + // - cause end-of-file to fail to be detected on some file + // descriptors, resulting in mysterious hangs, or + // + // - might cause an fopen in the subprocess to fail on a system + // suffering from bug 1085341. 
+ // + // (Yes, the default setting of the close-on-exec flag is a Unix + // design flaw) + // + // See: + // 1085341: 32-bit stdio routines should support file descriptors >255 + // 4843136: (process) pipe file descriptor from Runtime.exec not being closed + // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9 + // + // Modern Linux kernels (after 2.6.23 2007) support O_CLOEXEC with open(). + // O_CLOEXEC is preferable to using FD_CLOEXEC on an open file descriptor + // because it saves a system call and removes a small window where the flag + // is unset. On ancient Linux kernels the O_CLOEXEC flag will be ignored + // and we fall back to using FD_CLOEXEC (see below). +#ifdef O_CLOEXEC + oflag |= O_CLOEXEC; +#endif + + int fd = ::open64(path, oflag, mode); + if (fd == -1) return -1; + + //If the open succeeded, the file might still be a directory + { + struct stat64 buf64; + int ret = ::fstat64(fd, &buf64); + int st_mode = buf64.st_mode; + + if (ret != -1) { + if ((st_mode & S_IFMT) == S_IFDIR) { + errno = EISDIR; + ::close(fd); + return -1; + } + } else { + ::close(fd); + return -1; + } + } + +#ifdef FD_CLOEXEC + // Validate that the use of the O_CLOEXEC flag on open above worked. + // With recent kernels, we will perform this check exactly once. + static sig_atomic_t O_CLOEXEC_is_known_to_work = 0; + if (!O_CLOEXEC_is_known_to_work) { + int flags = ::fcntl(fd, F_GETFD); + if (flags != -1) { + if ((flags & FD_CLOEXEC) != 0) + O_CLOEXEC_is_known_to_work = 1; + else + ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC); + } + } +#endif + + return fd; +} + + +// create binary file, rewriting existing file if required +int os::create_binary_file(const char* path, bool rewrite_existing) { + int oflags = O_WRONLY | O_CREAT; + oflags |= rewrite_existing ? 
O_TRUNC : O_EXCL; + return ::open64(path, oflags, S_IREAD | S_IWRITE); +} + +// return current position of file pointer +jlong os::current_file_offset(int fd) { + return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR); +} + +// move file pointer to the specified offset +jlong os::seek_to_file_offset(int fd, jlong offset) { + return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET); +} + +// Map a block of memory. +char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, + char *addr, size_t bytes, bool read_only, + bool allow_exec) { + int prot; + int flags = MAP_PRIVATE; + + if (read_only) { + prot = PROT_READ; + } else { + prot = PROT_READ | PROT_WRITE; + } + + if (allow_exec) { + prot |= PROT_EXEC; + } + + if (addr != nullptr) { + flags |= MAP_FIXED; + } + + char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags, + fd, file_offset); + if (mapped_address == MAP_FAILED) { + return nullptr; + } + return mapped_address; +} + + +// Remap a block of memory. +char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, + char *addr, size_t bytes, bool read_only, + bool allow_exec) { + // same as map_memory() on this OS + return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only, + allow_exec); +} + + +// Unmap a block of memory. +bool os::pd_unmap_memory(char* addr, size_t bytes) { + return munmap(addr, bytes) == 0; +} + +static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time); + +static jlong fast_cpu_time(Thread *thread) { + clockid_t clockid; + int rc = os::Linux::pthread_getcpuclockid(thread->osthread()->pthread_id(), + &clockid); + if (rc == 0) { + return os::Linux::fast_thread_cpu_time(clockid); + } else { + // It's possible to encounter a terminated native thread that failed + // to detach itself from the VM - which should result in ESRCH. 
+ assert_status(rc == ESRCH, rc, "pthread_getcpuclockid failed"); + return -1; + } +} + +// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) +// are used by JVM M&M and JVMTI to get user+sys or user CPU time +// of a thread. +// +// current_thread_cpu_time() and thread_cpu_time(Thread*) returns +// the fast estimate available on the platform. + +jlong os::current_thread_cpu_time() { + if (os::Linux::supports_fast_thread_cpu_time()) { + return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID); + } else { + // return user + sys since the cost is the same + return slow_thread_cpu_time(Thread::current(), true /* user + sys */); + } +} + +jlong os::thread_cpu_time(Thread* thread) { + // consistent with what current_thread_cpu_time() returns + if (os::Linux::supports_fast_thread_cpu_time()) { + return fast_cpu_time(thread); + } else { + return slow_thread_cpu_time(thread, true /* user + sys */); + } +} + +jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { + if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) { + return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID); + } else { + return slow_thread_cpu_time(Thread::current(), user_sys_cpu_time); + } +} + +jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) { + if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) { + return fast_cpu_time(thread); + } else { + return slow_thread_cpu_time(thread, user_sys_cpu_time); + } +} + +// -1 on error. 
+static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) { + pid_t tid = thread->osthread()->thread_id(); + char *s; + char stat[2048]; + size_t statlen; + char proc_name[64]; + int count; + long sys_time, user_time; + char cdummy; + int idummy; + long ldummy; + FILE *fp; + + snprintf(proc_name, 64, "/proc/self/task/%d/stat", tid); + fp = os::fopen(proc_name, "r"); + if (fp == nullptr) return -1; + statlen = fread(stat, 1, 2047, fp); + stat[statlen] = '\0'; + fclose(fp); + + // Skip pid and the command string. Note that we could be dealing with + // weird command names, e.g. user could decide to rename java launcher + // to "java 1.4.2 :)", then the stat file would look like + // 1234 (java 1.4.2 :)) R ... ... + // We don't really need to know the command string, just find the last + // occurrence of ")" and then start parsing from there. See bug 4726580. + s = strrchr(stat, ')'); + if (s == nullptr) return -1; + + // Skip blank chars + do { s++; } while (s && isspace(*s)); + + count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu", + &cdummy, &idummy, &idummy, &idummy, &idummy, &idummy, + &ldummy, &ldummy, &ldummy, &ldummy, &ldummy, + &user_time, &sys_time); + if (count != 13) return -1; + if (user_sys_cpu_time) { + return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec); + } else { + return (jlong)user_time * (1000000000 / clock_tics_per_sec); + } +} + +void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { + info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits + info_ptr->may_skip_backward = false; // elapsed time not wall time + info_ptr->may_skip_forward = false; // elapsed time not wall time + info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned +} + +void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { + info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits + info_ptr->may_skip_backward = false; // elapsed time not wall time + 
info_ptr->may_skip_forward = false; // elapsed time not wall time + info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned +} + +bool os::is_thread_cpu_time_supported() { + return true; +} + +// System loadavg support. Returns -1 if load average cannot be obtained. +// Linux doesn't yet have a (official) notion of processor sets, +// so just return the system wide load average. +int os::loadavg(double loadavg[], int nelem) { + return ::getloadavg(loadavg, nelem); +} + +// Get the default path to the core file +// Returns the length of the string +int os::get_core_path(char* buffer, size_t bufferSize) { + /* + * Max length of /proc/sys/kernel/core_pattern is 128 characters. + * See https://www.kernel.org/doc/Documentation/sysctl/kernel.txt + */ + const int core_pattern_len = 129; + char core_pattern[core_pattern_len] = {0}; + + int core_pattern_file = ::open("/proc/sys/kernel/core_pattern", O_RDONLY); + if (core_pattern_file == -1) { + return -1; + } + + ssize_t ret = ::read(core_pattern_file, core_pattern, core_pattern_len); + ::close(core_pattern_file); + if (ret <= 0 || ret >= core_pattern_len || core_pattern[0] == '\n') { + return -1; + } + if (core_pattern[ret-1] == '\n') { + core_pattern[ret-1] = '\0'; + } else { + core_pattern[ret] = '\0'; + } + + // Replace the %p in the core pattern with the process id. NOTE: we do this + // only if the pattern doesn't start with "|", and we support only one %p in + // the pattern. + char *pid_pos = strstr(core_pattern, "%p"); + const char* tail = (pid_pos != nullptr) ? 
(pid_pos + 2) : ""; // skip over the "%p" + int written; + + if (core_pattern[0] == '/') { + if (pid_pos != nullptr) { + *pid_pos = '\0'; + written = jio_snprintf(buffer, bufferSize, "%s%d%s", core_pattern, + current_process_id(), tail); + } else { + written = jio_snprintf(buffer, bufferSize, "%s", core_pattern); + } + } else { + char cwd[PATH_MAX]; + + const char* p = get_current_directory(cwd, PATH_MAX); + if (p == nullptr) { + return -1; + } + + if (core_pattern[0] == '|') { + written = jio_snprintf(buffer, bufferSize, + "\"%s\" (or dumping to %s/core.%d)", + &core_pattern[1], p, current_process_id()); + } else if (pid_pos != nullptr) { + *pid_pos = '\0'; + written = jio_snprintf(buffer, bufferSize, "%s/%s%d%s", p, core_pattern, + current_process_id(), tail); + } else { + written = jio_snprintf(buffer, bufferSize, "%s/%s", p, core_pattern); + } + } + + if (written < 0) { + return -1; + } + + if (((size_t)written < bufferSize) && (pid_pos == nullptr) && (core_pattern[0] != '|')) { + int core_uses_pid_file = ::open("/proc/sys/kernel/core_uses_pid", O_RDONLY); + + if (core_uses_pid_file != -1) { + char core_uses_pid = 0; + ssize_t ret = ::read(core_uses_pid_file, &core_uses_pid, 1); + ::close(core_uses_pid_file); + + if (core_uses_pid == '1') { + jio_snprintf(buffer + written, bufferSize - written, + ".%d", current_process_id()); + } + } + } + + return checked_cast(strlen(buffer)); +} + +bool os::start_debugging(char *buf, int buflen) { + int len = (int)strlen(buf); + char *p = &buf[len]; + + jio_snprintf(p, buflen-len, + "\n\n" + "Do you want to debug the problem?\n\n" + "To debug, run 'gdb /proc/%d/exe %d'; then switch to thread " UINTX_FORMAT " (" INTPTR_FORMAT ")\n" + "Enter 'yes' to launch gdb automatically (PATH must include gdb)\n" + "Otherwise, press RETURN to abort...", + os::current_process_id(), os::current_process_id(), + os::current_thread_id(), os::current_thread_id()); + + bool yes = os::message_box("Unexpected Error", buf); + + if (yes) { + // yes, 
user asked VM to launch debugger + jio_snprintf(buf, sizeof(char)*buflen, "gdb /proc/%d/exe %d", + os::current_process_id(), os::current_process_id()); + + os::fork_and_exec(buf); + yes = false; + } + return yes; +} + + +// Java/Compiler thread: +// +// Low memory addresses +// P0 +------------------------+ +// | |\ Java thread created by VM does not have glibc +// | glibc guard page | - guard page, attached Java thread usually has +// | |/ 1 glibc guard page. +// P1 +------------------------+ Thread::stack_base() - Thread::stack_size() +// | |\ +// | HotSpot Guard Pages | - red, yellow and reserved pages +// | |/ +// +------------------------+ StackOverflow::stack_reserved_zone_base() +// | |\ +// | Normal Stack | - +// | |/ +// P2 +------------------------+ Thread::stack_base() +// +// Non-Java thread: +// +// Low memory addresses +// P0 +------------------------+ +// | |\ +// | glibc guard page | - usually 1 page +// | |/ +// P1 +------------------------+ Thread::stack_base() - Thread::stack_size() +// | |\ +// | Normal Stack | - +// | |/ +// P2 +------------------------+ Thread::stack_base() +// +// ** P1 (aka bottom) and size are the address and stack size +// returned from pthread_attr_getstack(). +// ** P2 (aka stack top or base) = P1 + size +// ** If adjustStackSizeForGuardPages() is true the guard pages have been taken +// out of the stack size given in pthread_attr. We work around this for +// threads created by the VM. We adjust bottom to be P1 and size accordingly. +// +#ifndef ZERO +void os::current_stack_base_and_size(address* base, size_t* size) { + address bottom; + if (os::is_primordial_thread()) { + // primordial thread needs special handling because pthread_getattr_np() + // may return bogus value. 
+ bottom = os::Linux::initial_thread_stack_bottom(); + *size = os::Linux::initial_thread_stack_size(); + *base = bottom + *size; + } else { + pthread_attr_t attr; + + int rslt = pthread_getattr_np(pthread_self(), &attr); + + // JVM needs to know exact stack location, abort if it fails + if (rslt != 0) { + if (rslt == ENOMEM) { + vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np"); + } else { + fatal("pthread_getattr_np failed with error = %d", rslt); + } + } + + if (pthread_attr_getstack(&attr, (void **)&bottom, size) != 0) { + fatal("Cannot locate current stack attributes!"); + } + + *base = bottom + *size; + + if (os::Linux::adjustStackSizeForGuardPages()) { + size_t guard_size = 0; + rslt = pthread_attr_getguardsize(&attr, &guard_size); + if (rslt != 0) { + fatal("pthread_attr_getguardsize failed with error = %d", rslt); + } + bottom += guard_size; + *size -= guard_size; + } + + pthread_attr_destroy(&attr); + } + assert(os::current_stack_pointer() >= bottom && + os::current_stack_pointer() < *base, "just checking"); +} + +#endif + +static inline struct timespec get_mtime(const char* filename) { + struct stat st; + int ret = os::stat(filename, &st); + assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno)); + return st.st_mtim; +} + +int os::compare_file_modified_times(const char* file1, const char* file2) { + struct timespec filetime1 = get_mtime(file1); + struct timespec filetime2 = get_mtime(file2); + int diff = primitive_compare(filetime1.tv_sec, filetime2.tv_sec); + if (diff == 0) { + diff = primitive_compare(filetime1.tv_nsec, filetime2.tv_nsec); + } + return diff; +} + +bool os::supports_map_sync() { + return true; +} + +void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) { + // Note: all ranges are "[..)" + unsigned long long start = (unsigned long long)addr; + unsigned long long end = start + bytes; + FILE* f = os::fopen("/proc/self/maps", "r"); + int num_found = 0; + if (f != nullptr) { + 
st->print_cr("Range [%llx-%llx) contains: ", start, end); + char line[512]; + while(fgets(line, sizeof(line), f) == line) { + unsigned long long segment_start = 0; + unsigned long long segment_end = 0; + if (::sscanf(line, "%llx-%llx", &segment_start, &segment_end) == 2) { + // Lets print out every range which touches ours. + if (segment_start < end && segment_end > start) { + num_found ++; + st->print("%s", line); // line includes \n + } + } + } + ::fclose(f); + if (num_found == 0) { + st->print_cr("nothing."); + } + } +} + +#ifdef __GLIBC__ +void os::Linux::get_mallinfo(glibc_mallinfo* out, bool* might_have_wrapped) { + if (g_mallinfo2) { + new_mallinfo mi = g_mallinfo2(); + out->arena = mi.arena; + out->ordblks = mi.ordblks; + out->smblks = mi.smblks; + out->hblks = mi.hblks; + out->hblkhd = mi.hblkhd; + out->usmblks = mi.usmblks; + out->fsmblks = mi.fsmblks; + out->uordblks = mi.uordblks; + out->fordblks = mi.fordblks; + out->keepcost = mi.keepcost; + *might_have_wrapped = false; + } else if (g_mallinfo) { + old_mallinfo mi = g_mallinfo(); + // glibc reports unsigned 32-bit sizes in int form. First make unsigned, then extend. 
+ out->arena = (size_t)(unsigned)mi.arena; + out->ordblks = (size_t)(unsigned)mi.ordblks; + out->smblks = (size_t)(unsigned)mi.smblks; + out->hblks = (size_t)(unsigned)mi.hblks; + out->hblkhd = (size_t)(unsigned)mi.hblkhd; + out->usmblks = (size_t)(unsigned)mi.usmblks; + out->fsmblks = (size_t)(unsigned)mi.fsmblks; + out->uordblks = (size_t)(unsigned)mi.uordblks; + out->fordblks = (size_t)(unsigned)mi.fordblks; + out->keepcost = (size_t)(unsigned)mi.keepcost; + *might_have_wrapped = NOT_LP64(false) LP64_ONLY(true); + } else { + // We should have either mallinfo or mallinfo2 + ShouldNotReachHere(); + } +} + +int os::Linux::malloc_info(FILE* stream) { + if (g_malloc_info == nullptr) { + return -2; + } + return g_malloc_info(0, stream); +} +#endif // __GLIBC__ + +bool os::trim_native_heap(os::size_change_t* rss_change) { +#ifdef __GLIBC__ + os::Linux::meminfo_t info1; + os::Linux::meminfo_t info2; + + bool have_info1 = rss_change != nullptr && + os::Linux::query_process_memory_info(&info1); + ::malloc_trim(0); + bool have_info2 = rss_change != nullptr && have_info1 && + os::Linux::query_process_memory_info(&info2); + ssize_t delta = (ssize_t) -1; + if (rss_change != nullptr) { + if (have_info1 && have_info2 && + info1.vmrss != -1 && info2.vmrss != -1 && + info1.vmswap != -1 && info2.vmswap != -1) { + // Note: query_process_memory_info returns values in K + rss_change->before = (info1.vmrss + info1.vmswap) * K; + rss_change->after = (info2.vmrss + info2.vmswap) * K; + } else { + rss_change->after = rss_change->before = SIZE_MAX; + } + } + + return true; +#else + return false; // musl +#endif +} + +bool os::pd_dll_unload(void* libhandle, char* ebuf, int ebuflen) { + + if (ebuf && ebuflen > 0) { + ebuf[0] = '\0'; + ebuf[ebuflen - 1] = '\0'; + } + + bool res = (0 == ::dlclose(libhandle)); + if (!res) { + // error analysis when dlopen fails + const char* error_report = ::dlerror(); + if (error_report == nullptr) { + error_report = "dlerror returned no error description"; + 
} + if (ebuf != nullptr && ebuflen > 0) { + snprintf(ebuf, ebuflen - 1, "%s", error_report); + } + } + + return res; +} // end: os::pd_dll_unload() + +#endif // !NATIVE_IMAGE diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/os_linux.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/os_linux.hpp new file mode 100644 index 000000000000..038141b4a3f2 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/os_linux.hpp @@ -0,0 +1,443 @@ +/* + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef OS_LINUX_OS_LINUX_HPP +#define OS_LINUX_OS_LINUX_HPP + +#include "runtime/os.hpp" + +// os::Linux defines the interface to Linux operating systems + +class os::Linux { + friend class CgroupSubsystem; + friend class os; + friend class OSContainer; + +#ifndef NATIVE_IMAGE + static int (*_pthread_getcpuclockid)(pthread_t, clockid_t *); + static int (*_pthread_setname_np)(pthread_t, const char*); + + static address _initial_thread_stack_bottom; + static uintptr_t _initial_thread_stack_size; + + static const char *_libc_version; + static const char *_libpthread_version; + + static bool _supports_fast_thread_cpu_time; + + static GrowableArray* _cpu_to_node; + static GrowableArray* _nindex_to_node; + + static julong available_memory_in_container(); +#endif // !NATIVE_IMAGE + + protected: + + static julong _physical_memory; +#ifndef NATIVE_IMAGE + static pthread_t _main_thread; + + static julong available_memory(); + static julong free_memory(); +#endif // !NATIVE_IMAGE + + static int active_processor_count(); + +#ifdef NATIVE_IMAGE + public: +#endif // NATIVE_IMAGE + static void initialize_system_info(); + +#ifndef NATIVE_IMAGE + static int commit_memory_impl(char* addr, size_t bytes, bool exec); + static int commit_memory_impl(char* addr, size_t bytes, + size_t alignment_hint, bool exec); + + static void set_libc_version(const char *s) { _libc_version = s; } + static void set_libpthread_version(const char *s) { _libpthread_version = s; } + + static void rebuild_cpu_to_node_map(); + static void rebuild_nindex_to_node_map(); + static GrowableArray* cpu_to_node() { return _cpu_to_node; } + static GrowableArray* nindex_to_node() { return _nindex_to_node; } + + static void print_process_memory_info(outputStream* st); + static void print_system_memory_info(outputStream* st); + static bool print_container_info(outputStream* st); + static void print_steal_info(outputStream* st); + static void print_distro_info(outputStream* st); + static void 
print_libversion_info(outputStream* st); + static void print_proc_sys_info(outputStream* st); + static bool print_ld_preload_file(outputStream* st); + static void print_uptime_info(outputStream* st); + + public: + struct CPUPerfTicks { + uint64_t used; + uint64_t usedKernel; + uint64_t total; + uint64_t steal; + bool has_steal_ticks; + }; + + // which_logical_cpu=-1 returns accumulated ticks for all cpus. + static bool get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu); + static bool _stack_is_executable; + static void *dlopen_helper(const char *name, char *ebuf, int ebuflen); + static void *dll_load_in_vmthread(const char *name, char *ebuf, int ebuflen); + static const char *dll_path(void* lib); + + static void init_thread_fpu_state(); + static int get_fpu_control_word(); + static void set_fpu_control_word(int fpu_control); + static pthread_t main_thread(void) { return _main_thread; } + // returns kernel thread id (similar to LWP id on Solaris), which can be + // used to access /proc + static pid_t gettid(); + + static address initial_thread_stack_bottom(void) { return _initial_thread_stack_bottom; } + static uintptr_t initial_thread_stack_size(void) { return _initial_thread_stack_size; } +#endif // !NATIVE_IMAGE + + static julong physical_memory() { return _physical_memory; } + static julong host_swap(); + +#ifndef NATIVE_IMAGE + static intptr_t* ucontext_get_sp(const ucontext_t* uc); + static intptr_t* ucontext_get_fp(const ucontext_t* uc); + + // GNU libc and libpthread version strings + static const char *libc_version() { return _libc_version; } + static const char *libpthread_version() { return _libpthread_version; } + + static void libpthread_init(); + static void sched_getcpu_init(); + static bool libnuma_init(); + static void* libnuma_dlsym(void* handle, const char* name); + // libnuma v2 (libnuma_1.2) symbols + static void* libnuma_v2_dlsym(void* handle, const char* name); + + // Return default guard size for the specified thread type + 
static size_t default_guard_size(os::ThreadType thr_type); + + static bool adjustStackSizeForGuardPages(); // See comments in os_linux.cpp + + static void capture_initial_stack(size_t max_size); + + // Stack overflow handling + static bool manually_expand_stack(JavaThread * t, address addr); + static void expand_stack_to(address bottom); + + // fast POSIX clocks support + static void fast_thread_clock_init(void); + + static int pthread_getcpuclockid(pthread_t tid, clockid_t *clock_id) { + return _pthread_getcpuclockid ? _pthread_getcpuclockid(tid, clock_id) : -1; + } + + static bool supports_fast_thread_cpu_time() { + return _supports_fast_thread_cpu_time; + } + + static jlong fast_thread_cpu_time(clockid_t clockid); + + static jlong sendfile(int out_fd, int in_fd, jlong* offset, jlong count); + + // Determine if the vmid is the parent pid for a child in a PID namespace. + // Return the namespace pid if so, otherwise -1. + static int get_namespace_pid(int vmid); + + // Output structure for query_process_memory_info() (all values in KB) + struct meminfo_t { + ssize_t vmsize; // current virtual size + ssize_t vmpeak; // peak virtual size + ssize_t vmrss; // current resident set size + ssize_t vmhwm; // peak resident set size + ssize_t vmswap; // swapped out + ssize_t rssanon; // resident set size (anonymous mappings, needs 4.5) + ssize_t rssfile; // resident set size (file mappings, needs 4.5) + ssize_t rssshmem; // resident set size (shared mappings, needs 4.5) + }; + + // Attempts to query memory information about the current process and return it in the output structure. + // May fail (returns false) or succeed (returns true) but not all output fields are available; unavailable + // fields will contain -1. + static bool query_process_memory_info(meminfo_t* info); + + // Tells if the user asked for transparent huge pages. 
+ static bool _thp_requested; + + static void large_page_init(); + + static bool thp_requested(); + static bool should_madvise_anonymous_thps(); + static bool should_madvise_shmem_thps(); + + static void madvise_transparent_huge_pages(void* addr, size_t bytes); + + // Stack repair handling + + // none present + + private: + static void numa_init(); + + typedef int (*sched_getcpu_func_t)(void); + typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen); + typedef int (*numa_node_to_cpus_v2_func_t)(int node, void *mask); + typedef int (*numa_max_node_func_t)(void); + typedef int (*numa_num_configured_nodes_func_t)(void); + typedef int (*numa_available_func_t)(void); + typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node); + typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask); + typedef void (*numa_interleave_memory_v2_func_t)(void *start, size_t size, struct bitmask* mask); + typedef struct bitmask* (*numa_get_membind_func_t)(void); + typedef struct bitmask* (*numa_get_interleave_mask_func_t)(void); + typedef long (*numa_move_pages_func_t)(int pid, unsigned long count, void **pages, const int *nodes, int *status, int flags); + typedef void (*numa_set_preferred_func_t)(int node); + typedef void (*numa_set_bind_policy_func_t)(int policy); + typedef int (*numa_bitmask_isbitset_func_t)(struct bitmask *bmp, unsigned int n); + typedef int (*numa_distance_func_t)(int node1, int node2); + + static sched_getcpu_func_t _sched_getcpu; + static numa_node_to_cpus_func_t _numa_node_to_cpus; + static numa_node_to_cpus_v2_func_t _numa_node_to_cpus_v2; + static numa_max_node_func_t _numa_max_node; + static numa_num_configured_nodes_func_t _numa_num_configured_nodes; + static numa_available_func_t _numa_available; + static numa_tonode_memory_func_t _numa_tonode_memory; + static numa_interleave_memory_func_t _numa_interleave_memory; + static numa_interleave_memory_v2_func_t 
_numa_interleave_memory_v2; + static numa_set_bind_policy_func_t _numa_set_bind_policy; + static numa_bitmask_isbitset_func_t _numa_bitmask_isbitset; + static numa_distance_func_t _numa_distance; + static numa_get_membind_func_t _numa_get_membind; + static numa_get_interleave_mask_func_t _numa_get_interleave_mask; + static numa_move_pages_func_t _numa_move_pages; + static numa_set_preferred_func_t _numa_set_preferred; + static unsigned long* _numa_all_nodes; + static struct bitmask* _numa_all_nodes_ptr; + static struct bitmask* _numa_nodes_ptr; + static struct bitmask* _numa_interleave_bitmask; + static struct bitmask* _numa_membind_bitmask; + + static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; } + static void set_numa_node_to_cpus(numa_node_to_cpus_func_t func) { _numa_node_to_cpus = func; } + static void set_numa_node_to_cpus_v2(numa_node_to_cpus_v2_func_t func) { _numa_node_to_cpus_v2 = func; } + static void set_numa_max_node(numa_max_node_func_t func) { _numa_max_node = func; } + static void set_numa_num_configured_nodes(numa_num_configured_nodes_func_t func) { _numa_num_configured_nodes = func; } + static void set_numa_available(numa_available_func_t func) { _numa_available = func; } + static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; } + static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; } + static void set_numa_interleave_memory_v2(numa_interleave_memory_v2_func_t func) { _numa_interleave_memory_v2 = func; } + static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; } + static void set_numa_bitmask_isbitset(numa_bitmask_isbitset_func_t func) { _numa_bitmask_isbitset = func; } + static void set_numa_distance(numa_distance_func_t func) { _numa_distance = func; } + static void set_numa_get_membind(numa_get_membind_func_t func) { _numa_get_membind = func; } + static void 
set_numa_get_interleave_mask(numa_get_interleave_mask_func_t func) { _numa_get_interleave_mask = func; } + static void set_numa_move_pages(numa_move_pages_func_t func) { _numa_move_pages = func; } + static void set_numa_set_preferred(numa_set_preferred_func_t func) { _numa_set_preferred = func; } + static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; } + static void set_numa_all_nodes_ptr(struct bitmask **ptr) { _numa_all_nodes_ptr = (ptr == nullptr ? nullptr : *ptr); } + static void set_numa_nodes_ptr(struct bitmask **ptr) { _numa_nodes_ptr = (ptr == nullptr ? nullptr : *ptr); } + static void set_numa_interleave_bitmask(struct bitmask* ptr) { _numa_interleave_bitmask = ptr ; } + static void set_numa_membind_bitmask(struct bitmask* ptr) { _numa_membind_bitmask = ptr ; } + static int sched_getcpu_syscall(void); + + enum NumaAllocationPolicy{ + NotInitialized, + Membind, + Interleave + }; + static NumaAllocationPolicy _current_numa_policy; + + public: + static int sched_getcpu() { return _sched_getcpu != nullptr ? _sched_getcpu() : -1; } + static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen); + static int numa_max_node() { return _numa_max_node != nullptr ? _numa_max_node() : -1; } + static int numa_num_configured_nodes() { + return _numa_num_configured_nodes != nullptr ? _numa_num_configured_nodes() : -1; + } + static int numa_available() { return _numa_available != nullptr ? _numa_available() : -1; } + static int numa_tonode_memory(void *start, size_t size, int node) { + return _numa_tonode_memory != nullptr ? 
_numa_tonode_memory(start, size, node) : -1; + } + + static bool is_running_in_interleave_mode() { + return _current_numa_policy == Interleave; + } + + static void set_configured_numa_policy(NumaAllocationPolicy numa_policy) { + _current_numa_policy = numa_policy; + } + + static NumaAllocationPolicy identify_numa_policy() { + for (int node = 0; node <= Linux::numa_max_node(); node++) { + if (Linux::_numa_bitmask_isbitset(Linux::_numa_interleave_bitmask, node)) { + return Interleave; + } + } + return Membind; + } + + static void numa_interleave_memory(void *start, size_t size) { + // Prefer v2 API + if (_numa_interleave_memory_v2 != nullptr) { + if (is_running_in_interleave_mode()) { + _numa_interleave_memory_v2(start, size, _numa_interleave_bitmask); + } else if (_numa_membind_bitmask != nullptr) { + _numa_interleave_memory_v2(start, size, _numa_membind_bitmask); + } + } else if (_numa_interleave_memory != nullptr) { + _numa_interleave_memory(start, size, _numa_all_nodes); + } + } + static void numa_set_preferred(int node) { + if (_numa_set_preferred != nullptr) { + _numa_set_preferred(node); + } + } + static void numa_set_bind_policy(int policy) { + if (_numa_set_bind_policy != nullptr) { + _numa_set_bind_policy(policy); + } + } + static int numa_distance(int node1, int node2) { + return _numa_distance != nullptr ? _numa_distance(node1, node2) : -1; + } + static long numa_move_pages(int pid, unsigned long count, void **pages, const int *nodes, int *status, int flags) { + return _numa_move_pages != nullptr ? _numa_move_pages(pid, count, pages, nodes, status, flags) : -1; + } + static int get_node_by_cpu(int cpu_id); + static int get_existing_num_nodes(); + // Check if numa node is configured (non-zero memory node). 
+ static bool is_node_in_configured_nodes(unsigned int n) { + if (_numa_bitmask_isbitset != nullptr && _numa_all_nodes_ptr != nullptr) { + return _numa_bitmask_isbitset(_numa_all_nodes_ptr, n); + } else + return false; + } + // Check if numa node exists in the system (including zero memory nodes). + static bool is_node_in_existing_nodes(unsigned int n) { + if (_numa_bitmask_isbitset != nullptr && _numa_nodes_ptr != nullptr) { + return _numa_bitmask_isbitset(_numa_nodes_ptr, n); + } else if (_numa_bitmask_isbitset != nullptr && _numa_all_nodes_ptr != nullptr) { + // Not all libnuma API v2 implement numa_nodes_ptr, so it's not possible + // to trust the API version for checking its absence. On the other hand, + // numa_nodes_ptr found in libnuma 2.0.9 and above is the only way to get + // a complete view of all numa nodes in the system, hence numa_nodes_ptr + // is used to handle CPU and nodes on architectures (like PowerPC) where + // there can exist nodes with CPUs but no memory or vice-versa and the + // nodes may be non-contiguous. For most of the architectures, like + // x86_64, numa_node_ptr presents the same node set as found in + // numa_all_nodes_ptr so it's possible to use numa_all_nodes_ptr as a + // substitute. + return _numa_bitmask_isbitset(_numa_all_nodes_ptr, n); + } else + return false; + } + // Check if node is in bound node set. + static bool is_node_in_bound_nodes(int node) { + if (_numa_bitmask_isbitset != nullptr) { + if (is_running_in_interleave_mode()) { + return _numa_bitmask_isbitset(_numa_interleave_bitmask, node); + } else { + return _numa_membind_bitmask != nullptr ? _numa_bitmask_isbitset(_numa_membind_bitmask, node) : false; + } + } + return false; + } + // Check if bound to only one numa node. + // Returns true if bound to a single numa node, otherwise returns false. 
+ static bool is_bound_to_single_node() { + int nodes = 0; + unsigned int node = 0; + unsigned int highest_node_number = 0; + + if (_numa_membind_bitmask != nullptr && _numa_max_node != nullptr && _numa_bitmask_isbitset != nullptr) { + highest_node_number = _numa_max_node(); + } else { + return false; + } + + for (node = 0; node <= highest_node_number; node++) { + if (_numa_bitmask_isbitset(_numa_membind_bitmask, node)) { + nodes++; + } + } + + if (nodes == 1) { + return true; + } else { + return false; + } + } + + static const GrowableArray* numa_nindex_to_node() { + return _nindex_to_node; + } + + static void* resolve_function_descriptor(void* p); + +#ifdef __GLIBC__ + // os::Linux::get_mallinfo() hides the complexity of dealing with mallinfo() or + // mallinfo2() from the user. Use this function instead of raw mallinfo/mallinfo2() + // to keep the JVM runtime-compatible with different glibc versions. + // + // mallinfo2() was added with glibc (>2.32). Legacy mallinfo() was deprecated with + // 2.33 and may vanish in future glibcs. So we may have both or either one of + // them. + // + // mallinfo2() is functionally equivalent to legacy mallinfo but returns sizes as + // 64-bit on 64-bit platforms. Legacy mallinfo uses 32-bit fields. However, legacy + // mallinfo is still perfectly fine to use if we know the sizes cannot have wrapped. + // For example, if the process virtual size does not exceed 4G, we cannot have + // malloc'ed more than 4G, so the results from legacy mallinfo() can still be used. + // + // os::Linux::get_mallinfo() will always prefer mallinfo2() if found, but will fall back + // to legacy mallinfo() if only that is available. In that case, it will return true + // in *might_have_wrapped. 
+ struct glibc_mallinfo { + size_t arena; + size_t ordblks; + size_t smblks; + size_t hblks; + size_t hblkhd; + size_t usmblks; + size_t fsmblks; + size_t uordblks; + size_t fordblks; + size_t keepcost; + }; + static void get_mallinfo(glibc_mallinfo* out, bool* might_have_wrapped); + + // Calls out to GNU extension malloc_info if available + // otherwise does nothing and returns -2. + static int malloc_info(FILE* stream); +#endif // GLIBC +#endif // !NATIVE_IMAGE +}; + +#endif // OS_LINUX_OS_LINUX_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/os_linux.inline.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/os_linux.inline.hpp new file mode 100644 index 000000000000..a31cfbf30cd4 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/linux/os_linux.inline.hpp @@ -0,0 +1,62 @@ +/* + * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef OS_LINUX_OS_LINUX_INLINE_HPP +#define OS_LINUX_OS_LINUX_INLINE_HPP + +#include "os_linux.hpp" + +#include "runtime/os.hpp" +#include "os_posix.inline.hpp" + +#ifndef NATIVE_IMAGE +inline bool os::zero_page_read_protected() { + return true; +} + +inline bool os::uses_stack_guard_pages() { + return true; +} + +inline bool os::must_commit_stack_guard_pages() { + assert(uses_stack_guard_pages(), "sanity check"); + return true; +} + +// Bang the shadow pages if they need to be touched to be mapped. +inline void os::map_stack_shadow_pages(address sp) { +} + +// Trim-native support +inline bool os::can_trim_native_heap() { +#ifdef __GLIBC__ + return true; +#else + return false; // musl +#endif +} +#endif // !NATIVE_IMAGE + +#endif // OS_LINUX_OS_LINUX_INLINE_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/posix/include/jvm_md.h b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/posix/include/jvm_md.h new file mode 100644 index 000000000000..2c8ebe06d706 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/posix/include/jvm_md.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. 
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef _JAVASOFT_JVM_MD_H_
+#define _JAVASOFT_JVM_MD_H_
+
+/*
+ * This file is currently collecting system-specific dregs for the
+ * JNI conversion, which should be sorted out later.
+ */
+
+#include <dirent.h> /* For DIR */
+#include <sys/param.h> /* For MAXPATHLEN */
+#include <unistd.h> /* For F_OK, R_OK, W_OK */
+#include <stddef.h> /* For ptrdiff_t */
+#include <stdint.h> /* For uintptr_t */
+
+#define JNI_ONLOAD_SYMBOLS {"JNI_OnLoad"}
+#define JNI_ONUNLOAD_SYMBOLS {"JNI_OnUnload"}
+#define JVM_ONLOAD_SYMBOLS {"JVM_OnLoad"}
+#define AGENT_ONLOAD_SYMBOLS {"Agent_OnLoad"}
+#define AGENT_ONUNLOAD_SYMBOLS {"Agent_OnUnload"}
+#define AGENT_ONATTACH_SYMBOLS {"Agent_OnAttach"}
+
+#define JNI_LIB_PREFIX "lib"
+#ifdef __APPLE__
+#define JNI_LIB_SUFFIX ".dylib"
+#define VERSIONED_JNI_LIB_NAME(NAME, VERSION) JNI_LIB_PREFIX NAME "." VERSION JNI_LIB_SUFFIX
+#else
+#define JNI_LIB_SUFFIX ".so"
+#define VERSIONED_JNI_LIB_NAME(NAME, VERSION) JNI_LIB_PREFIX NAME JNI_LIB_SUFFIX "." VERSION
+#endif
+#define JNI_LIB_NAME(NAME) JNI_LIB_PREFIX NAME JNI_LIB_SUFFIX
+
+#if defined(AIX)
+#define JVM_MAXPATHLEN MAXPATHLEN
+#else
+// Hack: MAXPATHLEN is 4095 on some Linux and 4096 on others. This may
+// cause problems if JVM and the rest of JDK are built on different
+// Linux releases.
Here we define JVM_MAXPATHLEN to be MAXPATHLEN + 1,
+// so buffers declared in VM are always >= 4096.
+#define JVM_MAXPATHLEN MAXPATHLEN + 1
+#endif
+
+#define JVM_R_OK R_OK
+#define JVM_W_OK W_OK
+#define JVM_X_OK X_OK
+#define JVM_F_OK F_OK
+
+/*
+ * File I/O
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <signal.h>
+
+/* Signals */
+
+#include <sys/socket.h> // for socklen_t
+
+#define JVM_SIGINT SIGINT
+#define JVM_SIGTERM SIGTERM
+
+#define BREAK_SIGNAL SIGQUIT /* Thread dumping support. */
+#define SHUTDOWN1_SIGNAL SIGHUP /* Shutdown Hooks support. */
+#define SHUTDOWN2_SIGNAL SIGINT
+#define SHUTDOWN3_SIGNAL SIGTERM
+
+#endif /* !_JAVASOFT_JVM_MD_H_ */
diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/posix/os_posix.cpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/posix/os_posix.cpp
new file mode 100644
index 000000000000..dcc353604f1b
--- /dev/null
+++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/posix/os_posix.cpp
@@ -0,0 +1,2042 @@
+/*
+ * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef NATIVE_IMAGE
+#include "classfile/classLoader.hpp"
+#include "jvm.h"
+#include "jvmtifiles/jvmti.h"
+#include "logging/log.hpp"
+#include "memory/allocation.inline.hpp"
+#include "nmt/memTracker.hpp"
+#endif // !NATIVE_IMAGE
+#include "os_posix.inline.hpp"
+#ifndef NATIVE_IMAGE
+#include "runtime/arguments.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/java.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/osThread.hpp"
+#include "runtime/park.hpp"
+#include "runtime/perfMemory.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "services/attachListener.hpp"
+#include "utilities/align.hpp"
+#endif // !NATIVE_IMAGE
+#include "utilities/checkedCast.hpp"
+#include "utilities/debug.hpp"
+#ifndef NATIVE_IMAGE
+#include "utilities/defaultStream.hpp"
+#include "utilities/events.hpp"
+#include "utilities/formatBuffer.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+#include "utilities/vmError.hpp"
+#if INCLUDE_JFR
+#include "jfr/support/jfrNativeLibraryLoadEvent.hpp"
+#endif
+
+#ifdef AIX
+#include "loadlib_aix.hpp"
+#include "os_aix.hpp"
+#endif
+#ifdef LINUX
+#include "os_linux.hpp"
+#endif
+
+#include <dirent.h>
+#include <dlfcn.h>
+#include <grp.h>
+#include <locale.h>
+#include <netdb.h>
+#include <pwd.h>
+#include <pthread.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <spawn.h>
+#include <sys/time.h>
+#include <sys/times.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <unistd.h>
+#include <utmpx.h>
+
+#ifdef __APPLE__
+  #include <crt_externs.h>
+#endif
+
+#define ROOT_UID 0
+
+#ifndef MAP_ANONYMOUS
+  #define
MAP_ANONYMOUS MAP_ANON +#endif + +static jlong initial_time_count = 0; + +static int clock_tics_per_sec = 100; + +// Platform minimum stack allowed +size_t os::_os_min_stack_allowed = PTHREAD_STACK_MIN; + +// Check core dump limit and report possible place where core can be found +void os::check_dump_limit(char* buffer, size_t bufferSize) { + if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) { + jio_snprintf(buffer, bufferSize, "CreateCoredumpOnCrash is disabled from command line"); + VMError::record_coredump_status(buffer, false); + return; + } + + int n; + struct rlimit rlim; + bool success; + + char core_path[PATH_MAX]; + n = get_core_path(core_path, PATH_MAX); + + if (n <= 0) { + jio_snprintf(buffer, bufferSize, "core.%d (may not exist)", current_process_id()); + success = true; +#ifdef LINUX + } else if (core_path[0] == '"') { // redirect to user process + jio_snprintf(buffer, bufferSize, "Core dumps may be processed with %s", core_path); + success = true; +#endif + } else if (getrlimit(RLIMIT_CORE, &rlim) != 0) { + jio_snprintf(buffer, bufferSize, "%s (may not exist)", core_path); + success = true; + } else { + switch(rlim.rlim_cur) { + case RLIM_INFINITY: + jio_snprintf(buffer, bufferSize, "%s", core_path); + success = true; + break; + case 0: + jio_snprintf(buffer, bufferSize, "Core dumps have been disabled. To enable core dumping, try \"ulimit -c unlimited\" before starting Java again"); + success = false; + break; + default: + jio_snprintf(buffer, bufferSize, "%s (max size " UINT64_FORMAT " k). 
To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", core_path, uint64_t(rlim.rlim_cur) / K); + success = true; + break; + } + } + + VMError::record_coredump_status(buffer, success); +} + +int os::get_native_stack(address* stack, int frames, int toSkip) { + int frame_idx = 0; + int num_of_frames; // number of frames captured + frame fr = os::current_frame(); + while (fr.pc() && frame_idx < frames) { + if (toSkip > 0) { + toSkip --; + } else { + stack[frame_idx ++] = fr.pc(); + } + if (fr.fp() == nullptr || fr.cb() != nullptr || + fr.sender_pc() == nullptr || os::is_first_C_frame(&fr)) { + break; + } + fr = os::get_sender_for_C_frame(&fr); + } + num_of_frames = frame_idx; + for (; frame_idx < frames; frame_idx ++) { + stack[frame_idx] = nullptr; + } + + return num_of_frames; +} + +int os::get_last_error() { + return errno; +} + +size_t os::lasterror(char *buf, size_t len) { + if (errno == 0) return 0; + + const char *s = os::strerror(errno); + size_t n = ::strlen(s); + if (n >= len) { + n = len - 1; + } + ::strncpy(buf, s, n); + buf[n] = '\0'; + return n; +} + +// Return true if user is running as root. 
+bool os::have_special_privileges() { + static bool privileges = (getuid() != geteuid()) || (getgid() != getegid()); + return privileges; +} + +void os::wait_for_keypress_at_exit(void) { + // don't do anything on posix platforms + return; +} + +int os::create_file_for_heap(const char* dir) { + int fd; + +#if defined(LINUX) && defined(O_TMPFILE) + char* native_dir = os::strdup(dir); + if (native_dir == nullptr) { + vm_exit_during_initialization(err_msg("strdup failed during creation of backing file for heap (%s)", os::strerror(errno))); + return -1; + } + os::native_path(native_dir); + fd = os::open(dir, O_TMPFILE | O_RDWR, S_IRUSR | S_IWUSR); + os::free(native_dir); + + if (fd == -1) +#endif + { + const char name_template[] = "/jvmheap.XXXXXX"; + + size_t fullname_len = strlen(dir) + strlen(name_template); + char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal); + if (fullname == nullptr) { + vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno))); + return -1; + } + int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template); + assert((size_t)n == fullname_len, "Unexpected number of characters in string"); + + os::native_path(fullname); + + // create a new file. + fd = mkstemp(fullname); + + if (fd < 0) { + warning("Could not create file for heap with template %s", fullname); + os::free(fullname); + return -1; + } else { + // delete the name from the filesystem. When 'fd' is closed, the file (and space) will be deleted. + int ret = unlink(fullname); + assert_with_errno(ret == 0, "unlink returned error"); + } + + os::free(fullname); + } + + return fd; +} + +// Is a (classpath) directory empty? 
+bool os::dir_is_empty(const char* path) { + DIR *dir = nullptr; + struct dirent *ptr; + + dir = ::opendir(path); + if (dir == nullptr) return true; + + // Scan the directory + bool result = true; + while (result && (ptr = ::readdir(dir)) != nullptr) { + if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) { + result = false; + } + } + ::closedir(dir); + return result; +} + +static char* reserve_mmapped_memory(size_t bytes, char* requested_addr) { + char * addr; + int flags = MAP_PRIVATE NOT_AIX( | MAP_NORESERVE ) | MAP_ANONYMOUS; + if (requested_addr != nullptr) { + assert((uintptr_t)requested_addr % os::vm_page_size() == 0, "Requested address should be aligned to OS page size"); + flags |= MAP_FIXED; + } + + // Map reserved/uncommitted pages PROT_NONE so we fail early if we + // touch an uncommitted page. Otherwise, the read/write might + // succeed if we have enough swap space to back the physical page. + addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, + flags, -1, 0); + + if (addr != MAP_FAILED) { + MemTracker::record_virtual_memory_reserve((address)addr, bytes, CALLER_PC); + return addr; + } + return nullptr; +} + +static int util_posix_fallocate(int fd, off_t offset, off_t len) { +#ifdef __APPLE__ + fstore_t store = { F_ALLOCATECONTIG, F_PEOFPOSMODE, 0, len }; + // First we try to get a continuous chunk of disk space + int ret = fcntl(fd, F_PREALLOCATE, &store); + if (ret == -1) { + // Maybe we are too fragmented, try to allocate non-continuous range + store.fst_flags = F_ALLOCATEALL; + ret = fcntl(fd, F_PREALLOCATE, &store); + } + if(ret != -1) { + return ftruncate(fd, len); + } + return -1; +#else + return posix_fallocate(fd, offset, len); +#endif +} + +// Map the given address range to the provided file descriptor. 
+char* os::map_memory_to_file(char* base, size_t size, int fd) { + assert(fd != -1, "File descriptor is not valid"); + + // allocate space for the file + int ret = util_posix_fallocate(fd, 0, (off_t)size); + if (ret != 0) { + vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory. error(%d)", ret)); + return nullptr; + } + + int prot = PROT_READ | PROT_WRITE; + int flags = MAP_SHARED; + if (base != nullptr) { + flags |= MAP_FIXED; + } + char* addr = (char*)mmap(base, size, prot, flags, fd, 0); + + if (addr == MAP_FAILED) { + warning("Failed mmap to file. (%s)", os::strerror(errno)); + return nullptr; + } + if (base != nullptr && addr != base) { + if (!os::release_memory(addr, size)) { + warning("Could not release memory on unsuccessful file mapping"); + } + return nullptr; + } + return addr; +} + +char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) { + assert(fd != -1, "File descriptor is not valid"); + assert(base != nullptr, "Base cannot be null"); + + return map_memory_to_file(base, size, fd); +} + +static size_t calculate_aligned_extra_size(size_t size, size_t alignment) { + assert(is_aligned(alignment, os::vm_allocation_granularity()), + "Alignment must be a multiple of allocation granularity (page size)"); + assert(is_aligned(size, os::vm_allocation_granularity()), + "Size must be a multiple of allocation granularity (page size)"); + + size_t extra_size = size + alignment; + assert(extra_size >= size, "overflow, size is too large to allow alignment"); + return extra_size; +} + +// After a bigger chunk was mapped, unmaps start and end parts to get the requested alignment. 
+static char* chop_extra_memory(size_t size, size_t alignment, char* extra_base, size_t extra_size) { + // Do manual alignment + char* aligned_base = align_up(extra_base, alignment); + + // [ | | ] + // ^ extra_base + // ^ extra_base + begin_offset == aligned_base + // extra_base + begin_offset + size ^ + // extra_base + extra_size ^ + // |<>| == begin_offset + // end_offset == |<>| + size_t begin_offset = aligned_base - extra_base; + size_t end_offset = (extra_base + extra_size) - (aligned_base + size); + + if (begin_offset > 0) { + os::release_memory(extra_base, begin_offset); + } + + if (end_offset > 0) { + os::release_memory(extra_base + begin_offset + size, end_offset); + } + + return aligned_base; +} + +// Multiple threads can race in this code, and can remap over each other with MAP_FIXED, +// so on posix, unmap the section at the start and at the end of the chunk that we mapped +// rather than unmapping and remapping the whole chunk to get requested alignment. +char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) { + size_t extra_size = calculate_aligned_extra_size(size, alignment); + char* extra_base = os::reserve_memory(extra_size, exec); + if (extra_base == nullptr) { + return nullptr; + } + return chop_extra_memory(size, alignment, extra_base, extra_size); +} + +char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int file_desc) { + size_t extra_size = calculate_aligned_extra_size(size, alignment); + // For file mapping, we do not call os:map_memory_to_file(size,fd) since: + // - we later chop away parts of the mapping using os::release_memory and that could fail if the + // original mmap call had been tied to an fd. + // - The memory API os::reserve_memory uses is an implementation detail. It may (and usually is) + // mmap but it also may System V shared memory which cannot be uncommitted as a whole, so + // chopping off and unmapping excess bits back and front (see below) would not work. 
+ char* extra_base = reserve_mmapped_memory(extra_size, nullptr); + if (extra_base == nullptr) { + return nullptr; + } + char* aligned_base = chop_extra_memory(size, alignment, extra_base, extra_size); + // After we have an aligned address, we can replace anonymous mapping with file mapping + if (replace_existing_mapping_with_file_mapping(aligned_base, size, file_desc) == nullptr) { + vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory")); + } + MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC); + return aligned_base; +} +#endif // !NATIVE_IMAGE + +int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) { + // All supported POSIX platforms provide C99 semantics. + ALLOW_C_FUNCTION(::vsnprintf, int result = ::vsnprintf(buf, len, fmt, args);) + // If an encoding error occurred (result < 0) then it's not clear + // whether the buffer is NUL terminated, so ensure it is. + if ((result < 0) && (len > 0)) { + buf[len - 1] = '\0'; + } + return result; +} + +#ifndef NATIVE_IMAGE +int os::get_fileno(FILE* fp) { + return NOT_AIX(::)fileno(fp); +} + +struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) { + return gmtime_r(clock, res); +} + +void os::Posix::print_load_average(outputStream* st) { + st->print("load average: "); + double loadavg[3]; + int res = os::loadavg(loadavg, 3); + if (res != -1) { + st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]); + } else { + st->print(" Unavailable"); + } + st->cr(); +} + +// boot/uptime information; +// unfortunately it does not work on macOS and Linux because the utx chain has no entry +// for reboot at least on my test machines +void os::Posix::print_uptime_info(outputStream* st) { + int bootsec = -1; + time_t currsec = time(nullptr); + struct utmpx* ent; + setutxent(); + while ((ent = getutxent())) { + if (!strcmp("system boot", ent->ut_line)) { + bootsec = ent->ut_tv.tv_sec; + break; + } + } + + if (bootsec != 
-1) { + os::print_dhm(st, "OS uptime:", currsec-bootsec); + } +} + +static void print_rlimit(outputStream* st, const char* msg, + int resource, bool output_k = false) { + struct rlimit rlim; + + st->print(" %s ", msg); + int res = getrlimit(resource, &rlim); + if (res == -1) { + st->print("could not obtain value"); + } else { + // soft limit + if (rlim.rlim_cur == RLIM_INFINITY) { st->print("infinity"); } + else { + if (output_k) { st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / K); } + else { st->print(UINT64_FORMAT, uint64_t(rlim.rlim_cur)); } + } + // hard limit + st->print("/"); + if (rlim.rlim_max == RLIM_INFINITY) { st->print("infinity"); } + else { + if (output_k) { st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_max) / K); } + else { st->print(UINT64_FORMAT, uint64_t(rlim.rlim_max)); } + } + } +} + +void os::Posix::print_rlimit_info(outputStream* st) { + st->print("rlimit (soft/hard):"); + print_rlimit(st, "STACK", RLIMIT_STACK, true); + print_rlimit(st, ", CORE", RLIMIT_CORE, true); + +#if defined(AIX) + st->print(", NPROC "); + st->print("%d", sysconf(_SC_CHILD_MAX)); + + print_rlimit(st, ", THREADS", RLIMIT_THREADS); +#else + print_rlimit(st, ", NPROC", RLIMIT_NPROC); +#endif + + print_rlimit(st, ", NOFILE", RLIMIT_NOFILE); + print_rlimit(st, ", AS", RLIMIT_AS, true); + print_rlimit(st, ", CPU", RLIMIT_CPU); + print_rlimit(st, ", DATA", RLIMIT_DATA, true); + + // maximum size of files that the process may create + print_rlimit(st, ", FSIZE", RLIMIT_FSIZE, true); + +#if defined(LINUX) || defined(__APPLE__) + // maximum number of bytes of memory that may be locked into RAM + // (rounded down to the nearest multiple of system pagesize) + print_rlimit(st, ", MEMLOCK", RLIMIT_MEMLOCK, true); +#endif + + // MacOS; The maximum size (in bytes) to which a process's resident set size may grow. 
+#if defined(__APPLE__) + print_rlimit(st, ", RSS", RLIMIT_RSS, true); +#endif + + st->cr(); +} + +void os::Posix::print_uname_info(outputStream* st) { + // kernel + st->print("uname: "); + struct utsname name; + uname(&name); + st->print("%s ", name.sysname); +#ifdef ASSERT + st->print("%s ", name.nodename); +#endif + st->print("%s ", name.release); + st->print("%s ", name.version); + st->print("%s", name.machine); + st->cr(); +} + +void os::Posix::print_umask(outputStream* st, mode_t umsk) { + st->print((umsk & S_IRUSR) ? "r" : "-"); + st->print((umsk & S_IWUSR) ? "w" : "-"); + st->print((umsk & S_IXUSR) ? "x" : "-"); + st->print((umsk & S_IRGRP) ? "r" : "-"); + st->print((umsk & S_IWGRP) ? "w" : "-"); + st->print((umsk & S_IXGRP) ? "x" : "-"); + st->print((umsk & S_IROTH) ? "r" : "-"); + st->print((umsk & S_IWOTH) ? "w" : "-"); + st->print((umsk & S_IXOTH) ? "x" : "-"); +} + +void os::print_user_info(outputStream* st) { + unsigned id = (unsigned) ::getuid(); + st->print("uid : %u ", id); + id = (unsigned) ::geteuid(); + st->print("euid : %u ", id); + id = (unsigned) ::getgid(); + st->print("gid : %u ", id); + id = (unsigned) ::getegid(); + st->print_cr("egid : %u", id); + st->cr(); + + mode_t umsk = ::umask(0); + ::umask(umsk); + st->print("umask: %04o (", (unsigned) umsk); + os::Posix::print_umask(st, umsk); + st->print_cr(")"); + st->cr(); +} + +// Print all active locale categories, one line each +void os::print_active_locale(outputStream* st) { + st->print_cr("Active Locale:"); + // Posix is quiet about how exactly LC_ALL is implemented. + // Just print it out too, in case LC_ALL is held separately + // from the individual categories. 
+ #define LOCALE_CAT_DO(f) \ + f(LC_ALL) \ + f(LC_COLLATE) \ + f(LC_CTYPE) \ + f(LC_MESSAGES) \ + f(LC_MONETARY) \ + f(LC_NUMERIC) \ + f(LC_TIME) + #define XX(cat) { cat, #cat }, + const struct { int c; const char* name; } categories[] = { + LOCALE_CAT_DO(XX) + { -1, nullptr } + }; + #undef XX + #undef LOCALE_CAT_DO + for (int i = 0; categories[i].c != -1; i ++) { + const char* locale = setlocale(categories[i].c, nullptr); + st->print_cr("%s=%s", categories[i].name, + ((locale != nullptr) ? locale : "")); + } +} + +void os::print_jni_name_prefix_on(outputStream* st, int args_size) { + // no prefix required +} + +void os::print_jni_name_suffix_on(outputStream* st, int args_size) { + // no suffix required +} + +bool os::get_host_name(char* buf, size_t buflen) { + struct utsname name; + uname(&name); + jio_snprintf(buf, buflen, "%s", name.nodename); + return true; +} + +#ifndef _LP64 +// Helper, on 32bit, for os::has_allocatable_memory_limit +static bool is_allocatable(size_t s) { + if (s < 2 * G) { + return true; + } + // Use raw anonymous mmap here; no need to go through any + // of our reservation layers. We will unmap right away. + void* p = ::mmap(nullptr, s, PROT_NONE, + MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS, -1, 0); + if (p == MAP_FAILED) { + return false; + } else { + ::munmap(p, s); + return true; + } +} +#endif // !_LP64 + + +bool os::has_allocatable_memory_limit(size_t* limit) { + struct rlimit rlim; + int getrlimit_res = getrlimit(RLIMIT_AS, &rlim); + // if there was an error when calling getrlimit, assume that there is no limitation + // on virtual memory. + bool result; + if ((getrlimit_res != 0) || (rlim.rlim_cur == RLIM_INFINITY)) { + result = false; + } else { + *limit = (size_t)rlim.rlim_cur; + result = true; + } +#ifdef _LP64 + return result; +#else + // arbitrary virtual space limit for 32 bit Unices found by testing. If + // getrlimit above returned a limit, bound it with this limit. Otherwise + // directly use it. 
+ const size_t max_virtual_limit = 3800*M; + if (result) { + *limit = MIN2(*limit, max_virtual_limit); + } else { + *limit = max_virtual_limit; + } + + // bound by actually allocatable memory. The algorithm uses two bounds, an + // upper and a lower limit. The upper limit is the current highest amount of + // memory that could not be allocated, the lower limit is the current highest + // amount of memory that could be allocated. + // The algorithm iteratively refines the result by halving the difference + // between these limits, updating either the upper limit (if that value could + // not be allocated) or the lower limit (if the that value could be allocated) + // until the difference between these limits is "small". + + // the minimum amount of memory we care about allocating. + const size_t min_allocation_size = M; + + size_t upper_limit = *limit; + + // first check a few trivial cases + if (is_allocatable(upper_limit) || (upper_limit <= min_allocation_size)) { + *limit = upper_limit; + } else if (!is_allocatable(min_allocation_size)) { + // we found that not even min_allocation_size is allocatable. Return it + // anyway. There is no point to search for a better value any more. + *limit = min_allocation_size; + } else { + // perform the binary search. + size_t lower_limit = min_allocation_size; + while ((upper_limit - lower_limit) > min_allocation_size) { + size_t temp_limit = ((upper_limit - lower_limit) / 2) + lower_limit; + temp_limit = align_down(temp_limit, min_allocation_size); + if (is_allocatable(temp_limit)) { + lower_limit = temp_limit; + } else { + upper_limit = temp_limit; + } + } + *limit = lower_limit; + } + return true; +#endif +} + +void* os::get_default_process_handle() { +#ifdef __APPLE__ + // MacOS X needs to use RTLD_FIRST instead of RTLD_LAZY + // to avoid finding unexpected symbols on second (or later) + // loads of a library. 
+ return (void*)::dlopen(nullptr, RTLD_FIRST); +#else + return (void*)::dlopen(nullptr, RTLD_LAZY); +#endif +} + +void* os::dll_lookup(void* handle, const char* name) { + return dlsym(handle, name); +} + +void os::dll_unload(void *lib) { + // os::Linux::dll_path returns a pointer to a string that is owned by the dynamic loader. Upon + // calling dlclose the dynamic loader may free the memory containing the string, thus we need to + // copy the string to be able to reference it after dlclose. + const char* l_path = nullptr; + +#ifdef LINUX + char* l_pathdup = nullptr; + l_path = os::Linux::dll_path(lib); + if (l_path != nullptr) { + l_path = l_pathdup = os::strdup(l_path); + } +#endif // LINUX + + JFR_ONLY(NativeLibraryUnloadEvent unload_event(l_path);) + + if (l_path == nullptr) { + l_path = ""; + } + + char ebuf[1024]; + bool res = os::pd_dll_unload(lib, ebuf, sizeof(ebuf)); + + if (res) { + Events::log_dll_message(nullptr, "Unloaded shared library \"%s\" [" INTPTR_FORMAT "]", + l_path, p2i(lib)); + log_info(os)("Unloaded shared library \"%s\" [" INTPTR_FORMAT "]", l_path, p2i(lib)); + JFR_ONLY(unload_event.set_result(true);) + } else { + Events::log_dll_message(nullptr, "Attempt to unload shared library \"%s\" [" INTPTR_FORMAT "] failed, %s", + l_path, p2i(lib), ebuf); + log_info(os)("Attempt to unload shared library \"%s\" [" INTPTR_FORMAT "] failed, %s", + l_path, p2i(lib), ebuf); + JFR_ONLY(unload_event.set_error_msg(ebuf);) + } + LINUX_ONLY(os::free(l_pathdup)); +} + +jlong os::lseek(int fd, jlong offset, int whence) { + return (jlong) BSD_ONLY(::lseek) NOT_BSD(::lseek64)(fd, offset, whence); +} + +int os::ftruncate(int fd, jlong length) { + return BSD_ONLY(::ftruncate) NOT_BSD(::ftruncate64)(fd, length); +} + +const char* os::get_current_directory(char *buf, size_t buflen) { + return getcwd(buf, buflen); +} + +FILE* os::fdopen(int fd, const char* mode) { + return ::fdopen(fd, mode); +} + +ssize_t os::pd_write(int fd, const void *buf, size_t nBytes) { + 
ssize_t res; + RESTARTABLE(::write(fd, buf, nBytes), res); + return res; +} + +ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { + return ::pread(fd, buf, nBytes, offset); +} + +void os::flockfile(FILE* fp) { + ::flockfile(fp); +} + +void os::funlockfile(FILE* fp) { + ::funlockfile(fp); +} + +DIR* os::opendir(const char* dirname) { + assert(dirname != nullptr, "just checking"); + return ::opendir(dirname); +} + +struct dirent* os::readdir(DIR* dirp) { + assert(dirp != nullptr, "just checking"); + return ::readdir(dirp); +} + +int os::closedir(DIR *dirp) { + assert(dirp != nullptr, "just checking"); + return ::closedir(dirp); +} + +int os::socket_close(int fd) { + return ::close(fd); +} + +ssize_t os::recv(int fd, char* buf, size_t nBytes, uint flags) { + RESTARTABLE_RETURN_SSIZE_T(::recv(fd, buf, nBytes, flags)); +} + +ssize_t os::send(int fd, char* buf, size_t nBytes, uint flags) { + RESTARTABLE_RETURN_SSIZE_T(::send(fd, buf, nBytes, flags)); +} + +ssize_t os::raw_send(int fd, char* buf, size_t nBytes, uint flags) { + return os::send(fd, buf, nBytes, flags); +} + +ssize_t os::connect(int fd, struct sockaddr* him, socklen_t len) { + RESTARTABLE_RETURN_SSIZE_T(::connect(fd, him, len)); +} + +void os::exit(int num) { + ALLOW_C_FUNCTION(::exit, ::exit(num);) +} + +void os::_exit(int num) { + ALLOW_C_FUNCTION(::_exit, ::_exit(num);) +} + +// Builds a platform dependent Agent_OnLoad_ function name +// which is used to find statically linked in agents. +// Parameters: +// sym_name: Symbol in library we are looking for +// lib_name: Name of library to look in, null for shared libs. 
+// is_absolute_path == true if lib_name is absolute path to agent +// such as "/a/b/libL.so" +// == false if only the base name of the library is passed in +// such as "L" +char* os::build_agent_function_name(const char *sym_name, const char *lib_name, + bool is_absolute_path) { + char *agent_entry_name; + size_t len; + size_t name_len; + size_t prefix_len = strlen(JNI_LIB_PREFIX); + size_t suffix_len = strlen(JNI_LIB_SUFFIX); + const char *start; + + if (lib_name != nullptr) { + name_len = strlen(lib_name); + if (is_absolute_path) { + // Need to strip path, prefix and suffix + if ((start = strrchr(lib_name, *os::file_separator())) != nullptr) { + lib_name = ++start; + } + if (strlen(lib_name) <= (prefix_len + suffix_len)) { + return nullptr; + } + lib_name += prefix_len; + name_len = strlen(lib_name) - suffix_len; + } + } + len = (lib_name != nullptr ? name_len : 0) + strlen(sym_name) + 2; + agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread); + if (agent_entry_name == nullptr) { + return nullptr; + } + strcpy(agent_entry_name, sym_name); + if (lib_name != nullptr) { + strcat(agent_entry_name, "_"); + strncat(agent_entry_name, lib_name, name_len); + } + return agent_entry_name; +} + +// Sleep forever; naked call to OS-specific sleep; use with CAUTION +void os::infinite_sleep() { + while (true) { // sleep forever ... + ::sleep(100); // ... 
100 seconds at a time + } +} + +void os::naked_short_nanosleep(jlong ns) { + struct timespec req; + assert(ns > -1 && ns < NANOUNITS, "Un-interruptable sleep, short time use only"); + req.tv_sec = 0; + req.tv_nsec = ns; + ::nanosleep(&req, nullptr); + return; +} + +void os::naked_short_sleep(jlong ms) { + assert(ms < MILLIUNITS, "Un-interruptable sleep, short time use only"); + os::naked_short_nanosleep(millis_to_nanos(ms)); + return; +} + +char* os::Posix::describe_pthread_attr(char* buf, size_t buflen, const pthread_attr_t* attr) { + size_t stack_size = 0; + size_t guard_size = 0; + int detachstate = 0; + pthread_attr_getstacksize(attr, &stack_size); + pthread_attr_getguardsize(attr, &guard_size); + // Work around glibc stack guard issue, see os::create_thread() in os_linux.cpp. + LINUX_ONLY(if (os::Linux::adjustStackSizeForGuardPages()) stack_size -= guard_size;) + pthread_attr_getdetachstate(attr, &detachstate); + jio_snprintf(buf, buflen, "stacksize: " SIZE_FORMAT "k, guardsize: " SIZE_FORMAT "k, %s", + stack_size / K, guard_size / K, + (detachstate == PTHREAD_CREATE_DETACHED ? "detached" : "joinable")); + return buf; +} + +char* os::Posix::realpath(const char* filename, char* outbuf, size_t outbuflen) { + + if (filename == nullptr || outbuf == nullptr || outbuflen < 1) { + assert(false, "os::Posix::realpath: invalid arguments."); + errno = EINVAL; + return nullptr; + } + + char* result = nullptr; + + // This assumes platform realpath() is implemented according to POSIX.1-2008. + // POSIX.1-2008 allows to specify null for the output buffer, in which case + // output buffer is dynamically allocated and must be ::free()'d by the caller. 
+ ALLOW_C_FUNCTION(::realpath, char* p = ::realpath(filename, nullptr);) + if (p != nullptr) { + if (strlen(p) < outbuflen) { + strcpy(outbuf, p); + result = outbuf; + } else { + errno = ENAMETOOLONG; + } + ALLOW_C_FUNCTION(::free, ::free(p);) // *not* os::free + } else { + // Fallback for platforms struggling with modern Posix standards (AIX 5.3, 6.1). If realpath + // returns EINVAL, this may indicate that realpath is not POSIX.1-2008 compatible and + // that it complains about the null we handed down as user buffer. + // In this case, use the user provided buffer but at least check whether realpath caused + // a memory overwrite. + if (errno == EINVAL) { + outbuf[outbuflen - 1] = '\0'; + ALLOW_C_FUNCTION(::realpath, p = ::realpath(filename, outbuf);) + if (p != nullptr) { + guarantee(outbuf[outbuflen - 1] == '\0', "realpath buffer overwrite detected."); + result = p; + } + } + } + return result; + +} + +int os::stat(const char *path, struct stat *sbuf) { + return ::stat(path, sbuf); +} + +char * os::native_path(char *path) { + return path; +} + +bool os::same_files(const char* file1, const char* file2) { + if (file1 == nullptr && file2 == nullptr) { + return true; + } + + if (file1 == nullptr || file2 == nullptr) { + return false; + } + + if (strcmp(file1, file2) == 0) { + return true; + } + + bool is_same = false; + struct stat st1; + struct stat st2; + + if (os::stat(file1, &st1) < 0) { + return false; + } + + if (os::stat(file2, &st2) < 0) { + return false; + } + + if (st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino) { + // same files + is_same = true; + } + return is_same; +} + +// Called when creating the thread. 
The minimum stack sizes have already been calculated +size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) { + size_t stack_size; + if (req_stack_size == 0) { + stack_size = default_stack_size(thr_type); + } else { + stack_size = req_stack_size; + } + + switch (thr_type) { + case os::java_thread: + // Java threads use ThreadStackSize which default value can be + // changed with the flag -Xss + if (req_stack_size == 0 && JavaThread::stack_size_at_create() > 0) { + // no requested size and we have a more specific default value + stack_size = JavaThread::stack_size_at_create(); + } + stack_size = MAX2(stack_size, + _java_thread_min_stack_allowed); + break; + case os::compiler_thread: + if (req_stack_size == 0 && CompilerThreadStackSize > 0) { + // no requested size and we have a more specific default value + stack_size = (size_t)(CompilerThreadStackSize * K); + } + stack_size = MAX2(stack_size, + _compiler_thread_min_stack_allowed); + break; + case os::vm_thread: + case os::gc_thread: + case os::watcher_thread: + default: // presume the unknown thr_type is a VM internal + if (req_stack_size == 0 && VMThreadStackSize > 0) { + // no requested size and we have a more specific default value + stack_size = (size_t)(VMThreadStackSize * K); + } + + stack_size = MAX2(stack_size, + _vm_internal_thread_min_stack_allowed); + break; + } + + // pthread_attr_setstacksize() may require that the size be rounded up to the OS page size. + // Be careful not to round up to 0. Align down in that case. 
+ if (stack_size <= SIZE_MAX - vm_page_size()) { + stack_size = align_up(stack_size, vm_page_size()); + } else { + stack_size = align_down(stack_size, vm_page_size()); + } + + return stack_size; +} + +#ifndef ZERO +#ifndef ARM +static bool get_frame_at_stack_banging_point(JavaThread* thread, address pc, const void* ucVoid, frame* fr) { + if (Interpreter::contains(pc)) { + // interpreter performs stack banging after the fixed frame header has + // been generated while the compilers perform it before. To maintain + // semantic consistency between interpreted and compiled frames, the + // method returns the Java sender of the current frame. + *fr = os::fetch_frame_from_context(ucVoid); + if (!fr->is_first_java_frame()) { + // get_frame_at_stack_banging_point() is only called when we + // have well defined stacks so java_sender() calls do not need + // to assert safe_for_sender() first. + *fr = fr->java_sender(); + } + } else { + // more complex code with compiled code + assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above"); + CodeBlob* cb = CodeCache::find_blob(pc); + if (cb == nullptr || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) { + // Not sure where the pc points to, fallback to default + // stack overflow handling + return false; + } else { + // in compiled code, the stack banging is performed just after the return pc + // has been pushed on the stack + *fr = os::fetch_compiled_frame_from_context(ucVoid); + if (!fr->is_java_frame()) { + assert(!fr->is_first_frame(), "Safety check"); + // See java_sender() comment above. + *fr = fr->java_sender(); + } + } + } + assert(fr->is_java_frame(), "Safety check"); + return true; +} +#endif // ARM + +// This return true if the signal handler should just continue, ie. 
return after calling this +bool os::Posix::handle_stack_overflow(JavaThread* thread, address addr, address pc, + const void* ucVoid, address* stub) { + // stack overflow + StackOverflow* overflow_state = thread->stack_overflow_state(); + if (overflow_state->in_stack_yellow_reserved_zone(addr)) { + if (thread->thread_state() == _thread_in_Java) { +#ifndef ARM + // arm32 doesn't have this + // vthreads don't support this + if (!thread->is_vthread_mounted() && overflow_state->in_stack_reserved_zone(addr)) { + frame fr; + if (get_frame_at_stack_banging_point(thread, pc, ucVoid, &fr)) { + assert(fr.is_java_frame(), "Must be a Java frame"); + frame activation = + SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr); + if (activation.sp() != nullptr) { + overflow_state->disable_stack_reserved_zone(); + if (activation.is_interpreted_frame()) { + overflow_state->set_reserved_stack_activation((address)(activation.fp() + // Some platforms use frame pointers for interpreter frames, others use initial sp. +#if !defined(PPC64) && !defined(S390) + + frame::interpreter_frame_initial_sp_offset +#endif + )); + } else { + overflow_state->set_reserved_stack_activation((address)activation.unextended_sp()); + } + return true; // just continue + } + } + } +#endif // ARM + // Throw a stack overflow exception. Guard pages will be re-enabled + // while unwinding the stack. + overflow_state->disable_stack_yellow_reserved_zone(); + *stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW); + } else { + // Thread was in the vm or native code. Return and try to finish. + overflow_state->disable_stack_yellow_reserved_zone(); + return true; // just continue + } + } else if (overflow_state->in_stack_red_zone(addr)) { + // Fatal red zone violation. Disable the guard pages and keep + // on handling the signal. 
+ overflow_state->disable_stack_red_zone(); + tty->print_raw_cr("An irrecoverable stack overflow has occurred."); + + // This is a likely cause, but hard to verify. Let's just print + // it as a hint. + tty->print_raw_cr("Please check if any of your loaded .so files has " + "enabled executable stack (see man page execstack(8))"); + + } else { +#ifdef LINUX + // This only works with os::Linux::manually_expand_stack() + + // Accessing stack address below sp may cause SEGV if current + // thread has MAP_GROWSDOWN stack. This should only happen when + // current thread was created by user code with MAP_GROWSDOWN flag + // and then attached to VM. See notes in os_linux.cpp. + if (thread->osthread()->expanding_stack() == 0) { + thread->osthread()->set_expanding_stack(); + if (os::Linux::manually_expand_stack(thread, addr)) { + thread->osthread()->clear_expanding_stack(); + return true; // just continue + } + thread->osthread()->clear_expanding_stack(); + } else { + fatal("recursive segv. expanding stack."); + } +#else + tty->print_raw_cr("SIGSEGV happened inside stack but outside yellow and red zone."); +#endif // LINUX + } + return false; +} +#endif // ZERO + +bool os::Posix::is_root(uid_t uid){ + return ROOT_UID == uid; +} + +bool os::Posix::matches_effective_uid_or_root(uid_t uid) { + return is_root(uid) || geteuid() == uid; +} + +bool os::Posix::matches_effective_uid_and_gid_or_root(uid_t uid, gid_t gid) { + return is_root(uid) || (geteuid() == uid && getegid() == gid); +} + +// Shared clock/time and other supporting routines for pthread_mutex/cond +// initialization. This is enabled on Solaris but only some of the clock/time +// functionality is actually used there. + +// Shared condattr object for use with relative timed-waits. Will be associated +// with CLOCK_MONOTONIC if available to avoid issues with time-of-day changes, +// but otherwise whatever default is used by the platform - generally the +// time-of-day clock. 
+static pthread_condattr_t _condAttr[1]; + +// Shared mutexattr to explicitly set the type to PTHREAD_MUTEX_NORMAL as not +// all systems (e.g. FreeBSD) map the default to "normal". +static pthread_mutexattr_t _mutexAttr[1]; + +// common basic initialization that is always supported +static void pthread_init_common(void) { + int status; + if ((status = pthread_condattr_init(_condAttr)) != 0) { + fatal("pthread_condattr_init: %s", os::strerror(status)); + } + if ((status = pthread_mutexattr_init(_mutexAttr)) != 0) { + fatal("pthread_mutexattr_init: %s", os::strerror(status)); + } + if ((status = pthread_mutexattr_settype(_mutexAttr, PTHREAD_MUTEX_NORMAL)) != 0) { + fatal("pthread_mutexattr_settype: %s", os::strerror(status)); + } + PlatformMutex::init(); +} + +static int (*_pthread_condattr_setclock)(pthread_condattr_t *, clockid_t) = nullptr; + +static bool _use_clock_monotonic_condattr = false; + +// Determine what POSIX API's are present and do appropriate +// configuration. +void os::Posix::init(void) { +#if defined(_ALLBSD_SOURCE) + clock_tics_per_sec = CLK_TCK; +#else + clock_tics_per_sec = checked_cast(sysconf(_SC_CLK_TCK)); +#endif + // NOTE: no logging available when this is called. Put logging + // statements in init_2(). + + // Check for pthread_condattr_setclock support. + + // libpthread is already loaded. + int (*condattr_setclock_func)(pthread_condattr_t*, clockid_t) = + (int (*)(pthread_condattr_t*, clockid_t))dlsym(RTLD_DEFAULT, + "pthread_condattr_setclock"); + if (condattr_setclock_func != nullptr) { + _pthread_condattr_setclock = condattr_setclock_func; + } + + // Now do general initialization. 
+ + pthread_init_common(); + + int status; + if (_pthread_condattr_setclock != nullptr) { + if ((status = _pthread_condattr_setclock(_condAttr, CLOCK_MONOTONIC)) != 0) { + if (status == EINVAL) { + _use_clock_monotonic_condattr = false; + warning("Unable to use monotonic clock with relative timed-waits" \ + " - changes to the time-of-day clock may have adverse affects"); + } else { + fatal("pthread_condattr_setclock: %s", os::strerror(status)); + } + } else { + _use_clock_monotonic_condattr = true; + } + } + + initial_time_count = javaTimeNanos(); +} + +void os::Posix::init_2(void) { + log_info(os)("Use of CLOCK_MONOTONIC is supported"); + log_info(os)("Use of pthread_condattr_setclock is%s supported", + (_pthread_condattr_setclock != nullptr ? "" : " not")); + log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with %s", + _use_clock_monotonic_condattr ? "CLOCK_MONOTONIC" : "the default clock"); +} + +// Utility to convert the given timeout to an absolute timespec +// (based on the appropriate clock) to use with pthread_cond_timewait, +// and sem_timedwait(). +// The clock queried here must be the clock used to manage the +// timeout of the condition variable or semaphore. +// +// The passed in timeout value is either a relative time in nanoseconds +// or an absolute time in milliseconds. A relative timeout will be +// associated with CLOCK_MONOTONIC if available, unless the real-time clock +// is explicitly requested; otherwise, or if absolute, +// the default time-of-day clock will be used. + +// Given time is a 64-bit value and the time_t used in the timespec is +// sometimes a signed-32-bit value we have to watch for overflow if times +// way in the future are given. Further on Solaris versions +// prior to 10 there is a restriction (see cond_timedwait) that the specified +// number of seconds, in abstime, is less than current_time + 100000000. 
+// As it will be over 20 years before "now + 100000000" will overflow we can +// ignore overflow and just impose a hard-limit on seconds using the value +// of "now + 100000000". This places a limit on the timeout of about 3.17 +// years from "now". +// +#define MAX_SECS 100000000 + +// Calculate a new absolute time that is "timeout" nanoseconds from "now". +// "unit" indicates the unit of "now_part_sec" (may be nanos or micros depending +// on which clock API is being used). +static void calc_rel_time(timespec* abstime, jlong timeout, jlong now_sec, + jlong now_part_sec, jlong unit) { + time_t max_secs = now_sec + MAX_SECS; + + jlong seconds = timeout / NANOUNITS; + timeout %= NANOUNITS; // remaining nanos + + if (seconds >= MAX_SECS) { + // More seconds than we can add, so pin to max_secs. + abstime->tv_sec = max_secs; + abstime->tv_nsec = 0; + } else { + abstime->tv_sec = now_sec + seconds; + long nanos = (now_part_sec * (NANOUNITS / unit)) + timeout; + if (nanos >= NANOUNITS) { // overflow + abstime->tv_sec += 1; + nanos -= NANOUNITS; + } + abstime->tv_nsec = nanos; + } +} + +// Unpack the given deadline in milliseconds since the epoch, into the given timespec. +// The current time in seconds is also passed in to enforce an upper bound as discussed above. +static void unpack_abs_time(timespec* abstime, jlong deadline, jlong now_sec) { + time_t max_secs = now_sec + MAX_SECS; + + jlong seconds = deadline / MILLIUNITS; + jlong millis = deadline % MILLIUNITS; + + if (seconds >= max_secs) { + // Absolute seconds exceeds allowed max, so pin to max_secs. + abstime->tv_sec = max_secs; + abstime->tv_nsec = 0; + } else { + abstime->tv_sec = seconds; + abstime->tv_nsec = millis_to_nanos(millis); + } +} + +static jlong millis_to_nanos_bounded(jlong millis) { + // We have to watch for overflow when converting millis to nanos, + // but if millis is that large then we will end up limiting to + // MAX_SECS anyway, so just do that here. 
+ if (millis / MILLIUNITS > MAX_SECS) { + millis = jlong(MAX_SECS) * MILLIUNITS; + } + return millis_to_nanos(millis); +} + +static void to_abstime(timespec* abstime, jlong timeout, + bool isAbsolute, bool isRealtime) { + DEBUG_ONLY(time_t max_secs = MAX_SECS;) + + if (timeout < 0) { + timeout = 0; + } + + clockid_t clock = CLOCK_MONOTONIC; + if (isAbsolute || (!_use_clock_monotonic_condattr || isRealtime)) { + clock = CLOCK_REALTIME; + } + + struct timespec now; + int status = clock_gettime(clock, &now); + assert(status == 0, "clock_gettime error: %s", os::strerror(errno)); + + if (!isAbsolute) { + calc_rel_time(abstime, timeout, now.tv_sec, now.tv_nsec, NANOUNITS); + } else { + unpack_abs_time(abstime, timeout, now.tv_sec); + } + DEBUG_ONLY(max_secs += now.tv_sec;) + + assert(abstime->tv_sec >= 0, "tv_sec < 0"); + assert(abstime->tv_sec <= max_secs, "tv_sec > max_secs"); + assert(abstime->tv_nsec >= 0, "tv_nsec < 0"); + assert(abstime->tv_nsec < NANOUNITS, "tv_nsec >= NANOUNITS"); +} + +// Create an absolute time 'millis' milliseconds in the future, using the +// real-time (time-of-day) clock. Used by PosixSemaphore. 
+void os::Posix::to_RTC_abstime(timespec* abstime, int64_t millis) { + to_abstime(abstime, millis_to_nanos_bounded(millis), + false /* not absolute */, + true /* use real-time clock */); +} + +// Common (partly) shared time functions + +jlong os::javaTimeMillis() { + struct timespec ts; + int status = clock_gettime(CLOCK_REALTIME, &ts); + assert(status == 0, "clock_gettime error: %s", os::strerror(errno)); + return jlong(ts.tv_sec) * MILLIUNITS + + jlong(ts.tv_nsec) / NANOUNITS_PER_MILLIUNIT; +} + +void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) { + struct timespec ts; + int status = clock_gettime(CLOCK_REALTIME, &ts); + assert(status == 0, "clock_gettime error: %s", os::strerror(errno)); + seconds = jlong(ts.tv_sec); + nanos = jlong(ts.tv_nsec); +} + +// macOS and AIX have platform specific implementations for javaTimeNanos() +// using native clock/timer access APIs. These have historically worked well +// for those platforms, but it may be possible for them to switch to the +// generic clock_gettime mechanism in the future. +#if !defined(__APPLE__) && !defined(AIX) + +jlong os::javaTimeNanos() { + struct timespec tp; + int status = clock_gettime(CLOCK_MONOTONIC, &tp); + assert(status == 0, "clock_gettime error: %s", os::strerror(errno)); + jlong result = jlong(tp.tv_sec) * NANOSECS_PER_SEC + jlong(tp.tv_nsec); + return result; +} + +// for timer info max values which include all bits +#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) + +void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { + // CLOCK_MONOTONIC - amount of time since some arbitrary point in the past + info_ptr->max_value = ALL_64_BITS; + info_ptr->may_skip_backward = false; // not subject to resetting or drifting + info_ptr->may_skip_forward = false; // not subject to resetting or drifting + info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time +} +#endif // ! APPLE && !AIX + +// Time since start-up in seconds to a fine granularity. 
+double os::elapsedTime() { + return ((double)os::elapsed_counter()) / (double)os::elapsed_frequency(); // nanosecond resolution +} + +jlong os::elapsed_counter() { + return os::javaTimeNanos() - initial_time_count; +} + +jlong os::elapsed_frequency() { + return NANOSECS_PER_SEC; // nanosecond resolution +} + +bool os::supports_vtime() { return true; } + +// Return the real, user, and system times in seconds from an +// arbitrary fixed point in the past. +bool os::getTimesSecs(double* process_real_time, + double* process_user_time, + double* process_system_time) { + struct tms ticks; + clock_t real_ticks = times(&ticks); + + if (real_ticks == (clock_t) (-1)) { + return false; + } else { + double ticks_per_second = (double) clock_tics_per_sec; + *process_user_time = ((double) ticks.tms_utime) / ticks_per_second; + *process_system_time = ((double) ticks.tms_stime) / ticks_per_second; + *process_real_time = ((double) real_ticks) / ticks_per_second; + + return true; + } +} + +char * os::local_time_string(char *buf, size_t buflen) { + struct tm t; + time_t long_time; + time(&long_time); + localtime_r(&long_time, &t); + jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", + t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, + t.tm_hour, t.tm_min, t.tm_sec); + return buf; +} + +struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { + return localtime_r(clock, res); +} + +// PlatformEvent +// +// Assumption: +// Only one parker can exist on an event, which is why we allocate +// them per-thread. Multiple unparkers can coexist. +// +// _event serves as a restricted-range semaphore. +// -1 : thread is blocked, i.e. there is a waiter +// 0 : neutral: thread is running or ready, +// could have been signaled after a wait started +// 1 : signaled - thread is running or ready +// +// Having three states allows for some detection of bad usage - see +// comments on unpark(). 
+ +PlatformEvent::PlatformEvent() { + int status = pthread_cond_init(_cond, _condAttr); + assert_status(status == 0, status, "cond_init"); + status = pthread_mutex_init(_mutex, _mutexAttr); + assert_status(status == 0, status, "mutex_init"); + _event = 0; + _nParked = 0; +} + +void PlatformEvent::park() { // AKA "down()" + // Transitions for _event: + // -1 => -1 : illegal + // 1 => 0 : pass - return immediately + // 0 => -1 : block; then set _event to 0 before returning + + // Invariant: Only the thread associated with the PlatformEvent + // may call park(). + assert(_nParked == 0, "invariant"); + + int v; + + // atomically decrement _event + for (;;) { + v = _event; + if (Atomic::cmpxchg(&_event, v, v - 1) == v) break; + } + guarantee(v >= 0, "invariant"); + + if (v == 0) { // Do this the hard way by blocking ... + int status = pthread_mutex_lock(_mutex); + assert_status(status == 0, status, "mutex_lock"); + guarantee(_nParked == 0, "invariant"); + ++_nParked; + while (_event < 0) { + // OS-level "spurious wakeups" are ignored + status = pthread_cond_wait(_cond, _mutex); + assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT), + status, "cond_wait"); + } + --_nParked; + + _event = 0; + status = pthread_mutex_unlock(_mutex); + assert_status(status == 0, status, "mutex_unlock"); + // Paranoia to ensure our locked and lock-free paths interact + // correctly with each other. + OrderAccess::fence(); + } + guarantee(_event >= 0, "invariant"); +} + +int PlatformEvent::park(jlong millis) { + return park_nanos(millis_to_nanos_bounded(millis)); +} + +int PlatformEvent::park_nanos(jlong nanos) { + assert(nanos > 0, "nanos are positive"); + + // Transitions for _event: + // -1 => -1 : illegal + // 1 => 0 : pass - return immediately + // 0 => -1 : block; then set _event to 0 before returning + + // Invariant: Only the thread associated with the Event/PlatformEvent + // may call park(). 
+ assert(_nParked == 0, "invariant"); + + int v; + // atomically decrement _event + for (;;) { + v = _event; + if (Atomic::cmpxchg(&_event, v, v - 1) == v) break; + } + guarantee(v >= 0, "invariant"); + + if (v == 0) { // Do this the hard way by blocking ... + struct timespec abst; + to_abstime(&abst, nanos, false, false); + + int ret = OS_TIMEOUT; + int status = pthread_mutex_lock(_mutex); + assert_status(status == 0, status, "mutex_lock"); + guarantee(_nParked == 0, "invariant"); + ++_nParked; + + while (_event < 0) { + status = pthread_cond_timedwait(_cond, _mutex, &abst); + assert_status(status == 0 || status == ETIMEDOUT, + status, "cond_timedwait"); + // OS-level "spurious wakeups" are ignored + if (status == ETIMEDOUT) break; + } + --_nParked; + + if (_event >= 0) { + ret = OS_OK; + } + + _event = 0; + status = pthread_mutex_unlock(_mutex); + assert_status(status == 0, status, "mutex_unlock"); + // Paranoia to ensure our locked and lock-free paths interact + // correctly with each other. + OrderAccess::fence(); + return ret; + } + return OS_OK; +} + +void PlatformEvent::unpark() { + // Transitions for _event: + // 0 => 1 : just return + // 1 => 1 : just return + // -1 => either 0 or 1; must signal target thread + // That is, we can safely transition _event from -1 to either + // 0 or 1. + // See also: "Semaphores in Plan 9" by Mullender & Cox + // + // Note: Forcing a transition from "-1" to "1" on an unpark() means + // that it will take two back-to-back park() calls for the owning + // thread to block. This has the benefit of forcing a spurious return + // from the first park() call after an unpark() call which will help + // shake out uses of park() and unpark() without checking state conditions + // properly. 
This spurious return doesn't manifest itself in any user code + // but only in the correctly written condition checking loops of ObjectMonitor, + // Mutex/Monitor, and JavaThread::sleep + + if (Atomic::xchg(&_event, 1) >= 0) return; + + int status = pthread_mutex_lock(_mutex); + assert_status(status == 0, status, "mutex_lock"); + int anyWaiters = _nParked; + assert(anyWaiters == 0 || anyWaiters == 1, "invariant"); + status = pthread_mutex_unlock(_mutex); + assert_status(status == 0, status, "mutex_unlock"); + + // Note that we signal() *after* dropping the lock for "immortal" Events. + // This is safe and avoids a common class of futile wakeups. In rare + // circumstances this can cause a thread to return prematurely from + // cond_{timed}wait() but the spurious wakeup is benign and the victim + // will simply re-test the condition and re-park itself. + // This provides particular benefit if the underlying platform does not + // provide wait morphing. + + if (anyWaiters != 0) { + status = pthread_cond_signal(_cond); + assert_status(status == 0, status, "cond_signal"); + } +} + +// JSR166 support + + PlatformParker::PlatformParker() : _counter(0), _cur_index(-1) { + int status = pthread_cond_init(&_cond[REL_INDEX], _condAttr); + assert_status(status == 0, status, "cond_init rel"); + status = pthread_cond_init(&_cond[ABS_INDEX], nullptr); + assert_status(status == 0, status, "cond_init abs"); + status = pthread_mutex_init(_mutex, _mutexAttr); + assert_status(status == 0, status, "mutex_init"); +} + +PlatformParker::~PlatformParker() { + int status = pthread_cond_destroy(&_cond[REL_INDEX]); + assert_status(status == 0, status, "cond_destroy rel"); + status = pthread_cond_destroy(&_cond[ABS_INDEX]); + assert_status(status == 0, status, "cond_destroy abs"); + status = pthread_mutex_destroy(_mutex); + assert_status(status == 0, status, "mutex_destroy"); +} + +// Parker::park decrements count if > 0, else does a condvar wait. 
Unpark +// sets count to 1 and signals condvar. Only one thread ever waits +// on the condvar. Contention seen when trying to park implies that someone +// is unparking you, so don't wait. And spurious returns are fine, so there +// is no need to track notifications. + +void Parker::park(bool isAbsolute, jlong time) { + + // Optional fast-path check: + // Return immediately if a permit is available. + // We depend on Atomic::xchg() having full barrier semantics + // since we are doing a lock-free update to _counter. + if (Atomic::xchg(&_counter, 0) > 0) return; + + JavaThread *jt = JavaThread::current(); + + // Optional optimization -- avoid state transitions if there's + // an interrupt pending. + if (jt->is_interrupted(false)) { + return; + } + + // Next, demultiplex/decode time arguments + struct timespec absTime; + if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all + return; + } + if (time > 0) { + to_abstime(&absTime, time, isAbsolute, false); + } + + // Enter safepoint region + // Beware of deadlocks such as 6317397. + // The per-thread Parker:: mutex is a classic leaf-lock. + // In particular a thread must never block on the Threads_lock while + // holding the Parker:: mutex. If safepoints are pending both the + // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock. + ThreadBlockInVM tbivm(jt); + + // Can't access interrupt state now that we are _thread_blocked. If we've + // been interrupted since we checked above then _counter will be > 0. + + // Don't wait if cannot get lock since interference arises from + // unparking. + if (pthread_mutex_trylock(_mutex) != 0) { + return; + } + + int status; + if (_counter > 0) { // no wait needed + _counter = 0; + status = pthread_mutex_unlock(_mutex); + assert_status(status == 0, status, "invariant"); + // Paranoia to ensure our locked and lock-free paths interact + // correctly with each other and Java-level accesses. 
+ OrderAccess::fence(); + return; + } + + OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */); + + assert(_cur_index == -1, "invariant"); + if (time == 0) { + _cur_index = REL_INDEX; // arbitrary choice when not timed + status = pthread_cond_wait(&_cond[_cur_index], _mutex); + assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT), + status, "cond_wait"); + } + else { + _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX; + status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime); + assert_status(status == 0 || status == ETIMEDOUT, + status, "cond_timedwait"); + } + _cur_index = -1; + + _counter = 0; + status = pthread_mutex_unlock(_mutex); + assert_status(status == 0, status, "invariant"); + // Paranoia to ensure our locked and lock-free paths interact + // correctly with each other and Java-level accesses. + OrderAccess::fence(); +} + +void Parker::unpark() { + int status = pthread_mutex_lock(_mutex); + assert_status(status == 0, status, "invariant"); + const int s = _counter; + _counter = 1; + // must capture correct index before unlocking + int index = _cur_index; + status = pthread_mutex_unlock(_mutex); + assert_status(status == 0, status, "invariant"); + + // Note that we signal() *after* dropping the lock for "immortal" Events. + // This is safe and avoids a common class of futile wakeups. In rare + // circumstances this can cause a thread to return prematurely from + // cond_{timed}wait() but the spurious wakeup is benign and the victim + // will simply re-test the condition and re-park itself. + // This provides particular benefit if the underlying platform does not + // provide wait morphing. 
+ + if (s < 1 && index != -1) { + // thread is definitely parked + status = pthread_cond_signal(&_cond[index]); + assert_status(status == 0, status, "invariant"); + } +} + +// Platform Mutex/Monitor implementation + +#if PLATFORM_MONITOR_IMPL_INDIRECT + +PlatformMutex::Mutex::Mutex() : _next(nullptr) { + int status = pthread_mutex_init(&_mutex, _mutexAttr); + assert_status(status == 0, status, "mutex_init"); +} + +PlatformMutex::Mutex::~Mutex() { + int status = pthread_mutex_destroy(&_mutex); + assert_status(status == 0, status, "mutex_destroy"); +} + +pthread_mutex_t PlatformMutex::_freelist_lock; +PlatformMutex::Mutex* PlatformMutex::_mutex_freelist = nullptr; + +void PlatformMutex::init() { + int status = pthread_mutex_init(&_freelist_lock, _mutexAttr); + assert_status(status == 0, status, "freelist lock init"); +} + +struct PlatformMutex::WithFreeListLocked : public StackObj { + WithFreeListLocked() { + int status = pthread_mutex_lock(&_freelist_lock); + assert_status(status == 0, status, "freelist lock"); + } + + ~WithFreeListLocked() { + int status = pthread_mutex_unlock(&_freelist_lock); + assert_status(status == 0, status, "freelist unlock"); + } +}; + +PlatformMutex::PlatformMutex() { + { + WithFreeListLocked wfl; + _impl = _mutex_freelist; + if (_impl != nullptr) { + _mutex_freelist = _impl->_next; + _impl->_next = nullptr; + return; + } + } + _impl = new Mutex(); +} + +PlatformMutex::~PlatformMutex() { + WithFreeListLocked wfl; + assert(_impl->_next == nullptr, "invariant"); + _impl->_next = _mutex_freelist; + _mutex_freelist = _impl; +} + +PlatformMonitor::Cond::Cond() : _next(nullptr) { + int status = pthread_cond_init(&_cond, _condAttr); + assert_status(status == 0, status, "cond_init"); +} + +PlatformMonitor::Cond::~Cond() { + int status = pthread_cond_destroy(&_cond); + assert_status(status == 0, status, "cond_destroy"); +} + +PlatformMonitor::Cond* PlatformMonitor::_cond_freelist = nullptr; + +PlatformMonitor::PlatformMonitor() { + { + 
WithFreeListLocked wfl; + _impl = _cond_freelist; + if (_impl != nullptr) { + _cond_freelist = _impl->_next; + _impl->_next = nullptr; + return; + } + } + _impl = new Cond(); +} + +PlatformMonitor::~PlatformMonitor() { + WithFreeListLocked wfl; + assert(_impl->_next == nullptr, "invariant"); + _impl->_next = _cond_freelist; + _cond_freelist = _impl; +} + +#else + +PlatformMutex::PlatformMutex() { + int status = pthread_mutex_init(&_mutex, _mutexAttr); + assert_status(status == 0, status, "mutex_init"); +} + +PlatformMutex::~PlatformMutex() { + int status = pthread_mutex_destroy(&_mutex); + assert_status(status == 0, status, "mutex_destroy"); +} + +PlatformMonitor::PlatformMonitor() { + int status = pthread_cond_init(&_cond, _condAttr); + assert_status(status == 0, status, "cond_init"); +} + +PlatformMonitor::~PlatformMonitor() { + int status = pthread_cond_destroy(&_cond); + assert_status(status == 0, status, "cond_destroy"); +} + +#endif // PLATFORM_MONITOR_IMPL_INDIRECT + +// Must already be locked +int PlatformMonitor::wait(uint64_t millis) { + if (millis > 0) { + struct timespec abst; + // We have to watch for overflow when converting millis to nanos, + // but if millis is that large then we will end up limiting to + // MAX_SECS anyway, so just do that here. This also handles values + // larger than int64_t max. + if (millis / MILLIUNITS > MAX_SECS) { + millis = uint64_t(MAX_SECS) * MILLIUNITS; + } + to_abstime(&abst, millis_to_nanos(int64_t(millis)), false, false); + + int ret = OS_TIMEOUT; + int status = pthread_cond_timedwait(cond(), mutex(), &abst); + assert_status(status == 0 || status == ETIMEDOUT, + status, "cond_timedwait"); + if (status == 0) { + ret = OS_OK; + } + return ret; + } else { + int status = pthread_cond_wait(cond(), mutex()); + assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT), + status, "cond_wait"); + return OS_OK; + } +} + +// Darwin has no "environ" in a dynamic library. 
+#ifdef __APPLE__ + #define environ (*_NSGetEnviron()) +#else + extern char** environ; +#endif + +char** os::get_environ() { return environ; } + +// Run the specified command in a separate process. Return its exit value, +// or -1 on failure (e.g. can't fork a new process). +// Notes: -Unlike system(), this function can be called from signal handler. It +// doesn't block SIGINT et al. +// -this function is unsafe to use in non-error situations, mainly +// because the child process will inherit all parent descriptors. +int os::fork_and_exec(const char* cmd) { + const char* argv[4] = {"sh", "-c", cmd, nullptr}; + pid_t pid = -1; + char** env = os::get_environ(); + // Note: cast is needed because posix_spawn() requires - for compatibility with ancient + // C-code - a non-const argv/envp pointer array. But it is fine to hand in literal + // strings and just cast the constness away. See also ProcessImpl_md.c. + int rc = ::posix_spawn(&pid, "/bin/sh", nullptr, nullptr, (char**) argv, env); + if (rc == 0) { + int status; + // Wait for the child process to exit. This returns immediately if + // the child has already exited. */ + while (::waitpid(pid, &status, 0) < 0) { + switch (errno) { + case ECHILD: return 0; + case EINTR: break; + default: return -1; + } + } + if (WIFEXITED(status)) { + // The child exited normally; get its exit code. + return WEXITSTATUS(status); + } else if (WIFSIGNALED(status)) { + // The child exited because of a signal + // The best value to return is 0x80 + signal number, + // because that is what all Unix shells do, and because + // it allows callers to distinguish between process exit and + // process death by signal. 
+ return 0x80 + WTERMSIG(status); + } else { + // Unknown exit code; pass it through + return status; + } + } else { + // Don't log, we are inside error handling + return -1; + } +} + +bool os::message_box(const char* title, const char* message) { + int i; + fdStream err(defaultStream::error_fd()); + for (i = 0; i < 78; i++) err.print_raw("="); + err.cr(); + err.print_raw_cr(title); + for (i = 0; i < 78; i++) err.print_raw("-"); + err.cr(); + err.print_raw_cr(message); + for (i = 0; i < 78; i++) err.print_raw("="); + err.cr(); + + char buf[16]; + // Prevent process from exiting upon "read error" without consuming all CPU + while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); } + + return buf[0] == 'y' || buf[0] == 'Y'; +} + +//////////////////////////////////////////////////////////////////////////////// +// runtime exit support + +// Note: os::shutdown() might be called very early during initialization, or +// called from signal handler. Before adding something to os::shutdown(), make +// sure it is async-safe and can handle partially initialized VM. +void os::shutdown() { + + // allow PerfMemory to attempt cleanup of any persistent resources + perfMemory_exit(); + + // needs to remove object in file system + AttachListener::abort(); + + // flush buffered output, finish log files + ostream_abort(); + + // Check for abort hook + abort_hook_t abort_hook = Arguments::abort_hook(); + if (abort_hook != nullptr) { + abort_hook(); + } + +} + +// Note: os::abort() might be called very early during initialization, or +// called from signal handler. Before adding something to os::abort(), make +// sure it is async-safe and can handle partially initialized VM. +// Also note we can abort while other threads continue to run, so we can +// easily trigger secondary faults in those threads. To reduce the likelihood +// of that we use _exit rather than exit, so that no atexit hooks get run. +// But note that os::shutdown() could also trigger secondary faults. 
+void os::abort(bool dump_core, void* siginfo, const void* context) { + os::shutdown(); + if (dump_core) { + LINUX_ONLY(if (DumpPrivateMappingsInCore) ClassLoader::close_jrt_image();) + ::abort(); // dump core + } + os::_exit(1); +} + +// Die immediately, no exit hook, no abort hook, no cleanup. +// Dump a core file, if possible, for debugging. +void os::die() { + if (TestUnresponsiveErrorHandler && !CreateCoredumpOnCrash) { + // For TimeoutInErrorHandlingTest.java, we just kill the VM + // and don't take the time to generate a core file. + ::raise(SIGKILL); + // ::raise is not noreturn, even though with SIGKILL it definitely won't + // return. Hence "fall through" to ::abort, which is declared noreturn. + } + ::abort(); +} + +const char* os::file_separator() { return "/"; } +const char* os::line_separator() { return "\n"; } +const char* os::path_separator() { return ":"; } + +#endif // !NATIVE_IMAGE diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/posix/os_posix.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/posix/os_posix.hpp new file mode 100644 index 000000000000..25db0ebc7a3e --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/posix/os_posix.hpp @@ -0,0 +1,109 @@ +/* + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef OS_POSIX_OS_POSIX_HPP +#define OS_POSIX_OS_POSIX_HPP + +#include "runtime/os.hpp" + +#include + +#ifndef NATIVE_IMAGE +// Note: the Posix API aims to capture functionality available on all Posix +// compliant platforms, but in practice the implementations may depend on +// non-Posix functionality. For example, the use of lseek64 and ftruncate64. +// This use of non-Posix API's is made possible by compiling/linking in a mode +// that is not restricted to being fully Posix complaint, such as by declaring +// -D_GNU_SOURCE. But be aware that in doing so we may enable non-Posix +// behaviour in API's that are defined by Posix. For example, that SIGSTKSZ +// is not defined as a constant as of Glibc 2.34. 
+ +// macros for restartable system calls + +#define RESTARTABLE(_cmd, _result) do { \ + _result = _cmd; \ + } while(((int)_result == OS_ERR) && (errno == EINTR)) + +#define RESTARTABLE_RETURN_SSIZE_T(_cmd) do { \ + ssize_t _result; \ + RESTARTABLE(_cmd, _result); \ + return _result; \ +} while(false) + +class os::Posix { + friend class os; + +protected: + static void print_distro_info(outputStream* st); + static void print_rlimit_info(outputStream* st); + static void print_uname_info(outputStream* st); + static void print_libversion_info(outputStream* st); + static void print_load_average(outputStream* st); + static void print_uptime_info(outputStream* st); + +public: + static void init(void); // early initialization - no logging available + static void init_2(void);// later initialization - logging available + + // Return default stack size for the specified thread type + static size_t default_stack_size(os::ThreadType thr_type); + static size_t get_initial_stack_size(ThreadType thr_type, size_t req_stack_size); + + // Helper function; describes pthread attributes as short string. String is written + // to buf with len buflen; buf is returned. + static char* describe_pthread_attr(char* buf, size_t buflen, const pthread_attr_t* attr); + + // A safe implementation of realpath which will not cause a buffer overflow if the resolved path + // is longer than PATH_MAX. + // On success, returns 'outbuf', which now contains the path. + // On error, it will return null and set errno. The content of 'outbuf' is undefined. + // On truncation error ('outbuf' too small), it will return null and set errno to ENAMETOOLONG. + static char* realpath(const char* filename, char* outbuf, size_t outbuflen); + + // Returns true if given uid is root. + static bool is_root(uid_t uid); + + // Returns true if given uid is effective or root uid. 
+ static bool matches_effective_uid_or_root(uid_t uid); + + // Returns true if either given uid is effective uid and given gid is + // effective gid, or if given uid is root. + static bool matches_effective_uid_and_gid_or_root(uid_t uid, gid_t gid); + + static void print_umask(outputStream* st, mode_t umsk); + + // Set PC into context. Needed for continuation after signal. + static address ucontext_get_pc(const ucontext_t* ctx); + static void ucontext_set_pc(ucontext_t* ctx, address pc); + + static void to_RTC_abstime(timespec* abstime, int64_t millis); + + static bool handle_stack_overflow(JavaThread* thread, address addr, address pc, + const void* ucVoid, + address* stub); +}; +#endif // !NATIVE_IMAGE + +#endif // OS_POSIX_OS_POSIX_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/posix/os_posix.inline.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/posix/os_posix.inline.hpp new file mode 100644 index 000000000000..de050c3c4453 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/os/posix/os_posix.inline.hpp @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef OS_POSIX_OS_POSIX_INLINE_HPP +#define OS_POSIX_OS_POSIX_INLINE_HPP + +#include "os_posix.hpp" + +#ifndef NATIVE_IMAGE +#include "runtime/mutex.hpp" +#include "runtime/os.hpp" + +#include +#include +#include + +// Aix does not have NUMA support but need these for compilation. +inline bool os::numa_has_group_homing() { AIX_ONLY(ShouldNotReachHere();) return false; } + +// Platform Mutex/Monitor implementation + +inline void PlatformMutex::lock() { + int status = pthread_mutex_lock(mutex()); + assert_status(status == 0, status, "mutex_lock"); +} + +inline void PlatformMutex::unlock() { + int status = pthread_mutex_unlock(mutex()); + assert_status(status == 0, status, "mutex_unlock"); +} + +inline bool PlatformMutex::try_lock() { + int status = pthread_mutex_trylock(mutex()); + assert_status(status == 0 || status == EBUSY, status, "mutex_trylock"); + return status == 0; +} + +inline void PlatformMonitor::notify() { + int status = pthread_cond_signal(cond()); + assert_status(status == 0, status, "cond_signal"); +} + +inline void PlatformMonitor::notify_all() { + int status = pthread_cond_broadcast(cond()); + assert_status(status == 0, status, "cond_broadcast"); +} +#endif // !NATIVE_IMAGE + +#endif // OS_POSIX_OS_POSIX_INLINE_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/memory/allStatic.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/memory/allStatic.hpp new file mode 100644 index 000000000000..4f3761b470ee --- /dev/null +++ 
b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/memory/allStatic.hpp @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_MEMORY_ALLSTATIC_HPP +#define SHARE_MEMORY_ALLSTATIC_HPP + +// Base class for classes used as namespaces. HotSpot style prefers +// using classes for grouping. Deriving from this class indicates the +// derived class is intended to be a namespace, with no instances ever +// created. 
+struct AllStatic { + AllStatic() = delete; + ~AllStatic() = delete; +}; + +#endif // SHARE_MEMORY_ALLSTATIC_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/memory/allocation.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/memory/allocation.hpp new file mode 100644 index 000000000000..565ff0b6a1b2 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/memory/allocation.hpp @@ -0,0 +1,649 @@ +/* + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_MEMORY_ALLOCATION_HPP +#define SHARE_MEMORY_ALLOCATION_HPP + +#include "memory/allStatic.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/macros.hpp" + +#include + +class outputStream; +class Thread; +class JavaThread; + +class AllocFailStrategy { +public: + enum AllocFailEnum { EXIT_OOM, RETURN_NULL }; +}; +typedef AllocFailStrategy::AllocFailEnum AllocFailType; + +// The virtual machine must never call one of the implicitly declared +// global allocation or deletion functions. (Such calls may result in +// link-time or run-time errors.) For convenience and documentation of +// intended use, classes in the virtual machine may be derived from one +// of the following allocation classes, some of which define allocation +// and deletion functions. +// Note: std::malloc and std::free should never called directly. + +// +// For objects allocated in the resource area (see resourceArea.hpp). +// - ResourceObj +// +// For objects allocated in the C-heap (managed by: free & malloc and tracked with NMT) +// - CHeapObj +// +// For objects allocated on the stack. +// - StackObj +// +// For classes used as name spaces. +// - AllStatic +// +// For classes in Metaspace (class data) +// - MetaspaceObj +// +// The printable subclasses are used for debugging and define virtual +// member functions for printing. Classes that avoid allocating the +// vtbl entries in the objects should therefore not be the printable +// subclasses. +// +// The following macros and function should be used to allocate memory +// directly in the resource area or in the C-heap, The _OBJ variants +// of the NEW/FREE_C_HEAP macros are used for alloc/dealloc simple +// objects which are not inherited from CHeapObj, note constructor and +// destructor are not called. The preferable way to allocate objects +// is using the new operator. 
+// +// WARNING: The array variant must only be used for a homogeneous array +// where all objects are of the exact type specified. If subtypes are +// stored in the array then must pay attention to calling destructors +// at needed. +// +// NEW_RESOURCE_ARRAY* +// REALLOC_RESOURCE_ARRAY* +// FREE_RESOURCE_ARRAY* +// NEW_RESOURCE_OBJ* +// NEW_C_HEAP_ARRAY* +// REALLOC_C_HEAP_ARRAY* +// FREE_C_HEAP_ARRAY* +// NEW_C_HEAP_OBJ* +// FREE_C_HEAP_OBJ +// +// char* AllocateHeap(size_t size, MEMFLAGS flags, const NativeCallStack& stack, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); +// char* AllocateHeap(size_t size, MEMFLAGS flags, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); +// char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); +// void FreeHeap(void* p); +// + +#define MEMORY_TYPES_DO(f) \ + /* Memory type by sub systems. It occupies lower byte. */ \ + f(mtJavaHeap, "Java Heap") /* Java heap */ \ + f(mtClass, "Class") /* Java classes */ \ + f(mtThread, "Thread") /* thread objects */ \ + f(mtThreadStack, "Thread Stack") \ + f(mtCode, "Code") /* generated code */ \ + f(mtGC, "GC") \ + f(mtGCCardSet, "GCCardSet") /* G1 card set remembered set */ \ + f(mtCompiler, "Compiler") \ + f(mtJVMCI, "JVMCI") \ + f(mtInternal, "Internal") /* memory used by VM, but does not belong to */ \ + /* any of above categories, and not used by */ \ + /* NMT */ \ + f(mtOther, "Other") /* memory not used by VM */ \ + f(mtSymbol, "Symbol") \ + f(mtNMT, "Native Memory Tracking") /* memory used by NMT */ \ + f(mtClassShared, "Shared class space") /* class data sharing */ \ + f(mtChunk, "Arena Chunk") /* chunk that holds content of arenas */ \ + f(mtTest, "Test") /* Test type for verifying NMT */ \ + f(mtTracing, "Tracing") \ + f(mtLogging, "Logging") \ + f(mtStatistics, "Statistics") \ + f(mtArguments, "Arguments") \ + f(mtModule, "Module") \ + f(mtSafepoint, "Safepoint") \ + f(mtSynchronizer, 
"Synchronization") \ + f(mtServiceability, "Serviceability") \ + f(mtMetaspace, "Metaspace") \ + f(mtStringDedup, "String Deduplication") \ + f(mtObjectMonitor, "Object Monitors") \ + f(mtNone, "Unknown") \ + //end + +#define MEMORY_TYPE_DECLARE_ENUM(type, human_readable) \ + type, + +/* + * Memory types + */ +enum class MEMFLAGS : uint8_t { + MEMORY_TYPES_DO(MEMORY_TYPE_DECLARE_ENUM) + mt_number_of_types // number of memory types (mtDontTrack + // is not included as validate type) +}; +// Extra insurance that MEMFLAGS truly has the same size as uint8_t. +STATIC_ASSERT(sizeof(MEMFLAGS) == sizeof(uint8_t)); + +#define MEMORY_TYPE_SHORTNAME(type, human_readable) \ + constexpr MEMFLAGS type = MEMFLAGS::type; + +// Generate short aliases for the enum values. E.g. mtGC instead of MEMFLAGS::mtGC. +MEMORY_TYPES_DO(MEMORY_TYPE_SHORTNAME) + +// Make an int version of the sentinel end value. +constexpr int mt_number_of_types = static_cast(MEMFLAGS::mt_number_of_types); + +extern bool NMT_track_callsite; + +class NativeCallStack; + + +char* AllocateHeap(size_t size, + MEMFLAGS flags, + const NativeCallStack& stack, + AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); +char* AllocateHeap(size_t size, + MEMFLAGS flags, + AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); + +char* ReallocateHeap(char *old, + size_t size, + MEMFLAGS flag, + AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); + +// handles null pointers +void FreeHeap(void* p); + +class CHeapObjBase { + public: + ALWAYSINLINE void* operator new(size_t size, MEMFLAGS f) { + return AllocateHeap(size, f); + } + + ALWAYSINLINE void* operator new(size_t size, + MEMFLAGS f, + const NativeCallStack& stack) { + return AllocateHeap(size, f, stack); + } + + ALWAYSINLINE void* operator new(size_t size, + MEMFLAGS f, + const std::nothrow_t&, + const NativeCallStack& stack) throw() { + return AllocateHeap(size, f, stack, AllocFailStrategy::RETURN_NULL); + } + + ALWAYSINLINE void* operator 
new(size_t size, + MEMFLAGS f, + const std::nothrow_t&) throw() { + return AllocateHeap(size, f, AllocFailStrategy::RETURN_NULL); + } + + ALWAYSINLINE void* operator new[](size_t size, MEMFLAGS f) { + return AllocateHeap(size, f); + } + + ALWAYSINLINE void* operator new[](size_t size, + MEMFLAGS f, + const NativeCallStack& stack) { + return AllocateHeap(size, f, stack); + } + + ALWAYSINLINE void* operator new[](size_t size, + MEMFLAGS f, + const std::nothrow_t&, + const NativeCallStack& stack) throw() { + return AllocateHeap(size, f, stack, AllocFailStrategy::RETURN_NULL); + } + + ALWAYSINLINE void* operator new[](size_t size, + MEMFLAGS f, + const std::nothrow_t&) throw() { + return AllocateHeap(size, f, AllocFailStrategy::RETURN_NULL); + } + + void operator delete(void* p) { FreeHeap(p); } + void operator delete [] (void* p) { FreeHeap(p); } +}; + +// Uses the implicitly static new and delete operators of CHeapObjBase +template +class CHeapObj { + public: + ALWAYSINLINE void* operator new(size_t size) { + return CHeapObjBase::operator new(size, F); + } + + ALWAYSINLINE void* operator new(size_t size, + const NativeCallStack& stack) { + return CHeapObjBase::operator new(size, F, stack); + } + + ALWAYSINLINE void* operator new(size_t size, const std::nothrow_t& nt, + const NativeCallStack& stack) throw() { + return CHeapObjBase::operator new(size, F, nt, stack); + } + + ALWAYSINLINE void* operator new(size_t size, const std::nothrow_t& nt) throw() { + return CHeapObjBase::operator new(size, F, nt); + } + + ALWAYSINLINE void* operator new[](size_t size) { + return CHeapObjBase::operator new[](size, F); + } + + ALWAYSINLINE void* operator new[](size_t size, + const NativeCallStack& stack) { + return CHeapObjBase::operator new[](size, F, stack); + } + + ALWAYSINLINE void* operator new[](size_t size, const std::nothrow_t& nt, + const NativeCallStack& stack) throw() { + return CHeapObjBase::operator new[](size, F, nt, stack); + } + + ALWAYSINLINE void* operator 
new[](size_t size, const std::nothrow_t& nt) throw() { + return CHeapObjBase::operator new[](size, F, nt); + } + + void operator delete(void* p) { + CHeapObjBase::operator delete(p); + } + + void operator delete [] (void* p) { + CHeapObjBase::operator delete[](p); + } +}; + +// Base class for objects allocated on the stack only. +// Calling new or delete will result in fatal error. + +class StackObj { + public: + void* operator new(size_t size) = delete; + void* operator new [](size_t size) = delete; + void operator delete(void* p) = delete; + void operator delete [](void* p) = delete; +}; + +#ifndef NATIVE_IMAGE +// Base class for objects stored in Metaspace. +// Calling delete will result in fatal error. +// +// Do not inherit from something with a vptr because this class does +// not introduce one. This class is used to allocate both shared read-only +// and shared read-write classes. +// + +class ClassLoaderData; +class MetaspaceClosure; + +class MetaspaceObj { + // There are functions that all subtypes of MetaspaceObj are expected + // to implement, so that templates which are defined for this class hierarchy + // can work uniformly. Within the sub-hierarchy of Metadata, these are virtuals. + // Elsewhere in the hierarchy of MetaspaceObj, type(), size(), and/or on_stack() + // can be static if constant. 
+ // + // The following functions are required by MetaspaceClosure: + // void metaspace_pointers_do(MetaspaceClosure* it) { } + // int size() const { return align_up(sizeof(), wordSize) / wordSize; } + // MetaspaceObj::Type type() const { return Type; } + // + // The following functions are required by MetadataFactory::free_metadata(): + // bool on_stack() { return false; } + // void deallocate_contents(ClassLoaderData* loader_data); + + friend class VMStructs; + // When CDS is enabled, all shared metaspace objects are mapped + // into a single contiguous memory block, so we can use these + // two pointers to quickly determine if something is in the + // shared metaspace. + // When CDS is not enabled, both pointers are set to null. + static void* _shared_metaspace_base; // (inclusive) low address + static void* _shared_metaspace_top; // (exclusive) high address + + public: + + // Returns true if the pointer points to a valid MetaspaceObj. A valid + // MetaspaceObj is MetaWord-aligned and contained within either + // non-shared or shared metaspace. + static bool is_valid(const MetaspaceObj* p); + +#if INCLUDE_CDS + static bool is_shared(const MetaspaceObj* p) { + // If no shared metaspace regions are mapped, _shared_metaspace_{base,top} will + // both be null and all values of p will be rejected quickly. 
+ return (((void*)p) < _shared_metaspace_top && + ((void*)p) >= _shared_metaspace_base); + } + bool is_shared() const { return MetaspaceObj::is_shared(this); } +#else + static bool is_shared(const MetaspaceObj* p) { return false; } + bool is_shared() const { return false; } +#endif + + void print_address_on(outputStream* st) const; // nonvirtual address printing + + static void set_shared_metaspace_range(void* base, void* top) { + _shared_metaspace_base = base; + _shared_metaspace_top = top; + } + + static void* shared_metaspace_base() { return _shared_metaspace_base; } + static void* shared_metaspace_top() { return _shared_metaspace_top; } + +#define METASPACE_OBJ_TYPES_DO(f) \ + f(Class) \ + f(Symbol) \ + f(TypeArrayU1) \ + f(TypeArrayU2) \ + f(TypeArrayU4) \ + f(TypeArrayU8) \ + f(TypeArrayOther) \ + f(Method) \ + f(ConstMethod) \ + f(MethodData) \ + f(ConstantPool) \ + f(ConstantPoolCache) \ + f(Annotations) \ + f(MethodCounters) \ + f(SharedClassPathEntry) \ + f(RecordComponent) + +#define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type, +#define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name; + + enum Type { + // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc + METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE) + _number_of_types + }; + + static const char * type_name(Type type) { + switch(type) { + METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE) + default: + ShouldNotReachHere(); + return nullptr; + } + } + + static MetaspaceObj::Type array_type(size_t elem_size) { + switch (elem_size) { + case 1: return TypeArrayU1Type; + case 2: return TypeArrayU2Type; + case 4: return TypeArrayU4Type; + case 8: return TypeArrayU8Type; + default: + return TypeArrayOtherType; + } + } + + void* operator new(size_t size, ClassLoaderData* loader_data, + size_t word_size, + Type type, JavaThread* thread) throw(); + // can't use TRAPS from this header file. 
+ void* operator new(size_t size, ClassLoaderData* loader_data, + size_t word_size, + Type type) throw(); + void operator delete(void* p) { ShouldNotCallThis(); } + + // Declare a *static* method with the same signature in any subclass of MetaspaceObj + // that should be read-only by default. See symbol.hpp for an example. This function + // is used by the templates in metaspaceClosure.hpp + static bool is_read_only_by_default() { return false; } +}; + +// Base class for classes that constitute name spaces. + +class Arena; + +extern char* resource_allocate_bytes(size_t size, + AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); +extern char* resource_allocate_bytes(Thread* thread, size_t size, + AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); +extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size, + AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); +extern void resource_free_bytes( Thread* thread, char *old, size_t size ); + +//---------------------------------------------------------------------- +// Base class for objects allocated in the resource area. 
+class ResourceObj { + public: + void* operator new(size_t size) { + return resource_allocate_bytes(size); + } + + void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() { + return resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL); + } + + void* operator new [](size_t size) throw() = delete; + void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() = delete; + + void operator delete(void* p) = delete; + void operator delete [](void* p) = delete; +}; + +class ArenaObj { + public: + void* operator new(size_t size, Arena *arena) throw(); + void* operator new [](size_t size, Arena *arena) throw() = delete; + + void* operator new [](size_t size) throw() = delete; + void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() = delete; + + void operator delete(void* p) = delete; + void operator delete [](void* p) = delete; +}; + +//---------------------------------------------------------------------- +// Base class for objects allocated in the resource area per default. +// Optionally, objects may be allocated on the C heap with +// new (AnyObj::C_HEAP) Foo(...) or in an Arena with new (&arena). +// AnyObj's can be allocated within other objects, but don't use +// new or delete (allocation_type is unknown). If new is used to allocate, +// use delete to deallocate. +class AnyObj { + public: + enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 }; + static void set_allocation_type(address res, allocation_type type) NOT_DEBUG_RETURN; +#ifdef ASSERT + private: + // When this object is allocated on stack the new() operator is not + // called but garbage on stack may look like a valid allocation_type. + // Store negated 'this' pointer when new() is called to distinguish cases. + // Use second array's element for verification value to distinguish garbage. 
+ uintptr_t _allocation_t[2]; + bool is_type_set() const; + void initialize_allocation_info(); + public: + allocation_type get_allocation_type() const; + bool allocated_on_stack_or_embedded() const { return get_allocation_type() == STACK_OR_EMBEDDED; } + bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; } + bool allocated_on_C_heap() const { return get_allocation_type() == C_HEAP; } + bool allocated_on_arena() const { return get_allocation_type() == ARENA; } +protected: + AnyObj(); // default constructor + AnyObj(const AnyObj& r); // default copy constructor + AnyObj& operator=(const AnyObj& r); // default copy assignment + ~AnyObj(); +#endif // ASSERT + + public: + // CHeap allocations + void* operator new(size_t size, MEMFLAGS flags) throw(); + void* operator new [](size_t size, MEMFLAGS flags) throw() = delete; + void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw(); + void* operator new [](size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() = delete; + + // Arena allocations + void* operator new(size_t size, Arena *arena); + void* operator new [](size_t size, Arena *arena) = delete; + + // Resource allocations + void* operator new(size_t size) { + address res = (address)resource_allocate_bytes(size); + DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);) + return res; + } + void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() { + address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL); + DEBUG_ONLY(if (res != nullptr) set_allocation_type(res, RESOURCE_AREA);) + return res; + } + + void* operator new [](size_t size) = delete; + void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) = delete; + void operator delete(void* p); + void operator delete [](void* p) = delete; + +#ifndef PRODUCT + // Printing support + void print() const; + virtual void print_on(outputStream* st) const; +#endif // 
PRODUCT +}; + +// One of the following macros must be used when allocating an array +// or object to determine whether it should reside in the C heap on in +// the resource area. + +#define NEW_RESOURCE_ARRAY(type, size)\ + (type*) resource_allocate_bytes((size) * sizeof(type)) + +#define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\ + (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL) + +#define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\ + (type*) resource_allocate_bytes(thread, (size) * sizeof(type)) + +#define NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(thread, type, size)\ + (type*) resource_allocate_bytes(thread, (size) * sizeof(type), AllocFailStrategy::RETURN_NULL) + +#define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\ + (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type)) + +#define REALLOC_RESOURCE_ARRAY_RETURN_NULL(type, old, old_size, new_size)\ + (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type),\ + (new_size) * sizeof(type), AllocFailStrategy::RETURN_NULL) + +#define FREE_RESOURCE_ARRAY(type, old, size)\ + resource_free_bytes(Thread::current(), (char*)(old), (size) * sizeof(type)) + +#define FREE_RESOURCE_ARRAY_IN_THREAD(thread, type, old, size)\ + resource_free_bytes(thread, (char*)(old), (size) * sizeof(type)) + +#define FREE_FAST(old)\ + /* nop */ + +#define NEW_RESOURCE_OBJ(type)\ + NEW_RESOURCE_ARRAY(type, 1) + +#define NEW_RESOURCE_OBJ_RETURN_NULL(type)\ + NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1) + +#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\ + (type*) AllocateHeap((size) * sizeof(type), memflags, pc, allocfail) + +#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\ + (type*) (AllocateHeap((size) * sizeof(type), memflags, pc)) + +#endif // !NATIVE_IMAGE + +#define NEW_C_HEAP_ARRAY(type, size, memflags)\ + (type*) (AllocateHeap((size) * sizeof(type), memflags)) + +#define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, 
size, memflags, pc)\ + NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL) + +#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\ + NEW_C_HEAP_ARRAY2(type, (size), memflags, AllocFailStrategy::RETURN_NULL) + +#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\ + (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags)) + +#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\ + (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL)) + +#define FREE_C_HEAP_ARRAY(type, old) \ + FreeHeap((char*)(old)) + +// allocate type in heap without calling ctor +#define NEW_C_HEAP_OBJ(type, memflags)\ + NEW_C_HEAP_ARRAY(type, 1, memflags) + +#define NEW_C_HEAP_OBJ_RETURN_NULL(type, memflags)\ + NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, memflags) + +// deallocate obj of type in heap without calling dtor +#define FREE_C_HEAP_OBJ(objname)\ + FreeHeap((char*)objname); + +#ifndef NATIVE_IMAGE + +//------------------------------ReallocMark--------------------------------- +// Code which uses REALLOC_RESOURCE_ARRAY should check an associated +// ReallocMark, which is declared in the same scope as the reallocated +// pointer. Any operation that could __potentially__ cause a reallocation +// should check the ReallocMark. +class ReallocMark: public StackObj { +protected: + NOT_PRODUCT(int _nesting;) + +public: + ReallocMark() PRODUCT_RETURN; + void check() PRODUCT_RETURN; +}; + +// Uses mmapped memory for all allocations. All allocations are initially +// zero-filled. No pre-touching. +template +class MmapArrayAllocator : public AllStatic { + private: + static size_t size_for(size_t length); + + public: + static E* allocate_or_null(size_t length, MEMFLAGS flags); + static E* allocate(size_t length, MEMFLAGS flags); + static void free(E* addr, size_t length); +}; + +// Uses malloc:ed memory for all allocations. 
+template +class MallocArrayAllocator : public AllStatic { + public: + static size_t size_for(size_t length); + + static E* allocate(size_t length, MEMFLAGS flags); + static E* reallocate(E* addr, size_t new_length, MEMFLAGS flags); + static void free(E* addr); +}; +#endif // !NATIVE_IMAGE + +#endif // SHARE_MEMORY_ALLOCATION_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/memory/allocation.inline.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/memory/allocation.inline.hpp new file mode 100644 index 000000000000..64c4a3104eaf --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/memory/allocation.inline.hpp @@ -0,0 +1,117 @@ +/* + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_MEMORY_ALLOCATION_INLINE_HPP +#define SHARE_MEMORY_ALLOCATION_INLINE_HPP + +#include "memory/allocation.hpp" + +#ifndef NATIVE_IMAGE +#include "runtime/atomic.hpp" +#include "runtime/globals.hpp" +#include "runtime/os.hpp" +#include "utilities/align.hpp" +#include "utilities/globalDefinitions.hpp" + +// Explicit C-heap memory management + +#ifndef PRODUCT +// Increments unsigned long value for statistics (not atomic on MP, but avoids word-tearing on 32 bit). +inline void inc_stat_counter(volatile julong* dest, julong add_value) { +#ifdef _LP64 + *dest += add_value; +#else + julong value = Atomic::load(dest); + Atomic::store(dest, value + add_value); +#endif +} +#endif + +template +size_t MmapArrayAllocator::size_for(size_t length) { + size_t size = length * sizeof(E); + size_t alignment = os::vm_allocation_granularity(); + return align_up(size, alignment); +} + +template +E* MmapArrayAllocator::allocate_or_null(size_t length, MEMFLAGS flags) { + size_t size = size_for(length); + + char* addr = os::reserve_memory(size, !ExecMem, flags); + if (addr == nullptr) { + return nullptr; + } + + if (os::commit_memory(addr, size, !ExecMem)) { + return (E*)addr; + } else { + os::release_memory(addr, size); + return nullptr; + } +} + +template +E* MmapArrayAllocator::allocate(size_t length, MEMFLAGS flags) { + size_t size = size_for(length); + + char* addr = os::reserve_memory(size, !ExecMem, flags); + if (addr == nullptr) { + vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)"); + } + + os::commit_memory_or_exit(addr, size, !ExecMem, "Allocator (commit)"); + + return (E*)addr; +} + +template +void MmapArrayAllocator::free(E* addr, size_t length) { + bool result = os::release_memory((char*)addr, size_for(length)); + assert(result, "Failed to release memory"); +} + +template +size_t MallocArrayAllocator::size_for(size_t length) { + return length * sizeof(E); +} + +template +E* MallocArrayAllocator::allocate(size_t length, MEMFLAGS flags) { + 
return (E*)AllocateHeap(size_for(length), flags); +} + +template +E* MallocArrayAllocator::reallocate(E* addr, size_t new_length, MEMFLAGS flags) { + return (E*)ReallocateHeap((char*)addr, size_for(new_length), flags); +} + +template +void MallocArrayAllocator::free(E* addr) { + FreeHeap(addr); +} +#endif // !NATIVE_IMAGE + +#endif // SHARE_MEMORY_ALLOCATION_INLINE_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/runtime/os.cpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/runtime/os.cpp new file mode 100644 index 000000000000..3e9857f5b3f4 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/runtime/os.cpp @@ -0,0 +1,2410 @@ +/* + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef NATIVE_IMAGE +#include "precompiled.hpp" +#include "cds/cdsConfig.hpp" +#include "classfile/javaClasses.hpp" +#include "classfile/moduleEntry.hpp" +#include "classfile/systemDictionary.hpp" +#include "classfile/vmClasses.hpp" +#include "classfile/vmSymbols.hpp" +#include "code/codeCache.hpp" +#include "code/icBuffer.hpp" +#include "code/vtableStubs.hpp" +#include "gc/shared/gcVMOperations.hpp" +#include "interpreter/interpreter.hpp" +#include "jvm.h" +#include "logging/log.hpp" +#include "logging/logStream.hpp" +#endif // !NATIVE_IMAGE +#include "memory/allocation.inline.hpp" +#ifndef NATIVE_IMAGE +#include "memory/resourceArea.hpp" +#include "memory/universe.hpp" +#include "nmt/mallocHeader.inline.hpp" +#include "nmt/mallocTracker.hpp" +#include "nmt/memTracker.inline.hpp" +#include "nmt/nmtCommon.hpp" +#include "nmt/nmtPreInit.hpp" +#include "oops/compressedKlass.inline.hpp" +#include "oops/oop.inline.hpp" +#include "prims/jvm_misc.hpp" +#include "prims/jvmtiAgent.hpp" +#include "runtime/arguments.hpp" +#include "runtime/atomic.hpp" +#include "runtime/frame.inline.hpp" +#include "runtime/handles.inline.hpp" +#include "runtime/interfaceSupport.inline.hpp" +#include "runtime/java.hpp" +#include "runtime/javaCalls.hpp" +#include "runtime/javaThread.hpp" +#include "runtime/jniHandles.hpp" +#include "runtime/mutexLocker.hpp" +#endif // !NATIVE_IMAGE +#include "runtime/os.inline.hpp" +#ifndef NATIVE_IMAGE +#include "runtime/osThread.hpp" +#include "runtime/safefetch.hpp" +#include "runtime/sharedRuntime.hpp" +#include "runtime/threadCrashProtection.hpp" +#include "runtime/threadSMR.hpp" +#include "runtime/vmOperations.hpp" +#include "runtime/vm_version.hpp" +#include "sanitizers/address.hpp" +#include "services/attachListener.hpp" +#include "services/threadService.hpp" +#include "utilities/align.hpp" +#endif // !NATIVE_IMAGE +#include "utilities/checkedCast.hpp" +#ifndef NATIVE_IMAGE +#include "utilities/count_trailing_zeros.hpp" +#include 
"utilities/defaultStream.hpp" +#include "utilities/events.hpp" +#include "utilities/fastrand.hpp" +#include "utilities/powerOfTwo.hpp" + +#ifndef _WINDOWS +# include +#endif + +# include +# include + +OSThread* os::_starting_thread = nullptr; +volatile unsigned int os::_rand_seed = 1234567; +#endif // !NATIVE_IMAGE +int os::_processor_count = 0; +#ifndef NATIVE_IMAGE +int os::_initial_active_processor_count = 0; +os::PageSizes os::_page_sizes; + +DEBUG_ONLY(bool os::_mutex_init_done = false;) + +int os::snprintf(char* buf, size_t len, const char* fmt, ...) { + va_list args; + va_start(args, fmt); + int result = os::vsnprintf(buf, len, fmt, args); + va_end(args); + return result; +} +#endif // !NATIVE_IMAGE + +int os::snprintf_checked(char* buf, size_t len, const char* fmt, ...) { + va_list args; + va_start(args, fmt); + int result = os::vsnprintf(buf, len, fmt, args); + va_end(args); + assert(result >= 0, "os::snprintf error"); + assert(static_cast(result) < len, "os::snprintf truncated"); + return result; +} + +#ifndef NATIVE_IMAGE +// Fill in buffer with current local time as an ISO-8601 string. +// E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz. +// Returns buffer, or null if it failed. +char* os::iso8601_time(char* buffer, size_t buffer_length, bool utc) { + const jlong now = javaTimeMillis(); + return os::iso8601_time(now, buffer, buffer_length, utc); +} + +// Fill in buffer with an ISO-8601 string corresponding to the given javaTimeMillis value +// E.g., yyyy-mm-ddThh:mm:ss-zzzz. +// Returns buffer, or null if it failed. +// This would mostly be a call to +// strftime(...., "%Y-%m-%d" "T" "%H:%M:%S" "%z", ....) +// except that on Windows the %z behaves badly, so we do it ourselves. +// Also, people wanted milliseconds on there, +// and strftime doesn't do milliseconds. 
+char* os::iso8601_time(jlong milliseconds_since_19700101, char* buffer, size_t buffer_length, bool utc) { + // Output will be of the form "YYYY-MM-DDThh:mm:ss.mmm+zzzz\0" + + // Sanity check the arguments + if (buffer == nullptr) { + assert(false, "null buffer"); + return nullptr; + } + if (buffer_length < os::iso8601_timestamp_size) { + assert(false, "buffer_length too small"); + return nullptr; + } + const int milliseconds_per_second = 1000; + const time_t seconds_since_19700101 = + milliseconds_since_19700101 / milliseconds_per_second; + const int milliseconds_after_second = + checked_cast(milliseconds_since_19700101 % milliseconds_per_second); + // Convert the time value to a tm and timezone variable + struct tm time_struct; + if (utc) { + if (gmtime_pd(&seconds_since_19700101, &time_struct) == nullptr) { + assert(false, "Failed gmtime_pd"); + return nullptr; + } + } else { + if (localtime_pd(&seconds_since_19700101, &time_struct) == nullptr) { + assert(false, "Failed localtime_pd"); + return nullptr; + } + } + + const time_t seconds_per_minute = 60; + const time_t minutes_per_hour = 60; + const time_t seconds_per_hour = seconds_per_minute * minutes_per_hour; + + // No offset when dealing with UTC + time_t UTC_to_local = 0; + if (!utc) { +#if (defined(_ALLBSD_SOURCE) || defined(_GNU_SOURCE)) && !defined(AIX) + UTC_to_local = -(time_struct.tm_gmtoff); +#elif defined(_WINDOWS) + long zone; + _get_timezone(&zone); + UTC_to_local = static_cast(zone); +#else + UTC_to_local = timezone; +#endif + + // tm_gmtoff already includes adjustment for daylight saving +#if !defined(_ALLBSD_SOURCE) && !defined(_GNU_SOURCE) + // If daylight savings time is in effect, + // we are 1 hour East of our time zone + if (time_struct.tm_isdst > 0) { + UTC_to_local = UTC_to_local - seconds_per_hour; + } +#endif + } + + // Compute the time zone offset. + // localtime_pd() sets timezone to the difference (in seconds) + // between UTC and local time. 
+ // ISO 8601 says we need the difference between local time and UTC, + // we change the sign of the localtime_pd() result. + const time_t local_to_UTC = -(UTC_to_local); + // Then we have to figure out if if we are ahead (+) or behind (-) UTC. + char sign_local_to_UTC = '+'; + time_t abs_local_to_UTC = local_to_UTC; + if (local_to_UTC < 0) { + sign_local_to_UTC = '-'; + abs_local_to_UTC = -(abs_local_to_UTC); + } + // Convert time zone offset seconds to hours and minutes. + const time_t zone_hours = (abs_local_to_UTC / seconds_per_hour); + const time_t zone_min = + ((abs_local_to_UTC % seconds_per_hour) / seconds_per_minute); + + // Print an ISO 8601 date and time stamp into the buffer + const int year = 1900 + time_struct.tm_year; + const int month = 1 + time_struct.tm_mon; + const int printed = jio_snprintf(buffer, buffer_length, + "%04d-%02d-%02dT%02d:%02d:%02d.%03d%c%02d%02d", + year, + month, + time_struct.tm_mday, + time_struct.tm_hour, + time_struct.tm_min, + time_struct.tm_sec, + milliseconds_after_second, + sign_local_to_UTC, + zone_hours, + zone_min); + if (printed == 0) { + assert(false, "Failed jio_printf"); + return nullptr; + } + return buffer; +} + +OSReturn os::set_priority(Thread* thread, ThreadPriority p) { + debug_only(Thread::check_for_dangling_thread_pointer(thread);) + + if ((p >= MinPriority && p <= MaxPriority) || + (p == CriticalPriority && thread->is_ConcurrentGC_thread())) { + int priority = java_to_os_priority[p]; + return set_native_priority(thread, priority); + } else { + assert(false, "Should not happen"); + return OS_ERR; + } +} + +// The mapping from OS priority back to Java priority may be inexact because +// Java priorities can map M:1 with native priorities. 
If you want the definite +// Java priority then use JavaThread::java_priority() +OSReturn os::get_priority(const Thread* const thread, ThreadPriority& priority) { + int p; + int os_prio; + OSReturn ret = get_native_priority(thread, &os_prio); + if (ret != OS_OK) return ret; + + if (java_to_os_priority[MaxPriority] > java_to_os_priority[MinPriority]) { + for (p = MaxPriority; p > MinPriority && java_to_os_priority[p] > os_prio; p--) ; + } else { + // niceness values are in reverse order + for (p = MaxPriority; p > MinPriority && java_to_os_priority[p] < os_prio; p--) ; + } + priority = (ThreadPriority)p; + return OS_OK; +} + +bool os::dll_build_name(char* buffer, size_t size, const char* fname) { + int n = jio_snprintf(buffer, size, "%s%s%s", JNI_LIB_PREFIX, fname, JNI_LIB_SUFFIX); + return (n != -1); +} + +#if !defined(LINUX) && !defined(_WINDOWS) +bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) { + committed_start = start; + committed_size = size; + return true; +} +#endif + +// Helper for dll_locate_lib. +// Pass buffer and printbuffer as we already printed the path to buffer +// when we called get_current_directory. This way we avoid another buffer +// of size MAX_PATH. +static bool conc_path_file_and_check(char *buffer, char *printbuffer, size_t printbuflen, + const char* pname, char lastchar, const char* fname) { + + // Concatenate path and file name, but don't print double path separators. + const char *filesep = (WINDOWS_ONLY(lastchar == ':' ||) lastchar == os::file_separator()[0]) ? + "" : os::file_separator(); + int ret = jio_snprintf(printbuffer, printbuflen, "%s%s%s", pname, filesep, fname); + // Check whether file exists. + if (ret != -1) { + struct stat statbuf; + return os::stat(buffer, &statbuf) == 0; + } + return false; +} + +// Frees all memory allocated on the heap for the +// supplied array of arrays of chars (a), where n +// is the number of elements in the array. 
+static void free_array_of_char_arrays(char** a, size_t n) { + while (n > 0) { + n--; + if (a[n] != nullptr) { + FREE_C_HEAP_ARRAY(char, a[n]); + } + } + FREE_C_HEAP_ARRAY(char*, a); +} + +bool os::dll_locate_lib(char *buffer, size_t buflen, + const char* pname, const char* fname) { + bool retval = false; + + size_t fullfnamelen = strlen(JNI_LIB_PREFIX) + strlen(fname) + strlen(JNI_LIB_SUFFIX); + char* fullfname = NEW_C_HEAP_ARRAY(char, fullfnamelen + 1, mtInternal); + if (dll_build_name(fullfname, fullfnamelen + 1, fname)) { + const size_t pnamelen = pname ? strlen(pname) : 0; + + if (pnamelen == 0) { + // If no path given, use current working directory. + const char* p = get_current_directory(buffer, buflen); + if (p != nullptr) { + const size_t plen = strlen(buffer); + const char lastchar = buffer[plen - 1]; + retval = conc_path_file_and_check(buffer, &buffer[plen], buflen - plen, + "", lastchar, fullfname); + } + } else if (strchr(pname, *os::path_separator()) != nullptr) { + // A list of paths. Search for the path that contains the library. + size_t n; + char** pelements = split_path(pname, &n, fullfnamelen); + if (pelements != nullptr) { + for (size_t i = 0; i < n; i++) { + char* path = pelements[i]; + // Really shouldn't be null, but check can't hurt. + size_t plen = (path == nullptr) ? 0 : strlen(path); + if (plen == 0) { + continue; // Skip the empty path values. + } + const char lastchar = path[plen - 1]; + retval = conc_path_file_and_check(buffer, buffer, buflen, path, lastchar, fullfname); + if (retval) break; + } + // Release the storage allocated by split_path. + free_array_of_char_arrays(pelements, n); + } + } else { + // A definite path. 
+ const char lastchar = pname[pnamelen-1]; + retval = conc_path_file_and_check(buffer, buffer, buflen, pname, lastchar, fullfname); + } + } + + FREE_C_HEAP_ARRAY(char*, fullfname); + return retval; +} + +// --------------------- sun.misc.Signal (optional) --------------------- + + +// SIGBREAK is sent by the keyboard to query the VM state +#ifndef SIGBREAK +#define SIGBREAK SIGQUIT +#endif + +// sigexitnum_pd is a platform-specific special signal used for terminating the Signal thread. + + +static void signal_thread_entry(JavaThread* thread, TRAPS) { + os::set_priority(thread, NearMaxPriority); + while (true) { + int sig; + { + // FIXME : Currently we have not decided what should be the status + // for this java thread blocked here. Once we decide about + // that we should fix this. + sig = os::signal_wait(); + } + if (sig == os::sigexitnum_pd()) { + // Terminate the signal thread + return; + } + + switch (sig) { + case SIGBREAK: { +#if INCLUDE_SERVICES + // Check if the signal is a trigger to start the Attach Listener - in that + // case don't print stack traces. + if (!DisableAttachMechanism) { + // Attempt to transit state to AL_INITIALIZING. + AttachListenerState cur_state = AttachListener::transit_state(AL_INITIALIZING, AL_NOT_INITIALIZED); + if (cur_state == AL_INITIALIZING) { + // Attach Listener has been started to initialize. Ignore this signal. + continue; + } else if (cur_state == AL_NOT_INITIALIZED) { + // Start to initialize. + if (AttachListener::is_init_trigger()) { + // Attach Listener has been initialized. + // Accept subsequent request. + continue; + } else { + // Attach Listener could not be started. + // So we need to transit the state to AL_NOT_INITIALIZED. + AttachListener::set_state(AL_NOT_INITIALIZED); + } + } else if (AttachListener::check_socket_file()) { + // Attach Listener has been started, but unix domain socket file + // does not exist. So restart Attach Listener. 
+ continue; + } + } +#endif + // Print stack traces + // Any SIGBREAK operations added here should make sure to flush + // the output stream (e.g. tty->flush()) after output. See 4803766. + // Each module also prints an extra carriage return after its output. + VM_PrintThreads op(tty, PrintConcurrentLocks, false /* no extended info */, true /* print JNI handle info */); + VMThread::execute(&op); + VM_FindDeadlocks op1(tty); + VMThread::execute(&op1); + Universe::print_heap_at_SIGBREAK(); + if (PrintClassHistogram) { + VM_GC_HeapInspection op1(tty, true /* force full GC before heap inspection */); + VMThread::execute(&op1); + } + if (JvmtiExport::should_post_data_dump()) { + JvmtiExport::post_data_dump(); + } + break; + } + default: { + // Dispatch the signal to java + HandleMark hm(THREAD); + Klass* klass = SystemDictionary::resolve_or_null(vmSymbols::jdk_internal_misc_Signal(), THREAD); + if (klass != nullptr) { + JavaValue result(T_VOID); + JavaCallArguments args; + args.push_int(sig); + JavaCalls::call_static( + &result, + klass, + vmSymbols::dispatch_name(), + vmSymbols::int_void_signature(), + &args, + THREAD + ); + } + if (HAS_PENDING_EXCEPTION) { + // tty is initialized early so we don't expect it to be null, but + // if it is we can't risk doing an initialization that might + // trigger additional out-of-memory conditions + if (tty != nullptr) { + char klass_name[256]; + char tmp_sig_name[16]; + const char* sig_name = "UNKNOWN"; + InstanceKlass::cast(PENDING_EXCEPTION->klass())-> + name()->as_klass_external_name(klass_name, 256); + if (os::exception_name(sig, tmp_sig_name, 16) != nullptr) + sig_name = tmp_sig_name; + warning("Exception %s occurred dispatching signal %s to handler" + "- the VM may need to be forcibly terminated", + klass_name, sig_name ); + } + CLEAR_PENDING_EXCEPTION; + } + } + } + } +} + +void os::init_before_ergo() { + initialize_initial_active_processor_count(); + // We need to initialize large page support here because ergonomics takes 
some + // decisions depending on large page support and the calculated large page size. + large_page_init(); + + StackOverflow::initialize_stack_zone_sizes(); + + // VM version initialization identifies some characteristics of the + // platform that are used during ergonomic decisions. + VM_Version::init_before_ergo(); +} + +void os::initialize_jdk_signal_support(TRAPS) { + if (!ReduceSignalUsage) { + // Setup JavaThread for processing signals + const char* name = "Signal Dispatcher"; + Handle thread_oop = JavaThread::create_system_thread_object(name, CHECK); + + JavaThread* thread = new JavaThread(&signal_thread_entry); + JavaThread::vm_exit_on_osthread_failure(thread); + + JavaThread::start_internal_daemon(THREAD, thread, thread_oop, NearMaxPriority); + } +} + + +void os::terminate_signal_thread() { + if (!ReduceSignalUsage) + signal_notify(sigexitnum_pd()); +} + + +// --------------------- loading libraries --------------------- + +typedef jint (JNICALL *JNI_OnLoad_t)(JavaVM *, void *); +extern struct JavaVM_ main_vm; + +static void* _native_java_library = nullptr; + +void* os::native_java_library() { + if (_native_java_library == nullptr) { + char buffer[JVM_MAXPATHLEN]; + char ebuf[1024]; + + // Load java dll + if (dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(), + "java")) { + _native_java_library = dll_load(buffer, ebuf, sizeof(ebuf)); + } + if (_native_java_library == nullptr) { + vm_exit_during_initialization("Unable to load native library", ebuf); + } + +#if defined(__OpenBSD__) + // Work-around OpenBSD's lack of $ORIGIN support by pre-loading libnet.so + // ignore errors + if (dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(), + "net")) { + dll_load(buffer, ebuf, sizeof(ebuf)); + } +#endif + } + return _native_java_library; +} + +/* + * Support for finding Agent_On(Un)Load/Attach<_lib_name> if it exists. 
+ * If check_lib == true then we are looking for an + * Agent_OnLoad_lib_name or Agent_OnAttach_lib_name function to determine if + * this library is statically linked into the image. + * If check_lib == false then we will look for the appropriate symbol in the + * executable if agent_lib->is_static_lib() == true or in the shared library + * referenced by 'handle'. + */ +void* os::find_agent_function(JvmtiAgent *agent_lib, bool check_lib, + const char *syms[], size_t syms_len) { + assert(agent_lib != nullptr, "sanity check"); + const char *lib_name; + void *handle = agent_lib->os_lib(); + void *entryName = nullptr; + char *agent_function_name; + size_t i; + + // If checking then use the agent name otherwise test is_static_lib() to + // see how to process this lookup + lib_name = ((check_lib || agent_lib->is_static_lib()) ? agent_lib->name() : nullptr); + for (i = 0; i < syms_len; i++) { + agent_function_name = build_agent_function_name(syms[i], lib_name, agent_lib->is_absolute_path()); + if (agent_function_name == nullptr) { + break; + } + entryName = dll_lookup(handle, agent_function_name); + FREE_C_HEAP_ARRAY(char, agent_function_name); + if (entryName != nullptr) { + break; + } + } + return entryName; +} + +// See if the passed in agent is statically linked into the VM image. +bool os::find_builtin_agent(JvmtiAgent* agent, const char *syms[], + size_t syms_len) { + void *ret; + void *proc_handle; + void *save_handle; + + assert(agent != nullptr, "sanity check"); + if (agent->name() == nullptr) { + return false; + } + proc_handle = get_default_process_handle(); + // Check for Agent_OnLoad/Attach_lib_name function + save_handle = agent->os_lib(); + // We want to look in this process' symbol table. 
+ agent->set_os_lib(proc_handle); + ret = find_agent_function(agent, true, syms, syms_len); + if (ret != nullptr) { + // Found an entry point like Agent_OnLoad_lib_name so we have a static agent + agent->set_static_lib(); + agent->set_loaded(); + return true; + } + agent->set_os_lib(save_handle); + return false; +} +#endif // !NATIVE_IMAGE + +// --------------------- heap allocation utilities --------------------- + +char *os::strdup(const char *str, MEMFLAGS flags) { + size_t size = strlen(str); + char *dup_str = (char *)malloc(size + 1, flags); + if (dup_str == nullptr) return nullptr; + strcpy(dup_str, str); + return dup_str; +} + +#ifndef NATIVE_IMAGE +char* os::strdup_check_oom(const char* str, MEMFLAGS flags) { + char* p = os::strdup(str, flags); + if (p == nullptr) { + vm_exit_out_of_memory(strlen(str) + 1, OOM_MALLOC_ERROR, "os::strdup_check_oom"); + } + return p; +} + +#ifdef ASSERT +static void check_crash_protection() { + assert(!ThreadCrashProtection::is_crash_protected(Thread::current_or_null()), + "not allowed when crash protection is set"); +} +static void break_if_ptr_caught(void* ptr) { + if (p2i(ptr) == (intptr_t)MallocCatchPtr) { + log_warning(malloc, free)("ptr caught: " PTR_FORMAT, p2i(ptr)); + breakpoint(); + } +} +#endif // ASSERT +#endif // !NATIVE_IMAGE + +#ifdef NATIVE_IMAGE +void* os::malloc(size_t size, MEMFLAGS flags) { + // On malloc(0), implementations of malloc(3) have the choice to return either + // null or a unique non-null pointer. To unify libc behavior across our platforms + // we chose the latter. + size = MAX2((size_t)1, size); + return ::malloc(size); +} + +void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) { + if (memblock == nullptr) { + return os::malloc(size, flags); + } + + // On realloc(p, 0), implementers of realloc(3) have the choice to return either + // null or a unique non-null pointer. To unify libc behavior across our platforms + // we chose the latter. 
+ size = MAX2((size_t)1, size); + return ::realloc(memblock, size); +} + +void os::free(void *memblock) { + if (memblock == nullptr) { + return; + } + ::free(memblock); +} +#else +void* os::malloc(size_t size, MEMFLAGS flags) { + return os::malloc(size, flags, CALLER_PC); +} + +void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) { + + // Special handling for NMT preinit phase before arguments are parsed + void* rc = nullptr; + if (NMTPreInit::handle_malloc(&rc, size)) { + // No need to fill with 0 because CDS static dumping doesn't use these + // early allocations. + return rc; + } + + DEBUG_ONLY(check_crash_protection()); + + // On malloc(0), implementations of malloc(3) have the choice to return either + // null or a unique non-null pointer. To unify libc behavior across our platforms + // we chose the latter. + size = MAX2((size_t)1, size); + + // Observe MallocLimit + if (MemTracker::check_exceeds_limit(size, memflags)) { + return nullptr; + } + + const size_t outer_size = size + MemTracker::overhead_per_malloc(); + + // Check for overflow. + if (outer_size < size) { + return nullptr; + } + + ALLOW_C_FUNCTION(::malloc, void* const outer_ptr = ::malloc(outer_size);) + if (outer_ptr == nullptr) { + return nullptr; + } + + void* const inner_ptr = MemTracker::record_malloc((address)outer_ptr, size, memflags, stack); + + if (CDSConfig::is_dumping_static_archive()) { + // Need to deterministically fill all the alignment gaps in C++ structures. 
+ ::memset(inner_ptr, 0, size); + } else { + DEBUG_ONLY(::memset(inner_ptr, uninitBlockPad, size);) + } + DEBUG_ONLY(break_if_ptr_caught(inner_ptr);) + return inner_ptr; +} + +void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) { + return os::realloc(memblock, size, flags, CALLER_PC); +} + +void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) { + + // Special handling for NMT preinit phase before arguments are parsed + void* rc = nullptr; + if (NMTPreInit::handle_realloc(&rc, memblock, size, memflags)) { + return rc; + } + + if (memblock == nullptr) { + return os::malloc(size, memflags, stack); + } + + DEBUG_ONLY(check_crash_protection()); + + // On realloc(p, 0), implementers of realloc(3) have the choice to return either + // null or a unique non-null pointer. To unify libc behavior across our platforms + // we chose the latter. + size = MAX2((size_t)1, size); + + if (MemTracker::enabled()) { + // NMT realloc handling + + const size_t new_outer_size = size + MemTracker::overhead_per_malloc(); + + // Handle size overflow. + if (new_outer_size < size) { + return nullptr; + } + + const size_t old_size = MallocTracker::malloc_header(memblock)->size(); + + // Observe MallocLimit + if ((size > old_size) && MemTracker::check_exceeds_limit(size - old_size, memflags)) { + return nullptr; + } + + // Perform integrity checks on and mark the old block as dead *before* calling the real realloc(3) since it + // may invalidate the old block, including its header. 
+ MallocHeader* header = MallocHeader::resolve_checked(memblock); + assert(memflags == header->flags(), "weird NMT flags mismatch (new:\"%s\" != old:\"%s\")\n", + NMTUtil::flag_to_name(memflags), NMTUtil::flag_to_name(header->flags())); + const MallocHeader::FreeInfo free_info = header->free_info(); + + header->mark_block_as_dead(); + + // the real realloc + ALLOW_C_FUNCTION(::realloc, void* const new_outer_ptr = ::realloc(header, new_outer_size);) + + if (new_outer_ptr == nullptr) { + // realloc(3) failed and the block still exists. + // We have however marked it as dead, revert this change. + header->revive(); + return nullptr; + } + // realloc(3) succeeded, variable header now points to invalid memory and we need to deaccount the old block. + MemTracker::deaccount(free_info); + + // After a successful realloc(3), we account the resized block with its new size + // to NMT. + void* const new_inner_ptr = MemTracker::record_malloc(new_outer_ptr, size, memflags, stack); + +#ifdef ASSERT + assert(old_size == free_info.size, "Sanity"); + if (old_size < size) { + // We also zap the newly extended region. + ::memset((char*)new_inner_ptr + old_size, uninitBlockPad, size - old_size); + } +#endif + + rc = new_inner_ptr; + + } else { + + // NMT disabled. + ALLOW_C_FUNCTION(::realloc, rc = ::realloc(memblock, size);) + if (rc == nullptr) { + return nullptr; + } + + } + + DEBUG_ONLY(break_if_ptr_caught(rc);) + + return rc; +} + +void os::free(void *memblock) { + + // Special handling for NMT preinit phase before arguments are parsed + if (NMTPreInit::handle_free(memblock)) { + return; + } + + if (memblock == nullptr) { + return; + } + + DEBUG_ONLY(break_if_ptr_caught(memblock);) + + // When NMT is enabled this checks for heap overwrites, then deaccounts the old block. 
+  void* const old_outer_ptr = MemTracker::record_free(memblock);
+
+  ALLOW_C_FUNCTION(::free, ::free(old_outer_ptr);)
+}
+
+void os::init_random(unsigned int initval) {
+  _rand_seed = initval;
+}
+
+
+int os::next_random(unsigned int rand_seed) {
+  /* standard, well-known linear congruential random generator with
+   * next_rand = (16807*seed) mod (2**31-1)
+   * see
+   * (1) "Random Number Generators: Good Ones Are Hard to Find",
+   *     S.K. Park and K.W. Miller, Communications of the ACM 31:10 (Oct 1988),
+   * (2) "Two Fast Implementations of the 'Minimal Standard' Random
+   *     Number Generator", David G. Carta, Comm. ACM 33, 1 (Jan 1990), pp. 87-88.
+   */
+  const unsigned int a = 16807;
+  const unsigned int m = 2147483647;
+  const int q = m / a;        assert(q == 127773, "weird math");
+  const int r = m % a;        assert(r == 2836, "weird math");
+
+  // compute az=2^31p+q
+  unsigned int lo = a * (rand_seed & 0xFFFF);
+  unsigned int hi = a * (rand_seed >> 16);
+  lo += (hi & 0x7FFF) << 16;
+
+  // if q overflowed, ignore the overflow and increment q
+  if (lo > m) {
+    lo &= m;
+    ++lo;
+  }
+  lo += hi >> 15;
+
+  // if (p+q) overflowed, ignore the overflow and increment (p+q)
+  if (lo > m) {
+    lo &= m;
+    ++lo;
+  }
+  return lo;
+}
+
+int os::random() {
+  // Make updating the random seed thread safe.
+  while (true) {
+    unsigned int seed = _rand_seed;
+    unsigned int rand = next_random(seed);
+    if (Atomic::cmpxchg(&_rand_seed, seed, rand, memory_order_relaxed) == seed) {
+      return static_cast<int>(rand);
+    }
+  }
+}
+
+// The INITIALIZED state is distinguished from the SUSPENDED state because the
+// conditions in which a thread is first started are different from those in which
+// a suspension is resumed. These differences make it hard for us to apply the
+// tougher checks when starting threads that we want to do when resuming them.
+// However, when start_thread is called as a result of Thread.start, on a Java
+// thread, the operation is synchronized on the Java Thread object.
So there +// cannot be a race to start the thread and hence for the thread to exit while +// we are working on it. Non-Java threads that start Java threads either have +// to do so in a context in which races are impossible, or should do appropriate +// locking. + +void os::start_thread(Thread* thread) { + OSThread* osthread = thread->osthread(); + osthread->set_state(RUNNABLE); + pd_start_thread(thread); +} + +void os::abort(bool dump_core) { + abort(dump_core && CreateCoredumpOnCrash, nullptr, nullptr); +} + +//--------------------------------------------------------------------------- +// Helper functions for fatal error handler + +bool os::print_function_and_library_name(outputStream* st, + address addr, + char* buf, int buflen, + bool shorten_paths, + bool demangle, + bool strip_arguments) { + // If no scratch buffer given, allocate one here on stack. + // (used during error handling; its a coin toss, really, if on-stack allocation + // is worse than (raw) C-heap allocation in that case). + char* p = buf; + if (p == nullptr) { + p = (char*)::alloca(O_BUFLEN); + buflen = O_BUFLEN; + } + int offset = 0; + bool have_function_name = dll_address_to_function_name(addr, p, buflen, + &offset, demangle); + bool is_function_descriptor = false; +#ifdef HAVE_FUNCTION_DESCRIPTORS + // When we deal with a function descriptor instead of a real code pointer, try to + // resolve it. There is a small chance that a random pointer given to this function + // may just happen to look like a valid descriptor, but this is rare and worth the + // risk to see resolved function names. But we will print a little suffix to mark + // this as a function descriptor for the reader (see below). 
+ if (!have_function_name && os::is_readable_pointer(addr)) { + address addr2 = (address)os::resolve_function_descriptor(addr); + if ((have_function_name = is_function_descriptor = + dll_address_to_function_name(addr2, p, buflen, &offset, demangle))) { + addr = addr2; + } + } +#endif // HAVE_FUNCTION_DESCRIPTORS + + if (have_function_name) { + // Print function name, optionally demangled + if (demangle && strip_arguments) { + char* args_start = strchr(p, '('); + if (args_start != nullptr) { + *args_start = '\0'; + } + } + // Print offset. Omit printing if offset is zero, which makes the output + // more readable if we print function pointers. + if (offset == 0) { + st->print("%s", p); + } else { + st->print("%s+%d", p, offset); + } + } else { + st->print(PTR_FORMAT, p2i(addr)); + } + offset = 0; + + const bool have_library_name = dll_address_to_library_name(addr, p, buflen, &offset); + if (have_library_name) { + // Cut path parts + if (shorten_paths) { + char* p2 = strrchr(p, os::file_separator()[0]); + if (p2 != nullptr) { + p = p2 + 1; + } + } + st->print(" in %s", p); + if (!have_function_name) { // Omit offset if we already printed the function offset + st->print("+%d", offset); + } + } + + // Write a trailing marker if this was a function descriptor + if (have_function_name && is_function_descriptor) { + st->print_raw(" (FD)"); + } + + return have_function_name || have_library_name; +} + +ATTRIBUTE_NO_ASAN static bool read_safely_from(intptr_t* p, intptr_t* result) { + const intptr_t errval = 0x1717; + intptr_t i = SafeFetchN(p, errval); + if (i == errval) { + i = SafeFetchN(p, ~errval); + if (i == ~errval) { + return false; + } + } + (*result) = i; + return true; +} + +static void print_hex_location(outputStream* st, address p, int unitsize) { + assert(is_aligned(p, unitsize), "Unaligned"); + address pa = align_down(p, sizeof(intptr_t)); +#ifndef _LP64 + // Special handling for printing qwords on 32-bit platforms + if (unitsize == 8) { + intptr_t i1, i2; + if 
(read_safely_from((intptr_t*)pa, &i1) && + read_safely_from((intptr_t*)pa + 1, &i2)) { + const uint64_t value = + LITTLE_ENDIAN_ONLY((((uint64_t)i2) << 32) | i1) + BIG_ENDIAN_ONLY((((uint64_t)i1) << 32) | i2); + st->print("%016" FORMAT64_MODIFIER "x", value); + } else { + st->print_raw("????????????????"); + } + return; + } +#endif // 32-bit, qwords + intptr_t i = 0; + if (read_safely_from((intptr_t*)pa, &i)) { + // bytes: CA FE BA BE DE AD C0 DE + // bytoff: 0 1 2 3 4 5 6 7 + // LE bits: 0 8 16 24 32 40 48 56 + // BE bits: 56 48 40 32 24 16 8 0 + const int offset = (int)(p - (address)pa); + const int bitoffset = + LITTLE_ENDIAN_ONLY(offset * BitsPerByte) + BIG_ENDIAN_ONLY((int)((sizeof(intptr_t) - unitsize - offset) * BitsPerByte)); + const int bitfieldsize = unitsize * BitsPerByte; + intptr_t value = bitfield(i, bitoffset, bitfieldsize); + switch (unitsize) { + case 1: st->print("%02x", (u1)value); break; + case 2: st->print("%04x", (u2)value); break; + case 4: st->print("%08x", (u4)value); break; + case 8: st->print("%016" FORMAT64_MODIFIER "x", (u8)value); break; + } + } else { + switch (unitsize) { + case 1: st->print_raw("??"); break; + case 2: st->print_raw("????"); break; + case 4: st->print_raw("????????"); break; + case 8: st->print_raw("????????????????"); break; + } + } +} + +void os::print_hex_dump(outputStream* st, address start, address end, int unitsize, + int bytes_per_line, address logical_start) { + assert(unitsize == 1 || unitsize == 2 || unitsize == 4 || unitsize == 8, "just checking"); + + start = align_down(start, unitsize); + logical_start = align_down(logical_start, unitsize); + bytes_per_line = align_up(bytes_per_line, 8); + + int cols = 0; + int cols_per_line = bytes_per_line / unitsize; + + address p = start; + address logical_p = logical_start; + + // Print out the addresses as if we were starting from logical_start. 
+ st->print(PTR_FORMAT ": ", p2i(logical_p)); + while (p < end) { + print_hex_location(st, p, unitsize); + p += unitsize; + logical_p += unitsize; + cols++; + if (cols >= cols_per_line && p < end) { + cols = 0; + st->cr(); + st->print(PTR_FORMAT ": ", p2i(logical_p)); + } else { + st->print(" "); + } + } + st->cr(); +} + +void os::print_dhm(outputStream* st, const char* startStr, long sec) { + long days = sec/86400; + long hours = (sec/3600) - (days * 24); + long minutes = (sec/60) - (days * 1440) - (hours * 60); + if (startStr == nullptr) startStr = ""; + st->print_cr("%s %ld days %ld:%02ld hours", startStr, days, hours, minutes); +} + +void os::print_tos(outputStream* st, address sp) { + st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(sp)); + print_hex_dump(st, sp, sp + 512, sizeof(intptr_t)); +} + +void os::print_instructions(outputStream* st, address pc, int unitsize) { + st->print_cr("Instructions: (pc=" PTR_FORMAT ")", p2i(pc)); + print_hex_dump(st, pc - 256, pc + 256, unitsize); +} + +void os::print_environment_variables(outputStream* st, const char** env_list) { + if (env_list) { + st->print_cr("Environment Variables:"); + + for (int i = 0; env_list[i] != nullptr; i++) { + char *envvar = ::getenv(env_list[i]); + if (envvar != nullptr) { + st->print("%s", env_list[i]); + st->print("="); + st->print("%s", envvar); + // Use separate cr() printing to avoid unnecessary buffer operations that might cause truncation. 
+ st->cr(); + } + } + } +} + +void os::print_register_info(outputStream* st, const void* context) { + int continuation = 0; + print_register_info(st, context, continuation); +} + +void os::print_cpu_info(outputStream* st, char* buf, size_t buflen) { + // cpu + st->print("CPU:"); +#if defined(__APPLE__) && !defined(ZERO) + if (VM_Version::is_cpu_emulated()) { + st->print(" (EMULATED)"); + } +#endif + st->print(" total %d", os::processor_count()); + // It's not safe to query number of active processors after crash + // st->print("(active %d)", os::active_processor_count()); but we can + // print the initial number of active processors. + // We access the raw value here because the assert in the accessor will + // fail if the crash occurs before initialization of this value. + st->print(" (initial active %d)", _initial_active_processor_count); + st->print(" %s", VM_Version::features_string()); + st->cr(); + pd_print_cpu_info(st, buf, buflen); +} + +// Print a one line string summarizing the cpu, number of cores, memory, and operating system version +void os::print_summary_info(outputStream* st, char* buf, size_t buflen) { + st->print("Host: "); +#ifndef PRODUCT + if (get_host_name(buf, buflen)) { + st->print("%s, ", buf); + } +#endif // PRODUCT + get_summary_cpu_info(buf, buflen); + st->print("%s, ", buf); + size_t mem = physical_memory()/G; + if (mem == 0) { // for low memory systems + mem = physical_memory()/M; + st->print("%d cores, " SIZE_FORMAT "M, ", processor_count(), mem); + } else { + st->print("%d cores, " SIZE_FORMAT "G, ", processor_count(), mem); + } + get_summary_os_info(buf, buflen); + st->print_raw(buf); + st->cr(); +} + +static constexpr int secs_per_day = 86400; +static constexpr int secs_per_hour = 3600; +static constexpr int secs_per_min = 60; + +void os::print_date_and_time(outputStream *st, char* buf, size_t buflen) { + + time_t tloc; + (void)time(&tloc); + char* timestring = ctime(&tloc); // ctime adds newline. 
+ // edit out the newline + char* nl = strchr(timestring, '\n'); + if (nl != nullptr) { + *nl = '\0'; + } + + struct tm tz; + if (localtime_pd(&tloc, &tz) != nullptr) { + wchar_t w_buf[80]; + size_t n = ::wcsftime(w_buf, 80, L"%Z", &tz); + if (n > 0) { + ::wcstombs(buf, w_buf, buflen); + st->print("Time: %s %s", timestring, buf); + } else { + st->print("Time: %s", timestring); + } + } else { + st->print("Time: %s", timestring); + } + + double t = os::elapsedTime(); + st->print(" elapsed time: "); + print_elapsed_time(st, t); + st->cr(); +} + +void os::print_elapsed_time(outputStream* st, double time) { + // NOTE: a crash using printf("%f",...) on Linux was historically noted here. + int eltime = (int)time; // elapsed time in seconds + int eltimeFraction = (int) ((time - eltime) * 1000000); + + // print elapsed time in a human-readable format: + int eldays = eltime / secs_per_day; + int day_secs = eldays * secs_per_day; + int elhours = (eltime - day_secs) / secs_per_hour; + int hour_secs = elhours * secs_per_hour; + int elmins = (eltime - day_secs - hour_secs) / secs_per_min; + int minute_secs = elmins * secs_per_min; + int elsecs = (eltime - day_secs - hour_secs - minute_secs); + st->print("%d.%06d seconds (%dd %dh %dm %ds)", eltime, eltimeFraction, eldays, elhours, elmins, elsecs); +} + + +// Check if pointer can be read from (4-byte read access). +// Helps to prove validity of a non-null pointer. +// Returns true in very early stages of VM life when stub is not yet generated. 
+bool os::is_readable_pointer(const void* p) { + int* const aligned = (int*) align_down((intptr_t)p, 4); + int cafebabe = 0xcafebabe; // tester value 1 + int deadbeef = 0xdeadbeef; // tester value 2 + return (SafeFetch32(aligned, cafebabe) != cafebabe) || (SafeFetch32(aligned, deadbeef) != deadbeef); +} + +bool os::is_readable_range(const void* from, const void* to) { + if ((uintptr_t)from >= (uintptr_t)to) return false; + for (uintptr_t p = align_down((uintptr_t)from, min_page_size()); p < (uintptr_t)to; p += min_page_size()) { + if (!is_readable_pointer((const void*)p)) { + return false; + } + } + return true; +} + + +// moved from debug.cpp (used to be find()) but still called from there +// The verbose parameter is only set by the debug code in one case +void os::print_location(outputStream* st, intptr_t x, bool verbose) { + address addr = (address)x; + // Handle null first, so later checks don't need to protect against it. + if (addr == nullptr) { + st->print_cr("0x0 is null"); + return; + } + + // Check if addr points into a code blob. + CodeBlob* b = CodeCache::find_blob(addr); + if (b != nullptr) { + b->dump_for_addr(addr, st, verbose); + return; + } + + // Check if addr points into Java heap. + if (Universe::heap()->print_location(st, addr)) { + return; + } + + bool accessible = is_readable_pointer(addr); + + // Check if addr is a JNI handle. + if (align_down((intptr_t)addr, sizeof(intptr_t)) != 0 && accessible) { + if (JNIHandles::is_global_handle((jobject) addr)) { + st->print_cr(INTPTR_FORMAT " is a global jni handle", p2i(addr)); + return; + } + if (JNIHandles::is_weak_global_handle((jobject) addr)) { + st->print_cr(INTPTR_FORMAT " is a weak global jni handle", p2i(addr)); + return; + } + } + + // Check if addr belongs to a Java thread. + for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) { + // If the addr is a java thread print information about that. 
+ if (addr == (address)thread) { + if (verbose) { + thread->print_on(st); + } else { + st->print_cr(INTPTR_FORMAT " is a thread", p2i(addr)); + } + return; + } + // If the addr is in the stack region for this thread then report that + // and print thread info + if (thread->is_in_full_stack(addr)) { + st->print_cr(INTPTR_FORMAT " is pointing into the stack for thread: " + INTPTR_FORMAT, p2i(addr), p2i(thread)); + if (verbose) thread->print_on(st); + return; + } + } + + // Check if in metaspace and print types that have vptrs + if (Metaspace::contains(addr)) { + if (Klass::is_valid((Klass*)addr)) { + st->print_cr(INTPTR_FORMAT " is a pointer to class: ", p2i(addr)); + ((Klass*)addr)->print_on(st); + } else if (Method::is_valid_method((const Method*)addr)) { + ((Method*)addr)->print_value_on(st); + st->cr(); + } else { + // Use addr->print() from the debugger instead (not here) + st->print_cr(INTPTR_FORMAT " is pointing into metadata", p2i(addr)); + } + return; + } + + // Compressed klass needs to be decoded first. +#ifdef _LP64 + if (UseCompressedClassPointers && ((uintptr_t)addr &~ (uintptr_t)max_juint) == 0) { + narrowKlass narrow_klass = (narrowKlass)(uintptr_t)addr; + Klass* k = CompressedKlassPointers::decode_raw(narrow_klass); + + if (Klass::is_valid(k)) { + st->print_cr(UINT32_FORMAT " is a compressed pointer to class: " INTPTR_FORMAT, narrow_klass, p2i((HeapWord*)k)); + k->print_on(st); + return; + } + } +#endif + + // Still nothing? If NMT is enabled, we can ask what it thinks... 
+ if (MemTracker::print_containing_region(addr, st)) { + return; + } + + // Try an OS specific find + if (os::find(addr, st)) { + return; + } + + if (accessible) { + st->print(INTPTR_FORMAT " points into unknown readable memory:", p2i(addr)); + if (is_aligned(addr, sizeof(intptr_t))) { + st->print(" " PTR_FORMAT " |", *(intptr_t*)addr); + } + for (address p = addr; p < align_up(addr + 1, sizeof(intptr_t)); ++p) { + st->print(" %02x", *(u1*)p); + } + st->cr(); + return; + } + + st->print_cr(INTPTR_FORMAT " is an unknown value", p2i(addr)); +} + +bool is_pointer_bad(intptr_t* ptr) { + return !is_aligned(ptr, sizeof(uintptr_t)) || !os::is_readable_pointer(ptr); +} + +// Looks like all platforms can use the same function to check if C +// stack is walkable beyond current frame. +// Returns true if this is not the case, i.e. the frame is possibly +// the first C frame on the stack. +bool os::is_first_C_frame(frame* fr) { + +#ifdef _WINDOWS + return true; // native stack isn't walkable on windows this way. +#endif + // Load up sp, fp, sender sp and sender fp, check for reasonable values. + // Check usp first, because if that's bad the other accessors may fault + // on some architectures. Ditto ufp second, etc. + + if (is_pointer_bad(fr->sp())) return true; + + uintptr_t ufp = (uintptr_t)fr->fp(); + if (is_pointer_bad(fr->fp())) return true; + + uintptr_t old_sp = (uintptr_t)fr->sender_sp(); + if ((uintptr_t)fr->sender_sp() == (uintptr_t)-1 || is_pointer_bad(fr->sender_sp())) return true; + + uintptr_t old_fp = (uintptr_t)fr->link_or_null(); + if (old_fp == 0 || old_fp == (uintptr_t)-1 || old_fp == ufp || + is_pointer_bad(fr->link_or_null())) return true; + + // stack grows downwards; if old_fp is below current fp or if the stack + // frame is too large, either the stack is corrupted or fp is not saved + // on stack (i.e. on x86, ebp may be used as general register). The stack + // is not walkable beyond current frame. 
+ if (old_fp < ufp) return true; + if (old_fp - ufp > 64 * K) return true; + + return false; +} + +// Set up the boot classpath. + +char* os::format_boot_path(const char* format_string, + const char* home, + int home_len, + char fileSep, + char pathSep) { + assert((fileSep == '/' && pathSep == ':') || + (fileSep == '\\' && pathSep == ';'), "unexpected separator chars"); + + // Scan the format string to determine the length of the actual + // boot classpath, and handle platform dependencies as well. + int formatted_path_len = 0; + const char* p; + for (p = format_string; *p != 0; ++p) { + if (*p == '%') formatted_path_len += home_len - 1; + ++formatted_path_len; + } + + char* formatted_path = NEW_C_HEAP_ARRAY(char, formatted_path_len + 1, mtInternal); + + // Create boot classpath from format, substituting separator chars and + // java home directory. + char* q = formatted_path; + for (p = format_string; *p != 0; ++p) { + switch (*p) { + case '%': + strcpy(q, home); + q += home_len; + break; + case '/': + *q++ = fileSep; + break; + case ':': + *q++ = pathSep; + break; + default: + *q++ = *p; + } + } + *q = '\0'; + + assert((q - formatted_path) == formatted_path_len, "formatted_path size botched"); + return formatted_path; +} +#endif // !NATIVE_IMAGE + +// This function is a proxy to fopen, it tries to add a non standard flag ('e' or 'N') +// that ensures automatic closing of the file on exec. If it can not find support in +// the underlying c library, it will make an extra system call (fcntl) to ensure automatic +// closing of the file on exec. 
+FILE* os::fopen(const char* path, const char* mode) { + char modified_mode[20]; + assert(strlen(mode) + 1 < sizeof(modified_mode), "mode chars plus one extra must fit in buffer"); + os::snprintf_checked(modified_mode, sizeof(modified_mode), "%s" LINUX_ONLY("e") BSD_ONLY("e") WINDOWS_ONLY("N"), mode); + FILE* file = ::fopen(path, modified_mode); + +#if !(defined LINUX || defined BSD || defined _WINDOWS) + // assume fcntl FD_CLOEXEC support as a backup solution when 'e' or 'N' + // is not supported as mode in fopen + if (file != nullptr) { + int fd = fileno(file); + if (fd != -1) { + int fd_flags = fcntl(fd, F_GETFD); + if (fd_flags != -1) { + fcntl(fd, F_SETFD, fd_flags | FD_CLOEXEC); + } + } + } +#endif + + return file; +} + +#ifndef NATIVE_IMAGE +bool os::set_boot_path(char fileSep, char pathSep) { + const char* home = Arguments::get_java_home(); + int home_len = (int)strlen(home); + + struct stat st; + + // modular image if "modules" jimage exists + char* jimage = format_boot_path("%/lib/" MODULES_IMAGE_NAME, home, home_len, fileSep, pathSep); + if (jimage == nullptr) return false; + bool has_jimage = (os::stat(jimage, &st) == 0); + if (has_jimage) { + Arguments::set_boot_class_path(jimage, true); + FREE_C_HEAP_ARRAY(char, jimage); + return true; + } + FREE_C_HEAP_ARRAY(char, jimage); + + // check if developer build with exploded modules + char* base_classes = format_boot_path("%/modules/" JAVA_BASE_NAME, home, home_len, fileSep, pathSep); + if (base_classes == nullptr) return false; + if (os::stat(base_classes, &st) == 0) { + Arguments::set_boot_class_path(base_classes, false); + FREE_C_HEAP_ARRAY(char, base_classes); + return true; + } + FREE_C_HEAP_ARRAY(char, base_classes); + + return false; +} + +bool os::file_exists(const char* filename) { + struct stat statbuf; + if (filename == nullptr || strlen(filename) == 0) { + return false; + } + return os::stat(filename, &statbuf) == 0; +} + +bool os::write(int fd, const void *buf, size_t nBytes) { + ssize_t res; + 
+ while (nBytes > 0) { + res = pd_write(fd, buf, nBytes); + if (res == OS_ERR) { + return false; + } + buf = (void *)((char *)buf + res); + nBytes -= res; + } + + return true; +} + + +// Splits a path, based on its separator, the number of +// elements is returned back in "elements". +// file_name_length is used as a modifier for each path's +// length when compared to JVM_MAXPATHLEN. So if you know +// each returned path will have something appended when +// in use, you can pass the length of that in +// file_name_length, to ensure we detect if any path +// exceeds the maximum path length once prepended onto +// the sub-path/file name. +// It is the callers responsibility to: +// a> check the value of "elements", which may be 0. +// b> ignore any empty path elements +// c> free up the data. +char** os::split_path(const char* path, size_t* elements, size_t file_name_length) { + *elements = (size_t)0; + if (path == nullptr || strlen(path) == 0 || file_name_length == (size_t)nullptr) { + return nullptr; + } + const char psepchar = *os::path_separator(); + char* inpath = NEW_C_HEAP_ARRAY(char, strlen(path) + 1, mtInternal); + strcpy(inpath, path); + size_t count = 1; + char* p = strchr(inpath, psepchar); + // Get a count of elements to allocate memory + while (p != nullptr) { + count++; + p++; + p = strchr(p, psepchar); + } + + char** opath = NEW_C_HEAP_ARRAY(char*, count, mtInternal); + + // do the actual splitting + p = inpath; + for (size_t i = 0 ; i < count ; i++) { + size_t len = strcspn(p, os::path_separator()); + if (len + file_name_length > JVM_MAXPATHLEN) { + // release allocated storage before exiting the vm + free_array_of_char_arrays(opath, i++); + vm_exit_during_initialization("The VM tried to use a path that exceeds the maximum path length for " + "this system. 
Review path-containing parameters and properties, such as " + "sun.boot.library.path, to identify potential sources for this path."); + } + // allocate the string and add terminator storage + char* s = NEW_C_HEAP_ARRAY(char, len + 1, mtInternal); + strncpy(s, p, len); + s[len] = '\0'; + opath[i] = s; + p += len + 1; + } + FREE_C_HEAP_ARRAY(char, inpath); + *elements = count; + return opath; +} + +// Returns true if the current stack pointer is above the stack shadow +// pages, false otherwise. +bool os::stack_shadow_pages_available(Thread *thread, const methodHandle& method, address sp) { + if (!thread->is_Java_thread()) return false; + // Check if we have StackShadowPages above the guard zone. This parameter + // is dependent on the depth of the maximum VM call stack possible from + // the handler for stack overflow. 'instanceof' in the stack overflow + // handler or a println uses at least 8k stack of VM and native code + // respectively. + const int framesize_in_bytes = + Interpreter::size_top_interpreter_activation(method()) * wordSize; + + address limit = JavaThread::cast(thread)->stack_overflow_state()->shadow_zone_safe_limit(); + return sp > (limit + framesize_in_bytes); +} + +size_t os::page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned) { + assert(min_pages > 0, "sanity"); + if (UseLargePages) { + const size_t max_page_size = region_size / min_pages; + + for (size_t page_size = page_sizes().largest(); page_size != 0; + page_size = page_sizes().next_smaller(page_size)) { + if (page_size <= max_page_size) { + if (!must_be_aligned || is_aligned(region_size, page_size)) { + return page_size; + } + } + } + } + + return vm_page_size(); +} + +size_t os::page_size_for_region_aligned(size_t region_size, size_t min_pages) { + return page_size_for_region(region_size, min_pages, true); +} + +size_t os::page_size_for_region_unaligned(size_t region_size, size_t min_pages) { + return page_size_for_region(region_size, min_pages, false); +} + 
+#ifndef MAX_PATH +#define MAX_PATH (2 * K) +#endif + +void os::pause() { + char filename[MAX_PATH]; + if (PauseAtStartupFile && PauseAtStartupFile[0]) { + jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile); + } else { + jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); + } + + int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); + if (fd != -1) { + struct stat buf; + ::close(fd); + while (::stat(filename, &buf) == 0) { +#if defined(_WINDOWS) + Sleep(100); +#else + (void)::poll(nullptr, 0, 100); +#endif + } + } else { + jio_fprintf(stderr, + "Could not open pause file '%s', continuing immediately.\n", filename); + } +} +#endif // !NATIVE_IMAGE + +static const char* errno_to_string (int e, bool short_text) { + #define ALL_SHARED_ENUMS(X) \ + X(E2BIG, "Argument list too long") \ + X(EACCES, "Permission denied") \ + X(EADDRINUSE, "Address in use") \ + X(EADDRNOTAVAIL, "Address not available") \ + X(EAFNOSUPPORT, "Address family not supported") \ + X(EAGAIN, "Resource unavailable, try again") \ + X(EALREADY, "Connection already in progress") \ + X(EBADF, "Bad file descriptor") \ + X(EBADMSG, "Bad message") \ + X(EBUSY, "Device or resource busy") \ + X(ECANCELED, "Operation canceled") \ + X(ECHILD, "No child processes") \ + X(ECONNABORTED, "Connection aborted") \ + X(ECONNREFUSED, "Connection refused") \ + X(ECONNRESET, "Connection reset") \ + X(EDEADLK, "Resource deadlock would occur") \ + X(EDESTADDRREQ, "Destination address required") \ + X(EDOM, "Mathematics argument out of domain of function") \ + X(EEXIST, "File exists") \ + X(EFAULT, "Bad address") \ + X(EFBIG, "File too large") \ + X(EHOSTUNREACH, "Host is unreachable") \ + X(EIDRM, "Identifier removed") \ + X(EILSEQ, "Illegal byte sequence") \ + X(EINPROGRESS, "Operation in progress") \ + X(EINTR, "Interrupted function") \ + X(EINVAL, "Invalid argument") \ + X(EIO, "I/O error") \ + X(EISCONN, "Socket is connected") \ + X(EISDIR, "Is a directory") \ + X(ELOOP, "Too many 
levels of symbolic links") \ + X(EMFILE, "Too many open files") \ + X(EMLINK, "Too many links") \ + X(EMSGSIZE, "Message too large") \ + X(ENAMETOOLONG, "Filename too long") \ + X(ENETDOWN, "Network is down") \ + X(ENETRESET, "Connection aborted by network") \ + X(ENETUNREACH, "Network unreachable") \ + X(ENFILE, "Too many files open in system") \ + X(ENOBUFS, "No buffer space available") \ + X(ENODATA, "No message is available on the STREAM head read queue") \ + X(ENODEV, "No such device") \ + X(ENOENT, "No such file or directory") \ + X(ENOEXEC, "Executable file format error") \ + X(ENOLCK, "No locks available") \ + X(ENOLINK, "Reserved") \ + X(ENOMEM, "Not enough space") \ + X(ENOMSG, "No message of the desired type") \ + X(ENOPROTOOPT, "Protocol not available") \ + X(ENOSPC, "No space left on device") \ + X(ENOSR, "No STREAM resources") \ + X(ENOSTR, "Not a STREAM") \ + X(ENOSYS, "Function not supported") \ + X(ENOTCONN, "The socket is not connected") \ + X(ENOTDIR, "Not a directory") \ + X(ENOTEMPTY, "Directory not empty") \ + X(ENOTSOCK, "Not a socket") \ + X(ENOTSUP, "Not supported") \ + X(ENOTTY, "Inappropriate I/O control operation") \ + X(ENXIO, "No such device or address") \ + X(EOPNOTSUPP, "Operation not supported on socket") \ + X(EOVERFLOW, "Value too large to be stored in data type") \ + X(EPERM, "Operation not permitted") \ + X(EPIPE, "Broken pipe") \ + X(EPROTO, "Protocol error") \ + X(EPROTONOSUPPORT, "Protocol not supported") \ + X(EPROTOTYPE, "Protocol wrong type for socket") \ + X(ERANGE, "Result too large") \ + X(EROFS, "Read-only file system") \ + X(ESPIPE, "Invalid seek") \ + X(ESRCH, "No such process") \ + X(ETIME, "Stream ioctl() timeout") \ + X(ETIMEDOUT, "Connection timed out") \ + X(ETXTBSY, "Text file busy") \ + X(EWOULDBLOCK, "Operation would block") \ + X(EXDEV, "Cross-device link") + + #define DEFINE_ENTRY(e, text) { e, #e, text }, + + static const struct { + int v; + const char* short_text; + const char* long_text; + } table [] = { 
+ + ALL_SHARED_ENUMS(DEFINE_ENTRY) + + // The following enums are not defined on all platforms. + #ifdef ESTALE + DEFINE_ENTRY(ESTALE, "Reserved") + #endif + #ifdef EDQUOT + DEFINE_ENTRY(EDQUOT, "Reserved") + #endif + #ifdef EMULTIHOP + DEFINE_ENTRY(EMULTIHOP, "Reserved") + #endif + + // End marker. + { -1, "Unknown errno", "Unknown error" } + + }; + + #undef DEFINE_ENTRY + #undef ALL_FLAGS + + int i = 0; + while (table[i].v != -1 && table[i].v != e) { + i ++; + } + + return short_text ? table[i].short_text : table[i].long_text; + +} + +const char* os::strerror(int e) { + return errno_to_string(e, false); +} + +const char* os::errno_name(int e) { + return errno_to_string(e, true); +} + +#ifndef NATIVE_IMAGE +#define trace_page_size_params(size) byte_size_in_exact_unit(size), exact_unit_for_byte_size(size) + +void os::trace_page_sizes(const char* str, + const size_t region_min_size, + const size_t region_max_size, + const char* base, + const size_t size, + const size_t page_size) { + + log_info(pagesize)("%s: " + " min=" SIZE_FORMAT "%s" + " max=" SIZE_FORMAT "%s" + " base=" PTR_FORMAT + " size=" SIZE_FORMAT "%s" + " page_size=" SIZE_FORMAT "%s", + str, + trace_page_size_params(region_min_size), + trace_page_size_params(region_max_size), + p2i(base), + trace_page_size_params(size), + trace_page_size_params(page_size)); +} + +void os::trace_page_sizes_for_requested_size(const char* str, + const size_t requested_size, + const size_t requested_page_size, + const char* base, + const size_t size, + const size_t page_size) { + + log_info(pagesize)("%s:" + " req_size=" SIZE_FORMAT "%s" + " req_page_size=" SIZE_FORMAT "%s" + " base=" PTR_FORMAT + " size=" SIZE_FORMAT "%s" + " page_size=" SIZE_FORMAT "%s", + str, + trace_page_size_params(requested_size), + trace_page_size_params(requested_page_size), + p2i(base), + trace_page_size_params(size), + trace_page_size_params(page_size)); +} + + +// This is the working definition of a server class machine: +// >= 2 physical CPU's 
and >=2GB of memory, with some fuzz +// because the graphics memory (?) sometimes masks physical memory. +// If you want to change the definition of a server class machine +// on some OS or platform, e.g., >=4GB on Windows platforms, +// then you'll have to parameterize this method based on that state, +// as was done for logical processors here, or replicate and +// specialize this method for each platform. (Or fix os to have +// some inheritance structure and use subclassing. Sigh.) +// If you want some platform to always or never behave as a server +// class machine, change the setting of AlwaysActAsServerClassMachine +// and NeverActAsServerClassMachine in globals*.hpp. +bool os::is_server_class_machine() { + // First check for the early returns + if (NeverActAsServerClassMachine) { + return false; + } + if (AlwaysActAsServerClassMachine) { + return true; + } + // Then actually look at the machine + bool result = false; + const unsigned int server_processors = 2; + const julong server_memory = 2UL * G; + // We seem not to get our full complement of memory. + // We allow some part (1/8?) of the memory to be "missing", + // based on the sizes of DIMMs, and maybe graphics cards. + const julong missing_memory = 256UL * M; + + /* Is this a server class machine? 
*/ + if ((os::active_processor_count() >= (int)server_processors) && + (os::physical_memory() >= (server_memory - missing_memory))) { + const unsigned int logical_processors = + VM_Version::logical_processors_per_package(); + if (logical_processors > 1) { + const unsigned int physical_packages = + os::active_processor_count() / logical_processors; + if (physical_packages >= server_processors) { + result = true; + } + } else { + result = true; + } + } + return result; +} + +void os::initialize_initial_active_processor_count() { + assert(_initial_active_processor_count == 0, "Initial active processor count already set."); + _initial_active_processor_count = active_processor_count(); + log_debug(os)("Initial active processor count set to %d" , _initial_active_processor_count); +} + +bool os::create_stack_guard_pages(char* addr, size_t bytes) { + return os::pd_create_stack_guard_pages(addr, bytes); +} + +char* os::reserve_memory(size_t bytes, bool executable, MEMFLAGS flags) { + char* result = pd_reserve_memory(bytes, executable); + if (result != nullptr) { + MemTracker::record_virtual_memory_reserve(result, bytes, CALLER_PC, flags); + } + return result; +} + +char* os::attempt_reserve_memory_at(char* addr, size_t bytes, bool executable) { + char* result = SimulateFullAddressSpace ? 
nullptr : pd_attempt_reserve_memory_at(addr, bytes, executable); + if (result != nullptr) { + MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); + log_debug(os)("Reserved memory at " INTPTR_FORMAT " for " SIZE_FORMAT " bytes.", p2i(addr), bytes); + } else { + log_debug(os)("Attempt to reserve memory at " INTPTR_FORMAT " for " + SIZE_FORMAT " bytes failed, errno %d", p2i(addr), bytes, get_last_error()); + } + return result; +} + +#ifdef ASSERT +static void print_points(const char* s, unsigned* points, unsigned num) { + stringStream ss; + for (unsigned i = 0; i < num; i ++) { + ss.print("%u ", points[i]); + } + log_trace(os, map)("%s, %u Points: %s", s, num, ss.base()); +} +#endif + +// Helper for os::attempt_reserve_memory_between +// Given an array of things, shuffle them (Fisher-Yates) +template +static void shuffle_fisher_yates(T* arr, unsigned num, FastRandom& frand) { + for (unsigned i = num - 1; i >= 1; i--) { + unsigned j = frand.next() % i; + swap(arr[i], arr[j]); + } +} + +// Helper for os::attempt_reserve_memory_between +// Given an array of things, do a hemisphere split such that the resulting +// order is: [first, last, first + 1, last - 1, ...] +template +static void hemi_split(T* arr, unsigned num) { + T* tmp = (T*)::alloca(sizeof(T) * num); + for (unsigned i = 0; i < num; i++) { + tmp[i] = arr[i]; + } + for (unsigned i = 0; i < num; i++) { + arr[i] = is_even(i) ? tmp[i / 2] : tmp[num - (i / 2) - 1]; + } +} + +// Given an address range [min, max), attempts to reserve memory within this area, with the given alignment. +// If randomize is true, the location will be randomized. +char* os::attempt_reserve_memory_between(char* min, char* max, size_t bytes, size_t alignment, bool randomize) { + + // Please keep the following constants in sync with the companion gtests: + + // Number of mmap attempts we will undertake. 
+ constexpr unsigned max_attempts = 32; + + // In randomization mode: We require a minimum number of possible attach points for + // randomness. Below that we refuse to reserve anything. + constexpr unsigned min_random_value_range = 16; + + // In randomization mode: If the possible value range is below this threshold, we + // use a total shuffle without regard for address space fragmentation, otherwise + // we attempt to minimize fragmentation. + constexpr unsigned total_shuffle_threshold = 1024; + +#define ARGSFMT "range [" PTR_FORMAT "-" PTR_FORMAT "), size " SIZE_FORMAT_X ", alignment " SIZE_FORMAT_X ", randomize: %d" +#define ARGSFMTARGS p2i(min), p2i(max), bytes, alignment, randomize + + log_debug(os, map) ("reserve_between (" ARGSFMT ")", ARGSFMTARGS); + + assert(is_power_of_2(alignment), "alignment invalid (" ARGSFMT ")", ARGSFMTARGS); + assert(alignment < SIZE_MAX / 2, "alignment too large (" ARGSFMT ")", ARGSFMTARGS); + assert(is_aligned(bytes, os::vm_page_size()), "size not page aligned (" ARGSFMT ")", ARGSFMTARGS); + assert(max >= min, "invalid range (" ARGSFMT ")", ARGSFMTARGS); + + char* const absolute_max = (char*)(NOT_LP64(G * 3) LP64_ONLY(G * 128 * 1024)); + char* const absolute_min = (char*) os::vm_min_address(); + + const size_t alignment_adjusted = MAX2(alignment, os::vm_allocation_granularity()); + + // Calculate first and last possible attach points: + char* const lo_att = align_up(MAX2(absolute_min, min), alignment_adjusted); + if (lo_att == nullptr) { + return nullptr; // overflow + } + + char* const hi_att = align_down(MIN2(max, absolute_max) - bytes, alignment_adjusted); + if (hi_att > max) { + return nullptr; // overflow + } + + // no possible attach points + if (hi_att < lo_att) { + return nullptr; + } + + char* result = nullptr; + + const size_t num_attach_points = (size_t)((hi_att - lo_att) / alignment_adjusted) + 1; + assert(num_attach_points > 0, "Sanity"); + + // If this fires, the input range is too large for the given alignment (we 
work + // with int below to keep things simple). Since alignment is bound to page size, + // and the lowest page size is 4K, this gives us a minimum of 4K*4G=8TB address + // range. + assert(num_attach_points <= UINT_MAX, + "Too many possible attach points - range too large or alignment too small (" ARGSFMT ")", ARGSFMTARGS); + + const unsigned num_attempts = MIN2((unsigned)num_attach_points, max_attempts); + unsigned points[max_attempts]; + + if (randomize) { + FastRandom frand; + + if (num_attach_points < min_random_value_range) { + return nullptr; + } + + // We pre-calc the attach points: + // 1 We divide the attach range into equidistant sections and calculate an attach + // point within each section. + // 2 We wiggle those attach points around within their section (depends on attach + // point granularity) + // 3 Should that not be enough to get effective randomization, shuffle all + // attach points + // 4 Otherwise, re-order them to get an optimized probing sequence. + const unsigned stepsize = (unsigned)num_attach_points / num_attempts; + const unsigned half = num_attempts / 2; + + // 1+2: pre-calc points + for (unsigned i = 0; i < num_attempts; i++) { + const unsigned deviation = stepsize > 1 ? (frand.next() % stepsize) : 0; + points[i] = (i * stepsize) + deviation; + } + + if (num_attach_points < total_shuffle_threshold) { + // 3: + // The number of possible attach points is too low for the "wiggle" from + // point 2 to be enough to provide randomization. In that case, shuffle + // all attach points at the cost of possible fragmentation (e.g. if we + // end up mapping into the middle of the range). + shuffle_fisher_yates(points, num_attempts, frand); + } else { + // 4 + // We have a large enough number of attach points to satisfy the randomness + // goal without. In that case, we optimize probing by sorting the attach + // points: We attempt outermost points first, then work ourselves up to + // the middle. That reduces address space fragmentation. 
We also alternate + // hemispheres, which increases the chance of successful mappings if the + // previous mapping had been blocked by large maps. + hemi_split(points, num_attempts); + } + } // end: randomized + else + { + // Non-randomized. We just attempt to reserve by probing sequentially. We + // alternate between hemispheres, working ourselves up to the middle. + const int stepsize = (unsigned)num_attach_points / num_attempts; + for (unsigned i = 0; i < num_attempts; i++) { + points[i] = (i * stepsize); + } + hemi_split(points, num_attempts); + } + +#ifdef ASSERT + // Print + check all pre-calculated attach points + print_points("before reserve", points, num_attempts); + for (unsigned i = 0; i < num_attempts; i++) { + assert(points[i] < num_attach_points, "Candidate attach point %d out of range (%u, num_attach_points: %zu) " ARGSFMT, + i, points[i], num_attach_points, ARGSFMTARGS); + } +#endif + + // Now reserve + for (unsigned i = 0; result == nullptr && i < num_attempts; i++) { + const unsigned candidate_offset = points[i]; + char* const candidate = lo_att + candidate_offset * alignment_adjusted; + assert(candidate <= hi_att, "Invalid offset %u (" ARGSFMT ")", candidate_offset, ARGSFMTARGS); + result = SimulateFullAddressSpace ? 
nullptr : os::pd_attempt_reserve_memory_at(candidate, bytes, false); + if (!result) { + log_trace(os, map)("Failed to attach at " PTR_FORMAT, p2i(candidate)); + } + } + + // Sanity checks, logging, NMT stuff: + if (result != nullptr) { +#define ERRFMT "result: " PTR_FORMAT " " ARGSFMT +#define ERRFMTARGS p2i(result), ARGSFMTARGS + assert(result >= min, "OOB min (" ERRFMT ")", ERRFMTARGS); + assert((result + bytes) <= max, "OOB max (" ERRFMT ")", ERRFMTARGS); + assert(result >= (char*)os::vm_min_address(), "OOB vm.map min (" ERRFMT ")", ERRFMTARGS); + assert((result + bytes) <= absolute_max, "OOB vm.map max (" ERRFMT ")", ERRFMTARGS); + assert(is_aligned(result, alignment), "alignment invalid (" ERRFMT ")", ERRFMTARGS); + log_trace(os, map)(ERRFMT, ERRFMTARGS); + log_debug(os, map)("successfully attached at " PTR_FORMAT, p2i(result)); + MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); + } else { + log_debug(os, map)("failed to attach anywhere in [" PTR_FORMAT "-" PTR_FORMAT ")", p2i(min), p2i(max)); + } + return result; +#undef ARGSFMT +#undef ERRFMT +#undef ARGSFMTARGS +#undef ERRFMTARGS +} + +static void assert_nonempty_range(const char* addr, size_t bytes) { + assert(addr != nullptr && bytes > 0, "invalid range [" PTR_FORMAT ", " PTR_FORMAT ")", + p2i(addr), p2i(addr) + bytes); +} + +bool os::commit_memory(char* addr, size_t bytes, bool executable) { + assert_nonempty_range(addr, bytes); + bool res = pd_commit_memory(addr, bytes, executable); + if (res) { + MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC); + } + return res; +} + +bool os::commit_memory(char* addr, size_t size, size_t alignment_hint, + bool executable) { + assert_nonempty_range(addr, size); + bool res = os::pd_commit_memory(addr, size, alignment_hint, executable); + if (res) { + MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC); + } + return res; +} + +void os::commit_memory_or_exit(char* addr, size_t bytes, bool 
executable, + const char* mesg) { + assert_nonempty_range(addr, bytes); + pd_commit_memory_or_exit(addr, bytes, executable, mesg); + MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC); +} + +void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint, + bool executable, const char* mesg) { + assert_nonempty_range(addr, size); + os::pd_commit_memory_or_exit(addr, size, alignment_hint, executable, mesg); + MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC); +} + +bool os::uncommit_memory(char* addr, size_t bytes, bool executable) { + assert_nonempty_range(addr, bytes); + bool res; + if (MemTracker::enabled()) { + Tracker tkr(Tracker::uncommit); + res = pd_uncommit_memory(addr, bytes, executable); + if (res) { + tkr.record((address)addr, bytes); + } + } else { + res = pd_uncommit_memory(addr, bytes, executable); + } + return res; +} + +bool os::release_memory(char* addr, size_t bytes) { + assert_nonempty_range(addr, bytes); + bool res; + if (MemTracker::enabled()) { + // Note: Tracker contains a ThreadCritical. + Tracker tkr(Tracker::release); + res = pd_release_memory(addr, bytes); + if (res) { + tkr.record((address)addr, bytes); + } + } else { + res = pd_release_memory(addr, bytes); + } + if (!res) { + log_info(os)("os::release_memory failed (" PTR_FORMAT ", " SIZE_FORMAT ")", p2i(addr), bytes); + } + return res; +} + +// Prints all mappings +void os::print_memory_mappings(outputStream* st) { + os::print_memory_mappings(nullptr, SIZE_MAX, st); +} + +// Pretouching must use a store, not just a load. On many OSes loads from +// fresh memory would be satisfied from a single mapped page containing all +// zeros. We need to store something to each page to get them backed by +// their own memory, which is the effect we want here. 
An atomic add of +// zero is used instead of a simple store, allowing the memory to be used +// while pretouch is in progress, rather than requiring users of the memory +// to wait until the entire range has been touched. This is technically +// a UB data race, but doesn't cause any problems for us. +void os::pretouch_memory(void* start, void* end, size_t page_size) { + assert(start <= end, "invalid range: " PTR_FORMAT " -> " PTR_FORMAT, p2i(start), p2i(end)); + assert(is_power_of_2(page_size), "page size misaligned: %zu", page_size); + assert(page_size >= sizeof(int), "page size too small: %zu", page_size); + if (start < end) { + // We're doing concurrent-safe touch and memory state has page + // granularity, so we can touch anywhere in a page. Touch at the + // beginning of each page to simplify iteration. + char* cur = static_cast(align_down(start, page_size)); + void* last = align_down(static_cast(end) - 1, page_size); + assert(cur <= last, "invariant"); + // Iterate from first page through last (inclusive), being careful to + // avoid overflow if the last page abuts the end of the address range. + for ( ; true; cur += page_size) { + Atomic::add(reinterpret_cast(cur), 0, memory_order_relaxed); + if (cur >= last) break; + } + } +} + +char* os::map_memory_to_file(size_t bytes, int file_desc) { + // Could have called pd_reserve_memory() followed by replace_existing_mapping_with_file_mapping(), + // but AIX may use SHM in which case its more trouble to detach the segment and remap memory to the file. + // On all current implementations null is interpreted as any available address. 
+ char* result = os::map_memory_to_file(nullptr /* addr */, bytes, file_desc); + if (result != nullptr) { + MemTracker::record_virtual_memory_reserve_and_commit(result, bytes, CALLER_PC); + } + return result; +} + +char* os::attempt_map_memory_to_file_at(char* addr, size_t bytes, int file_desc) { + char* result = pd_attempt_map_memory_to_file_at(addr, bytes, file_desc); + if (result != nullptr) { + MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC); + } + return result; +} + +char* os::map_memory(int fd, const char* file_name, size_t file_offset, + char *addr, size_t bytes, bool read_only, + bool allow_exec, MEMFLAGS flags) { + char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec); + if (result != nullptr) { + MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC, flags); + } + return result; +} + +char* os::remap_memory(int fd, const char* file_name, size_t file_offset, + char *addr, size_t bytes, bool read_only, + bool allow_exec) { + return pd_remap_memory(fd, file_name, file_offset, addr, bytes, + read_only, allow_exec); +} + +bool os::unmap_memory(char *addr, size_t bytes) { + bool result; + if (MemTracker::enabled()) { + Tracker tkr(Tracker::release); + result = pd_unmap_memory(addr, bytes); + if (result) { + tkr.record((address)addr, bytes); + } + } else { + result = pd_unmap_memory(addr, bytes); + } + return result; +} + +void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) { + pd_free_memory(addr, bytes, alignment_hint); +} + +void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { + pd_realign_memory(addr, bytes, alignment_hint); +} + +char* os::reserve_memory_special(size_t size, size_t alignment, size_t page_size, + char* addr, bool executable) { + + assert(is_aligned(addr, alignment), "Unaligned request address"); + + char* result = pd_reserve_memory_special(size, alignment, page_size, addr, executable); + if 
(result != nullptr) { + // The memory is committed + MemTracker::record_virtual_memory_reserve_and_commit((address)result, size, CALLER_PC); + } + + return result; +} + +bool os::release_memory_special(char* addr, size_t bytes) { + bool res; + if (MemTracker::enabled()) { + // Note: Tracker contains a ThreadCritical. + Tracker tkr(Tracker::release); + res = pd_release_memory_special(addr, bytes); + if (res) { + tkr.record((address)addr, bytes); + } + } else { + res = pd_release_memory_special(addr, bytes); + } + return res; +} + +// Convenience wrapper around naked_short_sleep to allow for longer sleep +// times. Only for use by non-JavaThreads. +void os::naked_sleep(jlong millis) { + assert(!Thread::current()->is_Java_thread(), "not for use by JavaThreads"); + const jlong limit = 999; + while (millis > limit) { + naked_short_sleep(limit); + millis -= limit; + } + naked_short_sleep(millis); +} + + +////// Implementation of PageSizes + +void os::PageSizes::add(size_t page_size) { + assert(is_power_of_2(page_size), "page_size must be a power of 2: " SIZE_FORMAT_X, page_size); + _v |= page_size; +} + +bool os::PageSizes::contains(size_t page_size) const { + assert(is_power_of_2(page_size), "page_size must be a power of 2: " SIZE_FORMAT_X, page_size); + return (_v & page_size) != 0; +} + +size_t os::PageSizes::next_smaller(size_t page_size) const { + assert(is_power_of_2(page_size), "page_size must be a power of 2: " SIZE_FORMAT_X, page_size); + size_t v2 = _v & (page_size - 1); + if (v2 == 0) { + return 0; + } + return round_down_power_of_2(v2); +} + +size_t os::PageSizes::next_larger(size_t page_size) const { + assert(is_power_of_2(page_size), "page_size must be a power of 2: " SIZE_FORMAT_X, page_size); + if (page_size == max_power_of_2()) { // Shift by 32/64 would be UB + return 0; + } + // Remove current and smaller page sizes + size_t v2 = _v & ~(page_size + (page_size - 1)); + if (v2 == 0) { + return 0; + } + return (size_t)1 << count_trailing_zeros(v2); +} + 
+size_t os::PageSizes::largest() const { + const size_t max = max_power_of_2(); + if (contains(max)) { + return max; + } + return next_smaller(max); +} + +size_t os::PageSizes::smallest() const { + // Strictly speaking the set should not contain sizes < os::vm_page_size(). + // But this is not enforced. + return next_larger(1); +} + +void os::PageSizes::print_on(outputStream* st) const { + bool first = true; + for (size_t sz = smallest(); sz != 0; sz = next_larger(sz)) { + if (first) { + first = false; + } else { + st->print_raw(", "); + } + if (sz < M) { + st->print(SIZE_FORMAT "k", sz / K); + } else if (sz < G) { + st->print(SIZE_FORMAT "M", sz / M); + } else { + st->print(SIZE_FORMAT "G", sz / G); + } + } + if (first) { + st->print("empty"); + } +} + +// Check minimum allowable stack sizes for thread creation and to initialize +// the java system classes, including StackOverflowError - depends on page +// size. +// The space needed for frames during startup is platform dependent. It +// depends on word size, platform calling conventions, C frame layout and +// interpreter/C1/C2 design decisions. Therefore this is given in a +// platform (os/cpu) dependent constant. +// To this, space for guard mechanisms is added, which depends on the +// page size which again depends on the concrete system the VM is running +// on. Space for libc guard pages is not included in this size. 
+jint os::set_minimum_stack_sizes() { + + _java_thread_min_stack_allowed = _java_thread_min_stack_allowed + + StackOverflow::stack_guard_zone_size() + + StackOverflow::stack_shadow_zone_size(); + + _java_thread_min_stack_allowed = align_up(_java_thread_min_stack_allowed, vm_page_size()); + _java_thread_min_stack_allowed = MAX2(_java_thread_min_stack_allowed, _os_min_stack_allowed); + + size_t stack_size_in_bytes = ThreadStackSize * K; + if (stack_size_in_bytes != 0 && + stack_size_in_bytes < _java_thread_min_stack_allowed) { + // The '-Xss' and '-XX:ThreadStackSize=N' options both set + // ThreadStackSize so we go with "Java thread stack size" instead + // of "ThreadStackSize" to be more friendly. + tty->print_cr("\nThe Java thread stack size specified is too small. " + "Specify at least " SIZE_FORMAT "k", + _java_thread_min_stack_allowed / K); + return JNI_ERR; + } + + // Make the stack size a multiple of the page size so that + // the yellow/red zones can be guarded. + JavaThread::set_stack_size_at_create(align_up(stack_size_in_bytes, vm_page_size())); + + // Reminder: a compiler thread is a Java thread. + _compiler_thread_min_stack_allowed = _compiler_thread_min_stack_allowed + + StackOverflow::stack_guard_zone_size() + + StackOverflow::stack_shadow_zone_size(); + + _compiler_thread_min_stack_allowed = align_up(_compiler_thread_min_stack_allowed, vm_page_size()); + _compiler_thread_min_stack_allowed = MAX2(_compiler_thread_min_stack_allowed, _os_min_stack_allowed); + + stack_size_in_bytes = CompilerThreadStackSize * K; + if (stack_size_in_bytes != 0 && + stack_size_in_bytes < _compiler_thread_min_stack_allowed) { + tty->print_cr("\nThe CompilerThreadStackSize specified is too small. 
" + "Specify at least " SIZE_FORMAT "k", + _compiler_thread_min_stack_allowed / K); + return JNI_ERR; + } + + _vm_internal_thread_min_stack_allowed = align_up(_vm_internal_thread_min_stack_allowed, vm_page_size()); + _vm_internal_thread_min_stack_allowed = MAX2(_vm_internal_thread_min_stack_allowed, _os_min_stack_allowed); + + stack_size_in_bytes = VMThreadStackSize * K; + if (stack_size_in_bytes != 0 && + stack_size_in_bytes < _vm_internal_thread_min_stack_allowed) { + tty->print_cr("\nThe VMThreadStackSize specified is too small. " + "Specify at least " SIZE_FORMAT "k", + _vm_internal_thread_min_stack_allowed / K); + return JNI_ERR; + } + return JNI_OK; +} +#endif // !NATIVE_IMAGE diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/runtime/os.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/runtime/os.hpp new file mode 100644 index 000000000000..3d3359844dcc --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/runtime/os.hpp @@ -0,0 +1,1111 @@ +/* + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_RUNTIME_OS_HPP +#define SHARE_RUNTIME_OS_HPP + +#include "jvm_md.h" +#ifndef NATIVE_IMAGE +#include "runtime/osInfo.hpp" +#include "utilities/exceptions.hpp" +#endif // !NATIVE_IMAGE +#include "utilities/ostream.hpp" +#include "utilities/macros.hpp" +#ifndef NATIVE_IMAGE +#ifdef __APPLE__ +# include +#endif + +class frame; +class JvmtiAgent; + +// Rules for using and implementing methods declared in the "os" class +// =================================================================== +// +// The "os" class defines a number of the interfaces for porting HotSpot +// to different operating systems. For example, I/O, memory, timing, etc. +// Note that additional classes such as Semaphore, Mutex, etc., are used for +// porting specific groups of features. +// +// Structure of os*.{cpp, hpp} files +// +// - os.hpp +// +// (This file) declares the entire API of the "os" class. +// +// - os.inline.hpp +// +// To use any of the inline methods declared in the "os" class, this +// header file must be included. +// +// - src/hotspot/os//os_.hpp +// - src/hotspot/os/posix/os_posix.hpp +// +// These headers declare APIs that should be used only within the +// platform-specific source files for that particular OS. +// +// For example, os_linux.hpp declares the os::Linux class, which provides +// many methods that can be used by files under os/linux/ and os_cpu/linux_*/ +// +// os_posix.hpp can be used by platform-specific files for POSIX-like +// OSes such as aix, bsd and linux. 
+// +// Platform-independent source files should not include these header files +// (although sadly there are some rare exceptions ...) +// +// - os.cpp +// +// Platform-independent methods of the "os" class are defined +// in os.cpp. These are not part of the porting interface, but rather +// can be considered as convenience functions for accessing +// the porting interface. E.g., os::print_function_and_library_name(). +// +// The methods declared in os.hpp but not implemented in os.cpp are +// a part the HotSpot Porting APIs. They must be implemented in one of +// the following four files: +// +// - src/hotspot/os//os_.inline.hpp +// - src/hotspot/os_cpu/_/os__.inline.hpp +// - src/hotspot/os//os_.cpp +// - src/hotspot/os_cpu/_/os__.cpp +// +// The Porting APIs declared as "inline" in os.hpp MUST be +// implemented in one of the two .inline.hpp files, depending on +// whether the feature is specific to a particular CPU architecture +// for this OS. These two files are automatically included by +// os.inline.hpp. Platform-independent source files must not include +// these two files directly. +// +// If the full definition of an inline method is too complex to fit in a +// header file, the actual implementation can be deferred to another +// method defined in the .cpp files. +// +// The Porting APIs that are *not* declared as "inline" in os.hpp MUST +// be implemented in one of the two .cpp files above. These files +// also implement OS-specific APIs such as os::Linux, os::Posix, etc. +// +// (Note: on the POSIX-like platforms, some of the Porting APIs are implemented +// in os_posix.cpp instead). + +class Thread; +class JavaThread; +class NativeCallStack; +class methodHandle; +class OSThread; +class Mutex; + +struct jvmtiTimerInfo; + +template class GrowableArray; + +// %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. 
-- Rose + +// Platform-independent error return values from OS functions +enum OSReturn { + OS_OK = 0, // Operation was successful + OS_ERR = -1, // Operation failed + OS_INTRPT = -2, // Operation was interrupted + OS_TIMEOUT = -3, // Operation timed out + OS_NOMEM = -5, // Operation failed for lack of memory + OS_NORESOURCE = -6 // Operation failed for lack of nonmemory resource +}; + +enum ThreadPriority { // JLS 20.20.1-3 + NoPriority = -1, // Initial non-priority value + MinPriority = 1, // Minimum priority + NormPriority = 5, // Normal (non-daemon) priority + NearMaxPriority = 9, // High priority, used for VMThread + MaxPriority = 10, // Highest priority, used for WatcherThread + // ensures that VMThread doesn't starve profiler + CriticalPriority = 11 // Critical thread priority +}; + +enum WXMode { + WXWrite, + WXExec +}; + +// Executable parameter flag for os::commit_memory() and +// os::commit_memory_or_exit(). +const bool ExecMem = true; + +// Typedef for structured exception handling support +typedef void (*java_call_t)(JavaValue* value, const methodHandle& method, JavaCallArguments* args, JavaThread* thread); + +class MallocTracker; +#endif // !NATIVE_IMAGE + +class os: AllStatic { + friend class VMStructs; + friend class JVMCIVMStructs; + friend class MallocTracker; + +#ifndef NATIVE_IMAGE +#ifdef ASSERT + private: + static bool _mutex_init_done; + public: + static void set_mutex_init_done() { _mutex_init_done = true; } + static bool mutex_init_done() { return _mutex_init_done; } +#endif + + public: + + // A simple value class holding a set of page sizes (similar to sigset_t) + class PageSizes { + size_t _v; // actually a bitmap. + public: + PageSizes() : _v(0) {} + void add(size_t pagesize); + bool contains(size_t pagesize) const; + // Given a page size, return the next smaller page size in this set, or 0. + size_t next_smaller(size_t pagesize) const; + // Given a page size, return the next larger page size in this set, or 0. 
+ size_t next_larger(size_t pagesize) const; + // Returns the largest page size in this set, or 0 if set is empty. + size_t largest() const; + // Returns the smallest page size in this set, or 0 if set is empty. + size_t smallest() const; + // Prints one line of comma separated, human readable page sizes, "empty" if empty. + void print_on(outputStream* st) const; + }; + + private: + static OSThread* _starting_thread; + static PageSizes _page_sizes; + + // The default value for os::vm_min_address() unless the platform knows better. This value + // is chosen to give us reasonable protection against NULL pointer dereferences while being + // low enough to leave most of the valuable low-4gb address space open. + static constexpr size_t _vm_min_address_default = 16 * M; + + static char* pd_reserve_memory(size_t bytes, bool executable); + + static char* pd_attempt_reserve_memory_at(char* addr, size_t bytes, bool executable); + + static bool pd_commit_memory(char* addr, size_t bytes, bool executable); + static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint, + bool executable); + // Same as pd_commit_memory() that either succeeds or calls + // vm_exit_out_of_memory() with the specified mesg. 
+ static void pd_commit_memory_or_exit(char* addr, size_t bytes, + bool executable, const char* mesg); + static void pd_commit_memory_or_exit(char* addr, size_t size, + size_t alignment_hint, + bool executable, const char* mesg); + static bool pd_uncommit_memory(char* addr, size_t bytes, bool executable); + static bool pd_release_memory(char* addr, size_t bytes); + + static char* pd_attempt_map_memory_to_file_at(char* addr, size_t bytes, int file_desc); + + static char* pd_map_memory(int fd, const char* file_name, size_t file_offset, + char *addr, size_t bytes, bool read_only = false, + bool allow_exec = false); + static char* pd_remap_memory(int fd, const char* file_name, size_t file_offset, + char *addr, size_t bytes, bool read_only, + bool allow_exec); + static bool pd_unmap_memory(char *addr, size_t bytes); + static void pd_free_memory(char *addr, size_t bytes, size_t alignment_hint); + static void pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint); + + static char* pd_reserve_memory_special(size_t size, size_t alignment, size_t page_size, + + char* addr, bool executable); + static bool pd_release_memory_special(char* addr, size_t bytes); + + static size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned); + + // Get summary strings for system information in buffer provided + static void get_summary_cpu_info(char* buf, size_t buflen); + static void get_summary_os_info(char* buf, size_t buflen); + // Returns number of bytes written on success, OS_ERR on failure. + static ssize_t pd_write(int fd, const void *buf, size_t nBytes); + + static void initialize_initial_active_processor_count(); + + LINUX_ONLY(static void pd_init_container_support();) +#endif // !NATIVE_IMAGE + + public: +#ifndef NATIVE_IMAGE + static void init(void); // Called before command line parsing + + static void init_container_support() { // Called during command line parsing. 
+ LINUX_ONLY(pd_init_container_support();) + } + + static void init_before_ergo(void); // Called after command line parsing + // before VM ergonomics processing. + static jint init_2(void); // Called after command line parsing + // and VM ergonomics processing + + // Get environ pointer, platform independently + static char** get_environ(); + + static bool have_special_privileges(); + + static jlong javaTimeMillis(); + static jlong javaTimeNanos(); + static void javaTimeNanos_info(jvmtiTimerInfo *info_ptr); + static void javaTimeSystemUTC(jlong &seconds, jlong &nanos); + static void run_periodic_checks(outputStream* st); + + // Returns the elapsed time in seconds since the vm started. + static double elapsedTime(); + + // Returns real time in seconds since an arbitrary point + // in the past. + static bool getTimesSecs(double* process_real_time, + double* process_user_time, + double* process_system_time); + + // Interface to the performance counter + static jlong elapsed_counter(); + static jlong elapsed_frequency(); + + // The "virtual time" of a thread is the amount of time a thread has + // actually run. The first function indicates whether the OS supports + // this functionality for the current thread, and if so the second + // returns the elapsed virtual time for the current thread. + static bool supports_vtime(); + static double elapsedVTime(); + + // Return current local time in a string (YYYY-MM-DD HH:MM:SS). + // It is MT safe, but not async-safe, as reading time zone + // information may require a lock on some platforms. + static char* local_time_string(char *buf, size_t buflen); + static struct tm* localtime_pd (const time_t* clock, struct tm* res); + static struct tm* gmtime_pd (const time_t* clock, struct tm* res); + + // "YYYY-MM-DDThh:mm:ss.mmm+zzzz" incl. 
terminating zero + static const size_t iso8601_timestamp_size = 29; + + // Fill in buffer with an ISO-8601 string corresponding to the given javaTimeMillis value + // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz. + // Returns buffer, or null if it failed. + static char* iso8601_time(jlong milliseconds_since_19700101, char* buffer, + size_t buffer_length, bool utc = false); + + // Fill in buffer with current local time as an ISO-8601 string. + // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz. + // Returns buffer, or null if it failed. + static char* iso8601_time(char* buffer, size_t buffer_length, bool utc = false); + + // Interface for detecting multiprocessor system + static inline bool is_MP() { + // During bootstrap if _processor_count is not yet initialized + // we claim to be MP as that is safest. If any platform has a + // stub generator that might be triggered in this phase and for + // which being declared MP when in fact not, is a problem - then + // the bootstrap routine for the stub generator needs to check + // the processor count directly and leave the bootstrap routine + // in place until called after initialization has occurred. + return (_processor_count != 1); + } + + // On some platforms there is a distinction between "available" memory and "free" memory. + // For example, on Linux, "available" memory (`MemAvailable` in `/proc/meminfo`) is greater + // than "free" memory (`MemFree` in `/proc/meminfo`) because Linux can free memory + // aggressively (e.g. clear caches) so that it becomes available. + static julong available_memory(); + static julong free_memory(); +#endif // !NATIVE_IMAGE + + static julong physical_memory(); +#ifndef NATIVE_IMAGE + static bool has_allocatable_memory_limit(size_t* limit); + static bool is_server_class_machine(); + + // Returns the id of the processor on which the calling thread is currently executing. + // The returned value is guaranteed to be between 0 and (os::processor_count() - 1). 
+ static uint processor_id(); +#endif // !NATIVE_IMAGE + + // number of CPUs + static int processor_count() { + return _processor_count; + } + static void set_processor_count(int count) { _processor_count = count; } + +#ifndef NATIVE_IMAGE + // Returns the number of CPUs this process is currently allowed to run on. + // Note that on some OSes this can change dynamically. + static int active_processor_count(); + + // At startup the number of active CPUs this process is allowed to run on. + // This value does not change dynamically. May be different from active_processor_count(). + static int initial_active_processor_count() { + assert(_initial_active_processor_count > 0, "Initial active processor count not set yet."); + return _initial_active_processor_count; + } + + // Give a name to the current thread. + static void set_native_thread_name(const char *name); + + // Interface for stack banging (predetect possible stack overflow for + // exception processing) There are guard pages, and above that shadow + // pages for stack overflow checking. + inline static bool uses_stack_guard_pages(); + inline static bool must_commit_stack_guard_pages(); + inline static void map_stack_shadow_pages(address sp); + static bool stack_shadow_pages_available(Thread *thread, const methodHandle& method, address sp); + + private: + // Minimum stack size a thread can be created with (allowing + // the VM to completely create the thread and enter user code). + // The initial values exclude any guard pages (by HotSpot or libc). + // set_minimum_stack_sizes() will add the size required for + // HotSpot guard pages depending on page size and flag settings. + // Libc guard pages are never considered by these values. 
+ static size_t _compiler_thread_min_stack_allowed; + static size_t _java_thread_min_stack_allowed; + static size_t _vm_internal_thread_min_stack_allowed; + static size_t _os_min_stack_allowed; + + // Check and sets minimum stack sizes + static jint set_minimum_stack_sizes(); + + public: + // Find committed memory region within specified range (start, start + size), + // return true if found any + static bool committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size); + + // OS interface to Virtual Memory + + // Return the default page size. + static size_t vm_page_size() { return OSInfo::vm_page_size(); } + + // The set of page sizes which the VM is allowed to use (may be a subset of + // the page sizes actually available on the platform). + static const PageSizes& page_sizes() { return _page_sizes; } + + // Returns the page size to use for a region of memory. + // region_size / min_pages will always be greater than or equal to the + // returned value. The returned value will divide region_size. + static size_t page_size_for_region_aligned(size_t region_size, size_t min_pages); + + // Returns the page size to use for a region of memory. + // region_size / min_pages will always be greater than or equal to the + // returned value. The returned value might not divide region_size. + static size_t page_size_for_region_unaligned(size_t region_size, size_t min_pages); + + // Return the largest page size that can be used + static size_t max_page_size() { return page_sizes().largest(); } + + // Return a lower bound for page sizes. Also works before os::init completed. + static size_t min_page_size() { return 4 * K; } + + // Methods for tracing page sizes returned by the above method. + // The region_{min,max}_size parameters should be the values + // passed to page_size_for_region() and page_size should be the result of that + // call. 
The (optional) base and size parameters should come from the + // ReservedSpace base() and size() methods. + static void trace_page_sizes(const char* str, + const size_t region_min_size, + const size_t region_max_size, + const char* base, + const size_t size, + const size_t page_size); + static void trace_page_sizes_for_requested_size(const char* str, + const size_t requested_size, + const size_t requested_page_size, + const char* base, + const size_t size, + const size_t page_size); + + static size_t vm_allocation_granularity() { return OSInfo::vm_allocation_granularity(); } + + // Returns the lowest address the process is allowed to map against. + static size_t vm_min_address(); + + inline static size_t cds_core_region_alignment(); + + // Reserves virtual memory. + static char* reserve_memory(size_t bytes, bool executable = false, MEMFLAGS flags = mtNone); + + // Reserves virtual memory that starts at an address that is aligned to 'alignment'. + static char* reserve_memory_aligned(size_t size, size_t alignment, bool executable = false); + + // Attempts to reserve the virtual memory at [addr, addr + bytes). + // Does not overwrite existing mappings. + static char* attempt_reserve_memory_at(char* addr, size_t bytes, bool executable = false); + + // Given an address range [min, max), attempts to reserve memory within this area, with the given alignment. + // If randomize is true, the location will be randomized. + static char* attempt_reserve_memory_between(char* min, char* max, size_t bytes, size_t alignment, bool randomize); + + static bool commit_memory(char* addr, size_t bytes, bool executable); + static bool commit_memory(char* addr, size_t size, size_t alignment_hint, + bool executable); + // Same as commit_memory() that either succeeds or calls + // vm_exit_out_of_memory() with the specified mesg. 
+ static void commit_memory_or_exit(char* addr, size_t bytes, + bool executable, const char* mesg); + static void commit_memory_or_exit(char* addr, size_t size, + size_t alignment_hint, + bool executable, const char* mesg); + static bool uncommit_memory(char* addr, size_t bytes, bool executable = false); + static bool release_memory(char* addr, size_t bytes); + + // Does the platform support trimming the native heap? + static bool can_trim_native_heap(); + + // Trim the C-heap. Optionally returns working set size change (RSS+Swap) in *rss_change. + // Note: If trimming succeeded but no size change information could be obtained, + // rss_change.after will contain SIZE_MAX upon return. + struct size_change_t { size_t before; size_t after; }; + static bool trim_native_heap(size_change_t* rss_change = nullptr); + + // A diagnostic function to print memory mappings in the given range. + static void print_memory_mappings(char* addr, size_t bytes, outputStream* st); + // Prints all mappings + static void print_memory_mappings(outputStream* st); + + // Touch memory pages that cover the memory range from start to end + // (exclusive) to make the OS back the memory range with actual memory. + // Other threads may use the memory range concurrently with pretouch. + static void pretouch_memory(void* start, void* end, size_t page_size = vm_page_size()); + + enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX }; + static bool protect_memory(char* addr, size_t bytes, ProtType prot, + bool is_committed = true); + + static bool guard_memory(char* addr, size_t bytes); + static bool unguard_memory(char* addr, size_t bytes); + static bool create_stack_guard_pages(char* addr, size_t bytes); + static bool pd_create_stack_guard_pages(char* addr, size_t bytes); + static bool remove_stack_guard_pages(char* addr, size_t bytes); + // Helper function to create a new file with template jvmheap.XXXXXX. 
+ // Returns a valid fd on success or else returns -1 + static int create_file_for_heap(const char* dir); + // Map memory to the file referred by fd. This function is slightly different from map_memory() + // and is added to be used for implementation of -XX:AllocateHeapAt + static char* map_memory_to_file(size_t size, int fd); + static char* map_memory_to_file_aligned(size_t size, size_t alignment, int fd); + static char* map_memory_to_file(char* base, size_t size, int fd); + static char* attempt_map_memory_to_file_at(char* base, size_t size, int fd); + // Replace existing reserved memory with file mapping + static char* replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd); + + static char* map_memory(int fd, const char* file_name, size_t file_offset, + char *addr, size_t bytes, bool read_only = false, + bool allow_exec = false, MEMFLAGS flags = mtNone); + static char* remap_memory(int fd, const char* file_name, size_t file_offset, + char *addr, size_t bytes, bool read_only, + bool allow_exec); + static bool unmap_memory(char *addr, size_t bytes); + static void free_memory(char *addr, size_t bytes, size_t alignment_hint); + static void realign_memory(char *addr, size_t bytes, size_t alignment_hint); + + // NUMA-specific interface + static bool numa_has_group_homing(); + static void numa_make_local(char *addr, size_t bytes, int lgrp_hint); + static void numa_make_global(char *addr, size_t bytes); + static size_t numa_get_groups_num(); + static size_t numa_get_leaf_groups(uint *ids, size_t size); + static bool numa_topology_changed(); + static int numa_get_group_id(); + static int numa_get_group_id_for_address(const void* address); + static bool numa_get_group_ids_for_range(const void** addresses, int* lgrp_ids, size_t count); + + // Page manipulation + struct page_info { + size_t size; + int lgrp_id; + }; + static char* non_memory_address_word(); + // reserve, commit and pin the entire memory region + static char* 
reserve_memory_special(size_t size, size_t alignment, size_t page_size, + char* addr, bool executable); + static bool release_memory_special(char* addr, size_t bytes); + static void large_page_init(); + static size_t large_page_size(); + static bool can_commit_large_page_memory(); + + // Check if pointer points to readable memory (by 4-byte read access) + static bool is_readable_pointer(const void* p); + static bool is_readable_range(const void* from, const void* to); + + // threads + + enum ThreadType { + vm_thread, + gc_thread, // GC thread + java_thread, // Java, JVMTIAgent and Service threads. + compiler_thread, + watcher_thread, + asynclog_thread, // dedicated to flushing logs + os_thread + }; + + static bool create_thread(Thread* thread, + ThreadType thr_type, + size_t req_stack_size = 0); + + // The "main thread", also known as "starting thread", is the thread + // that loads/creates the JVM via JNI_CreateJavaVM. + static bool create_main_thread(JavaThread* thread); + + // The primordial thread is the initial process thread. The java + // launcher never uses the primordial thread as the main thread, but + // applications that host the JVM directly may do so. Some platforms + // need special-case handling of the primordial thread if it attaches + // to the VM. + static bool is_primordial_thread(void) +#if defined(_WINDOWS) || defined(BSD) + // No way to identify the primordial thread. + { return false; } +#else + ; +#endif + + static bool create_attached_thread(JavaThread* thread); + static void pd_start_thread(Thread* thread); + static void start_thread(Thread* thread); + + // Returns true if successful. + static bool signal_thread(Thread* thread, int sig, const char* reason); + + static void free_thread(OSThread* osthread); + + // thread id on Linux/64bit is 64bit, on Windows it's 32bit + static intx current_thread_id(); + static int current_process_id(); + + // Short standalone OS sleep routines suitable for slow path spin loop. 
+ // Ignores safepoints/suspension/Thread.interrupt() (so keep it short). + // ms/ns = 0, will sleep for the least amount of time allowed by the OS. + // Maximum sleep time is just under 1 second. + static void naked_short_sleep(jlong ms); + static void naked_short_nanosleep(jlong ns); + // Longer standalone OS sleep routine - a convenience wrapper around + // multiple calls to naked_short_sleep. Only for use by non-JavaThreads. + static void naked_sleep(jlong millis); + // Never returns, use with CAUTION + ATTRIBUTE_NORETURN static void infinite_sleep(); + static void naked_yield () ; + static OSReturn set_priority(Thread* thread, ThreadPriority priority); + static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority); + + static address fetch_frame_from_context(const void* ucVoid, intptr_t** sp, intptr_t** fp); + static frame fetch_frame_from_context(const void* ucVoid); + static frame fetch_compiled_frame_from_context(const void* ucVoid); + + static void breakpoint(); + static bool start_debugging(char *buf, int buflen); + + static address current_stack_pointer(); + static void current_stack_base_and_size(address* base, size_t* size); + + static void verify_stack_alignment() PRODUCT_RETURN; + + static bool message_box(const char* title, const char* message); + + // run cmd in a separate process and return its exit code; or -1 on failures. + // Note: only safe to use in fatal error situations. + static int fork_and_exec(const char *cmd); + + // Call ::exit() on all platforms + ATTRIBUTE_NORETURN static void exit(int num); + + // Call ::_exit() on all platforms. Similar semantics to die() except we never + // want a core dump. + ATTRIBUTE_NORETURN static void _exit(int num); + + // Terminate the VM, but don't exit the process + static void shutdown(); + + // Terminate with an error. Default is to generate a core file on platforms + // that support such things. This calls shutdown() and then aborts. 
+ ATTRIBUTE_NORETURN static void abort(bool dump_core, void *siginfo, const void *context); + ATTRIBUTE_NORETURN static void abort(bool dump_core = true); + + // Die immediately, no exit hook, no abort hook, no cleanup. + // Dump a core file, if possible, for debugging. os::abort() is the + // preferred means to abort the VM on error. os::die() should only + // be called if something has gone badly wrong. CreateCoredumpOnCrash + // is intentionally not honored by this function. + ATTRIBUTE_NORETURN static void die(); + + // File i/o operations + static int open(const char *path, int oflag, int mode); + static FILE* fdopen(int fd, const char* mode); +#endif // !NATIVE_IMAGE + static FILE* fopen(const char* path, const char* mode); +#ifndef NATIVE_IMAGE + static jlong lseek(int fd, jlong offset, int whence); + static bool file_exists(const char* file); + // This function, on Windows, canonicalizes a given path (see os_windows.cpp for details). + // On Posix, this function is a noop: it does not change anything and just returns + // the input pointer. + static char* native_path(char *path); + static int ftruncate(int fd, jlong length); + static int get_fileno(FILE* fp); + static void flockfile(FILE* fp); + static void funlockfile(FILE* fp); + + static int compare_file_modified_times(const char* file1, const char* file2); + + static bool same_files(const char* file1, const char* file2); + + //File i/o operations + + static ssize_t read_at(int fd, void *buf, unsigned int nBytes, jlong offset); + // Writes the bytes completely. Returns true on success, false otherwise. + static bool write(int fd, const void *buf, size_t nBytes); + + // Reading directories. 
+ static DIR* opendir(const char* dirname); + static struct dirent* readdir(DIR* dirp); + static int closedir(DIR* dirp); + + static const char* get_temp_directory(); + static const char* get_current_directory(char *buf, size_t buflen); + + static void prepare_native_symbols(); + + // Builds the platform-specific name of a library. + // Returns false if the buffer is too small. + static bool dll_build_name(char* buffer, size_t size, + const char* fname); + + // Builds a platform-specific full library path given an ld path and + // unadorned library name. Returns true if the buffer contains a full + // path to an existing file, false otherwise. If pathname is empty, + // uses the path to the current directory. + static bool dll_locate_lib(char* buffer, size_t size, + const char* pathname, const char* fname); + + // Symbol lookup, find nearest function name; basically it implements + // dladdr() for all platforms. Name of the nearest function is copied + // to buf. Distance from its base address is optionally returned as offset. + // If function name is not found, buf[0] is set to '\0' and offset is + // set to -1 (if offset is non-null). + static bool dll_address_to_function_name(address addr, char* buf, + int buflen, int* offset, + bool demangle = true); + + // Locate DLL/DSO. On success, full path of the library is copied to + // buf, and offset is optionally set to be the distance between addr + // and the library's base address. On failure, buf[0] is set to '\0' + // and offset is set to -1 (if offset is non-null). + static bool dll_address_to_library_name(address addr, char* buf, + int buflen, int* offset); + + // Given an address, attempt to locate both the symbol and the library it + // resides in. If at least one of these steps was successful, prints information + // and returns true. 
+ // - if no scratch buffer is given, stack is used + // - shorten_paths: path is omitted from library name + // - demangle: function name is demangled + // - strip_arguments: arguments are stripped (requires demangle=true) + // On success prints either one of: + // "+ in " + // "+" + // "
in +" + static bool print_function_and_library_name(outputStream* st, + address addr, + char* buf = nullptr, int buflen = 0, + bool shorten_paths = true, + bool demangle = true, + bool strip_arguments = false); + + // Used only on PPC. + inline static void* resolve_function_descriptor(void* p); + + // Find out whether the pc is in the static code for jvm.dll/libjvm.so. + static bool address_is_in_vm(address addr); + + // Loads .dll/.so and + // in case of error it checks if .dll/.so was built for the + // same architecture as HotSpot is running on + // in case of an error null is returned and an error message is stored in ebuf + static void* dll_load(const char *name, char *ebuf, int ebuflen); + + // lookup symbol in a shared library + static void* dll_lookup(void* handle, const char* name); + + // Unload library + static void dll_unload(void *lib); + + // Callback for loaded module information + // Input parameters: + // char* module_file_name, + // address module_base_addr, + // address module_top_addr, + // void* param + typedef int (*LoadedModulesCallbackFunc)(const char *, address, address, void *); + + static int get_loaded_modules_info(LoadedModulesCallbackFunc callback, void *param); + + // Return the handle of this process + static void* get_default_process_handle(); + + // Check for static linked agent library + static bool find_builtin_agent(JvmtiAgent *agent_lib, const char *syms[], + size_t syms_len); + + // Find agent entry point + static void *find_agent_function(JvmtiAgent *agent_lib, bool check_lib, + const char *syms[], size_t syms_len); +#endif // !NATIVE_IMAGE + + // Provide C99 compliant versions of these functions, since some versions + // of some platforms don't. + static int vsnprintf(char* buf, size_t len, const char* fmt, va_list args) ATTRIBUTE_PRINTF(3, 0); +#ifndef NATIVE_IMAGE + static int snprintf(char* buf, size_t len, const char* fmt, ...) 
ATTRIBUTE_PRINTF(3, 4); +#endif // !NATIVE_IMAGE + + // Performs snprintf and asserts the result is non-negative (so there was not + // an encoding error) and that the output was not truncated. + static int snprintf_checked(char* buf, size_t len, const char* fmt, ...) ATTRIBUTE_PRINTF(3, 4); + +#ifndef NATIVE_IMAGE + // Get host name in buffer provided + static bool get_host_name(char* buf, size_t buflen); + + // Print out system information; they are called by fatal error handler. + // Output format may be different on different platforms. + static void print_os_info(outputStream* st); + static void print_os_info_brief(outputStream* st); + static void print_cpu_info(outputStream* st, char* buf, size_t buflen); + static void pd_print_cpu_info(outputStream* st, char* buf, size_t buflen); + static void print_summary_info(outputStream* st, char* buf, size_t buflen); + static void print_memory_info(outputStream* st); + static void print_dll_info(outputStream* st); + static void print_environment_variables(outputStream* st, const char** env_list); + static void print_context(outputStream* st, const void* context); + static void print_tos_pc(outputStream* st, const void* context); + static void print_tos(outputStream* st, address sp); + static void print_instructions(outputStream* st, address pc, int unitsize = 1); + static void print_register_info(outputStream* st, const void* context, int& continuation); + static void print_register_info(outputStream* st, const void* context); + static bool signal_sent_by_kill(const void* siginfo); + static void print_siginfo(outputStream* st, const void* siginfo); + static void print_signal_handlers(outputStream* st, char* buf, size_t buflen); + static void print_date_and_time(outputStream* st, char* buf, size_t buflen); + static void print_elapsed_time(outputStream* st, double time); + + static void print_user_info(outputStream* st); + static void print_active_locale(outputStream* st); + + // helper for output of seconds in days , 
hours and months + static void print_dhm(outputStream* st, const char* startStr, long sec); + + static void print_location(outputStream* st, intptr_t x, bool verbose = false); + static size_t lasterror(char *buf, size_t len); + static int get_last_error(); + + // Send JFR memory info event + static void jfr_report_memory_info() NOT_JFR_RETURN(); +#endif // !NATIVE_IMAGE + + // Replacement for strerror(). + // Will return the english description of the error (e.g. "File not found", as + // suggested in the POSIX standard. + // Will return "Unknown error" for an unknown errno value. + // Will not attempt to localize the returned string. + // Will always return a valid string which is a static constant. + // Will not change the value of errno. + static const char* strerror(int e); + + // Will return the literalized version of the given errno (e.g. "EINVAL" + // for EINVAL). + // Will return "Unknown error" for an unknown errno value. + // Will always return a valid string which is a static constant. + // Will not change the value of errno. + static const char* errno_name(int e); + +#ifndef NATIVE_IMAGE + // wait for a key press if PauseAtExit is set + static void wait_for_keypress_at_exit(void); + + // The following two functions are used by fatal error handler to trace + // native (C) frames. They are not part of frame.hpp/frame.cpp because + // frame.hpp/cpp assume thread is JavaThread, and also because different + // OS/compiler may have different convention or provide different API to + // walk C frames. + // + // We don't attempt to become a debugger, so we only follow frames if that + // does not require a lookup in the unwind table, which is part of the binary + // file but may be unsafe to read after a fatal error. So on x86, we can + // only walk stack if %ebp is used as frame pointer; on ia64, it's not + // possible to walk C stack without having the unwind table. 
+ static bool is_first_C_frame(frame *fr); + static frame get_sender_for_C_frame(frame *fr); + + // return current frame. pc() and sp() are set to null on failure. + static frame current_frame(); + + static void print_hex_dump(outputStream* st, address start, address end, int unitsize, + int bytes_per_line, address logical_start); + static void print_hex_dump(outputStream* st, address start, address end, int unitsize) { + print_hex_dump(st, start, end, unitsize, /*bytes_per_line=*/16, /*logical_start=*/start); + } + + // returns a string to describe the exception/signal; + // returns null if exception_code is not an OS exception/signal. + static const char* exception_name(int exception_code, char* buf, size_t buflen); + + // Returns the signal number (e.g. 11) for a given signal name (SIGSEGV). + static int get_signal_number(const char* signal_name); + + // Returns native Java library, loads if necessary + static void* native_java_library(); + + // Fills in path to jvm.dll/libjvm.so (used by the Disassembler) + static void jvm_path(char *buf, jint buflen); + + // JNI names + static void print_jni_name_prefix_on(outputStream* st, int args_size); + static void print_jni_name_suffix_on(outputStream* st, int args_size); + + // Init os specific system properties values + static void init_system_properties_values(); + + // IO operations, non-JVM_ version. + static int stat(const char* path, struct stat* sbuf); + static bool dir_is_empty(const char* path); + + // IO operations on binary files + static int create_binary_file(const char* path, bool rewrite_existing); + static jlong current_file_offset(int fd); + static jlong seek_to_file_offset(int fd, jlong offset); + + // Retrieve native stack frames. + // Parameter: + // stack: an array to storage stack pointers. + // frames: size of above array. + // toSkip: number of stack frames to skip at the beginning. + // Return: number of stack frames captured. 
+ static int get_native_stack(address* stack, int size, int toSkip = 0); +#endif // !NATIVE_IMAGE + + // General allocation (must be MT-safe) +#ifndef NATIVE_IMAGE + static void* malloc (size_t size, MEMFLAGS flags, const NativeCallStack& stack); +#endif // !NATIVE_IMAGE + static void* malloc (size_t size, MEMFLAGS flags); +#ifndef NATIVE_IMAGE + static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack); +#endif // !NATIVE_IMAGE + static void* realloc (void *memblock, size_t size, MEMFLAGS flag); + + // handles null pointers + static void free (void *memblock); + static char* strdup(const char *, MEMFLAGS flags = mtInternal); // Like strdup +#ifndef NATIVE_IMAGE + // Like strdup, but exit VM when strdup() returns null + static char* strdup_check_oom(const char*, MEMFLAGS flags = mtInternal); + + // SocketInterface (ex HPI SocketInterface ) + static int socket_close(int fd); + static ssize_t recv(int fd, char* buf, size_t nBytes, uint flags); + static ssize_t send(int fd, char* buf, size_t nBytes, uint flags); + static ssize_t raw_send(int fd, char* buf, size_t nBytes, uint flags); + static ssize_t connect(int fd, struct sockaddr* him, socklen_t len); + + // Support for signals + static void initialize_jdk_signal_support(TRAPS); + static void signal_notify(int signal_number); + static int signal_wait(); + static void terminate_signal_thread(); + static int sigexitnum_pd(); + + // random number generation + static int random(); // return 32bit pseudorandom number + static int next_random(unsigned int rand_seed); // pure version of random() + static void init_random(unsigned int initval); // initialize random sequence + + // Structured OS Exception support + static void os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method, JavaCallArguments* args, JavaThread* thread); + + // On Posix compatible OS it will simply check core dump limits while on Windows + // it will check if dump file can be created. 
Check or prepare a core dump to be + // taken at a later point in the same thread in os::abort(). Use the caller + // provided buffer as a scratch buffer. The status message which will be written + // into the error log either is file location or a short error message, depending + // on the checking result. + static void check_dump_limit(char* buffer, size_t bufferSize); + + // Get the default path to the core file + // Returns the length of the string + static int get_core_path(char* buffer, size_t bufferSize); + + // JVMTI & JVM monitoring and management support + // The thread_cpu_time() and current_thread_cpu_time() are only + // supported if is_thread_cpu_time_supported() returns true. + + // Thread CPU Time - return the fast estimate on a platform + // On Linux - fast clock_gettime where available - user+sys + // - otherwise: very slow /proc fs - user+sys + // On Windows - GetThreadTimes - user+sys + static jlong current_thread_cpu_time(); + static jlong thread_cpu_time(Thread* t); + + // Thread CPU Time with user_sys_cpu_time parameter. + // + // If user_sys_cpu_time is true, user+sys time is returned. + // Otherwise, only user time is returned + static jlong current_thread_cpu_time(bool user_sys_cpu_time); + static jlong thread_cpu_time(Thread* t, bool user_sys_cpu_time); + + // Return a bunch of info about the timers. + // Note that the returned info for these two functions may be different + // on some platforms + static void current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr); + static void thread_cpu_time_info(jvmtiTimerInfo *info_ptr); + + static bool is_thread_cpu_time_supported(); + + // System loadavg support. Returns -1 if load average cannot be obtained. + static int loadavg(double loadavg[], int nelem); + + // Amount beyond the callee frame size that we bang the stack. 
+ static int extra_bang_size_in_bytes(); + + static char** split_path(const char* path, size_t* elements, size_t file_name_length); + + // support for mapping non-volatile memory using MAP_SYNC + static bool supports_map_sync(); + + public: + + // File conventions + static const char* file_separator(); + static const char* line_separator(); + static const char* path_separator(); + + // Information about the protection of the page at address '0' on this os. + inline static bool zero_page_read_protected(); + + static void setup_fpu(); + static juint cpu_microcode_revision(); + + static inline jlong rdtsc(); + + // Used to register dynamic code cache area with the OS + // Note: Currently only used in 64 bit Windows implementations + inline static bool register_code_area(char *low, char *high); +#endif // !NATIVE_IMAGE + + // Platform-specific code for interacting with individual OSes. + // TODO: This is for compatibility only with current usage of os::Linux, etc. + // We can get rid of the following block if we rename such a class to something + // like ::LinuxUtils +#if defined(AIX) + class Aix; +#elif defined(BSD) + class Bsd; +#elif defined(LINUX) + class Linux; +#elif defined(_WINDOWS) + class win32; +#endif + + // Ditto - Posix-specific API. Ideally should be moved to something like ::PosixUtils. 
+#ifndef _WINDOWS + class Posix; +#endif + +#ifndef NATIVE_IMAGE + // FIXME - some random stuff that was in os_windows.hpp +#ifdef _WINDOWS + // strtok_s is the Windows thread-safe equivalent of POSIX strtok_r +# define strtok_r strtok_s +# define S_ISCHR(mode) (((mode) & _S_IFCHR) == _S_IFCHR) +# define S_ISFIFO(mode) (((mode) & _S_IFIFO) == _S_IFIFO) +#endif + +#ifndef OS_NATIVE_THREAD_CREATION_FAILED_MSG +#define OS_NATIVE_THREAD_CREATION_FAILED_MSG "unable to create native thread: possibly out of memory or process/resource limits reached" +#endif + + public: + inline static bool platform_print_native_stack(outputStream* st, const void* context, + char *buf, int buf_size, address& lastpc); + + // debugging support (mostly used by debug.cpp but also fatal error handler) + static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address + + static bool dont_yield(); // when true, JVM_Yield() is nop + + // Thread priority helpers (implemented in OS-specific part) + static OSReturn set_native_priority(Thread* thread, int native_prio); + static OSReturn get_native_priority(const Thread* const thread, int* priority_ptr); + static int java_to_os_priority[CriticalPriority + 1]; + // Hint to the underlying OS that a task switch would not be good. + // Void return because it's a hint and can fail. + static const char* native_thread_creation_failed_msg() { + return OS_NATIVE_THREAD_CREATION_FAILED_MSG; + } + + // Used at creation if requested by the diagnostic flag PauseAtStartup. + // Causes the VM to wait until an external stimulus has been applied + // (for Unix, that stimulus is a signal, for Windows, an external + // ResumeThread call) + static void pause(); + + // Builds a platform dependent Agent_OnLoad_ function name + // which is used to find statically linked in agents. 
+ static char* build_agent_function_name(const char *sym, const char *cname, + bool is_absolute_path); + +#if defined(__APPLE__) && defined(AARCH64) + // Enables write or execute access to writeable and executable pages. + static void current_thread_enable_wx(WXMode mode); +#endif // __APPLE__ && AARCH64 +#endif // !NATIVE_IMAGE + + protected: +#ifndef NATIVE_IMAGE + static volatile unsigned int _rand_seed; // seed for random number generator +#endif // !NATIVE_IMAGE + static int _processor_count; // number of processors +#ifndef NATIVE_IMAGE + static int _initial_active_processor_count; // number of active processors during initialization. + + static char* format_boot_path(const char* format_string, + const char* home, + int home_len, + char fileSep, + char pathSep); + static bool set_boot_path(char fileSep, char pathSep); + + static bool pd_dll_unload(void* libhandle, char* ebuf, int ebuflen); +#endif // !NATIVE_IMAGE +}; + +#ifndef NATIVE_IMAGE +// Note that "PAUSE" is almost always used with synchronization +// so arguably we should provide Atomic::SpinPause() instead +// of the global SpinPause() with C linkage. +// It'd also be eligible for inlining on many platforms. + +extern "C" int SpinPause(); +#endif // !NATIVE_IMAGE + +#endif // SHARE_RUNTIME_OS_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/runtime/os.inline.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/runtime/os.inline.hpp new file mode 100644 index 000000000000..0719c472f3f8 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/runtime/os.inline.hpp @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_RUNTIME_OS_INLINE_HPP +#define SHARE_RUNTIME_OS_INLINE_HPP + +#include "runtime/os.hpp" + +#ifndef NATIVE_IMAGE +#include OS_HEADER_INLINE(os) +#include OS_CPU_HEADER_INLINE(os) + +// Below are inline functions that are rarely implemented by the platforms. +// Provide default empty implementation. + +#ifndef HAVE_PLATFORM_PRINT_NATIVE_STACK +inline bool os::platform_print_native_stack(outputStream* st, const void* context, + char *buf, int buf_size, address& lastpc) { + return false; +} +#endif + +#ifndef HAVE_CDS_CORE_REGION_ALIGNMENT +inline size_t os::cds_core_region_alignment() { + return (size_t)os::vm_allocation_granularity(); +} +#endif + +#ifndef _WINDOWS +// Currently used only on Windows. 
+inline bool os::register_code_area(char *low, char *high) { + return true; +} +#endif + +#ifndef HAVE_FUNCTION_DESCRIPTORS +inline void* os::resolve_function_descriptor(void* p) { + return nullptr; +} +#endif +#endif // !NATIVE_IMAGE + +#endif // SHARE_RUNTIME_OS_INLINE_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/attributeNoreturn.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/attributeNoreturn.hpp new file mode 100644 index 000000000000..8706ac44c157 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/attributeNoreturn.hpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_UTILITY_ATTRIBUTENORETURN_HPP +#define SHARE_UTILITY_ATTRIBUTENORETURN_HPP + +// Provide a (temporary) macro for the [[noreturn]] attribute. +// +// Unfortunately, some older (though still in use) compilers have bugs when +// using [[noreturn]]. For them we use an empty definition for the attribute. +// +// Note: This can't be placed in globalDefinitions_xxx.hpp because the +// attribute is used in debug.hpp, which can't include globalDefinitions.hpp. + +// clang 12 (and possibly prior) crashes during build if we use [[noreturn]] +// for assertion failure reporting functions. The problem seems to be fixed +// in clang 13. +#ifdef __clang__ +#if __clang_major__ < 13 +#define ATTRIBUTE_NORETURN +#endif +#endif + +// All other platforms can use [[noreturn]]. +#ifndef ATTRIBUTE_NORETURN +#define ATTRIBUTE_NORETURN [[noreturn]] +#endif + +#endif // SHARE_UTILITY_ATTRIBUTENORETURN_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/checkedCast.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/checkedCast.hpp new file mode 100644 index 000000000000..3379586aded9 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/checkedCast.hpp @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_UTILITIES_CHECKEDCAST_HPP
+#define SHARE_UTILITIES_CHECKEDCAST_HPP
+
+#include "utilities/debug.hpp"
+
+// In many places we've added C-style casts to silence compiler
+// warnings, for example when truncating a size_t to an int when we
+// know the size_t is a small struct. Such casts are risky because
+// they effectively disable useful compiler warnings. We can make our
+// lives safer with this function, which ensures that any cast is
+// reversible without loss of information. It doesn't check
+// everything: it isn't intended to make sure that pointer types are
+// compatible, for example.
+template <typename T2, typename T1>
+constexpr T2 checked_cast(T1 thing) {
+  T2 result = static_cast<T2>(thing);
+  assert(static_cast<T1>(result) == thing, "must be");
+  return result;
+}
+
+#endif // SHARE_UTILITIES_CHECKEDCAST_HPP
+
diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/compilerWarnings.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/compilerWarnings.hpp
new file mode 100644
index 000000000000..df4ca62cd6ee
--- /dev/null
+++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/compilerWarnings.hpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_UTILITIES_COMPILERWARNINGS_HPP +#define SHARE_UTILITIES_COMPILERWARNINGS_HPP + +// Macros related to control of compiler warnings. + +#include "utilities/macros.hpp" + +#include COMPILER_HEADER(utilities/compilerWarnings) + +// Defaults when not defined for the TARGET_COMPILER_xxx. 
+ +#ifndef PRAGMA_DIAG_PUSH +#define PRAGMA_DIAG_PUSH +#endif +#ifndef PRAGMA_DIAG_POP +#define PRAGMA_DIAG_POP +#endif + +#ifndef PRAGMA_DISABLE_GCC_WARNING +#define PRAGMA_DISABLE_GCC_WARNING(name) +#endif + +#ifndef PRAGMA_DISABLE_MSVC_WARNING +#define PRAGMA_DISABLE_MSVC_WARNING(num) +#endif + +#ifndef ATTRIBUTE_PRINTF +#define ATTRIBUTE_PRINTF(fmt, vargs) +#endif +#ifndef ATTRIBUTE_SCANF +#define ATTRIBUTE_SCANF(fmt, vargs) +#endif + +#ifndef PRAGMA_DANGLING_POINTER_IGNORED +#define PRAGMA_DANGLING_POINTER_IGNORED +#endif + +#ifndef PRAGMA_FORMAT_NONLITERAL_IGNORED +#define PRAGMA_FORMAT_NONLITERAL_IGNORED +#endif +#ifndef PRAGMA_FORMAT_IGNORED +#define PRAGMA_FORMAT_IGNORED +#endif + +#ifndef PRAGMA_STRINGOP_TRUNCATION_IGNORED +#define PRAGMA_STRINGOP_TRUNCATION_IGNORED +#endif + +#ifndef PRAGMA_STRINGOP_OVERFLOW_IGNORED +#define PRAGMA_STRINGOP_OVERFLOW_IGNORED +#endif + +#ifndef PRAGMA_INFINITE_RECURSION_IGNORED +#define PRAGMA_INFINITE_RECURSION_IGNORED +#endif + +#ifndef PRAGMA_NONNULL_IGNORED +#define PRAGMA_NONNULL_IGNORED +#endif + +// Support warnings for use of certain C functions, except where explicitly +// permitted. +// +// FORBID_C_FUNCTION(signature, alternative) +// - signature: the function that should not normally be used. +// - alternative: a string that may be used in a warning about a use, typically +// suggesting an alternative. +// +// ALLOW_C_FUNCTION(name, ... using statement ...) +// - name: the name of a forbidden function whose use is permitted in statement. +// - statement: a use of the otherwise forbidden function. Using a variadic +// tail allows the statement to contain non-nested commas. + +#ifndef FORBID_C_FUNCTION +#define FORBID_C_FUNCTION(signature, alternative) +#endif + +#ifndef ALLOW_C_FUNCTION +#define ALLOW_C_FUNCTION(name, ...) 
__VA_ARGS__ +#endif + +#endif // SHARE_UTILITIES_COMPILERWARNINGS_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/compilerWarnings_gcc.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/compilerWarnings_gcc.hpp new file mode 100644 index 000000000000..16fab0596cdb --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/compilerWarnings_gcc.hpp @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_UTILITIES_COMPILERWARNINGS_GCC_HPP +#define SHARE_UTILITIES_COMPILERWARNINGS_GCC_HPP + +// Macros related to control of compiler warnings. 
+ +#ifndef ATTRIBUTE_PRINTF +#define ATTRIBUTE_PRINTF(fmt,vargs) __attribute__((format(printf, fmt, vargs))) +#endif +#ifndef ATTRIBUTE_SCANF +#define ATTRIBUTE_SCANF(fmt,vargs) __attribute__((format(scanf, fmt, vargs))) +#endif + +#define PRAGMA_DISABLE_GCC_WARNING(optstring) _Pragma(STR(GCC diagnostic ignored optstring)) + +#define PRAGMA_DIAG_PUSH _Pragma("GCC diagnostic push") +#define PRAGMA_DIAG_POP _Pragma("GCC diagnostic pop") + +#if !defined(__clang_major__) && (__GNUC__ >= 12) +// Disable -Wdangling-pointer which is introduced in GCC 12. +#define PRAGMA_DANGLING_POINTER_IGNORED PRAGMA_DISABLE_GCC_WARNING("-Wdangling-pointer") + +// Disable -Winfinite-recursion which is introduced in GCC 12. +#define PRAGMA_INFINITE_RECURSION_IGNORED PRAGMA_DISABLE_GCC_WARNING("-Winfinite-recursion") +#endif + +#define PRAGMA_FORMAT_NONLITERAL_IGNORED \ + PRAGMA_DISABLE_GCC_WARNING("-Wformat-nonliteral") \ + PRAGMA_DISABLE_GCC_WARNING("-Wformat-security") + +#define PRAGMA_FORMAT_IGNORED PRAGMA_DISABLE_GCC_WARNING("-Wformat") + +// Disable -Wstringop-truncation which is introduced in GCC 8. +// https://gcc.gnu.org/gcc-8/changes.html +#if !defined(__clang_major__) && (__GNUC__ >= 8) +#define PRAGMA_STRINGOP_TRUNCATION_IGNORED PRAGMA_DISABLE_GCC_WARNING("-Wstringop-truncation") +#endif + +// Disable -Wstringop-overflow which is introduced in GCC 7. +// https://gcc.gnu.org/gcc-7/changes.html +#if !defined(__clang_major__) && (__GNUC__ >= 7) +#define PRAGMA_STRINGOP_OVERFLOW_IGNORED PRAGMA_DISABLE_GCC_WARNING("-Wstringop-overflow") +#endif + +#define PRAGMA_NONNULL_IGNORED PRAGMA_DISABLE_GCC_WARNING("-Wnonnull") + +#if (__GNUC__ >= 10) +// TODO: Re-enable warning attribute for Clang once +// https://github.com/llvm/llvm-project/issues/56519 is fixed and released. +// || (defined(__clang_major__) && (__clang_major__ >= 14)) + +// Use "warning" attribute to detect uses of "forbidden" functions. 
+// +// Note: The warning attribute is available since GCC 9, but disabling pragmas +// does not work reliably in ALLOW_C_FUNCTION. GCC 10+ and up work fine. +// +// Note: _FORTIFY_SOURCE transforms calls to certain functions into calls to +// associated "checking" functions, and that transformation seems to occur +// *before* the attribute check. We use fortification in fastdebug builds, +// so uses of functions that are both forbidden and fortified won't cause +// forbidden warnings in such builds. +#define FORBID_C_FUNCTION(signature, alternative) \ + extern "C" __attribute__((__warning__(alternative))) signature; + +// Disable warning attribute over the scope of the affected statement. +// The name serves only to document the intended function. +#define ALLOW_C_FUNCTION(name, ...) \ + PRAGMA_DIAG_PUSH \ + PRAGMA_DISABLE_GCC_WARNING("-Wattribute-warning") \ + __VA_ARGS__ \ + PRAGMA_DIAG_POP + +#endif // gcc10+ + +#endif // SHARE_UTILITIES_COMPILERWARNINGS_GCC_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/globalDefinitions.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/globalDefinitions.hpp new file mode 100644 index 000000000000..88a516a4c496 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/globalDefinitions.hpp @@ -0,0 +1,1365 @@ +/* + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. 
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_UTILITIES_GLOBALDEFINITIONS_HPP
+#define SHARE_UTILITIES_GLOBALDEFINITIONS_HPP
+
+#ifndef NATIVE_IMAGE
+#include "utilities/attributeNoreturn.hpp"
+#endif // !NATIVE_IMAGE
+#include "utilities/compilerWarnings.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
+
+#ifndef NATIVE_IMAGE
+// Get constants like JVM_T_CHAR and JVM_SIGNATURE_INT, before pulling in <jvm.h>.
+#include "classfile_constants.h"
+#endif // !NATIVE_IMAGE
+
+#include COMPILER_HEADER(utilities/globalDefinitions)
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#ifndef NATIVE_IMAGE
+class oopDesc;
+#endif // !NATIVE_IMAGE
+
+// Defaults for macros that might be defined per compiler.
+#ifndef NOINLINE
+#define NOINLINE
+#endif
+#ifndef ALWAYSINLINE
+#define ALWAYSINLINE inline
+#endif
+
+#ifndef NATIVE_IMAGE
+#ifndef ATTRIBUTE_ALIGNED
+#define ATTRIBUTE_ALIGNED(x) alignas(x)
+#endif
+
+#ifndef ATTRIBUTE_FLATTEN
+#define ATTRIBUTE_FLATTEN
+#endif
+
+// These are #defines to selectively turn on/off the Print(Opto)Assembly
+// capabilities. Choices should be led by a tradeoff between
+// code size and improved supportability.
+// if PRINT_ASSEMBLY then PRINT_ABSTRACT_ASSEMBLY must be true as well
+// to have a fallback in case hsdis is not available.
+#if defined(PRODUCT) + #define SUPPORT_ABSTRACT_ASSEMBLY + #define SUPPORT_ASSEMBLY + #undef SUPPORT_OPTO_ASSEMBLY // Can't activate. In PRODUCT, many dump methods are missing. + #undef SUPPORT_DATA_STRUCTS // Of limited use. In PRODUCT, many print methods are empty. +#else + #define SUPPORT_ABSTRACT_ASSEMBLY + #define SUPPORT_ASSEMBLY + #define SUPPORT_OPTO_ASSEMBLY + #define SUPPORT_DATA_STRUCTS +#endif +#if defined(SUPPORT_ASSEMBLY) && !defined(SUPPORT_ABSTRACT_ASSEMBLY) + #define SUPPORT_ABSTRACT_ASSEMBLY +#endif + +// This file holds all globally used constants & types, class (forward) +// declarations and a few frequently used utility functions. + +// Declare the named class to be noncopyable. This macro must be followed by +// a semi-colon. The macro provides deleted declarations for the class's copy +// constructor and assignment operator. Because these operations are deleted, +// they cannot be defined and potential callers will fail to compile. +#define NONCOPYABLE(C) C(C const&) = delete; C& operator=(C const&) = delete /* next token must be ; */ + + +//---------------------------------------------------------------------------------------------------- +// Printf-style formatters for fixed- and variable-width types as pointers and +// integers. These are derived from the definitions in inttypes.h. If the platform +// doesn't provide appropriate definitions, they should be provided in +// the compiler-specific definitions file (e.g., globalDefinitions_gcc.hpp) + +// Guide to the suffixes used in the format specifiers for integers: +// - print the decimal value: 745565 +// _X - print as hexadecimal, without leading 0s: 0x12345 +// _X_0 - print as hexadecimal, with leading 0s: 0x00012345 +// _W(w) - prints w sized string with the given value right +// adjusted. Use -w to print left adjusted. +// +// Note that the PTR format specifiers print using 0x with leading zeros, +// just like the _X_0 version for integers. + +// Format 8-bit quantities. 
+#define INT8_FORMAT_X_0 "0x%02" PRIx8 +#define UINT8_FORMAT_X_0 "0x%02" PRIx8 + +// Format 16-bit quantities. +#define INT16_FORMAT_X_0 "0x%04" PRIx16 +#define UINT16_FORMAT_X_0 "0x%04" PRIx16 + +// Format 32-bit quantities. +#define INT32_FORMAT "%" PRId32 +#define INT32_FORMAT_X "0x%" PRIx32 +#define INT32_FORMAT_X_0 "0x%08" PRIx32 +#define INT32_FORMAT_W(width) "%" #width PRId32 +#define UINT32_FORMAT "%" PRIu32 +#define UINT32_FORMAT_X "0x%" PRIx32 +#define UINT32_FORMAT_X_0 "0x%08" PRIx32 +#define UINT32_FORMAT_W(width) "%" #width PRIu32 +#endif // !NATIVE_IMAGE + +// Format 64-bit quantities. +#define INT64_FORMAT "%" PRId64 +#define INT64_PLUS_FORMAT "%+" PRId64 +#define INT64_FORMAT_X "0x%" PRIx64 +#define INT64_FORMAT_X_0 "0x%016" PRIx64 +#define INT64_FORMAT_W(width) "%" #width PRId64 +#define UINT64_FORMAT "%" PRIu64 +#define UINT64_FORMAT_X "0x%" PRIx64 +#define UINT64_FORMAT_X_0 "0x%016" PRIx64 +#define UINT64_FORMAT_W(width) "%" #width PRIu64 + +// Format integers which change size between 32- and 64-bit. 
+#define SSIZE_FORMAT "%" PRIdPTR +#define SSIZE_PLUS_FORMAT "%+" PRIdPTR +#define SSIZE_FORMAT_W(width) "%" #width PRIdPTR +#define SIZE_FORMAT "%" PRIuPTR +#define SIZE_FORMAT_X "0x%" PRIxPTR +#ifdef _LP64 +#define SIZE_FORMAT_X_0 "0x%016" PRIxPTR +#else +#define SIZE_FORMAT_X_0 "0x%08" PRIxPTR +#endif +#define SIZE_FORMAT_W(width) "%" #width PRIuPTR + +#define INTX_FORMAT "%" PRIdPTR +#define INTX_FORMAT_X "0x%" PRIxPTR +#define INTX_FORMAT_W(width) "%" #width PRIdPTR +#define UINTX_FORMAT "%" PRIuPTR +#define UINTX_FORMAT_X "0x%" PRIxPTR +#define UINTX_FORMAT_W(width) "%" #width PRIuPTR + +// Format jlong, if necessary +#ifndef JLONG_FORMAT +#define JLONG_FORMAT INT64_FORMAT +#endif +#ifndef JLONG_FORMAT_W +#define JLONG_FORMAT_W(width) INT64_FORMAT_W(width) +#endif +#ifndef JULONG_FORMAT +#define JULONG_FORMAT UINT64_FORMAT +#endif +#ifndef JULONG_FORMAT_X +#define JULONG_FORMAT_X UINT64_FORMAT_X +#endif + +#ifndef NATIVE_IMAGE +// Format pointers which change size between 32- and 64-bit. +#ifdef _LP64 +#define INTPTR_FORMAT "0x%016" PRIxPTR +#define PTR_FORMAT "0x%016" PRIxPTR +#else // !_LP64 +#define INTPTR_FORMAT "0x%08" PRIxPTR +#define PTR_FORMAT "0x%08" PRIxPTR +#endif // _LP64 + +// Convert pointer to intptr_t, for use in printing pointers. +inline intptr_t p2i(const volatile void* p) { + return (intptr_t) p; +} + +#define BOOL_TO_STR(_b_) ((_b_) ? "true" : "false") + +//---------------------------------------------------------------------------------------------------- +// Forbid the use of various C library functions. +// Some of these have os:: replacements that should normally be used instead. +// Others are considered security concerns, with preferred alternatives. 

// FORBID_C_FUNCTION (defined elsewhere) marks a libc function so that direct
// use is flagged; the second argument names the preferred replacement.
FORBID_C_FUNCTION(void exit(int), "use os::exit");
FORBID_C_FUNCTION(void _exit(int), "use os::exit");
FORBID_C_FUNCTION(char* strerror(int), "use os::strerror");
FORBID_C_FUNCTION(char* strtok(char*, const char*), "use strtok_r");
FORBID_C_FUNCTION(int sprintf(char*, const char*, ...), "use os::snprintf");
FORBID_C_FUNCTION(int vsprintf(char*, const char*, va_list), "use os::vsnprintf");
FORBID_C_FUNCTION(int vsnprintf(char*, size_t, const char*, va_list), "use os::vsnprintf");

// All of the following functions return raw C-heap pointers (sometimes as an option, e.g. realpath or getwd)
// or, in case of free(), take raw C-heap pointers. Don't use them unless you are really sure you must.
FORBID_C_FUNCTION(void* malloc(size_t size), "use os::malloc");
FORBID_C_FUNCTION(void* calloc(size_t nmemb, size_t size), "use os::malloc and zero out manually");
FORBID_C_FUNCTION(void free(void *ptr), "use os::free");
FORBID_C_FUNCTION(void* realloc(void *ptr, size_t size), "use os::realloc");
FORBID_C_FUNCTION(char* strdup(const char *s), "use os::strdup");
FORBID_C_FUNCTION(char* strndup(const char *s, size_t n), "don't use");
FORBID_C_FUNCTION(int posix_memalign(void **memptr, size_t alignment, size_t size), "don't use");
FORBID_C_FUNCTION(void* aligned_alloc(size_t alignment, size_t size), "don't use");
FORBID_C_FUNCTION(char* realpath(const char* path, char* resolved_path), "use os::Posix::realpath");
FORBID_C_FUNCTION(char* get_current_dir_name(void), "use os::get_current_directory()");
FORBID_C_FUNCTION(char* getwd(char *buf), "use os::get_current_directory()");
FORBID_C_FUNCTION(wchar_t* wcsdup(const wchar_t *s), "don't use");
FORBID_C_FUNCTION(void* reallocf(void *ptr, size_t size), "don't use");

//----------------------------------------------------------------------------------------------------
// Constants

// Log2 of the byte sizes of the primitive types; the Bytes* constants below
// are derived from these by shifting.
const int LogBytesPerShort = 1;
const int LogBytesPerInt = 2;
#ifdef _LP64
const int LogBytesPerWord = 3;
#else
const int LogBytesPerWord = 2;
#endif
const int LogBytesPerLong = 3;

const int BytesPerShort = 1 << LogBytesPerShort;
const int BytesPerInt = 1 << LogBytesPerInt;
const int BytesPerWord = 1 << LogBytesPerWord;
const int BytesPerLong = 1 << LogBytesPerLong;

const int LogBitsPerByte = 3;
const int LogBitsPerShort = LogBitsPerByte + LogBytesPerShort;
const int LogBitsPerInt = LogBitsPerByte + LogBytesPerInt;
const int LogBitsPerWord = LogBitsPerByte + LogBytesPerWord;
const int LogBitsPerLong = LogBitsPerByte + LogBytesPerLong;

const int BitsPerByte = 1 << LogBitsPerByte;
const int BitsPerShort = 1 << LogBitsPerShort;
const int BitsPerInt = 1 << LogBitsPerInt;
const int BitsPerWord = 1 << LogBitsPerWord;
const int BitsPerLong = 1 << LogBitsPerLong;

const int WordAlignmentMask = (1 << LogBytesPerWord) - 1;
const int LongAlignmentMask = (1 << LogBytesPerLong) - 1;

const int oopSize = sizeof(char*); // Full-width oop
extern int heapOopSize; // Oop within a java object
const int wordSize = sizeof(char*);
const int longSize = sizeof(jlong);
const int jintSize = sizeof(jint);
const int size_tSize = sizeof(size_t);

const int BytesPerOop = BytesPerWord; // Full-width oop

// Heap-oop geometry is runtime-dependent (compressed oops), hence extern.
extern int LogBytesPerHeapOop; // Oop within a java object
extern int LogBitsPerHeapOop;
extern int BytesPerHeapOop;
extern int BitsPerHeapOop;

const int BitsPerJavaInteger = 32;
const int BitsPerJavaLong = 64;
const int BitsPerSize_t = size_tSize * BitsPerByte;

// Size of a char[] needed to represent a jint as a string in decimal.
const int jintAsStringSize = 12;

// An opaque type, so that HeapWord* can be a generic pointer into the heap.
// We require that object sizes be measured in units of heap words (e.g.
// pointer-sized values), so that given HeapWord* hw,
//   hw += oop(hw)->foo();
// works, where foo is a method (like size or scavenge) that returns the
// object size.
class HeapWordImpl; // Opaque, never defined.
typedef HeapWordImpl* HeapWord;

// Analogous opaque struct for metadata allocated from metaspaces.
class MetaWordImpl; // Opaque, never defined.
typedef MetaWordImpl* MetaWord;

// HeapWordSize must be 2^LogHeapWordSize.
const int HeapWordSize = sizeof(HeapWord);
#ifdef _LP64
const int LogHeapWordSize = 3;
#else
const int LogHeapWordSize = 2;
#endif
const int HeapWordsPerLong = BytesPerLong / HeapWordSize;
const int LogHeapWordsPerLong = LogBytesPerLong - LogHeapWordSize;

// The minimum number of native machine words necessary to contain "byte_size"
// bytes.
inline size_t heap_word_size(size_t byte_size) {
  return (byte_size + (HeapWordSize-1)) >> LogHeapWordSize;
}

// Bit-pattern casts between same-sized float/int types; defined further down.
inline jfloat jfloat_cast(jint x);
inline jdouble jdouble_cast(jlong x);
#endif // !NATIVE_IMAGE

//-------------------------------------------
// Constant for jlong (standardized by C++11)

// Build a 64bit integer constant
#define CONST64(x) (x ## LL)
#define UCONST64(x) (x ## ULL)

const jlong min_jlong = CONST64(0x8000000000000000);
const jlong max_jlong = CONST64(0x7fffffffffffffff);

#ifndef NATIVE_IMAGE
//-------------------------------------------
// Constant for jdouble
const jlong min_jlongDouble = CONST64(0x0000000000000001);
const jdouble min_jdouble = jdouble_cast(min_jlongDouble);
const jlong max_jlongDouble = CONST64(0x7fefffffffffffff);
const jdouble max_jdouble = jdouble_cast(max_jlongDouble);
#endif // !NATIVE_IMAGE

const size_t K = 1024;
const size_t M = K*K;
const size_t G = M*K;
#ifndef NATIVE_IMAGE
const size_t HWperKB = K / sizeof(HeapWord);

// Constants for converting from a base unit to milli-base units.  For
// example from seconds to milliseconds and microseconds

const int MILLIUNITS = 1000; // milli units per base unit
const int MICROUNITS = 1000000; // micro units per base unit
const int NANOUNITS = 1000000000; // nano units per base unit
const int NANOUNITS_PER_MILLIUNIT = NANOUNITS / MILLIUNITS;
#endif // !NATIVE_IMAGE

const jlong NANOSECS_PER_SEC = CONST64(1000000000);
const jint NANOSECS_PER_MILLISEC = 1000000;

#ifndef NATIVE_IMAGE
// Unit conversion functions
// The caller is responsible for considering overflow.

inline int64_t nanos_to_millis(int64_t nanos) {
  return nanos / NANOUNITS_PER_MILLIUNIT;
}
inline int64_t millis_to_nanos(int64_t millis) {
  return millis * NANOUNITS_PER_MILLIUNIT;
}

// Proper units routines try to maintain at least three significant digits.
// In worst case, it would print five significant digits with lower prefix.
// G is close to MAX_SIZE on 32-bit platforms, so its product can easily overflow,
// and therefore we need to be careful.
+ +inline const char* proper_unit_for_byte_size(size_t s) { +#ifdef _LP64 + if (s >= 100*G) { + return "G"; + } +#endif + if (s >= 100*M) { + return "M"; + } else if (s >= 100*K) { + return "K"; + } else { + return "B"; + } +} + +template +inline T byte_size_in_proper_unit(T s) { +#ifdef _LP64 + if (s >= 100*G) { + return (T)(s/G); + } +#endif + if (s >= 100*M) { + return (T)(s/M); + } else if (s >= 100*K) { + return (T)(s/K); + } else { + return s; + } +} + +#define PROPERFMT SIZE_FORMAT "%s" +#define PROPERFMTARGS(s) byte_size_in_proper_unit(s), proper_unit_for_byte_size(s) + +inline const char* exact_unit_for_byte_size(size_t s) { +#ifdef _LP64 + if (s >= G && (s % G) == 0) { + return "G"; + } +#endif + if (s >= M && (s % M) == 0) { + return "M"; + } + if (s >= K && (s % K) == 0) { + return "K"; + } + return "B"; +} + +inline size_t byte_size_in_exact_unit(size_t s) { +#ifdef _LP64 + if (s >= G && (s % G) == 0) { + return s / G; + } +#endif + if (s >= M && (s % M) == 0) { + return s / M; + } + if (s >= K && (s % K) == 0) { + return s / K; + } + return s; +} + +#define EXACTFMT SIZE_FORMAT "%s" +#define EXACTFMTARGS(s) byte_size_in_exact_unit(s), exact_unit_for_byte_size(s) + +// Memory size transition formatting. + +#define HEAP_CHANGE_FORMAT "%s: " SIZE_FORMAT "K(" SIZE_FORMAT "K)->" SIZE_FORMAT "K(" SIZE_FORMAT "K)" + +#define HEAP_CHANGE_FORMAT_ARGS(_name_, _prev_used_, _prev_capacity_, _used_, _capacity_) \ + (_name_), (_prev_used_) / K, (_prev_capacity_) / K, (_used_) / K, (_capacity_) / K + +//---------------------------------------------------------------------------------------------------- +// VM type definitions + +// intx and uintx are the 'extended' int and 'extended' unsigned int types; +// they are 32bit wide on a 32-bit platform, and 64bit wide on a 64bit platform. 

typedef intptr_t intx;
typedef uintptr_t uintx;

const intx min_intx = (intx)1 << (sizeof(intx)*BitsPerByte-1);
const intx max_intx = (uintx)min_intx - 1;
const uintx max_uintx = (uintx)-1;

// Table of values:
//      sizeof intx         4               8
// min_intx             0x80000000      0x8000000000000000
// max_intx             0x7FFFFFFF      0x7FFFFFFFFFFFFFFF
// max_uintx            0xFFFFFFFF      0xFFFFFFFFFFFFFFFF

typedef unsigned int uint; NEEDS_CLEANUP

//----------------------------------------------------------------------------------------------------
// Java type definitions

// All kinds of 'plain' byte addresses
typedef signed char s_char;
typedef unsigned char u_char;
typedef u_char* address;

// Pointer subtraction.
// The idea here is to avoid ptrdiff_t, which is signed and so doesn't have
// the range we might need to find differences from one end of the heap
// to the other.
// A typical use might be:
//     if (pointer_delta(end(), top()) >= size) {
//       // enough room for an object of size
//       ...
// and then additions like
//       ... top() + size ...
// are safe because we know that top() is at least size below end().
inline size_t pointer_delta(const volatile void* left,
                            const volatile void* right,
                            size_t element_size) {
  assert(left >= right, "avoid underflow - left: " PTR_FORMAT " right: " PTR_FORMAT, p2i(left), p2i(right));
  return (((uintptr_t) left) - ((uintptr_t) right)) / element_size;
}

// A version specialized for HeapWord*'s.
inline size_t pointer_delta(const HeapWord* left, const HeapWord* right) {
  return pointer_delta(left, right, sizeof(HeapWord));
}
// A version specialized for MetaWord*'s.
inline size_t pointer_delta(const MetaWord* left, const MetaWord* right) {
  return pointer_delta(left, right, sizeof(MetaWord));
}

// pointer_delta_as_int is called to do pointer subtraction for nearby pointers that
// returns a non-negative int, usually used as a size of a code buffer range.
// This scales to sizeof(T).
+template +inline int pointer_delta_as_int(const volatile T* left, const volatile T* right) { + size_t delta = pointer_delta(left, right, sizeof(T)); + assert(delta <= size_t(INT_MAX), "pointer delta out of range: %zu", delta); + return static_cast(delta); +} + +// +// ANSI C++ does not allow casting from one pointer type to a function pointer +// directly without at best a warning. This macro accomplishes it silently +// In every case that is present at this point the value be cast is a pointer +// to a C linkage function. In some case the type used for the cast reflects +// that linkage and a picky compiler would not complain. In other cases because +// there is no convenient place to place a typedef with extern C linkage (i.e +// a platform dependent header file) it doesn't. At this point no compiler seems +// picky enough to catch these instances (which are few). It is possible that +// using templates could fix these for all cases. This use of templates is likely +// so far from the middle of the road that it is likely to be problematic in +// many C++ compilers. +// +#define CAST_TO_FN_PTR(func_type, value) (reinterpret_cast(value)) +#define CAST_FROM_FN_PTR(new_type, func_ptr) ((new_type)((uintptr_t)(func_ptr))) + +// Need the correct linkage to call qsort without warnings +extern "C" { + typedef int (*_sort_Fn)(const void *, const void *); +} +#endif // !NATIVE_IMAGE + +// Additional Java basic types + +typedef uint8_t jubyte; +typedef uint16_t jushort; +typedef uint32_t juint; +typedef uint64_t julong; + +// Unsigned byte types for os and stream.hpp + +// Unsigned one, two, four and eight byte quantities used for describing +// the .class file format. See JVM book chapter 4. 

// u1..u8 / s1..s8: fixed-width quantities used by the .class file format.
typedef jubyte u1;
typedef jushort u2;
typedef juint u4;
typedef julong u8;

const jubyte max_jubyte = (jubyte)-1; // 0xFF largest jubyte
const jushort max_jushort = (jushort)-1; // 0xFFFF largest jushort
const juint max_juint = (juint)-1; // 0xFFFFFFFF largest juint
const julong max_julong = (julong)-1; // 0xFF....FF largest julong

typedef jbyte s1;
typedef jshort s2;
typedef jint s4;
typedef jlong s8;

#ifndef NATIVE_IMAGE
const jbyte min_jbyte = -(1 << 7); // smallest jbyte
const jbyte max_jbyte = (1 << 7) - 1; // largest jbyte
const jshort min_jshort = -(1 << 15); // smallest jshort
const jshort max_jshort = (1 << 15) - 1; // largest jshort

const jint min_jint = (jint)1 << (sizeof(jint)*BitsPerByte-1); // 0x80000000 == smallest jint
const jint max_jint = (juint)min_jint - 1; // 0x7FFFFFFF == largest jint

const jint min_jintFloat = (jint)(0x00000001);
const jfloat min_jfloat = jfloat_cast(min_jintFloat);
const jint max_jintFloat = (jint)(0x7f7fffff);
const jfloat max_jfloat = jfloat_cast(max_jintFloat);

//----------------------------------------------------------------------------------------------------
// JVM spec restrictions

const int max_method_code_size = 64*K - 1; // JVM spec, 2nd ed. section 4.8.1 (p.134)

//----------------------------------------------------------------------------------------------------
// old CDS options
extern bool RequireSharedSpaces;
extern "C" {
// Make sure UseSharedSpaces is accessible to the serviceability agent.
extern JNIEXPORT jboolean UseSharedSpaces;
}

//----------------------------------------------------------------------------------------------------
// Object alignment, in units of HeapWords.
//
// Minimum is max(BytesPerLong, BytesPerDouble, BytesPerOop) / HeapWordSize, so jlong, jdouble and
// reference fields can be naturally aligned.

// Object alignment is configurable at runtime, hence extern (set elsewhere).
extern int MinObjAlignment;
extern int MinObjAlignmentInBytes;
extern int MinObjAlignmentInBytesMask;

extern int LogMinObjAlignment;
extern int LogMinObjAlignmentInBytes;

// Maximal size of heap where unscaled compression can be used. Also upper bound
// for heap placement: 4GB.
const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1);
// Maximal size of heap where compressed oops can be used. Also upper bound for heap
// placement for zero based compression algorithm: UnscaledOopHeapMax << LogMinObjAlignmentInBytes.
extern uint64_t OopEncodingHeapMax;

// Machine dependent stuff

// The maximum size of the code cache. Can be overridden by targets.
#define CODE_CACHE_SIZE_LIMIT (2*G)
// Allow targets to reduce the default size of the code cache.
#define CODE_CACHE_DEFAULT_LIMIT CODE_CACHE_SIZE_LIMIT

#include CPU_HEADER(globalDefinitions)

// To assure the IRIW property on processors that are not multiple copy
// atomic, sync instructions must be issued between volatile reads to
// assure their ordering, instead of after volatile stores.
// (See "A Tutorial Introduction to the ARM and POWER Relaxed Memory Models"
// by Luc Maranget, Susmit Sarkar and Peter Sewell, INRIA/Cambridge)
#ifdef CPU_MULTI_COPY_ATOMIC
// Not needed.
const bool support_IRIW_for_not_multiple_copy_atomic_cpu = false;
#else
// From all non-multi-copy-atomic architectures, only PPC64 supports IRIW at the moment.
// Final decision is subject to JEP 188: Java Memory Model Update.
const bool support_IRIW_for_not_multiple_copy_atomic_cpu = PPC64_ONLY(true) NOT_PPC64(false);
#endif

// The expected size in bytes of a cache line.
#ifndef DEFAULT_CACHE_LINE_SIZE
#error "Platform should define DEFAULT_CACHE_LINE_SIZE"
#endif

// The default padding size for data structures to avoid false sharing.
+#ifndef DEFAULT_PADDING_SIZE +#error "Platform should define DEFAULT_PADDING_SIZE" +#endif + + +//---------------------------------------------------------------------------------------------------- +// Utility macros for compilers +// used to silence compiler warnings + +#define Unused_Variable(var) var + + +//---------------------------------------------------------------------------------------------------- +// Miscellaneous + +// 6302670 Eliminate Hotspot __fabsf dependency +// All fabs() callers should call this function instead, which will implicitly +// convert the operand to double, avoiding a dependency on __fabsf which +// doesn't exist in early versions of Solaris 8. +inline double fabsd(double value) { + return fabs(value); +} + +// Returns numerator/denominator as percentage value from 0 to 100. If denominator +// is zero, return 0.0. +template +inline double percent_of(T numerator, T denominator) { + return denominator != 0 ? (double)numerator / (double)denominator * 100.0 : 0.0; +} + +//---------------------------------------------------------------------------------------------------- +// Special casts +// Cast floats into same-size integers and vice-versa w/o changing bit-pattern +typedef union { + jfloat f; + jint i; +} FloatIntConv; + +typedef union { + jdouble d; + jlong l; + julong ul; +} DoubleLongConv; + +inline jint jint_cast (jfloat x) { return ((FloatIntConv*)&x)->i; } +inline jfloat jfloat_cast (jint x) { return ((FloatIntConv*)&x)->f; } + +inline jlong jlong_cast (jdouble x) { return ((DoubleLongConv*)&x)->l; } +inline julong julong_cast (jdouble x) { return ((DoubleLongConv*)&x)->ul; } +inline jdouble jdouble_cast (jlong x) { return ((DoubleLongConv*)&x)->d; } + +inline jint low (jlong value) { return jint(value); } +inline jint high(jlong value) { return jint(value >> 32); } + +// the fancy casts are a hopefully portable way +// to do unsigned 32 to 64 bit type conversion +inline void set_low (jlong* value, jint low ) { *value &= 
(jlong)0xffffffff << 32; + *value |= (jlong)(julong)(juint)low; } + +inline void set_high(jlong* value, jint high) { *value &= (jlong)(julong)(juint)0xffffffff; + *value |= (jlong)high << 32; } + +inline jlong jlong_from(jint h, jint l) { + jlong result = 0; // initialization to avoid warning + set_high(&result, h); + set_low(&result, l); + return result; +} + +union jlong_accessor { + jint words[2]; + jlong long_value; +}; + +void basic_types_init(); // cannot define here; uses assert + + +// NOTE: replicated in SA in vm/agent/sun/jvm/hotspot/runtime/BasicType.java +enum BasicType : u1 { +// The values T_BOOLEAN..T_LONG (4..11) are derived from the JVMS. + T_BOOLEAN = JVM_T_BOOLEAN, + T_CHAR = JVM_T_CHAR, + T_FLOAT = JVM_T_FLOAT, + T_DOUBLE = JVM_T_DOUBLE, + T_BYTE = JVM_T_BYTE, + T_SHORT = JVM_T_SHORT, + T_INT = JVM_T_INT, + T_LONG = JVM_T_LONG, + // The remaining values are not part of any standard. + // T_OBJECT and T_VOID denote two more semantic choices + // for method return values. + // T_OBJECT and T_ARRAY describe signature syntax. + // T_ADDRESS, T_METADATA, T_NARROWOOP, T_NARROWKLASS describe + // internal references within the JVM as if they were Java + // types in their own right. 
+ T_OBJECT = 12, + T_ARRAY = 13, + T_VOID = 14, + T_ADDRESS = 15, + T_NARROWOOP = 16, + T_METADATA = 17, + T_NARROWKLASS = 18, + T_CONFLICT = 19, // for stack value type with conflicting contents + T_ILLEGAL = 99 +}; + +#define SIGNATURE_TYPES_DO(F, N) \ + F(JVM_SIGNATURE_BOOLEAN, T_BOOLEAN, N) \ + F(JVM_SIGNATURE_CHAR, T_CHAR, N) \ + F(JVM_SIGNATURE_FLOAT, T_FLOAT, N) \ + F(JVM_SIGNATURE_DOUBLE, T_DOUBLE, N) \ + F(JVM_SIGNATURE_BYTE, T_BYTE, N) \ + F(JVM_SIGNATURE_SHORT, T_SHORT, N) \ + F(JVM_SIGNATURE_INT, T_INT, N) \ + F(JVM_SIGNATURE_LONG, T_LONG, N) \ + F(JVM_SIGNATURE_CLASS, T_OBJECT, N) \ + F(JVM_SIGNATURE_ARRAY, T_ARRAY, N) \ + F(JVM_SIGNATURE_VOID, T_VOID, N) \ + /*end*/ + +inline bool is_java_type(BasicType t) { + return T_BOOLEAN <= t && t <= T_VOID; +} + +inline bool is_java_primitive(BasicType t) { + return T_BOOLEAN <= t && t <= T_LONG; +} + +inline bool is_subword_type(BasicType t) { + // these guys are processed exactly like T_INT in calling sequences: + return (t == T_BOOLEAN || t == T_CHAR || t == T_BYTE || t == T_SHORT); +} + +inline bool is_signed_subword_type(BasicType t) { + return (t == T_BYTE || t == T_SHORT); +} + +inline bool is_unsigned_subword_type(BasicType t) { + return (t == T_BOOLEAN || t == T_CHAR); +} + +inline bool is_double_word_type(BasicType t) { + return (t == T_DOUBLE || t == T_LONG); +} + +inline bool is_reference_type(BasicType t, bool include_narrow_oop = false) { + return (t == T_OBJECT || t == T_ARRAY || (include_narrow_oop && t == T_NARROWOOP)); +} + +inline bool is_integral_type(BasicType t) { + return is_subword_type(t) || t == T_INT || t == T_LONG; +} + +inline bool is_non_subword_integral_type(BasicType t) { + return t == T_INT || t == T_LONG; +} + +inline bool is_floating_point_type(BasicType t) { + return (t == T_FLOAT || t == T_DOUBLE); +} + +extern char type2char_tab[T_CONFLICT+1]; // Map a BasicType to a jchar +inline char type2char(BasicType t) { return (uint)t < T_CONFLICT+1 ? 
type2char_tab[t] : 0; } +extern int type2size[T_CONFLICT+1]; // Map BasicType to result stack elements +extern const char* type2name_tab[T_CONFLICT+1]; // Map a BasicType to a char* +extern BasicType name2type(const char* name); + +const char* type2name(BasicType t); + +inline jlong max_signed_integer(BasicType bt) { + if (bt == T_INT) { + return max_jint; + } + assert(bt == T_LONG, "unsupported"); + return max_jlong; +} + +inline jlong min_signed_integer(BasicType bt) { + if (bt == T_INT) { + return min_jint; + } + assert(bt == T_LONG, "unsupported"); + return min_jlong; +} + +// Auxiliary math routines +// least common multiple +extern size_t lcm(size_t a, size_t b); + + +// NOTE: replicated in SA in vm/agent/sun/jvm/hotspot/runtime/BasicType.java +enum BasicTypeSize { + T_BOOLEAN_size = 1, + T_CHAR_size = 1, + T_FLOAT_size = 1, + T_DOUBLE_size = 2, + T_BYTE_size = 1, + T_SHORT_size = 1, + T_INT_size = 1, + T_LONG_size = 2, + T_OBJECT_size = 1, + T_ARRAY_size = 1, + T_NARROWOOP_size = 1, + T_NARROWKLASS_size = 1, + T_VOID_size = 0 +}; + +// this works on valid parameter types but not T_VOID, T_CONFLICT, etc. 
// Number of JVM stack words a parameter of type t occupies (long/double: 2).
inline int parameter_type_word_count(BasicType t) {
  if (is_double_word_type(t)) return 2;
  assert(is_java_primitive(t) || is_reference_type(t), "no goofy types here please");
  assert(type2size[t] == 1, "must be");
  return 1;
}

// maps a BasicType to its instance field storage type:
// all sub-word integral types are widened to T_INT
extern BasicType type2field[T_CONFLICT+1];
extern BasicType type2wfield[T_CONFLICT+1];


// size in bytes
enum ArrayElementSize {
  T_BOOLEAN_aelem_bytes = 1,
  T_CHAR_aelem_bytes = 2,
  T_FLOAT_aelem_bytes = 4,
  T_DOUBLE_aelem_bytes = 8,
  T_BYTE_aelem_bytes = 1,
  T_SHORT_aelem_bytes = 2,
  T_INT_aelem_bytes = 4,
  T_LONG_aelem_bytes = 8,
#ifdef _LP64
  T_OBJECT_aelem_bytes = 8,
  T_ARRAY_aelem_bytes = 8,
#else
  T_OBJECT_aelem_bytes = 4,
  T_ARRAY_aelem_bytes = 4,
#endif
  T_NARROWOOP_aelem_bytes = 4,
  T_NARROWKLASS_aelem_bytes = 4,
  T_VOID_aelem_bytes = 0
};

extern int _type2aelembytes[T_CONFLICT+1]; // maps a BasicType to nof bytes used by its array element
#ifdef ASSERT
extern int type2aelembytes(BasicType t, bool allow_address = false); // asserts
#else
inline int type2aelembytes(BasicType t, bool allow_address = false) { return _type2aelembytes[t]; }
#endif

inline bool same_type_or_subword_size(BasicType t1, BasicType t2) {
  return (t1 == t2) || (is_subword_type(t1) && type2aelembytes(t1) == type2aelembytes(t2));
}

// JavaValue serves as a container for arbitrary Java values.

// Tagged union holding any Java value (primitive, handle, or raw oop),
// tagged with its BasicType.
class JavaValue {

 public:
  typedef union JavaCallValue {
    jfloat f;
    jdouble d;
    jint i;
    jlong l;
    jobject h;
    oopDesc* o;
  } JavaCallValue;

 private:
  BasicType _type;
  JavaCallValue _value;

 public:
  JavaValue(BasicType t = T_ILLEGAL) { _type = t; }

  JavaValue(jfloat value) {
    _type = T_FLOAT;
    _value.f = value;
  }

  JavaValue(jdouble value) {
    _type = T_DOUBLE;
    _value.d = value;
  }

  jfloat get_jfloat() const { return _value.f; }
  jdouble get_jdouble() const { return _value.d; }
  jint get_jint() const { return _value.i; }
  jlong get_jlong() const { return _value.l; }
  jobject get_jobject() const { return _value.h; }
  oopDesc* get_oop() const { return _value.o; }
  JavaCallValue* get_value_addr() { return &_value; }
  BasicType get_type() const { return _type; }

  void set_jfloat(jfloat f) { _value.f = f;}
  void set_jdouble(jdouble d) { _value.d = d;}
  void set_jint(jint i) { _value.i = i;}
  void set_jlong(jlong l) { _value.l = l;}
  void set_jobject(jobject h) { _value.h = h;}
  void set_oop(oopDesc* o) { _value.o = o;}
  void set_type(BasicType t) { _type = t; }

  // Sub-int accessors narrow from the stored jint slot.
  jboolean get_jboolean() const { return (jboolean) (_value.i);}
  jbyte get_jbyte() const { return (jbyte) (_value.i);}
  jchar get_jchar() const { return (jchar) (_value.i);}
  jshort get_jshort() const { return (jshort) (_value.i);}

};


// TosState describes the top-of-stack state before and after the execution of
// a bytecode or method. The top-of-stack value may be cached in one or more CPU
// registers. The TosState corresponds to the 'machine representation' of this cached
// value. There's 4 states corresponding to the JAVA types int, long, float & double
// as well as a 5th state in case the top-of-stack value is actually on the top
// of stack (in memory) and thus not cached. The atos state corresponds to the itos
// state when it comes to machine representation but is used separately for (oop)
// type specific operations (e.g. verification code).

enum TosState { // describes the tos cache contents
  btos = 0, // byte, bool tos cached
  ztos = 1, // byte, bool tos cached
  ctos = 2, // char tos cached
  stos = 3, // short tos cached
  itos = 4, // int tos cached
  ltos = 5, // long tos cached
  ftos = 6, // float tos cached
  dtos = 7, // double tos cached
  atos = 8, // object cached
  vtos = 9, // tos not cached
  number_of_states,
  ilgl // illegal state: should not occur
};


inline TosState as_TosState(BasicType type) {
  switch (type) {
    case T_BYTE : return btos;
    case T_BOOLEAN: return ztos;
    case T_CHAR : return ctos;
    case T_SHORT : return stos;
    case T_INT : return itos;
    case T_LONG : return ltos;
    case T_FLOAT : return ftos;
    case T_DOUBLE : return dtos;
    case T_VOID : return vtos;
    case T_ARRAY : // fall through
    case T_OBJECT : return atos;
    default : return ilgl;
  }
}

inline BasicType as_BasicType(TosState state) {
  switch (state) {
    case btos : return T_BYTE;
    case ztos : return T_BOOLEAN;
    case ctos : return T_CHAR;
    case stos : return T_SHORT;
    case itos : return T_INT;
    case ltos : return T_LONG;
    case ftos : return T_FLOAT;
    case dtos : return T_DOUBLE;
    case atos : return T_OBJECT;
    case vtos : return T_VOID;
    default : return T_ILLEGAL;
  }
}


// Helper function to convert BasicType info into TosState
// Note: Cannot define here as it uses global constant at the time being.
TosState as_TosState(BasicType type);


// JavaThreadState keeps track of which part of the code a thread is executing in. This
// information is needed by the safepoint code.
//
// There are 4 essential states:
//
//  _thread_new         : Just started, but not executed init. code yet (most likely still in OS init code)
//  _thread_in_native   : In native code. This is a safepoint region, since all oops will be in jobject handles
//  _thread_in_vm       : Executing in the vm
//  _thread_in_Java     : Executing either interpreted or compiled Java code (or could be in a stub)
//
// Each state has an associated xxxx_trans state, which is an intermediate state used when a thread is in
// a transition from one state to another. These extra states makes it possible for the safepoint code to
// handle certain thread_states without having to suspend the thread - making the safepoint code faster.
//
// Given a state, the xxxx_trans state can always be found by adding 1.
//
enum JavaThreadState {
  _thread_uninitialized = 0, // should never happen (missing initialization)
  _thread_new = 2, // just starting up, i.e., in process of being initialized
  _thread_new_trans = 3, // corresponding transition state (not used, included for completeness)
  _thread_in_native = 4, // running in native code
  _thread_in_native_trans = 5, // corresponding transition state
  _thread_in_vm = 6, // running in VM
  _thread_in_vm_trans = 7, // corresponding transition state
  _thread_in_Java = 8, // running in Java or in stub code
  _thread_in_Java_trans = 9, // corresponding transition state (not used, included for completeness)
  _thread_blocked = 10, // blocked in vm
  _thread_blocked_trans = 11, // corresponding transition state
  _thread_max_state = 12 // maximum thread state+1 - used for statistics allocation
};

enum LockingMode {
  // Use only heavy monitors for locking
  LM_MONITOR = 0,
  // Legacy stack-locking, with monitors as 2nd tier
  LM_LEGACY = 1,
  // New lightweight locking, with monitors as 2nd tier
  LM_LIGHTWEIGHT = 2
};

//----------------------------------------------------------------------------------------------------
// Special constants for debugging

const jint badInt = -3; // generic "bad int" value
const intptr_t badAddressVal = -2; // generic "bad address" value
const intptr_t badOopVal = -1; // generic "bad oop" value
const intptr_t badHeapOopVal = (intptr_t) CONST64(0x2BAD4B0BBAADBABE); // value used to zap heap after GC
const int badStackSegVal = 0xCA; // value used to zap stack segments
const int badHandleValue = 0xBC; // value used to zap vm handle area
const int badResourceValue = 0xAB; // value used to zap resource area
const int freeBlockPad = 0xBA; // value used to pad freed blocks.
const int uninitBlockPad = 0xF1; // value used to zap newly malloc'd blocks.
const juint uninitMetaWordVal= 0xf7f7f7f7; // value used to zap newly allocated metachunk
const juint badHeapWordVal = 0xBAADBABE; // value used to zap heap after GC
const juint badMetaWordVal = 0xBAADFADE; // value used to zap metadata heap after GC
const int badCodeHeapNewVal= 0xCC; // value used to zap Code heap at allocation
const int badCodeHeapFreeVal = 0xDD; // value used to zap Code heap at deallocation
const intptr_t badDispHeaderDeopt = 0xDE0BD000; // value to fill unused displaced header during deoptimization
const intptr_t badDispHeaderOSR = 0xDEAD05A0; // value to fill unused displaced header during OSR

// (These must be implemented as #defines because C++ compilers are
// not obligated to inline non-integral constants!)
#define badAddress ((address)::badAddressVal)
#define badHeapWord (::badHeapWordVal)

// Default TaskQueue size is 16K (32-bit) or 128K (64-bit)
const uint TASKQUEUE_SIZE = (NOT_LP64(1<<14) LP64_ONLY(1<<17));

//----------------------------------------------------------------------------------------------------
// Utility functions for bitfield manipulations

const intptr_t AllBits = ~0; // all bits set in a word
const intptr_t NoBits = 0; // no bits set in a word
const jlong NoLongBits = 0; // no bits set in a long
const intptr_t OneBit = 1; // only right_most bit set in a word

// get a word with the n.th or the right-most or left-most n bits set
// (note: #define used only so that they can be used in enum constant definitions)
// nth_bit guards against n >= BitsPerWord, where the shift would be undefined.
#define nth_bit(n) (((n) >= BitsPerWord) ? 0 : (OneBit << (n)))
#define right_n_bits(n) (nth_bit(n) - 1)

// bit-operations using a mask m
inline void set_bits(intptr_t& x, intptr_t m) { x |= m; }
inline void clear_bits(intptr_t& x, intptr_t m) { x &= ~m; }
inline intptr_t mask_bits(intptr_t x, intptr_t m) { return x & m; }
inline jlong mask_long_bits(jlong x, jlong m) { return x & m; }
inline bool mask_bits_are_true(intptr_t flags, intptr_t mask) { return (flags & mask) == mask; }

// bit-operations using the n.th bit
inline void set_nth_bit(intptr_t& x, int n) { set_bits(x, nth_bit(n)); }
inline void clear_nth_bit(intptr_t& x, int n) { clear_bits(x, nth_bit(n)); }
inline bool is_set_nth_bit(intptr_t x, int n) { return mask_bits(x, nth_bit(n)) != NoBits; }

// returns the bitfield of x starting at start_bit_no with length field_length (no sign-extension!)
+inline intptr_t bitfield(intptr_t x, int start_bit_no, int field_length) { + return mask_bits(x >> start_bit_no, right_n_bits(field_length)); +} + + +//---------------------------------------------------------------------------------------------------- +// Utility functions for integers + +// Avoid use of global min/max macros which may cause unwanted double +// evaluation of arguments. +#ifdef max +#undef max +#endif + +#ifdef min +#undef min +#endif +#endif // !NATIVE_IMAGE + +// It is necessary to use templates here. Having normal overloaded +// functions does not work because it is necessary to provide both 32- +// and 64-bit overloaded functions, which does not work, and having +// explicitly-typed versions of these routines (i.e., MAX2I, MAX2L) +// will be even more error-prone than macros. +template constexpr T MAX2(T a, T b) { return (a > b) ? a : b; } +template constexpr T MIN2(T a, T b) { return (a < b) ? a : b; } +#ifndef NATIVE_IMAGE +template constexpr T MAX3(T a, T b, T c) { return MAX2(MAX2(a, b), c); } +template constexpr T MIN3(T a, T b, T c) { return MIN2(MIN2(a, b), c); } +template constexpr T MAX4(T a, T b, T c, T d) { return MAX2(MAX3(a, b, c), d); } +template constexpr T MIN4(T a, T b, T c, T d) { return MIN2(MIN3(a, b, c), d); } + +template inline T ABS(T x) { return (x > 0) ? x : -x; } + +// Return the given value clamped to the range [min ... max] +template +inline T clamp(T value, T min, T max) { + assert(min <= max, "must be"); + return MIN2(MAX2(value, min), max); +} + +inline bool is_odd (intx x) { return x & 1; } +inline bool is_even(intx x) { return !is_odd(x); } + +// abs methods which cannot overflow and so are well-defined across +// the entire domain of integer types. 
+static inline unsigned int uabs(unsigned int n) {
+  union {
+    unsigned int result;
+    int value;
+  };
+  result = n;
+  if (value < 0) result = 0-result;
+  return result;
+}
+static inline julong uabs(julong n) {
+  union {
+    julong result;
+    jlong value;
+  };
+  result = n;
+  if (value < 0) result = 0-result;
+  return result;
+}
+static inline julong uabs(jlong n) { return uabs((julong)n); }
+static inline unsigned int uabs(int n) { return uabs((unsigned int)n); }
+
+// "to" should be greater than "from."
+inline size_t byte_size(void* from, void* to) {
+  return pointer_delta(to, from, sizeof(char));
+}
+
+// Pack and extract shorts to/from ints:
+
+inline u2 extract_low_short_from_int(u4 x) {
+  return u2(x & 0xffff);
+}
+
+inline u2 extract_high_short_from_int(u4 x) {
+  return u2((x >> 16) & 0xffff);
+}
+
+inline int build_int_from_shorts( u2 low, u2 high ) {
+  return ((int)((unsigned int)high << 16) | (unsigned int)low);
+}
+
+// swap a & b
+template<class T> static void swap(T& a, T& b) {
+  T tmp = a;
+  a = b;
+  b = tmp;
+}
+
+// array_size_impl is a function that takes a reference to T[N] and
+// returns a reference to char[N]. It is not ODR-used, so not defined.
+template<typename T, size_t N> char (&array_size_impl(T (&)[N]))[N];
+
+#define ARRAY_SIZE(array) sizeof(array_size_impl(array))
+
+//----------------------------------------------------------------------------------------------------
+// Sum and product which can never overflow: they wrap, just like the
+// Java operations. Note that we don't intend these to be used for
+// general-purpose arithmetic: their purpose is to emulate Java
+// operations.
+
+// The goal of this code to avoid undefined or implementation-defined
+// behavior. The use of an lvalue to reference cast is explicitly
+// permitted by Lvalues and rvalues [basic.lval]. [Section 3.10 Para
+// 15 in C++03]
+#define JAVA_INTEGER_OP(OP, NAME, TYPE, UNSIGNED_TYPE) \
+inline TYPE NAME (TYPE in1, TYPE in2) { \
+  UNSIGNED_TYPE ures = static_cast<UNSIGNED_TYPE>(in1); \
+  ures OP ## = static_cast<UNSIGNED_TYPE>(in2); \
+  return reinterpret_cast<TYPE&>(ures); \
+}
+
+JAVA_INTEGER_OP(+, java_add, jint, juint)
+JAVA_INTEGER_OP(-, java_subtract, jint, juint)
+JAVA_INTEGER_OP(*, java_multiply, jint, juint)
+JAVA_INTEGER_OP(+, java_add, jlong, julong)
+JAVA_INTEGER_OP(-, java_subtract, jlong, julong)
+JAVA_INTEGER_OP(*, java_multiply, jlong, julong)
+
+inline jint java_negate(jint v) { return java_subtract((jint) 0, v); }
+inline jlong java_negate(jlong v) { return java_subtract((jlong)0, v); }
+
+#undef JAVA_INTEGER_OP
+
+// Provide integer shift operations with Java semantics. No overflow
+// issues - left shifts simply discard shifted out bits. No undefined
+// behavior for large or negative shift quantities; instead the actual
+// shift distance is the argument modulo the lhs value's size in bits.
+// No undefined or implementation defined behavior for shifting negative
+// values; left shift discards bits, right shift sign extends. We use
+// the same safe conversion technique as above for java_add and friends.
+#define JAVA_INTEGER_SHIFT_OP(OP, NAME, TYPE, XTYPE) \
+inline TYPE NAME (TYPE lhs, jint rhs) { \
+  const uint rhs_mask = (sizeof(TYPE) * 8) - 1; \
+  STATIC_ASSERT(rhs_mask == 31 || rhs_mask == 63); \
+  XTYPE xres = static_cast<XTYPE>(lhs); \
+  xres OP ## = (rhs & rhs_mask); \
+  return reinterpret_cast<TYPE&>(xres); \
+}
+
+JAVA_INTEGER_SHIFT_OP(<<, java_shift_left, jint, juint)
+JAVA_INTEGER_SHIFT_OP(<<, java_shift_left, jlong, julong)
+
+// For signed shift right, assume C++ implementation >> sign extends.
+//
+// C++14 5.8/3: In the description of "E1 >> E2" it says "If E1 has a signed type
+// and a negative value, the resulting value is implementation-defined."
+//
+// However, C++20 7.6.7/3 further defines integral arithmetic, as part of
+// requiring two's-complement behavior.
+// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0907r3.html
+// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1236r1.html
+// The corresponding C++20 text is "Right-shift on signed integral types is an
+// arithmetic right shift, which performs sign-extension."
+//
+// As discussed in the two's complement proposal, all known modern C++ compilers
+// already behave that way. And it is unlikely any would go off and do something
+// different now, with C++20 tightening things up.
+JAVA_INTEGER_SHIFT_OP(>>, java_shift_right, jint, jint)
+JAVA_INTEGER_SHIFT_OP(>>, java_shift_right, jlong, jlong)
+// For >>> use C++ unsigned >>.
+JAVA_INTEGER_SHIFT_OP(>>, java_shift_right_unsigned, jint, juint)
+JAVA_INTEGER_SHIFT_OP(>>, java_shift_right_unsigned, jlong, julong)
+
+#undef JAVA_INTEGER_SHIFT_OP
+
+//----------------------------------------------------------------------------------------------------
+// The goal of this code is to provide saturating operations for int/uint.
+// Checks overflow conditions and saturates the result to min_jint/max_jint.
+#define SATURATED_INTEGER_OP(OP, NAME, TYPE1, TYPE2) \
+inline int NAME (TYPE1 in1, TYPE2 in2) { \
+  jlong res = static_cast<jlong>(in1); \
+  res OP ## = static_cast<jlong>(in2); \
+  if (res > max_jint) { \
+    res = max_jint; \
+  } else if (res < min_jint) { \
+    res = min_jint; \
+  } \
+  return static_cast<int>(res); \
+}
+
+SATURATED_INTEGER_OP(+, saturated_add, int, int)
+SATURATED_INTEGER_OP(+, saturated_add, int, uint)
+SATURATED_INTEGER_OP(+, saturated_add, uint, int)
+SATURATED_INTEGER_OP(+, saturated_add, uint, uint)
+
+#undef SATURATED_INTEGER_OP
+
+// Taken from section 8-2 of Henry S. Warren, Jr., Hacker's Delight (2nd ed.) (Addison Wesley, 2013), 173-174.
+inline uint64_t multiply_high_unsigned(const uint64_t x, const uint64_t y) { + const uint64_t x1 = x >> 32u; + const uint64_t x2 = x & 0xFFFFFFFF; + const uint64_t y1 = y >> 32u; + const uint64_t y2 = y & 0xFFFFFFFF; + const uint64_t z2 = x2 * y2; + const uint64_t t = x1 * y2 + (z2 >> 32u); + uint64_t z1 = t & 0xFFFFFFFF; + const uint64_t z0 = t >> 32u; + z1 += x2 * y1; + + return x1 * y1 + z0 + (z1 >> 32u); +} + +// Taken from java.lang.Math::multiplyHigh which uses the technique from section 8-2 of Henry S. Warren, Jr., +// Hacker's Delight (2nd ed.) (Addison Wesley, 2013), 173-174 but adapted for signed longs. +inline int64_t multiply_high_signed(const int64_t x, const int64_t y) { + const jlong x1 = java_shift_right((jlong)x, 32); + const jlong x2 = x & 0xFFFFFFFF; + const jlong y1 = java_shift_right((jlong)y, 32); + const jlong y2 = y & 0xFFFFFFFF; + + const uint64_t z2 = (uint64_t)x2 * y2; + const int64_t t = x1 * y2 + (z2 >> 32u); // Unsigned shift + int64_t z1 = t & 0xFFFFFFFF; + const int64_t z0 = java_shift_right((jlong)t, 32); + z1 += x2 * y1; + + return x1 * y1 + z0 + java_shift_right((jlong)z1, 32); +} + +// Dereference vptr +// All C++ compilers that we know of have the vtbl pointer in the first +// word. If there are exceptions, this function needs to be made compiler +// specific. +static inline void* dereference_vptr(const void* addr) { + return *(void**)addr; +} + +//---------------------------------------------------------------------------------------------------- +// String type aliases used by command line flag declarations and +// processing utilities. 
+
+typedef const char* ccstr;
+typedef const char* ccstrlist; // represents string arguments which accumulate
+
+//----------------------------------------------------------------------------------------------------
+// Default hash/equals functions used by ResourceHashtable
+
+template<typename K> unsigned primitive_hash(const K& k) {
+  unsigned hash = (unsigned)((uintptr_t)k);
+  return hash ^ (hash >> 3); // just in case we're dealing with aligned ptrs
+}
+
+template<typename K> bool primitive_equals(const K& k0, const K& k1) {
+  return k0 == k1;
+}
+
+template<typename K> int primitive_compare(const K& k0, const K& k1) {
+  return ((k0 < k1) ? -1 : (k0 == k1) ? 0 : 1);
+}
+
+//----------------------------------------------------------------------------------------------------
+
+// Allow use of C++ thread_local when approved - see JDK-8282469.
+#define APPROVED_CPP_THREAD_LOCAL thread_local
+
+// Converts any type T to a reference type.
+template<typename T>
+std::add_rvalue_reference_t<T> declval() noexcept;
+
+// Quickly test to make sure IEEE-754 subnormal numbers are correctly
+// handled.
+bool IEEE_subnormal_handling_OK();
+#endif // !NATIVE_IMAGE
+
+#endif // SHARE_UTILITIES_GLOBALDEFINITIONS_HPP
diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/globalDefinitions_gcc.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/globalDefinitions_gcc.hpp
new file mode 100644
index 000000000000..9132b3523863
--- /dev/null
+++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/globalDefinitions_gcc.hpp
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_UTILITIES_GLOBALDEFINITIONS_GCC_HPP
+#define SHARE_UTILITIES_GLOBALDEFINITIONS_GCC_HPP
+
+#include "jni.h"
+
+// This file holds compiler-dependent includes,
+// globally used constants & types, class (forward)
+// declarations and a few frequently used utility functions.
+
+#include <ctype.h>
+#include <string.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <wchar.h>
+
+#include <math.h>
+#include <time.h>
+#include <fcntl.h>
+#include <dlfcn.h>
+#include <pthread.h>
+
+#include <limits.h>
+#include <errno.h>
+
+#if defined(LINUX) || defined(_ALLBSD_SOURCE)
+#include <inttypes.h>
+#include <signal.h>
+#ifndef __OpenBSD__
+#include <ucontext.h>
+#endif
+#ifdef __APPLE__
+  #include <AvailabilityMacros.h>
+  #include <mach/mach.h>
+#endif
+#include <sys/time.h>
+#endif // LINUX || _ALLBSD_SOURCE
+
+// NULL vs NULL_WORD:
+// On Linux NULL is defined as a special type '__null'. Assigning __null to
+// integer variable will cause gcc warning. Use NULL_WORD in places where a
+// pointer is stored as integer value. On some platforms, sizeof(intptr_t) >
+// sizeof(void*), so here we want something which is integer type, but has the
+// same size as a pointer.
+#ifdef __GNUC__ + #ifdef _LP64 + #define NULL_WORD 0L + #else + // Cast 0 to intptr_t rather than int32_t since they are not the same type + // on platforms such as Mac OS X. + #define NULL_WORD ((intptr_t)0) + #endif +#else + #define NULL_WORD NULL +#endif + +#if !defined(LINUX) && !defined(_ALLBSD_SOURCE) +// Compiler-specific primitive types +typedef unsigned short uint16_t; +#ifndef _UINT32_T +#define _UINT32_T +typedef unsigned int uint32_t; +#endif // _UINT32_T + +#if !defined(_SYS_INT_TYPES_H) +#ifndef _UINT64_T +#define _UINT64_T +typedef unsigned long long uint64_t; +#endif // _UINT64_T +// %%%% how to access definition of intptr_t portably in 5.5 onward? +typedef int intptr_t; +typedef unsigned int uintptr_t; +// If this gets an error, figure out a symbol XXX that implies the +// prior definition of intptr_t, and add "&& !defined(XXX)" above. +#endif // _SYS_INT_TYPES_H + +#endif // !LINUX && !_ALLBSD_SOURCE + +// checking for nanness +#if defined(__APPLE__) +inline int g_isnan(double f) { return isnan(f); } +#elif defined(LINUX) || defined(_ALLBSD_SOURCE) +inline int g_isnan(float f) { return isnan(f); } +inline int g_isnan(double f) { return isnan(f); } +#else +#error "missing platform-specific definition here" +#endif + +#define CAN_USE_NAN_DEFINE 1 + + +// Checking for finiteness + +inline int g_isfinite(jfloat f) { return isfinite(f); } +inline int g_isfinite(jdouble f) { return isfinite(f); } + + +// Formatting. +#ifdef _LP64 +# ifdef __APPLE__ +# define FORMAT64_MODIFIER "ll" +# else +# define FORMAT64_MODIFIER "l" +# endif +#else // !_LP64 +#define FORMAT64_MODIFIER "ll" +#endif // _LP64 + +// gcc warns about applying offsetof() to non-POD object or calculating +// offset directly when base address is null. The -Wno-invalid-offsetof +// option could be used to suppress this warning, but we instead just +// avoid the use of offsetof(). +// +// FIXME: This macro is complex and rather arcane. 
Perhaps we should +// use offsetof() instead, with the invalid-offsetof warning +// temporarily disabled. +#define offset_of(klass,field) \ +([]() { \ + alignas(16) char space[sizeof (klass)]; \ + klass* dummyObj = (klass*)space; \ + char* c = (char*)(void*)&dummyObj->field; \ + return (size_t)(c - space); \ +}()) + + +#if defined(_LP64) && defined(__APPLE__) +#define JLONG_FORMAT "%ld" +#define JLONG_FORMAT_W(width) "%" #width "ld" +#endif // _LP64 && __APPLE__ + +#define THREAD_LOCAL __thread + +// Inlining support +#define NOINLINE __attribute__ ((noinline)) +#define ALWAYSINLINE inline __attribute__ ((always_inline)) +#define ATTRIBUTE_FLATTEN __attribute__ ((flatten)) + +#endif // SHARE_UTILITIES_GLOBALDEFINITIONS_GCC_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/macros.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/macros.hpp new file mode 100644 index 000000000000..9e027976938f --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/macros.hpp @@ -0,0 +1,639 @@ +/* + * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_UTILITIES_MACROS_HPP
+#define SHARE_UTILITIES_MACROS_HPP
+
+// Use this to mark code that needs to be cleaned up (for development only)
+#define NEEDS_CLEANUP
+
+// Makes a string of the argument (which is not macro-expanded)
+#define STR(a) #a
+
+// Makes a string of the macro expansion of a
+#define XSTR(a) STR(a)
+
+// Allow commas in macro arguments.
+#define COMMA ,
+
+// Apply pre-processor token pasting to the expansions of x and y.
+// The token pasting operator (##) prevents its arguments from being
+// expanded. This macro allows expansion of its arguments before the
+// concatenation is performed. Note: One auxiliary level ought to be
+// sufficient, but two are used because of bugs in some preprocessors.
+#define PASTE_TOKENS(x, y) PASTE_TOKENS_AUX(x, y)
+#define PASTE_TOKENS_AUX(x, y) PASTE_TOKENS_AUX2(x, y)
+#define PASTE_TOKENS_AUX2(x, y) x ## y
+
+// Convenience macro that produces a string literal with the filename
+// and linenumber of the location where the macro was used.
+#ifndef FILE_AND_LINE
+#define FILE_AND_LINE __FILE__ ":" XSTR(__LINE__)
+#endif
+
+// -DINCLUDE_<something>=0 | 1 can be specified on the command line to include
+// or exclude functionality.
+ +#ifndef FILE_AND_LINE +#define FILE_AND_LINE __FILE__ ":" XSTR(__LINE__) +#endif + +#ifndef INCLUDE_JVMTI +#define INCLUDE_JVMTI 1 +#endif // INCLUDE_JVMTI + +#if INCLUDE_JVMTI +#define JVMTI_ONLY(x) x +#define NOT_JVMTI(x) +#define NOT_JVMTI_RETURN +#define NOT_JVMTI_RETURN_(code) /* next token must be ; */ +#else +#define JVMTI_ONLY(x) +#define NOT_JVMTI(x) x +#define NOT_JVMTI_RETURN { return; } +#define NOT_JVMTI_RETURN_(code) { return code; } +#endif // INCLUDE_JVMTI + +#ifndef INCLUDE_VM_STRUCTS +#define INCLUDE_VM_STRUCTS 1 +#endif + +#if INCLUDE_VM_STRUCTS +#define NOT_VM_STRUCTS_RETURN /* next token must be ; */ +#define NOT_VM_STRUCTS_RETURN_(code) /* next token must be ; */ +#else +#define NOT_VM_STRUCTS_RETURN {} +#define NOT_VM_STRUCTS_RETURN_(code) { return code; } +#endif // INCLUDE_VM_STRUCTS + +#ifndef INCLUDE_JNI_CHECK +#define INCLUDE_JNI_CHECK 1 +#endif + +#if INCLUDE_JNI_CHECK +#define NOT_JNI_CHECK_RETURN /* next token must be ; */ +#define NOT_JNI_CHECK_RETURN_(code) /* next token must be ; */ +#else +#define NOT_JNI_CHECK_RETURN {} +#define NOT_JNI_CHECK_RETURN_(code) { return code; } +#endif // INCLUDE_JNI_CHECK + +#ifndef INCLUDE_SERVICES +#define INCLUDE_SERVICES 1 +#endif + +#if INCLUDE_SERVICES +#define NOT_SERVICES_RETURN /* next token must be ; */ +#define NOT_SERVICES_RETURN_(code) /* next token must be ; */ +#else +#define NOT_SERVICES_RETURN {} +#define NOT_SERVICES_RETURN_(code) { return code; } +#endif // INCLUDE_SERVICES + +#ifndef INCLUDE_CDS +#define INCLUDE_CDS 1 +#endif + +#if INCLUDE_CDS +#define CDS_ONLY(x) x +#define NOT_CDS(x) +#define NOT_CDS_RETURN /* next token must be ; */ +#define NOT_CDS_RETURN0 /* next token must be ; */ +#define NOT_CDS_RETURN_(code) /* next token must be ; */ +#else +#define CDS_ONLY(x) +#define NOT_CDS(x) x +#define NOT_CDS_RETURN {} +#define NOT_CDS_RETURN0 { return 0; } +#define NOT_CDS_RETURN_(code) { return code; } +#endif // INCLUDE_CDS + +#ifndef INCLUDE_MANAGEMENT +#define 
INCLUDE_MANAGEMENT 1 +#endif // INCLUDE_MANAGEMENT + +#if INCLUDE_MANAGEMENT +#define NOT_MANAGEMENT_RETURN /* next token must be ; */ +#define NOT_MANAGEMENT_RETURN_(code) /* next token must be ; */ +#define MANAGEMENT_ONLY(x) x +#else +#define NOT_MANAGEMENT_RETURN {} +#define NOT_MANAGEMENT_RETURN_(code) { return code; } +#define MANAGEMENT_ONLY(x) +#endif // INCLUDE_MANAGEMENT + +#ifndef INCLUDE_EPSILONGC +#define INCLUDE_EPSILONGC 1 +#endif // INCLUDE_EPSILONGC + +#if INCLUDE_EPSILONGC +#define EPSILONGC_ONLY(x) x +#define EPSILONGC_ONLY_ARG(arg) arg, +#define NOT_EPSILONGC(x) +#define NOT_EPSILONGC_RETURN /* next token must be ; */ +#define NOT_EPSILONGC_RETURN_(code) /* next token must be ; */ +#else +#define EPSILONGC_ONLY(x) +#define EPSILONGC_ONLY_ARG(arg) +#define NOT_EPSILONGC(x) x +#define NOT_EPSILONGC_RETURN {} +#define NOT_EPSILONGC_RETURN_(code) { return code; } +#endif // INCLUDE_EPSILONGC + +#ifndef INCLUDE_G1GC +#define INCLUDE_G1GC 1 +#endif // INCLUDE_G1GC + +#if INCLUDE_G1GC +#define G1GC_ONLY(x) x +#define G1GC_ONLY_ARG(arg) arg, +#define NOT_G1GC(x) +#define NOT_G1GC_RETURN /* next token must be ; */ +#define NOT_G1GC_RETURN_(code) /* next token must be ; */ +#else +#define G1GC_ONLY(x) +#define G1GC_ONLY_ARG(arg) +#define NOT_G1GC(x) x +#define NOT_G1GC_RETURN {} +#define NOT_G1GC_RETURN_(code) { return code; } +#endif // INCLUDE_G1GC + +#ifndef INCLUDE_PARALLELGC +#define INCLUDE_PARALLELGC 1 +#endif // INCLUDE_PARALLELGC + +#if INCLUDE_PARALLELGC +#define PARALLELGC_ONLY(x) x +#define PARALLELGC_ONLY_ARG(arg) arg, +#define NOT_PARALLELGC(x) +#define NOT_PARALLELGC_RETURN /* next token must be ; */ +#define NOT_PARALLELGC_RETURN_(code) /* next token must be ; */ +#else +#define PARALLELGC_ONLY(x) +#define PARALLELGC_ONLY_ARG(arg) +#define NOT_PARALLELGC(x) x +#define NOT_PARALLELGC_RETURN {} +#define NOT_PARALLELGC_RETURN_(code) { return code; } +#endif // INCLUDE_PARALLELGC + +#ifndef INCLUDE_SERIALGC +#define INCLUDE_SERIALGC 1 +#endif 
// INCLUDE_SERIALGC + +#if INCLUDE_SERIALGC +#define SERIALGC_ONLY(x) x +#define SERIALGC_ONLY_ARG(arg) arg, +#define NOT_SERIALGC(x) +#define NOT_SERIALGC_RETURN /* next token must be ; */ +#define NOT_SERIALGC_RETURN_(code) /* next token must be ; */ +#else +#define SERIALGC_ONLY(x) +#define SERIALGC_ONLY_ARG(arg) +#define NOT_SERIALGC(x) x +#define NOT_SERIALGC_RETURN {} +#define NOT_SERIALGC_RETURN_(code) { return code; } +#endif // INCLUDE_SERIALGC + +#ifndef INCLUDE_SHENANDOAHGC +#define INCLUDE_SHENANDOAHGC 1 +#endif // INCLUDE_SHENANDOAHGC + +#if INCLUDE_SHENANDOAHGC +#define SHENANDOAHGC_ONLY(x) x +#define SHENANDOAHGC_ONLY_ARG(arg) arg, +#define NOT_SHENANDOAHGC(x) +#define NOT_SHENANDOAHGC_RETURN /* next token must be ; */ +#define NOT_SHENANDOAHGC_RETURN_(code) /* next token must be ; */ +#else +#define SHENANDOAHGC_ONLY(x) +#define SHENANDOAHGC_ONLY_ARG(arg) +#define NOT_SHENANDOAHGC(x) x +#define NOT_SHENANDOAHGC_RETURN {} +#define NOT_SHENANDOAHGC_RETURN_(code) { return code; } +#endif // INCLUDE_SHENANDOAHGC + +#ifndef INCLUDE_ZGC +#define INCLUDE_ZGC 1 +#endif // INCLUDE_ZGC + +#if INCLUDE_ZGC +#define ZGC_ONLY(x) x +#define ZGC_ONLY_ARG(arg) arg, +#define NOT_ZGC(x) +#define NOT_ZGC_RETURN /* next token must be ; */ +#define NOT_ZGC_RETURN_(code) /* next token must be ; */ +#else +#define ZGC_ONLY(x) +#define ZGC_ONLY_ARG(arg) +#define NOT_ZGC(x) x +#define NOT_ZGC_RETURN {} +#define NOT_ZGC_RETURN_(code) { return code; } +#endif // INCLUDE_ZGC + +#ifndef INCLUDE_JFR +#define INCLUDE_JFR 1 +#endif + +#if INCLUDE_JFR +#define JFR_ONLY(code) code +#define NOT_JFR_RETURN() /* next token must be ; */ +#define NOT_JFR_RETURN_(code) /* next token must be ; */ +#else +#define JFR_ONLY(code) +#define NOT_JFR_RETURN() {} +#define NOT_JFR_RETURN_(code) { return code; } +#endif + +#ifndef INCLUDE_JVMCI +#define INCLUDE_JVMCI 1 +#endif + +#if INCLUDE_JVMCI +#define JVMCI_ONLY(code) code +#define NOT_JVMCI(code) +#define NOT_JVMCI_RETURN /* next token must be 
; */ +#else +#define JVMCI_ONLY(code) +#define NOT_JVMCI(code) code +#define NOT_JVMCI_RETURN {} +#endif // INCLUDE_JVMCI + +// COMPILER1 variant +#ifdef COMPILER1 +#define COMPILER1_PRESENT(code) code +#define NOT_COMPILER1(code) +#else // COMPILER1 +#define COMPILER1_PRESENT(code) +#define NOT_COMPILER1(code) code +#endif // COMPILER1 + +// COMPILER2 variant +#ifdef COMPILER2 +#define COMPILER2_PRESENT(code) code +#define NOT_COMPILER2(code) +#else // COMPILER2 +#define COMPILER2_PRESENT(code) +#define NOT_COMPILER2(code) code +#endif // COMPILER2 + +// COMPILER2 or JVMCI +#if defined(COMPILER2) || INCLUDE_JVMCI +#define COMPILER2_OR_JVMCI 1 +#define COMPILER2_OR_JVMCI_PRESENT(code) code +#define NOT_COMPILER2_OR_JVMCI(code) +#define NOT_COMPILER2_OR_JVMCI_RETURN /* next token must be ; */ +#define NOT_COMPILER2_OR_JVMCI_RETURN_(code) /* next token must be ; */ +#else +#define COMPILER2_OR_JVMCI 0 +#define COMPILER2_OR_JVMCI_PRESENT(code) +#define NOT_COMPILER2_OR_JVMCI(code) code +#define NOT_COMPILER2_OR_JVMCI_RETURN {} +#define NOT_COMPILER2_OR_JVMCI_RETURN_(code) { return code; } +#endif + +// COMPILER1 and COMPILER2 +#if defined(COMPILER1) && defined(COMPILER2) +#define COMPILER1_AND_COMPILER2 1 +#define COMPILER1_AND_COMPILER2_PRESENT(code) code +#else +#define COMPILER1_AND_COMPILER2 0 +#define COMPILER1_AND_COMPILER2_PRESENT(code) +#endif + +// COMPILER1 or COMPILER2 +#if defined(COMPILER1) || defined(COMPILER2) +#define COMPILER1_OR_COMPILER2 1 +#define COMPILER1_OR_COMPILER2_PRESENT(code) code +#else +#define COMPILER1_OR_COMPILER2 0 +#define COMPILER1_OR_COMPILER2_PRESENT(code) +#endif + + +// PRODUCT variant +#ifdef PRODUCT +#define PRODUCT_ONLY(code) code +#define NOT_PRODUCT(code) +#define NOT_PRODUCT_ARG(arg) +#define PRODUCT_RETURN {} +#define PRODUCT_RETURN0 { return 0; } +#define PRODUCT_RETURN_(code) { code } +#else // PRODUCT +#define PRODUCT_ONLY(code) +#define NOT_PRODUCT(code) code +#define NOT_PRODUCT_ARG(arg) arg, +#define PRODUCT_RETURN 
/*next token must be ;*/ +#define PRODUCT_RETURN0 /*next token must be ;*/ +#define PRODUCT_RETURN_(code) /*next token must be ;*/ +#endif // PRODUCT + +#ifdef CHECK_UNHANDLED_OOPS +#define CHECK_UNHANDLED_OOPS_ONLY(code) code +#define NOT_CHECK_UNHANDLED_OOPS(code) +#else +#define CHECK_UNHANDLED_OOPS_ONLY(code) +#define NOT_CHECK_UNHANDLED_OOPS(code) code +#endif // CHECK_UNHANDLED_OOPS + +#ifdef ASSERT +#define DEBUG_ONLY(code) code +#define NOT_DEBUG(code) +#define NOT_DEBUG_RETURN /*next token must be ;*/ +// Historical. +#define debug_only(code) code +#else // ASSERT +#define DEBUG_ONLY(code) +#define NOT_DEBUG(code) code +#define NOT_DEBUG_RETURN {} +#define debug_only(code) +#endif // ASSERT + +#ifdef _LP64 +#define LP64_ONLY(code) code +#define NOT_LP64(code) +#else // !_LP64 +#define LP64_ONLY(code) +#define NOT_LP64(code) code +#endif // _LP64 + +#ifdef LINUX +#define LINUX_ONLY(code) code +#define NOT_LINUX(code) +#else +#define LINUX_ONLY(code) +#define NOT_LINUX(code) code +#endif + +#ifdef __APPLE__ +#define MACOS_ONLY(code) code +#define NOT_MACOS(code) +#else +#define MACOS_ONLY(code) +#define NOT_MACOS(code) code +#endif + +#ifdef AIX +#define AIX_ONLY(code) code +#define NOT_AIX(code) +#else +#define AIX_ONLY(code) +#define NOT_AIX(code) code +#endif + +#ifdef _WINDOWS +#define WINDOWS_ONLY(code) code +#define NOT_WINDOWS(code) +#else +#define WINDOWS_ONLY(code) +#define NOT_WINDOWS(code) code +#endif + +#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__APPLE__) +#ifndef BSD +#define BSD +#endif // BSD defined in +#define BSD_ONLY(code) code +#define NOT_BSD(code) +#else +#define BSD_ONLY(code) +#define NOT_BSD(code) code +#endif + +#ifdef _WIN64 +#define WIN64_ONLY(code) code +#define NOT_WIN64(code) +#else +#define WIN64_ONLY(code) +#define NOT_WIN64(code) code +#endif + +#if defined(ZERO) +#define ZERO_ONLY(code) code +#define NOT_ZERO(code) +#define NOT_ZERO_RETURN {} +#else +#define ZERO_ONLY(code) +#define 
NOT_ZERO(code) code +#define NOT_ZERO_RETURN +#endif + +#if defined(IA32) || defined(AMD64) +#define X86 +#define X86_ONLY(code) code +#define NOT_X86(code) +#else +#undef X86 +#define X86_ONLY(code) +#define NOT_X86(code) code +#endif + +#ifdef IA32 +#define IA32_ONLY(code) code +#define NOT_IA32(code) +#else +#define IA32_ONLY(code) +#define NOT_IA32(code) code +#endif + +// This is a REALLY BIG HACK, but on AIX unconditionally defines IA64. +// At least on AIX 7.1 this is a real problem because 'systemcfg.h' is indirectly included +// by 'pthread.h' and other common system headers. + +#if defined(IA64) && !defined(AIX) +#define IA64_ONLY(code) code +#define NOT_IA64(code) +#else +#define IA64_ONLY(code) +#define NOT_IA64(code) code +#endif + +#ifdef AMD64 +#define AMD64_ONLY(code) code +#define NOT_AMD64(code) +#else +#define AMD64_ONLY(code) +#define NOT_AMD64(code) code +#endif + +#ifdef S390 +#define S390_ONLY(code) code +#define NOT_S390(code) +#else +#define S390_ONLY(code) +#define NOT_S390(code) code +#endif + +#if defined(PPC32) || defined(PPC64) +#ifndef PPC +#define PPC +#endif +#define PPC_ONLY(code) code +#define NOT_PPC(code) +#else +#undef PPC +#define PPC_ONLY(code) +#define NOT_PPC(code) code +#endif + +#ifdef PPC32 +#define PPC32_ONLY(code) code +#define NOT_PPC32(code) +#else +#define PPC32_ONLY(code) +#define NOT_PPC32(code) code +#endif + +#ifdef PPC64 +#define PPC64_ONLY(code) code +#define NOT_PPC64(code) +#else +#define PPC64_ONLY(code) +#define NOT_PPC64(code) code +#endif + +#ifdef E500V2 +#define E500V2_ONLY(code) code +#define NOT_E500V2(code) +#else +#define E500V2_ONLY(code) +#define NOT_E500V2(code) code +#endif + +// Note: There are two ARM ports. They set the following in the makefiles: +// 1. 32-bit port: -DARM -DARM32 -DTARGET_ARCH_arm +// 2. 
64-bit port: -DAARCH64 -D_LP64 -DTARGET_ARCH_aarch64 +#ifdef ARM +#define ARM_ONLY(code) code +#define NOT_ARM(code) +#else +#define ARM_ONLY(code) +#define NOT_ARM(code) code +#endif + +#ifdef ARM32 +#define ARM32_ONLY(code) code +#define NOT_ARM32(code) +#else +#define ARM32_ONLY(code) +#define NOT_ARM32(code) code +#endif + +#ifdef AARCH64 +#define AARCH64_ONLY(code) code +#define NOT_AARCH64(code) +#else +#define AARCH64_ONLY(code) +#define NOT_AARCH64(code) code +#endif + +#ifdef TARGET_ARCH_aarch64 +#define AARCH64_PORT_ONLY(code) code +#else +#define AARCH64_PORT_ONLY(code) +#endif + +#define MACOS_AARCH64_ONLY(x) MACOS_ONLY(AARCH64_ONLY(x)) + +#if defined(RISCV32) || defined(RISCV64) +#define RISCV +#define RISCV_ONLY(code) code +#define NOT_RISCV(code) +#else +#undef RISCV +#define RISCV_ONLY(code) +#define NOT_RISCV(code) code +#endif + +#ifdef RISCV32 +#define RISCV32_ONLY(code) code +#define NOT_RISCV32(code) +#else +#define RISCV32_ONLY(code) +#define NOT_RISCV32(code) code +#endif + +#ifdef RISCV64 +#define RISCV64_ONLY(code) code +#define NOT_RISCV64(code) +#else +#define RISCV64_ONLY(code) +#define NOT_RISCV64(code) code +#endif + +#ifdef VM_LITTLE_ENDIAN +#define LITTLE_ENDIAN_ONLY(code) code +#define BIG_ENDIAN_ONLY(code) +#else +#define LITTLE_ENDIAN_ONLY(code) +#define BIG_ENDIAN_ONLY(code) code +#endif + +#define define_pd_global(type, name, value) const type pd_##name = value; + +// Helper macros for constructing file names for includes. +#define CPU_HEADER_STEM(basename) PASTE_TOKENS(basename, INCLUDE_SUFFIX_CPU) +#define OS_HEADER_STEM(basename) PASTE_TOKENS(basename, INCLUDE_SUFFIX_OS) +#define OS_CPU_HEADER_STEM(basename) PASTE_TOKENS(basename, PASTE_TOKENS(INCLUDE_SUFFIX_OS, INCLUDE_SUFFIX_CPU)) +#define COMPILER_HEADER_STEM(basename) PASTE_TOKENS(basename, INCLUDE_SUFFIX_COMPILER) + +// Include platform dependent files. 
+// +// This macro constructs from basename and INCLUDE_SUFFIX_OS / +// INCLUDE_SUFFIX_CPU / INCLUDE_SUFFIX_COMPILER, which are set on +// the command line, the name of platform dependent files to be included. +// Example: INCLUDE_SUFFIX_OS=_linux / INCLUDE_SUFFIX_CPU=_x86 +// CPU_HEADER_INLINE(macroAssembler) --> macroAssembler_x86.inline.hpp +// OS_CPU_HEADER(vmStructs) --> vmStructs_linux_x86.hpp +// +// basename.hpp / basename.inline.hpp +#define CPU_HEADER_H(basename) XSTR(CPU_HEADER_STEM(basename).h) +#define CPU_HEADER(basename) XSTR(CPU_HEADER_STEM(basename).hpp) +#define CPU_HEADER_INLINE(basename) XSTR(CPU_HEADER_STEM(basename).inline.hpp) +// basename.hpp / basename.inline.hpp +#define OS_HEADER_H(basename) XSTR(OS_HEADER_STEM(basename).h) +#define OS_HEADER(basename) XSTR(OS_HEADER_STEM(basename).hpp) +#define OS_HEADER_INLINE(basename) XSTR(OS_HEADER_STEM(basename).inline.hpp) +// basename.hpp / basename.inline.hpp +#define OS_CPU_HEADER(basename) XSTR(OS_CPU_HEADER_STEM(basename).hpp) +#define OS_CPU_HEADER_INLINE(basename) XSTR(OS_CPU_HEADER_STEM(basename).inline.hpp) +// basename.hpp / basename.inline.hpp +#define COMPILER_HEADER(basename) XSTR(COMPILER_HEADER_STEM(basename).hpp) +#define COMPILER_HEADER_INLINE(basename) XSTR(COMPILER_HEADER_STEM(basename).inline.hpp) + +#if INCLUDE_CDS && INCLUDE_G1GC && defined(_LP64) && !defined(_WINDOWS) +#define INCLUDE_CDS_JAVA_HEAP 1 +#define CDS_JAVA_HEAP_ONLY(x) x +#define NOT_CDS_JAVA_HEAP(x) +#define NOT_CDS_JAVA_HEAP_RETURN +#define NOT_CDS_JAVA_HEAP_RETURN_(code) +#else +#define INCLUDE_CDS_JAVA_HEAP 0 +#define CDS_JAVA_HEAP_ONLY(x) +#define NOT_CDS_JAVA_HEAP(x) x +#define NOT_CDS_JAVA_HEAP_RETURN {} +#define NOT_CDS_JAVA_HEAP_RETURN_(code) { return code; } +#endif + +#endif // SHARE_UTILITIES_MACROS_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/ostream.cpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/ostream.cpp new 
file mode 100644 index 000000000000..7598cdccc705 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/ostream.cpp @@ -0,0 +1,1148 @@ +/* + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef NATIVE_IMAGE +#include "precompiled.hpp" +#include "cds/classListWriter.hpp" +#include "compiler/compileLog.hpp" +#include "jvm.h" +#endif // !NATIVE_IMAGE +#include "memory/allocation.inline.hpp" +#ifndef NATIVE_IMAGE +#include "oops/oop.inline.hpp" +#include "runtime/arguments.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/orderAccess.hpp" +#include "runtime/os.inline.hpp" +#include "runtime/safepoint.hpp" +#include "runtime/vm_version.hpp" +#include "utilities/defaultStream.hpp" +#endif // !NATIVE_IMAGE +#include "utilities/macros.hpp" +#include "utilities/ostream.hpp" +#ifndef NATIVE_IMAGE +#include "utilities/vmError.hpp" +#include "utilities/xmlstream.hpp" + +// Declarations of jvm methods +extern "C" void jio_print(const char* s, size_t len); +extern "C" int jio_printf(const char *fmt, ...); + +outputStream::outputStream() { + _position = 0; + _precount = 0; + _indentation = 0; + _scratch = nullptr; + _scratch_len = 0; +} + +outputStream::outputStream(bool has_time_stamps) { + _position = 0; + _precount = 0; + _indentation = 0; + _scratch = nullptr; + _scratch_len = 0; + if (has_time_stamps) _stamp.update(); +} + +bool outputStream::update_position(const char* s, size_t len) { + bool saw_newline = false; + for (size_t i = 0; i < len; i++) { + char ch = s[i]; + if (ch == '\n') { + saw_newline = true; + _precount += _position + 1; + _position = 0; + } else if (ch == '\t') { + int tw = 8 - (_position & 7); + _position += tw; + _precount -= tw-1; // invariant: _precount + _position == total count + } else { + _position += 1; + } + } + return saw_newline; +} + +// Execute a vsprintf, using the given buffer if necessary. +// Return a pointer to the formatted string. 
+const char* outputStream::do_vsnprintf(char* buffer, size_t buflen, + const char* format, va_list ap, + bool add_cr, + size_t& result_len) { + assert(buflen >= 2, "buffer too small"); + + const char* result; + if (add_cr) buflen--; + if (!strchr(format, '%')) { + // constant format string + result = format; + result_len = strlen(result); + if (add_cr && result_len >= buflen) result_len = buflen-1; // truncate + } else if (format[0] == '%' && format[1] == 's' && format[2] == '\0') { + // trivial copy-through format string + result = va_arg(ap, const char*); + result_len = strlen(result); + if (add_cr && result_len >= buflen) result_len = buflen-1; // truncate + } else { + int required_len = os::vsnprintf(buffer, buflen, format, ap); + assert(required_len >= 0, "vsnprintf encoding error"); + result = buffer; + if ((size_t)required_len < buflen) { + result_len = required_len; + } else { + DEBUG_ONLY(warning("outputStream::do_vsnprintf output truncated -- buffer length is %d bytes but %d bytes are needed.", + add_cr ? (int)buflen + 1 : (int)buflen, add_cr ? 
required_len + 2 : required_len + 1);) + result_len = buflen - 1; + } + } + if (add_cr) { + if (result != buffer) { + memcpy(buffer, result, result_len); + result = buffer; + } + buffer[result_len++] = '\n'; + buffer[result_len] = 0; + } + return result; +} + +void outputStream::do_vsnprintf_and_write_with_automatic_buffer(const char* format, va_list ap, bool add_cr) { + char buffer[O_BUFLEN]; + size_t len; + const char* str = do_vsnprintf(buffer, sizeof(buffer), format, ap, add_cr, len); + write(str, len); +} + +void outputStream::do_vsnprintf_and_write_with_scratch_buffer(const char* format, va_list ap, bool add_cr) { + size_t len; + const char* str = do_vsnprintf(_scratch, _scratch_len, format, ap, add_cr, len); + write(str, len); +} + +void outputStream::do_vsnprintf_and_write(const char* format, va_list ap, bool add_cr) { + if (_scratch) { + do_vsnprintf_and_write_with_scratch_buffer(format, ap, add_cr); + } else { + do_vsnprintf_and_write_with_automatic_buffer(format, ap, add_cr); + } +} + +void outputStream::print(const char* format, ...) { + va_list ap; + va_start(ap, format); + do_vsnprintf_and_write(format, ap, false); + va_end(ap); +} + +void outputStream::print_cr(const char* format, ...) 
{ + va_list ap; + va_start(ap, format); + do_vsnprintf_and_write(format, ap, true); + va_end(ap); +} + +void outputStream::vprint(const char *format, va_list argptr) { + do_vsnprintf_and_write(format, argptr, false); +} + +void outputStream::vprint_cr(const char* format, va_list argptr) { + do_vsnprintf_and_write(format, argptr, true); +} + +void outputStream::fill_to(int col) { + int need_fill = col - position(); + sp(need_fill); +} + +void outputStream::move_to(int col, int slop, int min_space) { + if (position() >= col + slop) + cr(); + int need_fill = col - position(); + if (need_fill < min_space) + need_fill = min_space; + sp(need_fill); +} + +void outputStream::put(char ch) { + assert(ch != 0, "please fix call site"); + char buf[] = { ch, '\0' }; + write(buf, 1); +} + +void outputStream::sp(int count) { + if (count < 0) return; + + while (count > 0) { + int nw = (count > 8) ? 8 : count; + this->write(" ", nw); + count -= nw; + } +} + +void outputStream::cr() { + this->write("\n", 1); +} + +void outputStream::cr_indent() { + cr(); indent(); +} + +void outputStream::stamp() { + if (! _stamp.is_updated()) { + _stamp.update(); // start at 0 on first call to stamp() + } + + // outputStream::stamp() may get called by ostream_abort(), use snprintf + // to avoid allocating large stack buffer in print(). 
+ char buf[40]; + jio_snprintf(buf, sizeof(buf), "%.3f", _stamp.seconds()); + print_raw(buf); +} + +void outputStream::stamp(bool guard, + const char* prefix, + const char* suffix) { + if (!guard) { + return; + } + print_raw(prefix); + stamp(); + print_raw(suffix); +} + +void outputStream::date_stamp(bool guard, + const char* prefix, + const char* suffix) { + if (!guard) { + return; + } + print_raw(prefix); + static const char error_time[] = "yyyy-mm-ddThh:mm:ss.mmm+zzzz"; + static const int buffer_length = 32; + char buffer[buffer_length]; + const char* iso8601_result = os::iso8601_time(buffer, buffer_length); + if (iso8601_result != nullptr) { + print_raw(buffer); + } else { + print_raw(error_time); + } + print_raw(suffix); + return; +} + +outputStream& outputStream::indent() { + sp(_indentation - _position); + return *this; +} + +void outputStream::print_jlong(jlong value) { + print(JLONG_FORMAT, value); +} + +void outputStream::print_julong(julong value) { + print(JULONG_FORMAT, value); +} + +/** + * This prints out hex data in a 'windbg' or 'xxd' form, where each line is: + * : 8 * + * example: + * 0000000: 7f44 4f46 0102 0102 0000 0000 0000 0000 .DOF............ + * 0000010: 0000 0000 0000 0040 0000 0020 0000 0005 .......@... .... + * 0000020: 0000 0000 0000 0040 0000 0000 0000 015d .......@.......] + * ... + * + * indent is applied to each line. Ends with a CR. 
+ */ +void outputStream::print_data(void* data, size_t len, bool with_ascii, bool rel_addr) { + size_t limit = (len + 16) / 16 * 16; + for (size_t i = 0; i < limit; ++i) { + if (i % 16 == 0) { + if (rel_addr) { + indent().print("%07" PRIxPTR ":", i); + } else { + indent().print(PTR_FORMAT ":", p2i((unsigned char*)data + i)); + } + } + if (i % 2 == 0) { + print(" "); + } + if (i < len) { + print("%02x", ((unsigned char*)data)[i]); + } else { + print(" "); + } + if ((i + 1) % 16 == 0) { + if (with_ascii) { + print(" "); + for (size_t j = 0; j < 16; ++j) { + size_t idx = i + j - 15; + if (idx < len) { + char c = ((char*)data)[idx]; + print("%c", c >= 32 && c <= 126 ? c : '.'); + } + } + } + cr(); + } + } +} +#endif // !NATIVE_IMAGE + +stringStream::stringStream(size_t initial_capacity) : + outputStream(), + _buffer(_small_buffer), + _written(0), + _capacity(sizeof(_small_buffer)), + _is_fixed(false) +{ + if (initial_capacity > _capacity) { + grow(initial_capacity); + } + zero_terminate(); +} + +#ifndef NATIVE_IMAGE +// useful for output to fixed chunks of memory, such as performance counters +stringStream::stringStream(char* fixed_buffer, size_t fixed_buffer_size) : + outputStream(), + _buffer(fixed_buffer), + _written(0), + _capacity(fixed_buffer_size), + _is_fixed(true) +{ + zero_terminate(); +} +#endif // !NATIVE_IMAGE + +// Grow backing buffer to desired capacity. 
Don't call for fixed buffers +void stringStream::grow(size_t new_capacity) { + assert(!_is_fixed, "Don't call for caller provided buffers"); + assert(new_capacity > _capacity, "Sanity"); + assert(new_capacity > sizeof(_small_buffer), "Sanity"); + if (_buffer == _small_buffer) { + _buffer = NEW_C_HEAP_ARRAY(char, new_capacity, mtInternal); + _capacity = new_capacity; + if (_written > 0) { + ::memcpy(_buffer, _small_buffer, _written); + } + zero_terminate(); + } else { + _buffer = REALLOC_C_HEAP_ARRAY(char, _buffer, new_capacity, mtInternal); + _capacity = new_capacity; + } +} + +void stringStream::write(const char* s, size_t len) { + assert(_is_frozen == false, "Modification forbidden"); + assert(_capacity >= _written + 1, "Sanity"); + if (len == 0) { + return; + } + const size_t reasonable_max_len = 1 * G; + if (len >= reasonable_max_len) { + assert(false, "bad length? (" SIZE_FORMAT ")", len); + return; + } + size_t write_len = 0; + if (_is_fixed) { + write_len = MIN2(len, _capacity - _written - 1); + } else { + write_len = len; + size_t needed = _written + len + 1; + if (needed > _capacity) { + grow(MAX2(needed, _capacity * 2)); + } + } + assert(_written + write_len + 1 <= _capacity, "stringStream oob"); + if (write_len > 0) { + ::memcpy(_buffer + _written, s, write_len); + _written += write_len; + zero_terminate(); + } + +#ifndef NATIVE_IMAGE + // Note that the following does not depend on write_len. + // This means that position and count get updated + // even when overflow occurs. + update_position(s, len); +#endif // !NATIVE_IMAGE +} + +void stringStream::zero_terminate() { + assert(_buffer != nullptr && + _written < _capacity, "sanity"); + _buffer[_written] = '\0'; +} + +#ifndef NATIVE_IMAGE +void stringStream::reset() { + assert(_is_frozen == false, "Modification forbidden"); + _written = 0; _precount = 0; _position = 0; + zero_terminate(); +} + +char* stringStream::as_string(bool c_heap) const { + char* copy = c_heap ? 
+ NEW_C_HEAP_ARRAY(char, _written + 1, mtInternal) : NEW_RESOURCE_ARRAY(char, _written + 1); + ::memcpy(copy, _buffer, _written); + copy[_written] = 0; // terminating null + if (c_heap) { + // Need to ensure our content is written to memory before we return + // the pointer to it. + OrderAccess::storestore(); + } + return copy; +} +#endif // !NATIVE_IMAGE + +stringStream::~stringStream() { + if (!_is_fixed && _buffer != _small_buffer) { + FREE_C_HEAP_ARRAY(char, _buffer); + } +} + +#ifndef NATIVE_IMAGE +// tty needs to be always accessible since there are code paths that may write to it +// outside of the VM lifespan. +// Examples for pre-VM-init accesses: Early NMT init, Early UL init +// Examples for post-VM-exit accesses: many, e.g. NMT C-heap bounds checker, signal handling, AGCT, ... +// During lifetime tty is served by an instance of defaultStream. That instance's deletion cannot +// be (easily) postponed or omitted since it has ties to the JVM infrastructure. +// The policy followed here is a compromise reached during review of JDK-8292351: +// - pre-init: we silently swallow all output. 
We won't see anything, but at least won't crash +// - post-exit: we write to a simple fdStream, but somewhat mimic the behavior of the real defaultStream +static nullStream tty_preinit_stream; +outputStream* tty = &tty_preinit_stream; + +xmlStream* xtty; + +#define EXTRACHARLEN 32 +#define CURRENTAPPX ".current" +// convert YYYY-MM-DD HH:MM:SS to YYYY-MM-DD_HH-MM-SS +char* get_datetime_string(char *buf, size_t len) { + os::local_time_string(buf, len); + int i = (int)strlen(buf); + while (--i >= 0) { + if (buf[i] == ' ') buf[i] = '_'; + else if (buf[i] == ':') buf[i] = '-'; + } + return buf; +} + +static const char* make_log_name_internal(const char* log_name, const char* force_directory, + int pid, const char* tms) { + const char* basename = log_name; + char file_sep = os::file_separator()[0]; + const char* cp; + char pid_text[32]; + + for (cp = log_name; *cp != '\0'; cp++) { + if (*cp == '/' || *cp == file_sep) { + basename = cp + 1; + } + } + const char* nametail = log_name; + // Compute buffer length + size_t buffer_length; + if (force_directory != nullptr) { + buffer_length = strlen(force_directory) + strlen(os::file_separator()) + + strlen(basename) + 1; + } else { + buffer_length = strlen(log_name) + 1; + } + + const char* pts = strstr(basename, "%p"); + int pid_pos = (pts == nullptr) ? -1 : (pts - nametail); + + if (pid_pos >= 0) { + jio_snprintf(pid_text, sizeof(pid_text), "pid%u", pid); + buffer_length += strlen(pid_text); + } + + pts = strstr(basename, "%t"); + int tms_pos = (pts == nullptr) ? -1 : (pts - nametail); + if (tms_pos >= 0) { + buffer_length += strlen(tms); + } + + // File name is too long. + if (buffer_length > JVM_MAXPATHLEN) { + return nullptr; + } + + // Create big enough buffer. 
+ char *buf = NEW_C_HEAP_ARRAY(char, buffer_length, mtInternal); + + strcpy(buf, ""); + if (force_directory != nullptr) { + strcat(buf, force_directory); + strcat(buf, os::file_separator()); + nametail = basename; // completely skip directory prefix + } + + // who is first, %p or %t? + int first = -1, second = -1; + const char *p1st = nullptr; + const char *p2nd = nullptr; + + if (pid_pos >= 0 && tms_pos >= 0) { + // contains both %p and %t + if (pid_pos < tms_pos) { + // case foo%pbar%tmonkey.log + first = pid_pos; + p1st = pid_text; + second = tms_pos; + p2nd = tms; + } else { + // case foo%tbar%pmonkey.log + first = tms_pos; + p1st = tms; + second = pid_pos; + p2nd = pid_text; + } + } else if (pid_pos >= 0) { + // contains %p only + first = pid_pos; + p1st = pid_text; + } else if (tms_pos >= 0) { + // contains %t only + first = tms_pos; + p1st = tms; + } + + int buf_pos = (int)strlen(buf); + const char* tail = nametail; + + if (first >= 0) { + tail = nametail + first + 2; + strncpy(&buf[buf_pos], nametail, first); + strcpy(&buf[buf_pos + first], p1st); + buf_pos = (int)strlen(buf); + if (second >= 0) { + strncpy(&buf[buf_pos], tail, second - first - 2); + strcpy(&buf[buf_pos + second - first - 2], p2nd); + tail = nametail + second + 2; + } + } + strcat(buf, tail); // append rest of name, or all of name + return buf; +} + +// log_name comes from -XX:LogFile=log_name or +// -XX:DumpLoadedClassList= +// in log_name, %p => pid1234 and +// %t => YYYY-MM-DD_HH-MM-SS +const char* make_log_name(const char* log_name, const char* force_directory) { + char timestr[32]; + get_datetime_string(timestr, sizeof(timestr)); + return make_log_name_internal(log_name, force_directory, os::current_process_id(), + timestr); +} + +fileStream::fileStream(const char* file_name) { + _file = os::fopen(file_name, "w"); + if (_file != nullptr) { + _need_close = true; + } else { + warning("Cannot open file %s due to %s\n", file_name, os::strerror(errno)); + _need_close = false; + } +} + 
+fileStream::fileStream(const char* file_name, const char* opentype) { + _file = os::fopen(file_name, opentype); + if (_file != nullptr) { + _need_close = true; + } else { + warning("Cannot open file %s due to %s\n", file_name, os::strerror(errno)); + _need_close = false; + } +} + +void fileStream::write(const char* s, size_t len) { + if (_file != nullptr) { + // Make an unused local variable to avoid warning from gcc compiler. + size_t count = fwrite(s, 1, len, _file); + update_position(s, len); + } +} + +long fileStream::fileSize() { + long size = -1; + if (_file != nullptr) { + long pos = ::ftell(_file); + if (pos < 0) return pos; + if (::fseek(_file, 0, SEEK_END) == 0) { + size = ::ftell(_file); + } + ::fseek(_file, pos, SEEK_SET); + } + return size; +} + +char* fileStream::readln(char *data, int count ) { + char * ret = nullptr; + if (_file != nullptr) { + ret = ::fgets(data, count, _file); + // Get rid of annoying \n char only if it is present. + size_t len = ::strlen(data); + if (len > 0 && data[len - 1] == '\n') { + data[len - 1] = '\0'; + } + } + return ret; +} + +fileStream::~fileStream() { + if (_file != nullptr) { + if (_need_close) fclose(_file); + _file = nullptr; + } +} + +void fileStream::flush() { + if (_file != nullptr) { + fflush(_file); + } +} + +fdStream fdStream::_stdout_stream(1); +fdStream fdStream::_stderr_stream(2); + +void fdStream::write(const char* s, size_t len) { + if (_fd != -1) { + // Make an unused local variable to avoid warning from gcc compiler. 
+ ssize_t count = ::write(_fd, s, (int)len); + update_position(s, len); + } +} + +defaultStream* defaultStream::instance = nullptr; +int defaultStream::_output_fd = 1; +int defaultStream::_error_fd = 2; +FILE* defaultStream::_output_stream = stdout; +FILE* defaultStream::_error_stream = stderr; + +#define LOG_MAJOR_VERSION 160 +#define LOG_MINOR_VERSION 1 + +void defaultStream::init() { + _inited = true; + if (LogVMOutput || LogCompilation) { + init_log(); + } +} + +bool defaultStream::has_log_file() { + // lazily create log file (at startup, LogVMOutput is false even + // if +LogVMOutput is used, because the flags haven't been parsed yet) + // For safer printing during fatal error handling, do not init logfile + // if a VM error has been reported. + if (!_inited && !VMError::is_error_reported()) init(); + return _log_file != nullptr; +} + +fileStream* defaultStream::open_file(const char* log_name) { + const char* try_name = make_log_name(log_name, nullptr); + if (try_name == nullptr) { + warning("Cannot open file %s: file name is too long.\n", log_name); + return nullptr; + } + + fileStream* file = new (mtInternal) fileStream(try_name); + FREE_C_HEAP_ARRAY(char, try_name); + if (file->is_open()) { + return file; + } + + // Try again to open the file in the temp directory. + delete file; + // Note: This feature is for maintainer use only. No need for L10N. + jio_printf("Warning: Cannot open log file: %s\n", log_name); + try_name = make_log_name(log_name, os::get_temp_directory()); + if (try_name == nullptr) { + warning("Cannot open file %s: file name is too long for directory %s.\n", log_name, os::get_temp_directory()); + return nullptr; + } + + jio_printf("Warning: Forcing option -XX:LogFile=%s\n", try_name); + + file = new (mtInternal) fileStream(try_name); + FREE_C_HEAP_ARRAY(char, try_name); + if (file->is_open()) { + return file; + } + + delete file; + return nullptr; +} + +void defaultStream::init_log() { + // %%% Need a MutexLocker? 
+ const char* log_name = LogFile != nullptr ? LogFile : "hotspot_%p.log"; + fileStream* file = open_file(log_name); + + if (file != nullptr) { + _log_file = file; + _outer_xmlStream = new(mtInternal) xmlStream(file); + start_log(); + } else { + // and leave xtty as null + LogVMOutput = false; + DisplayVMOutput = true; + LogCompilation = false; + } +} + +void defaultStream::start_log() { + xmlStream*xs = _outer_xmlStream; + if (this == tty) xtty = xs; + // Write XML header. + xs->print_cr(""); + // (For now, don't bother to issue a DTD for this private format.) + + // Calculate the start time of the log as ms since the epoch: this is + // the current time in ms minus the uptime in ms. + jlong time_ms = os::javaTimeMillis() - tty->time_stamp().milliseconds(); + xs->head("hotspot_log version='%d %d'" + " process='%d' time_ms='" INT64_FORMAT "'", + LOG_MAJOR_VERSION, LOG_MINOR_VERSION, + os::current_process_id(), (int64_t)time_ms); + // Write VM version header immediately. + xs->head("vm_version"); + xs->head("name"); xs->text("%s", VM_Version::vm_name()); xs->cr(); + xs->tail("name"); + xs->head("release"); xs->text("%s", VM_Version::vm_release()); xs->cr(); + xs->tail("release"); + xs->head("info"); xs->text("%s", VM_Version::internal_vm_info_string()); xs->cr(); + xs->tail("info"); + xs->tail("vm_version"); + // Record information about the command-line invocation. + xs->head("vm_arguments"); // Cf. 
Arguments::print_on() + if (Arguments::num_jvm_flags() > 0) { + xs->head("flags"); + Arguments::print_jvm_flags_on(xs->text()); + xs->tail("flags"); + } + if (Arguments::num_jvm_args() > 0) { + xs->head("args"); + Arguments::print_jvm_args_on(xs->text()); + xs->tail("args"); + } + if (Arguments::java_command() != nullptr) { + xs->head("command"); xs->text()->print_cr("%s", Arguments::java_command()); + xs->tail("command"); + } + if (Arguments::sun_java_launcher() != nullptr) { + xs->head("launcher"); xs->text()->print_cr("%s", Arguments::sun_java_launcher()); + xs->tail("launcher"); + } + if (Arguments::system_properties() != nullptr) { + xs->head("properties"); + // Print it as a java-style property list. + // System properties don't generally contain newlines, so don't bother with unparsing. + outputStream *text = xs->text(); + for (SystemProperty* p = Arguments::system_properties(); p != nullptr; p = p->next()) { + assert(p->key() != nullptr, "p->key() is null"); + if (p->readable()) { + // Print in two stages to avoid problems with long + // keys/values. + text->print_raw(p->key()); + text->put('='); + assert(p->value() != nullptr, "p->value() is null"); + text->print_raw_cr(p->value()); + } + } + xs->tail("properties"); + } + xs->tail("vm_arguments"); + // tty output per se is grouped under the ... element. + xs->head("tty"); + // All further non-markup text gets copied to the tty: + xs->_text = this; // requires friend declaration! +} + +// finish_log() is called during normal VM shutdown. finish_log_on_error() is +// called by ostream_abort() after a fatal error. 
+// +void defaultStream::finish_log() { + xmlStream* xs = _outer_xmlStream; + xs->done("tty"); + + // Other log forks are appended here, at the End of Time: + CompileLog::finish_log(xs->out()); // write compile logging, if any, now + + xs->done("hotspot_log"); + xs->flush(); + + fileStream* file = _log_file; + _log_file = nullptr; + + delete _outer_xmlStream; + _outer_xmlStream = nullptr; + + file->flush(); + delete file; +} + +void defaultStream::finish_log_on_error(char *buf, int buflen) { + xmlStream* xs = _outer_xmlStream; + + if (xs && xs->out()) { + + xs->done_raw("tty"); + + // Other log forks are appended here, at the End of Time: + CompileLog::finish_log_on_error(xs->out(), buf, buflen); // write compile logging, if any, now + + xs->done_raw("hotspot_log"); + xs->flush(); + + fileStream* file = _log_file; + _log_file = nullptr; + _outer_xmlStream = nullptr; + + if (file) { + file->flush(); + + // Can't delete or close the file because delete and fclose aren't + // async-safe. We are about to die, so leave it to the kernel. + // delete file; + } + } +} + +intx defaultStream::hold(intx writer_id) { + bool has_log = has_log_file(); // check before locking + if (// impossible, but who knows? 
+ writer_id == NO_WRITER || + + // bootstrap problem + tty_lock == nullptr || + + // can't grab a lock if current Thread isn't set + Thread::current_or_null() == nullptr || + + // developer hook + !SerializeVMOutput || + + // VM already unhealthy + VMError::is_error_reported() || + + // safepoint == global lock (for VM only) + (SafepointSynchronize::is_synchronizing() && + Thread::current()->is_VM_thread()) + ) { + // do not attempt to lock unless we know the thread and the VM is healthy + return NO_WRITER; + } + if (_writer == writer_id) { + // already held, no need to re-grab the lock + return NO_WRITER; + } + tty_lock->lock_without_safepoint_check(); + // got the lock + if (writer_id != _last_writer) { + if (has_log) { + _log_file->bol(); + // output a hint where this output is coming from: + _log_file->print_cr("", writer_id); + } + _last_writer = writer_id; + } + _writer = writer_id; + return writer_id; +} + +void defaultStream::release(intx holder) { + if (holder == NO_WRITER) { + // nothing to release: either a recursive lock, or we scribbled (too bad) + return; + } + if (_writer != holder) { + return; // already unlocked, perhaps via break_tty_lock_for_safepoint + } + _writer = NO_WRITER; + tty_lock->unlock(); +} + +void defaultStream::write(const char* s, size_t len) { + intx thread_id = os::current_thread_id(); + intx holder = hold(thread_id); + + if (DisplayVMOutput && + (_outer_xmlStream == nullptr || !_outer_xmlStream->inside_attrs())) { + // print to output stream. 
It can be redirected by a vfprintf hook + jio_print(s, len); + } + + // print to log file + if (has_log_file() && _outer_xmlStream != nullptr) { + _outer_xmlStream->write_text(s, len); + bool nl = update_position(s, len); + // flush the log file too, if there were any newlines + if (nl) { + flush(); + } + } else { + update_position(s, len); + } + + release(holder); +} + +intx ttyLocker::hold_tty() { + if (defaultStream::instance == nullptr) return defaultStream::NO_WRITER; + intx thread_id = os::current_thread_id(); + return defaultStream::instance->hold(thread_id); +} + +void ttyLocker::release_tty(intx holder) { + if (holder == defaultStream::NO_WRITER) return; + defaultStream::instance->release(holder); +} + +bool ttyLocker::release_tty_if_locked() { + intx thread_id = os::current_thread_id(); + if (defaultStream::instance->writer() == thread_id) { + // release the lock and return true so callers know if was + // previously held. + release_tty(thread_id); + return true; + } + return false; +} + +void ttyLocker::break_tty_lock_for_safepoint(intx holder) { + if (defaultStream::instance != nullptr && + defaultStream::instance->writer() == holder) { + if (xtty != nullptr) { + xtty->print_cr(""); + } + defaultStream::instance->release(holder); + } + // (else there was no lock to break) +} + +void ostream_init() { + if (defaultStream::instance == nullptr) { + defaultStream::instance = new(mtInternal) defaultStream(); + tty = defaultStream::instance; + + // We want to ensure that time stamps in GC logs consider time 0 + // the time when the JVM is initialized, not the first time we ask + // for a time stamp. So, here, we explicitly update the time stamp + // of tty. 
+ tty->time_stamp().update_to(1); + } +} + +void ostream_init_log() { + // Note : this must be called AFTER ostream_init() + + ClassListWriter::init(); + + // If we haven't lazily initialized the logfile yet, do it now, + // to avoid the possibility of lazy initialization during a VM + // crash, which can affect the stability of the fatal error handler. + defaultStream::instance->has_log_file(); +} + +// ostream_exit() is called during normal VM exit to finish log files, flush +// output and free resource. +void ostream_exit() { + static bool ostream_exit_called = false; + if (ostream_exit_called) return; + ostream_exit_called = true; + ClassListWriter::delete_classlist(); + // Make sure tty works after VM exit by assigning an always-on functioning fdStream. + outputStream* tmp = tty; + tty = DisplayVMOutputToStderr ? fdStream::stdout_stream() : fdStream::stderr_stream(); + if (tmp != &tty_preinit_stream && tmp != defaultStream::instance) { + delete tmp; + } + delete defaultStream::instance; + xtty = nullptr; + defaultStream::instance = nullptr; +} + +// ostream_abort() is called by os::abort() when VM is about to die. +void ostream_abort() { + // Here we can't delete tty, just flush its output + if (tty) tty->flush(); + + if (defaultStream::instance != nullptr) { + static char buf[4096]; + defaultStream::instance->finish_log_on_error(buf, sizeof(buf)); + } +} + +bufferedStream::bufferedStream(size_t initial_size, size_t bufmax) : outputStream() { + buffer_length = initial_size; + buffer = NEW_C_HEAP_ARRAY(char, buffer_length, mtInternal); + buffer_pos = 0; + buffer_max = bufmax; + truncated = false; +} + +void bufferedStream::write(const char* s, size_t len) { + + if (truncated) { + return; + } + + if(buffer_pos + len > buffer_max) { + flush(); // Note: may be a noop. + } + + size_t end = buffer_pos + len; + if (end >= buffer_length) { + // For small overruns, double the buffer. For larger ones, + // increase to the requested size. 
+ if (end < buffer_length * 2) { + end = buffer_length * 2; + } + // Impose a cap beyond which the buffer cannot grow - a size which + // in all probability indicates a real error, e.g. faulty printing + // code looping, while not affecting cases of just-very-large-but-its-normal + // output. + const size_t reasonable_cap = MAX2(100 * M, buffer_max * 2); + if (end > reasonable_cap) { + // In debug VM, assert right away. + assert(false, "Exceeded max buffer size for this string."); + // Release VM: silently truncate. We do this since these kind of errors + // are both difficult to predict with testing (depending on logging content) + // and usually not serious enough to kill a production VM for it. + end = reasonable_cap; + size_t remaining = end - buffer_pos; + if (len >= remaining) { + len = remaining - 1; + truncated = true; + } + } + if (buffer_length < end) { + buffer = REALLOC_C_HEAP_ARRAY(char, buffer, end, mtInternal); + buffer_length = end; + } + } + if (len > 0) { + memcpy(buffer + buffer_pos, s, len); + buffer_pos += len; + update_position(s, len); + } +} + +char* bufferedStream::as_string() { + char* copy = NEW_RESOURCE_ARRAY(char, buffer_pos+1); + strncpy(copy, buffer, buffer_pos); + copy[buffer_pos] = 0; // terminating null + return copy; +} + +bufferedStream::~bufferedStream() { + FREE_C_HEAP_ARRAY(char, buffer); +} + +#ifndef PRODUCT + +#if defined(LINUX) || defined(AIX) || defined(_ALLBSD_SOURCE) +#include +#include +#include +#include +#include +#elif defined(_WINDOWS) +#include +#endif + +// Network access +networkStream::networkStream() : bufferedStream(1024*10, 1024*10) { + + _socket = -1; + + int result = ::socket(AF_INET, SOCK_STREAM, 0); + if (result <= 0) { + assert(false, "Socket could not be created!"); + } else { + _socket = result; + } +} + +ssize_t networkStream::read(char *buf, size_t len) { + return os::recv(_socket, buf, len, 0); +} + +void networkStream::flush() { + if (size() != 0) { + ssize_t result = os::raw_send(_socket, (char 
*)base(), size(), 0); + assert(result != -1, "connection error"); + assert(result >= 0 && (size_t)result == size(), "didn't send enough data"); + } + reset(); +} + +networkStream::~networkStream() { + close(); +} + +void networkStream::close() { + if (_socket != -1) { + flush(); + os::socket_close(_socket); + _socket = -1; + } +} + +// host could be IP address, or a host name +bool networkStream::connect(const char *host, short port) { + + char s_port[6]; // 5 digits max plus terminator + int ret = os::snprintf(s_port, sizeof(s_port), "%hu", (unsigned short) port); + assert(ret > 0, "snprintf failed: %d", ret); + + struct addrinfo* addr_info = nullptr; + struct addrinfo hints; + + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_INET; // Allow IPv4 only + hints.ai_socktype = SOCK_STREAM; // TCP only + + // getaddrinfo can resolve both an IP address and a host name + ret = getaddrinfo(host, s_port, &hints, &addr_info); + if (ret != 0) { + warning("networkStream::connect getaddrinfo for host %s and port %s failed: %s", + host, s_port, gai_strerror(ret)); + return false; + } + + ssize_t conn = os::connect(_socket, addr_info->ai_addr, (socklen_t)addr_info->ai_addrlen); + freeaddrinfo(addr_info); + return (conn >= 0); +} +#endif // !NATIVE_IMAGE + +#endif diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/ostream.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/ostream.hpp new file mode 100644 index 000000000000..ce7ce45dc0ee --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/share/utilities/ostream.hpp @@ -0,0 +1,350 @@ +/* + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_UTILITIES_OSTREAM_HPP +#define SHARE_UTILITIES_OSTREAM_HPP + +#include "memory/allocation.hpp" +#ifndef NATIVE_IMAGE +#include "runtime/timer.hpp" +#endif // !NATIVE_IMAGE +#include "utilities/globalDefinitions.hpp" +#include "utilities/macros.hpp" + +#ifndef NATIVE_IMAGE +DEBUG_ONLY(class ResourceMark;) +#endif // !NATIVE_IMAGE + +// Output streams for printing +// +// Printing guidelines: +// Where possible, please use tty->print() and tty->print_cr(). +// For product mode VM warnings use warning() which internally uses tty. 
+// In places where tty is not initialized yet or too much overhead, +// we may use jio_printf: +// jio_fprintf(defaultStream::output_stream(), "Message"); +// This allows for redirection via -XX:+DisplayVMOutputToStdout and +// -XX:+DisplayVMOutputToStderr +class outputStream : public CHeapObjBase { +#ifndef NATIVE_IMAGE + private: + NONCOPYABLE(outputStream); + + protected: + int _indentation; // current indentation + int _position; // visual position on the current line + uint64_t _precount; // number of chars output, less than _position + TimeStamp _stamp; // for time stamps + char* _scratch; // internal scratch buffer for printf + size_t _scratch_len; // size of internal scratch buffer + + // Returns whether a newline was seen or not + bool update_position(const char* s, size_t len); + static const char* do_vsnprintf(char* buffer, size_t buflen, + const char* format, va_list ap, + bool add_cr, + size_t& result_len) ATTRIBUTE_PRINTF(3, 0); + + // calls do_vsnprintf and writes output to stream; uses an on-stack buffer. + void do_vsnprintf_and_write_with_automatic_buffer(const char* format, va_list ap, bool add_cr) ATTRIBUTE_PRINTF(2, 0); + // calls do_vsnprintf and writes output to stream; uses the user-provided buffer; + void do_vsnprintf_and_write_with_scratch_buffer(const char* format, va_list ap, bool add_cr) ATTRIBUTE_PRINTF(2, 0); + // calls do_vsnprintf, then writes output to stream. 
+ void do_vsnprintf_and_write(const char* format, va_list ap, bool add_cr) ATTRIBUTE_PRINTF(2, 0); +#endif // !NATIVE_IMAGE + + public: +#ifndef NATIVE_IMAGE + // creation + outputStream(); + outputStream(bool has_time_stamps); + + // indentation + outputStream& indent(); + void inc() { _indentation++; }; + void dec() { _indentation--; }; + void inc(int n) { _indentation += n; }; + void dec(int n) { _indentation -= n; }; + int indentation() const { return _indentation; } + void set_indentation(int i) { _indentation = i; } + void fill_to(int col); + void move_to(int col, int slop = 6, int min_space = 2); + + // sizing + int position() const { return _position; } + julong count() const { return _precount + _position; } + void set_count(julong count) { _precount = count - _position; } + void set_position(int pos) { _position = pos; } + + // printing + void print(const char* format, ...) ATTRIBUTE_PRINTF(2, 3); + void print_cr(const char* format, ...) ATTRIBUTE_PRINTF(2, 3); + void vprint(const char *format, va_list argptr) ATTRIBUTE_PRINTF(2, 0); + void vprint_cr(const char* format, va_list argptr) ATTRIBUTE_PRINTF(2, 0); +#endif // !NATIVE_IMAGE + void print_raw(const char* str) { write(str, strlen(str)); } + void print_raw(const char* str, size_t len) { write(str, len); } +#ifndef NATIVE_IMAGE + void print_raw_cr(const char* str) { write(str, strlen(str)); cr(); } + void print_raw_cr(const char* str, size_t len){ write(str, len); cr(); } + void print_data(void* data, size_t len, bool with_ascii, bool rel_addr=true); + void put(char ch); + void sp(int count = 1); + void cr(); + void cr_indent(); + void bol() { if (_position > 0) cr(); } + + + // Time stamp + TimeStamp& time_stamp() { return _stamp; } + void stamp(); + void stamp(bool guard, const char* prefix, const char* suffix); + void stamp(bool guard) { + stamp(guard, "", ": "); + } + // Date stamp + void date_stamp(bool guard, const char* prefix, const char* suffix); + // A simplified call that includes a suffix 
of ": " + void date_stamp(bool guard) { + date_stamp(guard, "", ": "); + } + + // portable printing of 64 bit integers + void print_jlong(jlong value); + void print_julong(julong value); + + // flushing + virtual void flush() {} +#endif // !NATIVE_IMAGE + virtual void write(const char* str, size_t len) = 0; +#ifndef NATIVE_IMAGE + virtual void rotate_log(bool force, outputStream* out = nullptr) {} // GC log rotation + virtual ~outputStream() {} // close properly on deletion + + // Caller may specify their own scratch buffer to use for printing; otherwise, + // an automatic buffer on the stack (with O_BUFLEN len) is used. + void set_scratch_buffer(char* p, size_t len) { _scratch = p; _scratch_len = len; } + + void dec_cr() { dec(); cr(); } + void inc_cr() { inc(); cr(); } +#endif // !NATIVE_IMAGE +}; + +#ifndef NATIVE_IMAGE +// standard output +// ANSI C++ name collision +extern outputStream* tty; // tty output + +class streamIndentor : public StackObj { + private: + outputStream* _str; + int _amount; + + public: + streamIndentor(outputStream* str, int amt = 2) : _str(str), _amount(amt) { + _str->inc(_amount); + } + ~streamIndentor() { _str->dec(_amount); } +}; + +// advisory locking for the shared tty stream: +class ttyLocker: StackObj { + friend class ttyUnlocker; + private: + intx _holder; + + public: + static intx hold_tty(); // returns a "holder" token + static void release_tty(intx holder); // must witness same token + static bool release_tty_if_locked(); // returns true if lock was released + static void break_tty_lock_for_safepoint(intx holder); + + ttyLocker() { _holder = hold_tty(); } + ~ttyLocker() { release_tty(_holder); } +}; + +// Release the tty lock if it's held and reacquire it if it was +// locked. Used to avoid lock ordering problems. 
+class ttyUnlocker: StackObj { + private: + bool _was_locked; + public: + ttyUnlocker() { + _was_locked = ttyLocker::release_tty_if_locked(); + } + ~ttyUnlocker() { + if (_was_locked) { + ttyLocker::hold_tty(); + } + } +}; +#endif // !NATIVE_IMAGE + +// for writing to strings; buffer will expand automatically. +// Buffer will always be zero-terminated. +class stringStream : public outputStream { + DEBUG_ONLY(bool _is_frozen = false); + char* _buffer; + size_t _written; // Number of characters written, excluding termin. zero + size_t _capacity; + const bool _is_fixed; + char _small_buffer[48]; + + // Grow backing buffer to desired capacity. + void grow(size_t new_capacity); + + // zero terminate at buffer_pos. + void zero_terminate(); + + public: + // Create a stringStream using an internal buffer of initially initial_bufsize size; + // will be enlarged on demand. There is no maximum cap. + stringStream(size_t initial_capacity = 0); +#ifndef NATIVE_IMAGE + // Creates a stringStream using a caller-provided buffer. Will truncate silently if + // it overflows. + stringStream(char* fixed_buffer, size_t fixed_buffer_size); +#endif // !NATIVE_IMAGE + ~stringStream(); + virtual void write(const char* c, size_t len); + // Return number of characters written into buffer, excluding terminating zero and + // subject to truncation in static buffer mode. + size_t size() const { return _written; } + // Returns internal buffer containing the accumulated string. + // Returned buffer is only guaranteed to be valid as long as stream is not modified + const char* base() const { return _buffer; } + // Freezes stringStream (no further modifications possible) and returns pointer to it. + // No-op if stream is frozen already. + // Returns the internal buffer containing the accumulated string. 
+ const char* freeze() NOT_DEBUG(const) { + DEBUG_ONLY(_is_frozen = true); + return _buffer; + }; +#ifndef NATIVE_IMAGE + void reset(); + // Copy to a resource, or C-heap, array as requested + char* as_string(bool c_heap = false) const; +#endif // !NATIVE_IMAGE +}; + +#ifndef NATIVE_IMAGE +class fileStream : public outputStream { + protected: + FILE* _file; + bool _need_close; + public: + fileStream() { _file = nullptr; _need_close = false; } + fileStream(const char* file_name); + fileStream(const char* file_name, const char* opentype); + fileStream(FILE* file, bool need_close = false) { _file = file; _need_close = need_close; } + ~fileStream(); + bool is_open() const { return _file != nullptr; } + virtual void write(const char* c, size_t len); + size_t read(void *data, size_t size, size_t count) { return _file != nullptr ? ::fread(data, size, count, _file) : 0; } + char* readln(char *data, int count); + int eof() { return _file != nullptr ? feof(_file) : -1; } + long fileSize(); + void rewind() { if (_file != nullptr) ::rewind(_file); } + void flush(); +}; + +// unlike fileStream, fdStream does unbuffered I/O by calling +// open() and write() directly. It is async-safe, but output +// from multiple thread may be mixed together. Used by fatal +// error handler. 
+class fdStream : public outputStream { + protected: + int _fd; + static fdStream _stdout_stream; + static fdStream _stderr_stream; + public: + fdStream(int fd = -1) : _fd(fd) { } + bool is_open() const { return _fd != -1; } + void set_fd(int fd) { _fd = fd; } + int fd() const { return _fd; } + virtual void write(const char* c, size_t len); + void flush() {}; + + // predefined streams for unbuffered IO to stdout, stderr + static fdStream* stdout_stream() { return &_stdout_stream; } + static fdStream* stderr_stream() { return &_stderr_stream; } +}; + +// A /dev/null equivalent stream +class nullStream : public outputStream { +public: + void write(const char* c, size_t len) {} + void flush() {}; +}; + +void ostream_init(); +void ostream_init_log(); +void ostream_exit(); +void ostream_abort(); +const char* make_log_name(const char* log_name, const char* force_directory); + +// In the non-fixed buffer case an underlying buffer will be created and +// managed in C heap. Not MT-safe. +class bufferedStream : public outputStream { + protected: + char* buffer; + size_t buffer_pos; + size_t buffer_max; + size_t buffer_length; + bool truncated; + public: + bufferedStream(size_t initial_bufsize = 256, size_t bufmax = 1024*1024*10); + ~bufferedStream(); + virtual void write(const char* c, size_t len); + size_t size() { return buffer_pos; } + const char* base() { return buffer; } + void reset() { buffer_pos = 0; _precount = 0; _position = 0; } + char* as_string(); +}; + +#define O_BUFLEN 2000 // max size of output of individual print() methods + +#ifndef PRODUCT + +class networkStream : public bufferedStream { + + private: + int _socket; + + public: + networkStream(); + ~networkStream(); + + bool connect(const char *host, short port); + bool is_open() const { return _socket != -1; } + ssize_t read(char *buf, size_t len); + void close(); + virtual void flush(); +}; + +#endif +#endif // !NATIVE_IMAGE + +#endif // SHARE_UTILITIES_OSTREAM_HPP diff --git 
a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/logging/log.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/logging/log.hpp new file mode 100644 index 000000000000..aa9b630f1427 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/logging/log.hpp @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +#ifndef SHARE_LOGGING_LOG_HPP +#define SHARE_LOGGING_LOG_HPP + +#include "utilities/debug.hpp" + +// +// Logging macros +// +// Usage: +// log_()(); +// e.g. +// log_debug(logging)("message %d", i); +// +// Note that these macros will not evaluate the arguments unless the logging is enabled. 
+// + +#if defined(LOG_LEVEL) && defined(PRINT_WARNINGS) + +#define Error 1 +#define Warning 2 +#define Info 3 +#define Debug 4 +#define Trace 5 + +#define log_error(...) (!log_is_enabled(Error, __VA_ARGS__)) ? (void)0 : warning +#define log_warning(...) (!log_is_enabled(Warning, __VA_ARGS__)) ? (void)0 : warning +#define log_info(...) (!log_is_enabled(Info, __VA_ARGS__)) ? (void)0 : warning +#define log_debug(...) (!log_is_enabled(Debug, __VA_ARGS__)) ? (void)0 : warning +#define log_trace(...) (!log_is_enabled(Trace, __VA_ARGS__)) ? (void)0 : warning + +#define log_is_enabled(level, ...) (level <= LOG_LEVEL) + +#else +#define log_error(...) DUMMY_ARGUMENT_CONSUMER +#define log_warning(...) DUMMY_ARGUMENT_CONSUMER +#define log_info(...) DUMMY_ARGUMENT_CONSUMER +#define log_debug(...) DUMMY_ARGUMENT_CONSUMER +#define log_trace(...) DUMMY_ARGUMENT_CONSUMER + +#define DUMMY_ARGUMENT_CONSUMER(...) + +// Convenience macro to test if the logging is enabled on the specified level for given tags. +#define log_is_enabled(level, ...) false + +#endif // LOG_LEVEL + +#endif // SHARE_LOGGING_LOG_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/memory/allocation.cpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/memory/allocation.cpp new file mode 100644 index 000000000000..3dcc3a474cb1 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/memory/allocation.cpp @@ -0,0 +1,48 @@ +/* + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "memory/allocation.hpp" +#include "runtime/os.hpp" + +char* AllocateHeap(size_t size, + MEMFLAGS flags, + AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) { + // Note that we do not use AllocFailType. Instead, we always just return the value that the OS returns. + // If it is null, we will run into a segfault which is the best that we can do because we must not call + // exit or abort on the C++ side. 
+ return (char*)os::malloc(size, flags); +} + +char* ReallocateHeap(char* old, + size_t size, + MEMFLAGS flag, + AllocFailType alloc_failmode) { + return (char*) os::realloc(old, size, flag); +} + +// handles null pointers +void FreeHeap(void* p) { + os::free(p); +} diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/runtime/globals.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/runtime/globals.hpp new file mode 100644 index 000000000000..8c6b3cb6cf6c --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/runtime/globals.hpp @@ -0,0 +1,35 @@ +/* + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_RUNTIME_GLOBALS_HPP +#define SHARE_RUNTIME_GLOBALS_HPP + +#include "utilities/globalDefinitions.hpp" +#include "utilities/macros.hpp" + +constexpr bool UseCpuAllocPath = false; +constexpr bool UseContainerSupport = true; + +#endif // SHARE_RUNTIME_GLOBALS_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/utilities/debug.cpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/utilities/debug.cpp new file mode 100644 index 000000000000..0be1d5d79791 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/utilities/debug.cpp @@ -0,0 +1,61 @@ +/* + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+#include "utilities/debug.hpp"
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef PRINT_WARNINGS
+ATTRIBUTE_PRINTF(1, 2)
+void warning(const char* format, ...) {
+  FILE* const err = stderr;
+  va_list ap;
+  va_start(ap, format);
+  vfprintf(err, format, ap);
+  va_end(ap);
+  fputc('\n', err);
+}
+#endif
+
+#ifdef ASSERT
+ATTRIBUTE_PRINTF(4, 5)
+void report_vm_error(const char* file, int line, const char* error_msg, const char* detail_fmt, ...) {
+  FILE* const err = stderr;
+  va_list detail_args;
+  va_start(detail_args, detail_fmt);
+  vfprintf(err, detail_fmt, detail_args);
+  va_end(detail_args);
+  fputc('\n', err);
+  abort();
+}
+
+void report_vm_error(const char* file, int line, const char* error_msg)
+{
+  report_vm_error(file, line, error_msg, "%s", "");
+}
+#endif
diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/utilities/debug.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/utilities/debug.hpp
new file mode 100644
index 000000000000..6073ac8a7864
--- /dev/null
+++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/utilities/debug.hpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_UTILITIES_DEBUG_HPP +#define SHARE_UTILITIES_DEBUG_HPP + +#include "utilities/attributeNoreturn.hpp" +#include "utilities/compilerWarnings.hpp" + +#ifdef ASSERT +// error reporting helper functions +ATTRIBUTE_NORETURN +void report_vm_error(const char* file, int line, const char* error_msg); + +ATTRIBUTE_NORETURN +ATTRIBUTE_PRINTF(4, 5) +void report_vm_error(const char* file, int line, const char* error_msg, + const char* detail_fmt, ...); +#endif + +#ifdef ASSERT +#define __FILENAME_ONLY__ __FILE__ +#else +// NOTE (chaeubl): Avoid that __FILE__ embeds the full path into the binary. +#define __FILENAME_ONLY__ "unknown file" +#endif + +// assertions +#ifndef ASSERT +#define vmassert(p, ...) +#else +#define vmassert(p, ...) \ +do { \ + if (!(p)) { \ + report_vm_error(__FILENAME_ONLY__, __LINE__, "assert(" #p ") failed", __VA_ARGS__); \ + } \ +} while (0) +#endif + +// For backward compatibility. +#define assert(p, ...) vmassert(p, __VA_ARGS__) + +#ifndef PRINT_WARNINGS +#define warning(format, ...) 
+#else +void warning(const char* format, ...); +#endif + +#define STATIC_ASSERT(Cond) static_assert((Cond), #Cond) + +#endif // SHARE_UTILITIES_DEBUG_HPP diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/svm_container.cpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/svm_container.cpp new file mode 100644 index 000000000000..7b1ed8c88201 --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/svm_container.cpp @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2024, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "runtime/os.hpp" +#include "os_linux.hpp" +#include "osContainer_linux.hpp" +#include "svm_container.hpp" + +extern "C" { + +// keep in sync with ContainerLibrary.java +constexpr int SUCCESS_IS_NOT_CONTAINERIZED = 0; +constexpr int SUCCESS_IS_CONTAINERIZED = 1; +constexpr int ERROR_LIBCONTAINER_TOO_OLD = 2; +constexpr int ERROR_LIBCONTAINER_TOO_NEW = 3; + +static bool is_initialized = false; + +// NO_TRANSITION +EXPORT_FOR_SVM int svm_container_initialize(int actual_native_image_container_version) { + // Note: Do not pass and store any option values to the C++ in here. + // The C++ code is shared between isolates, but options are not. + const int expected_native_image_container_version = 240100; + if (actual_native_image_container_version > expected_native_image_container_version) { + return ERROR_LIBCONTAINER_TOO_OLD; + } + if (actual_native_image_container_version < expected_native_image_container_version) { + return ERROR_LIBCONTAINER_TOO_NEW; + } + + os::Linux::initialize_system_info(); + OSContainer::init(); + is_initialized = true; + return OSContainer::is_containerized() ? 
SUCCESS_IS_CONTAINERIZED : SUCCESS_IS_NOT_CONTAINERIZED; +} + +// NO_TRANSITION +EXPORT_FOR_SVM jlong svm_container_physical_memory() { + assert(is_initialized, "libsvm_container not yet initialized"); + return os::physical_memory(); +} + +// NO_TRANSITION +EXPORT_FOR_SVM jlong svm_container_memory_limit_in_bytes() { + assert(is_initialized, "libsvm_container not yet initialized"); + return OSContainer::memory_limit_in_bytes(); +} + +// NO_TRANSITION +EXPORT_FOR_SVM jlong svm_container_memory_and_swap_limit_in_bytes() { + assert(is_initialized, "libsvm_container not yet initialized"); + return OSContainer::memory_and_swap_limit_in_bytes(); +} + +// NO_TRANSITION +EXPORT_FOR_SVM jlong svm_container_memory_soft_limit_in_bytes() { + assert(is_initialized, "libsvm_container not yet initialized"); + return OSContainer::memory_soft_limit_in_bytes(); +} + +// NO_TRANSITION +EXPORT_FOR_SVM jlong svm_container_memory_usage_in_bytes() { + assert(is_initialized, "libsvm_container not yet initialized"); + return OSContainer::memory_usage_in_bytes(); +} + +// NO_TRANSITION +EXPORT_FOR_SVM jlong svm_container_memory_max_usage_in_bytes() { + assert(is_initialized, "libsvm_container not yet initialized"); + return OSContainer::memory_max_usage_in_bytes(); +} + +// NO_TRANSITION +EXPORT_FOR_SVM jlong svm_container_rss_usage_in_bytes() { + assert(is_initialized, "libsvm_container not yet initialized"); + return OSContainer::rss_usage_in_bytes(); +} + +// NO_TRANSITION +EXPORT_FOR_SVM jlong svm_container_cache_usage_in_bytes() { + assert(is_initialized, "libsvm_container not yet initialized"); + return OSContainer::cache_usage_in_bytes(); +} + +// NO_TRANSITION +EXPORT_FOR_SVM int svm_container_active_processor_count() { + assert(is_initialized, "libsvm_container not yet initialized"); + return OSContainer::active_processor_count(); +} + +} // extern C diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/svm_container.hpp 
b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/svm_container.hpp new file mode 100644 index 000000000000..d7a3c958b27f --- /dev/null +++ b/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/svm_container.hpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2024, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SVM_CONTAINER_HPP +#define SVM_CONTAINER_HPP + +#include "jni_md.h" + +#ifndef EXPORT_FOR_SVM + #if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility) + #ifdef ARM + #define EXPORT_FOR_SVM __attribute__((externally_visible,visibility("default"))) + #else + #define EXPORT_FOR_SVM __attribute__((visibility("default"))) + #endif + #else + #define EXPORT_FOR_SVM + #endif +#endif + +extern "C" { + EXPORT_FOR_SVM int svm_container_initialize(int version); + EXPORT_FOR_SVM jlong svm_container_physical_memory(); + EXPORT_FOR_SVM jlong svm_container_memory_limit_in_bytes(); + EXPORT_FOR_SVM jlong svm_container_memory_and_swap_limit_in_bytes(); + EXPORT_FOR_SVM jlong svm_container_memory_soft_limit_in_bytes(); + EXPORT_FOR_SVM jlong svm_container_memory_usage_in_bytes(); + EXPORT_FOR_SVM jlong svm_container_memory_max_usage_in_bytes(); + EXPORT_FOR_SVM jlong svm_container_rss_usage_in_bytes(); + EXPORT_FOR_SVM jlong svm_container_cache_usage_in_bytes(); + EXPORT_FOR_SVM int svm_container_active_processor_count(); +} + +#endif // SVM_CONTAINER_HPP diff --git a/substratevm/src/com.oracle.svm.test/src/com/oracle/svm/test/jfr/TestContainerEvent.java b/substratevm/src/com.oracle.svm.test/src/com/oracle/svm/test/jfr/TestContainerEvent.java index 2e8dc49cb770..ddacab3d1dff 100644 --- a/substratevm/src/com.oracle.svm.test/src/com/oracle/svm/test/jfr/TestContainerEvent.java +++ b/substratevm/src/com.oracle.svm.test/src/com/oracle/svm/test/jfr/TestContainerEvent.java @@ -34,7 +34,7 @@ import org.junit.Assume; import org.junit.Test; -import com.oracle.svm.core.OS; +import com.oracle.svm.core.container.Container; import com.oracle.svm.test.jfr.events.ThreadEvent; import jdk.jfr.Recording; @@ -46,7 +46,7 @@ public class TestContainerEvent extends JfrRecordingTest { @Test public void test() throws Throwable { - Assume.assumeTrue("Container support is limited to Linux", OS.LINUX.isCurrent()); + 
Assume.assumeTrue("Container support not enabled or available", Container.isSupported()); String[] events = new String[]{"jdk.ContainerConfiguration"}; Recording recording = startRecording(events); From e7f656ba3ff749a81ec085c559d110ea9a2a11a9 Mon Sep 17 00:00:00 2001 From: Josef Eisl Date: Wed, 29 May 2024 09:53:23 +0200 Subject: [PATCH 4/8] svm: add CHANGELOG entry --- substratevm/CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/substratevm/CHANGELOG.md b/substratevm/CHANGELOG.md index b6e224c58fa6..e672b290ebeb 100644 --- a/substratevm/CHANGELOG.md +++ b/substratevm/CHANGELOG.md @@ -33,6 +33,7 @@ This changelog summarizes major changes to GraalVM Native Image. * (GR-52844) Add `-Os`, a new optimization mode to configure the optimizer in a way to get the smallest code size. * (GR-49770) Add support for glob patterns in resource-config files in addition to regexp. The Tracing agent now prints entries in the glob format. * (GR-46386) Throw missing registration errors for JNI queries when the query was not included in the reachability metadata. +* (GR-51479) Implement cgroup support in native code. See the [README](src/com.oracle.svm.native.libcontainer/README.md) and the [PR description](https://github.com/oracle/graal/pull/8989). ## GraalVM for JDK 22 (Internal Version 24.0.0) * (GR-48304) Red Hat added support for the JFR event ThreadAllocationStatistics. 
From 41d09ac39e74bb50268fb0873f1a33b906577fa6 Mon Sep 17 00:00:00 2001 From: Josef Eisl Date: Tue, 4 Jun 2024 12:42:59 +0200 Subject: [PATCH 5/8] svm/libcontainer: move svm specific files to src/svm --- substratevm/mx.substratevm/suite.py | 4 ++-- .../src/com.oracle.svm.native.libcontainer/ninja.template | 2 +- .../src/{hotspot => }/svm/share/logging/log.hpp | 0 .../src/{hotspot => }/svm/share/memory/allocation.cpp | 0 .../src/{hotspot => }/svm/share/runtime/globals.hpp | 0 .../src/{hotspot => }/svm/share/utilities/debug.cpp | 0 .../src/{hotspot => }/svm/share/utilities/debug.hpp | 0 .../src/{hotspot => }/svm/svm_container.cpp | 0 .../src/{hotspot => }/svm/svm_container.hpp | 0 9 files changed, 3 insertions(+), 3 deletions(-) rename substratevm/src/com.oracle.svm.native.libcontainer/src/{hotspot => }/svm/share/logging/log.hpp (100%) rename substratevm/src/com.oracle.svm.native.libcontainer/src/{hotspot => }/svm/share/memory/allocation.cpp (100%) rename substratevm/src/com.oracle.svm.native.libcontainer/src/{hotspot => }/svm/share/runtime/globals.hpp (100%) rename substratevm/src/com.oracle.svm.native.libcontainer/src/{hotspot => }/svm/share/utilities/debug.cpp (100%) rename substratevm/src/com.oracle.svm.native.libcontainer/src/{hotspot => }/svm/share/utilities/debug.hpp (100%) rename substratevm/src/com.oracle.svm.native.libcontainer/src/{hotspot => }/svm/svm_container.cpp (100%) rename substratevm/src/com.oracle.svm.native.libcontainer/src/{hotspot => }/svm/svm_container.hpp (100%) diff --git a/substratevm/mx.substratevm/suite.py b/substratevm/mx.substratevm/suite.py index fbf8dc72dfba..325641ac4acf 100644 --- a/substratevm/mx.substratevm/suite.py +++ b/substratevm/mx.substratevm/suite.py @@ -870,11 +870,11 @@ # include dirs "-I/src/hotspot", "-I/src/hotspot/share", - "-I/src/hotspot/svm", - "-I/src/hotspot/svm/share", "-I/src/hotspot/os/linux", "-I/src/hotspot/os/posix", "-I/src/hotspot/os/posix/include", + "-I/src/svm", + "-I/src/svm/share", # HotSpot standard 
flags # See https://github.com/openjdk/jdk/blob/master/make/autoconf/flags-cflags.m4 # C++ standard diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/ninja.template b/substratevm/src/com.oracle.svm.native.libcontainer/ninja.template index 386a984d6437..94bef867e66a 100644 --- a/substratevm/src/com.oracle.svm.native.libcontainer/ninja.template +++ b/substratevm/src/com.oracle.svm.native.libcontainer/ninja.template @@ -25,7 +25,7 @@ root = . builddir = build -includes = -Isrc/hotspot -Isrc/hotspot/share -Isrc/hotspot/svm -Isrc/hotspot/svm/share +includes = -Isrc/hotspot -Isrc/hotspot/share -Isrc/svm -Isrc/svm/share includes_linux = $includes -Isrc/hotspot/os/linux -Isrc/hotspot/os/posix -Isrc/hotspot/os/posix/include defines_linux = -DNATIVE_IMAGE -DLINUX -DINCLUDE_SUFFIX_COMPILER=_gcc -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/logging/log.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/svm/share/logging/log.hpp similarity index 100% rename from substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/logging/log.hpp rename to substratevm/src/com.oracle.svm.native.libcontainer/src/svm/share/logging/log.hpp diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/memory/allocation.cpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/svm/share/memory/allocation.cpp similarity index 100% rename from substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/memory/allocation.cpp rename to substratevm/src/com.oracle.svm.native.libcontainer/src/svm/share/memory/allocation.cpp diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/runtime/globals.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/svm/share/runtime/globals.hpp similarity index 100% rename from 
substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/runtime/globals.hpp rename to substratevm/src/com.oracle.svm.native.libcontainer/src/svm/share/runtime/globals.hpp diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/utilities/debug.cpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/svm/share/utilities/debug.cpp similarity index 100% rename from substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/utilities/debug.cpp rename to substratevm/src/com.oracle.svm.native.libcontainer/src/svm/share/utilities/debug.cpp diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/utilities/debug.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/svm/share/utilities/debug.hpp similarity index 100% rename from substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/share/utilities/debug.hpp rename to substratevm/src/com.oracle.svm.native.libcontainer/src/svm/share/utilities/debug.hpp diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/svm_container.cpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/svm/svm_container.cpp similarity index 100% rename from substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/svm_container.cpp rename to substratevm/src/com.oracle.svm.native.libcontainer/src/svm/svm_container.cpp diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/svm_container.hpp b/substratevm/src/com.oracle.svm.native.libcontainer/src/svm/svm_container.hpp similarity index 100% rename from substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/svm/svm_container.hpp rename to substratevm/src/com.oracle.svm.native.libcontainer/src/svm/svm_container.hpp From b22fe4578cfa317c63a81d57bcc37a5c06644beb Mon Sep 17 00:00:00 2001 From: Josef Eisl Date: Tue, 4 Jun 2024 12:45:48 +0200 Subject: [PATCH 6/8] svm/libcontainer: move jni*.h into the right directories --- 
substratevm/mx.substratevm/suite.py | 2 ++ .../src/com.oracle.svm.native.libcontainer/ninja.template | 4 ++-- .../src/{hotspot => java.base/share/native/include}/jni.h | 0 .../src/{hotspot => java.base/unix/native/include}/jni_md.h | 0 4 files changed, 4 insertions(+), 2 deletions(-) rename substratevm/src/com.oracle.svm.native.libcontainer/src/{hotspot => java.base/share/native/include}/jni.h (100%) rename substratevm/src/com.oracle.svm.native.libcontainer/src/{hotspot => java.base/unix/native/include}/jni_md.h (100%) diff --git a/substratevm/mx.substratevm/suite.py b/substratevm/mx.substratevm/suite.py index 325641ac4acf..16547ae644d4 100644 --- a/substratevm/mx.substratevm/suite.py +++ b/substratevm/mx.substratevm/suite.py @@ -868,6 +868,8 @@ # e.g., '__cxa_pure_virtual'. -O1 or higher avoids the problem. # "-DASSERT", "-DPRINT_WARNINGS", "-g", "-O1", "-DLOG_LEVEL=6", # include dirs + "-I/src/java.base/share/native/include", + "-I/src/java.base/unix/native/include", "-I/src/hotspot", "-I/src/hotspot/share", "-I/src/hotspot/os/linux", diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/ninja.template b/substratevm/src/com.oracle.svm.native.libcontainer/ninja.template index 94bef867e66a..dd8eda4558cd 100644 --- a/substratevm/src/com.oracle.svm.native.libcontainer/ninja.template +++ b/substratevm/src/com.oracle.svm.native.libcontainer/ninja.template @@ -25,8 +25,8 @@ root = . 
builddir = build -includes = -Isrc/hotspot -Isrc/hotspot/share -Isrc/svm -Isrc/svm/share -includes_linux = $includes -Isrc/hotspot/os/linux -Isrc/hotspot/os/posix -Isrc/hotspot/os/posix/include +includes = -Isrc/hotspot -Isrc/hotspot/share -Isrc/svm -Isrc/svm/share -Isrc/java.base/share/native/include +includes_linux = $includes -Isrc/hotspot/os/linux -Isrc/hotspot/os/posix -Isrc/hotspot/os/posix/include -Isrc/java.base/unix/native/include defines_linux = -DNATIVE_IMAGE -DLINUX -DINCLUDE_SUFFIX_COMPILER=_gcc -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS cflags_linux = $defines_linux -std=c++14 $includes_linux -fno-rtti -fno-exceptions -fvisibility=hidden -fPIC diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/jni.h b/substratevm/src/com.oracle.svm.native.libcontainer/src/java.base/share/native/include/jni.h similarity index 100% rename from substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/jni.h rename to substratevm/src/com.oracle.svm.native.libcontainer/src/java.base/share/native/include/jni.h diff --git a/substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/jni_md.h b/substratevm/src/com.oracle.svm.native.libcontainer/src/java.base/unix/native/include/jni_md.h similarity index 100% rename from substratevm/src/com.oracle.svm.native.libcontainer/src/hotspot/jni_md.h rename to substratevm/src/com.oracle.svm.native.libcontainer/src/java.base/unix/native/include/jni_md.h From 434867d574614244c5600195777161ad1a959e0f Mon Sep 17 00:00:00 2001 From: Josef Eisl Date: Wed, 3 Jul 2024 09:44:34 +0200 Subject: [PATCH 7/8] svm/libcontainer: improve Javadoc --- .../svm/core/container/ContainerLibrary.java | 14 +++++--------- .../svm/core/jfr/Target_jdk_jfr_internal_JVM.java | 10 ++++++---- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/ContainerLibrary.java 
b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/ContainerLibrary.java index b08c0b42582a..13733f37d2e7 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/ContainerLibrary.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/ContainerLibrary.java @@ -88,16 +88,12 @@ class ContainerLibrary { @Platforms(Platform.HOSTED_ONLY.class) class ContainerLibraryDirectives implements CContext.Directives { /** - * True if the {@link ContainerLibrary} should be linked or not. + * True if {@link ContainerLibrary} should be linked. * - * Note that although this method returns {@code true} only if - * {@linkplain SubstrateOptions#useSerialGC() serial GC} or - * {@linkplain SubstrateOptions#useEpsilonGC() epsilon GC} is enabled, the {@link CFunction}s - * defined in {@link ContainerLibrary} are always registered and can be called even if this - * method returns {@code false}. Other GCs can provide alternative implementations themselves, - * or manually link against the native {@code - * svm_container} library, e.g., by calling - * {@code com.oracle.svm.hosted.c.NativeLibraries#addStaticJniLibrary}. + * Note that although this method returns {@code true} only for certain GCs, the + * {@link CFunction}s defined in {@link ContainerLibrary} are always registered and can be + * called even if this method returns {@code false}, as other GCs provide alternative + * implementations themselves. 
 */ @Override public boolean isInConfiguration() { diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/Target_jdk_jfr_internal_JVM.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/Target_jdk_jfr_internal_JVM.java index eae3988f906a..065820dd959b 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/Target_jdk_jfr_internal_JVM.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/jfr/Target_jdk_jfr_internal_JVM.java @@ -532,13 +532,15 @@ public static boolean isContainerized() { return Container.singleton().isContainerized(); } + /** + * Returns the total memory available on the host. + * + * This is unconditionally using {@link OperatingSystem#getPhysicalMemorySize} since we are + * interested in the host values (and not the containerized values). + */ @Substitute @TargetElement(onlyWith = JDKLatest.class) // public static long hostTotalMemory() { - /* - * This is unconditionally using Machine#getPhysicalMemorySize since we are interested in - * the host values (and not the containerized values)
- */ return OperatingSystem.singleton().getPhysicalMemorySize().rawValue(); } From 1bea0be4cc32eb90b3daf2de4d141c91bdf7f690 Mon Sep 17 00:00:00 2001 From: Josef Eisl Date: Wed, 3 Jul 2024 10:05:22 +0200 Subject: [PATCH 8/8] svm/libcontainer: replace isContainerized(boolean) with isInitialized() and isContainerized() --- .../oracle/svm/core/SubstrateDiagnostics.java | 10 ++++--- .../oracle/svm/core/container/Container.java | 27 +++++++++++-------- 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateDiagnostics.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateDiagnostics.java index a416839fac73..403d189e20cb 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateDiagnostics.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/SubstrateDiagnostics.java @@ -30,7 +30,6 @@ import java.util.ArrayList; import java.util.Arrays; -import com.oracle.svm.core.container.OperatingSystem; import org.graalvm.collections.EconomicMap; import org.graalvm.nativeimage.CurrentIsolate; import org.graalvm.nativeimage.ImageSingletons; @@ -57,6 +56,7 @@ import com.oracle.svm.core.code.RuntimeCodeInfoMemory; import com.oracle.svm.core.config.ConfigurationValues; import com.oracle.svm.core.container.Container; +import com.oracle.svm.core.container.OperatingSystem; import com.oracle.svm.core.deopt.DeoptimizationSupport; import com.oracle.svm.core.deopt.Deoptimizer; import com.oracle.svm.core.feature.AutomaticallyRegisteredImageSingleton; @@ -508,7 +508,11 @@ private static boolean pointsIntoNativeImageCode(CodePointer possibleIp) { private static boolean isContainerized() { boolean allowInit = !SubstrateOptions.AsyncSignalSafeDiagnostics.getValue(); - return Container.singleton().isContainerized(allowInit); + if (Container.singleton().isInitialized() || allowInit) { + return Container.singleton().isContainerized(); + } + // uninitialized and 
initialization not allowed + return false; } public static class FatalErrorState { @@ -860,7 +864,7 @@ public void printDiagnostics(Log log, ErrorContext context, int maxDiagnosticLev Platform platform = ImageSingletons.lookup(Platform.class); log.string("Platform: ").string(platform.getOS()).string("/").string(platform.getArchitecture()).newline(); log.string("Page size: ").unsigned(SubstrateOptions.getPageSize()).newline(); - log.string("Containerized: ").bool(isContainerized()).newline(); + log.string("Containerized: ").string(Container.singleton().isInitialized() ? String.valueOf(isContainerized()) : "unknown").newline(); log.string("CPU features used for AOT compiled code: ").string(getBuildTimeCpuFeatures()).newline(); log.indent(false); } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/Container.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/Container.java index 15d2b0c74f96..050af828e479 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/Container.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/container/Container.java @@ -74,22 +74,27 @@ public static Container singleton() { } @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) - public boolean isContainerized() { - return isContainerized(true); + public boolean isInitialized() { + return STATE.get().readWord(0) != State.UNINITIALIZED; } + /** + * Determines whether the image runs containerized, potentially initializing container support + * if not yet initialized. If initialization is not desired, calls to this method must be + * guarded by {@link #isInitialized()}. 
+ */ @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) - public boolean isContainerized(boolean allowInit) { + public boolean isContainerized() { if (!isSupported()) { return false; } UnsignedWord value = STATE.get().readWord(0); - if (allowInit && value == State.UNINITIALIZED) { + if (value == State.UNINITIALIZED) { value = initialize(); } - assert value == State.CONTAINERIZED || value == State.NOT_CONTAINERIZED || !allowInit; + assert value == State.CONTAINERIZED || value == State.NOT_CONTAINERIZED; return value == State.CONTAINERIZED; } @@ -126,7 +131,7 @@ private static UnsignedWord initialize() { @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) public int getActiveProcessorCount() { - VMError.guarantee(isContainerized(false)); + VMError.guarantee(isInitialized() && isContainerized()); long currentMs = System.currentTimeMillis(); if (currentMs > activeProcessorCountTimeoutMs) { @@ -138,13 +143,13 @@ public int getActiveProcessorCount() { @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) public int getCachedActiveProcessorCount() { - VMError.guarantee(isContainerized(false)); + VMError.guarantee(isInitialized() && isContainerized()); return cachedActiveProcessorCount; } @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) public UnsignedWord getPhysicalMemory() { - VMError.guarantee(isContainerized(false)); + VMError.guarantee(isInitialized() && isContainerized()); long currentMs = System.currentTimeMillis(); if (currentMs > physicalMemoryTimeoutMs) { @@ -156,13 +161,13 @@ public UnsignedWord getPhysicalMemory() { @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) public UnsignedWord getCachedPhysicalMemory() { - VMError.guarantee(isContainerized(false)); + VMError.guarantee(isInitialized() && isContainerized()); return cachedPhysicalMemorySize; } @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, 
mayBeInlined = true) public long getMemoryLimitInBytes() { - VMError.guarantee(isContainerized(false)); + VMError.guarantee(isInitialized() && isContainerized()); long currentMs = System.currentTimeMillis(); if (currentMs > memoryLimitInBytesTimeoutMs) { @@ -174,7 +179,7 @@ public long getMemoryLimitInBytes() { @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) public long getCachedMemoryLimitInBytes() { - VMError.guarantee(isContainerized(false)); + VMError.guarantee(isInitialized() && isContainerized()); return cachedMemoryLimitInBytes; }