diff --git a/make/Images.gmk b/make/Images.gmk index bfad1ad563c9b..bb82d1334d0e9 100644 --- a/make/Images.gmk +++ b/make/Images.gmk @@ -132,10 +132,16 @@ CDS_DUMP_FLAGS = -Xmx128M -Xms128M # Helper function for creating the CDS archives for the JDK and JRE # # Param1 - VM variant (e.g., server, client, zero, ...) -# Param2 - _nocoops, or empty +# Param2 - _nocoops, _coh, _nocoops_coh, or empty define CreateCDSArchive - $1_$2_DUMP_EXTRA_ARG := $(if $(filter _nocoops, $2),-XX:-UseCompressedOops,) - $1_$2_DUMP_TYPE := $(if $(filter _nocoops, $2),-NOCOOPS,) + $1_$2_COOPS_OPTION := $(if $(findstring _nocoops, $2),-XX:-UseCompressedOops) + # enable and also explicitly disable coh as needed. + ifeq ($(call isTargetCpuBits, 64), true) + $1_$2_COH_OPTION := -XX:+UnlockExperimentalVMOptions \ + $(if $(findstring _coh, $2),-XX:+UseCompactObjectHeaders,-XX:-UseCompactObjectHeaders) + endif + $1_$2_DUMP_EXTRA_ARG := $$($1_$2_COOPS_OPTION) $$($1_$2_COH_OPTION) + $1_$2_DUMP_TYPE := $(if $(findstring _nocoops, $2),-NOCOOPS,)$(if $(findstring _coh, $2),-COH,) # Only G1 supports dumping the shared heap, so explicitly use G1 if the JVM supports it. $1_$2_CDS_DUMP_FLAGS := $(CDS_DUMP_FLAGS) $(if $(filter g1gc, $(JVM_FEATURES_$1)),-XX:+UseG1GC) @@ -190,6 +196,14 @@ ifeq ($(BUILD_CDS_ARCHIVE), true) $(foreach v, $(JVM_VARIANTS), \ $(eval $(call CreateCDSArchive,$v,_nocoops)) \ ) + ifeq ($(BUILD_CDS_ARCHIVE_COH), true) + $(foreach v, $(JVM_VARIANTS), \ + $(eval $(call CreateCDSArchive,$v,_coh)) \ + ) + $(foreach v, $(JVM_VARIANTS), \ + $(eval $(call CreateCDSArchive,$v,_nocoops_coh)) \ + ) + endif endif endif diff --git a/make/autoconf/configure.ac b/make/autoconf/configure.ac index f7e9844a64301..0674162b24f6e 100644 --- a/make/autoconf/configure.ac +++ b/make/autoconf/configure.ac @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved. 
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -260,6 +260,7 @@ JDKOPT_ENABLE_DISABLE_GENERATE_CLASSLIST JDKOPT_EXCLUDE_TRANSLATIONS JDKOPT_ENABLE_DISABLE_MANPAGES JDKOPT_ENABLE_DISABLE_CDS_ARCHIVE +JDKOPT_ENABLE_DISABLE_CDS_ARCHIVE_COH JDKOPT_ENABLE_DISABLE_COMPATIBLE_CDS_ALIGNMENT JDKOPT_SETUP_MACOSX_SIGNING diff --git a/make/autoconf/jdk-options.m4 b/make/autoconf/jdk-options.m4 index 76e95127f7393..19a6e062c7a35 100644 --- a/make/autoconf/jdk-options.m4 +++ b/make/autoconf/jdk-options.m4 @@ -673,6 +673,33 @@ AC_DEFUN([JDKOPT_ENABLE_DISABLE_CDS_ARCHIVE], AC_SUBST(BUILD_CDS_ARCHIVE) ]) +################################################################################ +# +# Enable or disable the default CDS archive generation for Compact Object Headers +# +AC_DEFUN([JDKOPT_ENABLE_DISABLE_CDS_ARCHIVE_COH], +[ + UTIL_ARG_ENABLE(NAME: cds-archive-coh, DEFAULT: auto, RESULT: BUILD_CDS_ARCHIVE_COH, + DESC: [enable generation of default CDS archives for compact object headers (requires --enable-cds-archive)], + DEFAULT_DESC: [auto], + CHECKING_MSG: [if default CDS archives for compact object headers should be generated], + CHECK_AVAILABLE: [ + AC_MSG_CHECKING([if CDS archive with compact object headers is available]) + if test "x$BUILD_CDS_ARCHIVE" = "xfalse"; then + AC_MSG_RESULT([no (CDS default archive generation is disabled)]) + AVAILABLE=false + elif test "x$OPENJDK_TARGET_CPU" != "xx86_64" && + test "x$OPENJDK_TARGET_CPU" != "xaarch64"; then + AC_MSG_RESULT([no (compact object headers not supported for this platform)]) + AVAILABLE=false + else + AC_MSG_RESULT([yes]) + AVAILABLE=true + fi + ]) + AC_SUBST(BUILD_CDS_ARCHIVE_COH) +]) + ################################################################################ # # Enable the alternative CDS core region alignment diff --git a/make/autoconf/spec.gmk.template b/make/autoconf/spec.gmk.template index 
79a541dc78cd5..9d7bac7aa8fe1 100644 --- a/make/autoconf/spec.gmk.template +++ b/make/autoconf/spec.gmk.template @@ -370,6 +370,7 @@ EXCLUDE_TRANSLATIONS := @EXCLUDE_TRANSLATIONS@ BUILD_MANPAGES := @BUILD_MANPAGES@ BUILD_CDS_ARCHIVE := @BUILD_CDS_ARCHIVE@ +BUILD_CDS_ARCHIVE_COH := @BUILD_CDS_ARCHIVE_COH@ ENABLE_COMPATIBLE_CDS_ALIGNMENT := @ENABLE_COMPATIBLE_CDS_ALIGNMENT@ diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad index 8eb2821cc5744..b96f813cb36aa 100644 --- a/src/hotspot/cpu/aarch64/aarch64.ad +++ b/src/hotspot/cpu/aarch64/aarch64.ad @@ -6438,7 +6438,7 @@ instruct loadKlass(iRegPNoSp dst, memory mem) instruct loadNKlass(iRegNNoSp dst, memory mem) %{ match(Set dst (LoadNKlass mem)); - predicate(!needs_acquiring_load(n)); + predicate(!needs_acquiring_load(n) && !UseCompactObjectHeaders); ins_cost(4 * INSN_COST); format %{ "ldrw $dst, $mem\t# compressed class ptr" %} @@ -6448,6 +6448,20 @@ instruct loadNKlass(iRegNNoSp dst, memory mem) ins_pipe(iload_reg_mem); %} +instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory mem, rFlagsReg cr) +%{ + match(Set dst (LoadNKlass mem)); + effect(KILL cr); + predicate(!needs_acquiring_load(n) && UseCompactObjectHeaders); + + ins_cost(4 * INSN_COST); + format %{ "ldrw $dst, $mem\t# compressed class ptr" %} + ins_encode %{ + __ load_nklass_compact($dst$$Register, $mem$$base$$Register, $mem$$index$$Register, $mem$$scale, $mem$$disp); + %} + ins_pipe(pipe_slow); +%} + // Load Float instruct loadF(vRegF dst, memory mem) %{ diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp index 91430be5835b5..cbcb8da89fc47 100644 --- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp @@ -2246,8 +2246,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); Address dst_length_addr = Address(dst, 
arrayOopDesc::length_offset_in_bytes()); - Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); - Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); // test for null if (flags & LIR_OpArrayCopy::src_null_check) { @@ -2308,15 +2306,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { // We don't know the array types are compatible if (basic_type != T_OBJECT) { // Simple test for basic type arrays - if (UseCompressedClassPointers) { - __ ldrw(tmp, src_klass_addr); - __ ldrw(rscratch1, dst_klass_addr); - __ cmpw(tmp, rscratch1); - } else { - __ ldr(tmp, src_klass_addr); - __ ldr(rscratch1, dst_klass_addr); - __ cmp(tmp, rscratch1); - } + __ cmp_klass(src, dst, tmp, rscratch1); __ br(Assembler::NE, *stub->entry()); } else { // For object arrays, if src is a sub class of dst then we can @@ -2438,36 +2428,14 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { // but not necessarily exactly of type default_type. Label known_ok, halt; __ mov_metadata(tmp, default_type->constant_encoding()); - if (UseCompressedClassPointers) { - __ encode_klass_not_null(tmp); - } if (basic_type != T_OBJECT) { - - if (UseCompressedClassPointers) { - __ ldrw(rscratch1, dst_klass_addr); - __ cmpw(tmp, rscratch1); - } else { - __ ldr(rscratch1, dst_klass_addr); - __ cmp(tmp, rscratch1); - } + __ cmp_klass(dst, tmp, rscratch1); __ br(Assembler::NE, halt); - if (UseCompressedClassPointers) { - __ ldrw(rscratch1, src_klass_addr); - __ cmpw(tmp, rscratch1); - } else { - __ ldr(rscratch1, src_klass_addr); - __ cmp(tmp, rscratch1); - } + __ cmp_klass(src, tmp, rscratch1); __ br(Assembler::EQ, known_ok); } else { - if (UseCompressedClassPointers) { - __ ldrw(rscratch1, dst_klass_addr); - __ cmpw(tmp, rscratch1); - } else { - __ ldr(rscratch1, dst_klass_addr); - __ cmp(tmp, rscratch1); - } + __ cmp_klass(dst, tmp, rscratch1); __ br(Assembler::EQ, known_ok); __ cmp(src, dst); __ br(Assembler::EQ, known_ok); @@ -2551,7 +2519,12 @@ void 
LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { } if (UseCompressedClassPointers) { - __ ldrw(result, Address (obj, oopDesc::klass_offset_in_bytes())); + if (UseCompactObjectHeaders) { + __ ldr(result, Address(obj, oopDesc::mark_offset_in_bytes())); + __ lsr(result, result, markWord::klass_shift); + } else { + __ ldrw(result, Address (obj, oopDesc::klass_offset_in_bytes())); + } __ decode_klass_not_null(result); } else { __ ldr(result, Address (obj, oopDesc::klass_offset_in_bytes())); diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp index 89624aeffdd04..cd3d544126520 100644 --- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp @@ -175,15 +175,19 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) { assert_different_registers(obj, klass, len); - // This assumes that all prototype bits fit in an int32_t - mov(t1, (int32_t)(intptr_t)markWord::prototype().value()); - str(t1, Address(obj, oopDesc::mark_offset_in_bytes())); - if (UseCompressedClassPointers) { // Take care not to kill klass - encode_klass_not_null(t1, klass); - strw(t1, Address(obj, oopDesc::klass_offset_in_bytes())); + if (UseCompactObjectHeaders) { + ldr(t1, Address(klass, Klass::prototype_header_offset())); + str(t1, Address(obj, oopDesc::mark_offset_in_bytes())); } else { - str(klass, Address(obj, oopDesc::klass_offset_in_bytes())); + mov(t1, checked_cast(markWord::prototype().value())); + str(t1, Address(obj, oopDesc::mark_offset_in_bytes())); + if (UseCompressedClassPointers) { // Take care not to kill klass + encode_klass_not_null(t1, klass); + strw(t1, Address(obj, oopDesc::klass_offset_in_bytes())); + } else { + str(klass, Address(obj, oopDesc::klass_offset_in_bytes())); + } } if (len->is_valid()) { @@ -194,7 
+198,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register // Clear gap/first 4 bytes following the length field. strw(zr, Address(obj, base_offset)); } - } else if (UseCompressedClassPointers) { + } else if (UseCompressedClassPointers && !UseCompactObjectHeaders) { store_klass_gap(obj, zr); } } diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp index 19af03d348806..5377506609b7f 100644 --- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp @@ -2557,3 +2557,22 @@ bool C2_MacroAssembler::in_scratch_emit_size() { } return MacroAssembler::in_scratch_emit_size(); } + +void C2_MacroAssembler::load_nklass_compact(Register dst, Register obj, Register index, int scale, int disp) { + // Note: Don't clobber obj anywhere in that method! + + // The incoming address is pointing into obj-start + klass_offset_in_bytes. We need to extract + // obj-start, so that we can load from the object's mark-word instead. Usually the address + // comes as obj-start in obj and klass_offset_in_bytes in disp. However, sometimes C2 + // emits code that pre-computes obj-start + klass_offset_in_bytes into a register, and + // then passes that register as obj and 0 in disp. The following code extracts the base + // and offset to load the mark-word. 
+ int offset = oopDesc::mark_offset_in_bytes() + disp - oopDesc::klass_offset_in_bytes(); + if (index == noreg) { + ldr(dst, Address(obj, offset)); + } else { + lea(dst, Address(obj, index, Address::lsl(scale))); + ldr(dst, Address(dst, offset)); + } + lsr(dst, dst, markWord::klass_shift); +} diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp index 43e60ae5a48f8..bd7b2f6cab9d9 100644 --- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp @@ -179,4 +179,6 @@ void vector_signum_sve(FloatRegister dst, FloatRegister src, FloatRegister zero, FloatRegister one, FloatRegister vtmp, PRegister pgtmp, SIMD_RegVariant T); + void load_nklass_compact(Register dst, Register obj, Register index, int scale, int disp); + #endif // CPU_AARCH64_C2_MACROASSEMBLER_AARCH64_HPP diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp index f8b703fb4daf6..30781e15777f6 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -4838,8 +4838,19 @@ void MacroAssembler::load_method_holder(Register holder, Register method) { ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* } +// Loads the obj's Klass* into dst. +// Preserves all registers (incl src, rscratch1 and rscratch2). 
+void MacroAssembler::load_nklass_compact(Register dst, Register src) { + assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders"); + ldr(dst, Address(src, oopDesc::mark_offset_in_bytes())); + lsr(dst, dst, markWord::klass_shift); +} + void MacroAssembler::load_klass(Register dst, Register src) { - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + load_nklass_compact(dst, src); + decode_klass_not_null(dst); + } else if (UseCompressedClassPointers) { ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); decode_klass_not_null(dst); } else { @@ -4895,8 +4906,13 @@ void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, R } void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) { + assert_different_registers(oop, trial_klass, tmp); if (UseCompressedClassPointers) { - ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); + if (UseCompactObjectHeaders) { + load_nklass_compact(tmp, oop); + } else { + ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); + } if (CompressedKlassPointers::base() == nullptr) { cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift()); return; @@ -4913,9 +4929,26 @@ void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) cmp(trial_klass, tmp); } +void MacroAssembler::cmp_klass(Register src, Register dst, Register tmp1, Register tmp2) { + if (UseCompactObjectHeaders) { + load_nklass_compact(tmp1, src); + load_nklass_compact(tmp2, dst); + cmpw(tmp1, tmp2); + } else if (UseCompressedClassPointers) { + ldrw(tmp1, Address(src, oopDesc::klass_offset_in_bytes())); + ldrw(tmp2, Address(dst, oopDesc::klass_offset_in_bytes())); + cmpw(tmp1, tmp2); + } else { + ldr(tmp1, Address(src, oopDesc::klass_offset_in_bytes())); + ldr(tmp2, Address(dst, oopDesc::klass_offset_in_bytes())); + cmp(tmp1, tmp2); + } +} + void MacroAssembler::store_klass(Register dst, Register src) { // FIXME: Should this be a store release? 
concurrent gcs assumes // klass length is valid if klass field is not null. + assert(!UseCompactObjectHeaders, "not with compact headers"); if (UseCompressedClassPointers) { encode_klass_not_null(src); strw(src, Address(dst, oopDesc::klass_offset_in_bytes())); @@ -4925,6 +4958,7 @@ void MacroAssembler::store_klass(Register dst, Register src) { } void MacroAssembler::store_klass_gap(Register dst, Register src) { + assert(!UseCompactObjectHeaders, "not with compact headers"); if (UseCompressedClassPointers) { // Store to klass gap in destination strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes())); diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp index e49f0c49ef66f..d0dd122c0b8fc 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp @@ -875,9 +875,11 @@ class MacroAssembler: public Assembler { void load_method_holder(Register holder, Register method); // oop manipulations + void load_nklass_compact(Register dst, Register src); void load_klass(Register dst, Register src); void store_klass(Register dst, Register src); void cmp_klass(Register oop, Register trial_klass, Register tmp); + void cmp_klass(Register src, Register dst, Register tmp1, Register tmp2); void resolve_weak_handle(Register result, Register tmp1, Register tmp2); void resolve_oop_handle(Register result, Register tmp1, Register tmp2); diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp index f7cf99381578b..825d05998cad4 100644 --- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp @@ -3628,12 +3628,22 @@ void TemplateTable::_new() { // The object is initialized before the header. If the object size is // zero, go directly to the header initialization. 
- __ sub(r3, r3, sizeof(oopDesc)); + if (UseCompactObjectHeaders) { + assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned"); + __ sub(r3, r3, oopDesc::base_offset_in_bytes()); + } else { + __ sub(r3, r3, sizeof(oopDesc)); + } __ cbz(r3, initialize_header); // Initialize object fields { - __ add(r2, r0, sizeof(oopDesc)); + if (UseCompactObjectHeaders) { + assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned"); + __ add(r2, r0, oopDesc::base_offset_in_bytes()); + } else { + __ add(r2, r0, sizeof(oopDesc)); + } Label loop; __ bind(loop); __ str(zr, Address(__ post(r2, BytesPerLong))); @@ -3643,10 +3653,15 @@ void TemplateTable::_new() { // initialize object header only. __ bind(initialize_header); - __ mov(rscratch1, (intptr_t)markWord::prototype().value()); - __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes())); - __ store_klass_gap(r0, zr); // zero klass gap for compressed oops - __ store_klass(r0, r4); // store klass last + if (UseCompactObjectHeaders) { + __ ldr(rscratch1, Address(r4, Klass::prototype_header_offset())); + __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes())); + } else { + __ mov(rscratch1, (intptr_t)markWord::prototype().value()); + __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes())); + __ store_klass_gap(r0, zr); // zero klass gap for compressed oops + __ store_klass(r0, r4); // store klass last + } if (DTraceAllocProbes) { // Trigger dtrace event for fastpath diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp index e2fde10b98d86..d7ef87aa6b98d 100644 --- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp @@ -3048,6 +3048,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { Register length = op->length()->as_register(); Register tmp = op->tmp()->as_register(); Register tmp_load_klass = 
LP64_ONLY(rscratch1) NOT_LP64(noreg); + Register tmp2 = UseCompactObjectHeaders ? rscratch2 : noreg; CodeStub* stub = op->stub(); int flags = op->flags(); @@ -3172,8 +3173,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); - Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); - Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); // length and pos's are all sign extended at this point on 64bit @@ -3239,13 +3238,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { // We don't know the array types are compatible if (basic_type != T_OBJECT) { // Simple test for basic type arrays - if (UseCompressedClassPointers) { - __ movl(tmp, src_klass_addr); - __ cmpl(tmp, dst_klass_addr); - } else { - __ movptr(tmp, src_klass_addr); - __ cmpptr(tmp, dst_klass_addr); - } + __ cmp_klass(src, dst, tmp, tmp2); __ jcc(Assembler::notEqual, *stub->entry()); } else { // For object arrays, if src is a sub class of dst then we can @@ -3304,6 +3297,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { store_parameter(src, 4); #ifndef _LP64 + Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); __ movptr(tmp, dst_klass_addr); __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset())); __ push(tmp); @@ -3407,16 +3401,12 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { #endif if (basic_type != T_OBJECT) { - - if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr); - else __ cmpptr(tmp, dst_klass_addr); + __ cmp_klass(tmp, dst, tmp2); __ jcc(Assembler::notEqual, halt); - if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr); - else __ cmpptr(tmp, src_klass_addr); + __ cmp_klass(tmp, src, tmp2); __ jcc(Assembler::equal, known_ok); } else { - if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr); - else __ 
cmpptr(tmp, dst_klass_addr); + __ cmp_klass(tmp, dst, tmp2); __ jcc(Assembler::equal, known_ok); __ cmpptr(src, dst); __ jcc(Assembler::equal, known_ok); @@ -3514,12 +3504,22 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { } #ifdef _LP64 - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + Register tmp = rscratch1; + assert_different_registers(tmp, obj); + assert_different_registers(tmp, result); + + __ movq(result, Address(obj, oopDesc::mark_offset_in_bytes())); + __ shrq(result, markWord::klass_shift); + __ decode_klass_not_null(result, tmp); + } else if (UseCompressedClassPointers) { __ movl(result, Address(obj, oopDesc::klass_offset_in_bytes())); __ decode_klass_not_null(result, rscratch1); } else #endif + { __ movptr(result, Address(obj, oopDesc::klass_offset_in_bytes())); + } } void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp index 576592d05aa77..ef8bae2105fac 100644 --- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp @@ -171,16 +171,20 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) { - assert_different_registers(obj, klass, len); - movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast(markWord::prototype().value())); + assert_different_registers(obj, klass, len, t1, t2); #ifdef _LP64 - if (UseCompressedClassPointers) { // Take care not to kill klass + if (UseCompactObjectHeaders) { + movptr(t1, Address(klass, Klass::prototype_header_offset())); + movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1); + } else if (UseCompressedClassPointers) { // Take care not to kill klass + movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast(markWord::prototype().value())); movptr(t1, klass); 
encode_klass_not_null(t1, rscratch1); movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1); } else #endif { + movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast(markWord::prototype().value())); movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass); } @@ -197,7 +201,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register #endif } #ifdef _LP64 - else if (UseCompressedClassPointers) { + else if (UseCompressedClassPointers && !UseCompactObjectHeaders) { xorptr(t1, t1); store_klass_gap(obj, t1); } @@ -231,7 +235,9 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "con_size_in_bytes is not multiple of alignment"); const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize; - + if (UseCompactObjectHeaders) { + assert(hdr_size_in_bytes == 8, "check object headers size"); + } initialize_header(obj, klass, noreg, t1, t2); if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) { diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp index 5dbfdbc225d75..079911827d576 100644 --- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp @@ -6510,3 +6510,19 @@ void C2_MacroAssembler::vector_rearrange_int_float(BasicType bt, XMMRegister dst vpermps(dst, shuffle, src, vlen_enc); } } + +#ifdef _LP64 +void C2_MacroAssembler::load_nklass_compact_c2(Register dst, Register obj, Register index, Address::ScaleFactor scale, int disp) { + // Note: Don't clobber obj anywhere in that method! + + // The incoming address is pointing into obj-start + klass_offset_in_bytes. We need to extract + // obj-start, so that we can load from the object's mark-word instead. Usually the address + // comes as obj-start in obj and klass_offset_in_bytes in disp. 
However, sometimes C2 + // emits code that pre-computes obj-start + klass_offset_in_bytes into a register, and + // then passes that register as obj and 0 in disp. The following code extracts the base + // and offset to load the mark-word. + int offset = oopDesc::mark_offset_in_bytes() + disp - oopDesc::klass_offset_in_bytes(); + movq(dst, Address(obj, index, scale, offset)); + shrq(dst, markWord::klass_shift); +} +#endif diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp index af57546b3d143..01c015cb2a367 100644 --- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp @@ -505,4 +505,6 @@ void vgather8b_offset(BasicType elem_bt, XMMRegister dst, Register base, Register idx_base, Register offset, Register rtmp, int vlen_enc); + void load_nklass_compact_c2(Register dst, Register obj, Register index, Address::ScaleFactor scale, int disp); + #endif // CPU_X86_C2_MACROASSEMBLER_X86_HPP diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp index ba337751d19d1..2e6a943de5f3f 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp @@ -5673,19 +5673,33 @@ void MacroAssembler::load_method_holder(Register holder, Register method) { movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* } +#ifdef _LP64 +void MacroAssembler::load_nklass_compact(Register dst, Register src) { + assert(UseCompactObjectHeaders, "expect compact object headers"); + movq(dst, Address(src, oopDesc::mark_offset_in_bytes())); + shrq(dst, markWord::klass_shift); +} +#endif + void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { assert_different_registers(src, tmp); assert_different_registers(dst, tmp); #ifdef _LP64 - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + load_nklass_compact(dst, src); + decode_klass_not_null(dst, tmp); + } 
else if (UseCompressedClassPointers) { movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); decode_klass_not_null(dst, tmp); } else #endif + { movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); + } } void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { + assert(!UseCompactObjectHeaders, "not with compact headers"); assert_different_registers(src, tmp); assert_different_registers(dst, tmp); #ifdef _LP64 @@ -5697,6 +5711,39 @@ void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src); } +void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) { +#ifdef _LP64 + if (UseCompactObjectHeaders) { + load_nklass_compact(tmp, obj); + cmpl(klass, tmp); + } else if (UseCompressedClassPointers) { + cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes())); + } else +#endif + { + cmpptr(klass, Address(obj, oopDesc::klass_offset_in_bytes())); + } +} + +void MacroAssembler::cmp_klass(Register src, Register dst, Register tmp1, Register tmp2) { +#ifdef _LP64 + if (UseCompactObjectHeaders) { + assert(tmp2 != noreg, "need tmp2"); + assert_different_registers(src, dst, tmp1, tmp2); + load_nklass_compact(tmp1, src); + load_nklass_compact(tmp2, dst); + cmpl(tmp1, tmp2); + } else if (UseCompressedClassPointers) { + movl(tmp1, Address(src, oopDesc::klass_offset_in_bytes())); + cmpl(tmp1, Address(dst, oopDesc::klass_offset_in_bytes())); + } else +#endif + { + movptr(tmp1, Address(src, oopDesc::klass_offset_in_bytes())); + cmpptr(tmp1, Address(dst, oopDesc::klass_offset_in_bytes())); + } +} + void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, Register tmp1, Register thread_tmp) { BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); @@ -5744,6 +5791,7 @@ void MacroAssembler::store_heap_oop_null(Address dst) { #ifdef _LP64 void MacroAssembler::store_klass_gap(Register dst, 
Register src) { + assert(!UseCompactObjectHeaders, "Don't use with compact headers"); if (UseCompressedClassPointers) { // Store to klass gap in destination movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp index 594f0b95ca3e2..5ea5e0d0d24c7 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp @@ -363,9 +363,20 @@ class MacroAssembler: public Assembler { void load_method_holder(Register holder, Register method); // oop manipulations +#ifdef _LP64 + void load_nklass_compact(Register dst, Register src); +#endif void load_klass(Register dst, Register src, Register tmp); void store_klass(Register dst, Register src, Register tmp); + // Compares the Klass pointer of an object to a given Klass (which might be narrow, + // depending on UseCompressedClassPointers). + void cmp_klass(Register klass, Register dst, Register tmp); + + // Compares the Klass pointer of two objects o1 and o2. Result is in the condition flags. + // Uses tmp1 and tmp2 as temporary registers. + void cmp_klass(Register src, Register dst, Register tmp1, Register tmp2); + void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, Register tmp1, Register thread_tmp); void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86.cpp index 78330962d1a43..6bf3f6a7b752c 100644 --- a/src/hotspot/cpu/x86/sharedRuntime_x86.cpp +++ b/src/hotspot/cpu/x86/sharedRuntime_x86.cpp @@ -78,8 +78,13 @@ void SharedRuntime::inline_check_hashcode_from_object_header(MacroAssembler* mas // Read the header and build a mask to get its hash field. // Depend on hash_mask being at most 32 bits and avoid the use of hash_mask_in_place // because it could be larger than 32 bits in a 64-bit vm. See markWord.hpp. 
- __ shrptr(result, markWord::hash_shift); - __ andptr(result, markWord::hash_mask); + if (UseCompactObjectHeaders) { + __ shrptr(result, markWord::hash_shift_compact); + __ andptr(result, markWord::hash_mask_compact); + } else { + __ shrptr(result, markWord::hash_shift); + __ andptr(result, markWord::hash_mask); + } #else __ andptr(result, markWord::hash_mask_in_place); #endif //_LP64 diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp index 2bc4a0a9cba94..7800dc3959208 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp @@ -4004,7 +4004,7 @@ void StubGenerator::generate_compiler_stubs() { generate_chacha_stubs(); #ifdef COMPILER2 - if ((UseAVX == 2) && EnableX86ECoreOpts) { + if ((UseAVX == 2) && EnableX86ECoreOpts && !UseCompactObjectHeaders) { generate_string_indexof(StubRoutines::_string_indexof_array); } #endif diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp index fc6844aedd6b2..2fd14073ea8ea 100644 --- a/src/hotspot/cpu/x86/templateTable_x86.cpp +++ b/src/hotspot/cpu/x86/templateTable_x86.cpp @@ -4084,7 +4084,12 @@ void TemplateTable::_new() { // The object is initialized before the header. If the object size is // zero, go directly to the header initialization. 
- __ decrement(rdx, sizeof(oopDesc)); + if (UseCompactObjectHeaders) { + assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned"); + __ decrement(rdx, oopDesc::base_offset_in_bytes()); + } else { + __ decrement(rdx, sizeof(oopDesc)); + } __ jcc(Assembler::zero, initialize_header); // Initialize topmost object field, divide rdx by 8, check if odd and @@ -4106,22 +4111,35 @@ void TemplateTable::_new() { // initialize remaining object fields: rdx was a multiple of 8 { Label loop; __ bind(loop); - __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx); - NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx)); + if (UseCompactObjectHeaders) { + assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned"); + int header_size = oopDesc::base_offset_in_bytes(); + __ movptr(Address(rax, rdx, Address::times_8, header_size - 1*oopSize), rcx); + NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, header_size - 2*oopSize), rcx)); + } else { + __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx); + NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx)); + } __ decrement(rdx); __ jcc(Assembler::notZero, loop); } // initialize object header only. __ bind(initialize_header); - __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), - (intptr_t)markWord::prototype().value()); // header - __ pop(rcx); // get saved klass back in the register. + if (UseCompactObjectHeaders) { + __ pop(rcx); // get saved klass back in the register. + __ movptr(rbx, Address(rcx, Klass::prototype_header_offset())); + __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rbx); + } else { + __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), + (intptr_t)markWord::prototype().value()); // header + __ pop(rcx); // get saved klass back in the register. 
#ifdef _LP64 - __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code) - __ store_klass_gap(rax, rsi); // zero klass gap for compressed oops + __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code) + __ store_klass_gap(rax, rsi); // zero klass gap for compressed oops #endif - __ store_klass(rax, rcx, rscratch1); // klass + __ store_klass(rax, rcx, rscratch1); // klass + } if (DTraceAllocProbes) { // Trigger dtrace event for fastpath diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad index 3bc4cac2f06a7..641b477d46ed6 100644 --- a/src/hotspot/cpu/x86/x86_64.ad +++ b/src/hotspot/cpu/x86/x86_64.ad @@ -4369,6 +4369,7 @@ instruct loadKlass(rRegP dst, memory mem) // Load narrow Klass Pointer instruct loadNKlass(rRegN dst, memory mem) %{ + predicate(!UseCompactObjectHeaders); match(Set dst (LoadNKlass mem)); ins_cost(125); // XXX @@ -4379,6 +4380,21 @@ instruct loadNKlass(rRegN dst, memory mem) ins_pipe(ialu_reg_mem); // XXX %} +instruct loadNKlassCompactHeaders(rRegN dst, memory mem, rFlagsReg cr) +%{ + predicate(UseCompactObjectHeaders); + match(Set dst (LoadNKlass mem)); + effect(KILL cr); + ins_cost(125); // XXX + format %{ "movl $dst, $mem\t# compressed klass ptr" %} + ins_encode %{ + Register index = $mem$$index != 4 ? $mem$$index$$Register : noreg; + Address::ScaleFactor sf = (index != noreg) ? 
static_cast($mem$$scale) : Address::no_scale; + __ load_nklass_compact_c2($dst$$Register, $mem$$base$$Register, index, sf, $mem$$disp); + %} + ins_pipe(pipe_slow); // XXX +%} + // Load Float instruct loadF(regF dst, memory mem) %{ @@ -11716,6 +11732,7 @@ instruct compN_rReg_imm_klass(rFlagsRegU cr, rRegN op1, immNKlass op2) %{ instruct compN_mem_imm_klass(rFlagsRegU cr, memory mem, immNKlass src) %{ + predicate(!UseCompactObjectHeaders); match(Set cr (CmpN src (LoadNKlass mem))); format %{ "cmpl $mem, $src\t# compressed klass ptr" %} diff --git a/src/hotspot/share/cds/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp index 76b6698a40099..9dfaf06a62b4b 100644 --- a/src/hotspot/share/cds/archiveBuilder.cpp +++ b/src/hotspot/share/cds/archiveBuilder.cpp @@ -780,6 +780,15 @@ void ArchiveBuilder::make_klasses_shareable() { const char* generated = ""; Klass* k = get_buffered_addr(klasses()->at(i)); k->remove_java_mirror(); +#ifdef _LP64 + if (UseCompactObjectHeaders) { + Klass* requested_k = to_requested(k); + address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start + const int narrow_klass_shift = precomputed_narrow_klass_shift; + narrowKlass nk = CompressedKlassPointers::encode_not_null(requested_k, narrow_klass_base, narrow_klass_shift); + k->set_prototype_header(markWord::prototype().set_narrow_klass(nk)); + } +#endif //_LP64 if (k->is_objArray_klass()) { // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info // on their array classes. 
@@ -878,7 +887,7 @@ narrowKlass ArchiveBuilder::get_requested_narrow_klass(Klass* k) { k = get_buffered_klass(k); Klass* requested_k = to_requested(k); address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start - const int narrow_klass_shift = ArchiveHeapWriter::precomputed_narrow_klass_shift; + const int narrow_klass_shift = precomputed_narrow_klass_shift; return CompressedKlassPointers::encode_not_null(requested_k, narrow_klass_base, narrow_klass_shift); } #endif // INCLUDE_CDS_JAVA_HEAP diff --git a/src/hotspot/share/cds/archiveBuilder.hpp b/src/hotspot/share/cds/archiveBuilder.hpp index c17090ee53d8f..5733a16b35bf5 100644 --- a/src/hotspot/share/cds/archiveBuilder.hpp +++ b/src/hotspot/share/cds/archiveBuilder.hpp @@ -90,6 +90,20 @@ const int SharedSpaceObjectAlignment = KlassAlignmentInBytes; // buffered_address + _buffer_to_requested_delta == requested_address // class ArchiveBuilder : public StackObj { +public: + // The archive contains pre-computed narrow Klass IDs in two places: + // - in the header of archived java objects (only if the archive contains java heap portions) + // - within the prototype markword of archived Klass structures. + // These narrow Klass ids have been computed at dump time with the following scheme: + // 1) the encoding base must be the mapping start address. + // 2) shift must be large enough to result in an encoding range that covers the runtime Klass range. + // That Klass range is defined by CDS archive size and runtime class space size. Luckily, the maximum + // size can be predicted: archive size is assumed to be <1G, class space size capped at 3G, and at + // runtime we put both regions adjacent to each other. Therefore, runtime Klass range size < 4G. + // Since nKlass itself is 32 bit, our encoding range len is 4G, and since we set the base directly + // at mapping start, these 4G are enough. Therefore, we don't need to shift at all (shift=0). 
+ static constexpr int precomputed_narrow_klass_shift = 0; + protected: DumpRegion* _current_dump_region; address _buffer_bottom; // for writing the contents of rw/ro regions diff --git a/src/hotspot/share/cds/archiveHeapWriter.cpp b/src/hotspot/share/cds/archiveHeapWriter.cpp index bf49805658c09..8d85c660c31de 100644 --- a/src/hotspot/share/cds/archiveHeapWriter.cpp +++ b/src/hotspot/share/cds/archiveHeapWriter.cpp @@ -205,8 +205,13 @@ void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeapget_requested_narrow_klass(k); + oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk)); + } else { + oopDesc::set_mark(mem, markWord::prototype()); + oopDesc::release_set_klass(mem, k); + } } { // This is copied from ObjArrayAllocator::initialize @@ -327,9 +332,13 @@ HeapWord* ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, s Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass HeapWord* mem = offset_to_buffered_address(_buffer_used); memset(mem, 0, fill_bytes); - oopDesc::set_mark(mem, markWord::prototype()); narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak); - cast_to_oop(mem)->set_narrow_klass(nk); + if (UseCompactObjectHeaders) { + oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk)); + } else { + oopDesc::set_mark(mem, markWord::prototype()); + cast_to_oop(mem)->set_narrow_klass(nk); + } arrayOopDesc::set_length(mem, array_length); return mem; } @@ -529,13 +538,21 @@ void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop s address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop
(requested_obj)); oop fake_oop = cast_to_oop(buffered_addr); - fake_oop->set_narrow_klass(nk); + if (UseCompactObjectHeaders) { + fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk)); + } else { + fake_oop->set_narrow_klass(nk); + } // We need to retain the identity_hash, because it may have been used by some hashtables // in the shared heap. if (src_obj != nullptr && !src_obj->fast_no_hash_check()) { intptr_t src_hash = src_obj->identity_hash(); - fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash)); + if (UseCompactObjectHeaders) { + fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash)); + } else { + fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash)); + } assert(fake_oop->mark().is_unlocked(), "sanity"); DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash()); diff --git a/src/hotspot/share/cds/archiveHeapWriter.hpp b/src/hotspot/share/cds/archiveHeapWriter.hpp index 352aeb9a08f7c..49879db293b3f 100644 --- a/src/hotspot/share/cds/archiveHeapWriter.hpp +++ b/src/hotspot/share/cds/archiveHeapWriter.hpp @@ -249,17 +249,6 @@ class ArchiveHeapWriter : AllStatic { static oop buffered_addr_to_source_obj(address buffered_addr); static address buffered_addr_to_requested_addr(address buffered_addr); - // Archived heap object headers carry pre-computed narrow Klass ids calculated with the - // following scheme: - // 1) the encoding base must be the mapping start address. - // 2) shift must be large enough to result in an encoding range that covers the runtime Klass range. - // That Klass range is defined by CDS archive size and runtime class space size. Luckily, the maximum - // size can be predicted: archive size is assumed to be <1G, class space size capped at 3G, and at - // runtime we put both regions adjacent to each other. Therefore, runtime Klass range size < 4G. 
- // Since nKlass itself is 32 bit, our encoding range len is 4G, and since we set the base directly - // at mapping start, these 4G are enough. Therefore, we don't need to shift at all (shift=0). - static constexpr int precomputed_narrow_klass_shift = 0; - }; #endif // INCLUDE_CDS_JAVA_HEAP #endif // SHARE_CDS_ARCHIVEHEAPWRITER_HPP diff --git a/src/hotspot/share/cds/cdsConfig.cpp b/src/hotspot/share/cds/cdsConfig.cpp index a0a562eca21a0..bad532bab225c 100644 --- a/src/hotspot/share/cds/cdsConfig.cpp +++ b/src/hotspot/share/cds/cdsConfig.cpp @@ -82,13 +82,20 @@ char* CDSConfig::default_archive_path() { os::jvm_path(jvm_path, sizeof(jvm_path)); char *end = strrchr(jvm_path, *os::file_separator()); if (end != nullptr) *end = '\0'; - size_t jvm_path_len = strlen(jvm_path); - size_t file_sep_len = strlen(os::file_separator()); - const size_t len = jvm_path_len + file_sep_len + 20; - _default_archive_path = NEW_C_HEAP_ARRAY(char, len, mtArguments); - jio_snprintf(_default_archive_path, len, - LP64_ONLY(!UseCompressedOops ? 
"%s%sclasses_nocoops.jsa":) "%s%sclasses.jsa", - jvm_path, os::file_separator()); + stringStream tmp; + tmp.print("%s%sclasses", jvm_path, os::file_separator()); +#ifdef _LP64 + if (!UseCompressedOops) { + tmp.print_raw("_nocoops"); + } + if (UseCompactObjectHeaders) { + // Note that generation of xxx_coh.jsa variants require + // --enable-cds-archive-coh at build time + tmp.print_raw("_coh"); + } +#endif + tmp.print_raw(".jsa"); + _default_archive_path = os::strdup(tmp.base()); } return _default_archive_path; } diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp index dc6c7ea097c65..aaec87c065cbb 100644 --- a/src/hotspot/share/cds/filemap.cpp +++ b/src/hotspot/share/cds/filemap.cpp @@ -204,6 +204,7 @@ void FileMapHeader::populate(FileMapInfo *info, size_t core_region_alignment, _core_region_alignment = core_region_alignment; _obj_alignment = ObjectAlignmentInBytes; _compact_strings = CompactStrings; + _compact_headers = UseCompactObjectHeaders; if (CDSConfig::is_dumping_heap()) { _narrow_oop_mode = CompressedOops::mode(); _narrow_oop_base = CompressedOops::base(); @@ -271,6 +272,7 @@ void FileMapHeader::print(outputStream* st) { st->print_cr("- narrow_oop_base: " INTPTR_FORMAT, p2i(_narrow_oop_base)); st->print_cr("- narrow_oop_shift %d", _narrow_oop_shift); st->print_cr("- compact_strings: %d", _compact_strings); + st->print_cr("- compact_headers: %d", _compact_headers); st->print_cr("- max_heap_size: " UINTX_FORMAT, _max_heap_size); st->print_cr("- narrow_oop_mode: %d", _narrow_oop_mode); st->print_cr("- compressed_oops: %d", _compressed_oops); @@ -2053,11 +2055,11 @@ bool FileMapInfo::can_use_heap_region() { } // We pre-compute narrow Klass IDs with the runtime mapping start intended to be the base, and a shift of - // ArchiveHeapWriter::precomputed_narrow_klass_shift. We enforce this encoding at runtime (see + // ArchiveBuilder::precomputed_narrow_klass_shift. 
We enforce this encoding at runtime (see // CompressedKlassPointers::initialize_for_given_encoding()). Therefore, the following assertions must // hold: address archive_narrow_klass_base = (address)header()->mapped_base_address(); - const int archive_narrow_klass_shift = ArchiveHeapWriter::precomputed_narrow_klass_shift; + const int archive_narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift; log_info(cds)("CDS archive was created with max heap size = " SIZE_FORMAT "M, and the following configuration:", max_heap_size()/M); @@ -2465,6 +2467,14 @@ bool FileMapHeader::validate() { return false; } + if (compact_headers() != UseCompactObjectHeaders) { + log_info(cds)("The shared archive file's UseCompactObjectHeaders setting (%s)" + " does not equal the current UseCompactObjectHeaders setting (%s).", + _compact_headers ? "enabled" : "disabled", + UseCompactObjectHeaders ? "enabled" : "disabled"); + return false; + } + if (!_use_optimized_module_handling) { CDSConfig::stop_using_optimized_module_handling(); log_info(cds)("optimized module handling: disabled because archive was created without optimized module handling"); diff --git a/src/hotspot/share/cds/filemap.hpp b/src/hotspot/share/cds/filemap.hpp index 7b10c16920b8f..fb25aab506ca6 100644 --- a/src/hotspot/share/cds/filemap.hpp +++ b/src/hotspot/share/cds/filemap.hpp @@ -187,6 +187,7 @@ class FileMapHeader: private CDSFileMapHeaderBase { address _narrow_oop_base; // compressed oop encoding base int _narrow_oop_shift; // compressed oop encoding shift bool _compact_strings; // value of CompactStrings + bool _compact_headers; // value of UseCompactObjectHeaders uintx _max_heap_size; // java max heap size during dumping CompressedOops::Mode _narrow_oop_mode; // compressed oop encoding mode bool _compressed_oops; // save the flag UseCompressedOops @@ -259,6 +260,7 @@ class FileMapHeader: private CDSFileMapHeaderBase { address narrow_oop_base() const { return _narrow_oop_base; } int narrow_oop_shift() 
const { return _narrow_oop_shift; } bool compact_strings() const { return _compact_strings; } + bool compact_headers() const { return _compact_headers; } uintx max_heap_size() const { return _max_heap_size; } CompressedOops::Mode narrow_oop_mode() const { return _narrow_oop_mode; } char* cloned_vtables() const { return from_mapped_offset(_cloned_vtables_offset); } diff --git a/src/hotspot/share/cds/metaspaceShared.cpp b/src/hotspot/share/cds/metaspaceShared.cpp index 4d978a7ad880f..07acd7c266384 100644 --- a/src/hotspot/share/cds/metaspaceShared.cpp +++ b/src/hotspot/share/cds/metaspaceShared.cpp @@ -1182,19 +1182,25 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File address cds_base = (address)static_mapinfo->mapped_base(); address ccs_end = (address)class_space_rs.end(); assert(ccs_end > cds_base, "Sanity check"); -#if INCLUDE_CDS_JAVA_HEAP - // We archived objects with pre-computed narrow Klass id. Set up encoding such that these Ids stay valid. - address precomputed_narrow_klass_base = cds_base; - const int precomputed_narrow_klass_shift = ArchiveHeapWriter::precomputed_narrow_klass_shift; - CompressedKlassPointers::initialize_for_given_encoding( - cds_base, ccs_end - cds_base, // Klass range - precomputed_narrow_klass_base, precomputed_narrow_klass_shift // precomputed encoding, see ArchiveHeapWriter - ); -#else - CompressedKlassPointers::initialize ( - cds_base, ccs_end - cds_base // Klass range + if (INCLUDE_CDS_JAVA_HEAP || UseCompactObjectHeaders) { + // The CDS archive may contain narrow Klass IDs that were precomputed at archive generation time: + // - every archived java object header (only if INCLUDE_CDS_JAVA_HEAP) + // - every archived Klass' prototype (only if +UseCompactObjectHeaders) + // + // In order for those IDs to still be valid, we need to dictate base and shift: base should be the + // mapping start, shift the shift used at archive generation time. 
+ address precomputed_narrow_klass_base = cds_base; + const int precomputed_narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift; + CompressedKlassPointers::initialize_for_given_encoding( + cds_base, ccs_end - cds_base, // Klass range + precomputed_narrow_klass_base, precomputed_narrow_klass_shift // precomputed encoding, see ArchiveBuilder ); -#endif // INCLUDE_CDS_JAVA_HEAP + } else { + // Let JVM freely chose encoding base and shift + CompressedKlassPointers::initialize ( + cds_base, ccs_end - cds_base // Klass range + ); + } // map_or_load_heap_region() compares the current narrow oop and klass encodings // with the archived ones, so it must be done after all encodings are determined. static_mapinfo->map_or_load_heap_region(); diff --git a/src/hotspot/share/ci/ciKlass.cpp b/src/hotspot/share/ci/ciKlass.cpp index 6e70d69f05d8f..82ae9d424c7b8 100644 --- a/src/hotspot/share/ci/ciKlass.cpp +++ b/src/hotspot/share/ci/ciKlass.cpp @@ -249,3 +249,23 @@ const char* ciKlass::external_name() const { return get_Klass()->external_name(); ) } + +// ------------------------------------------------------------------ +// ciKlass::prototype_header_offset +juint ciKlass::prototype_header_offset() { + assert(is_loaded(), "must be loaded"); + + VM_ENTRY_MARK; + Klass* this_klass = get_Klass(); + return in_bytes(this_klass->prototype_header_offset()); +} + +// ------------------------------------------------------------------ +// ciKlass::prototype_header +uintptr_t ciKlass::prototype_header() { + assert(is_loaded(), "must be loaded"); + + VM_ENTRY_MARK; + Klass* this_klass = get_Klass(); + return (uintptr_t)this_klass->prototype_header().to_pointer(); +} diff --git a/src/hotspot/share/ci/ciKlass.hpp b/src/hotspot/share/ci/ciKlass.hpp index 2dd5a5e2c0b7b..fa6d363ec3da5 100644 --- a/src/hotspot/share/ci/ciKlass.hpp +++ b/src/hotspot/share/ci/ciKlass.hpp @@ -129,6 +129,9 @@ class ciKlass : public ciType { void print_name_on(outputStream* st); const char* external_name() 
const; + + juint prototype_header_offset(); + uintptr_t prototype_header(); }; #endif // SHARE_CI_CIKLASS_HPP diff --git a/src/hotspot/share/gc/g1/g1Arguments.cpp b/src/hotspot/share/gc/g1/g1Arguments.cpp index 3d4ce0d780da9..d42987e8ca12e 100644 --- a/src/hotspot/share/gc/g1/g1Arguments.cpp +++ b/src/hotspot/share/gc/g1/g1Arguments.cpp @@ -35,6 +35,7 @@ #include "gc/g1/g1HeapVerifier.hpp" #include "gc/shared/cardTable.hpp" #include "gc/shared/gcArguments.hpp" +#include "gc/shared/gcForwarding.hpp" #include "gc/shared/workerPolicy.hpp" #include "runtime/globals.hpp" #include "runtime/globals_extension.hpp" @@ -247,6 +248,7 @@ void G1Arguments::initialize() { void G1Arguments::initialize_heap_flags_and_sizes() { GCArguments::initialize_heap_flags_and_sizes(); + GCForwarding::initialize_flags(heap_reserved_size_bytes()); } CollectedHeap* G1Arguments::create_heap() { diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp index fd73b725a1289..757fcd1c17691 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -78,6 +78,7 @@ #include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/concurrentGCBreakpoints.hpp" #include "gc/shared/gcBehaviours.hpp" +#include "gc/shared/gcForwarding.hpp" #include "gc/shared/gcHeapSummary.hpp" #include "gc/shared/gcId.hpp" #include "gc/shared/gcTimer.hpp" @@ -85,7 +86,6 @@ #include "gc/shared/isGCActiveMark.hpp" #include "gc/shared/locationPrinter.inline.hpp" #include "gc/shared/oopStorageParState.hpp" -#include "gc/shared/preservedMarks.inline.hpp" #include "gc/shared/referenceProcessor.inline.hpp" #include "gc/shared/suspendibleThreadSet.hpp" #include "gc/shared/taskqueue.inline.hpp" @@ -1435,6 +1435,8 @@ jint G1CollectedHeap::initialize() { G1InitLogger::print(); + GCForwarding::initialize(heap_rs.region()); + return JNI_OK; } diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp 
b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp index 05f669592433b..37c12ff127239 100644 --- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp @@ -29,6 +29,7 @@ #include "gc/g1/g1FullGCCompactionPoint.hpp" #include "gc/g1/g1FullGCCompactTask.hpp" #include "gc/g1/g1HeapRegion.inline.hpp" +#include "gc/shared/gcForwarding.inline.hpp" #include "gc/shared/gcTraceTime.inline.hpp" #include "logging/log.hpp" #include "oops/oop.inline.hpp" @@ -41,7 +42,7 @@ void G1FullGCCompactTask::G1CompactRegionClosure::clear_in_bitmap(oop obj) { size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) { size_t size = obj->size(); - if (obj->is_forwarded()) { + if (GCForwarding::is_forwarded(obj)) { G1FullGCCompactTask::copy_object_to_new_location(obj); } @@ -52,13 +53,13 @@ size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) { } void G1FullGCCompactTask::copy_object_to_new_location(oop obj) { - assert(obj->is_forwarded(), "Sanity!"); - assert(obj->forwardee() != obj, "Object must have a new location"); + assert(GCForwarding::is_forwarded(obj), "Sanity!"); + assert(GCForwarding::forwardee(obj) != obj, "Object must have a new location"); size_t size = obj->size(); // Copy object and reinit its mark. HeapWord* obj_addr = cast_from_oop(obj); - HeapWord* destination = cast_from_oop(obj->forwardee()); + HeapWord* destination = cast_from_oop(GCForwarding::forwardee(obj)); Copy::aligned_conjoint_words(obj_addr, destination, size); // There is no need to transform stack chunks - marking already did that. 
@@ -121,7 +122,7 @@ void G1FullGCCompactTask::compact_humongous_obj(G1HeapRegion* src_hr) { size_t word_size = obj->size(); uint num_regions = (uint)G1CollectedHeap::humongous_obj_size_in_regions(word_size); - HeapWord* destination = cast_from_oop(obj->forwardee()); + HeapWord* destination = cast_from_oop(GCForwarding::forwardee(obj)); assert(collector()->mark_bitmap()->is_marked(obj), "Should only compact marked objects"); collector()->mark_bitmap()->clear(obj); diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp index 019484c810a54..3d6edd99a820a 100644 --- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp @@ -26,6 +26,7 @@ #include "gc/g1/g1FullCollector.inline.hpp" #include "gc/g1/g1FullGCCompactionPoint.hpp" #include "gc/g1/g1HeapRegion.hpp" +#include "gc/shared/gcForwarding.inline.hpp" #include "gc/shared/preservedMarks.inline.hpp" #include "oops/oop.inline.hpp" #include "utilities/debug.hpp" @@ -106,10 +107,10 @@ void G1FullGCCompactionPoint::forward(oop object, size_t size) { if (!object->is_forwarded()) { preserved_stack()->push_if_necessary(object, object->mark()); } - object->forward_to(cast_to_oop(_compaction_top)); - assert(object->is_forwarded(), "must be forwarded"); + GCForwarding::forward_to(object, cast_to_oop(_compaction_top)); + assert(GCForwarding::is_forwarded(object), "must be forwarded"); } else { - assert(!object->is_forwarded(), "must not be forwarded"); + assert(!GCForwarding::is_forwarded(object), "must not be forwarded"); } // Update compaction values. 
@@ -172,8 +173,8 @@ void G1FullGCCompactionPoint::forward_humongous(G1HeapRegion* hr) { preserved_stack()->push_if_necessary(obj, obj->mark()); G1HeapRegion* dest_hr = _compaction_regions->at(range_begin); - obj->forward_to(cast_to_oop(dest_hr->bottom())); - assert(obj->is_forwarded(), "Object must be forwarded!"); + GCForwarding::forward_to(obj, cast_to_oop(dest_hr->bottom())); + assert(GCForwarding::is_forwarded(obj), "Object must be forwarded!"); // Add the humongous object regions to the compaction point. add_humongous(hr); diff --git a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp index f10f884b24253..4c1d8541c1dbd 100644 --- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp @@ -32,6 +32,7 @@ #include "gc/g1/g1FullCollector.inline.hpp" #include "gc/g1/g1FullGCMarker.inline.hpp" #include "gc/g1/g1HeapRegionRemSet.inline.hpp" +#include "gc/shared/gcForwarding.inline.hpp" #include "memory/iterator.inline.hpp" #include "memory/universe.hpp" #include "oops/access.inline.hpp" @@ -65,8 +66,8 @@ template inline void G1AdjustClosure::adjust_pointer(T* p) { return; } - if (obj->is_forwarded()) { - oop forwardee = obj->forwardee(); + if (GCForwarding::is_forwarded(obj)) { + oop forwardee = GCForwarding::forwardee(obj); // Forwarded, just update. 
assert(G1CollectedHeap::heap()->is_in_reserved(forwardee), "should be in object space"); RawAccess::oop_store(p, forwardee); diff --git a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp index 2fb61cc593480..f102dd0dfc49b 100644 --- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp @@ -32,6 +32,7 @@ #include "gc/g1/g1FullGCCompactionPoint.hpp" #include "gc/g1/g1FullGCScope.hpp" #include "gc/g1/g1HeapRegion.inline.hpp" +#include "gc/shared/gcForwarding.inline.hpp" void G1DetermineCompactionQueueClosure::free_empty_humongous_region(G1HeapRegion* hr) { _g1h->free_humongous_region(hr, nullptr); @@ -114,10 +115,10 @@ inline bool G1DetermineCompactionQueueClosure::do_heap_region(G1HeapRegion* hr) } inline size_t G1SerialRePrepareClosure::apply(oop obj) { - if (obj->is_forwarded()) { + if (GCForwarding::is_forwarded(obj)) { // We skip objects compiled into the first region or // into regions not part of the serial compaction point. 
- if (cast_from_oop(obj->forwardee()) < _dense_prefix_top) { + if (cast_from_oop(GCForwarding::forwardee(obj)) < _dense_prefix_top) { return obj->size(); } } diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp index 32a56d7120569..4819cbfed7074 100644 --- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp +++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp @@ -105,7 +105,6 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) : _gc_par_phases[UpdateDerivedPointers] = new WorkerDataArray("UpdateDerivedPointers", "Update Derived Pointers (ms):", max_gc_threads); #endif _gc_par_phases[EagerlyReclaimHumongousObjects] = new WorkerDataArray("EagerlyReclaimHumongousObjects", "Eagerly Reclaim Humongous Objects (ms):", max_gc_threads); - _gc_par_phases[RestorePreservedMarks] = new WorkerDataArray("RestorePreservedMarks", "Restore Preserved Marks (ms):", max_gc_threads); _gc_par_phases[ProcessEvacuationFailedRegions] = new WorkerDataArray("ProcessEvacuationFailedRegions", "Process Evacuation Failed Regions (ms):", max_gc_threads); _gc_par_phases[ScanHR]->create_thread_work_items("Scanned Cards:", ScanHRScannedCards); @@ -512,7 +511,6 @@ double G1GCPhaseTimes::print_post_evacuate_collection_set(bool evacuation_failed debug_time("Post Evacuate Cleanup 2", _cur_post_evacuate_cleanup_2_time_ms); if (evacuation_failed) { debug_phase(_gc_par_phases[RecalculateUsed], 1); - debug_phase(_gc_par_phases[RestorePreservedMarks], 1); debug_phase(_gc_par_phases[ProcessEvacuationFailedRegions], 1); } #if COMPILER2_OR_JVMCI diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp index 40abfd605339f..171efa7be778d 100644 --- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp +++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp @@ -87,7 +87,6 @@ class G1GCPhaseTimes : public CHeapObj { UpdateDerivedPointers, #endif EagerlyReclaimHumongousObjects, - RestorePreservedMarks, 
ProcessEvacuationFailedRegions, ResetMarkingState, NoteStartOfMark, diff --git a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp index 26cc88de0beb5..5b3bbedfeb289 100644 --- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp +++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp @@ -228,7 +228,7 @@ void G1ParCopyClosure::do_oop_work(T* p) { oop forwardee; markWord m = obj->mark(); if (m.is_forwarded()) { - forwardee = m.forwardee(); + forwardee = obj->forwardee(m); } else { forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m); } diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp index f81c3241a1a90..47321f1701d9f 100644 --- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp +++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp @@ -37,7 +37,6 @@ #include "gc/shared/continuationGCSupport.inline.hpp" #include "gc/shared/partialArrayState.hpp" #include "gc/shared/partialArrayTaskStepper.inline.hpp" -#include "gc/shared/preservedMarks.inline.hpp" #include "gc/shared/stringdedup/stringDedup.hpp" #include "gc/shared/taskqueue.inline.hpp" #include "memory/allocation.inline.hpp" @@ -59,7 +58,6 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, G1RedirtyCardsQueueSet* rdcqs, - PreservedMarks* preserved_marks, uint worker_id, uint num_workers, G1CollectionSet* collection_set, @@ -90,7 +88,6 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, _numa(g1h->numa()), _obj_alloc_stat(nullptr), ALLOCATION_FAILURE_INJECTOR_ONLY(_allocation_failure_inject_counter(0) COMMA) - _preserved_marks(preserved_marks), _evacuation_failed_info(), _evac_failure_regions(evac_failure_regions), _evac_failure_enqueued_cards(0) @@ -216,7 +213,7 @@ void G1ParScanThreadState::do_oop_evac(T* p) { markWord m = obj->mark(); if (m.is_forwarded()) { - obj = m.forwardee(); + obj = obj->forwardee(m); } else { obj = 
do_copy_to_survivor_space(region_attr, obj, m); } @@ -232,7 +229,7 @@ void G1ParScanThreadState::do_partial_array(PartialArrayState* state) { #ifdef ASSERT oop from_obj = state->source(); assert(_g1h->is_in_reserved(from_obj), "must be in heap."); - assert(from_obj->is_objArray(), "must be obj array"); + assert(from_obj->forward_safe_klass()->is_objArray_klass(), "must be obj array"); assert(from_obj->is_forwarded(), "must be forwarded"); assert(from_obj != to_obj, "should not be chunking self-forwarded objects"); assert(to_obj->is_objArray(), "must be obj array"); @@ -265,7 +262,7 @@ MAYBE_INLINE_EVACUATION void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr, oop from_obj, oop to_obj) { - assert(from_obj->is_objArray(), "precondition"); + assert(from_obj->forward_safe_klass()->is_objArray_klass(), "precondition"); assert(from_obj->is_forwarded(), "precondition"); assert(from_obj->forwardee() == to_obj, "precondition"); assert(to_obj->is_objArray(), "precondition"); @@ -401,22 +398,22 @@ G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const r } void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr, - oop const old, size_t word_sz, uint age, + Klass* klass, size_t word_sz, uint age, HeapWord * const obj_ptr, uint node_index) const { PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index); if (alloc_buf->contains(obj_ptr)) { - _g1h->gc_tracer_stw()->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age, + _g1h->gc_tracer_stw()->report_promotion_in_new_plab_event(klass, word_sz * HeapWordSize, age, dest_attr.type() == G1HeapRegionAttr::Old, alloc_buf->word_sz() * HeapWordSize); } else { - _g1h->gc_tracer_stw()->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age, + _g1h->gc_tracer_stw()->report_promotion_outside_plab_event(klass, word_sz * HeapWordSize, age, dest_attr.type() == G1HeapRegionAttr::Old); } } NOINLINE HeapWord* 
G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr, - oop old, + Klass* klass, size_t word_sz, uint age, uint node_index) { @@ -439,7 +436,7 @@ HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr, update_numa_stats(node_index); if (_g1h->gc_tracer_stw()->should_report_promotion_events()) { // The events are checked individually as part of the actual commit - report_promotion_event(*dest_attr, old, word_sz, age, obj_ptr, node_index); + report_promotion_event(*dest_attr, klass, word_sz, age, obj_ptr, node_index); } } return obj_ptr; @@ -476,7 +473,13 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio // Get the klass once. We'll need it again later, and this avoids // re-decoding when it's compressed. - Klass* klass = old->klass(); + // NOTE: With compact headers, it is not safe to load the Klass* from o, because + // that would access the mark-word, and the mark-word might change at any time by + // concurrent promotion. The promoted mark-word would point to the forwardee, which + // may not yet have completed copying. Therefore we must load the Klass* from + // the mark-word that we have already loaded. This is safe, because we have checked + // that this is not yet forwarded in the caller. + Klass* klass = old->forward_safe_klass(old_mark); const size_t word_sz = old->size_given_klass(klass); // JNI only allows pinning of typeArrays, so we only need to keep those in place. @@ -494,7 +497,7 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio // PLAB allocations should succeed most of the time, so we'll // normally check against null once and that's it. if (obj_ptr == nullptr) { - obj_ptr = allocate_copy_slow(&dest_attr, old, word_sz, age, node_index); + obj_ptr = allocate_copy_slow(&dest_attr, klass, word_sz, age, node_index); if (obj_ptr == nullptr) { // This will either forward-to-self, or detect that someone else has // installed a forwarding pointer. 
@@ -595,7 +598,6 @@ G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) if (_states[worker_id] == nullptr) { _states[worker_id] = new G1ParScanThreadState(_g1h, rdcqs(), - _preserved_marks_set.get(worker_id), worker_id, _num_workers, _collection_set, @@ -655,7 +657,7 @@ NOINLINE oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, size_t word_sz, bool cause_pinned) { assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old)); - oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed); + oop forward_ptr = old->forward_to_self_atomic(m, memory_order_relaxed); if (forward_ptr == nullptr) { // Forward-to-self succeeded. We are the "owner" of the object. G1HeapRegion* r = _g1h->heap_region_containing(old); @@ -668,8 +670,6 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, siz // evacuation failure recovery. _g1h->mark_evac_failure_object(_worker_id, old, word_sz); - _preserved_marks->push_if_necessary(old, m); - ContinuationGCSupport::transform_stack_chunk(old); _evacuation_failed_info.register_copy_failure(word_sz); @@ -727,7 +727,6 @@ G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h, _g1h(g1h), _collection_set(collection_set), _rdcqs(G1BarrierSet::dirty_card_queue_set().allocator()), - _preserved_marks_set(true /* in_c_heap */), _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, num_workers, mtGC)), _rdc_buffers(NEW_C_HEAP_ARRAY(BufferNodeList, num_workers, mtGC)), _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, collection_set->young_region_length() + 1, mtGC)), @@ -736,7 +735,6 @@ G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h, _evac_failure_regions(evac_failure_regions), _partial_array_state_allocator(num_workers) { - _preserved_marks_set.init(num_workers); for (uint i = 0; i < num_workers; ++i) { _states[i] = nullptr; _rdc_buffers[i] = BufferNodeList(); @@ -749,5 +747,4 @@ 
G1ParScanThreadStateSet::~G1ParScanThreadStateSet() { FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states); FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total); FREE_C_HEAP_ARRAY(BufferNodeList, _rdc_buffers); - _preserved_marks_set.reclaim(); } diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp index 1cfd6fca08a6f..4cb5fa669db39 100644 --- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp +++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp @@ -34,7 +34,6 @@ #include "gc/shared/gc_globals.hpp" #include "gc/shared/partialArrayState.hpp" #include "gc/shared/partialArrayTaskStepper.hpp" -#include "gc/shared/preservedMarks.hpp" #include "gc/shared/stringdedup/stringDedup.hpp" #include "gc/shared/taskqueue.hpp" #include "memory/allocation.hpp" @@ -48,8 +47,6 @@ class G1EvacuationRootClosures; class G1OopStarChunkedList; class G1PLABAllocator; class G1HeapRegion; -class PreservedMarks; -class PreservedMarksSet; class outputStream; class G1ParScanThreadState : public CHeapObj { @@ -108,7 +105,6 @@ class G1ParScanThreadState : public CHeapObj { // Per-thread evacuation failure data structures. 
ALLOCATION_FAILURE_INJECTOR_ONLY(size_t _allocation_failure_inject_counter;) - PreservedMarks* _preserved_marks; EvacuationFailedInfo _evacuation_failed_info; G1EvacFailureRegions* _evac_failure_regions; // Number of additional cards into evacuation failed regions enqueued into @@ -127,7 +123,6 @@ class G1ParScanThreadState : public CHeapObj { public: G1ParScanThreadState(G1CollectedHeap* g1h, G1RedirtyCardsQueueSet* rdcqs, - PreservedMarks* preserved_marks, uint worker_id, uint num_workers, G1CollectionSet* collection_set, @@ -176,7 +171,7 @@ class G1ParScanThreadState : public CHeapObj { void start_partial_objarray(G1HeapRegionAttr dest_dir, oop from, oop to); HeapWord* allocate_copy_slow(G1HeapRegionAttr* dest_attr, - oop old, + Klass* klass, size_t word_sz, uint age, uint node_index); @@ -211,7 +206,7 @@ class G1ParScanThreadState : public CHeapObj { inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age); void report_promotion_event(G1HeapRegionAttr const dest_attr, - oop const old, size_t word_sz, uint age, + Klass* klass, size_t word_sz, uint age, HeapWord * const obj_ptr, uint node_index) const; void trim_queue_to_threshold(uint threshold); @@ -248,7 +243,6 @@ class G1ParScanThreadStateSet : public StackObj { G1CollectedHeap* _g1h; G1CollectionSet* _collection_set; G1RedirtyCardsQueueSet _rdcqs; - PreservedMarksSet _preserved_marks_set; G1ParScanThreadState** _states; BufferNodeList* _rdc_buffers; size_t* _surviving_young_words_total; @@ -266,7 +260,6 @@ class G1ParScanThreadStateSet : public StackObj { G1RedirtyCardsQueueSet* rdcqs() { return &_rdcqs; } BufferNodeList* rdc_buffers() { return _rdc_buffers; } - PreservedMarksSet* preserved_marks_set() { return &_preserved_marks_set; } void flush_stats(); void record_unused_optional_region(G1HeapRegion* hr); diff --git a/src/hotspot/share/gc/g1/g1YoungCollector.cpp b/src/hotspot/share/gc/g1/g1YoungCollector.cpp index f2fe93015c532..f3590aa2ff696 100644 --- 
a/src/hotspot/share/gc/g1/g1YoungCollector.cpp +++ b/src/hotspot/share/gc/g1/g1YoungCollector.cpp @@ -53,7 +53,6 @@ #include "gc/shared/gcTraceTime.inline.hpp" #include "gc/shared/gcTimer.hpp" #include "gc/shared/gc_globals.hpp" -#include "gc/shared/preservedMarks.hpp" #include "gc/shared/referenceProcessor.hpp" #include "gc/shared/weakProcessor.inline.hpp" #include "gc/shared/workerPolicy.hpp" diff --git a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp index a0e9a9b1569ab..6b218d3971e8b 100644 --- a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp +++ b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp @@ -42,7 +42,6 @@ #include "gc/g1/g1RemSet.hpp" #include "gc/g1/g1YoungGCPostEvacuateTasks.hpp" #include "gc/shared/bufferNode.hpp" -#include "gc/shared/preservedMarks.inline.hpp" #include "jfr/jfrEvents.hpp" #include "oops/access.inline.hpp" #include "oops/compressedOops.inline.hpp" @@ -252,7 +251,7 @@ class G1PostEvacuateCollectionSetCleanupTask1::RestoreEvacFailureRegionsTask : p { // Process marked object. 
assert(obj->is_forwarded() && obj->forwardee() == obj, "must be self-forwarded"); - obj->init_mark(); + obj->unset_self_forwarded(); hr->update_bot_for_block(obj_addr, obj_end_addr); // Statistics @@ -477,27 +476,6 @@ class G1PostEvacuateCollectionSetCleanupTask2::EagerlyReclaimHumongousObjectsTas } }; -class G1PostEvacuateCollectionSetCleanupTask2::RestorePreservedMarksTask : public G1AbstractSubTask { - PreservedMarksSet* _preserved_marks; - WorkerTask* _task; - -public: - RestorePreservedMarksTask(PreservedMarksSet* preserved_marks) : - G1AbstractSubTask(G1GCPhaseTimes::RestorePreservedMarks), - _preserved_marks(preserved_marks), - _task(preserved_marks->create_task()) { } - - virtual ~RestorePreservedMarksTask() { - delete _task; - } - - double worker_cost() const override { - return _preserved_marks->num(); - } - - void do_work(uint worker_id) override { _task->work(worker_id); } -}; - class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure { size_t _num_dirtied; G1CollectedHeap* _g1h; @@ -979,7 +957,6 @@ G1PostEvacuateCollectionSetCleanupTask2::G1PostEvacuateCollectionSetCleanupTask2 } if (evac_failure_regions->has_regions_evac_failed()) { - add_parallel_task(new RestorePreservedMarksTask(per_thread_states->preserved_marks_set())); add_parallel_task(new ProcessEvacuationFailedRegionsTask(evac_failure_regions)); } add_parallel_task(new RedirtyLoggedCardsTask(evac_failure_regions, diff --git a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp index 868ab788b534a..c79a8dd8eb8a1 100644 --- a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp +++ b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp @@ -56,7 +56,6 @@ class G1PostEvacuateCollectionSetCleanupTask1 : public G1BatchedTask { // - Update Derived Pointers (s) // - Clear Retained Region Data (on evacuation failure) // - Redirty Logged Cards -// - Restore Preserved Marks (on evacuation failure) // - Free Collection Set 
// - Resize TLABs class G1PostEvacuateCollectionSetCleanupTask2 : public G1BatchedTask { @@ -67,7 +66,6 @@ class G1PostEvacuateCollectionSetCleanupTask2 : public G1BatchedTask { class ProcessEvacuationFailedRegionsTask; class RedirtyLoggedCardsTask; - class RestorePreservedMarksTask; class FreeCollectionSetTask; class ResizeTLABsTask; diff --git a/src/hotspot/share/gc/parallel/mutableSpace.cpp b/src/hotspot/share/gc/parallel/mutableSpace.cpp index d90121b0eaf2c..a79300dddfb2d 100644 --- a/src/hotspot/share/gc/parallel/mutableSpace.cpp +++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp @@ -210,7 +210,8 @@ void MutableSpace::oop_iterate(OopIterateClosure* cl) { } } -void MutableSpace::object_iterate(ObjectClosure* cl) { +template +void MutableSpace::object_iterate_impl(ObjectClosure* cl) { HeapWord* p = bottom(); while (p < top()) { oop obj = cast_to_oop(p); @@ -219,13 +220,26 @@ void MutableSpace::object_iterate(ObjectClosure* cl) { // They are essentially dead, so skipping them if (!obj->is_forwarded()) { cl->do_object(obj); - } -#ifdef ASSERT - else { + p += obj->size(); + } else { assert(obj->forwardee() != obj, "must not be self-forwarded"); + if (COMPACT_HEADERS) { + // It is safe to use the forwardee here. Parallel GC only uses + // header-based forwarding during promotion. Full GC doesn't + // use the object header for forwarding at all. 
+ p += obj->forwardee()->size(); + } else { + p += obj->size(); + } } -#endif - p += obj->size(); + } +} + +void MutableSpace::object_iterate(ObjectClosure* cl) { + if (UseCompactObjectHeaders) { + object_iterate_impl(cl); + } else { + object_iterate_impl(cl); } } diff --git a/src/hotspot/share/gc/parallel/mutableSpace.hpp b/src/hotspot/share/gc/parallel/mutableSpace.hpp index b48b1ebcc9fb4..3403b95485522 100644 --- a/src/hotspot/share/gc/parallel/mutableSpace.hpp +++ b/src/hotspot/share/gc/parallel/mutableSpace.hpp @@ -61,6 +61,9 @@ class MutableSpace: public CHeapObj { void set_last_setup_region(MemRegion mr) { _last_setup_region = mr; } MemRegion last_setup_region() const { return _last_setup_region; } + template + void object_iterate_impl(ObjectClosure* cl); + public: virtual ~MutableSpace() = default; MutableSpace(size_t page_size); diff --git a/src/hotspot/share/gc/parallel/parallelArguments.cpp b/src/hotspot/share/gc/parallel/parallelArguments.cpp index 313716752d5fe..7e55500367f2a 100644 --- a/src/hotspot/share/gc/parallel/parallelArguments.cpp +++ b/src/hotspot/share/gc/parallel/parallelArguments.cpp @@ -28,6 +28,7 @@ #include "gc/parallel/parallelScavengeHeap.hpp" #include "gc/shared/adaptiveSizePolicy.hpp" #include "gc/shared/gcArguments.hpp" +#include "gc/shared/gcForwarding.hpp" #include "gc/shared/genArguments.hpp" #include "gc/shared/workerPolicy.hpp" #include "logging/log.hpp" @@ -127,6 +128,7 @@ void ParallelArguments::initialize_heap_flags_and_sizes() { // Redo everything from the start initialize_heap_flags_and_sizes_one_pass(); } + GCForwarding::initialize_flags(heap_reserved_size_bytes()); } size_t ParallelArguments::heap_reserved_size_bytes() { diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index 5883b1cd6074d..113af051cdf61 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -33,6 +33,7 
@@ #include "gc/parallel/psPromotionManager.hpp" #include "gc/parallel/psScavenge.hpp" #include "gc/parallel/psVMOperations.hpp" +#include "gc/shared/gcForwarding.inline.hpp" #include "gc/shared/gcHeapSummary.hpp" #include "gc/shared/gcLocker.inline.hpp" #include "gc/shared/gcWhen.hpp" @@ -129,6 +130,8 @@ jint ParallelScavengeHeap::initialize() { ParallelInitLogger::print(); + GCForwarding::initialize(heap_rs.region()); + return JNI_OK; } diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp index 4bff8f8a7d06a..b4c04a2ad4996 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp @@ -45,6 +45,7 @@ #include "gc/parallel/psYoungGen.hpp" #include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/gcCause.hpp" +#include "gc/shared/gcForwarding.inline.hpp" #include "gc/shared/gcHeapSummary.hpp" #include "gc/shared/gcId.hpp" #include "gc/shared/gcLocker.hpp" @@ -780,6 +781,10 @@ void PSParallelCompact::fill_dense_prefix_end(SpaceId id) { // filler obj will extend to next region. // Note: If min-fill-size decreases to 1, this whole method becomes redundant. + if (UseCompactObjectHeaders) { + // The gap is always equal to min-fill-size, so nothing to do. + return; + } assert(CollectedHeap::min_fill_size() >= 2, "inv"); #ifndef _LP64 // In 32-bit system, each heap word is 4 bytes, so MinObjAlignment == 2. 
@@ -1592,7 +1597,7 @@ void PSParallelCompact::forward_to_new_addr() { oop obj = cast_to_oop(cur_addr); if (new_addr != cur_addr) { cm->preserved_marks()->push_if_necessary(obj, obj->mark()); - obj->forward_to(cast_to_oop(new_addr)); + GCForwarding::forward_to(obj, cast_to_oop(new_addr)); } size_t obj_size = obj->size(); live_words += obj_size; @@ -1635,7 +1640,7 @@ void PSParallelCompact::verify_forward() { } oop obj = cast_to_oop(cur_addr); if (cur_addr != bump_ptr) { - assert(obj->forwardee() == cast_to_oop(bump_ptr), "inv"); + assert(GCForwarding::forwardee(obj) == cast_to_oop(bump_ptr), "inv"); } bump_ptr += obj->size(); cur_addr += obj->size(); @@ -2398,8 +2403,8 @@ void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) { if (copy_destination() != source()) { DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());) assert(source() != destination(), "inv"); - assert(cast_to_oop(source())->is_forwarded(), "inv"); - assert(cast_to_oop(source())->forwardee() == cast_to_oop(destination()), "inv"); + assert(GCForwarding::is_forwarded(cast_to_oop(source())), "inv"); + assert(GCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv"); Copy::aligned_conjoint_words(source(), copy_destination(), words); cast_to_oop(copy_destination())->init_mark(); } diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp b/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp index e5f1b9d30aed7..55e6b5fb7b29c 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp @@ -31,6 +31,7 @@ #include "gc/parallel/parMarkBitMap.inline.hpp" #include "gc/shared/collectedHeap.hpp" #include "gc/shared/continuationGCSupport.inline.hpp" +#include "gc/shared/gcForwarding.inline.hpp" #include "oops/access.inline.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/klass.hpp" @@ -79,7 +80,7 @@ inline void PSParallelCompact::adjust_pointer(T* 
p) { if (!obj->is_forwarded()) { return; } - oop new_obj = obj->forwardee(); + oop new_obj = GCForwarding::forwardee(obj); assert(new_obj != nullptr, "non-null address for live objects"); assert(new_obj != obj, "inv"); assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj), diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.cpp b/src/hotspot/share/gc/parallel/psPromotionManager.cpp index 19f688852385b..1c1a6424d0c05 100644 --- a/src/hotspot/share/gc/parallel/psPromotionManager.cpp +++ b/src/hotspot/share/gc/parallel/psPromotionManager.cpp @@ -281,7 +281,7 @@ void PSPromotionManager::process_array_chunk(PartialArrayScanTask task) { assert(PSChunkLargeArrays, "invariant"); oop old = task.to_source_array(); - assert(old->is_objArray(), "invariant"); + assert(old->forward_safe_klass()->is_objArray_klass(), "invariant"); assert(old->is_forwarded(), "invariant"); TASKQUEUE_STATS_ONLY(++_array_chunks_processed); @@ -319,7 +319,7 @@ oop PSPromotionManager::oop_promotion_failed(oop obj, markWord obj_mark) { // this started. If it is the same (i.e., no forwarding // pointer has been installed), then this thread owns // it. - if (obj->forward_to_atomic(obj, obj_mark) == nullptr) { + if (obj->forward_to_self_atomic(obj_mark) == nullptr) { // We won any races, we "own" this object. 
assert(obj == obj->forwardee(), "Sanity"); diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.hpp b/src/hotspot/share/gc/parallel/psPromotionManager.hpp index b7d2e0f725331..a6b2e646b5bc0 100644 --- a/src/hotspot/share/gc/parallel/psPromotionManager.hpp +++ b/src/hotspot/share/gc/parallel/psPromotionManager.hpp @@ -105,7 +105,7 @@ class PSPromotionManager { void push_depth(ScannerTask task); - inline void promotion_trace_event(oop new_obj, oop old_obj, size_t obj_size, + inline void promotion_trace_event(oop new_obj, Klass* klass, size_t obj_size, uint age, bool tenured, const PSPromotionLAB* lab); diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp index 390dea4976d19..47e6691978d77 100644 --- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp +++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp @@ -66,7 +66,7 @@ inline void PSPromotionManager::claim_or_forward_depth(T* p) { } } -inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj, +inline void PSPromotionManager::promotion_trace_event(oop new_obj, Klass* klass, size_t obj_size, uint age, bool tenured, const PSPromotionLAB* lab) { @@ -79,14 +79,14 @@ inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj, if (gc_tracer->should_report_promotion_in_new_plab_event()) { size_t obj_bytes = obj_size * HeapWordSize; size_t lab_size = lab->capacity(); - gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes, + gc_tracer->report_promotion_in_new_plab_event(klass, obj_bytes, age, tenured, lab_size); } } else { // Promotion of object directly to heap if (gc_tracer->should_report_promotion_outside_plab_event()) { size_t obj_bytes = obj_size * HeapWordSize; - gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes, + gc_tracer->report_promotion_outside_plab_event(klass, obj_bytes, age, tenured); } } @@ -149,7 +149,7 @@ 
inline oop PSPromotionManager::copy_to_survivor_space(oop o) { return copy_unmarked_to_survivor_space(o, m); } else { // Return the already installed forwardee. - return m.forwardee(); + return o->forwardee(m); } } @@ -165,7 +165,14 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o, oop new_obj = nullptr; bool new_obj_is_tenured = false; - size_t new_obj_size = o->size(); + // NOTE: With compact headers, it is not safe to load the Klass* from o, because + // that would access the mark-word, and the mark-word might change at any time by + // concurrent promotion. The promoted mark-word would point to the forwardee, which + // may not yet have completed copying. Therefore we must load the Klass* from + // the mark-word that we have already loaded. This is safe, because we have checked + // that this is not yet forwarded in the caller. + Klass* klass = o->forward_safe_klass(test_mark); + size_t new_obj_size = o->size_given_klass(klass); // Find the objects age, MT safe. uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ? @@ -180,7 +187,7 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o, if (new_obj_size > (YoungPLABSize / 2)) { // Allocate this object directly new_obj = cast_to_oop(young_space()->cas_allocate(new_obj_size)); - promotion_trace_event(new_obj, o, new_obj_size, age, false, nullptr); + promotion_trace_event(new_obj, klass, new_obj_size, age, false, nullptr); } else { // Flush and fill _young_lab.flush(); @@ -190,7 +197,7 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o, _young_lab.initialize(MemRegion(lab_base, YoungPLABSize)); // Try the young lab allocation again. 
new_obj = cast_to_oop(_young_lab.allocate(new_obj_size)); - promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab); + promotion_trace_event(new_obj, klass, new_obj_size, age, false, &_young_lab); } else { _young_gen_is_full = true; } @@ -216,7 +223,7 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o, if (new_obj_size > (OldPLABSize / 2)) { // Allocate this object directly new_obj = cast_to_oop(old_gen()->allocate(new_obj_size)); - promotion_trace_event(new_obj, o, new_obj_size, age, true, nullptr); + promotion_trace_event(new_obj, klass, new_obj_size, age, true, nullptr); } else { // Flush and fill _old_lab.flush(); @@ -226,7 +233,7 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o, _old_lab.initialize(MemRegion(lab_base, OldPLABSize)); // Try the old lab allocation again. new_obj = cast_to_oop(_old_lab.allocate(new_obj_size)); - promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab); + promotion_trace_event(new_obj, klass, new_obj_size, age, true, &_old_lab); } } } diff --git a/src/hotspot/share/gc/serial/defNewGeneration.cpp b/src/hotspot/share/gc/serial/defNewGeneration.cpp index 047171a5eb364..0a10998d6f8b2 100644 --- a/src/hotspot/share/gc/serial/defNewGeneration.cpp +++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp @@ -39,7 +39,6 @@ #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTrace.hpp" #include "gc/shared/gcTraceTime.inline.hpp" -#include "gc/shared/preservedMarks.inline.hpp" #include "gc/shared/referencePolicy.hpp" #include "gc/shared/referenceProcessorPhaseTimes.hpp" #include "gc/shared/space.hpp" @@ -227,7 +226,6 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs, const char* policy) : Generation(rs, initial_size), _promotion_failed(false), - _preserved_marks_set(false /* in_c_heap */), _promo_failure_drain_in_progress(false), _string_dedup_requests() { @@ -609,8 +607,6 @@ bool DefNewGeneration::collect(bool clear_all_soft_refs) { age_table()->clear(); 
to()->clear(SpaceDecorator::Mangle); - // The preserved marks should be empty at the start of the GC. - _preserved_marks_set.init(1); YoungGenScanClosure young_gen_cl(this); OldGenScanClosure old_gen_cl(this); @@ -681,8 +677,6 @@ bool DefNewGeneration::collect(bool clear_all_soft_refs) { // Reset the PromotionFailureALot counters. NOT_PRODUCT(heap->reset_promotion_should_fail();) } - // We should have processed and cleared all the preserved marks. - _preserved_marks_set.reclaim(); heap->trace_heap_after_gc(_gc_tracer); @@ -706,19 +700,13 @@ void DefNewGeneration::remove_forwarding_pointers() { // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.) struct ResetForwardedMarkWord : ObjectClosure { void do_object(oop obj) override { - if (obj->is_forwarded()) { - obj->init_mark(); + if (obj->is_self_forwarded()) { + obj->unset_self_forwarded(); } } } cl; eden()->object_iterate(&cl); from()->object_iterate(&cl); - - restore_preserved_marks(); -} - -void DefNewGeneration::restore_preserved_marks() { - _preserved_marks_set.restore(nullptr); } void DefNewGeneration::handle_promotion_failure(oop old) { @@ -726,12 +714,11 @@ void DefNewGeneration::handle_promotion_failure(oop old) { _promotion_failed = true; _promotion_failed_info.register_copy_failure(old->size()); - _preserved_marks_set.get()->push_if_necessary(old, old->mark()); ContinuationGCSupport::transform_stack_chunk(old); // forward to self - old->forward_to(old); + old->forward_to_self(); _promo_failure_scan_stack.push(old); diff --git a/src/hotspot/share/gc/serial/defNewGeneration.hpp b/src/hotspot/share/gc/serial/defNewGeneration.hpp index c5b7c095ac4e6..ee85ec5cd89f8 100644 --- a/src/hotspot/share/gc/serial/defNewGeneration.hpp +++ b/src/hotspot/share/gc/serial/defNewGeneration.hpp @@ -32,7 +32,6 @@ #include "gc/shared/copyFailedInfo.hpp" #include "gc/shared/gc_globals.hpp" #include "gc/shared/generationCounters.hpp" -#include "gc/shared/preservedMarks.hpp" #include 
"gc/shared/stringdedup/stringDedup.hpp" #include "gc/shared/tlab_globals.hpp" #include "utilities/align.hpp" @@ -99,11 +98,6 @@ class DefNewGeneration: public Generation { // therefore we must remove their forwarding pointers. void remove_forwarding_pointers(); - virtual void restore_preserved_marks(); - - // Preserved marks - PreservedMarksSet _preserved_marks_set; - Stack _promo_failure_scan_stack; void drain_promo_failure_scan_stack(void); bool _promo_failure_drain_in_progress; diff --git a/src/hotspot/share/gc/serial/serialArguments.cpp b/src/hotspot/share/gc/serial/serialArguments.cpp index ac6dd24fdbf1e..9825c4aed4502 100644 --- a/src/hotspot/share/gc/serial/serialArguments.cpp +++ b/src/hotspot/share/gc/serial/serialArguments.cpp @@ -23,10 +23,16 @@ */ #include "precompiled.hpp" +#include "gc/shared/gcForwarding.hpp" #include "gc/shared/genArguments.hpp" #include "gc/serial/serialArguments.hpp" #include "gc/serial/serialHeap.hpp" +void SerialArguments::initialize_heap_flags_and_sizes() { + GenArguments::initialize_heap_flags_and_sizes(); + GCForwarding::initialize_flags(MaxNewSize + MaxOldSize); +} + CollectedHeap* SerialArguments::create_heap() { return new SerialHeap(); } diff --git a/src/hotspot/share/gc/serial/serialArguments.hpp b/src/hotspot/share/gc/serial/serialArguments.hpp index 3ed4df5f41b85..df804ea619dad 100644 --- a/src/hotspot/share/gc/serial/serialArguments.hpp +++ b/src/hotspot/share/gc/serial/serialArguments.hpp @@ -32,6 +32,7 @@ class CollectedHeap; class SerialArguments : public GenArguments { private: virtual CollectedHeap* create_heap(); + void initialize_heap_flags_and_sizes(); }; #endif // SHARE_GC_SERIAL_SERIALARGUMENTS_HPP diff --git a/src/hotspot/share/gc/serial/serialFullGC.cpp b/src/hotspot/share/gc/serial/serialFullGC.cpp index 897437e33c96e..e7d6907c2bd50 100644 --- a/src/hotspot/share/gc/serial/serialFullGC.cpp +++ b/src/hotspot/share/gc/serial/serialFullGC.cpp @@ -43,6 +43,7 @@ #include "gc/shared/classUnloadingContext.hpp" 
#include "gc/shared/collectedHeap.inline.hpp" #include "gc/shared/continuationGCSupport.inline.hpp" +#include "gc/shared/gcForwarding.inline.hpp" #include "gc/shared/gcHeapSummary.hpp" #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTrace.hpp" @@ -230,7 +231,7 @@ class Compacter { static void forward_obj(oop obj, HeapWord* new_addr) { prefetch_write_scan(obj); if (cast_from_oop(obj) != new_addr) { - obj->forward_to(cast_to_oop(new_addr)); + GCForwarding::forward_to(obj, cast_to_oop(new_addr)); } else { assert(obj->is_gc_marked(), "inv"); // This obj will stay in-place. Fix the markword. @@ -255,7 +256,7 @@ class Compacter { prefetch_read_scan(addr); oop obj = cast_to_oop(addr); - oop new_obj = obj->forwardee(); + oop new_obj = GCForwarding::forwardee(obj); HeapWord* new_addr = cast_from_oop(new_obj); assert(addr != new_addr, "inv"); prefetch_write_copy(new_addr); @@ -352,13 +353,13 @@ class Compacter { HeapWord* top = space->top(); // Check if the first obj inside this space is forwarded. - if (!cast_to_oop(cur_addr)->is_forwarded()) { + if (!GCForwarding::is_forwarded(cast_to_oop(cur_addr))) { // Jump over consecutive (in-place) live-objs-chunk cur_addr = get_first_dead(i); } while (cur_addr < top) { - if (!cast_to_oop(cur_addr)->is_forwarded()) { + if (!GCForwarding::is_forwarded(cast_to_oop(cur_addr))) { cur_addr = *(HeapWord**) cur_addr; continue; } @@ -593,7 +594,7 @@ void SerialFullGC::mark_object(oop obj) { // some marks may contain information we need to preserve so we store them away // and overwrite the mark. We'll restore it at the end of serial full GC. 
markWord mark = obj->mark(); - obj->set_mark(markWord::prototype().set_marked()); + obj->set_mark(obj->prototype_mark().set_marked()); ContinuationGCSupport::transform_stack_chunk(obj); @@ -624,8 +625,8 @@ template void SerialFullGC::adjust_pointer(T* p) { oop obj = CompressedOops::decode_not_null(heap_oop); assert(Universe::heap()->is_in(obj), "should be in heap"); - if (obj->is_forwarded()) { - oop new_obj = obj->forwardee(); + if (GCForwarding::is_forwarded(obj)) { + oop new_obj = GCForwarding::forwardee(obj); assert(is_object_aligned(new_obj), "oop must be aligned"); RawAccess::oop_store(p, new_obj); } diff --git a/src/hotspot/share/gc/serial/serialHeap.cpp b/src/hotspot/share/gc/serial/serialHeap.cpp index 3c48177554121..e0a227d664011 100644 --- a/src/hotspot/share/gc/serial/serialHeap.cpp +++ b/src/hotspot/share/gc/serial/serialHeap.cpp @@ -40,6 +40,7 @@ #include "gc/shared/collectedHeap.inline.hpp" #include "gc/shared/collectorCounters.hpp" #include "gc/shared/continuationGCSupport.inline.hpp" +#include "gc/shared/gcForwarding.hpp" #include "gc/shared/gcId.hpp" #include "gc/shared/gcInitLogger.hpp" #include "gc/shared/gcLocker.inline.hpp" @@ -200,6 +201,8 @@ jint SerialHeap::initialize() { GCInitLogger::print(); + GCForwarding::initialize(_reserved); + return JNI_OK; } diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp index 82eaaf9a39673..c6ff9cfb7c1c2 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -229,7 +229,9 @@ bool CollectedHeap::is_oop(oop object) const { return false; } - if (!Metaspace::contains(object->klass_without_asserts())) { + // With compact headers, we can't safely access the class, due + // to possibly forwarded objects. 
+ if (!UseCompactObjectHeaders && !Metaspace::contains(object->klass_without_asserts())) { return false; } diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp index b413e3dfb438c..036bc0230c877 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -306,7 +306,7 @@ class CollectedHeap : public CHeapObj { } virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap); - static constexpr size_t min_dummy_object_size() { + static size_t min_dummy_object_size() { return oopDesc::header_size(); } diff --git a/src/hotspot/share/gc/shared/gcForwarding.cpp b/src/hotspot/share/gc/shared/gcForwarding.cpp new file mode 100644 index 0000000000000..5def881416240 --- /dev/null +++ b/src/hotspot/share/gc/shared/gcForwarding.cpp @@ -0,0 +1,52 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "gc/shared/gcForwarding.hpp" +#include "memory/memRegion.hpp" +#include "runtime/globals_extension.hpp" + +HeapWord* GCForwarding::_heap_base = nullptr; +int GCForwarding::_num_low_bits = 0; + +void GCForwarding::initialize_flags(size_t max_heap_size) { +#ifdef _LP64 + size_t max_narrow_heap_size = right_n_bits(NumLowBitsNarrow - Shift); + if (UseCompactObjectHeaders && max_heap_size > max_narrow_heap_size * HeapWordSize) { + FLAG_SET_DEFAULT(UseCompactObjectHeaders, false); + } +#endif +} + +void GCForwarding::initialize(MemRegion heap) { +#ifdef _LP64 + _heap_base = heap.start(); + if (heap.word_size() <= right_n_bits(NumLowBitsNarrow - Shift)) { + _num_low_bits = NumLowBitsNarrow; + } else { + assert(!UseCompactObjectHeaders, "Compact object headers should be turned off for large heaps"); + _num_low_bits = NumLowBitsWide; + } +#endif +} diff --git a/src/hotspot/share/gc/shared/gcForwarding.hpp b/src/hotspot/share/gc/shared/gcForwarding.hpp new file mode 100644 index 0000000000000..be61607b8deb7 --- /dev/null +++ b/src/hotspot/share/gc/shared/gcForwarding.hpp @@ -0,0 +1,57 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHARED_GCFORWARDING_HPP +#define SHARE_GC_SHARED_GCFORWARDING_HPP + +#include "memory/allStatic.hpp" +#include "memory/memRegion.hpp" +#include "oops/markWord.hpp" +#include "oops/oopsHierarchy.hpp" + +/* + * Implements forwarding for the full-GCs of Serial, Parallel, G1 and Shenandoah in + * a way that preserves upper N bits of object mark-words, which contain crucial + * Klass* information when running with compact headers. The encoding is similar to + * compressed-oops encoding: it basically subtracts the heap-base from the + * forwardee address, shifts that difference into the right place, and sets the + * lowest two bits (to indicate 'forwarded' state as usual). 
+ */ +class GCForwarding : public AllStatic { + static const int NumKlassBits = 32; // Will be 22 with Tiny Class-Pointers + static const int NumLowBitsNarrow = BitsPerWord - NumKlassBits; + static const int NumLowBitsWide = BitsPerWord; + static const int Shift = markWord::lock_bits + markWord::lock_shift; + + static HeapWord* _heap_base; + static int _num_low_bits; +public: + static void initialize_flags(size_t max_heap_size); + static void initialize(MemRegion heap); + static inline void forward_to(oop from, oop to); + static inline oop forwardee(oop from); + static inline bool is_forwarded(oop obj); +}; + +#endif // SHARE_GC_SHARED_GCFORWARDING_HPP diff --git a/src/hotspot/share/gc/shared/gcForwarding.inline.hpp b/src/hotspot/share/gc/shared/gcForwarding.inline.hpp new file mode 100644 index 0000000000000..beb31c1260662 --- /dev/null +++ b/src/hotspot/share/gc/shared/gcForwarding.inline.hpp @@ -0,0 +1,59 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_GC_SHARED_GCFORWARDING_INLINE_HPP +#define SHARE_GC_SHARED_GCFORWARDING_INLINE_HPP + +#include "gc/shared/gcForwarding.hpp" +#include "oops/oop.inline.hpp" +#include "utilities/globalDefinitions.hpp" + +void GCForwarding::forward_to(oop from, oop to) { +#ifdef _LP64 + uintptr_t encoded = pointer_delta(cast_from_oop<HeapWord*>(to), _heap_base) << Shift; + assert(encoded <= static_cast<uintptr_t>(right_n_bits(_num_low_bits)), "encoded forwardee must fit"); + uintptr_t mark = from->mark().value(); + mark &= ~right_n_bits(_num_low_bits); + mark |= (encoded | markWord::marked_value); + from->set_mark(markWord(mark)); +#else + from->forward_to(to); +#endif +} + +oop GCForwarding::forwardee(oop from) { +#ifdef _LP64 + uintptr_t mark = from->mark().value(); + HeapWord* decoded = _heap_base + ((mark & right_n_bits(_num_low_bits)) >> Shift); + return cast_to_oop(decoded); +#else + return from->forwardee(); +#endif +} + +bool GCForwarding::is_forwarded(oop obj) { + return obj->mark().is_forwarded(); +} + +#endif // SHARE_GC_SHARED_GCFORWARDING_INLINE_HPP diff --git a/src/hotspot/share/gc/shared/memAllocator.cpp b/src/hotspot/share/gc/shared/memAllocator.cpp index 318ab00188b3d..9b1fdb8adbed1 100644 --- a/src/hotspot/share/gc/shared/memAllocator.cpp +++ b/src/hotspot/share/gc/shared/memAllocator.cpp @@ -361,18 +361,23 @@ void MemAllocator::mem_clear(HeapWord* mem) const { assert(mem != nullptr, "cannot initialize null object"); const size_t hs = oopDesc::header_size(); assert(_word_size >= hs, "unexpected object size"); - oopDesc::set_klass_gap(mem, 0); + if (!UseCompactObjectHeaders) { + oopDesc::set_klass_gap(mem, 0); + } Copy::fill_to_aligned_words(mem + hs, _word_size - hs); } oop MemAllocator::finish(HeapWord* mem) const { assert(mem != nullptr, "null object pointer"); - // May be bootstrapping - oopDesc::set_mark(mem, markWord::prototype()); // Need a release store to ensure array/class length, mark word, and // object zeroing are visible before setting the klass non-null, for // 
concurrent collectors. - oopDesc::release_set_klass(mem, _klass); + if (UseCompactObjectHeaders) { + oopDesc::release_set_mark(mem, _klass->prototype_header()); + } else { + oopDesc::set_mark(mem, markWord::prototype()); + oopDesc::release_set_klass(mem, _klass); + } return cast_to_oop(mem); } diff --git a/src/hotspot/share/gc/shared/preservedMarks.cpp b/src/hotspot/share/gc/shared/preservedMarks.cpp index 9889dbc369018..4d3a34d34fa53 100644 --- a/src/hotspot/share/gc/shared/preservedMarks.cpp +++ b/src/hotspot/share/gc/shared/preservedMarks.cpp @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "gc/shared/gcForwarding.inline.hpp" #include "gc/shared/preservedMarks.inline.hpp" #include "gc/shared/workerThread.hpp" #include "gc/shared/workerUtils.hpp" @@ -42,8 +43,8 @@ void PreservedMarks::restore() { void PreservedMarks::adjust_preserved_mark(PreservedMark* elem) { oop obj = elem->get_oop(); - if (obj->is_forwarded()) { - elem->set_oop(obj->forwardee()); + if (GCForwarding::is_forwarded(obj)) { + elem->set_oop(GCForwarding::forwardee(obj)); } } diff --git a/src/hotspot/share/gc/shared/space.cpp b/src/hotspot/share/gc/shared/space.cpp index 0eb9d2520b739..0e4db3108be6b 100644 --- a/src/hotspot/share/gc/shared/space.cpp +++ b/src/hotspot/share/gc/shared/space.cpp @@ -100,7 +100,7 @@ void ContiguousSpace::object_iterate(ObjectClosure* blk) { while (addr < top()) { oop obj = cast_to_oop(addr); blk->do_object(obj); - addr += obj->size(); + addr += obj->forward_safe_size(); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp index bcc370eeb314d..38cd6421c82d4 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "gc/shared/gcArguments.hpp" +#include "gc/shared/gcForwarding.hpp" #include "gc/shared/tlab_globals.hpp" #include "gc/shared/workerPolicy.hpp" 
#include "gc/shenandoah/shenandoahArguments.hpp" @@ -198,6 +199,11 @@ void ShenandoahArguments::initialize_alignments() { HeapAlignment = align; } +void ShenandoahArguments::initialize_heap_flags_and_sizes() { + GCArguments::initialize_heap_flags_and_sizes(); + GCForwarding::initialize_flags(MaxHeapSize); +} + CollectedHeap* ShenandoahArguments::create_heap() { return new ShenandoahHeap(new ShenandoahCollectorPolicy()); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahArguments.hpp b/src/hotspot/share/gc/shenandoah/shenandoahArguments.hpp index bc73d9a2d1288..ad54b1d235ca9 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.hpp @@ -35,6 +35,7 @@ class ShenandoahArguments : public GCArguments { virtual void initialize(); virtual size_t conservative_max_heap_alignment(); + virtual void initialize_heap_flags_and_sizes(); virtual CollectedHeap* create_heap(); }; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp index 1539c7c2c5daa..e52fa29531631 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp @@ -197,7 +197,7 @@ void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char* file, line); } - Klass* obj_klass = obj->klass_or_null(); + Klass* obj_klass = obj->forward_safe_klass(); if (obj_klass == nullptr) { print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_correct failed", "Object klass pointer should not be null", @@ -235,7 +235,7 @@ void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char* file, line); } - if (obj_klass != fwd->klass()) { + if (obj_klass != fwd->forward_safe_klass()) { print_failure(_safe_oop, obj, interior_loc, nullptr, "Shenandoah assert_correct failed", "Forwardee klass disagrees with object class", file, line); @@ -266,7 +266,7 @@ void 
ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char* // Do additional checks for special objects: their fields can hold metadata as well. // We want to check class loading/unloading did not corrupt them. - if (Universe::is_fully_initialized() && java_lang_Class::is_instance(obj)) { + if (Universe::is_fully_initialized() && obj_klass == vmClasses::Class_klass()) { Metadata* klass = obj->metadata_field(java_lang_Class::klass_offset()); if (klass != nullptr && !Metaspace::contains(klass)) { print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_correct failed", diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp index de7d81d0f43cd..1c81793e78ebc 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp @@ -26,6 +26,7 @@ #include "compiler/oopMap.hpp" #include "gc/shared/continuationGCSupport.hpp" +#include "gc/shared/gcForwarding.inline.hpp" #include "gc/shared/gcTraceTime.inline.hpp" #include "gc/shared/preservedMarks.inline.hpp" #include "gc/shared/tlab_globals.hpp" @@ -369,7 +370,7 @@ class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure { shenandoah_assert_not_forwarded(nullptr, p); if (_compact_point != cast_from_oop(p)) { _preserved_marks->push_if_necessary(p, p->mark()); - p->forward_to(cast_to_oop(_compact_point)); + GCForwarding::forward_to(p, cast_to_oop(_compact_point)); } _compact_point += obj_size; } @@ -492,7 +493,7 @@ void ShenandoahFullGC::calculate_target_humongous_objects() { if (start >= to_begin && start != r->index()) { // Fits into current window, and the move is non-trivial. Record the move then, and continue scan. 
_preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark()); - old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom())); + GCForwarding::forward_to(old_obj, cast_to_oop(heap->get_region(start)->bottom())); to_end = start; continue; } @@ -752,8 +753,8 @@ class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure if (!CompressedOops::is_null(o)) { oop obj = CompressedOops::decode_not_null(o); assert(_ctx->is_marked(obj), "must be marked"); - if (obj->is_forwarded()) { - oop forw = obj->forwardee(); + if (GCForwarding::is_forwarded(obj)) { + oop forw = GCForwarding::forwardee(obj); RawAccess::oop_store(p, forw); } } @@ -863,9 +864,9 @@ class ShenandoahCompactObjectsClosure : public ObjectClosure { void do_object(oop p) { assert(_heap->complete_marking_context()->is_marked(p), "must be marked"); size_t size = p->size(); - if (p->is_forwarded()) { + if (GCForwarding::is_forwarded(p)) { HeapWord* compact_from = cast_from_oop(p); - HeapWord* compact_to = cast_from_oop(p->forwardee()); + HeapWord* compact_to = cast_from_oop(GCForwarding::forwardee(p)); assert(compact_from != compact_to, "Forwarded object should move"); Copy::aligned_conjoint_words(compact_from, compact_to, size); oop new_obj = cast_to_oop(compact_to); @@ -970,7 +971,7 @@ void ShenandoahFullGC::compact_humongous_objects() { ShenandoahHeapRegion* r = heap->get_region(c - 1); if (r->is_humongous_start()) { oop old_obj = cast_to_oop(r->bottom()); - if (!old_obj->is_forwarded()) { + if (!GCForwarding::is_forwarded(old_obj)) { // No need to move the object, it stays at the same slot continue; } @@ -979,7 +980,7 @@ void ShenandoahFullGC::compact_humongous_objects() { size_t old_start = r->index(); size_t old_end = old_start + num_regions - 1; - size_t new_start = heap->heap_region_index_containing(old_obj->forwardee()); + size_t new_start = heap->heap_region_index_containing(GCForwarding::forwardee(old_obj)); size_t new_end = new_start + num_regions - 1; assert(old_start 
!= new_start, "must be real move"); assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index()); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index a587cc417e319..74ab518cb2bcf 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -29,6 +29,7 @@ #include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/gcArguments.hpp" +#include "gc/shared/gcForwarding.hpp" #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTraceTime.inline.hpp" #include "gc/shared/locationPrinter.inline.hpp" @@ -422,6 +423,8 @@ jint ShenandoahHeap::initialize() { ShenandoahInitLogger::print(); + GCForwarding::initialize(_heap_region); + return JNI_OK; } @@ -1130,7 +1133,7 @@ oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) { assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope"); - size_t size = p->size(); + size_t size = p->forward_safe_size(); assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects"); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp index f2cc602d8cbc0..85bef6eeb71d4 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp @@ -434,7 +434,7 @@ inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, oop obj = cast_to_oop(cs); assert(oopDesc::is_oop(obj), "sanity"); assert(ctx->is_marked(obj), "object expected to be marked"); - size_t size = obj->size(); + size_t size = obj->forward_safe_size(); cl->do_object(obj); cs += size; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index 4834ecba54390..966c4ee8272fd 100644 --- 
a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -102,7 +102,7 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { T o = RawAccess<>::oop_load(p); if (!CompressedOops::is_null(o)) { oop obj = CompressedOops::decode_not_null(o); - if (is_instance_ref_klass(obj->klass())) { + if (is_instance_ref_klass(obj->forward_safe_klass())) { obj = ShenandoahForwarding::get_forwardee(obj); } // Single threaded verification can use faster non-atomic stack and bitmap @@ -129,7 +129,7 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { "oop must be aligned"); ShenandoahHeapRegion *obj_reg = _heap->heap_region_containing(obj); - Klass* obj_klass = obj->klass_or_null(); + Klass* obj_klass = obj->forward_safe_klass(); // Verify that obj is not in dead space: { @@ -144,11 +144,11 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { "Object start should be within the region"); if (!obj_reg->is_humongous()) { - check(ShenandoahAsserts::_safe_unknown, obj, (obj_addr + obj->size()) <= obj_reg->top(), + check(ShenandoahAsserts::_safe_unknown, obj, (obj_addr + obj->forward_safe_size()) <= obj_reg->top(), "Object end should be within the region"); } else { size_t humongous_start = obj_reg->index(); - size_t humongous_end = humongous_start + (obj->size() >> ShenandoahHeapRegion::region_size_words_shift()); + size_t humongous_end = humongous_start + (obj->forward_safe_size() >> ShenandoahHeapRegion::region_size_words_shift()); for (size_t idx = humongous_start + 1; idx < humongous_end; idx++) { check(ShenandoahAsserts::_safe_unknown, obj, _heap->get_region(idx)->is_humongous_continuation(), "Humongous object is in continuation that fits it"); @@ -165,7 +165,7 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { // skip break; case ShenandoahVerifier::_verify_liveness_complete: - Atomic::add(&_ld[obj_reg->index()], (uint) obj->size(), 
memory_order_relaxed); + Atomic::add(&_ld[obj_reg->index()], (uint) obj->forward_safe_size(), memory_order_relaxed); // fallthrough for fast failure for un-live regions: case ShenandoahVerifier::_verify_liveness_conservative: check(ShenandoahAsserts::_safe_oop, obj, obj_reg->has_live(), @@ -209,7 +209,7 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { HeapWord *fwd_addr = cast_from_oop(fwd); check(ShenandoahAsserts::_safe_oop, obj, fwd_addr < fwd_reg->top(), "Forwardee start should be within the region"); - check(ShenandoahAsserts::_safe_oop, obj, (fwd_addr + fwd->size()) <= fwd_reg->top(), + check(ShenandoahAsserts::_safe_oop, obj, (fwd_addr + fwd->forward_safe_size()) <= fwd_reg->top(), "Forwardee end should be within the region"); oop fwd2 = ShenandoahForwarding::get_forwardee_raw_unchecked(fwd); @@ -222,7 +222,7 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { // Do additional checks for special objects: their fields can hold metadata as well. // We want to check class loading/unloading did not corrupt them. 
- if (java_lang_Class::is_instance(obj)) { + if (obj_klass == vmClasses::Class_klass()) { Metadata* klass = obj->metadata_field(java_lang_Class::klass_offset()); check(ShenandoahAsserts::_safe_oop, obj, klass == nullptr || Metaspace::contains(klass), @@ -327,7 +327,8 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { */ void verify_oops_from(oop obj) { _loc = obj; - obj->oop_iterate(this); + Klass* klass = obj->forward_safe_klass(); + obj->oop_iterate_backwards(this, klass); _loc = nullptr; } @@ -591,7 +592,7 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask { while (addr < limit) { verify_and_follow(addr, stack, cl, &processed); - addr += cast_to_oop(addr)->size(); + addr += cast_to_oop(addr)->forward_safe_size(); } } @@ -607,7 +608,7 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask { // Verify everything reachable from that object too, hopefully realizing // everything was already marked, and never touching further: - if (!is_instance_ref_klass(obj->klass())) { + if (!is_instance_ref_klass(obj->forward_safe_klass())) { cl.verify_oops_from(obj); (*processed)++; } diff --git a/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp b/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp index d006b37e7d208..06af497e74511 100644 --- a/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp +++ b/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp @@ -298,7 +298,7 @@ void XBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* a assert(src_offset == dest_offset, "should be equal"); jlong offset = src_offset->get_long(); if (offset != arrayOopDesc::base_offset_in_bytes(T_OBJECT)) { - assert(!UseCompressedClassPointers, "should only happen without compressed class pointers"); + assert(!UseCompressedClassPointers || UseCompactObjectHeaders, "should only happen without compressed class pointers"); assert((arrayOopDesc::base_offset_in_bytes(T_OBJECT) - offset) == BytesPerLong, "unexpected offset"); length = phase->transform_later(new 
SubLNode(length, phase->longcon(1))); // Size is in longs src_offset = phase->longcon(arrayOopDesc::base_offset_in_bytes(T_OBJECT)); diff --git a/src/hotspot/share/gc/x/xObjArrayAllocator.cpp b/src/hotspot/share/gc/x/xObjArrayAllocator.cpp index 0950b886a9b7b..f2377cd37b722 100644 --- a/src/hotspot/share/gc/x/xObjArrayAllocator.cpp +++ b/src/hotspot/share/gc/x/xObjArrayAllocator.cpp @@ -73,8 +73,12 @@ oop XObjArrayAllocator::initialize(HeapWord* mem) const { // The array is going to be exposed before it has been completely // cleared, therefore we can't expose the header at the end of this // function. Instead explicitly initialize it according to our needs. - arrayOopDesc::set_mark(mem, markWord::prototype()); - arrayOopDesc::release_set_klass(mem, _klass); + if (UseCompactObjectHeaders) { + arrayOopDesc::release_set_mark(mem, _klass->prototype_header()); + } else { + arrayOopDesc::set_mark(mem, markWord::prototype()); + arrayOopDesc::release_set_klass(mem, _klass); + } assert(_length >= 0, "length should be non-negative"); arrayOopDesc::set_length(mem, _length); diff --git a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp index f72e84eaf5935..3ec07cea17bd1 100644 --- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp +++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp @@ -439,7 +439,7 @@ void ZBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* a assert(src_offset == dest_offset, "should be equal"); const jlong offset = src_offset->get_long(); if (offset != arrayOopDesc::base_offset_in_bytes(T_OBJECT)) { - assert(!UseCompressedClassPointers, "should only happen without compressed class pointers"); + assert(!UseCompressedClassPointers || UseCompactObjectHeaders, "should only happen without compressed class pointers"); assert((arrayOopDesc::base_offset_in_bytes(T_OBJECT) - offset) == BytesPerLong, "unexpected offset"); length = phase->transform_later(new SubLNode(length, phase->longcon(1))); // Size is in 
longs src_offset = phase->longcon(arrayOopDesc::base_offset_in_bytes(T_OBJECT)); diff --git a/src/hotspot/share/gc/z/zObjArrayAllocator.cpp b/src/hotspot/share/gc/z/zObjArrayAllocator.cpp index ada8351a9f65f..c63b989dc4eb0 100644 --- a/src/hotspot/share/gc/z/zObjArrayAllocator.cpp +++ b/src/hotspot/share/gc/z/zObjArrayAllocator.cpp @@ -63,8 +63,12 @@ oop ZObjArrayAllocator::initialize(HeapWord* mem) const { // Signal to the ZIterator that this is an invisible root, by setting // the mark word to "marked". Reset to prototype() after the clearing. - arrayOopDesc::set_mark(mem, markWord::prototype().set_marked()); - arrayOopDesc::release_set_klass(mem, _klass); + if (UseCompactObjectHeaders) { + oopDesc::release_set_mark(mem, _klass->prototype_header().set_marked()); + } else { + arrayOopDesc::set_mark(mem, markWord::prototype().set_marked()); + arrayOopDesc::release_set_klass(mem, _klass); + } assert(_length >= 0, "length should be non-negative"); arrayOopDesc::set_length(mem, _length); @@ -152,7 +156,11 @@ oop ZObjArrayAllocator::initialize(HeapWord* mem) const { ZThreadLocalData::clear_invisible_root(_thread); // Signal to the ZIterator that this is no longer an invisible root - oopDesc::release_set_mark(mem, markWord::prototype()); + if (UseCompactObjectHeaders) { + oopDesc::release_set_mark(mem, _klass->prototype_header()); + } else { + oopDesc::release_set_mark(mem, markWord::prototype()); + } return cast_to_oop(mem); } diff --git a/src/hotspot/share/gc/z/zRelocate.cpp b/src/hotspot/share/gc/z/zRelocate.cpp index b55a1863bdee3..62e8f538e44f0 100644 --- a/src/hotspot/share/gc/z/zRelocate.cpp +++ b/src/hotspot/share/gc/z/zRelocate.cpp @@ -591,7 +591,6 @@ class ZRelocateWork : public StackObj { zaddress try_relocate_object_inner(zaddress from_addr) { ZForwardingCursor cursor; - const size_t size = ZUtils::object_size(from_addr); ZPage* const to_page = target(_forwarding->to_age()); // Lookup forwarding @@ -599,12 +598,14 @@ class ZRelocateWork : public StackObj { 
const zaddress to_addr = _forwarding->find(from_addr, &cursor); if (!is_null(to_addr)) { // Already relocated + const size_t size = ZUtils::object_size(to_addr); increase_other_forwarded(size); return to_addr; } } // Allocate object + const size_t size = ZUtils::object_size(from_addr); const zaddress allocated_addr = _allocator->alloc_object(to_page, size); if (is_null(allocated_addr)) { // Allocation failed diff --git a/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp b/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp index fbdf8f9ca7145..ee50b2eb3d850 100644 --- a/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp +++ b/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp @@ -2021,10 +2021,13 @@ void BytecodeInterpreter::run(interpreterState istate) { } // Initialize header, mirrors MemAllocator. - oopDesc::set_mark(result, markWord::prototype()); - oopDesc::set_klass_gap(result, 0); - oopDesc::release_set_klass(result, ik); - + if (UseCompactObjectHeaders) { + oopDesc::release_set_mark(result, ik->prototype_header()); + } else { + oopDesc::set_mark(result, markWord::prototype()); + oopDesc::set_klass_gap(result, 0); + oopDesc::release_set_klass(result, ik); + } oop obj = cast_to_oop(result); // Must prevent reordering of stores for object initialization diff --git a/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp b/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp index 13b55c34e238a..dd8fc2cc47325 100644 --- a/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp +++ b/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp @@ -70,7 +70,7 @@ class ObjectSampleMarker : public StackObj { // now we will set the mark word to "marked" in order to quickly // identify sample objects during the reachability search from gc roots. 
assert(!obj->mark().is_marked(), "should only mark an object once"); - obj->set_mark(markWord::prototype().set_marked()); + obj->set_mark(obj->prototype_mark().set_marked()); assert(obj->mark().is_marked(), "invariant"); } }; diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp index 688691fb9765c..d97fdcb3f4486 100644 --- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp +++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp @@ -792,11 +792,13 @@ declare_constant(InvocationCounter::count_shift) \ \ declare_constant(markWord::hash_shift) \ + declare_constant(markWord::hash_shift_compact) \ declare_constant(markWord::monitor_value) \ \ declare_constant(markWord::lock_mask_in_place) \ declare_constant(markWord::age_mask_in_place) \ declare_constant(markWord::hash_mask) \ + declare_constant(markWord::hash_mask_compact) \ declare_constant(markWord::hash_mask_in_place) \ \ declare_constant(markWord::unlocked_value) \ diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp index 8f20f331235ff..758c8794ec887 100644 --- a/src/hotspot/share/memory/universe.cpp +++ b/src/hotspot/share/memory/universe.cpp @@ -382,8 +382,13 @@ void Universe::genesis(TRAPS) { HandleMark hm(THREAD); // Explicit null checks are needed if these offsets are not smaller than the page size - assert(oopDesc::klass_offset_in_bytes() < static_cast(os::vm_page_size()), - "Klass offset is expected to be less than the page size"); + if (UseCompactObjectHeaders) { + assert(oopDesc::mark_offset_in_bytes() < static_cast(os::vm_page_size()), + "Mark offset is expected to be less than the page size"); + } else { + assert(oopDesc::klass_offset_in_bytes() < static_cast(os::vm_page_size()), + "Klass offset is expected to be less than the page size"); + } assert(arrayOopDesc::length_offset_in_bytes() < static_cast(os::vm_page_size()), "Array length offset is expected to be less than the page size"); diff --git 
a/src/hotspot/share/oops/arrayOop.hpp b/src/hotspot/share/oops/arrayOop.hpp index 1ca8a9530a48c..115e2f732150e 100644 --- a/src/hotspot/share/oops/arrayOop.hpp +++ b/src/hotspot/share/oops/arrayOop.hpp @@ -82,8 +82,13 @@ class arrayOopDesc : public oopDesc { // declared nonstatic fields in arrayOopDesc if not compressed, otherwise // it occupies the second half of the _klass field in oopDesc. static int length_offset_in_bytes() { - return UseCompressedClassPointers ? klass_gap_offset_in_bytes() : - (int)sizeof(arrayOopDesc); + if (UseCompactObjectHeaders) { + return oopDesc::base_offset_in_bytes(); + } else if (UseCompressedClassPointers) { + return klass_gap_offset_in_bytes(); + } else { + return (int)sizeof(arrayOopDesc); + } } // Returns the offset of the first element. diff --git a/src/hotspot/share/oops/instanceOop.hpp b/src/hotspot/share/oops/instanceOop.hpp index 8de3b1a742cc0..8830b58af0af0 100644 --- a/src/hotspot/share/oops/instanceOop.hpp +++ b/src/hotspot/share/oops/instanceOop.hpp @@ -33,15 +33,15 @@ class instanceOopDesc : public oopDesc { public: - // aligned header size. - static int header_size() { return sizeof(instanceOopDesc)/HeapWordSize; } - // If compressed, the offset of the fields of the instance may not be aligned. static int base_offset_in_bytes() { - return (UseCompressedClassPointers) ? 
- klass_gap_offset_in_bytes() : - sizeof(instanceOopDesc); - + if (UseCompactObjectHeaders) { + return oopDesc::base_offset_in_bytes(); + } else if (UseCompressedClassPointers) { + return klass_gap_offset_in_bytes(); + } else { + return sizeof(instanceOopDesc); + } } }; diff --git a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp index 964bb030b960e..23c6e0d8f09bf 100644 --- a/src/hotspot/share/oops/klass.cpp +++ b/src/hotspot/share/oops/klass.cpp @@ -251,6 +251,16 @@ void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word return Metaspace::allocate(loader_data, word_size, MetaspaceObj::ClassType, THREAD); } +static markWord make_prototype(Klass* kls) { + markWord prototype = markWord::prototype(); +#ifdef _LP64 + if (UseCompactObjectHeaders) { + prototype = prototype.set_klass(kls); + } +#endif + return prototype; +} + Klass::Klass() : _kind(UnknownKlassKind) { assert(CDSConfig::is_dumping_static_archive() || CDSConfig::is_using_archive(), "only for cds"); } @@ -260,6 +270,7 @@ Klass::Klass() : _kind(UnknownKlassKind) { // The constructor is also used from CppVtableCloner, // which doesn't zero out the memory before calling the constructor. Klass::Klass(KlassKind kind) : _kind(kind), + _prototype_header(make_prototype(this)), _shared_class_path_index(-1) { CDS_ONLY(_shared_class_flags = 0;) CDS_JAVA_HEAP_ONLY(_archived_mirror_index = -1;) @@ -970,6 +981,10 @@ void Klass::oop_print_on(oop obj, outputStream* st) { // print header obj->mark().print_on(st); st->cr(); + if (UseCompactObjectHeaders) { + st->print(BULLET"prototype_header: " INTPTR_FORMAT, _prototype_header.value()); + st->cr(); + } } // print class diff --git a/src/hotspot/share/oops/klass.hpp b/src/hotspot/share/oops/klass.hpp index 6ec6ac889be71..bfcfb7baf1ee6 100644 --- a/src/hotspot/share/oops/klass.hpp +++ b/src/hotspot/share/oops/klass.hpp @@ -166,6 +166,8 @@ class Klass : public Metadata { // contention that may happen when a nearby object is modified. 
AccessFlags _access_flags; // Access flags. The class/interface distinction is stored here. + markWord _prototype_header; // Used to initialize objects' header + JFR_ONLY(DEFINE_TRACE_ID_FIELD;) // Bitmap and hash code used by hashed secondary supers. @@ -704,6 +706,13 @@ class Klass : public Metadata { bool is_cloneable() const; void set_is_cloneable(); + markWord prototype_header() const { + assert(UseCompactObjectHeaders, "only use with compact object headers"); + return _prototype_header; + } + inline void set_prototype_header(markWord header); + static ByteSize prototype_header_offset() { return in_ByteSize(offset_of(Klass, _prototype_header)); } + JFR_ONLY(DEFINE_TRACE_ID_METHODS;) virtual void metaspace_pointers_do(MetaspaceClosure* iter); diff --git a/src/hotspot/share/oops/klass.inline.hpp b/src/hotspot/share/oops/klass.inline.hpp index a72868a08d890..c45c75c37e6e3 100644 --- a/src/hotspot/share/oops/klass.inline.hpp +++ b/src/hotspot/share/oops/klass.inline.hpp @@ -52,6 +52,11 @@ inline bool Klass::is_loader_alive() const { return class_loader_data()->is_alive(); } +inline void Klass::set_prototype_header(markWord header) { + assert(UseCompactObjectHeaders, "only with compact headers"); + _prototype_header = header; +} + inline oop Klass::java_mirror() const { return _java_mirror.resolve(); } diff --git a/src/hotspot/share/oops/markWord.hpp b/src/hotspot/share/oops/markWord.hpp index 92577a8b40b01..b4c2a4edbee97 100644 --- a/src/hotspot/share/oops/markWord.hpp +++ b/src/hotspot/share/oops/markWord.hpp @@ -26,6 +26,7 @@ #define SHARE_OOPS_MARKWORD_HPP #include "metaprogramming/primitiveConversions.hpp" +#include "oops/compressedKlass.hpp" #include "oops/oopsHierarchy.hpp" #include "runtime/globals.hpp" @@ -37,11 +38,15 @@ // // 32 bits: // -------- -// hash:25 ------------>| age:4 unused_gap:1 lock:2 (normal object) +// hash:25 ------------>| age:4 self-fwd:1 lock:2 (normal object) // // 64 bits: // -------- -// unused:25 hash:31 -->| unused_gap:1 age:4 
unused_gap:1 lock:2 (normal object) +// unused:25 hash:31 -->| unused_gap:1 age:4 self-fwd:1 lock:2 (normal object) +// +// 64 bits (with compact headers): +// ------------------------------- +// nklass:32 hash:25 -->| unused_gap:1 age:4 self-fwded:1 lock:2 (normal object) // // - hash contains the identity hash value: largest value is // 31 bits, see os::random(). Also, 64-bit vm's require @@ -103,21 +108,43 @@ class markWord { // Constants static const int age_bits = 4; static const int lock_bits = 2; - static const int first_unused_gap_bits = 1; - static const int max_hash_bits = BitsPerWord - age_bits - lock_bits - first_unused_gap_bits; + static const int self_fwd_bits = 1; + static const int max_hash_bits = BitsPerWord - age_bits - lock_bits - self_fwd_bits; static const int hash_bits = max_hash_bits > 31 ? 31 : max_hash_bits; - static const int second_unused_gap_bits = LP64_ONLY(1) NOT_LP64(0); + static const int hash_bits_compact = max_hash_bits > 25 ? 25 : max_hash_bits; + // Used only without compact headers. + static const int unused_gap_bits = LP64_ONLY(1) NOT_LP64(0); +#ifdef _LP64 + // Used only with compact headers. + static const int klass_bits = 32; +#endif static const int lock_shift = 0; - static const int age_shift = lock_bits + first_unused_gap_bits; - static const int hash_shift = age_shift + age_bits + second_unused_gap_bits; + static const int self_fwd_shift = lock_shift + lock_bits; + static const int age_shift = self_fwd_shift + self_fwd_bits; + static const int hash_shift = age_shift + age_bits + unused_gap_bits; + static const int hash_shift_compact = age_shift + age_bits; +#ifdef _LP64 + // Used only with compact headers. 
+ static const int klass_shift = hash_shift_compact + hash_bits_compact; +#endif static const uintptr_t lock_mask = right_n_bits(lock_bits); static const uintptr_t lock_mask_in_place = lock_mask << lock_shift; + static const uintptr_t self_fwd_mask = right_n_bits(self_fwd_bits); + static const uintptr_t self_fwd_mask_in_place = self_fwd_mask << self_fwd_shift; static const uintptr_t age_mask = right_n_bits(age_bits); static const uintptr_t age_mask_in_place = age_mask << age_shift; static const uintptr_t hash_mask = right_n_bits(hash_bits); static const uintptr_t hash_mask_in_place = hash_mask << hash_shift; + static const uintptr_t hash_mask_compact = right_n_bits(hash_bits_compact); + static const uintptr_t hash_mask_compact_in_place = hash_mask_compact << hash_shift_compact; +#ifdef _LP64 + // Used only with compact headers. + static const uintptr_t klass_mask = right_n_bits(klass_bits); + static const uintptr_t klass_mask_in_place = klass_mask << klass_shift; +#endif + static const uintptr_t locked_value = 0; static const uintptr_t unlocked_value = 1; @@ -143,9 +170,11 @@ class markWord { bool is_marked() const { return (mask_bits(value(), lock_mask_in_place) == marked_value); } - bool is_forwarded() const { - return (mask_bits(value(), lock_mask_in_place) == marked_value); + bool is_forwarded() const { + // Returns true for normal forwarded (0b011) and self-forwarded (0b1xx). 
+ return mask_bits(value(), lock_mask_in_place | self_fwd_mask_in_place) >= static_cast<intptr_t>(marked_value); } + bool is_neutral() const { // Not locked, or marked - a "clean" neutral state return (mask_bits(value(), lock_mask_in_place) == unlocked_value); } @@ -212,9 +241,15 @@ class markWord { markWord displaced_mark_helper() const; void set_displaced_mark_helper(markWord m) const; markWord copy_set_hash(intptr_t hash) const { - uintptr_t tmp = value() & (~hash_mask_in_place); - tmp |= ((hash & hash_mask) << hash_shift); - return markWord(tmp); + if (UseCompactObjectHeaders) { + uintptr_t tmp = value() & (~hash_mask_compact_in_place); + tmp |= ((hash & hash_mask_compact) << hash_shift_compact); + return markWord(tmp); + } else { + uintptr_t tmp = value() & (~hash_mask_in_place); + tmp |= ((hash & hash_mask) << hash_shift); + return markWord(tmp); + } } // it is only used to be stored into BasicLock as the // indicator that the lock is using heavyweight monitor @@ -252,13 +287,24 @@ class markWord { // hash operations intptr_t hash() const { - return mask_bits(value() >> hash_shift, hash_mask); + if (UseCompactObjectHeaders) { + return mask_bits(value() >> hash_shift_compact, hash_mask_compact); + } else { + return mask_bits(value() >> hash_shift, hash_mask); + } } bool has_no_hash() const { return hash() == no_hash; } + inline Klass* klass() const; + inline Klass* klass_or_null() const; + inline Klass* klass_without_asserts() const; + inline narrowKlass narrow_klass() const; + inline markWord set_narrow_klass(narrowKlass nklass) const; + inline markWord set_klass(Klass* klass) const; + // Prototype mark for initialization static markWord prototype() { return markWord( no_hash_in_place | no_lock_in_place ); @@ -273,6 +319,21 @@ class markWord { // Recover address of oop from encoded form used in mark inline void* decode_pointer() const { return (void*)clear_lock_bits().value(); } + inline bool is_self_forwarded() const { + NOT_LP64(assert(LockingMode != LM_LEGACY, 
"incorrect with LM_LEGACY on 32 bit");) + return mask_bits(value(), self_fwd_mask_in_place) != 0; + } + + inline markWord set_self_forwarded() const { + NOT_LP64(assert(LockingMode != LM_LEGACY, "incorrect with LM_LEGACY on 32 bit");) + return markWord(value() | self_fwd_mask_in_place); + } + + inline markWord unset_self_forwarded() const { + NOT_LP64(assert(LockingMode != LM_LEGACY, "incorrect with LM_LEGACY on 32 bit");) + return markWord(value() & ~self_fwd_mask_in_place); + } + inline oop forwardee() const { return cast_to_oop(decode_pointer()); } diff --git a/src/hotspot/share/oops/markWord.inline.hpp b/src/hotspot/share/oops/markWord.inline.hpp new file mode 100644 index 0000000000000..90c11ab207adc --- /dev/null +++ b/src/hotspot/share/oops/markWord.inline.hpp @@ -0,0 +1,93 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_OOPS_MARKWORD_INLINE_HPP +#define SHARE_OOPS_MARKWORD_INLINE_HPP + +#include "oops/markWord.hpp" +#include "oops/compressedOops.inline.hpp" + +narrowKlass markWord::narrow_klass() const { +#ifdef _LP64 + assert(UseCompactObjectHeaders, "only used with compact object headers"); + return narrowKlass(value() >> klass_shift); +#else + ShouldNotReachHere(); + return 0; +#endif +} + +markWord markWord::set_narrow_klass(narrowKlass nklass) const { +#ifdef _LP64 + assert(UseCompactObjectHeaders, "only used with compact object headers"); + return markWord((value() & ~klass_mask_in_place) | ((uintptr_t) nklass << klass_shift)); +#else + ShouldNotReachHere(); + return markWord(0); +#endif +} + +Klass* markWord::klass() const { +#ifdef _LP64 + assert(UseCompactObjectHeaders, "only used with compact object headers"); + return CompressedKlassPointers::decode_not_null(narrow_klass()); +#else + ShouldNotReachHere(); + return nullptr; +#endif +} + +Klass* markWord::klass_or_null() const { +#ifdef _LP64 + assert(UseCompactObjectHeaders, "only used with compact object headers"); + return CompressedKlassPointers::decode(narrow_klass()); +#else + ShouldNotReachHere(); + return nullptr; +#endif +} + +Klass* markWord::klass_without_asserts() const { +#ifdef _LP64 + assert(UseCompactObjectHeaders, "only used with compact object headers"); + return CompressedKlassPointers::decode_without_asserts(narrow_klass()); +#else + ShouldNotReachHere(); + return nullptr; +#endif +} + +markWord markWord::set_klass(Klass* klass) const { +#ifdef _LP64 + assert(UseCompactObjectHeaders, "only used with compact object headers"); + assert(UseCompressedClassPointers, "expect compressed klass pointers"); + narrowKlass nklass = CompressedKlassPointers::encode(const_cast<Klass*>(klass)); + return set_narrow_klass(nklass); +#else + ShouldNotReachHere(); + return markWord(); +#endif +} + +#endif // SHARE_OOPS_MARKWORD_INLINE_HPP diff --git a/src/hotspot/share/oops/objArrayKlass.cpp 
b/src/hotspot/share/oops/objArrayKlass.cpp index bee010b6d7244..ffd2d8a72a3a7 100644 --- a/src/hotspot/share/oops/objArrayKlass.cpp +++ b/src/hotspot/share/oops/objArrayKlass.cpp @@ -143,7 +143,8 @@ ObjArrayKlass::ObjArrayKlass(int n, Klass* element_klass, Symbol* name) : ArrayK } size_t ObjArrayKlass::oop_size(oop obj) const { - assert(obj->is_objArray(), "must be object array"); + // In this assert, we cannot safely access the Klass* with compact headers. + assert(UseCompactObjectHeaders || obj->is_objArray(), "must be object array"); return objArrayOop(obj)->object_size(); } diff --git a/src/hotspot/share/oops/objArrayKlass.inline.hpp b/src/hotspot/share/oops/objArrayKlass.inline.hpp index 6c9165509c7c2..3f1c8c208db9f 100644 --- a/src/hotspot/share/oops/objArrayKlass.inline.hpp +++ b/src/hotspot/share/oops/objArrayKlass.inline.hpp @@ -70,7 +70,8 @@ void ObjArrayKlass::oop_oop_iterate_elements_bounded( template <typename T, typename OopClosureType> void ObjArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) { - assert (obj->is_array(), "obj must be array"); + // In this assert, we cannot safely access the Klass* with compact headers. + assert (UseCompactObjectHeaders || obj->is_array(), "obj must be array"); objArrayOop a = objArrayOop(obj); if (Devirtualizer::do_metadata(closure)) { diff --git a/src/hotspot/share/oops/oop.cpp b/src/hotspot/share/oops/oop.cpp index fde91969ea5ef..ef5e4db815a64 100644 --- a/src/hotspot/share/oops/oop.cpp +++ b/src/hotspot/share/oops/oop.cpp @@ -154,7 +154,8 @@ bool oopDesc::is_typeArray_noinline() const { return is_typeArray(); } bool oopDesc::has_klass_gap() { // Only has a klass gap when compressed class pointers are used. - return UseCompressedClassPointers; 
+ return UseCompressedClassPointers && !UseCompactObjectHeaders; } #if INCLUDE_CDS_JAVA_HEAP @@ -220,12 +221,12 @@ jdouble oopDesc::double_field_acquire(int offset) const { return A void oopDesc::release_double_field_put(int offset, jdouble value) { Atomic::release_store(field_addr(offset), value); } #ifdef ASSERT -bool oopDesc::size_might_change() { +bool oopDesc::size_might_change(Klass* klass) { // UseParallelGC and UseG1GC can change the length field // of an "old copy" of an object array in the young gen so it indicates // the grey portion of an already copied array. This will cause the first // disjunct below to fail if the two comparands are computed across such // a concurrent change. - return Universe::heap()->is_stw_gc_active() && is_objArray() && is_forwarded() && (UseParallelGC || UseG1GC); + return Universe::heap()->is_stw_gc_active() && klass->is_objArray_klass() && is_forwarded() && (UseParallelGC || UseG1GC); } #endif diff --git a/src/hotspot/share/oops/oop.hpp b/src/hotspot/share/oops/oop.hpp index 2d827699b41fa..73122954d7684 100644 --- a/src/hotspot/share/oops/oop.hpp +++ b/src/hotspot/share/oops/oop.hpp @@ -62,6 +62,8 @@ class oopDesc { // make use of the C++ copy/assign incorrect. NONCOPYABLE(oopDesc); + inline oop cas_set_forwardee(markWord new_mark, markWord old_mark, atomic_memory_order order); + public: // Must be trivial; see verifying static assert after the class. oopDesc() = default; @@ -78,6 +80,9 @@ class oopDesc { inline markWord cas_set_mark(markWord new_mark, markWord old_mark); inline markWord cas_set_mark(markWord new_mark, markWord old_mark, atomic_memory_order order); + // Returns the prototype mark that should be used for this object. 
+ inline markWord prototype_mark() const; + // Used only to re-initialize the mark word (e.g., of promoted // objects during a GC) -- requires a valid klass pointer inline void init_mark(); @@ -96,7 +101,13 @@ class oopDesc { static inline void set_klass_gap(HeapWord* mem, int z); // size of object header, aligned to platform wordSize - static constexpr int header_size() { return sizeof(oopDesc)/HeapWordSize; } + static int header_size() { + if (UseCompactObjectHeaders) { + return sizeof(markWord) / HeapWordSize; + } else { + return sizeof(oopDesc)/HeapWordSize; + } + } // Returns whether this is an instance of k or an instance of a subclass of k inline bool is_a(Klass* k) const; @@ -108,6 +119,20 @@ class oopDesc { // to be able to figure out the size of an object knowing its klass. inline size_t size_given_klass(Klass* klass); + // The following set of methods is used to access the mark-word and related + // properties when the object may be forwarded. Be careful where and when + // using this method. It assumes that the forwardee is installed in + // the header as a plain pointer (or self-forwarded). In particular, + // those methods can not deal with the encoded forwarding that is used + // in Serial, Parallel, G1 and Shenandoah full-GCs. +private: + inline Klass* forward_safe_klass_impl(markWord m) const; +public: + inline Klass* forward_safe_klass() const; + inline Klass* forward_safe_klass(markWord m) const; + inline size_t forward_safe_size(); + inline void forward_safe_init_mark(); + // type test operations (inlined in oop.inline.hpp) inline bool is_instance() const; inline bool is_instanceRef() const; @@ -258,16 +283,22 @@ class oopDesc { // Forward pointer operations for scavenge inline bool is_forwarded() const; + inline bool is_self_forwarded() const; inline void forward_to(oop p); + inline void forward_to_self(); // Like "forward_to", but inserts the forwarding pointer atomically. 
// Exactly one thread succeeds in inserting the forwarding pointer, and // this call returns null for that thread; any other thread has the // value of the forwarding pointer returned and does not modify "this". inline oop forward_to_atomic(oop p, markWord compare, atomic_memory_order order = memory_order_conservative); + inline oop forward_to_self_atomic(markWord compare, atomic_memory_order order = memory_order_conservative); inline oop forwardee() const; + inline oop forwardee(markWord header) const; + + inline void unset_self_forwarded(); // Age of object during scavenge inline uint age() const; @@ -311,16 +342,49 @@ class oopDesc { // for code generation static int mark_offset_in_bytes() { return (int)offset_of(oopDesc, _mark); } - static int klass_offset_in_bytes() { return (int)offset_of(oopDesc, _metadata._klass); } + static int klass_offset_in_bytes() { +#ifdef _LP64 + if (UseCompactObjectHeaders) { + // NOTE: The only place where this is used with compact headers is + // the C2 compiler, and even there we don't use it to access the (narrow)Klass* + // directly. It is used only as a placeholder to identify the special memory slice + // of LoadNKlass instructions. This value could be any value that is not a valid + // field offset. Also, if it weren't for C2, we could + // assert(!UseCompactObjectHeaders) here. 
+ STATIC_ASSERT(markWord::klass_shift % 8 == 0); + return mark_offset_in_bytes() + markWord::klass_shift / 8; + } else +#endif + { + return (int)offset_of(oopDesc, _metadata._klass); + } + } static int klass_gap_offset_in_bytes() { assert(has_klass_gap(), "only applicable to compressed klass pointers"); + assert(!UseCompactObjectHeaders, "don't use klass_gap_offset_in_bytes() with compact headers"); return klass_offset_in_bytes() + sizeof(narrowKlass); } + static int base_offset_in_bytes() { +#ifdef _LP64 + if (UseCompactObjectHeaders) { + // With compact headers, the Klass* field is not used for the Klass* + // and is used for the object fields instead. + STATIC_ASSERT(sizeof(markWord) == 8); + return sizeof(markWord); + } else if (UseCompressedClassPointers) { + return sizeof(markWord) + sizeof(narrowKlass); + } else +#endif + { + return sizeof(oopDesc); + } + } + // for error reporting static void* load_oop_raw(oop obj, int offset); - DEBUG_ONLY(bool size_might_change();) + DEBUG_ONLY(bool size_might_change(Klass* klass);) }; // An oopDesc is not initialized via a constructor. 
Space is allocated in diff --git a/src/hotspot/share/oops/oop.inline.hpp b/src/hotspot/share/oops/oop.inline.hpp index ffcc9af24c328..2871b9c24205f 100644 --- a/src/hotspot/share/oops/oop.inline.hpp +++ b/src/hotspot/share/oops/oop.inline.hpp @@ -34,7 +34,7 @@ #include "oops/arrayOop.hpp" #include "oops/compressedKlass.inline.hpp" #include "oops/instanceKlass.hpp" -#include "oops/markWord.hpp" +#include "oops/markWord.inline.hpp" #include "oops/oopsHierarchy.hpp" #include "runtime/atomic.hpp" #include "runtime/globals.hpp" @@ -82,20 +82,36 @@ markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memo return Atomic::cmpxchg(&_mark, old_mark, new_mark, order); } +markWord oopDesc::prototype_mark() const { + if (UseCompactObjectHeaders) { + return klass()->prototype_header(); + } else { + return markWord::prototype(); + } +} + void oopDesc::init_mark() { - set_mark(markWord::prototype()); + if (UseCompactObjectHeaders) { + set_mark(prototype_mark()); + } else { + set_mark(markWord::prototype()); + } } Klass* oopDesc::klass() const { - if (UseCompressedClassPointers) { - return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass); + if (UseCompactObjectHeaders) { + return mark().klass(); + } else if (UseCompressedClassPointers) { + return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass); } else { return _metadata._klass; } } Klass* oopDesc::klass_or_null() const { - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + return mark().klass_or_null(); + } else if (UseCompressedClassPointers) { return CompressedKlassPointers::decode(_metadata._compressed_klass); } else { return _metadata._klass; @@ -103,7 +119,9 @@ Klass* oopDesc::klass_or_null() const { } Klass* oopDesc::klass_or_null_acquire() const { - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + return mark_acquire().klass(); + } else if (UseCompressedClassPointers) { narrowKlass nklass = 
Atomic::load_acquire(&_metadata._compressed_klass); return CompressedKlassPointers::decode(nklass); } else { @@ -112,7 +130,9 @@ Klass* oopDesc::klass_or_null_acquire() const { } Klass* oopDesc::klass_without_asserts() const { - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + return mark().klass_without_asserts(); + } else if (UseCompressedClassPointers) { return CompressedKlassPointers::decode_without_asserts(_metadata._compressed_klass); } else { return _metadata._klass; @@ -121,6 +141,7 @@ Klass* oopDesc::klass_without_asserts() const { void oopDesc::set_klass(Klass* k) { assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass"); + assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers"); if (UseCompressedClassPointers) { _metadata._compressed_klass = CompressedKlassPointers::encode_not_null(k); } else { @@ -130,6 +151,7 @@ void oopDesc::set_klass(Klass* k) { void oopDesc::release_set_klass(HeapWord* mem, Klass* k) { assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass"); + assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers"); char* raw_mem = ((char*)mem + klass_offset_in_bytes()); if (UseCompressedClassPointers) { Atomic::release_store((narrowKlass*)raw_mem, @@ -140,6 +162,7 @@ void oopDesc::release_set_klass(HeapWord* mem, Klass* k) { } void oopDesc::set_klass_gap(HeapWord* mem, int v) { + assert(!UseCompactObjectHeaders, "don't set Klass* gap with compact headers"); if (UseCompressedClassPointers) { *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v; } @@ -190,7 +213,7 @@ size_t oopDesc::size_given_klass(Klass* klass) { // skipping the intermediate round to HeapWordSize. 
s = align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize; - assert(s == klass->oop_size(this) || size_might_change(), "wrong array object size"); + assert(s == klass->oop_size(this) || size_might_change(klass), "wrong array object size"); } else { // Must be zero, so bite the bullet and take the virtual call. s = klass->oop_size(this); @@ -202,6 +225,53 @@ size_t oopDesc::size_given_klass(Klass* klass) { return s; } +#ifdef _LP64 +Klass* oopDesc::forward_safe_klass_impl(markWord m) const { + assert(UseCompactObjectHeaders, "Only get here with compact headers"); + if (m.is_marked()) { + oop fwd = forwardee(m); + markWord m2 = fwd->mark(); + assert(!m2.is_marked() || m2.is_self_forwarded(), "no double forwarding: this: " PTR_FORMAT " (" INTPTR_FORMAT "), fwd: " PTR_FORMAT " (" INTPTR_FORMAT ")", p2i(this), m.value(), p2i(fwd), m2.value()); + m = m2; + } + return m.klass(); +} +#endif + +Klass* oopDesc::forward_safe_klass(markWord m) const { +#ifdef _LP64 + if (UseCompactObjectHeaders) { + return forward_safe_klass_impl(m); + } else +#endif + { + return klass(); + } +} + +Klass* oopDesc::forward_safe_klass() const { +#ifdef _LP64 + if (UseCompactObjectHeaders) { + return forward_safe_klass_impl(mark()); + } else +#endif + { + return klass(); + } +} + +size_t oopDesc::forward_safe_size() { + return size_given_klass(forward_safe_klass()); +} + +void oopDesc::forward_safe_init_mark() { + if (UseCompactObjectHeaders) { + set_mark(forward_safe_klass()->prototype_header()); + } else { + set_mark(markWord::prototype()); + } +} + bool oopDesc::is_instance() const { return klass()->is_instance_klass(); } bool oopDesc::is_instanceRef() const { return klass()->is_reference_instance_klass(); } bool oopDesc::is_stackChunk() const { return klass()->is_stack_chunk_instance_klass(); } @@ -267,6 +337,10 @@ bool oopDesc::is_forwarded() const { return mark().is_forwarded(); } +bool oopDesc::is_self_forwarded() const { + return mark().is_self_forwarded(); +} + // Used by 
scavengers void oopDesc::forward_to(oop p) { markWord m = markWord::encode_pointer_as_mark(p); @@ -274,14 +348,38 @@ void oopDesc::forward_to(oop p) { set_mark(m); } -oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) { - markWord m = markWord::encode_pointer_as_mark(p); - assert(m.decode_pointer() == p, "encoding must be reversible"); - markWord old_mark = cas_set_mark(m, compare, order); +void oopDesc::forward_to_self() { + set_mark(mark().set_self_forwarded()); +} + +oop oopDesc::cas_set_forwardee(markWord new_mark, markWord compare, atomic_memory_order order) { + markWord old_mark = cas_set_mark(new_mark, compare, order); if (old_mark == compare) { return nullptr; } else { - return cast_to_oop(old_mark.decode_pointer()); + assert(old_mark.is_forwarded(), "must be forwarded here"); + return forwardee(old_mark); + } +} + +oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) { + markWord m = markWord::encode_pointer_as_mark(p); + assert(forwardee(m) == p, "encoding must be reversible"); + return cas_set_forwardee(m, compare, order); +} + +oop oopDesc::forward_to_self_atomic(markWord old_mark, atomic_memory_order order) { + markWord new_mark = old_mark.set_self_forwarded(); + assert(forwardee(new_mark) == cast_to_oop(this), "encoding must be reversible"); + return cas_set_forwardee(new_mark, old_mark, order); +} + +oop oopDesc::forwardee(markWord mark) const { + assert(mark.is_forwarded(), "only decode when actually forwarded"); + if (mark.is_self_forwarded()) { + return cast_to_oop(this); + } else { + return mark.forwardee(); } } @@ -289,7 +387,11 @@ oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order orde // The forwardee is used when copying during scavenge and mark-sweep. // It does need to clear the low two locking- and GC-related bits. 
oop oopDesc::forwardee() const { - return mark().forwardee(); + return forwardee(mark()); +} + +void oopDesc::unset_self_forwarded() { + set_mark(mark().unset_self_forwarded()); } // The following method needs to be MT safe. @@ -346,7 +448,8 @@ void oopDesc::oop_iterate_backwards(OopClosureType* cl) { template <typename OopClosureType> void oopDesc::oop_iterate_backwards(OopClosureType* cl, Klass* k) { - assert(k == klass(), "wrong klass"); + // In this assert, we cannot safely access the Klass* with compact headers. + assert(UseCompactObjectHeaders || k == klass(), "wrong klass"); OopIteratorClosureDispatch::oop_oop_iterate_backwards(cl, this, k); } diff --git a/src/hotspot/share/oops/typeArrayKlass.cpp b/src/hotspot/share/oops/typeArrayKlass.cpp index 38e28edd15743..3b2020b2bd48c 100644 --- a/src/hotspot/share/oops/typeArrayKlass.cpp +++ b/src/hotspot/share/oops/typeArrayKlass.cpp @@ -171,7 +171,8 @@ void TypeArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos } size_t TypeArrayKlass::oop_size(oop obj) const { - assert(obj->is_typeArray(),"must be a type array"); + // In this assert, we cannot safely access the Klass* with compact headers. + assert(UseCompactObjectHeaders || obj->is_typeArray(),"must be a type array"); typeArrayOop t = typeArrayOop(obj); return t->object_size(this); } diff --git a/src/hotspot/share/oops/typeArrayKlass.inline.hpp b/src/hotspot/share/oops/typeArrayKlass.inline.hpp index 098f9e7399343..8d0f2c5c4a559 100644 --- a/src/hotspot/share/oops/typeArrayKlass.inline.hpp +++ b/src/hotspot/share/oops/typeArrayKlass.inline.hpp @@ -35,7 +35,8 @@ class OopIterateClosure; inline void TypeArrayKlass::oop_oop_iterate_impl(oop obj, OopIterateClosure* closure) { - assert(obj->is_typeArray(),"must be a type array"); + // In this assert, we cannot safely access the Klass* with compact headers. 
+ assert(UseCompactObjectHeaders || obj->is_typeArray(),"must be a type array"); // Performance tweak: We skip processing the klass pointer since all // TypeArrayKlasses are guaranteed processed via the null class loader. } diff --git a/src/hotspot/share/opto/callnode.cpp b/src/hotspot/share/opto/callnode.cpp index d715e6533432e..cf386f6555ffa 100644 --- a/src/hotspot/share/opto/callnode.cpp +++ b/src/hotspot/share/opto/callnode.cpp @@ -1615,8 +1615,14 @@ void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer) } Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) { Node* mark_node = nullptr; - // For now only enable fast locking for non-array types - mark_node = phase->MakeConX(markWord::prototype().value()); + if (UseCompactObjectHeaders) { + Node* klass_node = in(AllocateNode::KlassNode); + Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset())))); + mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered); + } else { + // For now only enable fast locking for non-array types + mark_node = phase->MakeConX(markWord::prototype().value()); + } return mark_node; } diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp index a9d73364e2d00..b3b045f0c03de 100644 --- a/src/hotspot/share/opto/compile.cpp +++ b/src/hotspot/share/opto/compile.cpp @@ -1695,6 +1695,10 @@ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_cr } } if (flat->isa_klassptr()) { + if (UseCompactObjectHeaders) { + if (flat->offset() == in_bytes(Klass::prototype_header_offset())) + alias_type(idx)->set_rewritable(false); + } if (flat->offset() == in_bytes(Klass::super_check_offset_offset())) alias_type(idx)->set_rewritable(false); if (flat->offset() == in_bytes(Klass::modifier_flags_offset())) diff --git a/src/hotspot/share/opto/library_call.cpp 
b/src/hotspot/share/opto/library_call.cpp index eced285f8cbdf..da1cd8c0e7230 100644 --- a/src/hotspot/share/opto/library_call.cpp +++ b/src/hotspot/share/opto/library_call.cpp @@ -4627,8 +4627,8 @@ bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) { // We depend on hash_mask being at most 32 bits and avoid the use of // hash_mask_in_place because it could be larger than 32 bits in a 64-bit // vm: see markWord.hpp. - Node *hash_mask = _gvn.intcon(markWord::hash_mask); - Node *hash_shift = _gvn.intcon(markWord::hash_shift); + Node *hash_mask = _gvn.intcon(UseCompactObjectHeaders ? markWord::hash_mask_compact : markWord::hash_mask); + Node *hash_shift = _gvn.intcon(UseCompactObjectHeaders ? markWord::hash_shift_compact : markWord::hash_shift); Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift)); // This hack lets the hash bits live anywhere in the mark object now, as long // as the shift drops the relevant bits into the low 32 bits. Note that diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp index de804457a262d..ed8c517bd8d75 100644 --- a/src/hotspot/share/opto/macro.cpp +++ b/src/hotspot/share/opto/macro.cpp @@ -1706,7 +1706,9 @@ PhaseMacroExpand::initialize_object(AllocateNode* alloc, } rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, TypeX_X->basic_type()); - rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA); + if (!UseCompactObjectHeaders) { + rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA); + } int header_size = alloc->minimum_header_size(); // conservatively small // Array length diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp index c7f0fb9fc3202..dd4cbca2820e7 100644 --- a/src/hotspot/share/opto/memnode.cpp +++ b/src/hotspot/share/opto/memnode.cpp @@ -1934,6 +1934,13 @@ Node 
*LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) { const Type* LoadNode::load_array_final_field(const TypeKlassPtr *tkls, ciKlass* klass) const { + if (UseCompactObjectHeaders) { + if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) { + // The field is Klass::_prototype_header. Return its (constant) value. + assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header"); + return TypeX::make(klass->prototype_header()); + } + } if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) { // The field is Klass::_modifier_flags. Return its (constant) value. // (Folds up the 2nd indirection in aClassConstant.getModifiers().) @@ -2106,6 +2113,13 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset"); return TypeInt::make(klass->super_check_offset()); } + if (UseCompactObjectHeaders) { + if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) { + // The field is Klass::_prototype_header. Return its (constant) value. + assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header"); + return TypeX::make(klass->prototype_header()); + } + } // Compute index into primary_supers array juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*); // Check for overflowing; use unsigned compare to handle the negative case. 
@@ -2196,7 +2210,7 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { } Node* alloc = is_new_object_mark_load(); - if (alloc != nullptr) { + if (!UseCompactObjectHeaders && alloc != nullptr) { return TypeX::make(markWord::prototype().value()); } diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp index 81b40e76a31f2..a8f5f02f813b2 100644 --- a/src/hotspot/share/runtime/arguments.cpp +++ b/src/hotspot/share/runtime/arguments.cpp @@ -3641,6 +3641,32 @@ jint Arguments::parse(const JavaVMInitArgs* initial_cmd_args) { Arguments::print_on(&st); } +#ifdef _LP64 + if (UseCompactObjectHeaders && FLAG_IS_CMDLINE(UseCompressedClassPointers) && !UseCompressedClassPointers) { + warning("Compact object headers require compressed class pointers. Disabling compact object headers."); + FLAG_SET_DEFAULT(UseCompactObjectHeaders, false); + } + if (UseCompactObjectHeaders && LockingMode != LM_LIGHTWEIGHT) { + FLAG_SET_DEFAULT(LockingMode, LM_LIGHTWEIGHT); + } + if (UseCompactObjectHeaders && !UseObjectMonitorTable) { + // If UseCompactObjectHeaders is on the command line, turn on UseObjectMonitorTable. + if (FLAG_IS_CMDLINE(UseCompactObjectHeaders)) { + FLAG_SET_DEFAULT(UseObjectMonitorTable, true); + + // If UseObjectMonitorTable is on the command line, turn off UseCompactObjectHeaders. + } else if (FLAG_IS_CMDLINE(UseObjectMonitorTable)) { + FLAG_SET_DEFAULT(UseCompactObjectHeaders, false); + // If neither on the command line, the defaults are incompatible, but turn on UseObjectMonitorTable. 
+ } else { + FLAG_SET_DEFAULT(UseObjectMonitorTable, true); + } + } + if (UseCompactObjectHeaders && !UseCompressedClassPointers) { + FLAG_SET_DEFAULT(UseCompressedClassPointers, true); + } +#endif + return JNI_OK; } diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp index d442894798b78..b94260b2315b3 100644 --- a/src/hotspot/share/runtime/globals.hpp +++ b/src/hotspot/share/runtime/globals.hpp @@ -128,6 +128,9 @@ const size_t minimumSymbolTableSize = 1024; "Use 32-bit class pointers in 64-bit VM. " \ "lp64_product means flag is always constant in 32 bit VM") \ \ + product(bool, UseCompactObjectHeaders, false, EXPERIMENTAL, \ + "Use compact 64-bit object headers in 64-bit VM") \ + \ product(int, ObjectAlignmentInBytes, 8, \ "Default object alignment in bytes, 8 is minimum") \ range(8, 256) \ @@ -144,6 +147,7 @@ const size_t minimumSymbolTableSize = 1024; constraint) const bool UseCompressedOops = false; const bool UseCompressedClassPointers = false; +const bool UseCompactObjectHeaders = false; const int ObjectAlignmentInBytes = 8; #endif // _LP64 diff --git a/src/hotspot/share/runtime/synchronizer.cpp b/src/hotspot/share/runtime/synchronizer.cpp index d8fb7a4734abe..6c941d4f1cfc5 100644 --- a/src/hotspot/share/runtime/synchronizer.cpp +++ b/src/hotspot/share/runtime/synchronizer.cpp @@ -963,7 +963,7 @@ static intptr_t get_next_hash(Thread* current, oop obj) { value = v; } - value &= markWord::hash_mask; + value &= UseCompactObjectHeaders ? 
markWord::hash_mask_compact : markWord::hash_mask; if (value == 0) value = 0xBAD; assert(value != markWord::no_hash, "invariant"); return value; diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp index fe9620586be3b..b95d00a176366 100644 --- a/src/hotspot/share/runtime/vmStructs.cpp +++ b/src/hotspot/share/runtime/vmStructs.cpp @@ -2498,10 +2498,13 @@ declare_constant(markWord::lock_bits) \ declare_constant(markWord::max_hash_bits) \ declare_constant(markWord::hash_bits) \ + declare_constant(markWord::hash_bits_compact) \ \ declare_constant(markWord::lock_shift) \ declare_constant(markWord::age_shift) \ declare_constant(markWord::hash_shift) \ + declare_constant(markWord::hash_shift_compact) \ + LP64_ONLY(declare_constant(markWord::klass_shift)) \ \ declare_constant(markWord::lock_mask) \ declare_constant(markWord::lock_mask_in_place) \ @@ -2509,6 +2512,8 @@ declare_constant(markWord::age_mask_in_place) \ declare_constant(markWord::hash_mask) \ declare_constant(markWord::hash_mask_in_place) \ + declare_constant(markWord::hash_mask_compact) \ + declare_constant(markWord::hash_mask_compact_in_place) \ \ declare_constant(markWord::locked_value) \ declare_constant(markWord::unlocked_value) \ diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java index 67bba331149f9..7a7c761e89f8f 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java @@ -24,6 +24,9 @@ package sun.jvm.hotspot.debugger; +import sun.jvm.hotspot.oops.Mark; +import sun.jvm.hotspot.runtime.VM; + /**

DebuggerBase is a recommended base class for debugger implementations. It can use a PageCache to cache data from the target process. Note that this class would not be suitable if the @@ -394,7 +397,15 @@ protected long readCompOopAddressValue(long address) protected long readCompKlassAddressValue(long address) throws UnmappedAddressException, UnalignedAddressException { - long value = readCInteger(address, getKlassPtrSize(), true); + long value; + if (VM.getVM().isCompactObjectHeadersEnabled()) { + // With compact headers, the compressed Klass* is currently read from the mark + // word. We need to load the whole mark, and shift the upper parts. + value = readCInteger(address, machDesc.getAddressSize(), true); + value = value >>> Mark.getKlassShift(); + } else { + value = readCInteger(address, getKlassPtrSize(), true); + } if (value != 0) { value = (long)(narrowKlassBase + (long)(value << narrowKlassShift)); } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java index 3f96f72cf0408..c4eeaf4a367f7 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java @@ -81,7 +81,9 @@ private static long lengthOffsetInBytes() { if (lengthOffsetInBytes != 0) { return lengthOffsetInBytes; } - if (VM.getVM().isCompressedKlassPointersEnabled()) { + if (VM.getVM().isCompactObjectHeadersEnabled()) { + lengthOffsetInBytes = Oop.getHeaderSize(); + } else if (VM.getVM().isCompressedKlassPointersEnabled()) { lengthOffsetInBytes = typeSize - VM.getVM().getIntSize(); } else { lengthOffsetInBytes = typeSize; diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Instance.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Instance.java index b837d869ea0c8..fd364d6a174b0 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Instance.java +++ 
b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Instance.java @@ -55,7 +55,9 @@ private static synchronized void initialize(TypeDataBase db) throws WrongTypeExc // Returns header size in bytes. public static long getHeaderSize() { - if (VM.getVM().isCompressedKlassPointersEnabled()) { + if (VM.getVM().isCompactObjectHeadersEnabled()) { + return Oop.getHeaderSize(); + } else if (VM.getVM().isCompressedKlassPointersEnabled()) { return typeSize - VM.getVM().getIntSize(); } else { return typeSize; diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java index a3a06ec73f5f8..c146a85cdb5af 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java @@ -51,15 +51,22 @@ private static synchronized void initialize(TypeDataBase db) throws WrongTypeExc lockBits = db.lookupLongConstant("markWord::lock_bits").longValue(); maxHashBits = db.lookupLongConstant("markWord::max_hash_bits").longValue(); hashBits = db.lookupLongConstant("markWord::hash_bits").longValue(); + hashBitsCompact = db.lookupLongConstant("markWord::hash_bits_compact").longValue(); lockShift = db.lookupLongConstant("markWord::lock_shift").longValue(); ageShift = db.lookupLongConstant("markWord::age_shift").longValue(); hashShift = db.lookupLongConstant("markWord::hash_shift").longValue(); + hashShiftCompact = db.lookupLongConstant("markWord::hash_shift_compact").longValue(); + if (VM.getVM().isLP64()) { + klassShift = db.lookupLongConstant("markWord::klass_shift").longValue(); + } lockMask = db.lookupLongConstant("markWord::lock_mask").longValue(); lockMaskInPlace = db.lookupLongConstant("markWord::lock_mask_in_place").longValue(); ageMask = db.lookupLongConstant("markWord::age_mask").longValue(); ageMaskInPlace = db.lookupLongConstant("markWord::age_mask_in_place").longValue(); hashMask = 
db.lookupLongConstant("markWord::hash_mask").longValue(); hashMaskInPlace = db.lookupLongConstant("markWord::hash_mask_in_place").longValue(); + hashMaskCompact = db.lookupLongConstant("markWord::hash_mask_compact").longValue(); + hashMaskCompactInPlace = db.lookupLongConstant("markWord::hash_mask_compact_in_place").longValue(); lockedValue = db.lookupLongConstant("markWord::locked_value").longValue(); unlockedValue = db.lookupLongConstant("markWord::unlocked_value").longValue(); monitorValue = db.lookupLongConstant("markWord::monitor_value").longValue(); @@ -78,10 +85,13 @@ private static synchronized void initialize(TypeDataBase db) throws WrongTypeExc private static long lockBits; private static long maxHashBits; private static long hashBits; + private static long hashBitsCompact; private static long lockShift; private static long ageShift; private static long hashShift; + private static long hashShiftCompact; + private static long klassShift; private static long lockMask; private static long lockMaskInPlace; @@ -89,6 +99,8 @@ private static synchronized void initialize(TypeDataBase db) throws WrongTypeExc private static long ageMaskInPlace; private static long hashMask; private static long hashMaskInPlace; + private static long hashMaskCompact; + private static long hashMaskCompactInPlace; private static long lockedValue; private static long unlockedValue; @@ -102,6 +114,10 @@ private static synchronized void initialize(TypeDataBase db) throws WrongTypeExc private static long maxAge; + public static long getKlassShift() { + return klassShift; + } + public Mark(Address addr) { super(addr); } @@ -184,13 +200,22 @@ public Mark displacedMarkHelper() { // hash operations public long hash() { - return Bits.maskBitsLong(value() >> hashShift, hashMask); + if (VM.getVM().isCompactObjectHeadersEnabled()) { + return Bits.maskBitsLong(value() >> hashShiftCompact, hashMaskCompact); + } else { + return Bits.maskBitsLong(value() >> hashShift, hashMask); + } } public boolean 
hasNoHash() { return hash() == noHash; } + public Klass getKlass() { + assert(VM.getVM().isCompactObjectHeadersEnabled()); + return (Klass)Metadata.instantiateWrapperFor(addr.getCompKlassAddressAt(0)); + } + // Debugging public void printOn(PrintStream tty) { if (isLocked()) { diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java index bf957941a56b7..6bf35be67e47d 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java @@ -46,9 +46,14 @@ public void update(Observable o, Object data) { private static synchronized void initialize(TypeDataBase db) throws WrongTypeException { Type type = db.lookupType("oopDesc"); mark = new CIntField(type.getCIntegerField("_mark"), 0); - klass = new MetadataField(type.getAddressField("_metadata._klass"), 0); - compressedKlass = new NarrowKlassField(type.getAddressField("_metadata._compressed_klass"), 0); - headerSize = type.getSize(); + if (VM.getVM().isCompactObjectHeadersEnabled()) { + Type markType = db.lookupType("markWord"); + headerSize = markType.getSize(); + } else { + headerSize = type.getSize(); + klass = new MetadataField(type.getAddressField("_metadata._klass"), 0); + compressedKlass = new NarrowKlassField(type.getAddressField("_metadata._compressed_klass"), 0); + } } private OopHandle handle; @@ -75,8 +80,17 @@ private static synchronized void initialize(TypeDataBase db) throws WrongTypeExc // Accessors for declared fields public Mark getMark() { return new Mark(getHandle()); } + + private static Klass getKlass(Mark mark) { + assert(VM.getVM().isCompactObjectHeadersEnabled()); + return mark.getKlass(); + } + public Klass getKlass() { - if (VM.getVM().isCompressedKlassPointersEnabled()) { + if (VM.getVM().isCompactObjectHeadersEnabled()) { + assert(VM.getVM().isCompressedKlassPointersEnabled()); + return getKlass(getMark()); + } else if 
(VM.getVM().isCompressedKlassPointersEnabled()) { return (Klass)compressedKlass.getValue(getHandle()); } else { return (Klass)klass.getValue(getHandle()); @@ -147,10 +161,12 @@ public void iterate(OopVisitor visitor, boolean doVMFields) { void iterateFields(OopVisitor visitor, boolean doVMFields) { if (doVMFields) { visitor.doCInt(mark, true); - if (VM.getVM().isCompressedKlassPointersEnabled()) { - visitor.doMetadata(compressedKlass, true); - } else { - visitor.doMetadata(klass, true); + if (!VM.getVM().isCompactObjectHeadersEnabled()) { + if (VM.getVM().isCompressedKlassPointersEnabled()) { + visitor.doMetadata(compressedKlass, true); + } else { + visitor.doMetadata(klass, true); + } } } } @@ -206,7 +222,10 @@ public static Klass getKlassForOopHandle(OopHandle handle) { if (handle == null) { return null; } - if (VM.getVM().isCompressedKlassPointersEnabled()) { + if (VM.getVM().isCompactObjectHeadersEnabled()) { + Mark mark = new Mark(handle); + return getKlass(mark); + } else if (VM.getVM().isCompressedKlassPointersEnabled()) { return (Klass)Metadata.instantiateWrapperFor(handle.getCompKlassAddressAt(compressedKlass.getOffset())); } else { return (Klass)Metadata.instantiateWrapperFor(handle.getAddressAt(klass.getOffset())); diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java index 232f3e864a3f8..472b63814ad47 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java @@ -148,6 +148,7 @@ public class VM { private Boolean sharingEnabled; private Boolean compressedOopsEnabled; private Boolean compressedKlassPointersEnabled; + private Boolean compactObjectHeadersEnabled; // command line flags supplied to VM - see struct JVMFlag in jvmFlag.hpp public static final class Flag { @@ -960,6 +961,15 @@ public boolean isCompressedKlassPointersEnabled() { return 
compressedKlassPointersEnabled.booleanValue(); } + public boolean isCompactObjectHeadersEnabled() { + if (compactObjectHeadersEnabled == null) { + Flag flag = getCommandLineFlag("UseCompactObjectHeaders"); + compactObjectHeadersEnabled = (flag == null) ? Boolean.FALSE: + (flag.getBool()? Boolean.TRUE: Boolean.FALSE); + } + return compactObjectHeadersEnabled.booleanValue(); + } + public int getObjectAlignmentInBytes() { if (objectAlignmentInBytes == 0) { Flag flag = getCommandLineFlag("ObjectAlignmentInBytes"); diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java index 3f701b8d24e49..6a8b794a93585 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java @@ -26,6 +26,7 @@ import sun.jvm.hotspot.debugger.*; import sun.jvm.hotspot.oops.Metadata; +import sun.jvm.hotspot.oops.Oop; import sun.jvm.hotspot.runtime.*; import sun.jvm.hotspot.types.*; @@ -37,26 +38,6 @@ states than the ObjectHeap code. 
*/ public class RobustOopDeterminator { - private static AddressField klassField; - - static { - VM.registerVMInitializedObserver(new Observer() { - public void update(Observable o, Object data) { - initialize(VM.getVM().getTypeDataBase()); - } - }); - } - - private static void initialize(TypeDataBase db) { - Type type = db.lookupType("oopDesc"); - - if (VM.getVM().isCompressedKlassPointersEnabled()) { - klassField = type.getAddressField("_metadata._compressed_klass"); - } else { - klassField = type.getAddressField("_metadata._klass"); - } - } - public static boolean oopLooksValid(OopHandle oop) { if (oop == null) { return false; @@ -66,11 +47,7 @@ public static boolean oopLooksValid(OopHandle oop) { } try { // Try to instantiate the Klass - if (VM.getVM().isCompressedKlassPointersEnabled()) { - Metadata.instantiateWrapperFor(oop.getCompKlassAddressAt(klassField.getOffset())); - } else { - Metadata.instantiateWrapperFor(klassField.getValue(oop)); - } + Oop.getKlassForOopHandle(oop); return true; } catch (AddressException | WrongTypeException e) { return false; diff --git a/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp b/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp index 5f9a361105ec7..79be24d4c3de7 100644 --- a/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp +++ b/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp @@ -23,59 +23,48 @@ #include "precompiled.hpp" #include "gc/shared/preservedMarks.inline.hpp" +#include "gc/shared/gcForwarding.inline.hpp" #include "oops/oop.inline.hpp" #include "unittest.hpp" -// Class to create a "fake" oop with a mark that will -// return true for calls to must_be_preserved(). 
-class FakeOop { - oopDesc _oop; - -public: - FakeOop() : _oop() { _oop.set_mark(originalMark()); } - - oop get_oop() { return &_oop; } - markWord mark() { return _oop.mark(); } - void set_mark(markWord m) { _oop.set_mark(m); } - void forward_to(oop obj) { - markWord m = markWord::encode_pointer_as_mark(obj); - _oop.set_mark(m); - } - - static markWord originalMark() { return markWord(markWord::lock_mask_in_place); } - static markWord changedMark() { return markWord(0x4711); } -}; +static markWord originalMark() { return markWord(markWord::lock_mask_in_place); } +static markWord changedMark() { return markWord(0x4711); } #define ASSERT_MARK_WORD_EQ(a, b) ASSERT_EQ((a).value(), (b).value()) TEST_VM(PreservedMarks, iterate_and_restore) { PreservedMarks pm; - FakeOop o1; - FakeOop o2; - FakeOop o3; - FakeOop o4; + + HeapWord fakeheap[32] = { nullptr }; + HeapWord* heap = align_up(fakeheap, 8 * sizeof(HeapWord)); + GCForwarding::initialize(MemRegion(&heap[0], &heap[16])); + + oop o1 = cast_to_oop(&heap[0]); o1->set_mark(originalMark()); + oop o2 = cast_to_oop(&heap[2]); o2->set_mark(originalMark()); + oop o3 = cast_to_oop(&heap[4]); o3->set_mark(originalMark()); + oop o4 = cast_to_oop(&heap[6]); o4->set_mark(originalMark()); // Make sure initial marks are correct. - ASSERT_MARK_WORD_EQ(o1.mark(), FakeOop::originalMark()); - ASSERT_MARK_WORD_EQ(o2.mark(), FakeOop::originalMark()); - ASSERT_MARK_WORD_EQ(o3.mark(), FakeOop::originalMark()); - ASSERT_MARK_WORD_EQ(o4.mark(), FakeOop::originalMark()); + ASSERT_MARK_WORD_EQ(o1->mark(), originalMark()); + ASSERT_MARK_WORD_EQ(o2->mark(), originalMark()); + ASSERT_MARK_WORD_EQ(o3->mark(), originalMark()); + ASSERT_MARK_WORD_EQ(o4->mark(), originalMark()); // Change the marks and verify change. 
- o1.set_mark(FakeOop::changedMark()); - o2.set_mark(FakeOop::changedMark()); - ASSERT_MARK_WORD_EQ(o1.mark(), FakeOop::changedMark()); - ASSERT_MARK_WORD_EQ(o2.mark(), FakeOop::changedMark()); + o1->set_mark(changedMark()); + o2->set_mark(changedMark()); + ASSERT_MARK_WORD_EQ(o1->mark(), changedMark()); + ASSERT_MARK_WORD_EQ(o2->mark(), changedMark()); // Push o1 and o2 to have their marks preserved. - pm.push_if_necessary(o1.get_oop(), o1.mark()); - pm.push_if_necessary(o2.get_oop(), o2.mark()); + pm.push_if_necessary(o1, o1->mark()); + pm.push_if_necessary(o2, o2->mark()); // Fake a move from o1->o3 and o2->o4. - o1.forward_to(o3.get_oop()); - o2.forward_to(o4.get_oop()); - ASSERT_EQ(o1.get_oop()->forwardee(), o3.get_oop()); - ASSERT_EQ(o2.get_oop()->forwardee(), o4.get_oop()); + GCForwarding::forward_to(o1, o3); + GCForwarding::forward_to(o2, o4); + ASSERT_EQ(GCForwarding::forwardee(o1), o3); + ASSERT_EQ(GCForwarding::forwardee(o2), o4); // Adjust will update the PreservedMarks stack to // make sure the mark is updated at the new location. pm.adjust_during_full_gc(); @@ -83,6 +72,6 @@ TEST_VM(PreservedMarks, iterate_and_restore) { // Restore all preserved and verify that the changed // mark is now present at o3 and o4. 
pm.restore(); - ASSERT_MARK_WORD_EQ(o3.mark(), FakeOop::changedMark()); - ASSERT_MARK_WORD_EQ(o4.mark(), FakeOop::changedMark()); + ASSERT_MARK_WORD_EQ(o3->mark(), changedMark()); + ASSERT_MARK_WORD_EQ(o4->mark(), changedMark()); } diff --git a/test/hotspot/gtest/oops/test_arrayOop.cpp b/test/hotspot/gtest/oops/test_arrayOop.cpp index e67e6e6c13b92..5670aedafc1e1 100644 --- a/test/hotspot/gtest/oops/test_arrayOop.cpp +++ b/test/hotspot/gtest/oops/test_arrayOop.cpp @@ -82,7 +82,23 @@ TEST_VM(arrayOopDesc, narrowOop) { TEST_VM(arrayOopDesc, base_offset) { #ifdef _LP64 - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_BOOLEAN), 12); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_BYTE), 12); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_SHORT), 12); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_CHAR), 12); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_INT), 12); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_FLOAT), 12); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_LONG), 16); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_DOUBLE), 16); + if (UseCompressedOops) { + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_OBJECT), 12); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_ARRAY), 12); + } else { + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_OBJECT), 16); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_ARRAY), 16); + } + } else if (UseCompressedClassPointers) { EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_BOOLEAN), 16); EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_BYTE), 16); EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_SHORT), 16); diff --git a/test/hotspot/gtest/oops/test_objArrayOop.cpp b/test/hotspot/gtest/oops/test_objArrayOop.cpp index 60cf6242dd596..deb4919ce4600 100644 --- a/test/hotspot/gtest/oops/test_objArrayOop.cpp +++ b/test/hotspot/gtest/oops/test_objArrayOop.cpp @@ -28,29 +28,36 @@ TEST_VM(objArrayOop, osize) { static const struct { - int objal; bool ccp; 
bool coops; int result; + int objal; bool ccp; bool coops; bool coh; int result; } x[] = { -// ObjAligInB, UseCCP, UseCoops, object size in heap words +// ObjAligInB, UseCCP, UseCoops, UseCOH, object size in heap words #ifdef _LP64 - { 8, false, false, 4 }, // 20 byte header, 8 byte oops - { 8, false, true, 3 }, // 20 byte header, 4 byte oops - { 8, true, false, 3 }, // 16 byte header, 8 byte oops - { 8, true, true, 3 }, // 16 byte header, 4 byte oops - { 16, false, false, 4 }, // 20 byte header, 8 byte oops, 16-byte align - { 16, false, true, 4 }, // 20 byte header, 4 byte oops, 16-byte align - { 16, true, false, 4 }, // 16 byte header, 8 byte oops, 16-byte align - { 16, true, true, 4 }, // 16 byte header, 4 byte oops, 16-byte align - { 256, false, false, 32 }, // 20 byte header, 8 byte oops, 256-byte align - { 256, false, true, 32 }, // 20 byte header, 4 byte oops, 256-byte align - { 256, true, false, 32 }, // 16 byte header, 8 byte oops, 256-byte align - { 256, true, true, 32 }, // 16 byte header, 4 byte oops, 256-byte align + { 8, false, false, false, 4 }, // 20 byte header, 8 byte oops + { 8, false, true, false, 3 }, // 20 byte header, 4 byte oops + { 8, true, false, false, 3 }, // 16 byte header, 8 byte oops + { 8, true, true, false, 3 }, // 16 byte header, 4 byte oops + { 8, true, false, true, 3 }, // 12 byte header, 8 byte oops + { 8, true, true, true, 2 }, // 12 byte header, 4 byte oops + { 16, false, false, false, 4 }, // 20 byte header, 8 byte oops, 16-byte align + { 16, false, true, false, 4 }, // 20 byte header, 4 byte oops, 16-byte align + { 16, true, false, false, 4 }, // 16 byte header, 8 byte oops, 16-byte align + { 16, true, true, false, 4 }, // 16 byte header, 4 byte oops, 16-byte align + { 16, true, false, true, 4 }, // 12 byte header, 8 byte oops, 16-byte align + { 16, true, true, true, 2 }, // 12 byte header, 4 byte oops, 16-byte align + { 256, false, false, false, 32 }, // 20 byte header, 8 byte oops, 256-byte align + { 256, false, true, 
false, 32 }, // 20 byte header, 4 byte oops, 256-byte align + { 256, true, false, false, 32 }, // 16 byte header, 8 byte oops, 256-byte align + { 256, true, true, false, 32 }, // 16 byte header, 4 byte oops, 256-byte align + { 256, true, false, true, 32 }, // 12 byte header, 8 byte oops, 256-byte align + { 256, true, true, true, 32 }, // 12 byte header, 4 byte oops, 256-byte align #else - { 8, false, false, 4 }, // 12 byte header, 4 byte oops, wordsize 4 + { 8, false, false, false, 4 }, // 12 byte header, 4 byte oops, wordsize 4 #endif - { -1, false, false, -1 } + { -1, false, false, false, -1 } }; for (int i = 0; x[i].result != -1; i++) { - if (x[i].objal == (int)ObjectAlignmentInBytes && x[i].ccp == UseCompressedClassPointers && x[i].coops == UseCompressedOops) { + if (x[i].objal == (int)ObjectAlignmentInBytes && x[i].ccp == UseCompressedClassPointers && x[i].coops == UseCompressedOops && + x[i].coh == UseCompactObjectHeaders) { EXPECT_EQ(objArrayOopDesc::object_size(1), (size_t)x[i].result); } } diff --git a/test/hotspot/gtest/oops/test_typeArrayOop.cpp b/test/hotspot/gtest/oops/test_typeArrayOop.cpp index a7565a23d58ee..b8c5586707757 100644 --- a/test/hotspot/gtest/oops/test_typeArrayOop.cpp +++ b/test/hotspot/gtest/oops/test_typeArrayOop.cpp @@ -36,7 +36,11 @@ TEST_VM(typeArrayOopDesc, bool_at_put) { char* addr = align_up(mem, 16); typeArrayOop o = (typeArrayOop) cast_to_oop(addr); - o->set_klass(Universe::boolArrayKlass()); + if (UseCompactObjectHeaders) { + o->set_mark(Universe::boolArrayKlass()->prototype_header()); + } else { + o->set_klass(Universe::boolArrayKlass()); + } o->set_length(10); diff --git a/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationMismatchedAccess.java b/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationMismatchedAccess.java index b68ddfe2799ce..1a1aa6b7a5e91 100644 --- a/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationMismatchedAccess.java +++ 
b/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationMismatchedAccess.java @@ -147,7 +147,8 @@ static private void runAndVerify3(Runnable test, int offset) { } @Test - @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }) + @IR(applyIf = {"UseCompactObjectHeaders", "false"}, + counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }) public static void testByteLong1(byte[] dest, long[] src) { for (int i = 0; i < src.length; i++) { UNSAFE.putLongUnaligned(dest, UNSAFE.ARRAY_BYTE_BASE_OFFSET + 8 * i, src[i]); @@ -160,7 +161,8 @@ public static void testByteLong1_runner() { } @Test - @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }) + @IR(applyIf = {"UseCompactObjectHeaders", "false"}, + counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }) public static void testByteLong2(byte[] dest, long[] src) { for (int i = 1; i < src.length; i++) { UNSAFE.putLongUnaligned(dest, UNSAFE.ARRAY_BYTE_BASE_OFFSET + 8 * (i - 1), src[i]); @@ -173,7 +175,8 @@ public static void testByteLong2_runner() { } @Test - @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }) + @IR(applyIf = {"UseCompactObjectHeaders", "false"}, + counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }) public static void testByteLong3(byte[] dest, long[] src) { for (int i = 0; i < src.length - 1; i++) { UNSAFE.putLongUnaligned(dest, UNSAFE.ARRAY_BYTE_BASE_OFFSET + 8 * (i + 1), src[i]); @@ -202,7 +205,8 @@ public static void testByteLong4_runner() { } @Test - @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }) + @IR(applyIf = {"UseCompactObjectHeaders", "false"}, + counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }) public static void testByteLong5(byte[] dest, long[] src, int start, int stop) { for (int i = start; i < stop; i++) { UNSAFE.putLongUnaligned(dest, UNSAFE.ARRAY_BYTE_BASE_OFFSET + 8 * (i + baseOffset), src[i]); @@ -216,7 +220,8 @@ public static void 
testByteLong5_runner() { } @Test - @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }) + @IR(applyIf = {"UseCompactObjectHeaders", "false"}, + counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }) public static void testByteByte1(byte[] dest, byte[] src) { for (int i = 0; i < src.length / 8; i++) { UNSAFE.putLongUnaligned(dest, UNSAFE.ARRAY_BYTE_BASE_OFFSET + 8 * i, UNSAFE.getLongUnaligned(src, UNSAFE.ARRAY_BYTE_BASE_OFFSET + 8 * i)); @@ -229,7 +234,8 @@ public static void testByteByte1_runner() { } @Test - @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }) + @IR(applyIf = {"UseCompactObjectHeaders", "false"}, + counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }) public static void testByteByte2(byte[] dest, byte[] src) { for (int i = 1; i < src.length / 8; i++) { UNSAFE.putLongUnaligned(dest, UNSAFE.ARRAY_BYTE_BASE_OFFSET + 8 * (i - 1), UNSAFE.getLongUnaligned(src, UNSAFE.ARRAY_BYTE_BASE_OFFSET + 8 * i)); diff --git a/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationNotRun.java b/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationNotRun.java index 5968b7221c70c..ba78d74476760 100644 --- a/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationNotRun.java +++ b/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationNotRun.java @@ -51,7 +51,8 @@ public static void main(String[] args) { static long[] longArray = new long[size]; @Test - @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }) + @IR(applyIf = {"UseCompactObjectHeaders", "false"}, + counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }) public static void test(byte[] dest, long[] src) { for (int i = 0; i < src.length; i++) { if ((i < 0) || (8 > sizeBytes - i)) { diff --git a/test/hotspot/jtreg/compiler/lib/ir_framework/TestFramework.java b/test/hotspot/jtreg/compiler/lib/ir_framework/TestFramework.java index d477aa44763fa..caef911f73a03 100644 --- 
a/test/hotspot/jtreg/compiler/lib/ir_framework/TestFramework.java +++ b/test/hotspot/jtreg/compiler/lib/ir_framework/TestFramework.java @@ -142,7 +142,8 @@ public class TestFramework { "UseZbb", "UseRVV", "Xlog", - "LogCompilation" + "LogCompilation", + "UseCompactObjectHeaders" ) ); diff --git a/test/hotspot/jtreg/compiler/loopopts/superword/TestAlignVector.java b/test/hotspot/jtreg/compiler/loopopts/superword/TestAlignVector.java index fd5c2969074f9..102e08c6787b1 100644 --- a/test/hotspot/jtreg/compiler/loopopts/superword/TestAlignVector.java +++ b/test/hotspot/jtreg/compiler/loopopts/superword/TestAlignVector.java @@ -398,6 +398,7 @@ static Object[] test0(byte[] a, byte[] b, byte mask) { @IR(counts = {IRNode.LOAD_VECTOR_B, "> 0", IRNode.AND_VB, "> 0", IRNode.STORE_VECTOR, "> 0"}, + applyIf = {"UseCompactObjectHeaders", "false"}, applyIfPlatform = {"64-bit", "true"}, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"}) static Object[] test1(byte[] a, byte[] b, byte mask) { @@ -706,7 +707,7 @@ static Object[] test10c(short[] a, short[] b, short mask) { @IR(counts = {IRNode.LOAD_VECTOR_S, IRNode.VECTOR_SIZE_4, "> 0", IRNode.AND_VS, IRNode.VECTOR_SIZE_4, "> 0", IRNode.STORE_VECTOR, "> 0"}, - applyIf = {"MaxVectorSize", ">=16"}, + applyIfAnd = {"MaxVectorSize", ">=16", "UseCompactObjectHeaders", "false"}, applyIfPlatform = {"64-bit", "true"}, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"}) static Object[] test10d(short[] a, short[] b, short mask) { @@ -1001,6 +1002,7 @@ static Object[] test13aIL(int[] a, long[] b) { IRNode.ADD_VB, "> 0", IRNode.ADD_VI, "> 0", IRNode.STORE_VECTOR, "> 0"}, + applyIf = {"UseCompactObjectHeaders", "false"}, applyIfPlatform = {"64-bit", "true"}, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"}) static Object[] test13aIB(int[] a, byte[] b) { @@ -1017,6 +1019,7 @@ static Object[] test13aIB(int[] a, byte[] b) { IRNode.ADD_VI, "> 0", IRNode.ADD_VS, "> 0", IRNode.STORE_VECTOR, "> 0"}, + applyIf = 
{"UseCompactObjectHeaders", "false"}, applyIfPlatform = {"64-bit", "true"}, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"}) static Object[] test13aIS(int[] a, short[] b) { @@ -1037,6 +1040,7 @@ static Object[] test13aIS(int[] a, short[] b) { IRNode.ADD_VI, "> 0", IRNode.ADD_VL, "> 0", IRNode.STORE_VECTOR, "> 0"}, + applyIf = {"UseCompactObjectHeaders", "false"}, applyIfPlatform = {"64-bit", "true"}, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"}) static Object[] test13aBSIL(byte[] a, short[] b, int[] c, long[] d) { @@ -1072,6 +1076,7 @@ static Object[] test13bIL(int[] a, long[] b) { IRNode.ADD_VB, "> 0", IRNode.ADD_VI, "> 0", IRNode.STORE_VECTOR, "> 0"}, + applyIf = {"UseCompactObjectHeaders", "false"}, applyIfPlatform = {"64-bit", "true"}, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"}) static Object[] test13bIB(int[] a, byte[] b) { @@ -1088,6 +1093,7 @@ static Object[] test13bIB(int[] a, byte[] b) { IRNode.ADD_VI, "> 0", IRNode.ADD_VS, "> 0", IRNode.STORE_VECTOR, "> 0"}, + applyIf = {"UseCompactObjectHeaders", "false"}, applyIfPlatform = {"64-bit", "true"}, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"}) static Object[] test13bIS(int[] a, short[] b) { @@ -1108,6 +1114,7 @@ static Object[] test13bIS(int[] a, short[] b) { IRNode.ADD_VI, "> 0", IRNode.ADD_VL, "> 0", IRNode.STORE_VECTOR, "> 0"}, + applyIf = {"UseCompactObjectHeaders", "false"}, applyIfPlatform = {"64-bit", "true"}, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"}) static Object[] test13bBSIL(byte[] a, short[] b, int[] c, long[] d) { diff --git a/test/hotspot/jtreg/compiler/loopopts/superword/TestIndependentPacksWithCyclicDependency.java b/test/hotspot/jtreg/compiler/loopopts/superword/TestIndependentPacksWithCyclicDependency.java index 65398e8adfd39..50f8863df6455 100644 --- a/test/hotspot/jtreg/compiler/loopopts/superword/TestIndependentPacksWithCyclicDependency.java +++ 
b/test/hotspot/jtreg/compiler/loopopts/superword/TestIndependentPacksWithCyclicDependency.java @@ -268,7 +268,8 @@ public void runTest6() { } @Test - @IR(counts = {IRNode.ADD_VI, "> 0", IRNode.MUL_VI, "> 0", IRNode.ADD_VF, "> 0"}, + @IR(applyIf = {"UseCompactObjectHeaders", "false"}, + counts = {IRNode.ADD_VI, "> 0", IRNode.MUL_VI, "> 0", IRNode.ADD_VF, "> 0"}, applyIfPlatform = {"64-bit", "true"}, applyIfCPUFeatureOr = {"sse4.1", "true", "asimd", "true"}) static void test6(int[] dataIa, int[] dataIb, float[] dataFa, float[] dataFb, diff --git a/test/hotspot/jtreg/compiler/loopopts/superword/TestMulAddS2I.java b/test/hotspot/jtreg/compiler/loopopts/superword/TestMulAddS2I.java index fb99fc5983a16..758ecca0f73be 100644 --- a/test/hotspot/jtreg/compiler/loopopts/superword/TestMulAddS2I.java +++ b/test/hotspot/jtreg/compiler/loopopts/superword/TestMulAddS2I.java @@ -163,11 +163,13 @@ public static int[] testc(int[] out) { @Test @IR(applyIfCPUFeature = {"sse2", "true"}, applyIfPlatform = {"64-bit", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"asimd", "true"}, - applyIf = {"MaxVectorSize", "16"}, // AD file requires vector_length = 16 + applyIfAnd = {"MaxVectorSize", "16", "UseCompactObjectHeaders", "false" }, // AD file requires vector_length = 16 counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"avx512_vnni", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI_VNNI, "> 0"}) public static int[] testd(int[] out) { for (int i = 0; i < ITER-2; i+=2) { @@ -181,11 +183,13 @@ public static int[] testd(int[] out) { @Test @IR(applyIfCPUFeature = {"sse2", "true"}, applyIfPlatform = {"64-bit", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"asimd", "true"}, - 
applyIf = {"MaxVectorSize", "16"}, // AD file requires vector_length = 16 + applyIfAnd = {"MaxVectorSize", "16", "UseCompactObjectHeaders", "false" }, // AD file requires vector_length = 16 counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"avx512_vnni", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI_VNNI, "> 0"}) public static int[] teste(int[] out) { for (int i = 0; i < ITER-2; i+=2) { @@ -199,11 +203,13 @@ public static int[] teste(int[] out) { @Test @IR(applyIfCPUFeature = {"sse2", "true"}, applyIfPlatform = {"64-bit", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"asimd", "true"}, - applyIf = {"MaxVectorSize", "16"}, // AD file requires vector_length = 16 + applyIfAnd = {"MaxVectorSize", "16", "UseCompactObjectHeaders", "false" }, // AD file requires vector_length = 16 counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"avx512_vnni", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI_VNNI, "> 0"}) public static int[] testf(int[] out) { for (int i = 0; i < ITER-2; i+=2) { @@ -217,11 +223,13 @@ public static int[] testf(int[] out) { @Test @IR(applyIfCPUFeature = {"sse2", "true"}, applyIfPlatform = {"64-bit", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"asimd", "true"}, - applyIf = {"MaxVectorSize", "16"}, // AD file requires vector_length = 16 + applyIfAnd = {"MaxVectorSize", "16", "UseCompactObjectHeaders", "false" }, // AD file requires vector_length = 16 counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"avx512_vnni", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = 
{IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI_VNNI, "> 0"}) public static int[] testg(int[] out) { for (int i = 0; i < ITER-2; i+=2) { @@ -235,11 +243,13 @@ public static int[] testg(int[] out) { @Test @IR(applyIfCPUFeature = {"sse2", "true"}, applyIfPlatform = {"64-bit", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"asimd", "true"}, - applyIf = {"MaxVectorSize", "16"}, // AD file requires vector_length = 16 + applyIfAnd = {"MaxVectorSize", "16", "UseCompactObjectHeaders", "false" }, // AD file requires vector_length = 16 counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"avx512_vnni", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI_VNNI, "> 0"}) public static int[] testh(int[] out) { for (int i = 0; i < ITER-2; i+=2) { diff --git a/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java b/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java index d37bf56738143..e407895107186 100644 --- a/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java +++ b/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java @@ -259,7 +259,6 @@ private void testConcurrentRefinementLogs() throws Exception { LogMessageWithLevel exhFailureMessages[] = new LogMessageWithLevel[] { new LogMessageWithLevel("Recalculate Used Memory \\(ms\\):", Level.DEBUG), - new LogMessageWithLevel("Restore Preserved Marks \\(ms\\):", Level.DEBUG), new LogMessageWithLevel("Restore Evacuation Failed Regions \\(ms\\):", Level.DEBUG), new LogMessageWithLevel("Process Evacuation Failed Regions \\(ms\\):", Level.DEBUG), new LogMessageWithLevel("Evacuation Failed Regions:", Level.DEBUG), diff --git a/test/hotspot/jtreg/gc/g1/plab/TestPLABPromotion.java b/test/hotspot/jtreg/gc/g1/plab/TestPLABPromotion.java index 2e7ebc2370f03..88d11c10fa0b2 100644 --- a/test/hotspot/jtreg/gc/g1/plab/TestPLABPromotion.java +++ 
b/test/hotspot/jtreg/gc/g1/plab/TestPLABPromotion.java @@ -32,7 +32,7 @@ * @modules java.management * @build jdk.test.whitebox.WhiteBox * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox - * @run main/timeout=240 gc.g1.plab.TestPLABPromotion + * @run main/othervm/timeout=240 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI gc.g1.plab.TestPLABPromotion */ package gc.g1.plab; @@ -48,12 +48,15 @@ import jdk.test.lib.Platform; import jdk.test.lib.process.OutputAnalyzer; import jdk.test.lib.process.ProcessTools; +import jdk.test.whitebox.WhiteBox; /** * Test checks PLAB promotion of different size objects. */ public class TestPLABPromotion { + private static final boolean COMPACT_HEADERS = Platform.is64bit() && WhiteBox.getWhiteBox().getBooleanVMFlag("UseCompactObjectHeaders"); + // GC ID with survivor PLAB statistics private final static long GC_ID_SURVIVOR_STATS = 1l; // GC ID with old PLAB statistics @@ -74,7 +77,7 @@ public class TestPLABPromotion { private static final int PLAB_SIZE_HIGH = 65536; private static final int OBJECT_SIZE_SMALL = 10 * HEAP_WORD_SIZE; private static final int OBJECT_SIZE_MEDIUM = 128 * HEAP_WORD_SIZE; - private static final int OBJECT_SIZE_HIGH = 3072 * HEAP_WORD_SIZE; + private static final int OBJECT_SIZE_HIGH = (COMPACT_HEADERS ? 3266 : 3250) * HEAP_WORD_SIZE; private static final int GC_NUM_SMALL = 1; private static final int GC_NUM_MEDIUM = 3; private static final int GC_NUM_HIGH = 7; diff --git a/test/hotspot/jtreg/runtime/FieldLayout/ArrayBaseOffsets.java b/test/hotspot/jtreg/runtime/FieldLayout/ArrayBaseOffsets.java index b679e866ac82a..e69de29bb2d1d 100644 --- a/test/hotspot/jtreg/runtime/FieldLayout/ArrayBaseOffsets.java +++ b/test/hotspot/jtreg/runtime/FieldLayout/ArrayBaseOffsets.java @@ -1,113 +0,0 @@ -/* - * Copyright Amazon.com Inc. or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * @test id=with-coops-no-ccp - * @library /test/lib - * @requires vm.bits == "64" - * @modules java.base/jdk.internal.misc - * @run main/othervm -XX:+UseCompressedOops -XX:-UseCompressedClassPointers ArrayBaseOffsets - */ -/* - * @test id=with-coops-with-ccp - * @library /test/lib - * @requires vm.bits == "64" - * @requires vm.opt.UseCompressedClassPointers != false - * @modules java.base/jdk.internal.misc - * @run main/othervm -XX:+UseCompressedOops -XX:+UseCompressedClassPointers ArrayBaseOffsets - */ -/* - * @test id=no-coops-no-ccp - * @library /test/lib - * @requires vm.bits == "64" - * @modules java.base/jdk.internal.misc - * @run main/othervm -XX:-UseCompressedOops -XX:-UseCompressedClassPointers ArrayBaseOffsets - */ -/* - * @test id=no-coops-with-ccp - * @library /test/lib - * @requires vm.bits == "64" - * @requires vm.opt.UseCompressedClassPointers != false - * @modules java.base/jdk.internal.misc - * @run main/othervm -XX:-UseCompressedOops -XX:+UseCompressedClassPointers ArrayBaseOffsets - */ -/* - * @test id=32bit - * @library 
/test/lib - * @requires vm.bits == "32" - * @modules java.base/jdk.internal.misc - * @run main/othervm ArrayBaseOffsets - */ - -import jdk.internal.misc.Unsafe; - -import java.lang.management.ManagementFactory; -import java.lang.management.RuntimeMXBean; -import java.util.List; - -import jdk.test.lib.Asserts; -import jdk.test.lib.Platform; - -public class ArrayBaseOffsets { - - private static final boolean COOP; - private static final boolean CCP; - - static { - if (Platform.is64bit()) { - RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean(); - List vmargs = runtime.getInputArguments(); - CCP = !vmargs.contains("-XX:-UseCompressedClassPointers"); - COOP = System.getProperty("java.vm.compressedOopsMode") != null; - } else { - COOP = CCP = false; - } - } - - static public void main(String[] args) { - Unsafe unsafe = Unsafe.getUnsafe(); - int intOffset, longOffset; - if (Platform.is64bit()) { - if (CCP) { - intOffset = 16; - longOffset = 16; - } else { - intOffset = 20; - longOffset = 24; - } - } else { - intOffset = 12; - longOffset = 16; - } - Asserts.assertEquals(unsafe.arrayBaseOffset(boolean[].class), intOffset, "Misplaced boolean array base"); - Asserts.assertEquals(unsafe.arrayBaseOffset(byte[].class), intOffset, "Misplaced byte array base"); - Asserts.assertEquals(unsafe.arrayBaseOffset(char[].class), intOffset, "Misplaced char array base"); - Asserts.assertEquals(unsafe.arrayBaseOffset(short[].class), intOffset, "Misplaced short array base"); - Asserts.assertEquals(unsafe.arrayBaseOffset(int[].class), intOffset, "Misplaced int array base"); - Asserts.assertEquals(unsafe.arrayBaseOffset(long[].class), longOffset, "Misplaced long array base"); - Asserts.assertEquals(unsafe.arrayBaseOffset(float[].class), intOffset, "Misplaced float array base"); - Asserts.assertEquals(unsafe.arrayBaseOffset(double[].class), longOffset, "Misplaced double array base"); - int expectedObjArrayOffset = (COOP || !Platform.is64bit()) ? 
intOffset : longOffset; - Asserts.assertEquals(unsafe.arrayBaseOffset(Object[].class), expectedObjArrayOffset, "Misplaced object array base"); - } -} diff --git a/test/hotspot/jtreg/runtime/FieldLayout/BaseOffsets.java b/test/hotspot/jtreg/runtime/FieldLayout/BaseOffsets.java new file mode 100644 index 0000000000000..fe2366b35cc31 --- /dev/null +++ b/test/hotspot/jtreg/runtime/FieldLayout/BaseOffsets.java @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test id=with-coops-with-ccp + * @library /test/lib / + * @requires vm.bits == "64" + * @modules java.base/jdk.internal.misc + * java.management + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:-UseCompactObjectHeaders BaseOffsets + */ +/* + * @test id=no-coops-with-ccp + * @library /test/lib / + * @requires vm.bits == "64" + * @modules java.base/jdk.internal.misc + * java.management + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:-UseCompressedOops -XX:+UseCompressedClassPointers -XX:-UseCompactObjectHeaders BaseOffsets + */ +/* + * @test id=with-coops-no-ccp + * @library /test/lib / + * @requires vm.bits == "64" + * @modules java.base/jdk.internal.misc + * java.management + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+UseCompressedOops -XX:-UseCompressedClassPointers -XX:-UseCompactObjectHeaders BaseOffsets + */ +/* + * @test id=no-coops-no-ccp + * @library /test/lib / + * @requires vm.bits == "64" + * @modules java.base/jdk.internal.misc + * java.management + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:-UseCompactObjectHeaders BaseOffsets + */ +/* + * @test id=with-coops-with-coh + * @library /test/lib / + * @requires vm.bits == "64" + * @modules java.base/jdk.internal.misc + * java.management + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -Xbootclasspath/a:.
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+UseCompressedOops -XX:+UseCompactObjectHeaders BaseOffsets + */ +/* + * @test id=no-coops-with-coh + * @library /test/lib / + * @requires vm.bits == "64" + * @modules java.base/jdk.internal.misc + * java.management + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:-UseCompressedOops -XX:+UseCompactObjectHeaders BaseOffsets + */ +/* + * @test id=32bit + * @library /test/lib / + * @requires vm.bits == "32" + * @modules java.base/jdk.internal.misc + * java.management + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI BaseOffsets + */ + +import java.lang.reflect.Field; +import java.util.Arrays; +import java.util.Comparator; +import jdk.internal.misc.Unsafe; + +import jdk.test.lib.Asserts; +import jdk.test.lib.Platform; +import jdk.test.whitebox.WhiteBox; + +public class BaseOffsets { + + static class LIClass { + public int i; + } + + public static final WhiteBox WB = WhiteBox.getWhiteBox(); + + static final long INT_OFFSET; + static final int INT_ARRAY_OFFSET; + static final int LONG_ARRAY_OFFSET; + static { + if (!Platform.is64bit() || WB.getBooleanVMFlag("UseCompactObjectHeaders")) { + INT_OFFSET = 8; + INT_ARRAY_OFFSET = 12; + LONG_ARRAY_OFFSET = 16; + } else if (WB.getBooleanVMFlag("UseCompressedClassPointers")) { + INT_OFFSET = 12; + INT_ARRAY_OFFSET = 16; + LONG_ARRAY_OFFSET = 16; + } else { + INT_OFFSET = 16; + INT_ARRAY_OFFSET = 20; + LONG_ARRAY_OFFSET = 24; + } + } + + static public void main(String[] args) { + Unsafe unsafe = Unsafe.getUnsafe(); + Class c = LIClass.class; + Field[] fields = c.getFields(); + for (int i = 0; 
i < fields.length; i++) { + long offset = unsafe.objectFieldOffset(fields[i]); + if (fields[i].getType() == int.class) { + Asserts.assertEquals(offset, INT_OFFSET, "Misplaced int field"); + } else { + Asserts.fail("Unexpected field type"); + } + } + + Asserts.assertEquals(unsafe.arrayBaseOffset(boolean[].class), INT_ARRAY_OFFSET, "Misplaced boolean array base"); + Asserts.assertEquals(unsafe.arrayBaseOffset(byte[].class), INT_ARRAY_OFFSET, "Misplaced byte array base"); + Asserts.assertEquals(unsafe.arrayBaseOffset(char[].class), INT_ARRAY_OFFSET, "Misplaced char array base"); + Asserts.assertEquals(unsafe.arrayBaseOffset(short[].class), INT_ARRAY_OFFSET, "Misplaced short array base"); + Asserts.assertEquals(unsafe.arrayBaseOffset(int[].class), INT_ARRAY_OFFSET, "Misplaced int array base"); + Asserts.assertEquals(unsafe.arrayBaseOffset(long[].class), LONG_ARRAY_OFFSET, "Misplaced long array base"); + Asserts.assertEquals(unsafe.arrayBaseOffset(float[].class), INT_ARRAY_OFFSET, "Misplaced float array base"); + Asserts.assertEquals(unsafe.arrayBaseOffset(double[].class), LONG_ARRAY_OFFSET, "Misplaced double array base"); + boolean narrowOops = System.getProperty("java.vm.compressedOopsMode") != null || + !Platform.is64bit(); + int expected_objary_offset = narrowOops ? INT_ARRAY_OFFSET : LONG_ARRAY_OFFSET; + Asserts.assertEquals(unsafe.arrayBaseOffset(Object[].class), expected_objary_offset, "Misplaced object array base"); + } +} diff --git a/test/hotspot/jtreg/runtime/cds/TestDefaultArchiveLoading.java b/test/hotspot/jtreg/runtime/cds/TestDefaultArchiveLoading.java new file mode 100644 index 0000000000000..d328625f8de47 --- /dev/null +++ b/test/hotspot/jtreg/runtime/cds/TestDefaultArchiveLoading.java @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test id=nocoops_nocoh + * @summary Test Loading of default archives in all configurations + * @requires vm.cds + * @requires vm.bits == 64 + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver TestDefaultArchiveLoading nocoops_nocoh + */ + +/** + * @test id=nocoops_coh + * @summary Test Loading of default archives in all configurations (requires --enable-cds-archive-coh) + * @requires vm.cds + * @requires vm.bits == 64 + * @requires !vm.gc.ZGenerational + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver TestDefaultArchiveLoading nocoops_coh + */ + +/** + * @test id=coops_nocoh + * @summary Test Loading of default archives in all configurations + * @requires vm.cds + * @requires vm.bits == 64 + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver TestDefaultArchiveLoading coops_nocoh + */ + +/** + * @test id=coops_coh + * @summary Test Loading of default archives in all configurations (requires 
--enable-cds-archive-coh) + * @requires vm.cds + * @requires vm.bits == 64 + * @requires !vm.gc.ZGenerational + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver TestDefaultArchiveLoading coops_coh + */ + +import jdk.test.lib.Platform; +import jdk.test.lib.process.OutputAnalyzer; +import jdk.test.lib.process.ProcessTools; +import jtreg.SkippedException; + +public class TestDefaultArchiveLoading { + public static void main(String[] args) throws Exception { + + if (args.length != 1) { + throw new RuntimeException("Expected argument"); + } + + String archiveSuffix; + char coh, coops; + + switch (args[0]) { + case "nocoops_nocoh": + coh = coops = '-'; + archiveSuffix = "_nocoops"; + break; + case "nocoops_coh": + coops = '-'; + coh = '+'; + archiveSuffix = "_nocoops_coh"; + break; + case "coops_nocoh": + coops = '+'; + coh = '-'; + archiveSuffix = ""; + break; + case "coops_coh": + coh = coops = '+'; + archiveSuffix = "_coh"; + break; + default: throw new RuntimeException("Invalid argument " + args[0]); + } + + ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:" + coh + "UseCompactObjectHeaders", + "-XX:" + coops + "UseCompressedOops", + "-Xlog:cds", + "-Xshare:on", // fail if we cannot load archive + "-version"); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + + output.shouldContain("classes" + archiveSuffix + ".jsa"); + + } +} diff --git a/test/hotspot/jtreg/runtime/cds/appcds/TestZGCWithCDS.java b/test/hotspot/jtreg/runtime/cds/appcds/TestZGCWithCDS.java index ea51b198f5999..275c88416514b 100644 --- a/test/hotspot/jtreg/runtime/cds/appcds/TestZGCWithCDS.java +++ b/test/hotspot/jtreg/runtime/cds/appcds/TestZGCWithCDS.java @@ -56,6 +56,7 @@ public class TestZGCWithCDS { public final static String ERR_MSG = "The saved state of UseCompressedOops and UseCompressedClassPointers is different from runtime, CDS will 
be disabled."; public static void main(String... args) throws Exception { String zGenerational = args[0]; + String compactHeaders = "-XX:" + (zGenerational.equals("-XX:+ZGenerational") ? "+" : "-") + "UseCompactObjectHeaders"; String helloJar = JarBuilder.build("hello", "Hello"); System.out.println("0. Dump with ZGC"); OutputAnalyzer out = TestCommon @@ -63,6 +64,8 @@ public static void main(String... args) throws Exception { new String[] {"Hello"}, "-XX:+UseZGC", zGenerational, + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, "-Xlog:cds"); out.shouldContain("Dumping shared data to file:"); out.shouldHaveExitValue(0); @@ -72,6 +75,8 @@ public static void main(String... args) throws Exception { .exec(helloJar, "-XX:+UseZGC", zGenerational, + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, "-Xlog:cds", "Hello"); out.shouldContain(HELLO); @@ -83,6 +88,8 @@ public static void main(String... args) throws Exception { "-XX:-UseZGC", "-XX:+UseCompressedOops", // in case turned off by vmoptions "-XX:+UseCompressedClassPointers", // by jtreg + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, "-Xlog:cds", "Hello"); out.shouldContain(UNABLE_TO_USE_ARCHIVE); @@ -95,6 +102,8 @@ public static void main(String... args) throws Exception { "-XX:+UseSerialGC", "-XX:-UseCompressedOops", "-XX:-UseCompressedClassPointers", + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, "-Xlog:cds", "Hello"); out.shouldContain(UNABLE_TO_USE_ARCHIVE); @@ -107,6 +116,8 @@ public static void main(String... args) throws Exception { "-XX:+UseSerialGC", "-XX:-UseCompressedOops", "-XX:+UseCompressedClassPointers", + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, "-Xlog:cds", "Hello"); out.shouldContain(HELLO); @@ -118,6 +129,8 @@ public static void main(String... 
args) throws Exception { "-XX:+UseSerialGC", "-XX:+UseCompressedOops", "-XX:-UseCompressedClassPointers", + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, "-Xlog:cds", "Hello"); out.shouldContain(UNABLE_TO_USE_ARCHIVE); @@ -130,6 +143,8 @@ public static void main(String... args) throws Exception { "-XX:+UseSerialGC", "-XX:+UseCompressedOops", "-XX:+UseCompressedClassPointers", + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, "-Xlog:cds", "Hello"); out.shouldContain(UNABLE_TO_USE_ARCHIVE); @@ -143,6 +158,8 @@ public static void main(String... args) throws Exception { "-XX:+UseSerialGC", "-XX:-UseCompressedOops", "-XX:+UseCompressedClassPointers", + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, "-Xlog:cds"); out.shouldContain("Dumping shared data to file:"); out.shouldHaveExitValue(0); @@ -152,6 +169,8 @@ public static void main(String... args) throws Exception { .exec(helloJar, "-XX:+UseZGC", zGenerational, + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, "-Xlog:cds", "Hello"); out.shouldContain(HELLO); diff --git a/test/hotspot/jtreg/runtime/cds/appcds/loaderConstraints/DynamicLoaderConstraintsTest.java b/test/hotspot/jtreg/runtime/cds/appcds/loaderConstraints/DynamicLoaderConstraintsTest.java index 6de08da4673cb..f98dcc716e5bf 100644 --- a/test/hotspot/jtreg/runtime/cds/appcds/loaderConstraints/DynamicLoaderConstraintsTest.java +++ b/test/hotspot/jtreg/runtime/cds/appcds/loaderConstraints/DynamicLoaderConstraintsTest.java @@ -167,7 +167,7 @@ static void doTest(boolean errorInDump) throws Exception { String zGenerational = "-XX:" + (useZGenerational ? "+" : "-") + "ZGenerational"; // Add options to force eager class unloading. 
cmdLine = TestCommon.concat(cmdLine, "-cp", loaderJar, - "-XX:+UseZGC", zGenerational, "-XX:ZCollectionInterval=0.01", + "-XX:+UseZGC", zGenerational, "-XX:ZCollectionInterval=0.01", "-XX:+UnlockExperimentalVMOptions", "-XX:-UseCompactObjectHeaders", loaderMainClass, appJar); setBaseArchiveOptions("-XX:+UseZGC", "-Xlog:cds"); } else { diff --git a/test/jdk/java/lang/instrument/GetObjectSizeIntrinsicsTest.java b/test/jdk/java/lang/instrument/GetObjectSizeIntrinsicsTest.java index 22c5069f3e78e..1e473ccd97464 100644 --- a/test/jdk/java/lang/instrument/GetObjectSizeIntrinsicsTest.java +++ b/test/jdk/java/lang/instrument/GetObjectSizeIntrinsicsTest.java @@ -301,6 +301,7 @@ public class GetObjectSizeIntrinsicsTest extends ASimpleInstrumentationTestCase { + private static final boolean COMPACT_HEADERS = Platform.is64bit() && WhiteBox.getWhiteBox().getBooleanVMFlag("UseCompactObjectHeaders"); static final Boolean COMPRESSED_OOPS = WhiteBox.getWhiteBox().getBooleanVMFlag("UseCompressedOops"); static final long REF_SIZE = (COMPRESSED_OOPS == null || COMPRESSED_OOPS == true) ? 4 : 8; @@ -374,15 +375,25 @@ private static long roundUp(long v, long a) { return (v + a - 1) / a * a; } + private static long expectedSmallObjSize() { + long size; + if (!Platform.is64bit() || COMPACT_HEADERS) { + size = 8; + } else { + size = 16; + } + return roundUp(size, OBJ_ALIGN); + } + private void testSize_newObject() { - long expected = roundUp(Platform.is64bit() ? 16 : 8, OBJ_ALIGN); + long expected = expectedSmallObjSize(); for (int c = 0; c < ITERS; c++) { assertEquals(expected, fInst.getObjectSize(new Object())); } } private void testSize_localObject() { - long expected = roundUp(Platform.is64bit() ? 
16 : 8, OBJ_ALIGN); + long expected = expectedSmallObjSize(); Object o = new Object(); for (int c = 0; c < ITERS; c++) { assertEquals(expected, fInst.getObjectSize(o)); @@ -392,7 +403,7 @@ private void testSize_localObject() { static Object staticO = new Object(); private void testSize_fieldObject() { - long expected = roundUp(Platform.is64bit() ? 16 : 8, OBJ_ALIGN); + long expected = expectedSmallObjSize(); for (int c = 0; c < ITERS; c++) { assertEquals(expected, fInst.getObjectSize(staticO)); }