Skip to content
Permalink
Browse files

8241825: Make compressed oops and compressed class pointers independent (x86_64, PPC, S390)

Reviewed-by: coleenp, fparain, stuefe, mdoerr
  • Loading branch information
fisk committed May 13, 2020
1 parent 9651edd commit 382e5dc334502d9147977d50a210ed503ca9fe3e
Showing with 407 additions and 309 deletions.
  1. +2 −0 src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
  2. +2 −0 src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp
  3. +2 −0 src/hotspot/cpu/s390/globalDefinitions_s390.hpp
  4. +2 −0 src/hotspot/cpu/sparc/globalDefinitions_sparc.hpp
  5. +1 −1 src/hotspot/cpu/x86/c1_FrameMap_x86.hpp
  6. +25 −19 src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
  7. +6 −3 src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
  8. +2 −1 src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
  9. +1 −2 src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
  10. +6 −0 src/hotspot/cpu/x86/globalDefinitions_x86.hpp
  11. +4 −2 src/hotspot/cpu/x86/interp_masm_x86.cpp
  12. +50 −61 src/hotspot/cpu/x86/macroAssembler_x86.cpp
  13. +8 −12 src/hotspot/cpu/x86/macroAssembler_x86.hpp
  14. +4 −4 src/hotspot/cpu/x86/methodHandles_x86.cpp
  15. +1 −1 src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
  16. +3 −3 src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
  17. +9 −10 src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
  18. +19 −11 src/hotspot/cpu/x86/templateTable_x86.cpp
  19. +2 −2 src/hotspot/cpu/x86/vtableStubs_x86_32.cpp
  20. +4 −3 src/hotspot/cpu/x86/vtableStubs_x86_64.cpp
  21. +21 −29 src/hotspot/cpu/x86/x86_64.ad
  22. +1 −78 src/hotspot/share/classfile/fieldLayoutBuilder.cpp
  23. +0 −2 src/hotspot/share/classfile/fieldLayoutBuilder.hpp
  24. +10 −8 src/hotspot/share/classfile/javaClasses.cpp
  25. +1 −2 src/hotspot/share/gc/z/zArguments.cpp
  26. +6 −1 src/hotspot/share/memory/metaspace.cpp
  27. +11 −5 src/hotspot/share/oops/instanceOop.hpp
  28. +2 −2 src/hotspot/share/opto/lcm.cpp
  29. +19 −8 src/hotspot/share/runtime/arguments.cpp
  30. +12 −12 test/hotspot/jtreg/gc/metaspace/TestSizeTransitions.java
  31. +135 −1 test/hotspot/jtreg/runtime/CompressedOops/CompressedClassPointers.java
  32. +0 −8 test/hotspot/jtreg/runtime/CompressedOops/CompressedClassSpaceSize.java
  33. +1 −7 test/hotspot/jtreg/runtime/cds/appcds/TestCombinedCompressedFlags.java
  34. +30 −6 test/hotspot/jtreg/runtime/cds/appcds/TestZGCWithCDS.java
  35. +1 −1 test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithG1ConcurrentMark.java
  36. +1 −1 test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithG1FullCollection.java
  37. +1 −1 test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithParallelOld.java
  38. +1 −1 test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithSerial.java
  39. +1 −1 test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountEvent.java
@@ -58,4 +58,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false;

#define PREFERRED_METASPACE_ALIGNMENT

#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS true

#endif // CPU_AARCH64_GLOBALDEFINITIONS_AARCH64_HPP
@@ -69,4 +69,6 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
// Define the condition to use this -XX flag.
#define USE_POLL_BIT_ONLY UseSIGTRAP

#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false

#endif // CPU_PPC_GLOBALDEFINITIONS_PPC_HPP
@@ -56,4 +56,6 @@ const bool CCallingConventionRequiresIntsAsLongs = true;

#define SUPPORT_RESERVED_STACK_AREA

#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false

#endif // CPU_S390_GLOBALDEFINITIONS_S390_HPP
@@ -56,4 +56,6 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
#define SUPPORT_RESERVED_STACK_AREA
#endif

#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS true

#endif // CPU_SPARC_GLOBALDEFINITIONS_SPARC_HPP
@@ -148,7 +148,7 @@

static int adjust_reg_range(int range) {
// Reduce the number of available regs (to free r12) in case of compressed oops
if (UseCompressedOops || UseCompressedClassPointers) return range - 1;
if (UseCompressedOops) return range - 1;
return range;
}

@@ -1185,6 +1185,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch

LIR_Address* addr = src->as_address_ptr();
Address from_addr = as_Address(addr);
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

if (addr->base()->type() == T_OBJECT) {
__ verify_oop(addr->base()->as_pointer_register());
@@ -1370,7 +1371,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
} else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
#ifdef _LP64
if (UseCompressedClassPointers) {
__ decode_klass_not_null(dest->as_register());
__ decode_klass_not_null(dest->as_register(), tmp_load_klass);
}
#endif
}
@@ -1698,6 +1699,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
Register dst = op->result_opr()->as_register();
ciKlass* k = op->klass();
Register Rtmp1 = noreg;
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

// check if it needs to be profiled
ciMethodData* md = NULL;
@@ -1761,7 +1763,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
// not a safepoint as obj null check happens earlier
#ifdef _LP64
if (UseCompressedClassPointers) {
__ load_klass(Rtmp1, obj);
__ load_klass(Rtmp1, obj, tmp_load_klass);
__ cmpptr(k_RInfo, Rtmp1);
} else {
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
@@ -1778,7 +1780,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
} else {
// get object class
// not a safepoint as obj null check happens earlier
__ load_klass(klass_RInfo, obj);
__ load_klass(klass_RInfo, obj, tmp_load_klass);
if (k->is_loaded()) {
// See if we get an immediate positive hit
#ifdef _LP64
@@ -1833,7 +1835,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
Register mdo = klass_RInfo, recv = k_RInfo;
__ bind(profile_cast_success);
__ mov_metadata(mdo, md->constant_encoding());
__ load_klass(recv, obj);
__ load_klass(recv, obj, tmp_load_klass);
type_profile_helper(mdo, md, data, recv, success);
__ jmp(*success);

@@ -1848,6 +1850,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L


void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
LIR_Code code = op->code();
if (code == lir_store_check) {
Register value = op->object()->as_register();
@@ -1893,8 +1896,8 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
}

add_debug_info_for_null_check_here(op->info_for_exception());
__ load_klass(k_RInfo, array);
__ load_klass(klass_RInfo, value);
__ load_klass(k_RInfo, array, tmp_load_klass);
__ load_klass(klass_RInfo, value, tmp_load_klass);

// get instance klass (it's already uncompressed)
__ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
@@ -1915,7 +1918,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
Register mdo = klass_RInfo, recv = k_RInfo;
__ bind(profile_cast_success);
__ mov_metadata(mdo, md->constant_encoding());
__ load_klass(recv, value);
__ load_klass(recv, value, tmp_load_klass);
type_profile_helper(mdo, md, data, recv, &done);
__ jmpb(done);

@@ -3107,6 +3110,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
Register dst_pos = op->dst_pos()->as_register();
Register length = op->length()->as_register();
Register tmp = op->tmp()->as_register();
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

__ resolve(ACCESS_READ, src);
__ resolve(ACCESS_WRITE, dst);
@@ -3254,13 +3258,13 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// an instance type.
if (flags & LIR_OpArrayCopy::type_check) {
if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(tmp, dst);
__ load_klass(tmp, dst, tmp_load_klass);
__ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
__ jcc(Assembler::greaterEqual, *stub->entry());
}

if (!(flags & LIR_OpArrayCopy::src_objarray)) {
__ load_klass(tmp, src);
__ load_klass(tmp, src, tmp_load_klass);
__ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
__ jcc(Assembler::greaterEqual, *stub->entry());
}
@@ -3317,8 +3321,8 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ push(src);
__ push(dst);

__ load_klass(src, src);
__ load_klass(dst, dst);
__ load_klass(src, src, tmp_load_klass);
__ load_klass(dst, dst, tmp_load_klass);

__ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);

@@ -3346,9 +3350,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
assert(flags & mask, "one of the two should be known to be an object array");

if (!(flags & LIR_OpArrayCopy::src_objarray)) {
__ load_klass(tmp, src);
__ load_klass(tmp, src, tmp_load_klass);
} else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(tmp, dst);
__ load_klass(tmp, dst, tmp_load_klass);
}
int lh_offset = in_bytes(Klass::layout_helper_offset());
Address klass_lh_addr(tmp, lh_offset);
@@ -3392,14 +3396,14 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
#ifdef _WIN64
// Allocate abi space for args but be sure to keep stack aligned
__ subptr(rsp, 6*wordSize);
__ load_klass(c_rarg3, dst);
__ load_klass(c_rarg3, dst, tmp_load_klass);
__ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
store_parameter(c_rarg3, 4);
__ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
__ call(RuntimeAddress(copyfunc_addr));
__ addptr(rsp, 6*wordSize);
#else
__ load_klass(c_rarg4, dst);
__ load_klass(c_rarg4, dst, tmp_load_klass);
__ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
__ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
__ call(RuntimeAddress(copyfunc_addr));
@@ -3464,7 +3468,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
if (UseCompressedClassPointers) {
__ encode_klass_not_null(tmp);
__ encode_klass_not_null(tmp, rscratch1);
}
#endif

@@ -3569,6 +3573,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
ciMethod* method = op->profiled_method();
int bci = op->profiled_bci();
ciMethod* callee = op->profiled_callee();
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

// Update counter for all call types
ciMethodData* md = method->method_data_or_null();
@@ -3621,7 +3626,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
}
}
} else {
__ load_klass(recv, recv);
__ load_klass(recv, recv, tmp_load_klass);
Label update_done;
type_profile_helper(mdo, md, data, recv, &update_done);
// Receiver did not match any saved receiver and there is no empty row for it.
@@ -3639,6 +3644,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
Register obj = op->obj()->as_register();
Register tmp = op->tmp()->as_pointer_register();
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
ciKlass* exact_klass = op->exact_klass();
intptr_t current_klass = op->current_klass();
@@ -3685,7 +3691,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
#ifdef ASSERT
if (exact_klass != NULL) {
Label ok;
__ load_klass(tmp, tmp);
__ load_klass(tmp, tmp, tmp_load_klass);
__ push(tmp);
__ mov_metadata(tmp, exact_klass->constant_encoding());
__ cmpptr(tmp, Address(rsp, 0));
@@ -3700,7 +3706,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
if (exact_klass != NULL) {
__ mov_metadata(tmp, exact_klass->constant_encoding());
} else {
__ load_klass(tmp, tmp);
__ load_klass(tmp, tmp, tmp_load_klass);
}

__ xorptr(tmp, mdo_addr);
@@ -53,7 +53,8 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr

if (UseBiasedLocking) {
assert(scratch != noreg, "should have scratch register at this point");
null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, false, done, &slow_case);
Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg);
null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, rklass_decode_tmp, false, done, &slow_case);
} else {
null_check_offset = offset();
}
@@ -150,6 +151,7 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i

void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
assert_different_registers(obj, klass, len);
Register tmp_encode_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
if (UseBiasedLocking && !len->is_valid()) {
assert_different_registers(obj, klass, len, t1, t2);
movptr(t1, Address(klass, Klass::prototype_header_offset()));
@@ -161,7 +163,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
#ifdef _LP64
if (UseCompressedClassPointers) { // Take care not to kill klass
movptr(t1, klass);
encode_klass_not_null(t1);
encode_klass_not_null(t1, tmp_encode_klass);
movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
} else
#endif
@@ -296,9 +298,10 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
// check against inline cache
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
int start_offset = offset();
Register tmp_load_klass = LP64_ONLY(rscratch2) NOT_LP64(noreg);

if (UseCompressedClassPointers) {
load_klass(rscratch1, receiver);
load_klass(rscratch1, receiver, tmp_load_klass);
cmpptr(rscratch1, iCache);
} else {
cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
@@ -1248,8 +1248,9 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

// load the klass and check the has finalizer flag
Label register_finalizer;
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
Register t = rsi;
__ load_klass(t, rax);
__ load_klass(t, rax, tmp_load_klass);
__ movl(t, Address(t, Klass::access_flags_offset()));
__ testl(t, JVM_ACC_HAS_FINALIZER);
__ jcc(Assembler::notZero, register_finalizer);
@@ -442,7 +442,6 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
if (use_rtm) {
assert_different_registers(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg);
} else {
assert(cx1Reg == noreg, "");
assert(cx2Reg == noreg, "");
assert_different_registers(objReg, boxReg, tmpReg, scrReg);
}
@@ -478,7 +477,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
// at [FETCH], below, will never observe a biased encoding (*101b).
// If this invariant is not held we risk exclusion (safety) failure.
if (UseBiasedLocking && !UseOptoBiasInlining) {
biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters);
biased_locking_enter(boxReg, objReg, tmpReg, scrReg, cx1Reg, false, DONE_LABEL, NULL, counters);
}

#if INCLUDE_RTM_OPT
@@ -69,4 +69,10 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
#define SUPPORT_RESERVED_STACK_AREA
#endif

#if INCLUDE_JVMCI
#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS (EnableJVMCI || UseAOT)
#else
#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false
#endif

#endif // CPU_X86_GLOBALDEFINITIONS_X86_HPP
@@ -59,7 +59,8 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md
jmpb(next);

bind(update);
load_klass(obj, obj);
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
load_klass(obj, obj, tmp_load_klass);

xorptr(obj, mdo_addr);
testptr(obj, TypeEntries::type_klass_mask);
@@ -1197,7 +1198,8 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
movptr(obj_reg, Address(lock_reg, obj_offset));

if (UseBiasedLocking) {
biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, false, done, &slow_case);
Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg);
biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, rklass_decode_tmp, false, done, &slow_case);
}

// Load immediate 1 into swap_reg %rax

0 comments on commit 382e5dc

Please sign in to comment.
You can’t perform that action at this time.