8247299: [lworld] Disable UseBiasedLocking by default
Reviewed-by: fparain
David Simms committed Oct 21, 2020
Parent: 3167856 · Commit: 3a7c9fe6c01bf79f1124e0fc3af143be28c49207
Showing with 307 additions and 558 deletions.
  1. +1 −1 src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
  2. +4 −4 src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
  3. +6 −5 src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
  4. +4 −3 src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
  5. +4 −3 src/hotspot/cpu/x86/interp_masm_x86.cpp
  6. +46 −10 src/hotspot/cpu/x86/macroAssembler_x86.cpp
  7. +6 −2 src/hotspot/cpu/x86/macroAssembler_x86.hpp
  8. +5 −3 src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
  9. +9 −11 src/hotspot/cpu/x86/templateTable_x86.cpp
  10. +2 −2 src/hotspot/share/gc/serial/markSweep.inline.hpp
  11. +1 −1 src/hotspot/share/memory/heapShared.cpp
  12. +4 −1 src/hotspot/share/memory/oopFactory.cpp
  13. +6 −3 src/hotspot/share/oops/flatArrayKlass.cpp
  14. +1 −1 src/hotspot/share/oops/flatArrayKlass.inline.hpp
  15. +7 −6 src/hotspot/share/oops/inlineKlass.cpp
  16. +1 −1 src/hotspot/share/oops/klass.inline.hpp
  17. +147 −50 src/hotspot/share/oops/markWord.hpp
  18. +7 −3 src/hotspot/share/oops/markWord.inline.hpp
  19. +3 −0 src/hotspot/share/oops/objArrayKlass.cpp
  20. +1 −0 src/hotspot/share/oops/oop.cpp
  21. +2 −0 src/hotspot/share/oops/oop.hpp
  22. +9 −2 src/hotspot/share/oops/oop.inline.hpp
  23. +2 −3 src/hotspot/share/opto/callnode.cpp
  24. +2 −2 src/hotspot/share/opto/graphKit.cpp
  25. +1 −1 src/hotspot/share/opto/inlinetypenode.cpp
  26. +2 −2 src/hotspot/share/opto/macro.cpp
  27. +5 −4 src/hotspot/share/opto/memnode.cpp
  28. +1 −1 src/hotspot/share/opto/mulnode.cpp
  29. +2 −7 src/hotspot/share/prims/jvmtiEnvBase.cpp
  30. +5 −0 src/hotspot/share/runtime/arguments.cpp
  31. +1 −1 src/hotspot/share/runtime/globals.hpp
  32. +4 −4 src/hotspot/share/runtime/synchronizer.cpp
  33. +1 −12 test/hotspot/gtest/gc/shared/test_preservedMarks.cpp
  34. +1 −0 test/hotspot/jtreg/ProblemList.txt
  35. +0 −89 test/hotspot/jtreg/compiler/rtm/cli/TestUseRTMLockingOptionWithBiasedLocking.java
  36. +0 −1 test/hotspot/jtreg/gc/TestFullGCALot.java
  37. +1 −5 test/hotspot/jtreg/runtime/Monitor/SyncOnPrimitiveWrapperTest.java
  38. +3 −5 test/hotspot/jtreg/runtime/cds/appcds/javaldr/LockDuringDump.java
  39. +0 −132 test/hotspot/jtreg/runtime/handshake/HandshakeDirectTest.java
  40. +0 −81 test/hotspot/jtreg/runtime/logging/BiasedLockingTest.java
  41. +0 −48 test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorInfo/GetOwnedMonitorInfoWithEATest.java
  42. +0 −48 ...g/serviceability/jvmti/GetOwnedMonitorStackDepthInfo/GetOwnedMonitorStackDepthInfoWithEATest.java
src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
@@ -345,7 +345,7 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
if (_throw_imse_stub != NULL) {
// When we come here, _obj_reg has already been checked to be non-null.
- const int is_value_mask = markWord::always_locked_pattern;
+ const int is_value_mask = markWord::inline_type_pattern;
Register mark = _scratch_reg->as_register();
__ movptr(mark, Address(_obj_reg->as_register(), oopDesc::mark_offset_in_bytes()));
__ andptr(mark, is_value_mask);
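The stub rejects monitor operations on inline types by testing a fixed bit pattern in the object's mark word. A minimal standalone C++ sketch of that test — illustrative bit values only, the authoritative layout lives in src/hotspot/share/oops/markWord.hpp:

```cpp
// Standalone model of the mark-word pattern test; not HotSpot source.
// Bit positions are assumptions for illustration only.
#include <cassert>
#include <cstdint>

using mark_t = std::uintptr_t;

constexpr mark_t unlocked_value            = 0b001;  // lock bits: unlocked
constexpr mark_t inline_type_bit_in_place  = 0b100;  // assumed bit position
constexpr mark_t inline_type_mask_in_place = 0b111;  // low bits tested together
constexpr mark_t inline_type_pattern = inline_type_bit_in_place | unlocked_value;

// Same shape as the stub: mask the low bits, compare against the full pattern.
bool is_inline_type(mark_t mark) {
  return (mark & inline_type_mask_in_place) == inline_type_pattern;
}

int main() {
  mark_t identity_obj = 0xCAFE000 | unlocked_value;       // ordinary object
  mark_t inline_obj   = 0xCAFE000 | inline_type_pattern;  // inline type instance
  assert(!is_inline_type(identity_obj));
  assert(is_inline_type(inline_obj));
}
```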
src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
@@ -2041,16 +2041,16 @@ void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op
ciKlass* left_klass = op->left_klass();
ciKlass* right_klass = op->right_klass();

- // (2) Value object check -- if either of the operands is not a value object,
+ // (2) Inline type check -- if either of the operands is not an inline type,
// they are not substitutable. We do this only if we are not sure that the
- // operands are value objects
+ // operands are inline types
if ((left_klass == NULL || right_klass == NULL) ||// The klass is still unloaded, or came from a Phi node.
!left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
Register tmp1 = op->tmp1()->as_register();
- __ movptr(tmp1, (intptr_t)markWord::always_locked_pattern);
+ __ movptr(tmp1, (intptr_t)markWord::inline_type_pattern);
__ andptr(tmp1, Address(left, oopDesc::mark_offset_in_bytes()));
__ andptr(tmp1, Address(right, oopDesc::mark_offset_in_bytes()));
- __ cmpptr(tmp1, (intptr_t)markWord::always_locked_pattern);
+ __ cmpptr(tmp1, (intptr_t)markWord::inline_type_pattern);
__ jcc(Assembler::notEqual, L_oops_not_equal);
}
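The substitutability check proves that both operands are inline types with a single compare: ANDing both mark words into one register preserves every pattern bit only if both words have all of them set. A hedged scalar model of the trick (pattern value assumed):

```cpp
// Scalar model of the two-operand AND trick; not HotSpot source.
#include <cassert>
#include <cstdint>

constexpr std::uintptr_t inline_type_pattern = 0b101;  // assumed value

bool both_inline_types(std::uintptr_t left_mark, std::uintptr_t right_mark) {
  std::uintptr_t t = inline_type_pattern;  // movptr(tmp1, inline_type_pattern)
  t &= left_mark;                          // andptr(tmp1, left->mark)
  t &= right_mark;                         // andptr(tmp1, right->mark)
  return t == inline_type_pattern;         // cmpptr; jcc(notEqual, L_oops_not_equal)
}

int main() {
  assert(both_inline_types(0x1000 | 0b101, 0x2000 | 0b101));
  assert(!both_inline_types(0x1000 | 0b001, 0x2000 | 0b101));  // left is an identity object
}
```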

src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
@@ -71,9 +71,10 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
movptr(hdr, Address(obj, hdr_offset));
// and mark it as unlocked
orptr(hdr, markWord::unlocked_value);
- if (EnableValhalla && !UseBiasedLocking) {
- // Mask always_locked bit such that we go to the slow path if object is an inline type
- andptr(hdr, ~((int) markWord::biased_lock_bit_in_place));
+ if (EnableValhalla) {
+ assert(!UseBiasedLocking, "Not compatible with biased-locking");
+ // Mask inline_type bit such that we go to the slow path if object is an inline type
+ andptr(hdr, ~((int) markWord::inline_type_bit_in_place));
}
// save unlocked object header into the displaced header location on the stack
movptr(Address(disp_hdr, 0), hdr);
@@ -164,8 +165,8 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
assert_different_registers(obj, klass, len);
Register tmp_encode_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
- if ((UseBiasedLocking || EnableValhalla) && !len->is_valid()) {
- // Need to copy markWord::always_locked_pattern for values.
+ if (EnableValhalla) {
+ // Need to copy markWord::prototype header for klass
assert_different_registers(obj, klass, len, t1, t2);
movptr(t1, Address(klass, Klass::prototype_header_offset()));
movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
@@ -516,9 +516,10 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp

// Attempt stack-locking ...
orptr (tmpReg, markWord::unlocked_value);
- if (EnableValhalla && !UseBiasedLocking) {
- // Mask always_locked bit such that we go to the slow path if object is an inline type
- andptr(tmpReg, ~((int) markWord::biased_lock_bit_in_place));
+ if (EnableValhalla) {
+ assert(!UseBiasedLocking, "Not compatible with biased-locking");
+ // Mask inline_type bit such that we go to the slow path if object is an inline type
+ andptr(tmpReg, ~((int) markWord::inline_type_bit_in_place));
}
movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
lock();
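With biased locking out of the picture, the inline_type bit does double duty: fast_lock clears it in the header value it expects the CAS to find, so for an inline type the CAS can never succeed and control falls to the slow path, which raises the error. A sketch of that mechanism (bit values assumed):

```cpp
// Model of why clearing inline_type_bit in the *expected* header value
// forces inline types off the stack-locking fast path. Not HotSpot source.
#include <atomic>
#include <cassert>
#include <cstdint>

constexpr std::uintptr_t unlocked_value           = 0b001;
constexpr std::uintptr_t inline_type_bit_in_place = 0b100;  // assumed position

bool try_stack_lock(std::atomic<std::uintptr_t>& header, std::uintptr_t displaced) {
  std::uintptr_t expected = header.load() | unlocked_value;  // orptr(tmpReg, unlocked_value)
  expected &= ~inline_type_bit_in_place;                     // andptr(tmpReg, ~inline_type_bit_in_place)
  // lock(); cmpxchgptr(...) -- install the displaced-header pointer on success
  return header.compare_exchange_strong(expected, displaced);
}

int main() {
  std::atomic<std::uintptr_t> identity_hdr{unlocked_value};
  std::atomic<std::uintptr_t> inline_hdr{unlocked_value | inline_type_bit_in_place};
  assert(try_stack_lock(identity_hdr, 0xDEAD0000));  // fast path wins
  assert(!try_stack_lock(inline_hdr, 0xDEAD0000));   // never matches -> slow path
}
```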
src/hotspot/cpu/x86/interp_masm_x86.cpp
@@ -1373,9 +1373,10 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {

// Load (object->mark() | 1) into swap_reg %rax
orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- if (EnableValhalla && !UseBiasedLocking) {
- // For slow path is_always_locked, using biased, which is never natural for !UseBiasLocking
- andptr(swap_reg, ~((int) markWord::biased_lock_bit_in_place));
+ if (EnableValhalla) {
+ assert(!UseBiasedLocking, "Not compatible with biased-locking");
+ // Mask inline_type bit such that we go to the slow path if object is an inline type
+ andptr(swap_reg, ~((int) markWord::inline_type_bit_in_place));
}

// Save (object->mark() | 1) into BasicLock's displaced header
src/hotspot/cpu/x86/macroAssembler_x86.cpp
@@ -2671,6 +2671,12 @@ void MacroAssembler::null_check(Register reg, int offset) {
}
}

+ void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) {
+ andptr(markword, markWord::inline_type_mask_in_place);
+ cmpptr(markword, markWord::inline_type_pattern);
+ jcc(Assembler::equal, is_inline_type);
+ }
+
void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) {
movl(temp_reg, Address(klass, Klass::access_flags_offset()));
testl(temp_reg, JVM_ACC_INLINE);
@@ -2680,7 +2686,7 @@ void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg
void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) {
testptr(object, object);
jcc(Assembler::equal, not_inline_type);
- const int is_inline_type_mask = markWord::always_locked_pattern;
+ const int is_inline_type_mask = markWord::inline_type_pattern;
movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes()));
andptr(tmp, is_inline_type_mask);
cmpptr(tmp, is_inline_type_mask);
@@ -2725,40 +2731,70 @@ void MacroAssembler::test_field_is_inlined(Register flags, Register temp_reg, La
jcc(Assembler::notZero, is_inlined);
}

+ void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) {
+ Label test_mark_word;
+ // load mark word
+ movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes()));
+ // check displaced
+ testl(temp_reg, markWord::unlocked_value);
+ jccb(Assembler::notZero, test_mark_word);
+ // slow path use klass prototype
+ push(rscratch1);
+ load_prototype_header(temp_reg, oop, rscratch1);
+ pop(rscratch1);
+
+ bind(test_mark_word);
+ testl(temp_reg, test_bit);
+ jcc((jmp_set) ? Assembler::notZero : Assembler::zero, jmp_label);
+ }
+
void MacroAssembler::test_flattened_array_oop(Register oop, Register temp_reg,
Label&is_flattened_array) {
- Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
- load_klass(temp_reg, oop, tmp_load_klass);
+ #ifdef _LP64
+ test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flattened_array);
+ #else
+ load_klass(temp_reg, oop, noreg);
movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
test_flattened_array_layout(temp_reg, is_flattened_array);
+ #endif
}

void MacroAssembler::test_non_flattened_array_oop(Register oop, Register temp_reg,
Label&is_non_flattened_array) {
- Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
- load_klass(temp_reg, oop, tmp_load_klass);
+ #ifdef _LP64
+ test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flattened_array);
+ #else
+ load_klass(temp_reg, oop, noreg);
movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
test_non_flattened_array_layout(temp_reg, is_non_flattened_array);
+ #endif
}

void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label&is_null_free_array) {
- Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
- load_klass(temp_reg, oop, tmp_load_klass);
+ #ifdef _LP64
+ test_oop_prototype_bit(oop, temp_reg, markWord::nullfree_array_bit_in_place, true, is_null_free_array);
+ #else
+ load_klass(temp_reg, oop, noreg);
movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
test_null_free_array_layout(temp_reg, is_null_free_array);
+ #endif
}

void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array) {
- Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
- load_klass(temp_reg, oop, tmp_load_klass);
+ #ifdef _LP64
+ test_oop_prototype_bit(oop, temp_reg, markWord::nullfree_array_bit_in_place, false, is_non_null_free_array);
+ #else
+ load_klass(temp_reg, oop, noreg);
movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset()));
test_non_null_free_array_layout(temp_reg, is_non_null_free_array);
+ #endif
}

void MacroAssembler::test_flattened_array_layout(Register lh, Label& is_flattened_array) {
testl(lh, Klass::_lh_array_tag_vt_value_bit_inplace);
jcc(Assembler::notZero, is_flattened_array);
}

void MacroAssembler::test_non_flattened_array_layout(Register lh, Label& is_non_flattened_array) {
testl(lh, Klass::_lh_array_tag_vt_value_bit_inplace);
jcc(Assembler::zero, is_non_flattened_array);
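The new test_oop_prototype_bit helper can only trust array-property bits in the mark word while the unlocked bit is set; once an object is stack-locked or inflated, the mark word holds a displaced pointer, so the helper reloads the prototype header from the klass instead. A hedged model of that control flow (field names and bit positions assumed):

```cpp
// Model of the displaced-header fallback in test_oop_prototype_bit.
// Not HotSpot source; names and bit positions are illustrative.
#include <cassert>
#include <cstdint>

constexpr std::uintptr_t unlocked_value          = 0b001;
constexpr std::uintptr_t flat_array_bit_in_place = 1u << 4;  // assumed position

struct Klass { std::uintptr_t prototype_header; };
struct Oop   { std::uintptr_t mark; const Klass* klass; };

bool test_prototype_bit(const Oop& obj, std::uintptr_t test_bit) {
  std::uintptr_t m = obj.mark;            // movptr(temp_reg, oop->mark)
  if ((m & unlocked_value) == 0) {        // testl(unlocked_value); jccb(notZero, test_mark_word)
    m = obj.klass->prototype_header;      // slow path: load_prototype_header
  }
  return (m & test_bit) != 0;             // testl(temp_reg, test_bit); jcc(...)
}

int main() {
  Klass flat_klass{unlocked_value | flat_array_bit_in_place};
  Oop unlocked_flat{flat_klass.prototype_header, &flat_klass};
  Oop locked_flat{0xBADC0DE0, &flat_klass};  // mark displaced: low lock bits cleared
  assert(test_prototype_bit(unlocked_flat, flat_array_bit_in_place));
  assert(test_prototype_bit(locked_flat, flat_array_bit_in_place));
}
```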
@@ -5413,7 +5449,7 @@ int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from
cmpptr(r14, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
jcc(Assembler::above, slow_case);
movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), r14);
- movptr(Address(r13, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::always_locked_prototype().value());
+ movptr(Address(r13, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value());

xorl(rax, rax); // use zero reg to clear memory (shorter code)
store_klass_gap(r13, rax); // zero klass gap for compressed oops
src/hotspot/cpu/x86/macroAssembler_x86.hpp
@@ -101,7 +101,10 @@ class MacroAssembler: public Assembler {
static bool needs_explicit_null_check(intptr_t offset);
static bool uses_implicit_null_check(void* address);

- // valueKlass queries, kills temp_reg
+ // markWord tests, kills markWord reg
+ void test_markword_is_inline_type(Register markword, Label& is_inline_type);
+
+ // inlineKlass queries, kills temp_reg
void test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type);
void test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type);
void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type);
@@ -116,7 +119,8 @@ class MacroAssembler: public Assembler {
void test_field_is_not_inline_type(Register flags, Register temp_reg, Label& not_inline);
void test_field_is_inlined(Register flags, Register temp_reg, Label& is_inlined);

- // Check oops array storage properties, i.e. flattened and/or null-free
+ // Check oops for special arrays, i.e. flattened and/or null-free
+ void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
void test_flattened_array_oop(Register oop, Register temp_reg, Label&is_flattened_array);
void test_non_flattened_array_oop(Register oop, Register temp_reg, Label&is_non_flattened_array);
void test_null_free_array_oop(Register oop, Register temp_reg, Label&is_null_free_array);
src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
@@ -2746,11 +2746,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

// Load (object->mark() | 1) into swap_reg %rax
__ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- if (EnableValhalla && !UseBiasedLocking) {
- // For slow path is_always_locked, using biased, which is never natural for !UseBiasLocking
- __ andptr(swap_reg, ~((int) markWord::biased_lock_bit_in_place));
+ if (EnableValhalla) {
+ assert(!UseBiasedLocking, "Not compatible with biased-locking");
+ // Mask inline_type bit such that we go to the slow path if object is an inline type
+ __ andptr(swap_reg, ~((int) markWord::inline_type_bit_in_place));
}


// Save (object->mark() | 1) into BasicLock's displaced header
__ movptr(Address(lock_reg, mark_word_offset), swap_reg);

src/hotspot/cpu/x86/templateTable_x86.cpp
@@ -2484,7 +2484,7 @@ void TemplateTable::if_acmp(Condition cc) {

__ profile_acmp(rbx, rdx, rax, rcx);

- const int is_inline_type_mask = markWord::always_locked_pattern;
+ const int is_inline_type_mask = markWord::inline_type_pattern;
if (EnableValhalla) {
__ cmpoop(rdx, rax);
__ jcc(Assembler::equal, (cc == equal) ? taken : not_taken);
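For acmp, reference equality still decides immediately; only when the references differ and both operands carry the inline-type pattern does the expensive substitutability test run. A scalar sketch of that decision ladder (pattern value assumed):

```cpp
// Model of the if_acmp fast path; not HotSpot source.
#include <cassert>
#include <cstdint>

constexpr std::uintptr_t inline_type_pattern = 0b101;  // assumed value

struct Obj { std::uintptr_t mark; };

// Returns true if the comparison was decided on the fast path.
bool acmp_fast_path(const Obj* a, const Obj* b, bool& equal) {
  if (a == b) { equal = true; return true; }  // cmpoop; jcc(equal, taken)
  if (a == nullptr || b == nullptr ||
      (a->mark & inline_type_pattern) != inline_type_pattern ||
      (b->mark & inline_type_pattern) != inline_type_pattern) {
    equal = false;  // an identity object (or null) with unequal references
    return true;
  }
  return false;  // two distinct inline-type objects: needs the substitutability call
}

int main() {
  Obj v1{inline_type_pattern}, v2{inline_type_pattern}, o1{0b001};
  bool eq = false;
  assert(acmp_fast_path(&v1, &v1, eq) && eq);   // same reference
  assert(acmp_fast_path(&o1, &v2, eq) && !eq);  // identity object: decided fast
  assert(!acmp_fast_path(&v1, &v2, eq));        // falls through to the slow call
}
```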
@@ -4664,16 +4664,9 @@ void TemplateTable::monitorenter() {

__ resolve(IS_NOT_NULL, rax);

- const int is_inline_type_mask = markWord::always_locked_pattern;
- Label has_identity;
+ Label is_inline_type;
__ movptr(rbx, Address(rax, oopDesc::mark_offset_in_bytes()));
- __ andptr(rbx, is_inline_type_mask);
- __ cmpl(rbx, is_inline_type_mask);
- __ jcc(Assembler::notEqual, has_identity);
- __ call_VM(noreg, CAST_FROM_FN_PTR(address,
- InterpreterRuntime::throw_illegal_monitor_state_exception));
- __ should_not_reach_here();
- __ bind(has_identity);
+ __ test_markword_is_inline_type(rbx, is_inline_type);

const Address monitor_block_top(
rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
@@ -4764,6 +4757,11 @@ void TemplateTable::monitorenter() {
// The bcp has already been incremented. Just need to dispatch to
// next instruction.
__ dispatch_next(vtos);

+ __ bind(is_inline_type);
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address,
+ InterpreterRuntime::throw_illegal_monitor_state_exception));
+ __ should_not_reach_here();
}

void TemplateTable::monitorexit() {
@@ -4774,7 +4772,7 @@ void TemplateTable::monitorexit() {

__ resolve(IS_NOT_NULL, rax);

- const int is_inline_type_mask = markWord::always_locked_pattern;
+ const int is_inline_type_mask = markWord::inline_type_pattern;
Label has_identity;
__ movptr(rbx, Address(rax, oopDesc::mark_offset_in_bytes()));
__ andptr(rbx, is_inline_type_mask);
src/hotspot/share/gc/serial/markSweep.inline.hpp
@@ -81,8 +81,8 @@ template <class T> inline void MarkSweep::adjust_pointer(T* p) {

oop new_obj = oop(obj->mark().decode_pointer());

- assert(new_obj != NULL || // is forwarding ptr?
- obj->mark() == markWord::prototype() || // not gc marked?
+ assert(new_obj != NULL || // is forwarding ptr?
+ obj->mark() == markWord::prototype_for_klass(obj->klass()) || // not gc marked?
(UseBiasedLocking && obj->mark().has_bias_pattern()),
// not gc marked?
"should be forwarded");
src/hotspot/share/memory/heapShared.cpp
@@ -192,7 +192,7 @@ oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
// identity_hash for all shared objects, so they are less likely to be written
// into during run time, increasing the potential of memory sharing.
int hash_original = obj->identity_hash();
- archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original));
+ archived_oop->set_mark(markWord::prototype_for_klass(archived_oop->klass()).copy_set_hash(hash_original));
assert(archived_oop->mark().is_unlocked(), "sanity");

DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
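Both of these GC/CDS call sites switch from the global markWord::prototype() to a per-klass prototype, since a "neutral" header now depends on whether the klass is an inline type, flat array, or null-free array. A minimal sketch of the change in shape (names illustrative):

```cpp
// Model of prototype() vs. prototype_for_klass(); not HotSpot source.
#include <cassert>
#include <cstdint>

constexpr std::uintptr_t unlocked_value      = 0b001;
constexpr std::uintptr_t inline_type_pattern = 0b101;  // assumed value

struct Klass {
  // Set once at class creation, e.g. in the InlineKlass constructor below.
  std::uintptr_t prototype_header = unlocked_value;
};

// Old shape: one global neutral header for every object.
constexpr std::uintptr_t prototype() { return unlocked_value; }

// New shape: ask the klass, which may carry extra header bits.
std::uintptr_t prototype_for_klass(const Klass* k) { return k->prototype_header; }

int main() {
  Klass identity_klass;
  Klass inline_klass;
  inline_klass.prototype_header = inline_type_pattern;  // set_prototype_header(...)
  assert(prototype_for_klass(&identity_klass) == prototype());
  assert(prototype_for_klass(&inline_klass) != prototype());  // global constant would be wrong here
}
```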
src/hotspot/share/memory/oopFactory.cpp
@@ -137,10 +137,13 @@ arrayOop oopFactory::new_flatArray(Klass* klass, int length, TRAPS) {
arrayOop oop;
if (array_klass->is_flatArray_klass()) {
oop = (arrayOop) FlatArrayKlass::cast(array_klass)->allocate(length, THREAD);
- assert(oop == NULL || oop->is_flatArray(), "sanity");
+ assert(oop == NULL || oop->klass()->is_flatArray_klass(), "sanity");
} else {
oop = (arrayOop) ObjArrayKlass::cast(array_klass)->allocate(length, THREAD);
}
assert(oop == NULL || oop->klass()->is_null_free_array_klass(), "Bad array storage encoding");
assert(oop == NULL || oop->klass()->is_null_free_array_klass(), "sanity");
assert(oop == NULL || oop->is_nullfreeArray(), "sanity");
return oop;
}

src/hotspot/share/oops/flatArrayKlass.cpp
@@ -60,12 +60,15 @@ FlatArrayKlass::FlatArrayKlass(Klass* element_klass, Symbol* name) : ArrayKlass(

set_element_klass(InlineKlass::cast(element_klass));
set_class_loader_data(element_klass->class_loader_data());
- set_layout_helper(array_layout_helper(InlineKlass::cast(element_klass)));

+ set_layout_helper(array_layout_helper(InlineKlass::cast(element_klass)));
assert(is_array_klass(), "sanity");
assert(is_flatArray_klass(), "sanity");
+ assert(is_null_free_array_klass(), "sanity");

+ set_prototype_header(markWord::flat_array_prototype());
+ assert(prototype_header().is_flat_array(), "sanity");

CMH("tweak name symbol refcnt ?")
#ifndef PRODUCT
if (PrintFlatArrayLayout) {
print();
@@ -181,7 +184,7 @@ jint FlatArrayKlass::array_layout_helper(InlineKlass* vk) {
}

int FlatArrayKlass::oop_size(oop obj) const {
- assert(obj->is_flatArray(),"must be an flat array");
+ assert(obj->klass()->is_flatArray_klass(),"must be an flat array");
flatArrayOop array = flatArrayOop(obj);
return array->object_size();
}
src/hotspot/share/oops/flatArrayKlass.inline.hpp
@@ -94,7 +94,7 @@ void FlatArrayKlass::oop_oop_iterate_elements(flatArrayOop a, OopClosureType* cl

template <typename T, typename OopClosureType>
void FlatArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
- assert(obj->is_flatArray(),"must be a flat array");
+ assert(obj->klass()->is_flatArray_klass(),"must be a flat array");
flatArrayOop a = flatArrayOop(obj);

if (Devirtualizer::do_metadata(closure)) {
src/hotspot/share/oops/inlineKlass.cpp
@@ -60,8 +60,9 @@ InlineKlass::InlineKlass(const ClassFileParser& parser)
*((address*)adr_unpack_handler()) = NULL;
assert(pack_handler() == NULL, "pack handler not null");
*((int*)adr_default_value_offset()) = 0;
- set_prototype_header(markWord::always_locked_prototype());
- assert(is_inline_type_klass(), "invariant");
+ set_prototype_header(markWord::inline_type_prototype());
+ assert(is_inline_type_klass(), "sanity");
+ assert(prototype_header().is_inline_type(), "sanity");
}

oop InlineKlass::default_value() {
@@ -128,15 +129,15 @@ instanceOop InlineKlass::allocate_instance(TRAPS) {
int size = size_helper(); // Query before forming handle.

instanceOop oop = (instanceOop)Universe::heap()->obj_allocate(this, size, CHECK_NULL);
- assert(oop->mark().is_always_locked(), "Unlocked inline type");
+ assert(oop->mark().is_inline_type(), "Expected inline type");
return oop;
}

instanceOop InlineKlass::allocate_instance_buffer(TRAPS) {
int size = size_helper(); // Query before forming handle.

instanceOop oop = (instanceOop)Universe::heap()->obj_buffer_allocate(this, size, CHECK_NULL);
- assert(oop->mark().is_always_locked(), "Unlocked inline type");
+ assert(oop->mark().is_inline_type(), "Expected inline type");
return oop;
}

@@ -549,12 +550,12 @@ InlineKlass* InlineKlass::returned_inline_klass(const RegisterMap& map) {

void InlineKlass::verify_on(outputStream* st) {
InstanceKlass::verify_on(st);
- guarantee(prototype_header().is_always_locked(), "Prototype header is not always locked");
+ guarantee(prototype_header().is_inline_type(), "Prototype header is not inline type");
}

void InlineKlass::oop_verify_on(oop obj, outputStream* st) {
InstanceKlass::oop_verify_on(obj, st);
- guarantee(obj->mark().is_always_locked(), "Header is not always locked");
+ guarantee(obj->mark().is_inline_type(), "Header is not inline type");
}

void InlineKlass::metaspace_pointers_do(MetaspaceClosure* it) {
