Store (narrow) klass in object header
Reviewed-by: shade
rkennke committed May 26, 2021
1 parent c6e863a commit 161e7c871b10677744c77de8c3ea0e753097a1d8
@@ -157,16 +157,10 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i


void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
assert_different_registers(obj, klass, len);
Register tmp_encode_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
if (UseBiasedLocking && !len->is_valid()) {
assert_different_registers(obj, klass, len, t1, t2);
movptr(t1, Address(klass, Klass::prototype_header_offset()));
movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
} else {
// This assumes that all prototype bits fit in an int32_t
movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markWord::prototype().value());
}
assert_different_registers(obj, klass, len, t1, t2);
movptr(t1, Address(klass, Klass::prototype_header_offset()));
movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
#ifdef _LP64
if (UseCompressedClassPointers) { // Take care not to kill klass
movptr(t1, klass);
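
This hunk reduces C1's header initialization to a single load-and-store of the per-klass prototype header. A minimal runtime-level sketch of the equivalent logic, assuming Klass::prototype_header() already embeds the narrow klass (it is set up that way in the Klass constructor hunk below):

    // Sketch only: the C1 assembler path above emits machine code for this.
    void initialize_header_sketch(oopDesc* obj, Klass* klass) {
      // A single word store now initializes lock bits, age, hash and the
      // narrow klass at once, replacing the old UseBiasedLocking special case.
      obj->set_mark(klass->prototype_header());
    }
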
@@ -4043,15 +4043,9 @@ void TemplateTable::_new() {

// initialize object header only.
__ bind(initialize_header);
if (UseBiasedLocking) {
__ pop(rcx); // get saved klass back in the register.
__ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
} else {
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
(intptr_t)markWord::prototype().value()); // header
__ pop(rcx); // get saved klass back in the register.
}
__ pop(rcx); // get saved klass back in the register.
__ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
#ifdef _LP64
__ xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
__ store_klass_gap(rax, rsi); // zero klass gap for compressed oops
@@ -39,6 +39,7 @@
#include "memory/memRegion.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oopHandle.inline.hpp"
#include "runtime/arguments.hpp"
@@ -731,6 +732,9 @@ void ArchiveBuilder::make_klasses_shareable() {
for (int i = 0; i < klasses()->length(); i++) {
Klass* k = klasses()->at(i);
k->remove_java_mirror();
Klass* requested_k = to_requested(k);
narrowKlass nk = CompressedKlassPointers::encode_not_null(requested_k, _requested_static_archive_bottom);
k->set_prototype_header(markWord::prototype() LP64_ONLY(.set_narrow_klass(nk)));
if (k->is_objArray_klass()) {
// InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
// on their array classes.
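
For CDS, the prototype header must encode the klass at its requested (future mapped) address rather than its current one, hence the to_requested()/encode_not_null() pair here. A hedged sketch of the base-relative encoding this relies on; the real work is done by CompressedKlassPointers::encode_not_null(k, base), and the shift value is an assumption:

    narrowKlass encode_for_archive_sketch(Klass* requested_k, address archive_bottom) {
      // Offset from the archive base, scaled by the (assumed) encoding shift.
      uintptr_t offset = (uintptr_t)requested_k - (uintptr_t)archive_bottom;
      return (narrowKlass)(offset >> 0 /* encoding shift; 0 assumed for illustration */);
    }
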
@@ -776,6 +780,9 @@ void ArchiveBuilder::relocate_klass_ptr(oop o) {
Klass* k = get_relocated_klass(o->klass());
Klass* requested_k = to_requested(k);
narrowKlass nk = CompressedKlassPointers::encode_not_null(requested_k, _requested_static_archive_bottom);
#ifdef _LP64
o->set_mark(o->mark().set_narrow_klass(nk));
#endif
o->set_narrow_klass(nk);
}

@@ -275,7 +275,8 @@ oop HeapShared::archive_heap_object(oop obj) {
// identity_hash for all shared objects, so they are less likely to be written
// into during run time, increasing the potential of memory sharing.
int hash_original = obj->identity_hash();
archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original));
narrowKlass nklass = obj->mark().narrow_klass();
archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original) LP64_ONLY(.set_narrow_klass(nklass)));
assert(archived_oop->mark().is_unlocked(), "sanity");

DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
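
Since the mark word now carries the narrow klass alongside the identity hash, the archived copy's header is rebuilt from both pieces. A minimal sketch of that assembly, assuming the LP64-only set_narrow_klass() added in markWord.cpp below:

    markWord archived_mark_sketch(oop obj, int hash_original) {
      markWord m = markWord::prototype().copy_set_hash(hash_original);
    #ifdef _LP64
      m = m.set_narrow_klass(obj->mark().narrow_klass());  // keep the klass bits
    #endif
      return m;
    }
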
@@ -416,11 +417,7 @@ void HeapShared::copy_roots() {
memset(mem, 0, size * BytesPerWord);
{
// This is copied from MemAllocator::finish
if (UseBiasedLocking) {
oopDesc::set_mark(mem, k->prototype_header());
} else {
oopDesc::set_mark(mem, markWord::prototype());
}
oopDesc::set_mark(mem, markWord::prototype_for_klass(k));
oopDesc::release_set_klass(mem, k);
}
{
@@ -60,11 +60,11 @@ class G1ResetSkipCompactingClosure : public HeapRegionClosure {

size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) {
size_t size = obj->size();
HeapWord* destination = cast_from_oop<HeapWord*>(obj->forwardee());
if (destination == NULL) {
if (!obj->is_forwarded()) {
// Object not moving
return size;
}
HeapWord* destination = cast_from_oop<HeapWord*>(obj->forwardee());

// copy object and reinit its mark
HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
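
The old NULL test on the decoded forwardee no longer works once the upper mark bits hold the narrow klass: decode_pointer() on a non-forwarded header is no longer NULL. The reliable signal is the lock-bit pattern that is_forwarded() checks. A sketch of that predicate, taking mainline HotSpot's 0b11 "marked" convention as an assumption:

    bool is_forwarded_sketch(markWord m) {
      const uintptr_t lock_mask_in_place = 0x3;  // low two lock bits
      const uintptr_t marked_value       = 0x3;  // "marked"/forwarded pattern
      return (m.value() & lock_mask_in_place) == marked_value;
    }
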
@@ -105,7 +105,12 @@ void G1FullGCCompactionPoint::forward(oop object, size_t size) {
if (cast_from_oop<HeapWord*>(object) != _compaction_top) {
object->forward_to(cast_to_oop(_compaction_top));
} else {
if (object->forwardee() != NULL) {
// TODO: This seems to be checking if mark-word looks like a forwarding pointer, and fix it if
// it doesn't. That is because compaction code (G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj))
// used to do the same check. However, it is more reliable to first check the lower bits (is_forwarded())
// instead before accepting the forwardee. The code in G1FullCompactTask has been changed accordingly,
// which should make this block superfluous.
if ((cast_from_oop<uintptr_t>(object->forwardee()) & 0x00000000ffffffff) != 0) {
// Object should not move but mark-word is used so it looks like the
// object is forwarded. Need to clear the mark and it's no problem
// since it will be restored by preserved marks. There is an exception
@@ -122,7 +127,7 @@ void G1FullGCCompactionPoint::forward(oop object, size_t size) {
"should have correct prototype obj: " PTR_FORMAT " mark: " PTR_FORMAT " prototype: " PTR_FORMAT,
p2i(object), object->mark().value(), markWord::prototype_for_klass(object->klass()).value());
}
assert(object->forwardee() == NULL, "should be forwarded to NULL");
assert((cast_from_oop<uintptr_t>(object->forwardee()) & 0x00000000ffffffff) == 0, "should be forwarded to NULL");
}

// Update compaction values.
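
The low-32-bit mask in this check works because a freshly initialized mark now has only klass bits above bit 31 and the unlocked pattern below, while a genuine forwardee is a heap address with non-zero low bits. A worked illustration (all values made up):

    // Non-forwarded:  mark = 0x0000123400000001   (nklass << 32 | unlocked)
    //   forwardee() clears the lock bits -> 0x0000123400000000
    //   low 32 bits == 0, so the object is left in place
    // Forwarded:      mark = 0x00007f3a1c2b4003   (compaction_top | marked)
    //   forwardee() -> 0x00007f3a1c2b4000; low 32 bits != 0 -> forwardee accepted
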
@@ -76,8 +76,7 @@ template <class T> inline void G1AdjustClosure::adjust_pointer(T* p) {
return;
}

oop forwardee = obj->forwardee();
if (forwardee == NULL) {
if (!obj->is_forwarded()) {
// Not forwarded, return current reference.
assert(obj->mark() == markWord::prototype_for_klass(obj->klass()) || // Correct mark
obj->mark_must_be_preserved() || // Will be restored by PreservedMarksSet
@@ -88,6 +87,7 @@ template <class T> inline void G1AdjustClosure::adjust_pointer(T* p) {
}

// Forwarded, just update.
oop forwardee = obj->forwardee();
assert(G1CollectedHeap::heap()->is_in_reserved(forwardee), "should be in object space");
RawAccess<IS_NOT_NULL>::oop_store(p, forwardee);
}
@@ -185,7 +185,7 @@ size_t G1FullGCPrepareTask::G1RePrepareClosure::apply(oop obj) {
// We only re-prepare objects forwarded within the current region, so
// skip objects that are already forwarded to another region.
oop forwarded_to = obj->forwardee();
if (forwarded_to != NULL && !_current->is_in(forwarded_to)) {
if (obj->is_forwarded() && !_current->is_in(forwarded_to)) {
return obj->size();
}

@@ -79,15 +79,10 @@ template <class T> inline void MarkSweep::adjust_pointer(T* p) {
oop obj = CompressedOops::decode_not_null(heap_oop);
assert(Universe::heap()->is_in(obj), "should be in heap");

oop new_obj = cast_to_oop(obj->mark().decode_pointer());

assert(new_obj != NULL || // is forwarding ptr?
obj->mark() == markWord::prototype() || // not gc marked?
(UseBiasedLocking && obj->mark().has_bias_pattern()),
// not gc marked?
"should be forwarded");

if (new_obj != NULL) {
markWord header = obj->mark();
if (header.is_marked()) {
oop new_obj = cast_to_oop(header.decode_pointer());
assert(new_obj != NULL, "must be forwarded");
assert(is_object_aligned(new_obj), "oop must be aligned");
RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
}
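
The rewritten adjust_pointer keys the decode off header.is_marked() rather than a NULL result, since a klass-carrying header would decode to a non-NULL, non-forwarding value for unmarked objects. Condensed, the new flow is equivalent to this sketch:

    oop forwardee_or_null_sketch(oop obj) {
      markWord header = obj->mark();
      // Only a GC-marked header stores a forwarding pointer worth decoding.
      return header.is_marked() ? cast_to_oop(header.decode_pointer()) : oop(NULL);
    }
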
@@ -384,12 +384,7 @@ void MemAllocator::mem_clear(HeapWord* mem) const {

oop MemAllocator::finish(HeapWord* mem) const {
assert(mem != NULL, "NULL object pointer");
if (UseBiasedLocking) {
oopDesc::set_mark(mem, _klass->prototype_header());
} else {
// May be bootstrapping
oopDesc::set_mark(mem, markWord::prototype());
}
oopDesc::set_mark(mem, _klass->prototype_header());
// Need a release store to ensure array/class length, mark word, and
// object zeroing are visible before setting the klass non-NULL, for
// concurrent collectors.
@@ -376,7 +376,7 @@ HeapWord* CompactibleSpace::forward(oop q, size_t size,
// if the object isn't moving we can just set the mark to the default
// mark and handle it specially later on.
q->init_mark();
assert(q->forwardee() == NULL, "should be forwarded to NULL");
assert(!q->is_forwarded(), "should not be forwarded");
}

compact_top += size;
@@ -592,14 +592,14 @@ void ContiguousSpace::allocate_temporary_filler(int factor) {
// allocate uninitialized int array
typeArrayOop t = (typeArrayOop) cast_to_oop(allocate(size));
assert(t != NULL, "allocation should succeed");
t->set_mark(markWord::prototype());
t->set_mark(markWord::prototype_for_klass(Universe::intArrayKlassObj()));
t->set_klass(Universe::intArrayKlassObj());
t->set_length((int)length);
} else {
assert(size == CollectedHeap::min_fill_size(),
"size for smallest fake object doesn't match");
instanceOop obj = (instanceOop) cast_to_oop(allocate(size));
obj->set_mark(markWord::prototype());
obj->set_mark(markWord::prototype_for_klass(vmClasses::Object_klass()));
obj->set_klass_gap(0);
obj->set_klass(vmClasses::Object_klass());
}
@@ -202,7 +202,7 @@ void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word
// The constructor is also used from CppVtableCloner,
// which doesn't zero out the memory before calling the constructor.
Klass::Klass(KlassID id) : _id(id),
_prototype_header(markWord::prototype()),
_prototype_header(markWord::prototype() LP64_ONLY(.set_klass(this))),
_shared_class_path_index(-1) {
CDS_ONLY(_shared_class_flags = 0;)
CDS_JAVA_HEAP_ONLY(_archived_mirror_index = -1;)
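
With the narrow klass folded in at Klass construction time, the prototype header is a per-class constant, and every allocation path in this patch (C1, the interpreter, MemAllocator::finish, C2's make_ideal_mark) installs it with one load-and-store. A worked example of what the initializer computes, with a made-up narrow klass and mainline's unlocked prototype bits assumed:

    // markWord::prototype().value()              == 0x0000000000000001  (unlocked)
    // set_klass(this), assumed encoding 0x1234   -> 0x0000123400000001
    // i.e. _prototype_header = (nklass << 32) | prototype bits
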
@@ -23,6 +23,7 @@
*/

#include "precompiled.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/markWord.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/objectMonitor.inline.hpp"
@@ -98,3 +99,24 @@ void markWord::print_on(outputStream* st, bool print_monitor_info) const {
st->print(" age=%d)", age());
}
}

#ifdef _LP64
narrowKlass markWord::narrow_klass() const {
return narrowKlass(value() >> klass_shift);
}

Klass* markWord::klass() const {
return CompressedKlassPointers::decode(narrow_klass());
}

markWord markWord::set_narrow_klass(const narrowKlass nklass) const {
return markWord((value() & ~klass_mask_in_place) | ((uintptr_t) nklass << klass_shift));
}

markWord markWord::set_klass(const Klass* klass) const {
assert(UseCompressedClassPointers, "expect compressed klass pointers");
// TODO: Don't cast to non-const, change CKP::encode() to accept const Klass* instead.
narrowKlass nklass = CompressedKlassPointers::encode(const_cast<Klass*>(klass));
return set_narrow_klass(nklass);
}
#endif
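
A worked example of the set_narrow_klass() arithmetic above, using the klass_shift/klass_mask_in_place constants from markWord.hpp below (operand values made up):

    //   value()                = 0x00000000cafe0081   (hash, age, lock bits)
    //   nklass                 = 0x00001234
    //   ~klass_mask_in_place   = 0x00000000ffffffff
    //   result                 = (value() & 0x00000000ffffffff) | (nklass << 32)
    //                          = 0x00001234cafe0081
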
@@ -132,6 +132,9 @@ class markWord {
static const int max_hash_bits = BitsPerWord - age_bits - lock_bits - biased_lock_bits;
static const int hash_bits = max_hash_bits > 25 ? 25 : max_hash_bits;
static const int epoch_bits = 2;
#ifdef _LP64
static const int klass_bits = 32;
#endif

// The biased locking code currently requires that the age bits be
// contiguous to the lock bits.
@@ -140,6 +143,9 @@ class markWord {
static const int age_shift = lock_bits + biased_lock_bits;
static const int hash_shift = age_shift + age_bits;
static const int epoch_shift = hash_shift;
#ifdef _LP64
static const int klass_shift = hash_shift + hash_bits;
#endif

static const uintptr_t lock_mask = right_n_bits(lock_bits);
static const uintptr_t lock_mask_in_place = lock_mask << lock_shift;
@@ -154,6 +160,11 @@ class markWord {
static const uintptr_t hash_mask = right_n_bits(hash_bits);
static const uintptr_t hash_mask_in_place = hash_mask << hash_shift;

#ifdef _LP64
static const uintptr_t klass_mask = right_n_bits(klass_bits);
static const uintptr_t klass_mask_in_place = klass_mask << klass_shift;
#endif

// Alignment of JavaThread pointers encoded in object header required by biased locking
static const size_t biased_lock_alignment = 2 << (epoch_shift + epoch_bits);

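Put together, the constants above imply the following 64-bit mark-word layout (derived from the shown shifts and widths, not quoted from the patch):

    //  bits 63..32  narrow klass    (klass_shift = 32, klass_bits = 32)
    //  bits 31..7   identity hash   (hash_shift  = 7,  hash_bits  = 25)
    //  bits  6..3   GC age          (age_shift   = 3,  age_bits   = 4)
    //  bit      2   biased-lock bit
    //  bits  1..0   lock bits
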
@@ -336,6 +347,13 @@ class markWord {
return hash() == no_hash;
}

#ifdef _LP64
narrowKlass narrow_klass() const;
Klass* klass() const;
markWord set_klass(const Klass* klass) const;
markWord set_narrow_klass(const narrowKlass klass) const;
#endif

// Prototype mark for initialization
static markWord prototype() {
return markWord( no_hash_in_place | no_lock_in_place );
@@ -70,8 +70,7 @@ inline bool markWord::must_be_preserved_for_promotion_failure(KlassProxy klass)

inline markWord markWord::prototype_for_klass(const Klass* klass) {
markWord prototype_header = klass->prototype_header();
assert(prototype_header == prototype() || prototype_header.has_bias_pattern(), "corrupt prototype header");

assert(prototype_header == prototype() LP64_ONLY(.set_klass(klass)) || prototype_header.has_bias_pattern(), "corrupt prototype header;");
return prototype_header;
}

@@ -74,7 +74,14 @@ markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memo
}

void oopDesc::init_mark() {
set_mark(markWord::prototype_for_klass(klass()));
markWord header = markWord::prototype();
#ifdef _LP64
assert(UseCompressedClassPointers, "expect compressed klass pointers");
narrowKlass nklass = _metadata._compressed_klass;
assert(nklass != 0, "expect klass");
header = header.set_narrow_klass(nklass);
#endif
set_mark(header);
}

Klass* oopDesc::klass() const {
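
init_mark() now rebuilds the header directly from the raw compressed-klass field instead of calling prototype_for_klass(), which would decode the narrow klass to a Klass* only to fetch a prototype header that already contains it. In sketch form, the two formulations on 64-bit:

    // new:  set_mark(markWord::prototype().set_narrow_klass(_metadata._compressed_klass));
    // old:  set_mark(markWord::prototype_for_klass(klass()));  // decode, then re-fetch
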
@@ -1627,15 +1627,9 @@ void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
}
}
Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {
Node* mark_node = NULL;
// For now only enable fast locking for non-array types
if (UseBiasedLocking && Opcode() == Op_Allocate) {
Node* klass_node = in(AllocateNode::KlassNode);
Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
} else {
mark_node = phase->MakeConX(markWord::prototype().value());
}
Node* klass_node = in(AllocateNode::KlassNode);
Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
Node* mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
return mark_node;
}

@@ -125,7 +125,7 @@ const size_t minimumSymbolTableSize = 1024;
"Use 32-bit object references in 64-bit VM. " \
"lp64_product means flag is always constant in 32 bit VM") \
\
product(bool, UseCompressedClassPointers, false, \
product(bool, UseCompressedClassPointers, true, \
"Use 32-bit class pointers in 64-bit VM. " \
"lp64_product means flag is always constant in 32 bit VM") \
\
@@ -148,9 +148,11 @@ void HeapObjectStatistics::visit_object(oop obj) {
increase_counter(_num_locked);
}
#ifdef ASSERT
#ifdef _LP64
if (!mark.has_displaced_mark_helper()) {
assert((mark.value() & 0xffffffff00000000) == 0, "upper 32 mark bits must be free");
assert(mark.narrow_klass() == CompressedKlassPointers::encode(obj->klass_or_null()), "upper 32 mark bits must be narrow klass: mark: " INTPTR_FORMAT ", compressed-klass: " INTPTR_FORMAT, (intptr_t)mark.narrow_klass(), (intptr_t)CompressedKlassPointers::encode(obj->klass_or_null()));
}
#endif
#endif
increase_counter(_lds, obj->size());
}
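
The strengthened assert pins down the central invariant of this change: whenever the mark is not displaced, its upper 32 bits must be exactly the object's narrow klass. Restated as a sketch:

    // Invariant (sketch):
    //   !mark.has_displaced_mark_helper()
    //     ==> mark.narrow_klass() == CompressedKlassPointers::encode(obj->klass())
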
