8234794: AArch64: runtime/memory/ReadFromNoaccessArea.java crashes

Try to map CDS shared space at 4G aligned address on AArch64

Reviewed-by: adinn, aph, jiangli, iklam
nick-arm committed Dec 19, 2019
1 parent 03c3f17 commit aea0a575123037d39331d4d2fe6de4c228274a53
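
For context, the fix relies on the compressed class base being a multiple of 4G: such a base has its low 32 bits clear, so a narrow klass value and the base occupy disjoint bit ranges and can be merged with a single MOVK instead of a full add. A minimal standalone illustration of that bit layout (plain C++, not HotSpot code; the concrete values are made up):

#include <cassert>
#include <cstdint>

int main() {
  // Assume a 4G-aligned compressed class base below 2^48, e.g. 32G,
  // and a compressed klass value with shift == 0 for simplicity.
  const uint64_t base   = UINT64_C(0x800000000);   // 32G: low 32 bits are zero
  const uint32_t narrow = 0x00123456;

  assert((base & 0xffffffff) == 0);   // 4G alignment <=> low 32 bits clear
  assert((base >> 48) == 0);          // base fits in bits [32, 48): one 16-bit MOVK field

  // OR-ing in bits [32, 48) of the base reconstructs the full pointer, which
  // is what "movk dst, #(base >> 32), lsl #32" achieves on AArch64.
  uint64_t decoded = (uint64_t)narrow | (base & UINT64_C(0xffff00000000));
  assert(decoded == base + narrow);
  return 0;
}
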
@@ -59,4 +59,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false;

#define THREAD_LOCAL_POLL

#define PREFERRED_METASPACE_ALIGNMENT

#endif // CPU_AARCH64_GLOBALDEFINITIONS_AARCH64_HPP
@@ -3904,96 +3904,116 @@ void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
}
}

void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone);

MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
assert(UseCompressedClassPointers, "not using compressed class pointers");
assert(Metaspace::initialized(), "metaspace not initialized yet");

if (_klass_decode_mode != KlassDecodeNone) {
return _klass_decode_mode;
}

assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift()
|| 0 == CompressedKlassPointers::shift(), "decode alg wrong");

if (CompressedKlassPointers::base() == NULL) {
return (_klass_decode_mode = KlassDecodeZero);
}

if (operand_valid_for_logical_immediate(
/*is32*/false, (uint64_t)CompressedKlassPointers::base())) {
const uint64_t range_mask =
(1UL << log2_intptr(CompressedKlassPointers::range())) - 1;
if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) {
return (_klass_decode_mode = KlassDecodeXor);
}
}

const uint64_t shifted_base =
(uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
guarantee((shifted_base & 0xffff0000ffffffff) == 0,
"compressed class base bad alignment");

return (_klass_decode_mode = KlassDecodeMovk);
}
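
The selection above amounts to: a null base decodes by shift alone; a base that is a valid AArch64 logical immediate and does not overlap the klass range decodes with an EOR; otherwise fall back to MOVK, which the guarantee restricts to bases whose shifted value has set bits only in [32, 48). A rough standalone sketch of that decision, with the logical-immediate test passed in rather than reimplemented (hypothetical names, not HotSpot code):

#include <cstdint>

enum class KlassDecodeMode { Zero, Xor, Movk, Unsupported };

// base_is_logical_imm stands in for operand_valid_for_logical_immediate(),
// which is non-trivial to reproduce here; pass what that test would return.
KlassDecodeMode choose_mode(uint64_t base, uint64_t range, int shift,
                            bool base_is_logical_imm) {
  if (base == 0) {
    return KlassDecodeMode::Zero;            // narrow klass is just klass >> shift
  }
  const uint64_t range_mask = range - 1;     // assumes range is a power of two
  if (base_is_logical_imm && (base & range_mask) == 0) {
    return KlassDecodeMode::Xor;             // base bits and klass-offset bits are disjoint
  }
  const uint64_t shifted_base = base >> shift;
  if ((shifted_base & UINT64_C(0xffff0000ffffffff)) == 0) {
    return KlassDecodeMode::Movk;            // shifted base fits one 16-bit MOVK field
  }
  return KlassDecodeMode::Unsupported;       // the real code guarantee()s against this
}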

void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
switch (klass_decode_mode()) {
case KlassDecodeZero:
if (CompressedKlassPointers::shift() != 0) {
assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
lsr(dst, src, LogKlassAlignmentInBytes);
} else {
if (dst != src) mov(dst, src);
}
return;
}
break;

if (use_XOR_for_compressed_class_base) {
case KlassDecodeXor:
if (CompressedKlassPointers::shift() != 0) {
eor(dst, src, (uint64_t)CompressedKlassPointers::base());
lsr(dst, dst, LogKlassAlignmentInBytes);
} else {
eor(dst, src, (uint64_t)CompressedKlassPointers::base());
}
return;
}

if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
&& CompressedKlassPointers::shift() == 0) {
movw(dst, src);
return;
}
break;

#ifdef ASSERT
verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?");
#endif
case KlassDecodeMovk:
if (CompressedKlassPointers::shift() != 0) {
ubfx(dst, src, LogKlassAlignmentInBytes, 32);
} else {
movw(dst, src);
}
break;

Register rbase = dst;
if (dst == src) rbase = rheapbase;
mov(rbase, (uint64_t)CompressedKlassPointers::base());
sub(dst, src, rbase);
if (CompressedKlassPointers::shift() != 0) {
assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
lsr(dst, dst, LogKlassAlignmentInBytes);
case KlassDecodeNone:
ShouldNotReachHere();
break;
}
if (dst == src) reinit_heapbase();
}

void MacroAssembler::encode_klass_not_null(Register r) {
encode_klass_not_null(r, r);
}

void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
Register rbase = dst;
assert (UseCompressedClassPointers, "should only be used for compressed headers");

if (CompressedKlassPointers::base() == NULL) {
switch (klass_decode_mode()) {
case KlassDecodeZero:
if (CompressedKlassPointers::shift() != 0) {
assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
lsl(dst, src, LogKlassAlignmentInBytes);
} else {
if (dst != src) mov(dst, src);
}
return;
}
break;

if (use_XOR_for_compressed_class_base) {
case KlassDecodeXor:
if (CompressedKlassPointers::shift() != 0) {
lsl(dst, src, LogKlassAlignmentInBytes);
eor(dst, dst, (uint64_t)CompressedKlassPointers::base());
} else {
eor(dst, src, (uint64_t)CompressedKlassPointers::base());
}
return;
}
break;

if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
&& CompressedKlassPointers::shift() == 0) {
if (dst != src)
movw(dst, src);
movk(dst, (uint64_t)CompressedKlassPointers::base() >> 32, 32);
return;
case KlassDecodeMovk: {
const uint64_t shifted_base =
(uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();

if (dst != src) movw(dst, src);
movk(dst, shifted_base >> 32, 32);

if (CompressedKlassPointers::shift() != 0) {
lsl(dst, dst, LogKlassAlignmentInBytes);
}

break;
}

// Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
if (dst == src) rbase = rheapbase;
mov(rbase, (uint64_t)CompressedKlassPointers::base());
if (CompressedKlassPointers::shift() != 0) {
assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
add(dst, rbase, src, Assembler::LSL, LogKlassAlignmentInBytes);
} else {
add(dst, rbase, src);
case KlassDecodeNone:
ShouldNotReachHere();
break;
}
if (dst == src) reinit_heapbase();
}
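
Read as plain arithmetic, the three decode paths above compute the following (a minimal sketch under the same base and shift preconditions that klass_decode_mode() establishes; it mirrors what the emitted instructions compute, it is not the emitted code):

#include <cassert>
#include <cstdint>

// 'narrow' is the 32-bit compressed klass value.
uint64_t decode_zero(uint32_t narrow, int shift) {
  return (uint64_t)narrow << shift;                        // lsl
}

uint64_t decode_xor(uint32_t narrow, uint64_t base, int shift) {
  return ((uint64_t)narrow << shift) ^ base;               // lsl; eor with base
}

uint64_t decode_movk(uint32_t narrow, uint64_t base, int shift) {
  uint64_t shifted_base = base >> shift;
  uint64_t v = (uint64_t)narrow
             | (shifted_base & UINT64_C(0xffff00000000));  // movw; movk #(shifted_base >> 32), lsl #32
  return v << shift;                                       // lsl
}

int main() {
  // Example values satisfying each mode's precondition (made up for the test).
  const int shift = 3;                                 // LogKlassAlignmentInBytes
  const uint64_t xor_base  = UINT64_C(0x7000000000);   // disjoint from the shifted-klass bits
  const uint64_t movk_base = UINT64_C(0x800000000);    // 4G-aligned, below 2^48
  const uint32_t narrow    = 0x00123456;

  assert(decode_zero(narrow, shift)            == ((uint64_t)narrow << shift));
  assert(decode_xor(narrow, xor_base, shift)   == xor_base  + ((uint64_t)narrow << shift));
  assert(decode_movk(narrow, movk_base, shift) == movk_base + ((uint64_t)narrow << shift));
  return 0;
}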

void MacroAssembler::decode_klass_not_null(Register r) {
@@ -80,17 +80,20 @@ class MacroAssembler: public Assembler {

void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

// True if an XOR can be used to expand narrow klass references.
bool use_XOR_for_compressed_class_base;
enum KlassDecodeMode {
KlassDecodeNone,
KlassDecodeZero,
KlassDecodeXor,
KlassDecodeMovk
};

KlassDecodeMode klass_decode_mode();

private:
static KlassDecodeMode _klass_decode_mode;

public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {
use_XOR_for_compressed_class_base
= operand_valid_for_logical_immediate
(/*is32*/false, (uint64_t)CompressedKlassPointers::base())
&& ((uint64_t)CompressedKlassPointers::base()
> (1UL << log2_intptr(CompressedKlassPointers::range())));
}
MacroAssembler(CodeBuffer* code) : Assembler(code) {}

// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
@@ -58,6 +58,10 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
#define INCLUDE_RTM_OPT 0
#endif

#if defined(AIX)
#define PREFERRED_METASPACE_ALIGNMENT
#endif

#define SUPPORT_RESERVED_STACK_AREA

#define THREAD_LOCAL_POLL
@@ -1011,6 +1011,79 @@ void Metaspace::set_narrow_klass_base_and_shift(ReservedSpace metaspace_rs, addr
AOTLoader::set_narrow_klass_shift();
}

#ifdef PREFERRED_METASPACE_ALIGNMENT
ReservedSpace Metaspace::reserve_preferred_space(size_t size, size_t alignment,
bool large_pages, char *requested_addr,
bool use_requested_addr) {
// Our compressed klass pointers may fit nicely into the lower 32 bits.
if (requested_addr != NULL && (uint64_t)requested_addr + size < 4*G) {
ReservedSpace rs(size, alignment, large_pages, requested_addr);
if (rs.is_reserved() || use_requested_addr) {
return rs;
}
}

struct SearchParams { uintptr_t limit; size_t increment; };

// AArch64: Try to align metaspace so that we can decode a compressed
// klass with a single MOVK instruction. We can do this iff the
// compressed class base is a multiple of 4G.
// Aix: Search for a place where we can find memory. If we need to load
// the base, 4G alignment is helpful, too.

// Go faster above 32G as it is no longer possible to use a zero base.
// AArch64: Additionally, ensure the lower LogKlassAlignmentInBytes
// bits of the upper 32-bits of the address are zero so we can handle
// a shift when decoding.

static const SearchParams search_params[] = {
// Limit Increment
{ 32*G, AARCH64_ONLY(4*)G, },
{ 1024*G, (4 AARCH64_ONLY(<< LogKlassAlignmentInBytes))*G },
};

// Null requested_addr means allocate anywhere so ensure the search
// begins from a non-null address.
char *a = MAX2(requested_addr, (char *)search_params[0].increment);

for (const SearchParams *p = search_params;
p < search_params + ARRAY_SIZE(search_params);
++p) {
a = align_up(a, p->increment);
if (use_requested_addr && a != requested_addr)
return ReservedSpace();

for (; a < (char *)p->limit; a += p->increment) {
ReservedSpace rs(size, alignment, large_pages, a);
if (rs.is_reserved() || use_requested_addr) {
return rs;
}
}
}

return ReservedSpace();
}
#endif // PREFERRED_METASPACE_ALIGNMENT
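
The search loop boils down to probing 4G-aligned addresses below 32G (where a zero or XOR base may still be reachable) and then coarser, 32G-aligned addresses up to 1024G so that the base stays MOVK-decodable even after the shift. A minimal standalone sketch that only enumerates the AArch64 candidate addresses, with no actual reservation (constants and names are illustrative):

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t G = UINT64_C(1) << 30;
  const int LOG_KLASS_ALIGNMENT = 3;      // stands in for LogKlassAlignmentInBytes

  // Mirrors search_params for the AArch64 case.
  struct { uint64_t limit, increment; } params[] = {
    { 32 * G,   4 * G },                                      // below 32G: any 4G-aligned base
    { 1024 * G, (UINT64_C(4) << LOG_KLASS_ALIGNMENT) * G },   // above: 32G-aligned so the shifted
  };                                                          // base still fits one MOVK field

  uint64_t a = params[0].increment;                  // start from a non-null address
  for (const auto &p : params) {
    a = (a + p.increment - 1) & ~(p.increment - 1);  // align_up(a, p.increment)
    for (; a < p.limit; a += p.increment) {
      printf("candidate base: 0x%llx\n", (unsigned long long)a);
    }
  }
  return 0;
}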

// Try to reserve a region for the metaspace at the requested address. Some
// platforms have particular alignment requirements to allow efficient decode of
// compressed class pointers in which case requested_addr is treated as hint for
// where to start looking unless use_requested_addr is true.
ReservedSpace Metaspace::reserve_space(size_t size, size_t alignment,
char* requested_addr, bool use_requested_addr) {
bool large_pages = false; // Don't use large pages for the class space.
assert(is_aligned(requested_addr, alignment), "must be");
assert(requested_addr != NULL || !use_requested_addr,
"cannot set use_requested_addr with NULL address");

#ifdef PREFERRED_METASPACE_ALIGNMENT
return reserve_preferred_space(size, alignment, large_pages,
requested_addr, use_requested_addr);
#else
return ReservedSpace(size, alignment, large_pages, requested_addr);
#endif
}

// Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(ReservedSpace metaspace_rs, char* requested_addr, address cds_base) {
assert(!DumpSharedSpaces, "compress klass space is allocated by MetaspaceShared class.");
@@ -1022,53 +1095,16 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(ReservedSpace metaspace
assert_is_aligned(cds_base, _reserve_alignment);
assert_is_aligned(compressed_class_space_size(), _reserve_alignment);

// Don't use large pages for the class space.
bool large_pages = false;

if (metaspace_rs.is_reserved()) {
// CDS should have already reserved the space.
assert(requested_addr == NULL, "not used");
assert(cds_base != NULL, "CDS should have already reserved the memory space");
} else {
assert(cds_base == NULL, "must be");
#if !(defined(AARCH64) || defined(AIX))
metaspace_rs = ReservedSpace(compressed_class_space_size(), _reserve_alignment,
large_pages, requested_addr);
#else // AARCH64
// Our compressed klass pointers may fit nicely into the lower 32
// bits.
if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
metaspace_rs = ReservedSpace(compressed_class_space_size(),
_reserve_alignment,
large_pages,
requested_addr);
}

if (! metaspace_rs.is_reserved()) {
// Aarch64: Try to align metaspace so that we can decode a compressed
// klass with a single MOVK instruction. We can do this iff the
// compressed class base is a multiple of 4G.
// Aix: Search for a place where we can find memory. If we need to load
// the base, 4G alignment is helpful, too.
size_t increment = AARCH64_ONLY(4*)G;
for (char *a = align_up(requested_addr, increment);
a < (char*)(1024*G);
a += increment) {
if (a == (char *)(32*G)) {
// Go faster from here on. Zero-based is no longer possible.
increment = 4*G;
}

metaspace_rs = ReservedSpace(compressed_class_space_size(),
_reserve_alignment,
large_pages,
a);
if (metaspace_rs.is_reserved())
break;
}
if (metaspace_rs.is_reserved()) {
// CDS should have already reserved the space.
assert(requested_addr == NULL, "not used");
assert(cds_base != NULL, "CDS should have already reserved the memory space");
} else {
assert(cds_base == NULL, "must be");
metaspace_rs = reserve_space(compressed_class_space_size(),
_reserve_alignment, requested_addr,
false /* use_requested_addr */);
}
#endif // AARCH64
}

if (!metaspace_rs.is_reserved()) {
assert(cds_base == NULL, "CDS should have already reserved the memory space");
@@ -1077,8 +1113,8 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(ReservedSpace metaspace
// metaspace as if UseCompressedClassPointers is off because too much
// initialization has happened that depends on UseCompressedClassPointers.
// So, UseCompressedClassPointers cannot be turned off at this point.
metaspace_rs = ReservedSpace(compressed_class_space_size(),
_reserve_alignment, large_pages);
metaspace_rs = reserve_space(compressed_class_space_size(),
_reserve_alignment, NULL, false);
if (!metaspace_rs.is_reserved()) {
vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
compressed_class_space_size()));
@@ -183,6 +183,15 @@ class Metaspace : public AllStatic {
static void initialize_class_space(ReservedSpace rs);
#endif

static ReservedSpace reserve_space(size_t size, size_t alignment,
char* requested_addr, bool use_requested_addr);

#ifdef PREFERRED_METASPACE_ALIGNMENT
static ReservedSpace reserve_preferred_space(size_t size, size_t alignment,
bool large_pages, char *requested_addr,
bool use_requested_addr);
#endif

public:

static void ergo_initialize();
