8254072: AArch64: Get rid of --disable-warnings-as-errors on Windows+ARM64 build

Co-authored-by: Bernhard Urban-Forster <burban@openjdk.org>
Reviewed-by: burban, aph
Backport-of: d2812f7
2 people authored and Vladimir Kempik committed Feb 16, 2022
1 parent 0aa5f75 commit b5f8c318f49859b7faa0d675db6a17b5da797a54
Showing 11 changed files with 31 additions and 28 deletions.
@@ -55,6 +55,9 @@ endif
 # Disabling undef, switch, format-nonliteral and tautological-undefined-compare
 # warnings for clang because of test source.
 
+# Disable MSVC warning C4146 "unary minus operator applied to unsigned type,
+# result still unsigned". This operation is well-defined.
+
 # Solaris: Disable inlining (+d) to workaround Assertion: (../lnk/vardescr.h, line 109)
 $(eval $(call SetupNativeCompilation, BUILD_GTEST_LIBJVM, \
     NAME := jvm, \
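
Note: unary minus on an unsigned operand is well-defined in C++ (the value is reduced modulo 2^N), so silencing C4146 is safe here. A standalone sketch, not part of this patch, of the pattern the warning flags:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t x = 1;
      // What C4146 complains about: the negation stays unsigned,
      // yielding 2^32 - 1 rather than a signed -1.
      uint32_t neg = -x;                       // 0xFFFFFFFF, well-defined
      // A typical legitimate use: isolate the lowest set bit.
      uint32_t v = 0x00A0;
      printf("0x%08x 0x%08x\n", neg, v & -v); // 0xffffffff 0x00000020
      return 0;
    }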
@@ -78,6 +81,7 @@ $(eval $(call SetupNativeCompilation, BUILD_GTEST_LIBJVM, \
     DISABLED_WARNINGS_clang := undef switch format-nonliteral \
         tautological-undefined-compare $(BUILD_LIBJVM_DISABLED_WARNINGS_clang), \
     DISABLED_WARNINGS_solstudio := identexpected, \
+    DISABLED_WARNINGS_microsoft := 4146, \
     LDFLAGS := $(JVM_LDFLAGS), \
     LDFLAGS_solaris := -library=stlport4 $(call SET_SHARED_LIBRARY_ORIGIN), \
     LIBS := $(JVM_LIBS), \
@@ -148,6 +148,8 @@ JVM_STRIPFLAGS ?= $(STRIPFLAGS)
 ################################################################################
 # Now set up the actual compilation of the main hotspot native library
 
+# Disable MSVC warning C4146 "unary minus operator applied to unsigned type,
+# result still unsigned". This operation is well-defined.
 $(eval $(call SetupNativeCompilation, BUILD_LIBJVM, \
     NAME := jvm, \
     TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
@@ -164,6 +166,7 @@ $(eval $(call SetupNativeCompilation, BUILD_LIBJVM, \
     DISABLED_WARNINGS_solstudio := $(DISABLED_WARNINGS_solstudio), \
     DISABLED_WARNINGS_xlc := 1540-0216 1540-0198 1540-1090 1540-1639 \
         1540-1088 1500-010, \
+    DISABLED_WARNINGS_microsoft := 4146, \
     ASFLAGS := $(JVM_ASFLAGS), \
     LDFLAGS := $(JVM_LDFLAGS), \
     LIBS := $(JVM_LIBS), \
@@ -2055,11 +2055,6 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)

 const bool Matcher::match_rule_supported(int opcode) {
 
-  switch (opcode) {
-  default:
-    break;
-  }
-
   if (!has_match_rule(opcode)) {
     return false;
   }
@@ -214,7 +214,7 @@ class Instruction_aarch64 {

   static void patch(address a, int msb, int lsb, uint64_t val) {
     int nbits = msb - lsb + 1;
-    guarantee(val < (1U << nbits), "Field too big for insn");
+    guarantee(val < (1ULL << nbits), "Field too big for insn");
     assert_cond(msb >= lsb);
     unsigned mask = checked_cast<unsigned>(right_n_bits(nbits));
     val <<= lsb;
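
Note: the fix matters beyond the warning. 1U is a 32-bit constant, so 1U << nbits is undefined behavior once nbits reaches 32, and the resulting bound would be wrong for wide fields compared against a 64-bit val. A minimal standalone illustration; fits_in_bits is a hypothetical helper, not HotSpot code:

    #include <cassert>
    #include <cstdint>

    // Check that 'val' fits in the low 'nbits' bits. Shifting 1ULL keeps
    // the arithmetic in 64 bits; with 1U the shift would be undefined
    // behavior for nbits >= 32.
    static bool fits_in_bits(uint64_t val, int nbits) {
      assert(nbits > 0 && nbits < 64);
      return val < (1ULL << nbits);
    }

    int main() {
      assert( fits_in_bits(0xFFFFULL, 16));      // 16-bit value fits
      assert(!fits_in_bits(0x10000ULL, 16));     // one bit too many
      assert( fits_in_bits(0xFFFFFFFFULL, 33));  // UB with 1U << 33
      return 0;
    }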
@@ -436,8 +436,8 @@ class Address {
   }
 
   Register base() const {
-    guarantee((_mode == base_plus_offset | _mode == base_plus_offset_reg
-               | _mode == post | _mode == post_reg),
+    guarantee((_mode == base_plus_offset || _mode == base_plus_offset_reg
+               || _mode == post || _mode == post_reg),
               "wrong mode");
     return _base;
   }
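
Note: bitwise | between these boolean comparisons happens to compute the right value, but || is the intended operator for boolean logic and short-circuits. The practical difference, in a standalone sketch:

    #include <cstdio>

    static bool noisy(const char* tag, bool v) {
      printf("evaluated %s\n", tag);
      return v;
    }

    int main() {
      // '|' evaluates both operands unconditionally; '||' skips the
      // right-hand side once the result is already known.
      bool a = noisy("bitwise lhs", true) |  noisy("bitwise rhs", false); // prints both
      bool b = noisy("logical lhs", true) || noisy("logical rhs", false); // prints lhs only
      return (a && b) ? 0 : 1;
    }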
@@ -676,11 +676,12 @@ intptr_t* frame::real_fp() const {

 #undef DESCRIBE_FP_OFFSET
 
-#define DESCRIBE_FP_OFFSET(name)                                            \
-  {                                                                         \
-    uintptr_t *p = (uintptr_t *)fp;                                         \
-    printf("0x%016lx 0x%016lx %s\n", (uintptr_t)(p + frame::name##_offset), \
-           p[frame::name##_offset], #name);                                 \
+#define DESCRIBE_FP_OFFSET(name)                                            \
+  {                                                                         \
+    uintptr_t *p = (uintptr_t *)fp;                                         \
+    printf(INTPTR_FORMAT " " INTPTR_FORMAT " %s\n",                         \
+           (uintptr_t)(p + frame::name##_offset),                           \
+           p[frame::name##_offset], #name);                                 \
   }
 
 static THREAD_LOCAL_DECL uintptr_t nextfp;
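
Note: the %lx specifier assumes long is pointer-sized, which holds on LP64 Linux but not on 64-bit Windows, where long is 32 bits; HotSpot's INTPTR_FORMAT expands to the correct conversion spec on each platform. A portable standalone equivalent using the standard <cinttypes> macros:

    #include <cinttypes>
    #include <cstdio>

    int main() {
      uintptr_t p = (uintptr_t)&p;  // any pointer-sized value
      // PRIxPTR selects the correct length modifier for uintptr_t on
      // every platform, the same job INTPTR_FORMAT does inside HotSpot.
      printf("0x%016" PRIxPTR "\n", p);
      return 0;
    }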
@@ -1832,7 +1832,7 @@ bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size
     return true;
   } else {
     assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8 bytes or 4 bytes load/store is supported.");
-    const unsigned mask = size_in_bytes - 1;
+    const uint64_t mask = size_in_bytes - 1;
     if (adr.getMode() == Address::base_plus_offset &&
         (adr.offset() & mask) == 0) { // only supports base_plus_offset.
       code()->set_last_insn(pc());
@@ -2774,7 +2774,7 @@ void MacroAssembler::merge_ldst(Register rt,
   // Overwrite previous generated binary.
   code_section()->set_end(prev);
 
-  const int sz = prev_ldst->size_in_bytes();
+  const size_t sz = prev_ldst->size_in_bytes();
   assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
   if (!is_store) {
     BLOCK_COMMENT("merged ldr pair");
@@ -89,7 +89,7 @@ class MacroAssembler: public Assembler {
       = (operand_valid_for_logical_immediate(false /*is32*/,
                                              (uint64_t)Universe::narrow_klass_base())
          && ((uint64_t)Universe::narrow_klass_base()
-             > (1UL << log2_intptr((uintptr_t)Universe::narrow_klass_range()))));
+             > (1ULL << log2_intptr(Universe::narrow_klass_range()))));
   }
 
   // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
@@ -681,7 +681,7 @@ class NativeLdSt : public NativeInstruction {
       return 0;
     }
   }
-  size_t size_in_bytes() { return 1 << size(); }
+  size_t size_in_bytes() { return 1ULL << size(); }
   bool is_not_pre_post_index() { return (is_ldst_ur() || is_ldst_unsigned_offset()); }
   bool is_load() {
     assert(Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01 ||
@@ -1498,7 +1498,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

   // Generate stack overflow check
   if (UseStackBanging) {
-    __ bang_stack_with_offset(JavaThread::stack_shadow_zone_size());
+    __ bang_stack_with_offset(checked_cast<int>(JavaThread::stack_shadow_zone_size()));
   } else {
     Unimplemented();
   }
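
Note: checked_cast narrows the size_t shadow-zone size to the int that bang_stack_with_offset takes, with a debug-build check that no bits are lost. A sketch of the idea; checked_cast_sketch below is illustrative, not the actual HotSpot helper:

    #include <cassert>
    #include <cstddef>

    // Narrowing cast that asserts the value survives the round trip,
    // in the spirit of HotSpot's checked_cast<>.
    template <typename To, typename From>
    To checked_cast_sketch(From value) {
      To result = static_cast<To>(value);
      assert(static_cast<From>(result) == value && "value out of range for target type");
      return result;
    }

    int main() {
      size_t shadow_zone = 16 * 4096;  // hypothetical shadow-zone size
      int offset = checked_cast_sketch<int>(shadow_zone);  // fits, so no assert
      return offset == 65536 ? 0 : 1;
    }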
@@ -2446,7 +2446,7 @@ void SharedRuntime::generate_deopt_blob() {
   __ sub(sp, sp, r19);
 
   // Push interpreter frames in a loop
-  __ mov(rscratch1, (address)0xDEADDEAD); // Make a recognizable pattern
+  __ mov(rscratch1, (uint64_t)0xDEADDEAD); // Make a recognizable pattern
   __ mov(rscratch2, rscratch1);
   Label loop;
   __ bind(loop);
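
Note: mov takes the literal as a 64-bit immediate; routing it through uint64_t rather than the pointer-typed address avoids MSVC's complaint about widening a 32-bit constant into a pointer type. A standalone illustration; the address typedef mirrors HotSpot's:

    #include <cstdint>

    typedef unsigned char* address;  // as in HotSpot

    int main() {
      // Flagged by MSVC (C4312-style): a 32-bit constant implicitly
      // widened into a 64-bit pointer type.
      // address bad = (address)0xDEADDEAD;

      // Preferred: state the widening explicitly; the value is just a
      // bit pattern for the mov, not a real pointer.
      uint64_t pattern = (uint64_t)0xDEADDEAD;
      return (int)(pattern >> 32);  // 0: the pattern fits in 32 bits
    }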
@@ -1299,14 +1299,14 @@ class StubGenerator: public StubCodeGenerator {

   // Scan over array at a for count oops, verifying each one.
   // Preserves a and count, clobbers rscratch1 and rscratch2.
-  void verify_oop_array (size_t size, Register a, Register count, Register temp) {
+  void verify_oop_array (int size, Register a, Register count, Register temp) {
     Label loop, end;
     __ mov(rscratch1, a);
     __ mov(rscratch2, zr);
     __ bind(loop);
     __ cmp(rscratch2, count);
     __ br(Assembler::HS, end);
-    if (size == (size_t)wordSize) {
+    if (size == wordSize) {
       __ ldr(temp, Address(a, rscratch2, Address::lsl(exact_log2(size))));
       __ verify_oop(temp);
     } else {
@@ -1338,7 +1338,7 @@ class StubGenerator: public StubCodeGenerator {
   // used by generate_conjoint_int_oop_copy().
   //
   address generate_disjoint_copy(size_t size, bool aligned, bool is_oop, address *entry,
-                                 const char *name, bool dest_uninitialized = false) {
+                                 const char *name, bool dest_uninitialized = false) {
     Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
     RegSet saved_reg = RegSet::of(s, d, count);
     __ align(CodeEntryAlignment);
@@ -1367,12 +1367,12 @@ class StubGenerator: public StubCodeGenerator {
       // save regs before copy_memory
       __ push(RegSet::of(d, count), sp);
     }
-    copy_memory(aligned, s, d, count, rscratch1, size);
+    copy_memory(aligned, s, d, count, rscratch1, checked_cast<int>(size));
 
     if (is_oop) {
       __ pop(RegSet::of(d, count), sp);
       if (VerifyOops)
-        verify_oop_array(size, d, count, r16);
+        verify_oop_array(checked_cast<int>(size), d, count, r16);
     }
 
     bs->arraycopy_epilogue(_masm, decorators, is_oop, d, count, rscratch1, RegSet());
@@ -1398,7 +1398,7 @@ class StubGenerator: public StubCodeGenerator {
   // the hardware handle it. The two dwords within qwords that span
   // cache line boundaries will still be loaded and stored atomically.
   //
-  address generate_conjoint_copy(size_t size, bool aligned, bool is_oop, address nooverlap_target,
+  address generate_conjoint_copy(int size, bool aligned, bool is_oop, address nooverlap_target,
                                  address *entry, const char *name,
                                  bool dest_uninitialized = false) {
     Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
@@ -1644,7 +1644,7 @@ class StubGenerator: public StubCodeGenerator {
   address generate_disjoint_oop_copy(bool aligned, address *entry,
                                      const char *name, bool dest_uninitialized) {
     const bool is_oop = true;
-    const size_t size = UseCompressedOops ? sizeof (jint) : sizeof (jlong);
+    const int size = UseCompressedOops ? sizeof (jint) : sizeof (jlong);
     return generate_disjoint_copy(size, aligned, is_oop, entry, name, dest_uninitialized);
   }

@@ -1662,7 +1662,7 @@ class StubGenerator: public StubCodeGenerator {
                                      address nooverlap_target, address *entry,
                                      const char *name, bool dest_uninitialized) {
     const bool is_oop = true;
-    const size_t size = UseCompressedOops ? sizeof (jint) : sizeof (jlong);
+    const int size = UseCompressedOops ? sizeof (jint) : sizeof (jlong);
     return generate_conjoint_copy(size, aligned, is_oop, nooverlap_target, entry,
                                   name, dest_uninitialized);
   }
@@ -1125,7 +1125,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
   // an interpreter frame with greater than a page of locals, so each page
   // needs to be checked. Only true for non-native.
   if (UseStackBanging) {
-    const int n_shadow_pages = JavaThread::stack_shadow_zone_size() / os::vm_page_size();
+    const int n_shadow_pages = checked_cast<int>(JavaThread::stack_shadow_zone_size() / os::vm_page_size());
     const int start_page = native_call ? n_shadow_pages : 1;
     const int page_size = os::vm_page_size();
     for (int pages = start_page; pages <= n_shadow_pages ; pages++) {
