This repository has been archived by the owner on Aug 27, 2022. It is now read-only.

Commit

Automatic merge of jdk:master into master
duke committed Jan 29, 2021
2 parents 88c1ff8 + 1519632 commit 1cc80db
Showing 238 changed files with 10,914 additions and 11,933 deletions.
4 changes: 2 additions & 2 deletions src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -1411,7 +1411,7 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
// membar it's possible for a simple Dekker test to fail if loads
// use LD;DMB but stores use STLR. This can happen if C2 compiles
// the stores in one method and C1 compiles the loads in another.
if (!is_c1_or_interpreter_only()) {
if (!CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
__ membar();
}
__ volatile_load_mem_reg(address, result, info);
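The comment above refers to the classic Dekker (store-buffering) test: if C2-compiled code publishes with a release-only store (STLR) while C1-compiled code only places a barrier after its volatile load (LD; DMB), neither thread orders its own store before its own load, and both can observe the stale value. The leading membar C1 emits here, whenever the VM is not a C1-only configuration, restores full ordering. Below is a minimal sketch of the property in portable C++ atomics, not HotSpot code; names are illustrative only.

#include <atomic>
#include <cassert>
#include <thread>

std::atomic<int> x{0}, y{0};

int main() {
  int r1 = -1, r2 = -1;
  std::thread a([&] {
    x.store(1, std::memory_order_seq_cst);   // full ordering; a release-only store would not suffice
    r1 = y.load(std::memory_order_seq_cst);  // must not be satisfied ahead of the store above
  });
  std::thread b([&] {
    y.store(1, std::memory_order_seq_cst);
    r2 = x.load(std::memory_order_seq_cst);
  });
  a.join();
  b.join();
  assert(r1 == 1 || r2 == 1);  // r1 == 0 && r2 == 0 is the forbidden Dekker outcome
  return 0;
}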
6 changes: 3 additions & 3 deletions src/hotspot/cpu/aarch64/c1_globals_aarch64.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -32,7 +32,7 @@
// Sets the default values for platform dependent flags used by the client compiler.
// (see c1_globals.hpp)

#ifndef TIERED
#ifndef COMPILER2
define_pd_global(bool, BackgroundCompilation, true );
define_pd_global(bool, InlineIntrinsics, true );
define_pd_global(bool, PreferInterpreterNativeStubs, false);
@@ -56,7 +56,7 @@ define_pd_global(uintx, MetaspaceSize, 12*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
define_pd_global(bool, CICompileOSR, true );
#endif // !TIERED
#endif // !COMPILER2
define_pd_global(bool, UseTypeProfile, false);

define_pd_global(bool, OptimizeSinglePrecision, true );
4 changes: 2 additions & 2 deletions src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -39,7 +39,7 @@ define_pd_global(bool, PreferInterpreterNativeStubs, false);
define_pd_global(bool, ProfileTraps, true);
define_pd_global(bool, UseOnStackReplacement, true);
define_pd_global(bool, ProfileInterpreter, true);
define_pd_global(bool, TieredCompilation, trueInTiered);
define_pd_global(bool, TieredCompilation, COMPILER1_PRESENT(true) NOT_COMPILER1(false));
define_pd_global(intx, CompileThreshold, 10000);

define_pd_global(intx, OnStackReplacePercentage, 140);
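The TieredCompilation default above now keys off which compilers are built in rather than the removed TIERED define. The value macros follow the usual pattern from HotSpot's utilities/macros.hpp; the sketch below shows that pattern only to make the selected defaults explicit, and the exact definitions should be treated as an assumption.

#ifdef COMPILER1
#define COMPILER1_PRESENT(code) code
#define NOT_COMPILER1(code)
#else
#define COMPILER1_PRESENT(code)
#define NOT_COMPILER1(code) code
#endif

// With both C1 and C2 built:  COMPILER1_PRESENT(true) NOT_COMPILER1(false)  expands to  true
// In a C2-only build:         COMPILER1_PRESENT(true) NOT_COMPILER1(false)  expands to  false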
@@ -51,7 +51,7 @@ void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) {

ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), addr, cmpval, newval, /*acquire*/ true, /*release*/ true, /*is_cae*/ false, result);

if (is_c1_or_interpreter_only()) {
if (CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
// The membar here is necessary to prevent reordering between the
// release store in the CAS above and a subsequent volatile load.
// However for tiered compilation C1 inserts a full barrier before
4 changes: 2 additions & 2 deletions src/hotspot/cpu/aarch64/globals_aarch64.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -36,7 +36,7 @@ define_pd_global(bool, ImplicitNullChecks, true); // Generate code for im
define_pd_global(bool, TrapBasedNullChecks, false);
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs past to check cast

define_pd_global(uintx, CodeCacheSegmentSize, 64 TIERED_ONLY(+64)); // Tiered compilation has large code-entry alignment.
define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
define_pd_global(intx, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineFrequencyCount, 100);
13 changes: 7 additions & 6 deletions src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/vmClasses.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
@@ -47,7 +48,7 @@

void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
if (VerifyMethodHandles)
verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class),
verify_klass(_masm, klass_reg, VM_CLASS_ID(java_lang_Class),
"MH argument is a Class");
__ ldr(klass_reg, Address(klass_reg, java_lang_Class::klass_offset()));
}
@@ -64,10 +65,10 @@ static int check_nonzero(const char* xname, int x) {

#ifdef ASSERT
void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj, SystemDictionary::WKID klass_id,
Register obj, VMClassID klass_id,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
Klass* klass = SystemDictionary::well_known_klass(klass_id);
InstanceKlass** klass_addr = vmClasses::klass_addr_at(klass_id);
Klass* klass = vmClasses::klass_at(klass_id);
Register temp = rscratch2;
Register temp2 = rscratch1; // used by MacroAssembler::cmpptr
Label L_ok, L_bad;
@@ -289,7 +290,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
// The method is a member invoker used by direct method handles.
if (VerifyMethodHandles) {
// make sure the trailing argument really is a MemberName (caller responsibility)
verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MemberName),
verify_klass(_masm, member_reg, VM_CLASS_ID(java_lang_invoke_MemberName),
"MemberName required for invokeVirtual etc.");
}

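The SystemDictionary::WKID / well_known_klass calls above are renamed to the vmClasses / VMClassID API; the lookup shape stays the same: a fixed ID indexes a table of pre-resolved classes, and generated code can embed either the entry or its address. Below is a standalone C++ sketch of that shape; all names and the table layout are assumptions for illustration, not the HotSpot sources.

#include <cstddef>

struct InstanceKlass;   // opaque stand-in for the VM's class metadata

enum class VMClassID : size_t { java_lang_Class, java_lang_invoke_MemberName, java_lang_invoke_MethodHandle, LIMIT };

class vmClassesSketch {
  static InstanceKlass* _table[static_cast<size_t>(VMClassID::LIMIT)];
 public:
  // Returns the pre-resolved class for a well-known ID.
  static InstanceKlass*  klass_at(VMClassID id)      { return _table[static_cast<size_t>(id)]; }
  // Returns the address of the table slot, so generated code can load through it.
  static InstanceKlass** klass_addr_at(VMClassID id) { return &_table[static_cast<size_t>(id)]; }
};

InstanceKlass* vmClassesSketch::_table[static_cast<size_t>(VMClassID::LIMIT)] = {};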
6 changes: 3 additions & 3 deletions src/hotspot/cpu/aarch64/methodHandles_aarch64.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -36,11 +36,11 @@ enum /* platform_dependent_constants */ {
static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg);

static void verify_klass(MacroAssembler* _masm,
Register obj, SystemDictionary::WKID klass_id,
Register obj, VMClassID klass_id,
const char* error_message = "wrong klass") NOT_DEBUG_RETURN;

static void verify_method_handle(MacroAssembler* _masm, Register mh_reg) {
verify_klass(_masm, mh_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MethodHandle),
verify_klass(_masm, mh_reg, VM_CLASS_ID(java_lang_invoke_MethodHandle),
"reference is a MH");
}

115 changes: 24 additions & 91 deletions src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
@@ -589,82 +589,31 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(
//
// rmethod: method
//
void TemplateInterpreterGenerator::generate_counter_incr(
Label* overflow,
Label* profile_method,
Label* profile_method_continue) {
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
Label done;
// Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
if (TieredCompilation) {
int increment = InvocationCounter::count_increment;
Label no_mdo;
if (ProfileInterpreter) {
// Are we profiling?
__ ldr(r0, Address(rmethod, Method::method_data_offset()));
__ cbz(r0, no_mdo);
// Increment counter in the MDO
const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
__ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
__ b(done);
}
__ bind(no_mdo);
// Increment counter in MethodCounters
const Address invocation_counter(rscratch2,
MethodCounters::invocation_counter_offset() +
InvocationCounter::counter_offset());
__ get_method_counters(rmethod, rscratch2, done);
const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
__ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
__ bind(done);
} else { // not TieredCompilation
const Address backedge_counter(rscratch2,
MethodCounters::backedge_counter_offset() +
InvocationCounter::counter_offset());
const Address invocation_counter(rscratch2,
MethodCounters::invocation_counter_offset() +
InvocationCounter::counter_offset());

__ get_method_counters(rmethod, rscratch2, done);

if (ProfileInterpreter) { // %%% Merge this into MethodData*
__ ldrw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
__ addw(r1, r1, 1);
__ strw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
}
// Update standard invocation counters
__ ldrw(r1, invocation_counter);
__ ldrw(r0, backedge_counter);

__ addw(r1, r1, InvocationCounter::count_increment);
__ andw(r0, r0, InvocationCounter::count_mask_value);

__ strw(r1, invocation_counter);
__ addw(r0, r0, r1); // add both counters

// profile_method is non-null only for interpreted method so
// profile_method != NULL == !native_call

if (ProfileInterpreter && profile_method != NULL) {
// Test to see if we should create a method data oop
__ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
__ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
__ cmpw(r0, rscratch2);
__ br(Assembler::LT, *profile_method_continue);

// if no method data exists, go to profile_method
__ test_method_data_pointer(rscratch2, *profile_method);
}

{
__ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
__ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
__ cmpw(r0, rscratch2);
__ br(Assembler::HS, *overflow);
}
__ bind(done);
int increment = InvocationCounter::count_increment;
Label no_mdo;
if (ProfileInterpreter) {
// Are we profiling?
__ ldr(r0, Address(rmethod, Method::method_data_offset()));
__ cbz(r0, no_mdo);
// Increment counter in the MDO
const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
__ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
__ b(done);
}
__ bind(no_mdo);
// Increment counter in MethodCounters
const Address invocation_counter(rscratch2,
MethodCounters::invocation_counter_offset() +
InvocationCounter::counter_offset());
__ get_method_counters(rmethod, rscratch2, done);
const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
__ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
__ bind(done);
}
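With the non-tiered path removed, generate_counter_incr always uses the increment-and-mask scheme: bump the invocation counter in the MDO (when profiling) or in MethodCounters, mask it with a policy-supplied value, and branch to the overflow stub when the masked result is zero. A rough C++ sketch of that arithmetic follows; the increment value and bit layout are assumptions, and the real code does this in generated AArch64 instructions.

#include <cstdint>

// Counts live above a few low status bits, so each invocation adds a fixed grain.
constexpr uint32_t kCountIncrement = 1u << 2;   // assumed stand-in for InvocationCounter::count_increment

// Returns true when the caller should take the overflow path (notify the compilation policy).
bool bump_and_check(uint32_t& counter, uint32_t mask) {
  counter += kCountIncrement;                   // ldrw / addw / strw in the generated code
  return (counter & mask) == 0;                 // andw, then br EQ to the overflow label
}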

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
@@ -1205,7 +1154,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// increment invocation count & check for overflow
Label invocation_counter_overflow;
if (inc_counter) {
generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
generate_counter_incr(&invocation_counter_overflow);
}

Label continue_after_compile;
@@ -1649,15 +1598,8 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {

// increment invocation count & check for overflow
Label invocation_counter_overflow;
Label profile_method;
Label profile_method_continue;
if (inc_counter) {
generate_counter_incr(&invocation_counter_overflow,
&profile_method,
&profile_method_continue);
if (ProfileInterpreter) {
__ bind(profile_method_continue);
}
generate_counter_incr(&invocation_counter_overflow);
}

Label continue_after_compile;
@@ -1709,15 +1651,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {

// invocation counter overflow
if (inc_counter) {
if (ProfileInterpreter) {
// We have decided to profile this method in the interpreter
__ bind(profile_method);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
__ set_method_data_pointer_for_bcp();
// don't think we need this
__ get_method(r1);
__ b(profile_method_continue);
}
// Handle overflow of counter and compile method
__ bind(invocation_counter_overflow);
generate_counter_overflow(continue_after_compile);
