8299162: Refactor shared trampoline emission logic
Reviewed-by: fyang, adinn, luhenry
zhengxiaolinX authored and luhenry committed Feb 6, 2023
1 parent 522fa13 commit 7730506
Showing 9 changed files with 83 additions and 62 deletions.
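The heart of the change, visible in both codeBuffer_aarch64.cpp and codeBuffer_riscv.cpp below: instead of growing the stubs section inside the per-destination emission lambda and threading a `p_succeeded` flag through it, the stubs section is now expanded once, up front, to the worst case of `max_trampoline_stub_size() * number_of_entries()`, after which each emission can simply assert that space remains. A minimal sketch of that pattern, with stand-in types and sizes rather than the real HotSpot CodeBuffer API:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// Stand-in for the stubs section of a CodeBuffer.
struct StubSection {
  std::vector<char> bytes;
  std::size_t used = 0;
  // The real maybe_expand_to_ensure_remaining() can fail when the code
  // cache is full; this toy version always succeeds.
  bool ensure_remaining(std::size_t n) {
    if (bytes.size() - used < n) bytes.resize(used + n);
    return true;
  }
  std::size_t remaining() const { return bytes.size() - used; }
};

constexpr std::size_t kMaxStubSize = 20;  // stand-in for max_trampoline_stub_size()

bool emit_shared_trampolines(StubSection& stubs, const std::vector<int>& requests) {
  // One worst-case reservation up front: the sole failure path.
  if (!stubs.ensure_remaining(kMaxStubSize * requests.size())) {
    return false;  // "CodeCache is full"
  }
  for (int dest : requests) {
    // Emission can now assert instead of handling expansion failure.
    assert(stubs.remaining() >= kMaxStubSize && "pre-allocated trampolines");
    stubs.used += kMaxStubSize;  // stand-in for emit_trampoline_stub()
    (void)dest;
  }
  return true;
}

int main() {
  StubSection stubs;
  std::vector<int> requests = {1, 2, 3};
  return emit_shared_trampolines(stubs, requests) ? 0 : 1;
}
```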
44 changes: 21 additions & 23 deletions src/hotspot/cpu/aarch64/codeBuffer_aarch64.cpp
@@ -42,46 +42,44 @@ void CodeBuffer::share_trampoline_for(address dest, int caller_offset) {
   _finalize_stubs = true;
 }
 
+#define __ masm.
+
 static bool emit_shared_trampolines(CodeBuffer* cb, CodeBuffer::SharedTrampolineRequests* requests) {
   if (requests == nullptr) {
     return true;
   }
 
   MacroAssembler masm(cb);
-
-  bool p_succeeded = true;
   auto emit = [&](address dest, const CodeBuffer::Offsets &offsets) {
-    masm.set_code_section(cb->stubs());
-    if (!is_aligned(masm.offset(), wordSize)) {
-      if (cb->stubs()->maybe_expand_to_ensure_remaining(NativeInstruction::instruction_size) && cb->blob() == NULL) {
-        ciEnv::current()->record_failure("CodeCache is full");
-        p_succeeded = false;
-        return p_succeeded;
-      }
-      masm.align(wordSize);
-    }
-
+    assert(cb->stubs()->remaining() >= MacroAssembler::max_trampoline_stub_size(), "pre-allocated trampolines");
     LinkedListIterator<int> it(offsets.head());
     int offset = *it.next();
-    for (; !it.is_empty(); offset = *it.next()) {
-      masm.relocate(trampoline_stub_Relocation::spec(cb->insts()->start() + offset));
-    }
-    masm.set_code_section(cb->insts());
+    address stub = __ emit_trampoline_stub(offset, dest);
+    assert(stub, "pre-allocated trampolines");
 
-    address stub = masm.emit_trampoline_stub(offset, dest);
-    if (stub == nullptr) {
-      ciEnv::current()->record_failure("CodeCache is full");
-      p_succeeded = false;
+    address reloc_pc = cb->stubs()->end() - NativeCallTrampolineStub::instruction_size;
+    while (!it.is_empty()) {
+      offset = *it.next();
+      address caller_pc = cb->insts()->start() + offset;
+      cb->stubs()->relocate(reloc_pc, trampoline_stub_Relocation::spec(caller_pc));
     }
 
-    return p_succeeded;
+    return true;
   };
 
-  requests->iterate(emit);
+  assert(requests->number_of_entries() >= 1, "at least one");
+  const int total_requested_size = MacroAssembler::max_trampoline_stub_size() * requests->number_of_entries();
+  if (cb->stubs()->maybe_expand_to_ensure_remaining(total_requested_size) && cb->blob() == NULL) {
+    ciEnv::current()->record_failure("CodeCache is full");
+    return false;
+  }
 
-  return p_succeeded;
+  requests->iterate(emit);
+  return true;
 }
 
+#undef __
+
 bool CodeBuffer::pd_finalize_stubs() {
   return emit_shared_stubs_to_interp<MacroAssembler>(this, _shared_stub_to_interp_requests)
       && emit_shared_trampolines(this, _shared_trampoline_requests);
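Note how the rewritten lambda shares one stub among all callers of a destination: the first recorded offset is consumed by emit_trampoline_stub(), which registers its own relocation, and every remaining caller gets a trampoline_stub_Relocation pointing at the same stub, whose address is recovered as the stubs section's end minus the stub's instruction size. A rough model of that fan-out, with plain integers standing in for the CodeBuffer sections and relocation records (illustrative stand-ins, not the HotSpot API):

```cpp
#include <cstdint>
#include <map>
#include <vector>

using Address = std::uintptr_t;

// Stand-in for a trampoline_stub_Relocation: caller -> shared stub.
struct Reloc {
  Address caller_pc;
  Address stub_pc;
};

constexpr Address kStubSize = 16;  // assumed trampoline body size

// One trampoline per destination; every caller offset resolves to it.
std::vector<Reloc> emit_shared(const std::map<Address, std::vector<int>>& requests,
                               Address insts_start, Address stubs_end) {
  std::vector<Reloc> relocs;
  for (const auto& [dest, offsets] : requests) {
    stubs_end += kStubSize;                   // stand-in for emit_trampoline_stub()
    Address stub_pc = stubs_end - kStubSize;  // like stubs()->end() - instruction_size
    for (int off : offsets) {                 // all callers share this one stub
      relocs.push_back({insts_start + off, stub_pc});
    }
    (void)dest;
  }
  return relocs;
}

int main() {
  // Two call sites at offsets 0x40 and 0x80 both branch to the same destination.
  std::map<Address, std::vector<int>> requests = {{0x500000, {0x40, 0x80}}};
  auto relocs = emit_shared(requests, /*insts_start=*/0x100000, /*stubs_end=*/0x200000);
  return relocs.size() == 2 ? 0 : 1;
}
```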
7 changes: 3 additions & 4 deletions src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -71,15 +71,14 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
 #undef __
 
 int CompiledStaticCall::to_interp_stub_size() {
-  // isb; movk; movz; movz; movk; movz; movz; br
-  return 8 * NativeInstruction::instruction_size;
+  return MacroAssembler::static_call_stub_size();
 }
 
 int CompiledStaticCall::to_trampoline_stub_size() {
   // Somewhat pessimistically, we count 3 instructions here (although
   // there are only two) because we sometimes emit an alignment nop.
   // Trampoline stubs are always word aligned.
-  return 3 * NativeInstruction::instruction_size + wordSize;
+  return MacroAssembler::max_trampoline_stub_size();
 }
 
 // Relocation entries for call stub, compiled java to interpreter.
13 changes: 11 additions & 2 deletions src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -926,8 +926,7 @@ address MacroAssembler::trampoline_call(Address entry) {
 address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                              address dest) {
   // Max stub size: alignment nop, TrampolineStub.
-  address stub = start_a_stub(NativeInstruction::instruction_size
-                              + NativeCallTrampolineStub::instruction_size);
+  address stub = start_a_stub(max_trampoline_stub_size());
   if (stub == NULL) {
     return NULL; // CodeBuffer::expand failed
   }
@@ -959,6 +958,11 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
   return stub_start_addr;
 }
 
+int MacroAssembler::max_trampoline_stub_size() {
+  // Max stub size: alignment nop, TrampolineStub.
+  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
+}
+
 void MacroAssembler::emit_static_call_stub() {
   // CompiledDirectStaticCall::set_to_interpreted knows the
   // exact layout of this stub.
@@ -971,6 +975,11 @@ void MacroAssembler::emit_static_call_stub() {
   br(rscratch1);
 }
 
+int MacroAssembler::static_call_stub_size() {
+  // isb; movk; movz; movz; movk; movz; movz; br
+  return 8 * NativeInstruction::instruction_size;
+}
+
 void MacroAssembler::c2bool(Register x) {
   // implements x == 0 ? 0 : 1
   // note: must only look at least-significant byte of x
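For reference, every AArch64 instruction is 4 bytes, so the new helpers work out to 32 bytes for the static call stub and, assuming NativeCallTrampolineStub spans 16 bytes (ldr, br, and an 8-byte destination slot; nativeInst_aarch64.hpp holds the authoritative value), 20 bytes for a worst-case trampoline including its alignment nop. A quick sanity check of that arithmetic, with the stub size as a labeled assumption:

```cpp
// kTrampolineStubBytes is an assumption mirroring
// NativeCallTrampolineStub::instruction_size; the 4-byte instruction
// width is fixed by the AArch64 ISA.
constexpr int kInsnBytes           = 4;   // NativeInstruction::instruction_size
constexpr int kTrampolineStubBytes = 16;  // assumed: ldr, br, 8-byte target slot

constexpr int max_trampoline_stub_size() {
  return kInsnBytes + kTrampolineStubBytes;  // one alignment nop + stub body
}
constexpr int static_call_stub_size() {
  return 8 * kInsnBytes;  // isb; movk; movz; movz; movk; movz; movz; br
}

static_assert(max_trampoline_stub_size() == 20, "alignment nop + 16-byte stub");
static_assert(static_call_stub_size() == 32, "eight 4-byte instructions");

int main() { return 0; }
```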
2 changes: 2 additions & 0 deletions src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
@@ -638,7 +638,9 @@ class MacroAssembler: public Assembler {
     return false;
   }
   address emit_trampoline_stub(int insts_call_instruction_offset, address target);
+  static int max_trampoline_stub_size();
   void emit_static_call_stub();
+  static int static_call_stub_size();
 
   // The following 4 methods return the offset of the appropriate move instruction
 
44 changes: 21 additions & 23 deletions src/hotspot/cpu/riscv/codeBuffer_riscv.cpp
@@ -44,46 +44,44 @@ void CodeBuffer::share_trampoline_for(address dest, int caller_offset) {
   _finalize_stubs = true;
 }
 
+#define __ masm.
+
 static bool emit_shared_trampolines(CodeBuffer* cb, CodeBuffer::SharedTrampolineRequests* requests) {
   if (requests == nullptr) {
     return true;
   }
 
   MacroAssembler masm(cb);
-
-  bool p_succeeded = true;
   auto emit = [&](address dest, const CodeBuffer::Offsets &offsets) {
-    masm.set_code_section(cb->stubs());
-    if (!is_aligned(masm.offset() + NativeCallTrampolineStub::data_offset, wordSize)) {
-      if (cb->stubs()->maybe_expand_to_ensure_remaining(NativeInstruction::instruction_size) && cb->blob() == NULL) {
-        ciEnv::current()->record_failure("CodeCache is full");
-        p_succeeded = false;
-        return p_succeeded;
-      }
-      masm.align(wordSize, NativeCallTrampolineStub::data_offset);
-    }
-
+    assert(cb->stubs()->remaining() >= MacroAssembler::max_trampoline_stub_size(), "pre-allocated trampolines");
     LinkedListIterator<int> it(offsets.head());
     int offset = *it.next();
-    for (; !it.is_empty(); offset = *it.next()) {
-      masm.relocate(trampoline_stub_Relocation::spec(cb->insts()->start() + offset));
-    }
-    masm.set_code_section(cb->insts());
+    address stub = __ emit_trampoline_stub(offset, dest);
+    assert(stub, "pre-allocated trampolines");
 
-    address stub = masm.emit_trampoline_stub(offset, dest);
-    if (stub == nullptr) {
-      ciEnv::current()->record_failure("CodeCache is full");
-      p_succeeded = false;
+    address reloc_pc = cb->stubs()->end() - NativeCallTrampolineStub::instruction_size;
+    while (!it.is_empty()) {
+      offset = *it.next();
+      address caller_pc = cb->insts()->start() + offset;
+      cb->stubs()->relocate(reloc_pc, trampoline_stub_Relocation::spec(caller_pc));
     }
 
-    return p_succeeded;
+    return true;
  };
 
-  requests->iterate(emit);
+  assert(requests->number_of_entries() >= 1, "at least one");
+  const int total_requested_size = MacroAssembler::max_trampoline_stub_size() * requests->number_of_entries();
+  if (cb->stubs()->maybe_expand_to_ensure_remaining(total_requested_size) && cb->blob() == NULL) {
+    ciEnv::current()->record_failure("CodeCache is full");
+    return false;
+  }
 
-  return p_succeeded;
+  requests->iterate(emit);
+  return true;
 }
 
+#undef __
+
 bool CodeBuffer::pd_finalize_stubs() {
   return emit_shared_stubs_to_interp<MacroAssembler>(this, _shared_stub_to_interp_requests)
       && emit_shared_trampolines(this, _shared_trampoline_requests);
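The RISC-V version deletes the same p_succeeded plumbing; the one porting wrinkle was its alignment test, which padded until the 8-byte destination slot inside the stub (at data_offset) was word aligned, rather than the stub's first instruction. That alignment is now handled when the stub itself is emitted, and max_trampoline_stub_size() budgets one padding instruction for it. A stand-alone model of the offset-biased alignment, assuming a plain-integer position in place of MacroAssembler::align(alignment, offset):

```cpp
#include <cstdint>

// Smallest pos' >= pos such that (pos' + offset) is a multiple of alignment,
// i.e. pad so a field 'offset' bytes into the stub lands word aligned.
constexpr std::uintptr_t align_biased(std::uintptr_t pos,
                                      std::uintptr_t alignment,
                                      std::uintptr_t offset) {
  std::uintptr_t rem = (pos + offset) % alignment;
  return rem == 0 ? pos : pos + (alignment - rem);
}

// With an assumed data_offset of 12 and 8-byte words:
static_assert(align_biased(4, 8, 12) == 4,  "4 + 12 = 16 is already aligned");
static_assert(align_biased(6, 8, 12) == 12, "pad to 12 so 12 + 12 = 24 is aligned");

int main() { return 0; }
```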
7 changes: 3 additions & 4 deletions src/hotspot/cpu/riscv/compiledIC_riscv.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -69,15 +69,14 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
 #undef __
 
 int CompiledStaticCall::to_interp_stub_size() {
-  // (lui, addi, slli, addi, slli, addi) + (lui, addi, slli, addi, slli) + jalr
-  return 12 * NativeInstruction::instruction_size;
+  return MacroAssembler::static_call_stub_size();
 }
 
 int CompiledStaticCall::to_trampoline_stub_size() {
   // Somewhat pessimistically, we count 4 instructions here (although
   // there are only 3) because we sometimes emit an alignment nop.
   // Trampoline stubs are always word aligned.
-  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
+  return MacroAssembler::max_trampoline_stub_size();
 }
 
 // Relocation entries for call stub, compiled java to interpreter.
14 changes: 12 additions & 2 deletions src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
@@ -3142,8 +3142,8 @@ address MacroAssembler::ic_call(address entry, jint method_index) {
 
 address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                              address dest) {
-  address stub = start_a_stub(NativeInstruction::instruction_size
-                              + NativeCallTrampolineStub::instruction_size);
+  // Max stub size: alignment nop, TrampolineStub.
+  address stub = start_a_stub(max_trampoline_stub_size());
   if (stub == NULL) {
     return NULL; // CodeBuffer::expand failed
   }
@@ -3183,6 +3183,16 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
   return stub_start_addr;
 }
 
+int MacroAssembler::max_trampoline_stub_size() {
+  // Max stub size: alignment nop, TrampolineStub.
+  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
+}
+
+int MacroAssembler::static_call_stub_size() {
+  // (lui, addi, slli, addi, slli, addi) + (lui, addi, slli, addi, slli) + jalr
+  return 12 * NativeInstruction::instruction_size;
+}
+
 Address MacroAssembler::add_memory_helper(const Address dst, Register tmp) {
   switch (dst.getMode()) {
     case Address::base_plus_offset:
2 changes: 2 additions & 0 deletions src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
@@ -412,7 +412,9 @@ class MacroAssembler: public Assembler {
   }
 
   address emit_trampoline_stub(int insts_call_instruction_offset, address target);
+  static int max_trampoline_stub_size();
   void emit_static_call_stub();
+  static int static_call_stub_size();
 
   // The following 4 methods return the offset of the appropriate move instruction
 
12 changes: 8 additions & 4 deletions src/hotspot/share/asm/codeBuffer.inline.hpp
@@ -29,6 +29,8 @@
 #include "ci/ciEnv.hpp"
 #include "code/compiledIC.hpp"
 
+#define __ masm.
+
 template <typename MacroAssembler, int relocate_format = 0>
 bool emit_shared_stubs_to_interp(CodeBuffer* cb, SharedStubToInterpRequests* shared_stub_to_interp_requests) {
   if (shared_stub_to_interp_requests == NULL) {
@@ -46,7 +48,7 @@ bool emit_shared_stubs_to_interp(CodeBuffer* cb, SharedStubToInterpRequests* sha
   shared_stub_to_interp_requests->sort(by_shared_method);
   MacroAssembler masm(cb);
   for (int i = 0; i < shared_stub_to_interp_requests->length();) {
-    address stub = masm.start_a_stub(CompiledStaticCall::to_interp_stub_size());
+    address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
     if (stub == NULL) {
       ciEnv::current()->record_failure("CodeCache is full");
       return false;
@@ -55,13 +57,15 @@ bool emit_shared_stubs_to_interp(CodeBuffer* cb, SharedStubToInterpRequests* sha
     ciMethod* method = shared_stub_to_interp_requests->at(i).shared_method();
     do {
       address caller_pc = cb->insts_begin() + shared_stub_to_interp_requests->at(i).call_offset();
-      masm.relocate(static_stub_Relocation::spec(caller_pc), relocate_format);
+      __ relocate(static_stub_Relocation::spec(caller_pc), relocate_format);
       ++i;
     } while (i < shared_stub_to_interp_requests->length() && shared_stub_to_interp_requests->at(i).shared_method() == method);
-    masm.emit_static_call_stub();
-    masm.end_a_stub();
+    __ emit_static_call_stub();
+    __ end_a_stub();
   }
   return true;
 }
 
+#undef __
+
 #endif // SHARE_ASM_CODEBUFFER_INLINE_HPP
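The shared-stub emitter also adopts HotSpot's conventional `__` shorthand, `#define __ masm.`, so emission code reads like an assembly listing; the matching `#undef` keeps the macro from leaking into files that include this header. The convention in miniature, with a toy assembler standing in for MacroAssembler:

```cpp
#include <iostream>

// Toy assembler for illustration; the real one is HotSpot's MacroAssembler.
struct Assembler {
  void nop() { std::cout << "nop\n"; }
  void ret() { std::cout << "ret\n"; }
};

#define __ masm.

void emit_stub(Assembler& masm) {
  __ nop();  // expands to masm.nop();
  __ ret();  // expands to masm.ret();
}

#undef __  // scope the shorthand to this file, as the header now does

int main() {
  Assembler masm;
  emit_stub(masm);
  return 0;
}
```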
