Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

8274527: Minimal VM build fails after JDK-8273459 #5764

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
6 changes: 5 additions & 1 deletion src/hotspot/cpu/x86/macroAssembler_x86.cpp
Expand Up @@ -1177,6 +1177,10 @@ void MacroAssembler::align64() {
align(64, (unsigned long long) pc());
}

void MacroAssembler::align32() {
  // Align the code stream to a 32-byte boundary, passing the current
  // code position (pc) as the alignment reference — mirrors align64().
  const unsigned long long current_pc = (unsigned long long) pc();
  align(32, current_pc);
}

void MacroAssembler::align(int modulus) {
// 8273459: Ensure alignment is possible with current segment alignment
assert(modulus <= CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
Expand Down Expand Up @@ -6905,7 +6909,7 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Regi
// 128 bits per each of 4 parallel streams.
movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32));

align(32);
align32();
BIND(L_fold_512b_loop);
fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
Expand Down
1 change: 1 addition & 0 deletions src/hotspot/cpu/x86/macroAssembler_x86.hpp
Expand Up @@ -194,6 +194,7 @@ class MacroAssembler: public Assembler {
void incrementq(AddressLiteral dst);

// Alignment
void align32();
void align64();
void align(int modulus);
void align(int modulus, int target);
Expand Down
4 changes: 2 additions & 2 deletions src/hotspot/cpu/x86/macroAssembler_x86_adler.cpp
Expand Up @@ -80,7 +80,7 @@ void MacroAssembler::updateBytesAdler32(Register init_d, Register data, Register
cmpptr(data, end);
jcc(Assembler::aboveEqual, SKIP_LOOP_1A);

align(32);
align32();
bind(SLOOP1A);
vbroadcastf128(ydata, Address(data, 0), Assembler::AVX_256bit);
addptr(data, CHUNKSIZE);
Expand Down Expand Up @@ -178,7 +178,7 @@ void MacroAssembler::updateBytesAdler32(Register init_d, Register data, Register
movdl(rax, xb);
addl(b_d, rax);

align(32);
align32();
bind(FINAL_LOOP);
movzbl(rax, Address(data, 0)); //movzx eax, byte[data]
addl(a_d, rax);
Expand Down
28 changes: 14 additions & 14 deletions src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
Expand Up @@ -1484,7 +1484,7 @@ class StubGenerator: public StubCodeGenerator {
__ subq(temp1, loop_size[shift]);

// Main loop with aligned copy block size of 192 bytes at 32 byte granularity.
__ align(32);
__ align32();
__ BIND(L_main_loop);
__ copy64_avx(to, from, temp4, xmm1, false, shift, 0);
__ copy64_avx(to, from, temp4, xmm1, false, shift, 64);
Expand Down Expand Up @@ -1551,7 +1551,7 @@ class StubGenerator: public StubCodeGenerator {

// Main loop with aligned copy block size of 192 bytes at
// 64 byte copy granularity.
__ align(32);
__ align32();
__ BIND(L_main_loop_64bytes);
__ copy64_avx(to, from, temp4, xmm1, false, shift, 0 , true);
__ copy64_avx(to, from, temp4, xmm1, false, shift, 64, true);
Expand Down Expand Up @@ -1691,7 +1691,7 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_main_pre_loop);

// Main loop with aligned copy block size of 192 bytes at 32 byte granularity.
__ align(32);
__ align32();
__ BIND(L_main_loop);
__ copy64_avx(to, from, temp1, xmm1, true, shift, -64);
__ copy64_avx(to, from, temp1, xmm1, true, shift, -128);
Expand Down Expand Up @@ -1724,7 +1724,7 @@ class StubGenerator: public StubCodeGenerator {

// Main loop with aligned copy block size of 192 bytes at
// 64 byte copy granularity.
__ align(32);
__ align32();
__ BIND(L_main_loop_64bytes);
__ copy64_avx(to, from, temp1, xmm1, true, shift, -64 , true);
__ copy64_avx(to, from, temp1, xmm1, true, shift, -128, true);
Expand Down Expand Up @@ -4274,7 +4274,7 @@ class StubGenerator: public StubCodeGenerator {

//Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
address generate_pshuffle_byte_flip_mask_sha512() {
__ align(32);
__ align32();
StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
address start = __ pc();
if (VM_Version::supports_avx2()) {
Expand Down Expand Up @@ -5401,7 +5401,7 @@ address generate_avx_ghash_processBlocks() {

address base64_avx2_shuffle_addr()
{
__ align(32);
__ align32();
StubCodeMark mark(this, "StubRoutines", "avx2_shuffle_base64");
address start = __ pc();
__ emit_data64(0x0809070805060405, relocInfo::none);
Expand All @@ -5413,7 +5413,7 @@ address generate_avx_ghash_processBlocks() {

address base64_avx2_input_mask_addr()
{
__ align(32);
__ align32();
StubCodeMark mark(this, "StubRoutines", "avx2_input_mask_base64");
address start = __ pc();
__ emit_data64(0x8000000000000000, relocInfo::none);
Expand All @@ -5425,7 +5425,7 @@ address generate_avx_ghash_processBlocks() {

address base64_avx2_lut_addr()
{
__ align(32);
__ align32();
StubCodeMark mark(this, "StubRoutines", "avx2_lut_base64");
address start = __ pc();
__ emit_data64(0xfcfcfcfcfcfc4741, relocInfo::none);
Expand Down Expand Up @@ -5530,7 +5530,7 @@ address generate_avx_ghash_processBlocks() {
__ evmovdquq(xmm2, Address(encode_table, 0), Assembler::AVX_512bit);
__ evpbroadcastq(xmm1, rax, Assembler::AVX_512bit);

__ align(32);
__ align32();
__ BIND(L_vbmiLoop);

__ vpermb(xmm0, xmm3, Address(source, start_offset), Assembler::AVX_512bit);
Expand Down Expand Up @@ -5730,7 +5730,7 @@ address generate_avx_ghash_processBlocks() {
__ cmpl(length, 31);
__ jcc(Assembler::belowEqual, L_process3);

__ align(32);
__ align32();
__ BIND(L_32byteLoop);

// Get next 32 bytes
Expand Down Expand Up @@ -6177,7 +6177,7 @@ address generate_avx_ghash_processBlocks() {
__ evmovdquq(join12, ExternalAddress(StubRoutines::x86::base64_vbmi_join_1_2_addr()), Assembler::AVX_512bit, r13);
__ evmovdquq(join23, ExternalAddress(StubRoutines::x86::base64_vbmi_join_2_3_addr()), Assembler::AVX_512bit, r13);

__ align(32);
__ align32();
__ BIND(L_process256);
// Grab input data
__ evmovdquq(input0, Address(source, start_offset, Address::times_1, 0x00), Assembler::AVX_512bit);
Expand Down Expand Up @@ -6259,7 +6259,7 @@ address generate_avx_ghash_processBlocks() {
__ cmpl(length, 63);
__ jcc(Assembler::lessEqual, L_finalBit);

__ align(32);
__ align32();
__ BIND(L_process64Loop);

// Handle first 64-byte block
Expand Down Expand Up @@ -6395,7 +6395,7 @@ address generate_avx_ghash_processBlocks() {
__ shrq(rax, 1);
__ jmp(L_donePadding);

__ align(32);
__ align32();
__ BIND(L_bruteForce);
} // End of if(avx512_vbmi)

Expand Down Expand Up @@ -6439,7 +6439,7 @@ address generate_avx_ghash_processBlocks() {

__ jmp(L_bottomLoop);

__ align(32);
__ align32();
__ BIND(L_forceLoop);
__ shll(byte1, 18);
__ shll(byte2, 12);
Expand Down