Skip to content
This repository has been archived by the owner. It is now read-only.
Permalink
Browse files
Automatic merge of jdk:master into master
  • Loading branch information
duke committed Feb 18, 2021
2 parents d486cd8 + 5f30829 commit 1eefa8f3d9286513a0d5fc9410491978d33baf60
Showing with 4,793 additions and 808 deletions.
  1. +1 −1 doc/building.html
  2. +2 −2 doc/building.md
  3. +0 −1 src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp
  4. +3 −55 src/hotspot/cpu/ppc/ppc.ad
  5. +56 −44 src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
  6. +7 −0 src/hotspot/cpu/x86/assembler_x86.cpp
  7. +1 −0 src/hotspot/cpu/x86/assembler_x86.hpp
  8. +3 −11 src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
  9. +4 −7 src/hotspot/cpu/x86/macroAssembler_x86_arrayCopy_avx3.cpp
  10. +12 −8 src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
  11. +1 −1 src/hotspot/cpu/x86/x86.ad
  12. +1 −1 src/hotspot/os/aix/os_aix.cpp
  13. +2 −2 src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
  14. +41 −4 src/hotspot/os/linux/os_linux.cpp
  15. +1 −0 src/hotspot/os/linux/os_linux.hpp
  16. +16 −3 src/hotspot/os/windows/os_windows.cpp
  17. +7 −2 src/hotspot/share/c1/c1_LIR.hpp
  18. +11 −10 src/hotspot/share/c1/c1_LIRGenerator.cpp
  19. +18 −3 src/hotspot/share/c1/c1_LinearScan.cpp
  20. +1 −0 src/hotspot/share/c1/c1_LinearScan.hpp
  21. +34 −42 src/hotspot/share/classfile/classLoader.cpp
  22. +2 −1 src/hotspot/share/classfile/classLoader.hpp
  23. +2 −2 src/hotspot/share/classfile/classLoaderStats.hpp
  24. +2 −1 src/hotspot/share/code/icBuffer.cpp
  25. +1 −0 src/hotspot/share/compiler/compileBroker.cpp
  26. +1 −1 src/hotspot/share/gc/shared/gcVMOperations.hpp
  27. +2 −2 src/hotspot/share/gc/shared/oopStorage.cpp
  28. +5 −55 src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp
  29. +1 −23 src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.hpp
  30. +0 −10 src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.inline.hpp
  31. +1 −0 src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp
  32. +2 −1 src/hotspot/share/memory/metaspace/metaspaceDCmd.cpp
  33. +1 −1 src/hotspot/share/prims/jvmtiEnvBase.hpp
  34. +2 −2 src/hotspot/share/prims/jvmtiRedefineClasses.hpp
  35. +0 −1 src/hotspot/share/prims/jvmtiTrace.hpp
  36. +1 −0 src/hotspot/share/prims/unsafe.cpp
  37. +2 −1 src/hotspot/share/prims/upcallStubs.cpp
  38. +1 −0 src/hotspot/share/runtime/deoptimization.cpp
  39. +1 −0 src/hotspot/share/runtime/handshake.cpp
  40. +1 −0 src/hotspot/share/runtime/interfaceSupport.cpp
  41. +1 −1 src/hotspot/share/runtime/objectMonitor.cpp
  42. +2 −1 src/hotspot/share/runtime/os.cpp
  43. +51 −0 src/hotspot/share/runtime/safefetch.hpp
  44. +2 −2 src/hotspot/share/runtime/safepoint.hpp
  45. +2 −2 src/hotspot/share/runtime/stubRoutines.cpp
  46. +1 −21 src/hotspot/share/runtime/stubRoutines.hpp
  47. +174 −0 src/hotspot/share/runtime/vmOperation.hpp
  48. +3 −142 src/hotspot/share/runtime/vmOperations.hpp
  49. +1 −1 src/hotspot/share/runtime/vmThread.hpp
  50. +1 −0 src/hotspot/share/services/attachListener.cpp
  51. +1 −0 src/hotspot/share/services/diagnosticCommand.cpp
  52. +1 −0 src/hotspot/share/services/management.cpp
  53. +1 −0 src/hotspot/share/utilities/vmError.cpp
  54. +2 −1 src/hotspot/share/utilities/xmlstream.cpp
  55. +38 −2 src/java.base/share/classes/java/lang/Character.java
  56. +1 −54 src/java.base/share/classes/sun/net/www/MimeEntry.java
  57. +0 −205 src/java.base/share/classes/sun/net/www/MimeLauncher.java
  58. +40 −11 src/java.base/share/classes/sun/security/tools/keytool/Main.java
  59. +10 −2 src/jdk.sctp/unix/native/libsctp/SctpChannelImpl.c
  60. +2 −2 test/hotspot/gtest/runtime/test_safefetch.cpp
  61. +4,068 −0 test/hotspot/jtreg/compiler/c1/TestTooManyVirtualRegisters.jasm
  62. +18 −15 ...icationLaunchException.java → test/hotspot/jtreg/compiler/c1/TestTooManyVirtualRegistersMain.java
  63. +1 −1 test/hotspot/jtreg/gc/epsilon/TestByteArrays.java
  64. +1 −1 test/hotspot/jtreg/gc/epsilon/TestElasticTLAB.java
  65. +1 −1 test/hotspot/jtreg/gc/epsilon/TestElasticTLABDecay.java
  66. +1 −1 test/hotspot/jtreg/gc/epsilon/TestMemoryPools.java
  67. +1 −0 test/hotspot/jtreg/gc/g1/TestGCLogMessages.java
  68. +1 −1 test/hotspot/jtreg/gc/g1/TestHumongousRemsetsMatch.java
  69. +7 −2 test/hotspot/jtreg/gc/g1/TestLargePageUseForHeap.java
  70. +1 −0 test/hotspot/jtreg/gc/g1/ihop/TestIHOPStatic.java
  71. +1 −0 test/hotspot/jtreg/gc/logging/TestMetaSpaceLog.java
  72. +2 −38 test/jdk/java/lang/reflect/callerCache/ReflectionCallerCacheTest.java
  73. +102 −0 test/jdk/sun/security/tools/keytool/CheckCertAKID.java
  74. +4 −3 test/jdk/sun/security/tools/keytool/ExtOptionCamelCase.java
@@ -849,7 +849,7 @@ <h3 id="handling-reconfigurations">Handling Reconfigurations</h3>
<p>If you update the repository and part of the configure script has changed, the build system will force you to re-run <code>configure</code>.</p>
<p>Most of the time, you will be fine by running <code>configure</code> again with the same arguments as the last time, which can easily be performed by <code>make reconfigure</code>. To simplify this, you can use the <code>CONF_CHECK</code> make control variable, either as <code>make CONF_CHECK=auto</code>, or by setting an environment variable. For instance, if you add <code>export CONF_CHECK=auto</code> to your <code>.bashrc</code> file, <code>make</code> will always run <code>reconfigure</code> automatically whenever the configure script has changed.</p>
<p>You can also use <code>CONF_CHECK=ignore</code> to skip the check for a needed configure update. This might speed up the build, but comes at the risk of an incorrect build result. This is only recommended if you know what you're doing.</p>
<p>From time to time, you will also need to modify the command line to <code>configure</code> due to changes. Use <code>make print-configure</code> to show the command line used for your current configuration.</p>
<p>From time to time, you will also need to modify the command line to <code>configure</code> due to changes. Use <code>make print-configuration</code> to show the command line used for your current configuration.</p>
<h3 id="using-fine-grained-make-targets">Using Fine-Grained Make Targets</h3>
<p>The default behavior for make is to create consistent and correct output, at the expense of build speed, if necessary.</p>
<p>If you are prepared to take some risk of an incorrect build, and know enough of the system to understand how things build and interact, you can speed up the build process considerably by instructing make to only build a portion of the product.</p>
@@ -1556,8 +1556,8 @@ update. This might speed up the build, but comes at the risk of an incorrect
build result. This is only recommended if you know what you're doing.

From time to time, you will also need to modify the command line to `configure`
due to changes. Use `make print-configure` to show the command line used for
your current configuration.
due to changes. Use `make print-configuration` to show the command line used
for your current configuration.

### Using Fine-Grained Make Targets

@@ -46,7 +46,6 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob

if (UseCondCardMark) {
Label L_already_dirty;
__ membar(Assembler::StoreLoad);
__ ldrb(rscratch2, Address(obj, rscratch1));
__ cbz(rscratch2, L_already_dirty);
__ strb(zr, Address(obj, rscratch1));
@@ -3035,36 +3035,6 @@ encode %{
__ stfd($src$$FloatRegister, Idisp, $mem$$base$$Register);
%}

// Use release_store for card-marking to ensure that previous
// oop-stores are visible before the card-mark change.
enc_class enc_cms_card_mark(memory mem, iRegLdst releaseFieldAddr, flagsReg crx) %{
// FIXME: Implement this as a cmove and use a fixed condition code
// register which is written on every transition to compiled code,
// e.g. in call-stub and when returning from runtime stubs.
//
// Proposed code sequence for the cmove implementation:
//
// Label skip_release;
// __ beq(CCRfixed, skip_release);
// __ release();
// __ bind(skip_release);
// __ stb(card mark);

C2_MacroAssembler _masm(&cbuf);
Label skip_storestore;

__ li(R0, 0);
__ membar(Assembler::StoreStore);

// Do the store.
if ($mem$$index == 0) {
__ stb(R0, $mem$$disp, $mem$$base$$Register);
} else {
assert(0 == $mem$$disp, "no displacement possible with indexed load/stores on ppc");
__ stbx(R0, $mem$$base$$Register, $mem$$index$$Register);
}
%}

enc_class postalloc_expand_encode_oop(iRegNdst dst, iRegPdst src, flagsReg crx) %{

if (VM_Version::has_isel()) {
@@ -6601,37 +6571,15 @@ instruct storeD(memory mem, regD src) %{

//----------Store Instructions With Zeros--------------------------------------

// Card-mark for CMS garbage collection.
// This cardmark does an optimization so that it must not always
// do a releasing store. For this, it gets the address of
// CMSCollectorCardTableBarrierSetBSExt::_requires_release as input.
// (Using releaseFieldAddr in the match rule is a hack.)
instruct storeCM_CMS(memory mem, iRegLdst releaseFieldAddr, flagsReg crx) %{
match(Set mem (StoreCM mem releaseFieldAddr));
effect(TEMP crx);
predicate(false);
ins_cost(MEMORY_REF_COST);

// See loadConP.
ins_cannot_rematerialize(true);

format %{ "STB #0, $mem \t// CMS card-mark byte (must be 0!), checking requires_release in [$releaseFieldAddr]" %}
ins_encode( enc_cms_card_mark(mem, releaseFieldAddr, crx) );
ins_pipe(pipe_class_memory);
%}

instruct storeCM_G1(memory mem, immI_0 zero) %{
instruct storeCM(memory mem, immI_0 zero) %{
match(Set mem (StoreCM mem zero));
predicate(UseG1GC);
ins_cost(MEMORY_REF_COST);

ins_cannot_rematerialize(true);

format %{ "STB #0, $mem \t// CMS card-mark byte store (G1)" %}
format %{ "STB #0, $mem \t// CMS card-mark byte store" %}
size(8);
ins_encode %{
__ li(R0, 0);
//__ release(); // G1: oops are allowed to get visible after dirty marking
// No release barrier: Oops are allowed to get visible after marking.
guarantee($mem$$base$$Register != R1_SP, "use frame_slots_bias");
__ stb(R0, $mem$$disp, $mem$$base$$Register);
%}
@@ -2598,7 +2598,7 @@ class StubGenerator: public StubCodeGenerator {

address start = __ function_entry();

Label L_doLast;
Label L_doLast, L_error;

Register from = R3_ARG1; // source array address
Register to = R4_ARG2; // destination array address
@@ -2628,7 +2628,7 @@ class StubGenerator: public StubCodeGenerator {

__ li (fifteen, 15);

// load unaligned from[0-15] to vsRet
// load unaligned from[0-15] to vRet
__ lvx (vRet, from);
__ lvx (vTmp1, fifteen, from);
__ lvsl (fromPerm, from);
@@ -2743,6 +2743,11 @@ class StubGenerator: public StubCodeGenerator {
__ cmpwi (CCR0, keylen, 52);
__ beq (CCR0, L_doLast);

#ifdef ASSERT
__ cmpwi (CCR0, keylen, 60);
__ bne (CCR0, L_error);
#endif

// 12th - 13th rounds
__ vcipher (vRet, vRet, vKey1);
__ vcipher (vRet, vRet, vKey2);
@@ -2763,29 +2768,30 @@ class StubGenerator: public StubCodeGenerator {
__ vcipher (vRet, vRet, vKey1);
__ vcipherlast (vRet, vRet, vKey2);

// store result (unaligned)
#ifdef VM_LITTLE_ENDIAN
__ lvsl (toPerm, to);
#else
__ lvsr (toPerm, to);
#endif
__ vspltisb (vTmp3, -1);
__ vspltisb (vTmp4, 0);
__ lvx (vTmp1, to);
__ lvx (vTmp2, fifteen, to);
#ifdef VM_LITTLE_ENDIAN
__ vperm (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask
__ vxor (toPerm, toPerm, fSplt); // swap bytes
#else
__ vperm (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask
// toPerm = 0x0F0E0D0C0B0A09080706050403020100
__ lvsl (toPerm, keypos); // keypos is a multiple of 16
__ vxor (toPerm, toPerm, fSplt);

// Swap Bytes
__ vperm (vRet, vRet, vRet, toPerm);
#endif
__ vperm (vTmp4, vRet, vRet, toPerm); // rotate data
__ vsel (vTmp2, vTmp4, vTmp2, vTmp3);
__ vsel (vTmp1, vTmp1, vTmp4, vTmp3);
__ stvx (vTmp2, fifteen, to); // store this one first (may alias)
__ stvx (vTmp1, to);

// store result (unaligned)
// Note: We can't use a read-modify-write sequence which touches additional Bytes.
Register lo = temp, hi = fifteen; // Reuse
__ vsldoi (vTmp1, vRet, vRet, 8);
__ mfvrd (hi, vRet);
__ mfvrd (lo, vTmp1);
__ std (hi, 0 LITTLE_ENDIAN_ONLY(+ 8), to);
__ std (lo, 0 BIG_ENDIAN_ONLY(+ 8), to);

__ blr();

#ifdef ASSERT
__ bind(L_error);
__ stop("aescrypt_encryptBlock: invalid key length");
#endif
return start;
}

@@ -2799,9 +2805,7 @@ class StubGenerator: public StubCodeGenerator {

address start = __ function_entry();

Label L_doLast;
Label L_do44;
Label L_do52;
Label L_doLast, L_do44, L_do52, L_error;

Register from = R3_ARG1; // source array address
Register to = R4_ARG2; // destination array address
@@ -2832,7 +2836,7 @@ class StubGenerator: public StubCodeGenerator {

__ li (fifteen, 15);

// load unaligned from[0-15] to vsRet
// load unaligned from[0-15] to vRet
__ lvx (vRet, from);
__ lvx (vTmp1, fifteen, from);
__ lvsl (fromPerm, from);
@@ -2861,6 +2865,11 @@ class StubGenerator: public StubCodeGenerator {
__ cmpwi (CCR0, keylen, 52);
__ beq (CCR0, L_do52);

#ifdef ASSERT
__ cmpwi (CCR0, keylen, 60);
__ bne (CCR0, L_error);
#endif

// load the 15th round key to vKey1
__ li (keypos, 240);
__ lvx (vKey1, keypos, key);
@@ -2897,6 +2906,7 @@ class StubGenerator: public StubCodeGenerator {

__ b (L_doLast);

__ align(32);
__ bind (L_do52);

// load the 13th round key to vKey1
@@ -2923,6 +2933,7 @@ class StubGenerator: public StubCodeGenerator {

__ b (L_doLast);

__ align(32);
__ bind (L_do44);

// load the 11th round key to vKey1
@@ -3000,29 +3011,30 @@ class StubGenerator: public StubCodeGenerator {
__ vncipher (vRet, vRet, vKey4);
__ vncipherlast (vRet, vRet, vKey5);

// store result (unaligned)
#ifdef VM_LITTLE_ENDIAN
__ lvsl (toPerm, to);
#else
__ lvsr (toPerm, to);
#endif
__ vspltisb (vTmp3, -1);
__ vspltisb (vTmp4, 0);
__ lvx (vTmp1, to);
__ lvx (vTmp2, fifteen, to);
#ifdef VM_LITTLE_ENDIAN
__ vperm (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask
__ vxor (toPerm, toPerm, fSplt); // swap bytes
#else
__ vperm (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask
// toPerm = 0x0F0E0D0C0B0A09080706050403020100
__ lvsl (toPerm, keypos); // keypos is a multiple of 16
__ vxor (toPerm, toPerm, fSplt);

// Swap Bytes
__ vperm (vRet, vRet, vRet, toPerm);
#endif
__ vperm (vTmp4, vRet, vRet, toPerm); // rotate data
__ vsel (vTmp2, vTmp4, vTmp2, vTmp3);
__ vsel (vTmp1, vTmp1, vTmp4, vTmp3);
__ stvx (vTmp2, fifteen, to); // store this one first (may alias)
__ stvx (vTmp1, to);

// store result (unaligned)
// Note: We can't use a read-modify-write sequence which touches additional Bytes.
Register lo = temp, hi = fifteen; // Reuse
__ vsldoi (vTmp1, vRet, vRet, 8);
__ mfvrd (hi, vRet);
__ mfvrd (lo, vTmp1);
__ std (hi, 0 LITTLE_ENDIAN_ONLY(+ 8), to);
__ std (lo, 0 BIG_ENDIAN_ONLY(+ 8), to);

__ blr();

#ifdef ASSERT
__ bind(L_error);
__ stop("aescrypt_decryptBlock: invalid key length");
#endif
return start;
}

@@ -9173,6 +9173,13 @@ void Assembler::evpblendmq (XMMRegister dst, KRegister mask, XMMRegister nds, XM
emit_int16(0x64, (0xC0 | encode));
}

// BZHI r64, r64, r64 (BMI2 "Zero High Bits Starting with Specified Bit
// Position"): copies src1 to dst with all bits at positions >= the index in
// the low byte of src2 cleared. Encoding is VEX.0F38.W1 F5 /r (vex_w=true
// selects the 64-bit W1 form; legacy_mode/no_mask_reg because this is a
// GPR-only VEX instruction, not an EVEX/AVX-512 one).
// NOTE(review): src2 is passed in the nds (VEX.vvvv) position of
// vex_prefix_and_encode() and src1 in the r/m position, matching the BMI2
// VEX-encoded GPR operand convention — confirm against the other BMI2
// emitters (shlxq/shrxq) if changing.
void Assembler::bzhiq(Register dst, Register src1, Register src2) {
// Guard: BZHI is only valid on CPUs reporting BMI2 support.
assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
// Emit opcode byte 0xF5 followed by the ModRM byte (0xC0 | encode =
// register-direct addressing mode).
emit_int16((unsigned char)0xF5, (0xC0 | encode));
}

void Assembler::shlxl(Register dst, Register src1, Register src2) {
assert(VM_Version::supports_bmi2(), "");
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
@@ -2092,6 +2092,7 @@ class Assembler : public AbstractAssembler {
void shlxq(Register dst, Register src1, Register src2);
void shrxq(Register dst, Register src1, Register src2);

void bzhiq(Register dst, Register src1, Register src2);

//====================VECTOR ARITHMETIC=====================================
void evpmovd2m(KRegister kdst, XMMRegister src, int vector_len);
@@ -1894,17 +1894,9 @@ void C2_MacroAssembler::reduce8L(int opcode, Register dst, Register src1, XMMReg
}

void C2_MacroAssembler::genmask(Register dst, Register len, Register temp) {
if (ArrayCopyPartialInlineSize <= 32) {
mov64(dst, 1);
shlxq(dst, dst, len);
decq(dst);
} else {
mov64(dst, -1);
movq(temp, len);
negptr(temp);
addptr(temp, 64);
shrxq(dst, dst, temp);
}
assert(ArrayCopyPartialInlineSize <= 64,"");
mov64(dst, -1L);
bzhiq(dst, dst, len);
}
#endif // _LP64

@@ -196,10 +196,8 @@ void MacroAssembler::copy64_masked_avx(Register dst, Register src, XMMRegister x
} else {
Address::ScaleFactor scale = (Address::ScaleFactor)(shift);
assert(MaxVectorSize == 64, "vector length != 64");
negptr(length);
addq(length, 64);
mov64(temp, -1);
shrxq(temp, temp, length);
mov64(temp, -1L);
bzhiq(temp, temp, length);
kmovql(mask, temp);
evmovdqu(type[shift], mask, xmm, Address(src, index, scale, offset), Assembler::AVX_512bit);
evmovdqu(type[shift], mask, Address(dst, index, scale, offset), xmm, Assembler::AVX_512bit);
@@ -213,9 +211,8 @@ void MacroAssembler::copy32_masked_avx(Register dst, Register src, XMMRegister x
assert(MaxVectorSize >= 32, "vector length should be >= 32");
BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG};
Address::ScaleFactor scale = (Address::ScaleFactor)(shift);
mov64(temp, 1);
shlxq(temp, temp, length);
decq(temp);
mov64(temp, -1L);
bzhiq(temp, temp, length);
kmovql(mask, temp);
evmovdqu(type[shift], mask, xmm, Address(src, index, scale, offset), Assembler::AVX_256bit);
evmovdqu(type[shift], mask, Address(dst, index, scale, offset), xmm, Assembler::AVX_256bit);

0 comments on commit 1eefa8f

Please sign in to comment.