Automatic merge of jdk:master into master
duke committed Nov 27, 2020
2 parents 205bae2 + 2215e5a commit dcff176cd1684e46c7d787faf32c5fa13941a0c8
Showing with 738 additions and 281 deletions.
  1. +8 −0 make/autoconf/libraries.m4
  2. +7 −2 src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
  3. +4 −2 src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
  4. +7 −0 src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
  5. +1 −1 src/hotspot/share/adlc/output_h.cpp
  6. +29 −4 src/hotspot/share/gc/g1/g1RemSet.cpp
  7. +0 −7 src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp
  8. +0 −6 src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.hpp
  9. +3 −1 src/hotspot/share/gc/z/zForwarding.inline.hpp
  10. +2 −2 src/hotspot/share/gc/z/zHeap.cpp
  11. +7 −7 src/hotspot/share/gc/z/zHeapIterator.cpp
  12. +9 −9 src/hotspot/share/gc/z/zHeapIterator.hpp
  13. +6 −6 src/hotspot/share/gc/z/zMark.cpp
  14. +13 −13 src/hotspot/share/gc/z/zRelocate.cpp
  15. +0 −1 src/hotspot/share/gc/z/zRelocate.hpp
  16. +7 −8 src/hotspot/share/gc/z/zRootsIterator.cpp
  17. +3 −3 src/hotspot/share/gc/z/zRootsIterator.hpp
  18. +11 −18 src/hotspot/share/gc/z/zRuntimeWorkers.cpp
  19. +1 −8 src/hotspot/share/gc/z/zThread.cpp
  20. +1 −4 src/hotspot/share/gc/z/zThread.hpp
  21. +1 −6 src/hotspot/share/gc/z/zThread.inline.hpp
  22. +32 −32 src/hotspot/share/gc/z/zVerify.cpp
  23. +2 −4 src/hotspot/share/gc/z/zVerify.hpp
  24. +10 −10 src/hotspot/share/gc/z/zWeakRootsProcessor.cpp
  25. +2 −2 src/hotspot/share/gc/z/zWeakRootsProcessor.hpp
  26. +9 −15 src/hotspot/share/gc/z/zWorkers.cpp
  27. +57 −33 src/hotspot/share/opto/regmask.hpp
  28. +16 −11 src/java.base/share/classes/sun/security/provider/SecureRandom.java
  29. +3 −0 src/java.desktop/macosx/classes/sun/lwawt/macosx/LWCToolkit.java
  30. +80 −40 src/java.desktop/share/classes/sun/awt/SunToolkit.java
  31. +3 −0 src/java.desktop/unix/classes/sun/awt/X11/XToolkit.java
  32. +1 −1 src/java.desktop/windows/classes/sun/awt/windows/WToolkit.java
  33. +3 −0 src/java.desktop/windows/native/libawt/windows/awt_Toolkit.cpp
  34. +2 −0 src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/CLinker.java
  35. +10 −2 src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/CABI.java
  36. +3 −3 src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/PlatformLayouts.java
  37. +156 −0 test/hotspot/gtest/opto/test_regmask.cpp
  38. +0 −1 test/hotspot/jtreg/ProblemList.txt
  39. +0 −1 test/jdk/ProblemList.txt
  40. +48 −0 test/jdk/java/awt/Robot/FlushCurrentEvent.java
  41. +66 −0 test/jdk/java/awt/Robot/InfiniteLoopException.java
  42. +1 −0 test/jdk/java/foreign/StdLibTest.java
  43. +1 −0 test/jdk/java/foreign/TestCircularInit1.java
  44. +1 −0 test/jdk/java/foreign/TestCircularInit2.java
  45. +1 −1 test/jdk/java/foreign/TestCondy.java
  46. +1 −0 test/jdk/java/foreign/TestDowncall.java
  47. +1 −0 test/jdk/java/foreign/TestFunctionDescriptor.java
  48. +1 −1 test/jdk/java/foreign/TestIllegalLink.java
  49. +6 −0 test/jdk/java/foreign/TestLibraryLookup.java
  50. +1 −0 test/jdk/java/foreign/TestNative.java
  51. +5 −4 test/jdk/java/foreign/TestNativeScope.java
  52. +1 −0 test/jdk/java/foreign/TestNulls.java
  53. +54 −0 test/jdk/java/foreign/TestUnsupportedPlatform.java
  54. +1 −0 test/jdk/java/foreign/TestUpcall.java
  55. +1 −0 test/jdk/java/foreign/TestUpcallHighArity.java
  56. +1 −0 test/jdk/java/foreign/TestUpcallStubs.java
  57. +1 −0 test/jdk/java/foreign/TestVarArgs.java
  58. +1 −0 test/jdk/java/foreign/stackwalk/TestStackWalk.java
  59. +1 −0 test/jdk/java/foreign/valist/VaListTest.java
  60. +8 −2 test/jdk/java/util/logging/DrainFindDeadlockTest.java
  61. +9 −2 test/jdk/java/util/logging/LoggingDeadlock.java
  62. +18 −8 test/jdk/javax/swing/text/JTextComponent/5074573/bug5074573.java
make/autoconf/libraries.m4
@@ -124,6 +124,14 @@ AC_DEFUN_ONCE([LIB_SETUP_LIBRARIES],
BASIC_JVM_LIBS="$BASIC_JVM_LIBS -lpthread"
fi
+ # Libatomic library
+ # 32-bit MIPS needs fallback library for 8-byte atomic ops
+ if test "x$OPENJDK_TARGET_OS" = xlinux &&
+    (test "x$OPENJDK_TARGET_CPU" = xmips ||
+     test "x$OPENJDK_TARGET_CPU" = xmipsel); then
+   BASIC_JVM_LIBS="$BASIC_JVM_LIBS -latomic"
+ fi
# perfstat lib
if test "x$OPENJDK_TARGET_OS" = xaix; then
BASIC_JVM_LIBS="$BASIC_JVM_LIBS -lperfstat"
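Note on the libatomic hunk above: 32-bit MIPS has no native 8-byte atomic instructions, so GCC lowers 64-bit atomic operations to calls into libatomic, and the JVM therefore has to link against it. A minimal standalone illustration (not part of this commit; the cross-compiler name is just an example):

#include <atomic>
#include <cstdint>
#include <cstdio>

// An 8-byte atomic on a 32-bit target: GCC emits calls such as
// __atomic_fetch_add_8, which are provided by libatomic.
std::atomic<uint64_t> counter{0};

int main() {
  counter.fetch_add(1, std::memory_order_relaxed);
  std::printf("%llu\n", static_cast<unsigned long long>(counter.load()));
  return 0;
}

// Build for a 32-bit MIPS target, e.g.:
//   mipsel-linux-gnu-g++ demo.cpp -latomic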
src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
@@ -681,14 +681,16 @@ void InterpreterMacroAssembler::remove_activation(

// remove activation
// get sender esp
- ldr(esp,
+ ldr(rscratch2,
Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
if (StackReservedPages > 0) {
// testing if reserved zone needs to be re-enabled
Label no_reserved_zone_enabling;

+ // look for an overflow into the stack reserved zone, i.e.
+ // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
- cmp(esp, rscratch1);
+ cmp(rscratch2, rscratch1);
br(Assembler::LS, no_reserved_zone_enabling);

call_VM_leaf(
@@ -699,6 +701,9 @@ void InterpreterMacroAssembler::remove_activation(

bind(no_reserved_zone_enabling);
}

+ // restore sender esp
+ mov(esp, rscratch2);
// remove frame anchor
leave();
// If we're returning to interpreted code we will shortly be
src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
@@ -1152,9 +1152,11 @@ class StubGenerator: public StubCodeGenerator {
// (96 bytes if SIMD because we do 32 byes per instruction)
__ bind(copy80);
if (UseSIMDForMemoryOps) {
- __ ld4(v0, v1, v2, v3, __ T16B, Address(s, 0));
+ __ ldpq(v0, v1, Address(s, 0));
+ __ ldpq(v2, v3, Address(s, 32));
__ ldpq(v4, v5, Address(send, -32));
- __ st4(v0, v1, v2, v3, __ T16B, Address(d, 0));
+ __ stpq(v0, v1, Address(d, 0));
+ __ stpq(v2, v3, Address(d, 32));
__ stpq(v4, v5, Address(dend, -32));
} else {
__ ldp(t0, t1, Address(s, 0));
src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
@@ -174,6 +174,13 @@ void VM_Version::initialize() {
}
}

+ // Neoverse N1
+ if (_cpu == CPU_ARM && (_model == 0xd0c || _model2 == 0xd0c)) {
+   if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
+     FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
+   }
+ }
+
if (_cpu == CPU_ARM) {
if (FLAG_IS_DEFAULT(UseSignumIntrinsic)) {
FLAG_SET_DEFAULT(UseSignumIntrinsic, true);
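The Neoverse N1 check above keys off the CPU implementer and part number that VM_Version reads from MIDR_EL1 (0xd0c is the N1 part number). A generic sketch of how those fields sit in a raw MIDR value (example value, not JDK code):

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t midr = 0x413fd0c1;                  // hypothetical MIDR_EL1 reading
  const unsigned implementer = (midr >> 24) & 0xff;  // 0x41 = Arm
  const unsigned part_number = (midr >> 4) & 0xfff;  // 0xd0c = Neoverse N1
  std::printf("implementer=0x%x part=0x%x\n", implementer, part_number);
  return 0;
}

The new default only applies when the flag was left at its default (FLAG_IS_DEFAULT), so an explicit -XX:-UseSIMDForMemoryOps on the command line presumably still overrides it.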
src/hotspot/share/adlc/output_h.cpp
@@ -98,7 +98,7 @@ void ArchDesc::buildMachRegisterNumbers(FILE *fp_hpp) {
}

fprintf(fp_hpp, "\n// Size of register-mask in ints\n");
fprintf(fp_hpp, "#define RM_SIZE %d\n",RegisterForm::RegMask_Size());
fprintf(fp_hpp, "#define RM_SIZE %d\n", RegisterForm::RegMask_Size());
fprintf(fp_hpp, "// Unroll factor for loops over the data in a RegMask\n");
fprintf(fp_hpp, "#define FORALL_BODY ");
int len = RegisterForm::RegMask_Size();
src/hotspot/share/gc/g1/g1RemSet.cpp
@@ -548,13 +548,12 @@ class G1RemSetSamplingTask : public G1ServiceTask {
// reevaluates the prediction for the remembered set scanning costs, and potentially
// G1Policy resizes the young gen. This may do a premature GC or even
// increase the young gen size to keep pause time length goal.
- void sample_young_list_rs_length(){
-   SuspendibleThreadSetJoiner sts;
+ void sample_young_list_rs_length(SuspendibleThreadSetJoiner* sts){
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1Policy* policy = g1h->policy();

if (policy->use_adaptive_young_list_length()) {
- G1YoungRemSetSamplingClosure cl(&sts);
+ G1YoungRemSetSamplingClosure cl(sts);

G1CollectionSet* g1cs = g1h->collection_set();
g1cs->iterate(&cl);
@@ -565,10 +564,36 @@ class G1RemSetSamplingTask : public G1ServiceTask {
}
}

+ // To avoid extensive rescheduling if the task is executed a bit early. The task is
+ // only rescheduled if the expected time is more than 1ms away.
+ bool should_reschedule() {
+   return reschedule_delay_ms() > 1;
+ }
+
+ // There is no reason to do the sampling if a GC occurred recently. We use the
+ // G1ConcRefinementServiceIntervalMillis as the metric for recently and calculate
+ // the diff to the last GC. If the last GC occurred longer ago than the interval
+ // 0 is returned.
+ jlong reschedule_delay_ms() {
+   Tickspan since_last_gc = G1CollectedHeap::heap()->time_since_last_collection();
+   jlong delay = (jlong) (G1ConcRefinementServiceIntervalMillis - since_last_gc.milliseconds());
+   return MAX2<jlong>(0L, delay);
+ }
+
public:
G1RemSetSamplingTask(const char* name) : G1ServiceTask(name) { }
virtual void execute() {
- sample_young_list_rs_length();
+ SuspendibleThreadSetJoiner sts;
+
+ // Reschedule if a GC happened too recently.
+ if (should_reschedule()) {
+   // Calculate the delay given the last GC and the interval.
+   schedule(reschedule_delay_ms());
+   return;
+ }
+
+ // Do the actual sampling.
+ sample_young_list_rs_length(&sts);
schedule(G1ConcRefinementServiceIntervalMillis);
}
};
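The rescheduling logic above keeps the sampling task from running more often than the service interval: the remaining delay is the interval minus the time since the last GC, clamped at zero, and the task only reschedules itself when more than 1 ms remains. A standalone sketch of that arithmetic (the 300 ms interval is an illustrative value, not taken from this commit):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Remaining delay: whatever is left of the interval since the last GC, never negative.
int64_t reschedule_delay_ms(int64_t interval_ms, int64_t since_last_gc_ms) {
  return std::max<int64_t>(0, interval_ms - since_last_gc_ms);
}

int main() {
  std::printf("%lld\n", (long long) reschedule_delay_ms(300, 120));  // 180 -> reschedule for 180 ms
  std::printf("%lld\n", (long long) reschedule_delay_ms(300, 400));  // 0   -> sample immediately
  return 0;
}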
src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp
@@ -60,7 +60,6 @@ PSAdaptiveSizePolicy::PSAdaptiveSizePolicy(size_t init_eden_size,
_live_at_last_full_gc(init_promo_size),
_change_old_gen_for_min_pauses(0),
_change_young_gen_for_maj_pauses(0),
- _old_gen_policy_is_ready(false),
_young_gen_size_increment_supplement(YoungGenerationSizeSupplement),
_old_gen_size_increment_supplement(TenuredGenerationSizeSupplement)
{
@@ -164,12 +163,6 @@ void PSAdaptiveSizePolicy::major_collection_end(size_t amount_live,
// Update the amount live at the end of a full GC
_live_at_last_full_gc = amount_live;

- // The policy does not have enough data until at least some major collections
- // have been done.
- if (_avg_major_pause->count() >= AdaptiveSizePolicyReadyThreshold) {
-   _old_gen_policy_is_ready = true;
- }
-
// Interval times use this timer to measure the interval that
// the mutator runs. Reset after the GC pause has been measured.
_major_timer.reset();
src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.hpp
@@ -106,10 +106,6 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
// increase/decrease the young generation for major pause time
int _change_young_gen_for_maj_pauses;


- // Flag indicating that the adaptive policy is ready to use
- bool _old_gen_policy_is_ready;

// To facilitate faster growth at start up, supplement the normal
// growth percentage for the young gen eden and the
// old gen space for promotion with these value which decay
@@ -310,8 +306,6 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
}
float major_collection_slope() { return _major_collection_estimator->slope();}

- bool old_gen_policy_is_ready() { return _old_gen_policy_is_ready; }

// Given the amount of live data in the heap, should we
// perform a Full GC?
bool should_full_GC(size_t live_in_old_gen);
src/hotspot/share/gc/z/zForwarding.inline.hpp
@@ -94,7 +94,9 @@ inline ZForwardingEntry* ZForwarding::entries() const {
}

inline ZForwardingEntry ZForwarding::at(ZForwardingCursor* cursor) const {
- return Atomic::load(entries() + *cursor);
+ // Load acquire for correctness with regards to
+ // accesses to the contents of the forwarded object.
+ return Atomic::load_acquire(entries() + *cursor);
}

inline ZForwardingEntry ZForwarding::first(uintptr_t from_index, ZForwardingCursor* cursor) const {
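The load_acquire above pairs the read of a forwarding entry with the releasing store on the insertion side, so a thread that observes the entry also observes the contents of the copied object it refers to. A generic C++ sketch of that acquire/release publication pattern (plain std::atomic, not ZGC's Atomic wrapper):

#include <atomic>
#include <cstdio>
#include <thread>

struct Payload { int value = 0; };

Payload payload;
std::atomic<Payload*> published{nullptr};

void writer() {
  payload.value = 42;                                     // fill in the object contents
  published.store(&payload, std::memory_order_release);   // publish with a release store
}

void reader() {
  Payload* p;
  while ((p = published.load(std::memory_order_acquire)) == nullptr) { }  // acquire load
  std::printf("%d\n", p->value);                          // guaranteed to see 42
}

int main() {
  std::thread t1(writer);
  std::thread t2(reader);
  t1.join();
  t2.join();
  return 0;
}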
src/hotspot/share/gc/z/zHeap.cpp
@@ -302,8 +302,8 @@ void ZHeap::process_non_strong_references() {
// Process Soft/Weak/Final/PhantomReferences
_reference_processor.process_references();

- // Process concurrent weak roots
- _weak_roots_processor.process_concurrent_weak_roots();
+ // Process weak roots
+ _weak_roots_processor.process_weak_roots();

// Unlink stale metadata and nmethods
_unload.unlink();
src/hotspot/share/gc/z/zHeapIterator.cpp
@@ -162,8 +162,8 @@ ZHeapIterator::ZHeapIterator(uint nworkers, bool visit_weaks) :
_bitmaps_lock(),
_queues(nworkers),
_array_queues(nworkers),
- _concurrent_roots(ClassLoaderData::_claim_other),
- _concurrent_weak_roots(),
+ _roots(ClassLoaderData::_claim_other),
+ _weak_roots(),
_terminator(nworkers, &_queues) {

// Create queues
@@ -280,15 +280,15 @@ void ZHeapIterator::push_strong_roots(const ZHeapIteratorContext& context) {
ZHeapIteratorNMethodClosure nm_cl(&cl);
ZHeapIteratorThreadClosure thread_cl(&cl, &nm_cl);

- _concurrent_roots.apply(&cl,
-                         &cld_cl,
-                         &thread_cl,
-                         &nm_cl);
+ _roots.apply(&cl,
+              &cld_cl,
+              &thread_cl,
+              &nm_cl);
}

void ZHeapIterator::push_weak_roots(const ZHeapIteratorContext& context) {
ZHeapIteratorRootOopClosure<true /* Weak */> cl(context);
- _concurrent_weak_roots.apply(&cl);
+ _weak_roots.apply(&cl);
}

template <bool VisitWeaks>
src/hotspot/share/gc/z/zHeapIterator.hpp
@@ -46,15 +46,15 @@ class ZHeapIterator : public ParallelObjectIterator {
friend class ZHeapIteratorContext;

private:
- const bool                   _visit_weaks;
- ZStatTimerDisable            _timer_disable;
- ZHeapIteratorBitMaps         _bitmaps;
- ZLock                        _bitmaps_lock;
- ZHeapIteratorQueues          _queues;
- ZHeapIteratorArrayQueues     _array_queues;
- ZConcurrentRootsIterator     _concurrent_roots;
- ZConcurrentWeakRootsIterator _concurrent_weak_roots;
- TaskTerminator               _terminator;
+ const bool               _visit_weaks;
+ ZStatTimerDisable        _timer_disable;
+ ZHeapIteratorBitMaps     _bitmaps;
+ ZLock                    _bitmaps_lock;
+ ZHeapIteratorQueues      _queues;
+ ZHeapIteratorArrayQueues _array_queues;
+ ZRootsIterator           _roots;
+ ZWeakRootsIterator       _weak_roots;
+ TaskTerminator           _terminator;

ZHeapIteratorBitMap* object_bitmap(oop obj);

src/hotspot/share/gc/z/zMark.cpp
@@ -644,20 +644,20 @@ class ZMarkNMethodClosure : public NMethodClosure {

typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_strong> ZMarkCLDClosure;

- class ZMarkConcurrentRootsTask : public ZTask {
+ class ZMarkRootsTask : public ZTask {
private:
ZMark* const _mark;
SuspendibleThreadSetJoiner _sts_joiner;
- ZConcurrentRootsIterator _roots;
+ ZRootsIterator _roots;

ZMarkOopClosure _cl;
ZMarkCLDClosure _cld_cl;
ZMarkThreadClosure _thread_cl;
ZMarkNMethodClosure _nm_cl;

public:
- ZMarkConcurrentRootsTask(ZMark* mark) :
-   ZTask("ZMarkConcurrentRootsTask"),
+ ZMarkRootsTask(ZMark* mark) :
+   ZTask("ZMarkRootsTask"),
_mark(mark),
_sts_joiner(),
_roots(ClassLoaderData::_claim_strong),
@@ -668,7 +668,7 @@ class ZMarkConcurrentRootsTask : public ZTask {
ClassLoaderDataGraph_lock->lock();
}

- ~ZMarkConcurrentRootsTask() {
+ ~ZMarkRootsTask() {
ClassLoaderDataGraph_lock->unlock();
}

@@ -710,7 +710,7 @@ class ZMarkTask : public ZTask {

void ZMark::mark(bool initial) {
if (initial) {
- ZMarkConcurrentRootsTask task(this);
+ ZMarkRootsTask task(this);
_workers->run_concurrent(&task);
}

src/hotspot/share/gc/z/zRelocate.cpp
@@ -58,21 +58,12 @@ static uintptr_t forwarding_insert(ZForwarding* forwarding, uintptr_t from_addr,
return ZAddress::good(to_offset_final);
}

- uintptr_t ZRelocate::relocate_object_inner(ZForwarding* forwarding, uintptr_t from_addr) const {
-   ZForwardingCursor cursor;
-
-   // Lookup forwarding
-   uintptr_t to_addr = forwarding_find(forwarding, from_addr, &cursor);
-   if (to_addr != 0) {
-     // Already relocated
-     return to_addr;
-   }
-
+ static uintptr_t relocate_object_inner(ZForwarding* forwarding, uintptr_t from_addr, ZForwardingCursor* cursor) {
assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");

// Allocate object
const size_t size = ZUtils::object_size(from_addr);
- to_addr = ZHeap::heap()->alloc_object_non_blocking(size);
+ const uintptr_t to_addr = ZHeap::heap()->alloc_object_non_blocking(size);
if (to_addr == 0) {
// Allocation failed
return 0;
@@ -82,7 +73,7 @@ uintptr_t ZRelocate::relocate_object_inner(ZForwarding* forwarding, uintptr_t fr
ZUtils::object_copy_disjoint(from_addr, to_addr, size);

// Insert forwarding
- const uintptr_t to_addr_final = forwarding_insert(forwarding, from_addr, to_addr, &cursor);
+ const uintptr_t to_addr_final = forwarding_insert(forwarding, from_addr, to_addr, cursor);
if (to_addr_final != to_addr) {
// Already relocated, try undo allocation
ZHeap::heap()->undo_alloc_object(to_addr, size);
@@ -92,9 +83,18 @@ uintptr_t ZRelocate::relocate_object_inner(ZForwarding* forwarding, uintptr_t fr
}

uintptr_t ZRelocate::relocate_object(ZForwarding* forwarding, uintptr_t from_addr) const {
+ ZForwardingCursor cursor;
+
+ // Lookup forwarding
+ uintptr_t to_addr = forwarding_find(forwarding, from_addr, &cursor);
+ if (to_addr != 0) {
+   // Already relocated
+   return to_addr;
+ }
+
// Relocate object
if (forwarding->retain_page()) {
- const uintptr_t to_addr = relocate_object_inner(forwarding, from_addr);
+ to_addr = relocate_object_inner(forwarding, from_addr, &cursor);
forwarding->release_page();

if (to_addr != 0) {
src/hotspot/share/gc/z/zRelocate.hpp
@@ -35,7 +35,6 @@ class ZRelocate {
private:
ZWorkers* const _workers;

- uintptr_t relocate_object_inner(ZForwarding* forwarding, uintptr_t from_addr) const;
void work(ZRelocationSetParallelIterator* iter);

public:
