Automatic merge of jdk:master into master
duke committed Mar 4, 2021
2 parents 41df642 + d93fa0d commit 6f128b439de94be960db4a54fc11cd3d455f7afa
Showing with 1,132 additions and 550 deletions.
  1. +1 −0 src/hotspot/cpu/aarch64/assembler_aarch64.hpp
  2. +27 −0 src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
  3. +4 −3 src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp
  4. +7 −6 src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp
  5. +5 −4 src/hotspot/cpu/s390/vtableStubs_s390.cpp
  6. +4 −3 src/hotspot/cpu/x86/vtableStubs_x86_64.cpp
  7. +1 −1 src/hotspot/share/classfile/verifier.cpp
  8. +74 −31 src/hotspot/share/gc/parallel/psParallelCompact.cpp
  9. +3 −3 src/hotspot/share/gc/parallel/psParallelCompact.hpp
  10. +3 −4 src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp
  11. +3 −4 src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
  12. +15 −4 src/hotspot/share/oops/method.cpp
  13. +4 −4 src/hotspot/share/oops/method.hpp
  14. +4 −4 src/hotspot/share/opto/graphKit.cpp
  15. +28 −7 src/hotspot/share/opto/library_call.cpp
  16. +43 −26 src/hotspot/share/runtime/java.cpp
  17. +63 −47 src/hotspot/share/runtime/sharedRuntime.cpp
  18. +11 −11 src/hotspot/share/runtime/sharedRuntime.hpp
  19. +0 −1 src/hotspot/share/runtime/vmStructs.cpp
  20. +2 −3 src/java.base/aix/classes/sun/nio/fs/AixFileStore.java
  21. +26 −10 src/java.base/share/classes/java/math/BigDecimal.java
  22. +29 −48 src/java.desktop/share/classes/java/awt/color/ColorSpace.java
  23. +1 −1 src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Resolve.java
  24. +3 −4 src/jdk.hotspot.agent/linux/native/libsaproc/DwarfParser.cpp
  25. +458 −0 test/hotspot/jtreg/compiler/vectorapi/TestVectorShiftImm.java
  26. +62 −0 test/hotspot/jtreg/runtime/verifier/ReturnMsgs.java
  27. +64 −0 test/hotspot/jtreg/runtime/verifier/Returns.jasm
  28. +21 −5 test/jdk/com/sun/jdi/JdwpAttachTest.java
  29. +135 −0 test/jdk/java/lang/annotation/LoaderLeakTest.java
  30. +0 −28 test/jdk/java/lang/annotation/loaderLeak/A.java
  31. +0 −24 test/jdk/java/lang/annotation/loaderLeak/B.java
  32. +0 −24 test/jdk/java/lang/annotation/loaderLeak/C.java
  33. +0 −93 test/jdk/java/lang/annotation/loaderLeak/LoaderLeak.sh
  34. +0 −136 test/jdk/java/lang/annotation/loaderLeak/Main.java
  35. +15 −0 test/langtools/tools/javac/recovery/SuperMethodCallBroken.java
  36. +2 −0 test/langtools/tools/javac/recovery/SuperMethodCallBroken.out
  37. +14 −11 test/lib/jdk/test/lib/Utils.java
@@ -2685,6 +2685,7 @@ void mvnw(Register Rd, Register Rm,
* 1xxx xxx 1D/2D, shift = UInt(immh:immb) - 64 \
* (1D is RESERVED) \
*/ \
assert(!isSHR || (isSHR && (shift != 0)), "Zero right shift"); \
assert((1 << ((T>>1)+3)) > shift, "Invalid Shift value"); \
int cVal = (1 << (((T >> 1) + 3) + (isSHR ? 1 : 0))); \
int encodedShift = isSHR ? cVal - shift : cVal + shift; \
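As a side note, the encoding arithmetic in this hunk can be reproduced with a small stand-alone sketch (not part of the commit). Assuming the usual HotSpot arrangement ordering, (T >> 1) selects the lane size (8B/16B -> 8-bit lanes, 4H/8H -> 16, 2S/4S -> 32, 1D/2D -> 64), and the AdvSIMD immh:immb field is lane_bits + shift for left shifts and 2*lane_bits - shift for right shifts, which is exactly what cVal and encodedShift compute. The helper encoded_shift below is hypothetical and only mirrors that arithmetic.

#include <cassert>
#include <cstdio>

// Hypothetical stand-alone model of the encoding in the macro above.
// (T >> 1) selects the lane size: 8B/16B -> 8-bit lanes, 4H/8H -> 16,
// 2S/4S -> 32, 1D/2D -> 64.
static int encoded_shift(int T, int shift, bool isSHR) {
  int lane_bits = 1 << ((T >> 1) + 3);          // 8, 16, 32 or 64
  assert(!isSHR || shift != 0);                  // "Zero right shift"
  assert(shift < lane_bits);                     // "Invalid Shift value"
  int cVal = lane_bits << (isSHR ? 1 : 0);       // 2*lane_bits for right shifts
  return isSHR ? cVal - shift : cVal + shift;    // immh:immb field value
}

int main() {
  // 32-bit lanes (T >> 1 == 2): shl #3 -> 32 + 3, ushr #3 -> 64 - 3.
  printf("%d %d\n", encoded_shift(4, 3, false), encoded_shift(4, 3, true));
  return 0;
}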
@@ -527,6 +527,33 @@ class MacroAssembler: public Assembler {
orr(Vd, T, Vn, Vn);
}

// AdvSIMD shift by immediate.
// These are "user friendly" variants which allow a shift count of 0.
#define WRAP(INSN) \
void INSN(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int shift) { \
if (shift == 0) { \
SIMD_Arrangement arrange = (T & 1) == 0 ? T8B : T16B; \
Assembler::orr(Vd, arrange, Vn, Vn); \
} else { \
Assembler::INSN(Vd, T, Vn, shift); \
} \
} \

WRAP(shl) WRAP(sshr) WRAP(ushr)
#undef WRAP

#define WRAP(INSN) \
void INSN(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int shift) { \
if (shift == 0) { \
Assembler::addv(Vd, T, Vd, Vn); \
} else { \
Assembler::INSN(Vd, T, Vn, shift); \
} \
} \

WRAP(usra) WRAP(ssra)
#undef WRAP

public:

// Generalized Test Bit And Branch, including a "far" variety which
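The WRAP wrappers in the previous hunk exist because the raw AdvSIMD shift-by-immediate encodings cannot express a right shift of zero (hence the "Zero right shift" assert earlier): with shift == 0 the wrappers fall back to an equivalent register copy (orr Vn, Vn) or, for the accumulating forms, a plain vector add. Below is a hypothetical scalar model of one unsigned lane, just to illustrate the equivalence; it is not HotSpot code.

#include <cstdint>
#include <cstdio>

// Hypothetical one-lane model of the zero-shift special cases:
//   ushr with shift 0 is just a copy, which "orr Vd, Vn, Vn" provides, and
//   usra with shift 0 degenerates to a plain vector add ("addv" with two
//   source registers in HotSpot's assembler).
static uint32_t model_ushr(uint32_t vn, int shift) {
  return shift == 0 ? vn : vn >> shift;            // copy when shift is 0
}

static uint32_t model_usra(uint32_t vd, uint32_t vn, int shift) {
  return vd + (shift == 0 ? vn : vn >> shift);     // accumulate; add when shift is 0
}

int main() {
  printf("%u %u\n", model_ushr(0x80u, 0), model_usra(1u, 0x80u, 0));  // 128 129
  return 0;
}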
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -70,7 +70,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
#if (!defined(PRODUCT) && defined(COMPILER2))
if (CountCompiledCalls) {
__ lea(r16, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
__ incrementw(Address(r16));
__ increment(Address(r16));
}
#endif

@@ -145,6 +145,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
if (s == NULL) {
return NULL;
}

// Count unused bytes in instruction sequences of variable size.
// We add them to the computed buffer size in order to avoid
// overflow in subsequently generated stubs.
@@ -159,7 +160,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
#if (!defined(PRODUCT) && defined(COMPILER2))
if (CountCompiledCalls) {
__ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
__ incrementw(Address(r10));
__ increment(Address(r10));
}
#endif

@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -73,9 +73,9 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
slop_delta = load_const_maxLen - (__ pc() - start_pc);
slop_bytes += slop_delta;
assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
__ lwz(R12_scratch2, offs, R11_scratch1);
__ ld(R12_scratch2, offs, R11_scratch1);
__ addi(R12_scratch2, R12_scratch2, 1);
__ stw(R12_scratch2, offs, R11_scratch1);
__ std(R12_scratch2, offs, R11_scratch1);
}
#endif

@@ -141,6 +141,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
if (s == NULL) {
return NULL;
}

// Count unused bytes in instruction sequences of variable size.
// We add them to the computed buffer size in order to avoid
// overflow in subsequently generated stubs.
@@ -160,9 +161,9 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
slop_delta = load_const_maxLen - (__ pc() - start_pc);
slop_bytes += slop_delta;
assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
__ lwz(R12_scratch2, offs, R11_scratch1);
__ ld(R12_scratch2, offs, R11_scratch1);
__ addi(R12_scratch2, R12_scratch2, 1);
__ stw(R12_scratch2, offs, R11_scratch1);
__ std(R12_scratch2, offs, R11_scratch1);
}
#endif

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// Abuse Z_method as scratch register for generic emitter.
// It is loaded further down anyway before it is first used.
// No dynamic code size variance here, increment is 1, always.
__ add2mem_32(Address(Z_R1_scratch), 1, Z_method);
__ add2mem_64(Address(Z_R1_scratch), 1, Z_method);
}
#endif

@@ -158,6 +158,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
if (s == NULL) {
return NULL;
}

// Count unused bytes in instruction sequences of variable size.
// We add them to the computed buffer size in order to avoid
// overflow in subsequently generated stubs.
@@ -179,7 +180,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// Abuse Z_method as scratch register for generic emitter.
// It is loaded further down anyway before it is first used.
// No dynamic code size variance here, increment is 1, always.
__ add2mem_32(Address(Z_R1_scratch), 1, Z_method);
__ add2mem_64(Address(Z_R1_scratch), 1, Z_method);
}
#endif

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {

#if (!defined(PRODUCT) && defined(COMPILER2))
if (CountCompiledCalls) {
__ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
__ incrementq(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
}
#endif

@@ -148,6 +148,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
if (s == NULL) {
return NULL;
}

// Count unused bytes in instruction sequences of variable size.
// We add them to the computed buffer size in order to avoid
// overflow in subsequently generated stubs.
@@ -163,7 +164,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {

#if (!defined(PRODUCT) && defined(COMPILER2))
if (CountCompiledCalls) {
__ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
__ incrementq(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
}
#endif // PRODUCT
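The CountCompiledCalls changes above (incrementw -> increment on aarch64, lwz/stw -> ld/std on ppc, add2mem_32 -> add2mem_64 on s390, incrementl -> incrementq on x86) all widen the megamorphic-call counter update from 32 to 64 bits, consistent with the shared runtime counters being widened elsewhere in this commit (those sharedRuntime hunks are not shown here). A tiny illustrative sketch of why a 32-bit counter is not enough:

#include <cstdint>
#include <cstdio>

// Illustrative only: a 32-bit call counter wraps after ~4.3e9 increments,
// while a 64-bit counter does not in practice. The stub changes above make
// the generated code update the full 64-bit word.
int main() {
  uint32_t narrow = UINT32_MAX;
  uint64_t wide   = UINT32_MAX;
  ++narrow;                        // wraps to 0
  ++wide;                          // 4294967296
  printf("%u %llu\n", narrow, (unsigned long long) wide);
  return 0;
}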

@@ -3149,7 +3149,7 @@ void ClassVerifier::verify_return_value(
if (return_type == VerificationType::bogus_type()) {
verify_error(ErrorContext::bad_type(bci,
current_frame->stack_top_ctx(), TypeOrigin::signature(return_type)),
"Method expects a return value");
"Method does not expect a return value");
return;
}
bool match = return_type.is_assignable_from(type, this, false, CHECK_VERIFY(this));
@@ -57,7 +57,7 @@
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workgroup.hpp"
#include "logging/log.hpp"
@@ -780,7 +780,7 @@ bool ParallelCompactData::summarize(SplitInfo& split_info,
return true;
}

HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) {
HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) const {
assert(addr != NULL, "Should detect NULL oop earlier");
assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");
@@ -1788,8 +1788,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
ParCompactionManager::manager_array(ParallelScavengeHeap::heap()->workers().total_workers());

{
ResourceMark rm;

const uint active_workers =
WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().total_workers(),
ParallelScavengeHeap::heap()->workers().active_workers(),
@@ -1834,7 +1832,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {

// adjust_roots() updates Universe::_intArrayKlassObj which is
// needed by the compaction for filling holes in the dense prefix.
adjust_roots(vmthread_cm);
adjust_roots();

compaction_start.update();
compact();
@@ -2209,35 +2207,81 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
_gc_tracer.report_object_count_after_gc(is_alive_closure());
}

void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
// Adjust the pointers to reflect the new locations
GCTraceTime(Info, gc, phases) tm("Adjust Roots", &_gc_timer);
class PSAdjustTask final : public AbstractGangTask {
SubTasksDone _sub_tasks;
WeakProcessor::Task _weak_proc_task;
OopStorageSetStrongParState<false, false> _oop_storage_iter;
uint _nworkers;

// Need new claim bits when tracing through and adjusting pointers.
ClassLoaderDataGraph::clear_claimed_marks();
enum PSAdjustSubTask {
PSAdjustSubTask_code_cache,
PSAdjustSubTask_aot,
PSAdjustSubTask_old_ref_process,
PSAdjustSubTask_young_ref_process,

PCAdjustPointerClosure oop_closure(cm);
PSAdjustSubTask_num_elements
};

// General strong roots.
Threads::oops_do(&oop_closure, NULL);
OopStorageSet::strong_oops_do(&oop_closure);
CLDToOopClosure cld_closure(&oop_closure, ClassLoaderData::_claim_strong);
ClassLoaderDataGraph::cld_do(&cld_closure);
public:
PSAdjustTask(uint nworkers) :
AbstractGangTask("PSAdjust task"),
_sub_tasks(PSAdjustSubTask_num_elements),
_weak_proc_task(nworkers),
_nworkers(nworkers) {
// Need new claim bits when tracing through and adjusting pointers.
ClassLoaderDataGraph::clear_claimed_marks();
if (nworkers > 1) {
Threads::change_thread_claim_token();
}
}

// Now adjust pointers in remaining weak roots. (All of which should
// have been cleared if they pointed to non-surviving objects.)
WeakProcessor::oops_do(&oop_closure);
~PSAdjustTask() {
Threads::assert_all_threads_claimed();
}

CodeBlobToOopClosure adjust_from_blobs(&oop_closure, CodeBlobToOopClosure::FixRelocations);
CodeCache::blobs_do(&adjust_from_blobs);
AOT_ONLY(AOTLoader::oops_do(&oop_closure);)
void work(uint worker_id) {
ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
PCAdjustPointerClosure adjust(cm);
{
ResourceMark rm;
Threads::possibly_parallel_oops_do(_nworkers > 1, &adjust, nullptr);
}
_oop_storage_iter.oops_do(&adjust);
{
CLDToOopClosure cld_closure(&adjust, ClassLoaderData::_claim_strong);
ClassLoaderDataGraph::cld_do(&cld_closure);
}
{
AlwaysTrueClosure always_alive;
_weak_proc_task.work(worker_id, &always_alive, &adjust);
}
if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) {
CodeBlobToOopClosure adjust_code(&adjust, CodeBlobToOopClosure::FixRelocations);
CodeCache::blobs_do(&adjust_code);
}
if (_sub_tasks.try_claim_task(PSAdjustSubTask_aot)) {
AOT_ONLY(AOTLoader::oops_do(&adjust);)
}
if (_sub_tasks.try_claim_task(PSAdjustSubTask_old_ref_process)) {
PSParallelCompact::ref_processor()->weak_oops_do(&adjust);
}
if (_sub_tasks.try_claim_task(PSAdjustSubTask_young_ref_process)) {
// Roots were visited so references into the young gen in roots
// may have been scanned. Process them also.
// Should the reference processor have a span that excludes
// young gen objects?
PSScavenge::reference_processor()->weak_oops_do(&adjust);
}
_sub_tasks.all_tasks_claimed();
}
};

ref_processor()->weak_oops_do(&oop_closure);
// Roots were visited so references into the young gen in roots
// may have been scanned. Process them also.
// Should the reference processor have a span that excludes
// young gen objects?
PSScavenge::reference_processor()->weak_oops_do(&oop_closure);
void PSParallelCompact::adjust_roots() {
// Adjust the pointers to reflect the new locations
GCTraceTime(Info, gc, phases) tm("Adjust Roots", &_gc_timer);
uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
PSAdjustTask task(nworkers);
ParallelScavengeHeap::heap()->workers().run_task(&task);
}

// Helper class to print 8 region numbers per line and then print the total at the end.
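The new PSAdjustTask above runs root adjustment on the worker gang: the parallel parts (thread roots, OopStorage, class loader data, weak oops) are handled by every worker, while the one-shot pieces (code cache, AOT, old and young reference processors) are guarded with SubTasksDone::try_claim_task so that exactly one worker executes each. Below is a minimal stand-alone sketch of that claim idiom, using std::atomic as a hypothetical stand-in for SubTasksDone; it is not the HotSpot implementation.

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// Hypothetical stand-in for the SubTasksDone claim idiom used by PSAdjustTask:
// each serial sub-task is claimed by exactly one worker via an atomic
// test-and-set; the other workers skip it.
enum { CodeCache, Aot, OldRefs, YoungRefs, NumSubTasks };
static std::atomic<bool> claimed[NumSubTasks];

static bool try_claim_task(int t) {
  return !claimed[t].exchange(true);   // true only for the first caller
}

static void work(unsigned worker_id) {
  // ... fully parallel parts (thread roots, OopStorage, CLDs) would go here ...
  for (int t = 0; t < NumSubTasks; t++) {
    if (try_claim_task(t)) {
      printf("worker %u runs sub-task %d\n", worker_id, t);
    }
  }
}

int main() {
  std::vector<std::thread> gang;
  for (unsigned i = 0; i < 4; i++) gang.emplace_back(work, i);
  for (auto& t : gang) t.join();
  return 0;
}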
@@ -2306,7 +2350,7 @@ void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads)

for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
if (sd.region(cur)->claim_unsafe()) {
ParCompactionManager* cm = ParCompactionManager::manager_array(worker_id);
ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
bool result = sd.region(cur)->mark_normal();
assert(result, "Must succeed at this point.");
cm->region_stack()->push(cur);
@@ -2505,7 +2549,6 @@ static void compaction_with_stealing_work(TaskTerminator* terminator, uint worke
// Go around again.
}
}
return;
}

class UpdateDensePrefixAndCompactionTask: public AbstractGangTask {
@@ -3133,7 +3176,7 @@ void PSParallelCompact::initialize_shadow_regions(uint parallel_gc_threads)

size_t beg_region = sd.addr_to_region_idx(_space_info[old_space_id].dense_prefix());
for (uint i = 0; i < parallel_gc_threads; i++) {
ParCompactionManager *cm = ParCompactionManager::manager_array(i);
ParCompactionManager *cm = ParCompactionManager::gc_thread_compaction_manager(i);
cm->set_next_shadow_region(beg_region + i);
}
}
@@ -480,9 +480,9 @@ class ParallelCompactData
HeapWord* partial_obj_end(size_t region_idx) const;

// Return the location of the object after compaction.
HeapWord* calc_new_pointer(HeapWord* addr, ParCompactionManager* cm);
HeapWord* calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) const;

HeapWord* calc_new_pointer(oop p, ParCompactionManager* cm) {
HeapWord* calc_new_pointer(oop p, ParCompactionManager* cm) const {
return calc_new_pointer(cast_from_oop<HeapWord*>(p), cm);
}

@@ -1107,7 +1107,7 @@ class PSParallelCompact : AllStatic {
static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);

// Adjust addresses in roots. Does not adjust addresses in heap.
static void adjust_roots(ParCompactionManager* cm);
static void adjust_roots();

DEBUG_ONLY(static void write_block_fill_histogram();)

@@ -113,10 +113,9 @@ inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) {
assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");

oop new_obj = (oop)summary_data().calc_new_pointer(obj, cm);
assert(new_obj != NULL, // is forwarding ptr?
"should be forwarded");
// Just always do the update unconditionally?
if (new_obj != NULL) {
assert(new_obj != NULL, "non-null address for live objects");
// Is it actually relocated at all?
if (new_obj != obj) {
assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
"should be in object space");
RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
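The adjust_pointer change above tightens the contract: a live object must have a non-NULL forwarding address, and the slot is only written back when the object actually moved (new_obj != obj), instead of whenever the address is non-NULL. A hypothetical sketch of that "store only if relocated" shape, where calc_new_addr stands in for summary_data().calc_new_pointer():

#include <cassert>
#include <cstdio>

// Hypothetical: pretend nothing moved, so the slot is left untouched.
static void* calc_new_addr(void* obj) { return obj; }

static void adjust_slot(void** slot) {
  void* obj = *slot;
  if (obj == nullptr) return;                 // nothing to adjust
  void* new_obj = calc_new_addr(obj);
  assert(new_obj != nullptr);                 // non-null address for live objects
  if (new_obj != obj) {                       // only write back if it moved
    *slot = new_obj;
  }
}

int main() {
  int x = 42;
  void* slot = &x;
  adjust_slot(&slot);
  printf("%p\n", slot);
  return 0;
}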
