Skip to content
This repository has been archived by the owner before Nov 9, 2022. It is now read-only.
Permalink
Browse files
Automatic merge of jdk:master into master
  • Loading branch information
duke committed Dec 4, 2020
2 parents 85fcedc + f83fd4a commit 33a384e5a7e6bc7a5821ef219a3d8a16c2f9211e
Show file tree
Hide file tree
Showing 120 changed files with 2,443 additions and 592 deletions.
@@ -240,12 +240,22 @@ ifeq ($(ALLOW_ABSOLUTE_PATHS_IN_OUTPUT)-$(FILE_MACRO_CFLAGS), false-)
# When compiling with relative paths, the deps file may come out with relative
# paths, and that path may start with './'. First remove any leading ./, then
# add WORKSPACE_ROOT to any line not starting with /, while allowing for
# leading spaces.
# leading spaces. There may also be multiple entries on the same line, so start
# with splitting such lines.
# Non-GNU sed (BSD on macosx) cannot substitute in literal \n using regex.
# Instead use a bash escaped literal newline. To avoid having unmatched quotes
# ruin the ability for an editor to properly syntax highlight this file, define
# that newline sequence as a separate variable and add the closing quote behind
# a comment.
sed_newline := \'$$'\n''#'
define fix-deps-file
$(SED) \
-e 's|^\([ ]*\)\./|\1|' \
-e '/^[ ]*[^/ ]/s|^\([ ]*\)|\1$(WORKSPACE_ROOT)/|' \
$1.tmp > $1
-e 's|\([^ ]\) \{1,\}\([^\\:]\)|\1 \\$(sed_newline) \2|g' \
$1.tmp \
| $(SED) \
-e 's|^\([ ]*\)\./|\1|' \
-e '/^[ ]*[^/ ]/s|^\([ ]*\)|\1$(WORKSPACE_ROOT)/|' \
> $1
endef
else
# By default the MakeCommandRelative macro does nothing.
@@ -132,8 +132,6 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
assert(recv != noreg, "required register");
assert(method_temp == rmethod, "required register for loading method");

//NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });

// Load the invoker, as MH -> MH.form -> LF.vmentry
__ verify_oop(recv);
__ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset())), temp2);
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -31,8 +31,8 @@
enum {
// Relocations are byte-aligned.
offset_unit = 1,
// We don't use format().
format_width = 0
// Must be at least 1 for RelocInfo::narrow_oop_in_const.
format_width = 1
};

public:
@@ -1094,10 +1094,10 @@ class StubGenerator: public StubCodeGenerator {
Register count, Register tmp, int step) {
copy_direction direction = step < 0 ? copy_backwards : copy_forwards;
bool is_backwards = step < 0;
int granularity = uabs(step);
unsigned int granularity = uabs(step);
const Register t0 = r3, t1 = r4;

// <= 96 bytes do inline. Direction doesn't matter because we always
// <= 80 (or 96 for SIMD) bytes do inline. Direction doesn't matter because we always
// load all the data before writing anything
Label copy4, copy8, copy16, copy32, copy80, copy_big, finish;
const Register t2 = r5, t3 = r6, t4 = r7, t5 = r8;
@@ -1154,7 +1154,28 @@ class StubGenerator: public StubCodeGenerator {
if (UseSIMDForMemoryOps) {
__ ldpq(v0, v1, Address(s, 0));
__ ldpq(v2, v3, Address(s, 32));
// Unaligned pointers can be an issue for copying.
// The issue has more chances to happen when granularity of data is
// less than 4(sizeof(jint)). Pointers for arrays of jint are at least
// 4 byte aligned. Pointers for arrays of jlong are 8 byte aligned.
// The biggest performance drop has been seen for the range 65-80 bytes.
// For such cases using the pair of ldp/stp instead of the third pair of
// ldpq/stpq fixes the performance issue.
if (granularity < sizeof (jint)) {
Label copy96;
__ cmp(count, u1(80/granularity));
__ br(Assembler::HI, copy96);
__ ldp(t0, t1, Address(send, -16));

__ stpq(v0, v1, Address(d, 0));
__ stpq(v2, v3, Address(d, 32));
__ stp(t0, t1, Address(dend, -16));
__ b(finish);

__ bind(copy96);
}
__ ldpq(v4, v5, Address(send, -32));

__ stpq(v0, v1, Address(d, 0));
__ stpq(v2, v3, Address(d, 32));
__ stpq(v4, v5, Address(dend, -32));
@@ -168,8 +168,6 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
assert(recv != noreg, "required register");
assert(method_temp == rbx, "required register for loading method");

//NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });

// Load the invoker, as MH -> MH.form -> LF.vmentry
__ verify_oop(recv);
__ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset())), temp2);
@@ -86,6 +86,7 @@

// UseLargePages means nothing, for now, on AIX.
// Use Use64KPages or Use16MPages instead.
define_pd_global(size_t, PreTouchParallelChunkSize, 1 * G);
define_pd_global(bool, UseLargePages, false);
define_pd_global(bool, UseLargePagesIndividualAllocation, false);
define_pd_global(bool, UseThreadPriorities, true) ;
@@ -42,6 +42,7 @@
// Defines Bsd-specific default values. The flags are available on all
// platforms, but they may have different default values on other platforms.
//
define_pd_global(size_t, PreTouchParallelChunkSize, 1 * G);
define_pd_global(bool, UseLargePages, false);
define_pd_global(bool, UseLargePagesIndividualAllocation, false);
define_pd_global(bool, UseThreadPriorities, true) ;
@@ -90,6 +90,7 @@
// Defines Linux-specific default values. The flags are available on all
// platforms, but they may have different default values on other platforms.
//
define_pd_global(size_t, PreTouchParallelChunkSize, 4 * M);
define_pd_global(bool, UseLargePages, false);
define_pd_global(bool, UseLargePagesIndividualAllocation, false);
define_pd_global(bool, UseThreadPriorities, true) ;
@@ -45,6 +45,7 @@ product(bool, UseOSErrorReporting, false, \
// Defines Windows-specific default values. The flags are available on all
// platforms, but they may have different default values on other platforms.
//
define_pd_global(size_t, PreTouchParallelChunkSize, 1 * G);
define_pd_global(bool, UseLargePages, false);
define_pd_global(bool, UseLargePagesIndividualAllocation, true);
define_pd_global(bool, UseThreadPriorities, true) ;
@@ -3215,9 +3215,10 @@ void os::split_reserved_memory(char *base, size_t size, size_t split) {
(attempt_reserve_memory_at(base, split) != NULL) &&
(attempt_reserve_memory_at(split_address, size - split) != NULL);
if (!rc) {
log_warning(os)("os::split_reserved_memory failed for [" RANGE_FORMAT ")",
log_warning(os)("os::split_reserved_memory failed for " RANGE_FORMAT,
RANGE_FORMAT_ARGS(base, size));
assert(false, "os::split_reserved_memory failed for [" RANGE_FORMAT ")",
os::print_memory_mappings(base, size, tty);
assert(false, "os::split_reserved_memory failed for " RANGE_FORMAT,
RANGE_FORMAT_ARGS(base, size));
}

@@ -5989,19 +5990,55 @@ bool os::win32::find_mapping(address addr, mapping_info_t* mi) {
return rc;
}

// Helper for print_one_mapping: print n words, both as hex and ascii.
// Use Safefetch for all values.
static void print_snippet(const void* p, outputStream* st) {
static const int num_words = LP64_ONLY(3) NOT_LP64(6);
static const int num_bytes = num_words * sizeof(int);
intptr_t v[num_words];
const int errval = 0xDE210244;
for (int i = 0; i < num_words; i++) {
v[i] = SafeFetchN((intptr_t*)p + i, errval);
if (v[i] == errval &&
SafeFetchN((intptr_t*)p + i, ~errval) == ~errval) {
return;
}
}
st->put('[');
for (int i = 0; i < num_words; i++) {
st->print(INTPTR_FORMAT " ", v[i]);
}
const char* b = (char*)v;
st->put('\"');
for (int i = 0; i < num_bytes; i++) {
st->put(::isgraph(b[i]) ? b[i] : '.');
}
st->put('\"');
st->put(']');
}

// Helper function for print_memory_mappings:
// Given a MEMORY_BASIC_INFORMATION, containing information about a non-free region:
// print out all regions in that allocation. If any of those regions
// fall outside the given range [start, end), indicate that in the output.
// Return the pointer to the end of the allocation.
static address print_one_mapping(MEMORY_BASIC_INFORMATION* minfo, address start, address end, outputStream* st) {
assert(start != NULL && end != NULL && end > start, "Sanity");
// Print it like this:
//
// Base: <xxxxx>: [xxxx - xxxx], state=MEM_xxx, prot=x, type=MEM_xxx (region 1)
// [xxxx - xxxx], state=MEM_xxx, prot=x, type=MEM_xxx (region 2)
assert(minfo->State != MEM_FREE, "Not inside an allocation.");
address allocation_base = (address)minfo->AllocationBase;
address last_region_end = NULL;
st->print_cr("AllocationBase: " PTR_FORMAT ":", allocation_base);
#define IS_IN(p) (p >= start && p < end)
bool first_line = true;
bool is_dll = false;
for(;;) {
if (first_line) {
st->print("Base " PTR_FORMAT ": ", p2i(allocation_base));
} else {
st->print_raw(NOT_LP64 (" ")
LP64_ONLY(" "));
}
address region_start = (address)minfo->BaseAddress;
address region_end = region_start + minfo->RegionSize;
assert(region_end > region_start, "Sanity");
@@ -6014,19 +6051,39 @@ static address print_one_mapping(MEMORY_BASIC_INFORMATION* minfo, address start,
}
st->print("[" PTR_FORMAT "-" PTR_FORMAT "), state=", p2i(region_start), p2i(region_end));
switch (minfo->State) {
case MEM_COMMIT: st->print("MEM_COMMIT"); break;
case MEM_FREE: st->print("MEM_FREE"); break;
case MEM_RESERVE: st->print("MEM_RESERVE"); break;
case MEM_COMMIT: st->print_raw("MEM_COMMIT "); break;
case MEM_FREE: st->print_raw("MEM_FREE "); break;
case MEM_RESERVE: st->print_raw("MEM_RESERVE"); break;
default: st->print("%x?", (unsigned)minfo->State);
}
st->print(", prot=%x, type=", (unsigned)minfo->AllocationProtect);
st->print(", prot=%3x, type=", (unsigned)minfo->Protect);
switch (minfo->Type) {
case MEM_IMAGE: st->print("MEM_IMAGE"); break;
case MEM_MAPPED: st->print("MEM_MAPPED"); break;
case MEM_PRIVATE: st->print("MEM_PRIVATE"); break;
case MEM_IMAGE: st->print_raw("MEM_IMAGE "); break;
case MEM_MAPPED: st->print_raw("MEM_MAPPED "); break;
case MEM_PRIVATE: st->print_raw("MEM_PRIVATE"); break;
default: st->print("%x?", (unsigned)minfo->State);
}
// At the start of every allocation, print some more information about this mapping.
// Notes:
// - this could be beefed up a lot, similar to os::print_location
// - for now we just query the allocation start point. This may be confusing for cases where
// the kernel merges multiple mappings.
if (first_line) {
char buf[MAX_PATH];
if (os::dll_address_to_library_name(allocation_base, buf, sizeof(buf), nullptr)) {
st->print(", %s", buf);
is_dll = true;
}
}
// If memory is accessible, and we do not know anything else about it, print a snippet
if (!is_dll &&
minfo->State == MEM_COMMIT &&
!(minfo->Protect & PAGE_NOACCESS || minfo->Protect & PAGE_GUARD)) {
st->print_raw(", ");
print_snippet(region_start, st);
}
st->cr();
// Next region...
bool rc = checkedVirtualQuery(region_end, minfo);
if (rc == false || // VirtualQuery error, end of allocation?
(minfo->State == MEM_FREE) || // end of allocation, free memory follows
@@ -6035,6 +6092,7 @@ static address print_one_mapping(MEMORY_BASIC_INFORMATION* minfo, address start,
{
return region_end;
}
first_line = false;
}
#undef IS_IN
ShouldNotReachHere();
@@ -6046,7 +6104,14 @@ void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {
address start = (address)addr;
address end = start + bytes;
address p = start;
while (p < end) {
if (p == nullptr) { // Lets skip the zero pages.
p += os::vm_allocation_granularity();
}
address p2 = p; // guard against wraparounds
int fuse = 0;

while (p < end && p >= p2) {
p2 = p;
// Probe for the next mapping.
if (checkedVirtualQuery(p, &minfo)) {
if (minfo.State != MEM_FREE) {
@@ -6064,8 +6129,24 @@ void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {
p = region_end;
}
} else {
// advance probe pointer.
p += os::vm_allocation_granularity();
// MSDN doc on VirtualQuery is unclear about what it means if it returns an error.
// In particular, whether querying an address outside any mappings would report
// a MEM_FREE region or just return an error. From experiments, it seems to return
// a MEM_FREE region for unmapped areas in valid address space and an error if we
// are outside valid address space.
// Here, we advance the probe pointer by alloc granularity. But if the range to print
// is large, this may take a long time. Therefore lets stop right away if the address
// is outside of what we know are valid addresses on Windows. Also, add a loop fuse.
static const address end_virt = (address)(LP64_ONLY(0x7ffffffffffULL) NOT_LP64(3*G));
if (p >= end_virt) {
break;
} else {
// Advance probe pointer, but with a fuse to break long loops.
if (fuse++ == 100000) {
break;
}
p += os::vm_allocation_granularity();
}
}
}
}
@@ -457,7 +457,7 @@ void CompilerConfig::ergo_initialize() {
#endif

#if INCLUDE_JVMCI
// Check that JVMCI compiler supports selested GC.
// Check that JVMCI supports selected GC.
// Should be done after GCConfig::initialize() was called.
JVMCIGlobals::check_jvmci_supported_gc();

@@ -200,9 +200,9 @@
product(bool, AlwaysPreTouch, false, \
"Force all freshly committed pages to be pre-touched") \
\
product(size_t, PreTouchParallelChunkSize, 1 * G, \
product_pd(size_t, PreTouchParallelChunkSize, \
"Per-thread chunk size for parallel memory pre-touch.") \
range(1, SIZE_MAX / 2) \
range(4*K, SIZE_MAX / 2) \
\
/* where does the range max value of (max_jint - 1) come from? */ \
product(size_t, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \
@@ -111,6 +111,11 @@ inline void ShenandoahConcurrentMark::do_chunked_array_start(ShenandoahObjToScan
objArrayOop array = objArrayOop(obj);
int len = array->length();

// Mark objArray klass metadata
if (Devirtualizer::do_metadata(cl)) {
Devirtualizer::do_klass(cl, array->klass());
}

if (len <= (int) ObjArrayMarkingStride*2) {
// A few slices only, process directly
array->oop_iterate_range(cl, 0, len);
@@ -122,6 +122,7 @@ class ShenandoahHeap : public CollectedHeap {
friend class ShenandoahGCSession;
friend class ShenandoahGCStateResetter;
friend class ShenandoahParallelObjectIterator;
friend class ShenandoahSafepoint;
// ---------- Locks that guard important data structures in Heap
//
private:
@@ -147,9 +147,15 @@ class ShenandoahSafepoint : public AllStatic {
static inline bool is_at_shenandoah_safepoint() {
if (!SafepointSynchronize::is_at_safepoint()) return false;

Thread* const thr = Thread::current();
// Shenandoah GC specific safepoints are scheduled by the control thread.
// So if we enter here from the control thread, then we are definitely not
// at a Shenandoah safepoint, but at something else.
if (thr == ShenandoahHeap::heap()->control_thread()) return false;

// This is not VM thread, cannot see what VM thread is doing,
// so pretend this is a proper Shenandoah safepoint
if (!Thread::current()->is_VM_thread()) return true;
if (!thr->is_VM_thread()) return true;

// Otherwise check we are at proper operation type
VM_Operation* vm_op = VMThread::vm_operation();
@@ -29,6 +29,7 @@
#include "interpreter/linkResolver.hpp"
#include "jvmci/compilerRuntime.hpp"
#include "oops/cpCache.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"

0 comments on commit 33a384e

Please sign in to comment.