diff --git a/doc/building.html b/doc/building.html
index 99eb3e0c473f1..19313ebf43a82 100644
--- a/doc/building.html
+++ b/doc/building.html
@@ -668,7 +668,7 @@
Microsoft Visual Studio
(Note that this version is often presented as "MSVC 14.28", and reported
by cl.exe as 19.28.) Older versions will not be accepted by
configure and will not work. The maximum accepted version
-of Visual Studio is 2022.
+of Visual Studio is 2026.
If you have multiple versions of Visual Studio installed,
configure will by default pick the latest. You can request
a specific version to be used by setting
diff --git a/doc/building.md b/doc/building.md
index 047255d184803..1fbd395a9d130 100644
--- a/doc/building.md
+++ b/doc/building.md
@@ -468,7 +468,7 @@ available for this update.
The minimum accepted version is Visual Studio 2019 version 16.8. (Note that
this version is often presented as "MSVC 14.28", and reported by cl.exe as
19.28.) Older versions will not be accepted by `configure` and will not work.
-The maximum accepted version of Visual Studio is 2022.
+The maximum accepted version of Visual Studio is 2026.
If you have multiple versions of Visual Studio installed, `configure` will by
default pick the latest. You can request a specific version to be used by
diff --git a/make/autoconf/toolchain_microsoft.m4 b/make/autoconf/toolchain_microsoft.m4
index 17ad2666b3ab8..f577cf1a2a1d6 100644
--- a/make/autoconf/toolchain_microsoft.m4
+++ b/make/autoconf/toolchain_microsoft.m4
@@ -25,7 +25,7 @@
################################################################################
# The order of these defines the priority by which we try to find them.
-VALID_VS_VERSIONS="2022 2019"
+VALID_VS_VERSIONS="2022 2019 2026"
VS_DESCRIPTION_2019="Microsoft Visual Studio 2019"
VS_VERSION_INTERNAL_2019=142
@@ -57,6 +57,21 @@ VS_SDK_PLATFORM_NAME_2022=
VS_SUPPORTED_2022=true
VS_TOOLSET_SUPPORTED_2022=true
+VS_DESCRIPTION_2026="Microsoft Visual Studio 2026"
+VS_VERSION_INTERNAL_2026=145
+VS_MSVCR_2026=vcruntime140.dll
+VS_VCRUNTIME_1_2026=vcruntime140_1.dll
+VS_MSVCP_2026=msvcp140.dll
+VS_ENVVAR_2026="VS180COMNTOOLS"
+VS_USE_UCRT_2026="true"
+VS_VS_INSTALLDIR_2026="Microsoft Visual Studio/18"
+VS_EDITIONS_2026="BuildTools Community Professional Enterprise"
+VS_SDK_INSTALLDIR_2026=
+VS_VS_PLATFORM_NAME_2026="v145"
+VS_SDK_PLATFORM_NAME_2026=
+VS_SUPPORTED_2026=true
+VS_TOOLSET_SUPPORTED_2026=true
+
################################################################################
AC_DEFUN([TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT],
diff --git a/make/hotspot/lib/CompileGtest.gmk b/make/hotspot/lib/CompileGtest.gmk
index d615e254f5a07..6091299213433 100644
--- a/make/hotspot/lib/CompileGtest.gmk
+++ b/make/hotspot/lib/CompileGtest.gmk
@@ -95,6 +95,7 @@ $(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBJVM, \
EXTRA_OBJECT_FILES := $(BUILD_LIBJVM_ALL_OBJS), \
DEFAULT_CFLAGS := false, \
CFLAGS := $(JVM_CFLAGS) \
+ -DHOTSPOT_GTEST \
-I$(GTEST_FRAMEWORK_SRC)/googletest/include \
-I$(GTEST_FRAMEWORK_SRC)/googlemock/include \
$(addprefix -I, $(GTEST_TEST_SRC)), \
diff --git a/make/jdk/src/classes/build/tools/taglet/SealedGraph.java b/make/jdk/src/classes/build/tools/taglet/SealedGraph.java
index 17867b99595bc..3e93826c1804d 100644
--- a/make/jdk/src/classes/build/tools/taglet/SealedGraph.java
+++ b/make/jdk/src/classes/build/tools/taglet/SealedGraph.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -219,13 +219,13 @@ public void addNode(TypeElement node) {
// This implies the module is always the same.
private String relativeLink(TypeElement node) {
var util = SealedGraph.this.docletEnvironment.getElementUtils();
- var rootPackage = util.getPackageOf(rootNode);
var nodePackage = util.getPackageOf(node);
- var backNavigator = rootPackage.getQualifiedName().toString().chars()
+ // Note: SVG files for nested types use the simple names of containing types as parent directories.
+ // We therefore need to convert all dots in the qualified name to "../" below.
+ var backNavigator = rootNode.getQualifiedName().toString().chars()
.filter(c -> c == '.')
.mapToObj(c -> "../")
- .collect(joining()) +
- "../";
+ .collect(joining());
var forwardNavigator = nodePackage.getQualifiedName().toString()
.replace(".", "/");
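The rewritten taglet logic maps every '.' in the root type's fully qualified name to one "../" segment, which covers the extra directory level each containing type adds for nested types. A minimal standalone sketch of that back-navigation rule (in C++ for illustration; the real code is the Java stream pipeline above, and the class names here are made up):

```cpp
// Illustrative C++ rendering of the taglet's new back-navigation rule:
// each '.' in the fully qualified name (package separators and nested-type
// separators alike) is one parent directory in the SVG output tree.
#include <iostream>
#include <string>

static std::string back_navigator(const std::string& qualified_name) {
  std::string result;
  for (char c : qualified_name) {
    if (c == '.') {
      result += "../";  // one level up per dot
    }
  }
  return result;
}

int main() {
  std::cout << back_navigator("com.example.Foo") << '\n';      // ../../
  std::cout << back_navigator("com.example.Foo.Bar") << '\n';  // ../../../ (extra level for Foo/)
}
```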
diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
index 5d4f0801ec62f..07a2d6fbfa0be 100644
--- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
@@ -879,7 +879,6 @@ void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format) {
ShouldNotReachHere();
}
- OrderAccess::fence();
ICache::invalidate_word((address)patch_addr);
}
diff --git a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
index c1eabed8ade82..dd70c98797f0b 100644
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
@@ -1375,7 +1375,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ ldr(r10, Address(rmethod, Method::native_function_offset()));
ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
__ lea(rscratch2, unsatisfied);
- __ ldr(rscratch2, rscratch2);
__ cmp(r10, rscratch2);
__ br(Assembler::NE, L);
__ call_VM(noreg,
diff --git a/src/hotspot/share/cds/aotArtifactFinder.hpp b/src/hotspot/share/cds/aotArtifactFinder.hpp
index 405222a8753e7..05bcde6b0ace5 100644
--- a/src/hotspot/share/cds/aotArtifactFinder.hpp
+++ b/src/hotspot/share/cds/aotArtifactFinder.hpp
@@ -39,7 +39,7 @@ class TypeArrayKlass;
// It also decides what Klasses must be cached in aot-initialized state.
//
// ArchiveBuilder uses [1] as roots to scan for all MetaspaceObjs that need to be cached.
-// ArchiveHeapWriter uses [2] to create an image of the archived heap.
+// HeapShared uses [2] to create an image of the archived heap.
//
// [1] is stored in _all_cached_classes in aotArtifactFinder.cpp.
// [2] is stored in HeapShared::archived_object_cache().
diff --git a/src/hotspot/share/cds/aotMapLogger.cpp b/src/hotspot/share/cds/aotMapLogger.cpp
index d0a63c56093e5..a252eae4b84c6 100644
--- a/src/hotspot/share/cds/aotMapLogger.cpp
+++ b/src/hotspot/share/cds/aotMapLogger.cpp
@@ -796,7 +796,7 @@ void AOTMapLogger::dumptime_log_mapped_heap_region(ArchiveMappedHeapInfo* heap_i
address buffer_start = address(r.start()); // start of the current oop inside the buffer
address buffer_end = address(r.end());
- address requested_base = UseCompressedOops ? (address)CompressedOops::base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
+ address requested_base = UseCompressedOops ? AOTMappedHeapWriter::narrow_oop_base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
address requested_start = UseCompressedOops ? AOTMappedHeapWriter::buffered_addr_to_requested_addr(buffer_start) : requested_base;
log_region_range("heap", buffer_start, buffer_end, requested_start);
diff --git a/src/hotspot/share/cds/aotMappedHeapWriter.cpp b/src/hotspot/share/cds/aotMappedHeapWriter.cpp
index ff9319d266ba9..98f400c989c78 100644
--- a/src/hotspot/share/cds/aotMappedHeapWriter.cpp
+++ b/src/hotspot/share/cds/aotMappedHeapWriter.cpp
@@ -55,7 +55,7 @@
GrowableArrayCHeap<u1, mtClassShared>* AOTMappedHeapWriter::_buffer = nullptr;
-// The following are offsets from buffer_bottom()
+bool AOTMappedHeapWriter::_is_writing_deterministic_heap = false;
size_t AOTMappedHeapWriter::_buffer_used;
// Heap root segments
@@ -74,7 +74,7 @@ AOTMappedHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
DumpedInternedStrings *AOTMappedHeapWriter::_dumped_interned_strings = nullptr;
typedef HashTable<
- size_t, // offset of a filler from ArchiveHeapWriter::buffer_bottom()
+ size_t, // offset of a filler from AOTMappedHeapWriter::buffer_bottom()
size_t, // size of this filler (in bytes)
127, // prime number
AnyObj::C_HEAP,
@@ -96,6 +96,45 @@ void AOTMappedHeapWriter::init() {
_source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);
guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
+
+ if (CDSConfig::old_cds_flags_used()) {
+ // With the old CDS workflow, we can guarantee deterministic output: given
+ // the same classlist file, we can generate the same static CDS archive.
+ // To ensure determinism, we always use the same compressed oop encoding
+ // (zero-based, no shift). See set_requested_address_range().
+ _is_writing_deterministic_heap = true;
+ } else {
+ // Deterministic output is not supported by the new AOT workflow, so
+ // we don't force the (zero-based, no shift) encoding. This way, it is more
+ // likely that we can avoid oop relocation in the production run.
+ _is_writing_deterministic_heap = false;
+ }
+ }
+}
+
+// For AOTMappedHeapWriter::narrow_oop_{mode, base, shift}(), see comments
+// in AOTMappedHeapWriter::set_requested_address_range().
+CompressedOops::Mode AOTMappedHeapWriter::narrow_oop_mode() {
+ if (is_writing_deterministic_heap()) {
+ return CompressedOops::UnscaledNarrowOop;
+ } else {
+ return CompressedOops::mode();
+ }
+}
+
+address AOTMappedHeapWriter::narrow_oop_base() {
+ if (is_writing_deterministic_heap()) {
+ return (address)0;
+ } else {
+ return CompressedOops::base();
+ }
+}
+
+int AOTMappedHeapWriter::narrow_oop_shift() {
+ if (is_writing_deterministic_heap()) {
+ return 0;
+ } else {
+ return CompressedOops::shift();
}
}
@@ -116,7 +155,7 @@ void AOTMappedHeapWriter::write(GrowableArrayCHeap* roots,
assert(CDSConfig::is_dumping_heap(), "sanity");
allocate_buffer();
copy_source_objs_to_buffer(roots);
- set_requested_address(heap_info);
+ set_requested_address_range(heap_info);
relocate_embedded_oops(roots, heap_info);
}
@@ -536,14 +575,55 @@ size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
return buffered_obj_offset;
}
-void AOTMappedHeapWriter::set_requested_address(ArchiveMappedHeapInfo* info) {
+// Set the range [_requested_bottom, _requested_top), the requested address range of all
+// the archived heap objects in the production run.
+//
+// (1) UseCompressedOops == true && !is_writing_deterministic_heap()
+//
+// The archived objects are stored using the COOPS encoding of the assembly phase.
+// We pick a range within the heap used by the assembly phase.
+//
+// In the production run, if a different COOPS encoding is used:
+// - The heap contents need to be relocated.
+//
+// (2) UseCompressedOops == true && is_writing_deterministic_heap()
+//
+// We always use zero-based, zero-shift encoding. _requested_top is aligned to 0x10000000.
+//
+// (3) UseCompressedOops == false:
+//
+// In the production run, the heap range is usually picked (randomly) by the OS, so we
+// will almost always need to perform relocation, regardless of how we pick the requested
+// address range.
+//
+// So we just hard code it to NOCOOPS_REQUESTED_BASE.
+//
+void AOTMappedHeapWriter::set_requested_address_range(ArchiveMappedHeapInfo* info) {
assert(!info->is_used(), "only set once");
size_t heap_region_byte_size = _buffer_used;
assert(heap_region_byte_size > 0, "must archived at least one object!");
if (UseCompressedOops) {
- if (UseG1GC) {
+ if (is_writing_deterministic_heap()) {
+ // Pick a heap range so that requested addresses can be encoded with zero-base/no shift.
+ // We align the requested bottom to at least 1 MB: if the production run uses G1 with a small
+ // heap (e.g., -Xmx256m), it's likely that we can map the archived objects at the
+ // requested location to avoid relocation.
+ //
+ // For other collectors or larger heaps, relocation is unavoidable, but is usually
+ // quite cheap. If you really want to avoid relocation, use the AOT workflow instead.
+ address heap_end = (address)0x100000000;
+ size_t alignment = MAX2(MIN_GC_REGION_ALIGNMENT, 1024 * 1024);
+ if (align_up(heap_region_byte_size, alignment) >= (size_t)heap_end) {
+ log_error(aot, heap)("cached heap space is too large: %zu bytes", heap_region_byte_size);
+ AOTMetaspace::unrecoverable_writing_error();
+ }
+ _requested_bottom = align_down(heap_end - heap_region_byte_size, alignment);
+ } else if (UseG1GC) {
+ // For G1, pick the range at the top of the current heap. If the exact same heap sizes
+ // are used in the production run, it's likely that we can map the archived objects
+ // at the requested location to avoid relocation.
address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
log_info(aot, heap)("Heap end = %p", heap_end);
_requested_bottom = align_down(heap_end - heap_region_byte_size, G1HeapRegion::GrainBytes);
@@ -612,7 +692,14 @@ oop AOTMappedHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
template <typename T> void AOTMappedHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, oop source_referent, CHeapBitMap* oopmap) {
oop request_referent = source_obj_to_requested_obj(source_referent);
- store_requested_oop_in_buffer(field_addr_in_buffer, request_referent);
+ if (UseCompressedOops && is_writing_deterministic_heap()) {
+ // We use zero-based, 0-shift encoding, so the narrowOop is just the lower
+ // 32 bits of request_referent
+ intptr_t addr = cast_from_oop<intptr_t>(request_referent);
+ *((narrowOop*)field_addr_in_buffer) = checked_cast<narrowOop>(addr);
+ } else {
+ store_requested_oop_in_buffer(field_addr_in_buffer, request_referent);
+ }
if (request_referent != nullptr) {
mark_oop_pointer(field_addr_in_buffer, oopmap);
}
@@ -918,9 +1005,9 @@ AOTMapLogger::OopDataIterator* AOTMappedHeapWriter::oop_iterator(ArchiveMappedHe
address buffer_start = address(r.start());
address buffer_end = address(r.end());
- address requested_base = UseCompressedOops ? (address)CompressedOops::base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
- address requested_start = UseCompressedOops ? buffered_addr_to_requested_addr(buffer_start) : requested_base;
- int requested_shift = CompressedOops::shift();
+ address requested_base = UseCompressedOops ? AOTMappedHeapWriter::narrow_oop_base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
+ address requested_start = UseCompressedOops ? AOTMappedHeapWriter::buffered_addr_to_requested_addr(buffer_start) : requested_base;
+ int requested_shift = AOTMappedHeapWriter::narrow_oop_shift();
intptr_t buffer_to_requested_delta = requested_start - buffer_start;
uint64_t buffer_start_narrow_oop = 0xdeadbeed;
if (UseCompressedOops) {
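The deterministic-heap hunks above hinge on one property: with zero base and zero shift, a narrowOop is simply the low 32 bits of the requested address, so the archive bytes depend only on the requested layout and never on where the assembly-phase heap happened to be reserved. A minimal sketch (not JDK code), assuming a 1 MB-aligned range just below 4 GB as in set_requested_address_range():

```cpp
// Sketch (not JDK code): zero-based, zero-shift compressed-oop encoding.
#include <cassert>
#include <cstdint>

using narrow_t = uint32_t;

static narrow_t encode(uint64_t requested_addr) {
  assert(requested_addr < (uint64_t(1) << 32));  // must sit below 4 GB
  return static_cast<narrow_t>(requested_addr);  // truncation is the encoding
}

static uint64_t decode(narrow_t n) {
  return uint64_t(n);  // no base to add, no shift to undo
}

int main() {
  const uint64_t heap_end  = uint64_t(1) << 32;  // 0x100000000, as in the hunk
  const uint64_t region    = 64u << 20;          // assume 64 MB of archived objects
  const uint64_t alignment = 1u << 20;           // MAX2(MIN_GC_REGION_ALIGNMENT, 1 MB)
  const uint64_t bottom    = (heap_end - region) & ~(alignment - 1);
  assert(decode(encode(bottom)) == bottom);      // round-trips exactly
}
```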
diff --git a/src/hotspot/share/cds/aotMappedHeapWriter.hpp b/src/hotspot/share/cds/aotMappedHeapWriter.hpp
index 9a85b83d3d187..eafd38ac8bb98 100644
--- a/src/hotspot/share/cds/aotMappedHeapWriter.hpp
+++ b/src/hotspot/share/cds/aotMappedHeapWriter.hpp
@@ -29,6 +29,7 @@
#include "cds/heapShared.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
+#include "oops/compressedOops.hpp"
#include "oops/oopHandle.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/exceptions.hpp"
@@ -71,7 +72,7 @@ class AOTMappedHeapWriter : AllStatic {
// These are entered into HeapShared::archived_object_cache().
//
// - "buffered objects" are copies of the "source objects", and are stored in into
- // ArchiveHeapWriter::_buffer, which is a GrowableArray that sits outside of
+ // AOTMappedHeapWriter::_buffer, which is a GrowableArray that sits outside of
// the valid heap range. Therefore we avoid using the addresses of these copies
// as oops. They are usually called "buffered_addr" in the code (of the type "address").
//
@@ -81,26 +82,11 @@ class AOTMappedHeapWriter : AllStatic {
// - Each archived object has a "requested address" -- at run time, if the object
// can be mapped at this address, we can avoid relocation.
//
- // The requested address is implemented differently depending on UseCompressedOops:
+ // The requested address of an archived object is essentially its buffered_addr + delta,
+ // where delta is (_requested_bottom - buffer_bottom());
//
- // UseCompressedOops == true:
- // The archived objects are stored assuming that the runtime COOPS compression
- // scheme is exactly the same as in dump time (or else a more expensive runtime relocation
- // would be needed.)
- //
- // At dump time, we assume that the runtime heap range is exactly the same as
- // in dump time. The requested addresses of the archived objects are chosen such that
- // they would occupy the top end of a G1 heap (TBD when dumping is supported by other
- // collectors. See JDK-8298614).
- //
- // UseCompressedOops == false:
- // At runtime, the heap range is usually picked (randomly) by the OS, so we will almost always
- // need to perform relocation. Hence, the goal of the "requested address" is to ensure that
- // the contents of the archived objects are deterministic. I.e., the oop fields of archived
- // objects will always point to deterministic addresses.
- //
- // For G1, the archived heap is written such that the lowest archived object is placed
- // at NOCOOPS_REQUESTED_BASE. (TBD after JDK-8298614).
+ // The requested addresses of all archived objects are within [_requested_bottom, _requested_top).
+ // See AOTMappedHeapWriter::set_requested_address_range() for more info.
// ----------------------------------------------------------------------
public:
@@ -111,6 +97,15 @@ class AOTMappedHeapWriter : AllStatic {
// Shenandoah heap region size can never be smaller than 256K.
static constexpr int MIN_GC_REGION_ALIGNMENT = 256 * K;
+ // The heap contents are required to be deterministic when dumping "old" CDS archives, in order
+ // to support reproducible lib/server/classes*.jsa when building the JDK.
+ static bool is_writing_deterministic_heap() { return _is_writing_deterministic_heap; }
+
+ // The oop encoding used by the archived heap objects.
+ static CompressedOops::Mode narrow_oop_mode();
+ static address narrow_oop_base();
+ static int narrow_oop_shift();
+
static const int INITIAL_TABLE_SIZE = 15889; // prime number
static const int MAX_TABLE_SIZE = 1000000;
@@ -121,6 +116,7 @@ class AOTMappedHeapWriter : AllStatic {
int _field_offset;
};
+ static bool _is_writing_deterministic_heap;
static GrowableArrayCHeap<u1, mtClassShared>* _buffer;
// The number of bytes that have written into _buffer (may be smaller than _buffer->length()).
@@ -130,15 +126,15 @@ class AOTMappedHeapWriter : AllStatic {
static HeapRootSegments _heap_root_segments;
// The address range of the requested location of the archived heap objects.
- static address _requested_bottom;
- static address _requested_top;
+ static address _requested_bottom; // The requested address of the lowest archived heap object
+ static address _requested_top; // The exclusive end of the highest archived heap object
static GrowableArrayCHeap<NativePointerInfo, mtClassShared>* _native_pointers;
static GrowableArrayCHeap<oop, mtClassShared>* _source_objs;
static DumpedInternedStrings *_dumped_interned_strings;
// We sort _source_objs_order to minimize the number of bits in ptrmap and oopmap.
- // See comments near the body of ArchiveHeapWriter::compare_objs_by_oop_fields().
+ // See comments near the body of AOTMappedHeapWriter::compare_objs_by_oop_fields().
// The objects will be written in the order of:
//_source_objs->at(_source_objs_order->at(0)._index)
// source_objs->at(_source_objs_order->at(1)._index)
@@ -200,7 +196,7 @@ class AOTMappedHeapWriter : AllStatic {
static int filler_array_length(size_t fill_bytes);
static HeapWord* init_filler_array_at_buffer_top(int array_length, size_t fill_bytes);
- static void set_requested_address(ArchiveMappedHeapInfo* info);
+ static void set_requested_address_range(ArchiveMappedHeapInfo* info);
static void mark_native_pointers(oop orig_obj);
static void relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots, ArchiveMappedHeapInfo* info);
static void compute_ptrmap(ArchiveMappedHeapInfo *info);
diff --git a/src/hotspot/share/cds/aotMetaspace.cpp b/src/hotspot/share/cds/aotMetaspace.cpp
index 8642b1a6de880..42d41e6ae896c 100644
--- a/src/hotspot/share/cds/aotMetaspace.cpp
+++ b/src/hotspot/share/cds/aotMetaspace.cpp
@@ -114,6 +114,7 @@ intx AOTMetaspace::_relocation_delta;
char* AOTMetaspace::_requested_base_address;
Array* AOTMetaspace::_archived_method_handle_intrinsics = nullptr;
bool AOTMetaspace::_use_optimized_module_handling = true;
+FileMapInfo* AOTMetaspace::_output_mapinfo = nullptr;
// The CDS archive is divided into the following regions:
// rw - read-write metadata
@@ -322,6 +323,24 @@ void AOTMetaspace::initialize_for_static_dump() {
AOTMetaspace::unrecoverable_writing_error();
}
_symbol_region.init(&_symbol_rs, &_symbol_vs);
+ if (CDSConfig::is_dumping_preimage_static_archive()) {
+ // We are in the AOT training run. User code is executed.
+ //
+ // On Windows, if the user code closes System.out and we open the AOT config file for output
+ // only at VM exit, we might get back the same file HANDLE as stdout, and the AOT config
+ // file may get corrupted by UL logs. By opening early, we ensure that the output
+ // HANDLE is different than stdout so we can avoid such corruption.
+ open_output_mapinfo();
+ } else {
+ // No need for the above as we won't execute any user code.
+ }
+}
+
+void AOTMetaspace::open_output_mapinfo() {
+ const char* static_archive = CDSConfig::output_archive_path();
+ assert(static_archive != nullptr, "sanity");
+ _output_mapinfo = new FileMapInfo(static_archive, true);
+ _output_mapinfo->open_as_output();
}
// Called by universe_post_init()
@@ -655,15 +674,14 @@ class VM_PopulateDumpSharedSpace : public VM_Operation {
public:
- VM_PopulateDumpSharedSpace(StaticArchiveBuilder& b) :
- VM_Operation(), _mapped_heap_info(), _streamed_heap_info(), _map_info(nullptr), _builder(b) {}
+ VM_PopulateDumpSharedSpace(StaticArchiveBuilder& b, FileMapInfo* map_info) :
+ VM_Operation(), _mapped_heap_info(), _streamed_heap_info(), _map_info(map_info), _builder(b) {}
bool skip_operation() const { return false; }
VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
ArchiveMappedHeapInfo* mapped_heap_info() { return &_mapped_heap_info; }
ArchiveStreamedHeapInfo* streamed_heap_info() { return &_streamed_heap_info; }
- FileMapInfo* map_info() const { return _map_info; }
void doit(); // outline because gdb sucks
bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace
@@ -795,12 +813,6 @@ void VM_PopulateDumpSharedSpace::doit() {
CppVtables::zero_archived_vtables();
// Write the archive file
- if (CDSConfig::is_dumping_final_static_archive()) {
- FileMapInfo::free_current_info(); // FIXME: should not free current info
- }
- const char* static_archive = CDSConfig::output_archive_path();
- assert(static_archive != nullptr, "sanity");
- _map_info = new FileMapInfo(static_archive, true);
_map_info->populate_header(AOTMetaspace::core_region_alignment());
_map_info->set_early_serialized_data(early_serialized_data);
_map_info->set_serialized_data(serialized_data);
@@ -1138,7 +1150,14 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
}
#endif
- VM_PopulateDumpSharedSpace op(builder);
+ if (!CDSConfig::is_dumping_preimage_static_archive()) {
+ if (CDSConfig::is_dumping_final_static_archive()) {
+ FileMapInfo::free_current_info(); // FIXME: should not free current info
+ }
+ open_output_mapinfo();
+ }
+
+ VM_PopulateDumpSharedSpace op(builder, _output_mapinfo);
VMThread::execute(&op);
if (AOTCodeCache::is_on_for_dump() && CDSConfig::is_dumping_final_static_archive()) {
@@ -1152,7 +1171,9 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
CDSConfig::disable_dumping_aot_code();
}
- bool status = write_static_archive(&builder, op.map_info(), op.mapped_heap_info(), op.streamed_heap_info());
+ bool status = write_static_archive(&builder, _output_mapinfo, op.mapped_heap_info(), op.streamed_heap_info());
+ assert(!_output_mapinfo->is_open(), "Must be closed already");
+ _output_mapinfo = nullptr;
if (status && CDSConfig::is_dumping_preimage_static_archive()) {
tty->print_cr("%s AOTConfiguration recorded: %s",
CDSConfig::has_temp_aot_config_file() ? "Temporary" : "", AOTConfiguration);
@@ -1173,11 +1194,10 @@ bool AOTMetaspace::write_static_archive(ArchiveBuilder* builder,
// relocate the data so that it can be mapped to AOTMetaspace::requested_base_address()
// without runtime relocation.
builder->relocate_to_requested();
-
- map_info->open_as_output();
if (!map_info->is_open()) {
return false;
}
+ map_info->prepare_for_writing();
builder->write_archive(map_info, mapped_heap_info, streamed_heap_info);
return true;
}
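The early open_output_mapinfo() call during the training run is motivated by handle reuse. A POSIX analogue (an assumption: the patch describes the equivalent Windows HANDLE case, and the file path here is made up) showing how a late open can alias a closed stdout:

```cpp
// POSIX sketch of the descriptor-reuse hazard.
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main() {
  close(1);  // user code closes stdout
  // open() returns the lowest free descriptor -- likely 1 here, so the
  // config file now aliases "stdout" and any logging corrupts it.
  int fd = open("app.aotconfig", O_CREAT | O_WRONLY, 0644);
  std::printf("this lands in the config file, not the console (fd=%d)\n", fd);
  // Opening the output file before user code runs avoids this aliasing.
  return 0;
}
```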
diff --git a/src/hotspot/share/cds/aotMetaspace.hpp b/src/hotspot/share/cds/aotMetaspace.hpp
index bfd9f4bcc7553..1712a7865adbf 100644
--- a/src/hotspot/share/cds/aotMetaspace.hpp
+++ b/src/hotspot/share/cds/aotMetaspace.hpp
@@ -60,6 +60,7 @@ class AOTMetaspace : AllStatic {
static char* _requested_base_address;
static bool _use_optimized_module_handling;
static Array* _archived_method_handle_intrinsics;
+ static FileMapInfo* _output_mapinfo;
public:
enum {
@@ -185,6 +186,7 @@ class AOTMetaspace : AllStatic {
private:
static void read_extra_data(JavaThread* current, const char* filename) NOT_CDS_RETURN;
static void fork_and_dump_final_static_archive(TRAPS);
+ static void open_output_mapinfo();
static bool write_static_archive(ArchiveBuilder* builder,
FileMapInfo* map_info,
ArchiveMappedHeapInfo* mapped_heap_info,
diff --git a/src/hotspot/share/cds/dynamicArchive.cpp b/src/hotspot/share/cds/dynamicArchive.cpp
index 85e59e23f8cc0..8fae8dabf8cd7 100644
--- a/src/hotspot/share/cds/dynamicArchive.cpp
+++ b/src/hotspot/share/cds/dynamicArchive.cpp
@@ -353,6 +353,7 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data, AOTClassLocatio
assert(dynamic_info != nullptr, "Sanity");
dynamic_info->open_as_output();
+ dynamic_info->prepare_for_writing();
ArchiveBuilder::write_archive(dynamic_info, nullptr, nullptr);
address base = _requested_dynamic_archive_bottom;
diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp
index ae92ce3105843..0eeb96bb2692f 100644
--- a/src/hotspot/share/cds/filemap.cpp
+++ b/src/hotspot/share/cds/filemap.cpp
@@ -216,12 +216,14 @@ void FileMapHeader::populate(FileMapInfo *info, size_t core_region_alignment,
_obj_alignment = ObjectAlignmentInBytes;
_compact_strings = CompactStrings;
_compact_headers = UseCompactObjectHeaders;
+#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_heap()) {
_object_streaming_mode = HeapShared::is_writing_streaming_mode();
- _narrow_oop_mode = CompressedOops::mode();
- _narrow_oop_base = CompressedOops::base();
- _narrow_oop_shift = CompressedOops::shift();
+ _narrow_oop_mode = AOTMappedHeapWriter::narrow_oop_mode();
+ _narrow_oop_base = AOTMappedHeapWriter::narrow_oop_base();
+ _narrow_oop_shift = AOTMappedHeapWriter::narrow_oop_shift();
}
+#endif
_compressed_oops = UseCompressedOops;
_compressed_class_ptrs = UseCompressedClassPointers;
if (UseCompressedClassPointers) {
@@ -777,7 +779,9 @@ void FileMapInfo::open_as_output() {
}
_fd = fd;
_file_open = true;
+}
+void FileMapInfo::prepare_for_writing() {
// Seek past the header. We will write the header after all regions are written
// and their CRCs computed.
size_t header_bytes = header()->header_size();
@@ -911,7 +915,7 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
if (HeapShared::is_writing_mapping_mode()) {
requested_base = (char*)AOTMappedHeapWriter::requested_address();
if (UseCompressedOops) {
- mapping_offset = (size_t)((address)requested_base - CompressedOops::base());
+ mapping_offset = (size_t)((address)requested_base - AOTMappedHeapWriter::narrow_oop_base());
assert((mapping_offset >> CompressedOops::shift()) << CompressedOops::shift() == mapping_offset, "must be");
}
} else {
diff --git a/src/hotspot/share/cds/filemap.hpp b/src/hotspot/share/cds/filemap.hpp
index 2a761843e473b..fbd3c8e16811f 100644
--- a/src/hotspot/share/cds/filemap.hpp
+++ b/src/hotspot/share/cds/filemap.hpp
@@ -365,6 +365,7 @@ class FileMapInfo : public CHeapObj {
// File manipulation.
bool open_as_input() NOT_CDS_RETURN_(false);
void open_as_output();
+ void prepare_for_writing();
void write_header();
void write_region(int region, char* base, size_t size,
bool read_only, bool allow_exec);
diff --git a/src/hotspot/share/cds/heapShared.hpp b/src/hotspot/share/cds/heapShared.hpp
index 2c782f7231bd7..118c60faa6011 100644
--- a/src/hotspot/share/cds/heapShared.hpp
+++ b/src/hotspot/share/cds/heapShared.hpp
@@ -332,7 +332,7 @@ class HeapShared: AllStatic {
// Used by CDSHeapVerifier.
OopHandle _orig_referrer;
- // The location of this object inside ArchiveHeapWriter::_buffer
+ // The location of this object inside {AOTMappedHeapWriter, AOTStreamedHeapWriter}::_buffer
size_t _buffer_offset;
// One or more fields in this object are pointing to non-null oops.
diff --git a/src/hotspot/share/ci/ciInstanceKlass.cpp b/src/hotspot/share/ci/ciInstanceKlass.cpp
index 9bbf005356cec..64b9acf9146a1 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp
@@ -605,7 +605,7 @@ bool ciInstanceKlass::is_leaf_type() {
if (is_shared()) {
return is_final(); // approximately correct
} else {
- return !has_subklass() && (nof_implementors() == 0);
+ return !has_subklass() && (!is_interface() || nof_implementors() == 0);
}
}
@@ -619,6 +619,7 @@ bool ciInstanceKlass::is_leaf_type() {
// This is OK, since any dependencies we decide to assert
// will be checked later under the Compile_lock.
ciInstanceKlass* ciInstanceKlass::implementor() {
+ assert(is_interface(), "required");
ciInstanceKlass* impl = _implementor;
if (impl == nullptr) {
if (is_shared()) {
diff --git a/src/hotspot/share/ci/ciInstanceKlass.hpp b/src/hotspot/share/ci/ciInstanceKlass.hpp
index ec8fc789c7d19..1f887771f5413 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.hpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.hpp
@@ -259,6 +259,7 @@ class ciInstanceKlass : public ciKlass {
ciInstanceKlass* unique_implementor() {
assert(is_loaded(), "must be loaded");
+ assert(is_interface(), "must be");
ciInstanceKlass* impl = implementor();
return (impl != this ? impl : nullptr);
}
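The sharpened predicate and the new asserts encode that implementor counts are meaningful only for interfaces; the old code consulted nof_implementors() for concrete classes too, wrongly rejecting them as leaf types. A standalone truth-table sketch (hypothetical struct, not ci code):

```cpp
// Standalone sketch of the sharpened leaf-type predicate.
#include <cassert>

struct KlassInfo {
  bool is_interface;
  bool has_subklass;
  int  nof_implementors;  // only meaningful when is_interface is true
};

static bool is_leaf_type(const KlassInfo& k) {
  return !k.has_subklass && (!k.is_interface || k.nof_implementors == 0);
}

int main() {
  // A concrete class with no subclasses is a leaf even if the (meaningless
  // for classes) implementor field happens to be nonzero.
  assert(is_leaf_type({/*is_interface*/ false, /*has_subklass*/ false, 7}));
  // An interface with a live implementor is not a leaf.
  assert(!is_leaf_type({true, false, 1}));
}
```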
diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp
index 082c745f4c36a..12fbda899b9d1 100644
--- a/src/hotspot/share/classfile/classLoader.cpp
+++ b/src/hotspot/share/classfile/classLoader.cpp
@@ -412,31 +412,30 @@ ClassFileStream* ClassPathImageEntry::open_stream(JavaThread* current, const cha
//
ClassFileStream* ClassPathImageEntry::open_stream_for_loader(JavaThread* current, const char* name, ClassLoaderData* loader_data) {
jlong size;
- JImageLocationRef location = (*JImageFindResource)(jimage_non_null(), "", get_jimage_version_string(), name, &size);
+ JImageLocationRef location = 0;
- if (location == 0) {
- TempNewSymbol class_name = SymbolTable::new_symbol(name);
- TempNewSymbol pkg_name = ClassLoader::package_from_class_name(class_name);
+ TempNewSymbol class_name = SymbolTable::new_symbol(name);
+ TempNewSymbol pkg_name = ClassLoader::package_from_class_name(class_name);
- if (pkg_name != nullptr) {
- if (!Universe::is_module_initialized()) {
- location = (*JImageFindResource)(jimage_non_null(), JAVA_BASE_NAME, get_jimage_version_string(), name, &size);
- } else {
- PackageEntry* package_entry = ClassLoader::get_package_entry(pkg_name, loader_data);
- if (package_entry != nullptr) {
- ResourceMark rm(current);
- // Get the module name
- ModuleEntry* module = package_entry->module();
- assert(module != nullptr, "Boot classLoader package missing module");
- assert(module->is_named(), "Boot classLoader package is in unnamed module");
- const char* module_name = module->name()->as_C_string();
- if (module_name != nullptr) {
- location = (*JImageFindResource)(jimage_non_null(), module_name, get_jimage_version_string(), name, &size);
- }
+ if (pkg_name != nullptr) {
+ if (!Universe::is_module_initialized()) {
+ location = (*JImageFindResource)(jimage_non_null(), JAVA_BASE_NAME, get_jimage_version_string(), name, &size);
+ } else {
+ PackageEntry* package_entry = ClassLoader::get_package_entry(pkg_name, loader_data);
+ if (package_entry != nullptr) {
+ ResourceMark rm(current);
+ // Get the module name
+ ModuleEntry* module = package_entry->module();
+ assert(module != nullptr, "Boot classLoader package missing module");
+ assert(module->is_named(), "Boot classLoader package is in unnamed module");
+ const char* module_name = module->name()->as_C_string();
+ if (module_name != nullptr) {
+ location = (*JImageFindResource)(jimage_non_null(), module_name, get_jimage_version_string(), name, &size);
}
}
}
}
+
if (location != 0) {
if (UsePerfData) {
ClassLoader::perf_sys_classfile_bytes_read()->inc(size);
diff --git a/src/hotspot/share/code/relocInfo.cpp b/src/hotspot/share/code/relocInfo.cpp
index 286d407c94b2c..2a6335e21182b 100644
--- a/src/hotspot/share/code/relocInfo.cpp
+++ b/src/hotspot/share/code/relocInfo.cpp
@@ -26,6 +26,7 @@
#include "code/compiledIC.hpp"
#include "code/nmethod.hpp"
#include "code/relocInfo.hpp"
+#include "cppstdlib/new.hpp"
#include "cppstdlib/type_traits.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
@@ -37,8 +38,6 @@
#include "utilities/checkedCast.hpp"
#include "utilities/copy.hpp"
-#include <new>
-
const RelocationHolder RelocationHolder::none; // its type is relocInfo::none
diff --git a/src/hotspot/share/code/relocInfo.hpp b/src/hotspot/share/code/relocInfo.hpp
index a6a08815d1055..6f1778ef479c4 100644
--- a/src/hotspot/share/code/relocInfo.hpp
+++ b/src/hotspot/share/code/relocInfo.hpp
@@ -25,6 +25,7 @@
#ifndef SHARE_CODE_RELOCINFO_HPP
#define SHARE_CODE_RELOCINFO_HPP
+#include "cppstdlib/new.hpp"
#include "memory/allocation.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/osInfo.hpp"
@@ -32,8 +33,6 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
-#include <new>
-
class CodeBlob;
class Metadata;
class NativeMovConstReg;
diff --git a/src/hotspot/share/cppstdlib/new.hpp b/src/hotspot/share/cppstdlib/new.hpp
new file mode 100644
index 0000000000000..3536ac13288f1
--- /dev/null
+++ b/src/hotspot/share/cppstdlib/new.hpp
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_CPPSTDLIB_NEW_HPP
+#define SHARE_CPPSTDLIB_NEW_HPP
+
+#include "utilities/compilerWarnings.hpp"
+
+// HotSpot usage:
+// Only the following may be used:
+// * std::nothrow_t, std::nothrow
+// * std::align_val_t
+// * The non-allocating forms of `operator new` and `operator new[]` are
+// implicitly used by the corresponding `new` and `new[]` expressions.
+// - operator new(size_t, void*) noexcept
+// - operator new[](size_t, void*) noexcept
+// Note that the non-allocating forms of `operator delete` and `operator
+// delete[]` are not used, since they are only invoked by a placement new
+// expression that fails by throwing an exception. But they might still
+// end up being referenced in such a situation.
+
+BEGIN_ALLOW_FORBIDDEN_FUNCTIONS
+#include "utilities/vmassert_uninstall.hpp"
+
+#include <new>
+
+#include "utilities/vmassert_reinstall.hpp" // don't reorder
+END_ALLOW_FORBIDDEN_FUNCTIONS
+
+// Deprecation declarations to forbid use of the default global allocator.
+// See C++17 21.6.1 Header synopsis.
+
+namespace std {
+
+#if 0
+// We could deprecate exception types, for completeness, but don't bother. We
+// already have exceptions disabled, and run into compiler bugs when we try.
+//
+// gcc -Wattributes => type attributes ignored after type is already defined
+// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=122167
+//
+// clang -Wignored-attributes => attribute declaration must precede definition
+// The clang warning is https://github.com/llvm/llvm-project/issues/135481,
+// which should be fixed in clang 21.
+class [[deprecated]] bad_alloc;
+class [[deprecated]] bad_array_new_length;
+#endif // #if 0
+
+// Forbid new_handler manipulation by HotSpot code, leaving it untouched for
+// use by application code.
+[[deprecated]] new_handler get_new_handler() noexcept;
+[[deprecated]] new_handler set_new_handler(new_handler) noexcept;
+
+// Prefer HotSpot mechanisms for padding.
+//
+// The syntax for redeclaring these for deprecation is tricky, and not
+// supported by some versions of some compilers. Dispatch on compiler and
+// version to decide whether to redeclare deprecated.
+
+#if defined(__clang__)
+#if __clang_major__ >= 19
+// clang18 and earlier may accept the declaration but go wrong with uses.
+// Different warnings and link-time failures are both possible.
+#define CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES 1
+#endif // restrict clang version
+
+#elif defined(__GNUC__)
+#if (__GNUC__ > 13) || (__GNUC__ == 13 && __GNUC_MINOR__ >= 2)
+// g++11.5 accepts the declaration and reports deprecation for uses, but also
+// has link-time failure for uses. Haven't tested intermediate versions.
+#define CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES 1
+#endif // restrict gcc version
+
+#elif defined(_MSC_VER)
+// VS2022-17.13.2 => error C2370: '...': redefinition; different storage class
+
+#endif // Compiler dispatch
+
+// Redeclare deprecated if such is supported.
+#ifdef CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES
+[[deprecated]] extern const size_t hardware_destructive_interference_size;
+[[deprecated]] extern const size_t hardware_constructive_interference_size;
+#undef CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES
+#endif // CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES
+
+} // namespace std
+
+// Forbid using the global allocator by HotSpot code.
+// This doesn't provide complete coverage. Some global allocation and
+// deallocation functions are implicitly declared in all translation units,
+// without needing to include <new>; see C++17 6.7.4. So this doesn't remove
+// the need for the link-time verification that these functions aren't used.
+//
+// But don't poison them when compiling gtests. The gtest framework, the
+// HotSpot wrapper around it (gtestMain.cpp), and even some tests, all have
+// new/new[] and delete/delete[] expressions that use the default global
+// allocator. We also don't apply the link-time check for gtests, for the
+// same reason.
+#ifndef HOTSPOT_GTEST
+
+[[deprecated]] void* operator new(std::size_t);
+[[deprecated]] void* operator new(std::size_t, std::align_val_t);
+[[deprecated]] void* operator new(std::size_t, const std::nothrow_t&) noexcept;
+[[deprecated]] void* operator new(std::size_t, std::align_val_t,
+ const std::nothrow_t&) noexcept;
+
+[[deprecated]] void operator delete(void*) noexcept;
+[[deprecated]] void operator delete(void*, std::size_t) noexcept;
+[[deprecated]] void operator delete(void*, std::align_val_t) noexcept;
+[[deprecated]] void operator delete(void*, std::size_t, std::align_val_t) noexcept;
+[[deprecated]] void operator delete(void*, const std::nothrow_t&) noexcept;
+[[deprecated]] void operator delete(void*, std::align_val_t,
+ const std::nothrow_t&) noexcept;
+
+[[deprecated]] void* operator new[](std::size_t);
+[[deprecated]] void* operator new[](std::size_t, std::align_val_t);
+[[deprecated]] void* operator new[](std::size_t, const std::nothrow_t&) noexcept;
+[[deprecated]] void* operator new[](std::size_t, std::align_val_t,
+ const std::nothrow_t&) noexcept;
+
+[[deprecated]] void operator delete[](void*) noexcept;
+[[deprecated]] void operator delete[](void*, std::size_t) noexcept;
+[[deprecated]] void operator delete[](void*, std::align_val_t) noexcept;
+[[deprecated]] void operator delete[](void*, std::size_t, std::align_val_t) noexcept;
+[[deprecated]] void operator delete[](void*, const std::nothrow_t&) noexcept;
+[[deprecated]] void operator delete[](void*, std::align_val_t,
+ const std::nothrow_t&) noexcept;
+
+#endif // HOTSPOT_GTEST
+
+// Allow (don't poison) the non-allocating forms from [new.delete.placement].
+
+#endif // SHARE_CPPSTDLIB_NEW_HPP
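For illustration, a hypothetical HotSpot-side fragment (names made up) showing what the poisoning above allows and forbids; the gtest exemption matches the -DHOTSPOT_GTEST flag added in CompileGtest.gmk earlier in this patch:

```cpp
#include "cppstdlib/new.hpp"

void example() {
  // int* p = new int(42);       // would warn: global operator new is deprecated
  alignas(int) char buf[sizeof(int)];
  int* q = new (buf) int(42);    // OK: non-allocating placement form is allowed
  (void)q;
  // gtest builds compile with -DHOTSPOT_GTEST, which skips the poisoning
  // because the gtest framework itself uses the global allocator.
}
```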
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
index ebaea3ecba457..747e2f3228ce8 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -370,6 +370,55 @@ void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
}
+bool ParallelScavengeHeap::should_attempt_young_gc() const {
+ const bool ShouldRunYoungGC = true;
+ const bool ShouldRunFullGC = false;
+
+ if (!_young_gen->to_space()->is_empty()) {
+ log_debug(gc, ergo)("To-space is not empty; run full-gc instead.");
+ return ShouldRunFullGC;
+ }
+
+ // Check if the predicted promoted bytes will overflow free space in old-gen.
+ PSAdaptiveSizePolicy* policy = _size_policy;
+
+ size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
+ size_t promotion_estimate = MIN2(avg_promoted, _young_gen->used_in_bytes());
+ // Total free size after possible old gen expansion
+ size_t free_in_old_gen_with_expansion = _old_gen->max_gen_size() - _old_gen->used_in_bytes();
+
+ log_trace(gc, ergo)("average_promoted %zu; padded_average_promoted %zu",
+ (size_t) policy->average_promoted_in_bytes(),
+ (size_t) policy->padded_average_promoted_in_bytes());
+
+ if (promotion_estimate >= free_in_old_gen_with_expansion) {
+ log_debug(gc, ergo)("Run full-gc; predicted promotion size >= max free space in old-gen: %zu >= %zu",
+ promotion_estimate, free_in_old_gen_with_expansion);
+ return ShouldRunFullGC;
+ }
+
+ if (UseAdaptiveSizePolicy) {
+ // Also check that the OS has enough free memory to commit and expand old-gen.
+ // Otherwise, the recorded gc-pause-time might be inflated to include the time
+ // the OS spends preparing free memory, resulting in inaccurate young-gen resizing.
+ assert(_old_gen->committed().byte_size() >= _old_gen->used_in_bytes(), "inv");
+ // Use uint64_t instead of size_t for 32bit compatibility.
+ uint64_t free_mem_in_os;
+ if (os::free_memory(free_mem_in_os)) {
+ size_t actual_free = (size_t)MIN2(_old_gen->committed().byte_size() - _old_gen->used_in_bytes() + free_mem_in_os,
+ (uint64_t)SIZE_MAX);
+ if (promotion_estimate > actual_free) {
+ log_debug(gc, ergo)("Run full-gc; predicted promotion size > free space in old-gen and OS: %zu > %zu",
+ promotion_estimate, actual_free);
+ return ShouldRunFullGC;
+ }
+ }
+ }
+
+ // No particular reasons to run full-gc, so young-gc.
+ return ShouldRunYoungGC;
+}
+
static bool check_gc_heap_free_limit(size_t free_bytes, size_t capacity_bytes) {
return (free_bytes * 100 / capacity_bytes) < GCHeapFreeLimit;
}
@@ -516,17 +565,18 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
VMThread::execute(&op);
}
-void ParallelScavengeHeap::collect_at_safepoint(bool full) {
+void ParallelScavengeHeap::collect_at_safepoint(bool is_full) {
assert(!GCLocker::is_active(), "precondition");
bool clear_soft_refs = GCCause::should_clear_all_soft_refs(_gc_cause);
- if (!full) {
- bool success = PSScavenge::invoke(clear_soft_refs);
- if (success) {
+ if (!is_full && should_attempt_young_gc()) {
+ bool young_gc_success = PSScavenge::invoke(clear_soft_refs);
+ if (young_gc_success) {
return;
}
- // Upgrade to Full-GC if young-gc fails
+ log_debug(gc, heap)("Upgrade to Full-GC since Young-gc failed.");
}
+
const bool should_do_max_compaction = false;
PSParallelCompact::invoke(clear_soft_refs, should_do_max_compaction);
}
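The promotion-overflow check moved into should_attempt_young_gc() reduces to comparing a clamped promotion estimate against the old generation's free space after maximal expansion. A standalone sketch with assumed numbers (not JDK code) showing a case where a full GC is preferred:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t padded_avg_promoted = 96ull << 20;   // 96 MB, assumed history
  const uint64_t young_used          = 64ull << 20;   // 64 MB live in young gen
  const uint64_t old_max             = 512ull << 20;  // old-gen capacity limit
  const uint64_t old_used            = 480ull << 20;  // old-gen bytes in use

  // Can't promote more than the young gen even holds.
  const uint64_t promotion_estimate  = std::min(padded_avg_promoted, young_used);
  const uint64_t free_with_expansion = old_max - old_used;  // 32 MB

  // 64 MB >= 32 MB: a young GC would likely fail promotion and be upgraded
  // to a full GC anyway, so prefer the full GC directly.
  std::printf("prefer full gc: %d\n", promotion_estimate >= free_with_expansion);
}
```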
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
index f9161afc28f26..0221fd2a90e42 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
@@ -119,6 +119,9 @@ class ParallelScavengeHeap : public CollectedHeap {
void print_tracing_info() const override;
void stop() override {};
+ // Returns true if a young GC should be attempted, false if a full GC is preferred.
+ bool should_attempt_young_gc() const;
+
public:
ParallelScavengeHeap() :
CollectedHeap(),
diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp
index e738a13d464db..d1d595df52945 100644
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp
@@ -313,12 +313,6 @@ bool PSScavenge::invoke(bool clear_soft_refs) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
- // Check for potential problems.
- if (!should_attempt_scavenge()) {
- log_info(gc, ergo)("Young-gc might fail so skipping");
- return false;
- }
-
IsSTWGCActiveMark mark;
_gc_timer.register_gc_start();
@@ -336,8 +330,7 @@ bool PSScavenge::invoke(bool clear_soft_refs) {
PSOldGen* old_gen = heap->old_gen();
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
- assert(young_gen->to_space()->is_empty(),
- "Attempt to scavenge with live objects in to_space");
+ assert(young_gen->to_space()->is_empty(), "precondition");
heap->increment_total_collections();
@@ -520,59 +513,6 @@ void PSScavenge::clean_up_failed_promotion() {
NOT_PRODUCT(ParallelScavengeHeap::heap()->reset_promotion_should_fail();)
}
-bool PSScavenge::should_attempt_scavenge() {
- const bool ShouldRunYoungGC = true;
- const bool ShouldRunFullGC = false;
-
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- PSYoungGen* young_gen = heap->young_gen();
- PSOldGen* old_gen = heap->old_gen();
-
- if (!young_gen->to_space()->is_empty()) {
- log_debug(gc, ergo)("To-space is not empty; run full-gc instead.");
- return ShouldRunFullGC;
- }
-
- // Check if the predicted promoted bytes will overflow free space in old-gen.
- PSAdaptiveSizePolicy* policy = heap->size_policy();
-
- size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
- size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
- // Total free size after possible old gen expansion
- size_t free_in_old_gen_with_expansion = old_gen->max_gen_size() - old_gen->used_in_bytes();
-
- log_trace(gc, ergo)("average_promoted %zu; padded_average_promoted %zu",
- (size_t) policy->average_promoted_in_bytes(),
- (size_t) policy->padded_average_promoted_in_bytes());
-
- if (promotion_estimate >= free_in_old_gen_with_expansion) {
- log_debug(gc, ergo)("Run full-gc; predicted promotion size >= max free space in old-gen: %zu >= %zu",
- promotion_estimate, free_in_old_gen_with_expansion);
- return ShouldRunFullGC;
- }
-
- if (UseAdaptiveSizePolicy) {
- // Also checking OS has enough free memory to commit and expand old-gen.
- // Otherwise, the recorded gc-pause-time might be inflated to include time
- // of OS preparing free memory, resulting in inaccurate young-gen resizing.
- assert(old_gen->committed().byte_size() >= old_gen->used_in_bytes(), "inv");
- // Use uint64_t instead of size_t for 32bit compatibility.
- uint64_t free_mem_in_os;
- if (os::free_memory(free_mem_in_os)) {
- size_t actual_free = (size_t)MIN2(old_gen->committed().byte_size() - old_gen->used_in_bytes() + free_mem_in_os,
- (uint64_t)SIZE_MAX);
- if (promotion_estimate > actual_free) {
- log_debug(gc, ergo)("Run full-gc; predicted promotion size > free space in old-gen and OS: %zu > %zu",
- promotion_estimate, actual_free);
- return ShouldRunFullGC;
- }
- }
- }
-
- // No particular reasons to run full-gc, so young-gc.
- return ShouldRunYoungGC;
-}
-
// Adaptive size policy support.
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
_young_generation_boundary = v;
diff --git a/src/hotspot/share/gc/parallel/psScavenge.hpp b/src/hotspot/share/gc/parallel/psScavenge.hpp
index c297a46a46e44..af9b91f74bcf1 100644
--- a/src/hotspot/share/gc/parallel/psScavenge.hpp
+++ b/src/hotspot/share/gc/parallel/psScavenge.hpp
@@ -64,8 +64,6 @@ class PSScavenge: AllStatic {
static void clean_up_failed_promotion();
- static bool should_attempt_scavenge();
-
// Private accessors
static PSCardTable* card_table() { assert(_card_table != nullptr, "Sanity"); return _card_table; }
static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; }
diff --git a/src/hotspot/share/gc/shared/bufferNode.cpp b/src/hotspot/share/gc/shared/bufferNode.cpp
index b064f9c7efedb..90e50f52e84ad 100644
--- a/src/hotspot/share/gc/shared/bufferNode.cpp
+++ b/src/hotspot/share/gc/shared/bufferNode.cpp
@@ -22,12 +22,11 @@
*
*/
+#include "cppstdlib/new.hpp"
#include "gc/shared/bufferNode.hpp"
#include "memory/allocation.inline.hpp"
#include "utilities/debug.hpp"
-#include <new>
-
BufferNode::AllocatorConfig::AllocatorConfig(size_t size)
: _buffer_capacity(size)
{
diff --git a/src/hotspot/share/gc/shared/oopStorage.cpp b/src/hotspot/share/gc/shared/oopStorage.cpp
index d52efc13dacaf..a1cc3ffa553f0 100644
--- a/src/hotspot/share/gc/shared/oopStorage.cpp
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp
@@ -28,7 +28,7 @@
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "nmt/memTracker.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@@ -122,7 +122,7 @@ OopStorage::ActiveArray::ActiveArray(size_t size) :
{}
OopStorage::ActiveArray::~ActiveArray() {
- assert(_refcount == 0, "precondition");
+ assert(_refcount.load_relaxed() == 0, "precondition");
}
OopStorage::ActiveArray* OopStorage::ActiveArray::create(size_t size,
@@ -144,32 +144,32 @@ size_t OopStorage::ActiveArray::size() const {
}
size_t OopStorage::ActiveArray::block_count() const {
- return _block_count;
+ return _block_count.load_relaxed();
}
size_t OopStorage::ActiveArray::block_count_acquire() const {
- return AtomicAccess::load_acquire(&_block_count);
+ return _block_count.load_acquire();
}
void OopStorage::ActiveArray::increment_refcount() const {
- int new_value = AtomicAccess::add(&_refcount, 1);
- assert(new_value >= 1, "negative refcount %d", new_value - 1);
+ int old_value = _refcount.fetch_then_add(1);
+ assert(old_value >= 0, "negative refcount %d", old_value);
}
bool OopStorage::ActiveArray::decrement_refcount() const {
- int new_value = AtomicAccess::sub(&_refcount, 1);
+ int new_value = _refcount.sub_then_fetch(1);
assert(new_value >= 0, "negative refcount %d", new_value);
return new_value == 0;
}
bool OopStorage::ActiveArray::push(Block* block) {
- size_t index = _block_count;
+ size_t index = _block_count.load_relaxed();
if (index < _size) {
block->set_active_index(index);
*block_ptr(index) = block;
// Use a release_store to ensure all the setup is complete before
// making the block visible.
- AtomicAccess::release_store(&_block_count, index + 1);
+ _block_count.release_store(index + 1);
return true;
} else {
return false;
@@ -177,19 +177,19 @@ bool OopStorage::ActiveArray::push(Block* block) {
}
void OopStorage::ActiveArray::remove(Block* block) {
- assert(_block_count > 0, "array is empty");
+ assert(_block_count.load_relaxed() > 0, "array is empty");
size_t index = block->active_index();
assert(*block_ptr(index) == block, "block not present");
- size_t last_index = _block_count - 1;
+ size_t last_index = _block_count.load_relaxed() - 1;
Block* last_block = *block_ptr(last_index);
last_block->set_active_index(index);
*block_ptr(index) = last_block;
- _block_count = last_index;
+ _block_count.store_relaxed(last_index);
}
void OopStorage::ActiveArray::copy_from(const ActiveArray* from) {
- assert(_block_count == 0, "array must be empty");
- size_t count = from->_block_count;
+ assert(_block_count.load_relaxed() == 0, "array must be empty");
+ size_t count = from->_block_count.load_relaxed();
assert(count <= _size, "precondition");
Block* const* from_ptr = from->block_ptr(0);
Block** to_ptr = block_ptr(0);
@@ -198,7 +198,7 @@ void OopStorage::ActiveArray::copy_from(const ActiveArray* from) {
assert(block->active_index() == i, "invariant");
*to_ptr++ = block;
}
- _block_count = count;
+ _block_count.store_relaxed(count);
}
// Blocks start with an array of BitsPerWord oop entries. That array
@@ -230,14 +230,17 @@ OopStorage::Block::Block(const OopStorage* owner, void* memory) :
assert(is_aligned(this, block_alignment), "misaligned block");
}
+#ifdef ASSERT
OopStorage::Block::~Block() {
- assert(_release_refcount == 0, "deleting block while releasing");
- assert(_deferred_updates_next == nullptr, "deleting block with deferred update");
+ assert(_release_refcount.load_relaxed() == 0, "deleting block while releasing");
+ assert(_deferred_updates_next.load_relaxed() == nullptr, "deleting block with deferred update");
// Clear fields used by block_for_ptr and entry validation, which
- // might help catch bugs. Volatile to prevent dead-store elimination.
- const_cast<volatile uintx&>(_allocated_bitmask) = 0;
+ // might help catch bugs.
+ _allocated_bitmask.store_relaxed(0);
+ // Volatile to prevent dead-store elimination.
const_cast<volatile intptr_t&>(_owner_address) = 0;
}
+#endif // ASSERT
size_t OopStorage::Block::allocation_size() {
// _data must be first member, so aligning Block aligns _data.
@@ -272,16 +275,16 @@ uintx OopStorage::Block::bitmask_for_entry(const oop* ptr) const {
bool OopStorage::Block::is_safe_to_delete() const {
assert(is_empty(), "precondition");
OrderAccess::loadload();
- return (AtomicAccess::load_acquire(&_release_refcount) == 0) &&
- (AtomicAccess::load_acquire(&_deferred_updates_next) == nullptr);
+ return ((_release_refcount.load_acquire() == 0) &&
+ (_deferred_updates_next.load_acquire() == nullptr));
}
OopStorage::Block* OopStorage::Block::deferred_updates_next() const {
- return _deferred_updates_next;
+ return _deferred_updates_next.load_relaxed();
}
void OopStorage::Block::set_deferred_updates_next(Block* block) {
- _deferred_updates_next = block;
+ _deferred_updates_next.store_relaxed(block);
}
bool OopStorage::Block::contains(const oop* ptr) const {
@@ -321,9 +324,8 @@ void OopStorage::Block::atomic_add_allocated(uintx add) {
// we can use an atomic add to implement the operation. The assert post
// facto verifies the precondition held; if there were any set bits in
// common, then after the add at least one of them will be zero.
- uintx sum = AtomicAccess::add(&_allocated_bitmask, add);
- assert((sum & add) == add, "some already present: %zu:%zu",
- sum, add);
+ uintx sum = _allocated_bitmask.add_then_fetch(add);
+ assert((sum & add) == add, "some already present: %zu:%zu", sum, add);
}
oop* OopStorage::Block::allocate() {
@@ -452,7 +454,7 @@ oop* OopStorage::allocate() {
oop* result = block->allocate();
assert(result != nullptr, "allocation failed");
assert(!block->is_empty(), "postcondition");
- AtomicAccess::inc(&_allocation_count); // release updates outside lock.
+ _allocation_count.add_then_fetch(1u); // release updates outside lock.
if (block->is_full()) {
// Transitioning from not full to full.
// Remove full blocks from consideration by future allocates.
@@ -490,7 +492,7 @@ size_t OopStorage::allocate(oop** ptrs, size_t size) {
assert(!is_empty_bitmask(taken), "invariant");
} // Drop lock, now that we've taken all available entries from block.
size_t num_taken = population_count(taken);
- AtomicAccess::add(&_allocation_count, num_taken);
+ _allocation_count.add_then_fetch(num_taken);
// Fill ptrs from those taken entries.
size_t limit = MIN2(num_taken, size);
for (size_t i = 0; i < limit; ++i) {
@@ -506,7 +508,7 @@ size_t OopStorage::allocate(oop** ptrs, size_t size) {
assert(size == limit, "invariant");
assert(num_taken == (limit + population_count(taken)), "invariant");
block->release_entries(taken, this);
- AtomicAccess::sub(&_allocation_count, num_taken - limit);
+ _allocation_count.sub_then_fetch(num_taken - limit);
}
log_trace(oopstorage, ref)("%s: bulk allocate %zu, returned %zu",
name(), limit, num_taken - limit);
@@ -527,9 +529,9 @@ bool OopStorage::try_add_block() {
if (block == nullptr) return false;
// Add new block to the _active_array, growing if needed.
- if (!_active_array->push(block)) {
+ if (!_active_array.load_relaxed()->push(block)) {
if (expand_active_array()) {
- guarantee(_active_array->push(block), "push failed after expansion");
+ guarantee(_active_array.load_relaxed()->push(block), "push failed after expansion");
} else {
log_debug(oopstorage, blocks)("%s: failed active array expand", name());
Block::delete_block(*block);
@@ -576,7 +578,7 @@ OopStorage::Block* OopStorage::block_for_allocation() {
// indicate allocation failure.
bool OopStorage::expand_active_array() {
assert_lock_strong(_allocation_mutex);
- ActiveArray* old_array = _active_array;
+ ActiveArray* old_array = _active_array.load_relaxed();
size_t new_size = 2 * old_array->size();
log_debug(oopstorage, blocks)("%s: expand active array %zu",
name(), new_size);
@@ -599,7 +601,7 @@ void OopStorage::replace_active_array(ActiveArray* new_array) {
// Update new_array refcount to account for the new reference.
new_array->increment_refcount();
// Install new_array, ensuring its initialization is complete first.
- AtomicAccess::release_store(&_active_array, new_array);
+ _active_array.release_store(new_array);
// Wait for any readers that could read the old array from _active_array.
// Can't use GlobalCounter here, because this is called from allocate(),
// which may be called in the scope of a GlobalCounter critical section
@@ -617,7 +619,7 @@ void OopStorage::replace_active_array(ActiveArray* new_array) {
// using it.
OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
SingleWriterSynchronizer::CriticalSection cs(&_protect_active);
- ActiveArray* result = AtomicAccess::load_acquire(&_active_array);
+ ActiveArray* result = _active_array.load_acquire();
result->increment_refcount();
return result;
}
@@ -625,7 +627,7 @@ OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
// Decrement refcount of array and destroy if refcount is zero.
void OopStorage::relinquish_block_array(ActiveArray* array) const {
if (array->decrement_refcount()) {
- assert(array != _active_array, "invariant");
+ assert(array != _active_array.load_relaxed(), "invariant");
ActiveArray::destroy(array);
}
}
@@ -672,14 +674,14 @@ static void log_release_transitions(uintx releasing,
void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
assert(releasing != 0, "preconditon");
// Prevent empty block deletion when transitioning to empty.
- AtomicAccess::inc(&_release_refcount);
+ _release_refcount.add_then_fetch(1u);
// Atomically update allocated bitmask.
- uintx old_allocated = _allocated_bitmask;
+ uintx old_allocated = _allocated_bitmask.load_relaxed();
while (true) {
assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
uintx new_value = old_allocated ^ releasing;
- uintx fetched = AtomicAccess::cmpxchg(&_allocated_bitmask, old_allocated, new_value);
+ uintx fetched = _allocated_bitmask.compare_exchange(old_allocated, new_value);
if (fetched == old_allocated) break; // Successful update.
old_allocated = fetched; // Retry with updated bitmask.
}
@@ -698,12 +700,12 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
// then someone else has made such a claim and the deferred update has not
// yet been processed and will include our change, so we don't need to do
// anything further.
- if (AtomicAccess::replace_if_null(&_deferred_updates_next, this)) {
+ if (_deferred_updates_next.compare_exchange(nullptr, this) == nullptr) {
// Successfully claimed. Push, with self-loop for end-of-list.
- Block* head = owner->_deferred_updates;
+ Block* head = owner->_deferred_updates.load_relaxed();
while (true) {
- _deferred_updates_next = (head == nullptr) ? this : head;
- Block* fetched = AtomicAccess::cmpxchg(&owner->_deferred_updates, head, this);
+ _deferred_updates_next.store_relaxed((head == nullptr) ? this : head);
+ Block* fetched = owner->_deferred_updates.compare_exchange(head, this);
if (fetched == head) break; // Successful update.
head = fetched; // Retry with updated head.
}
@@ -720,7 +722,7 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
}
}
// Release hold on empty block deletion.
- AtomicAccess::dec(&_release_refcount);
+ _release_refcount.sub_then_fetch(1u);
}
// Process one available deferred update. Returns true if one was processed.
@@ -729,13 +731,13 @@ bool OopStorage::reduce_deferred_updates() {
// Atomically pop a block off the list, if any available.
// No ABA issue because this is only called by one thread at a time.
// The atomicity is wrto pushes by release().
- Block* block = AtomicAccess::load_acquire(&_deferred_updates);
+ Block* block = _deferred_updates.load_acquire();
while (true) {
if (block == nullptr) return false;
// Try atomic pop of block from list.
Block* tail = block->deferred_updates_next();
if (block == tail) tail = nullptr; // Handle self-loop end marker.
- Block* fetched = AtomicAccess::cmpxchg(&_deferred_updates, block, tail);
+ Block* fetched = _deferred_updates.compare_exchange(block, tail);
if (fetched == block) break; // Update successful.
block = fetched; // Retry with updated block.
}
@@ -780,7 +782,7 @@ void OopStorage::release(const oop* ptr) {
assert(block != nullptr, "%s: invalid release " PTR_FORMAT, name(), p2i(ptr));
log_trace(oopstorage, ref)("%s: releasing " PTR_FORMAT, name(), p2i(ptr));
block->release_entries(block->bitmask_for_entry(ptr), this);
- AtomicAccess::dec(&_allocation_count);
+ _allocation_count.sub_then_fetch(1u);
}
void OopStorage::release(const oop* const* ptrs, size_t size) {
@@ -806,7 +808,7 @@ void OopStorage::release(const oop* const* ptrs, size_t size) {
}
// Release the contiguous entries that are in block.
block->release_entries(releasing, this);
- AtomicAccess::sub(&_allocation_count, count);
+ _allocation_count.sub_then_fetch(count);
}
}
@@ -837,7 +839,7 @@ OopStorage::OopStorage(const char* name, MemTag mem_tag) :
_mem_tag(mem_tag),
_needs_cleanup(false)
{
- _active_array->increment_refcount();
+ _active_array.load_relaxed()->increment_refcount();
assert(_active_mutex->rank() < _allocation_mutex->rank(),
"%s: active_mutex must have lower rank than allocation_mutex", _name);
assert(Service_lock->rank() < _active_mutex->rank(),
@@ -852,20 +854,21 @@ void OopStorage::delete_empty_block(const Block& block) {
OopStorage::~OopStorage() {
Block* block;
- while ((block = _deferred_updates) != nullptr) {
- _deferred_updates = block->deferred_updates_next();
+ while ((block = _deferred_updates.load_relaxed()) != nullptr) {
+ _deferred_updates.store_relaxed(block->deferred_updates_next());
block->set_deferred_updates_next(nullptr);
}
while ((block = _allocation_list.head()) != nullptr) {
_allocation_list.unlink(*block);
}
- bool unreferenced = _active_array->decrement_refcount();
+ ActiveArray* array = _active_array.load_relaxed();
+ bool unreferenced = array->decrement_refcount();
assert(unreferenced, "deleting storage while _active_array is referenced");
- for (size_t i = _active_array->block_count(); 0 < i; ) {
- block = _active_array->at(--i);
+ for (size_t i = array->block_count(); 0 < i; ) {
+ block = array->at(--i);
Block::delete_block(*block);
}
- ActiveArray::destroy(_active_array);
+ ActiveArray::destroy(array);
os::free(const_cast<char*>(_name));
}
@@ -894,7 +897,7 @@ bool OopStorage::should_report_num_dead() const {
// face of frequent explicit ServiceThread wakeups, hence the defer period.
// Global cleanup request state.
-static volatile bool needs_cleanup_requested = false;
+static Atomic<bool> needs_cleanup_requested{false};
// Time after which a cleanup is permitted.
static jlong cleanup_permit_time = 0;
@@ -906,12 +909,11 @@ const jlong cleanup_defer_period = 500 * NANOSECS_PER_MILLISEC;
bool OopStorage::has_cleanup_work_and_reset() {
assert_lock_strong(Service_lock);
- if (AtomicAccess::load_acquire(&needs_cleanup_requested) &&
- os::javaTimeNanos() > cleanup_permit_time) {
- cleanup_permit_time =
- os::javaTimeNanos() + cleanup_defer_period;
+ if (needs_cleanup_requested.load_acquire() &&
+ (os::javaTimeNanos() > cleanup_permit_time)) {
+ cleanup_permit_time = os::javaTimeNanos() + cleanup_defer_period;
// Set the request flag false and return its old value.
- AtomicAccess::release_store(&needs_cleanup_requested, false);
+ needs_cleanup_requested.release_store(false);
return true;
} else {
return false;
@@ -923,22 +925,22 @@ bool OopStorage::has_cleanup_work_and_reset() {
void OopStorage::record_needs_cleanup() {
// Set local flag first, else ServiceThread could wake up and miss
// the request.
- AtomicAccess::release_store(&_needs_cleanup, true);
- AtomicAccess::release_store_fence(&needs_cleanup_requested, true);
+ _needs_cleanup.release_store(true);
+ needs_cleanup_requested.release_store_fence(true);
}
bool OopStorage::delete_empty_blocks() {
// ServiceThread might have oopstorage work, but not for this object.
// But check for deferred updates, which might provide cleanup work.
- if (!AtomicAccess::load_acquire(&_needs_cleanup) &&
- (AtomicAccess::load_acquire(&_deferred_updates) == nullptr)) {
+ if (!_needs_cleanup.load_acquire() &&
+ (_deferred_updates.load_acquire() == nullptr)) {
return false;
}
MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Clear the request before processing.
- AtomicAccess::release_store_fence(&_needs_cleanup, false);
+ _needs_cleanup.release_store_fence(false);
// Other threads could be adding to the empty block count or the
// deferred update list while we're working. Set an upper bound on
@@ -977,7 +979,7 @@ bool OopStorage::delete_empty_blocks() {
// but don't re-notify, to avoid useless spinning of the
// ServiceThread. Instead, iteration completion notifies.
if (_concurrent_iteration_count > 0) return true;
- _active_array->remove(block);
+ _active_array.load_relaxed()->remove(block);
}
// Remove block from _allocation_list and delete it.
_allocation_list.unlink(*block);
@@ -1001,8 +1003,9 @@ OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Block could be a false positive, so get index carefully.
size_t index = Block::active_index_safe(block);
- if ((index < _active_array->block_count()) &&
- (block == _active_array->at(index)) &&
+ ActiveArray* array = _active_array.load_relaxed();
+ if ((index < array->block_count()) &&
+ (block == array->at(index)) &&
block->contains(ptr)) {
if ((block->allocated_bitmask() & block->bitmask_for_entry(ptr)) != 0) {
return ALLOCATED_ENTRY;
@@ -1015,7 +1018,7 @@ OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
}
size_t OopStorage::allocation_count() const {
- return _allocation_count;
+ return _allocation_count.load_relaxed();
}
size_t OopStorage::block_count() const {
@@ -1084,7 +1087,7 @@ void OopStorage::BasicParState::update_concurrent_iteration_count(int value) {
bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
data->_processed += data->_segment_end - data->_segment_start;
- size_t start = AtomicAccess::load_acquire(&_next_block);
+ size_t start = _next_block.load_acquire();
if (start >= _block_count) {
return finish_iteration(data); // No more blocks available.
}
@@ -1097,11 +1100,11 @@ bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
size_t max_step = 10;
size_t remaining = _block_count - start;
size_t step = MIN2(max_step, 1 + (remaining / _estimated_thread_count));
- // AtomicAccess::add with possible overshoot. This can perform better
+ // Atomic add with possible overshoot. This can perform better
// than a CAS loop on some platforms when there is contention.
// We can cope with the uncertainty by recomputing start/end from
// the result of the add, and dealing with potential overshoot.
- size_t end = AtomicAccess::add(&_next_block, step);
+ size_t end = _next_block.add_then_fetch(step);
// _next_block may have changed, so recompute start from result of add.
start = end - step;
// _next_block may have changed so much that end has overshot.
@@ -1128,15 +1131,15 @@ bool OopStorage::BasicParState::finish_iteration(const IterationData* data) cons
}
size_t OopStorage::BasicParState::num_dead() const {
- return AtomicAccess::load(&_num_dead);
+ return _num_dead.load_relaxed();
}
void OopStorage::BasicParState::increment_num_dead(size_t num_dead) {
- AtomicAccess::add(&_num_dead, num_dead);
+ _num_dead.add_then_fetch(num_dead);
}
void OopStorage::BasicParState::report_num_dead() const {
- _storage->report_num_dead(AtomicAccess::load(&_num_dead));
+ _storage->report_num_dead(_num_dead.load_relaxed());
}
const char* OopStorage::name() const { return _name; }
@@ -1164,8 +1167,8 @@ bool OopStorage::Block::print_containing(const oop* addr, outputStream* st) {
#ifndef PRODUCT
void OopStorage::print_on(outputStream* st) const {
- size_t allocations = _allocation_count;
- size_t blocks = _active_array->block_count();
+ size_t allocations = _allocation_count.load_relaxed();
+ size_t blocks = _active_array.load_relaxed()->block_count();
double data_size = section_size * section_count;
double alloc_percentage = percent_of((double)allocations, blocks * data_size);
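
The claim_next_segment hunk above keeps the add-with-overshoot scheme: a single atomic add claims a whole segment, and any overshoot past the block count is repaired afterwards instead of being avoided with a CAS retry loop. A minimal stand-alone sketch of that technique, using std::atomic in place of HotSpot's Atomic<T> (all names below are illustrative, not from the patch):

```cpp
#include <algorithm>
#include <atomic>
#include <cstddef>

// Claim a segment [start, end) of block indexes with a single atomic add.
// Under contention the add may overshoot block_count; callers recompute
// start from the add's result and clamp end, rather than retrying a CAS.
struct SegmentClaimer {
  std::atomic<std::size_t> next_block{0};

  // Returns false when no blocks remain to claim.
  bool claim(std::size_t step, std::size_t block_count,
             std::size_t& start, std::size_t& end) {
    end = next_block.fetch_add(step, std::memory_order_relaxed) + step;
    start = end - step;                // recompute start from the result
    if (start >= block_count) {
      return false;                    // another thread took the rest
    }
    end = std::min(end, block_count);  // deal with potential overshoot
    return true;
  }
};
```
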
diff --git a/src/hotspot/share/gc/shared/oopStorage.hpp b/src/hotspot/share/gc/shared/oopStorage.hpp
index 34c980a058659..6097eeaa4f479 100644
--- a/src/hotspot/share/gc/shared/oopStorage.hpp
+++ b/src/hotspot/share/gc/shared/oopStorage.hpp
@@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/singleWriterSynchronizer.hpp"
@@ -258,15 +259,15 @@ class OopStorage : public CHeapObjBase {
private:
const char* _name;
- ActiveArray* _active_array;
+  Atomic<ActiveArray*> _active_array;
AllocationList _allocation_list;
- Block* volatile _deferred_updates;
+  Atomic<Block*> _deferred_updates;
Mutex* _allocation_mutex;
Mutex* _active_mutex;
NumDeadCallback _num_dead_callback;
- // Volatile for racy unlocked accesses.
- volatile size_t _allocation_count;
+ // Atomic for racy unlocked accesses.
+  Atomic<size_t> _allocation_count;
// Protection for _active_array.
mutable SingleWriterSynchronizer _protect_active;
@@ -278,7 +279,7 @@ class OopStorage : public CHeapObjBase {
MemTag _mem_tag;
// Flag indicating this storage object is a candidate for empty block deletion.
- volatile bool _needs_cleanup;
+  Atomic<bool> _needs_cleanup;
// Clients construct via "create" factory function.
OopStorage(const char* name, MemTag mem_tag);
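
For readers tracking the migration, the Atomic<T> members above replace `volatile` fields plus free-function AtomicAccess calls with member functions that carry the memory ordering in their names. A rough stand-alone model of those accessors in terms of std::atomic (the real runtime/atomic.hpp differs in detail, so treat this strictly as a sketch):

```cpp
#include <atomic>

// Approximate std::atomic analogue of the Atomic<T> accessors used above,
// for integral or pointer-free T.
template <typename T>
class AtomicSketch {
  std::atomic<T> _value;
public:
  explicit AtomicSketch(T v) : _value(v) {}
  T load_relaxed() const  { return _value.load(std::memory_order_relaxed); }
  void store_relaxed(T v) { _value.store(v, std::memory_order_relaxed); }
  T load_acquire() const  { return _value.load(std::memory_order_acquire); }
  void release_store(T v) { _value.store(v, std::memory_order_release); }
  void release_store_fence(T v) {
    _value.store(v, std::memory_order_release);
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }
  T add_then_fetch(T i) { return _value.fetch_add(i) + i; }
  // Like HotSpot's cmpxchg: returns the value found, which equals
  // expected exactly when the exchange succeeded.
  T compare_exchange(T expected, T desired) {
    _value.compare_exchange_strong(expected, desired);
    return expected;  // updated to the observed value on failure
  }
};
```
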
diff --git a/src/hotspot/share/gc/shared/oopStorage.inline.hpp b/src/hotspot/share/gc/shared/oopStorage.inline.hpp
index 4fb1d8fcaf126..c2747781a6b88 100644
--- a/src/hotspot/share/gc/shared/oopStorage.inline.hpp
+++ b/src/hotspot/share/gc/shared/oopStorage.inline.hpp
@@ -30,6 +30,7 @@
#include "cppstdlib/type_traits.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
#include "utilities/count_trailing_zeros.hpp"
@@ -42,8 +43,8 @@ class OopStorage::ActiveArray {
friend class OopStorage::TestAccess;
size_t _size;
- volatile size_t _block_count;
- mutable volatile int _refcount;
+  Atomic<size_t> _block_count;
+  mutable Atomic<int> _refcount;
// Block* _blocks[1]; // Pseudo flexible array member.
ActiveArray(size_t size);
@@ -104,7 +105,7 @@ inline OopStorage::Block** OopStorage::ActiveArray::block_ptr(size_t index) {
}
inline OopStorage::Block* OopStorage::ActiveArray::at(size_t index) const {
- assert(index < _block_count, "precondition");
+ assert(index < _block_count.load_relaxed(), "precondition");
return *block_ptr(index);
}
@@ -135,16 +136,16 @@ class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
oop _data[BitsPerWord];
static const unsigned _data_pos = 0; // Position of _data.
- volatile uintx _allocated_bitmask; // One bit per _data element.
+  Atomic<uintx> _allocated_bitmask; // One bit per _data element.
intptr_t _owner_address;
void* _memory; // Unaligned storage containing block.
size_t _active_index;
AllocationListEntry _allocation_list_entry;
- Block* volatile _deferred_updates_next;
- volatile uintx _release_refcount;
+  Atomic<Block*> _deferred_updates_next;
+  Atomic<uintx> _release_refcount;
Block(const OopStorage* owner, void* memory);
- ~Block();
+ ~Block() NOT_DEBUG(= default);
void check_index(unsigned index) const;
unsigned get_index(const oop* ptr) const;
@@ -322,7 +323,7 @@ inline const oop* OopStorage::Block::get_pointer(unsigned index) const {
}
inline uintx OopStorage::Block::allocated_bitmask() const {
- return _allocated_bitmask;
+ return _allocated_bitmask.load_relaxed();
}
inline uintx OopStorage::Block::bitmask_for_index(unsigned index) const {
@@ -366,7 +367,7 @@ inline bool OopStorage::iterate_impl(F f, Storage* storage) {
// Propagate const/non-const iteration to the block layer, by using
// const or non-const blocks as corresponding to Storage.
using BlockPtr = std::conditional_t<std::is_const<Storage>::value, const Block*, Block*>;
- ActiveArray* blocks = storage->_active_array;
+ ActiveArray* blocks = storage->_active_array.load_relaxed();
size_t limit = blocks->block_count();
for (size_t i = 0; i < limit; ++i) {
BlockPtr block = blocks->at(i);
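
The repaired BlockPtr alias above is a small metaprogramming trick: std::conditional_t propagates the constness of the Storage template parameter to the block pointers the iteration hands out. A self-contained illustration of the same idiom (the types here are invented for the example):

```cpp
#include <type_traits>

struct Block { int payload = 0; };

struct Storage {
  Block _block;
  Block* block() { return &_block; }
  const Block* block() const { return &_block; }
};

// A const S yields const Block*, a non-const S yields Block*.
template <typename S>
void iterate(S* storage) {
  using BlockPtr =
      std::conditional_t<std::is_const<S>::value, const Block*, Block*>;
  BlockPtr block = storage->block();
  (void)block->payload;  // reads work either way; writes need non-const S
}

int main() {
  Storage s;
  const Storage cs;
  iterate(&s);   // BlockPtr = Block*
  iterate(&cs);  // BlockPtr = const Block*
}
```
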
diff --git a/src/hotspot/share/gc/shared/oopStorageParState.hpp b/src/hotspot/share/gc/shared/oopStorageParState.hpp
index 046bf9de8c239..cad1a1f0cf62c 100644
--- a/src/hotspot/share/gc/shared/oopStorageParState.hpp
+++ b/src/hotspot/share/gc/shared/oopStorageParState.hpp
@@ -27,6 +27,7 @@
#include "cppstdlib/type_traits.hpp"
#include "gc/shared/oopStorage.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
//////////////////////////////////////////////////////////////////////////////
@@ -131,10 +132,10 @@ class OopStorage::BasicParState {
const OopStorage* _storage;
ActiveArray* _active_array;
size_t _block_count;
- volatile size_t _next_block;
+  Atomic<size_t> _next_block;
uint _estimated_thread_count;
bool _concurrent;
- volatile size_t _num_dead;
+  Atomic<size_t> _num_dead;
NONCOPYABLE(BasicParState);
diff --git a/src/hotspot/share/gc/shared/partialArrayState.cpp b/src/hotspot/share/gc/shared/partialArrayState.cpp
index f913f3db4ba76..39c1fe4fc783e 100644
--- a/src/hotspot/share/gc/shared/partialArrayState.cpp
+++ b/src/hotspot/share/gc/shared/partialArrayState.cpp
@@ -22,6 +22,7 @@
*
*/
+#include "cppstdlib/new.hpp"
#include "gc/shared/partialArrayState.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
@@ -33,8 +34,6 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
-#include <new>
-
PartialArrayState::PartialArrayState(oop src, oop dst,
size_t index, size_t length,
size_t initial_refcount)
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
index 3a99023eca42a..34713898fc637 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
@@ -378,24 +378,20 @@ HeapWord* ShenandoahCardCluster::first_object_start(const size_t card_index, con
// evacuation phase) of young collections. This is never called
// during global collections during marking or update refs..
// 4. Every allocation under TAMS updates the object start array.
+#ifdef ASSERT
oop obj = cast_to_oop(p);
assert(oopDesc::is_oop(obj), "Should be an object");
-#ifdef ASSERT
-#define WALK_FORWARD_IN_BLOCK_START true
-#else
-#define WALK_FORWARD_IN_BLOCK_START false
-#endif // ASSERT
- while (WALK_FORWARD_IN_BLOCK_START && p + obj->size() < left) {
+ while (p + obj->size() < left) {
p += obj->size();
obj = cast_to_oop(p);
assert(oopDesc::is_oop(obj), "Should be an object");
assert(Klass::is_valid(obj->klass()), "Not a valid klass ptr");
// Check assumptions in previous block comment if this assert fires
- guarantee(false, "Should never need forward walk in block start");
+ fatal("Should never need forward walk in block start");
}
-#undef WALK_FORWARD_IN_BLOCK_START
assert(p <= left, "p should start at or before left end of card");
assert(p + obj->size() > left, "obj should end after left end of card");
+#endif // ASSERT
return p;
}
diff --git a/src/hotspot/share/gc/z/zDeferredConstructed.inline.hpp b/src/hotspot/share/gc/z/zDeferredConstructed.inline.hpp
index d6d35ecddcd82..f686bc78d1540 100644
--- a/src/hotspot/share/gc/z/zDeferredConstructed.inline.hpp
+++ b/src/hotspot/share/gc/z/zDeferredConstructed.inline.hpp
@@ -27,10 +27,9 @@
#include "gc/z/zDeferredConstructed.hpp"
+#include "cppstdlib/new.hpp"
#include "cppstdlib/type_traits.hpp"
-#include <new>
-
template <typename T>
inline ZDeferredConstructed<T>::ZDeferredConstructed()
DEBUG_ONLY(: _initialized(false)) {
diff --git a/src/hotspot/share/memory/allocation.hpp b/src/hotspot/share/memory/allocation.hpp
index 35180fdba5e01..963ca04aadf96 100644
--- a/src/hotspot/share/memory/allocation.hpp
+++ b/src/hotspot/share/memory/allocation.hpp
@@ -25,14 +25,13 @@
#ifndef SHARE_MEMORY_ALLOCATION_HPP
#define SHARE_MEMORY_ALLOCATION_HPP
+#include "cppstdlib/new.hpp"
#include "memory/allStatic.hpp"
#include "nmt/memTag.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
-#include <new>
-
class outputStream;
class Thread;
class JavaThread;
diff --git a/src/hotspot/share/memory/arena.cpp b/src/hotspot/share/memory/arena.cpp
index b9968083e0e85..2de3f837c001a 100644
--- a/src/hotspot/share/memory/arena.cpp
+++ b/src/hotspot/share/memory/arena.cpp
@@ -24,6 +24,7 @@
*/
#include "compiler/compilationMemoryStatistic.hpp"
+#include "cppstdlib/new.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
#include "memory/resourceArea.hpp"
diff --git a/src/hotspot/share/memory/arena.hpp b/src/hotspot/share/memory/arena.hpp
index b4a0546babf83..a8450b5543a68 100644
--- a/src/hotspot/share/memory/arena.hpp
+++ b/src/hotspot/share/memory/arena.hpp
@@ -31,8 +31,6 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
-#include <new>
-
// The byte alignment to be used by Arena::Amalloc.
#define ARENA_AMALLOC_ALIGNMENT BytesPerLong
#define ARENA_ALIGN(x) (align_up((x), ARENA_AMALLOC_ALIGNMENT))
diff --git a/src/hotspot/share/nmt/mallocTracker.cpp b/src/hotspot/share/nmt/mallocTracker.cpp
index a61a27db25d2f..2cf5034c0bfd2 100644
--- a/src/hotspot/share/nmt/mallocTracker.cpp
+++ b/src/hotspot/share/nmt/mallocTracker.cpp
@@ -45,7 +45,7 @@
#include "utilities/ostream.hpp"
#include "utilities/vmError.hpp"
-MallocMemorySnapshot MallocMemorySummary::_snapshot;
+DeferredStatic<MallocMemorySnapshot> MallocMemorySummary::_snapshot;
void MemoryCounter::update_peak(size_t size, size_t cnt) {
size_t peak_sz = peak_size();
@@ -101,7 +101,7 @@ void MallocMemorySnapshot::make_adjustment() {
}
void MallocMemorySummary::initialize() {
- // Uses placement new operator to initialize static area.
+ _snapshot.initialize();
MallocLimitHandler::initialize(MallocLimit);
}
diff --git a/src/hotspot/share/nmt/mallocTracker.hpp b/src/hotspot/share/nmt/mallocTracker.hpp
index 0ead41f24110f..fc03faf721258 100644
--- a/src/hotspot/share/nmt/mallocTracker.hpp
+++ b/src/hotspot/share/nmt/mallocTracker.hpp
@@ -30,6 +30,7 @@
#include "nmt/memTag.hpp"
#include "nmt/nmtCommon.hpp"
#include "runtime/atomicAccess.hpp"
+#include "utilities/deferredStatic.hpp"
#include "utilities/nativeCallStack.hpp"
class outputStream;
@@ -204,7 +205,7 @@ class MallocMemorySnapshot {
class MallocMemorySummary : AllStatic {
private:
// Reserve memory for placement of MallocMemorySnapshot object
- static MallocMemorySnapshot _snapshot;
+  static DeferredStatic<MallocMemorySnapshot> _snapshot;
static bool _have_limits;
// Called when a total limit break was detected.
@@ -251,7 +252,7 @@ class MallocMemorySummary : AllStatic {
}
static MallocMemorySnapshot* as_snapshot() {
- return &_snapshot;
+ return _snapshot.get();
}
// MallocLimit: returns true if allocating s bytes on f would trigger
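
The DeferredStatic swap above is the standard HotSpot answer to static initialization order: reserve suitably aligned raw storage at namespace scope and placement-construct the object at a controlled point during VM startup. A stripped-down sketch of the mechanism (utilities/deferredStatic.hpp has more machinery; the names below are illustrative):

```cpp
#include <new>

template <typename T>
class DeferredStaticSketch {
  alignas(T) unsigned char _storage[sizeof(T)];
public:
  // Called exactly once, at a point chosen by the runtime rather than
  // by the C++ static initialization order.
  void initialize() { ::new (static_cast<void*>(_storage)) T(); }
  // The object is never destroyed; std::launder is elided for brevity.
  T* get() { return reinterpret_cast<T*>(_storage); }
};

struct Snapshot { long used = 0; };

static DeferredStaticSketch<Snapshot> _snapshot;

void summary_initialize() { _snapshot.initialize(); }
long used_bytes() { return _snapshot.get()->used; }
```
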
diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp
index 2d03b69ee92e9..24358f662bc6c 100644
--- a/src/hotspot/share/oops/instanceKlass.cpp
+++ b/src/hotspot/share/oops/instanceKlass.cpp
@@ -2870,7 +2870,7 @@ void InstanceKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handl
}
bool InstanceKlass::can_be_verified_at_dumptime() const {
- if (AOTMetaspace::in_aot_cache(this)) {
+ if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache(this)) {
// This is a class that was dumped into the base archive, so we know
// it was verified at dump time.
return true;
diff --git a/src/hotspot/share/oops/resolvedFieldEntry.cpp b/src/hotspot/share/oops/resolvedFieldEntry.cpp
index dd0a81ce0f3cb..83f1a6919a6ae 100644
--- a/src/hotspot/share/oops/resolvedFieldEntry.cpp
+++ b/src/hotspot/share/oops/resolvedFieldEntry.cpp
@@ -23,8 +23,17 @@
*/
#include "cds/archiveBuilder.hpp"
+#include "cppstdlib/type_traits.hpp"
#include "oops/resolvedFieldEntry.hpp"
+static_assert(std::is_trivially_copyable_v<ResolvedFieldEntry>);
+
+// Detect inadvertently introduced trailing padding.
+class ResolvedFieldEntryWithExtra : public ResolvedFieldEntry {
+ u1 _extra_field;
+};
+static_assert(sizeof(ResolvedFieldEntryWithExtra) > sizeof(ResolvedFieldEntry));
+
void ResolvedFieldEntry::print_on(outputStream* st) const {
st->print_cr("Field Entry:");
@@ -45,9 +54,7 @@ void ResolvedFieldEntry::print_on(outputStream* st) const {
#if INCLUDE_CDS
void ResolvedFieldEntry::remove_unshareable_info() {
- u2 saved_cpool_index = _cpool_index;
- memset(this, 0, sizeof(*this));
- _cpool_index = saved_cpool_index;
+ *this = ResolvedFieldEntry(_cpool_index);
}
void ResolvedFieldEntry::mark_and_relocate() {
diff --git a/src/hotspot/share/oops/resolvedFieldEntry.hpp b/src/hotspot/share/oops/resolvedFieldEntry.hpp
index 1df4ae8d956bc..77ad4815730e2 100644
--- a/src/hotspot/share/oops/resolvedFieldEntry.hpp
+++ b/src/hotspot/share/oops/resolvedFieldEntry.hpp
@@ -43,6 +43,9 @@
// Field bytecodes start with a constant pool index as their operand, which is then rewritten to
// a "field index", which is an index into the array of ResolvedFieldEntry.
+// The explicit paddings are necessary for generating deterministic CDS archives. They prevent
+// the C++ compiler from potentially inserting random values in unused gaps.
+
//class InstanceKlass;
class ResolvedFieldEntry {
friend class VMStructs;
@@ -54,17 +57,9 @@ class ResolvedFieldEntry {
u1 _tos_state; // TOS state
u1 _flags; // Flags: [0000|00|is_final|is_volatile]
u1 _get_code, _put_code; // Get and Put bytecodes of the field
-
- void copy_from(const ResolvedFieldEntry& other) {
- _field_holder = other._field_holder;
- _field_offset = other._field_offset;
- _field_index = other._field_index;
- _cpool_index = other._cpool_index;
- _tos_state = other._tos_state;
- _flags = other._flags;
- _get_code = other._get_code;
- _put_code = other._put_code;
- }
+#ifdef _LP64
+ u4 _padding;
+#endif
public:
ResolvedFieldEntry(u2 cpi) :
@@ -75,48 +70,15 @@ class ResolvedFieldEntry {
_tos_state(0),
_flags(0),
_get_code(0),
- _put_code(0) {}
+ _put_code(0)
+#ifdef _LP64
+ , _padding(0)
+#endif
+ {}
ResolvedFieldEntry() :
ResolvedFieldEntry(0) {}
- // Notes on copy constructor, copy assignment operator, and copy_from().
- // These are necessary for generating deterministic CDS archives.
- //
- // We have some unused padding on 64-bit platforms (4 bytes at the tail end).
- //
- // When ResolvedFieldEntries in a ConstantPoolCache are allocated from the metaspace,
- // their entire content (including the padding) is filled with zeros. They are
- // then initialized with initialize_resolved_entries_array() in cpCache.cpp from a
- // GrowableArray.
- //
- // The GrowableArray is initialized in rewriter.cpp, using ResolvedFieldEntries that
- // are originally allocated from the C++ stack. Functions like GrowableArray::expand_to()
- // will also allocate ResolvedFieldEntries from the stack. These may have random bits
- // in the padding as the C++ compiler is allowed to leave the padding in uninitialized
- // states.
- //
- // If we use the default copy constructor and/or default copy assignment operator,
- // the random padding will be copied into the GrowableArray, from there
- // to the ConstantPoolCache, and eventually to the CDS archive. As a result, the
- // CDS archive will contain random bits, causing failures in
- // test/hotspot/jtreg/runtime/cds/DeterministicDump.java (usually on Windows).
- //
- // By using copy_from(), we can prevent the random padding from being copied,
- // ensuring that the ResolvedFieldEntries in a ConstantPoolCache (and thus the
- // CDS archive) will have all zeros in the padding.
-
- // Copy constructor
- ResolvedFieldEntry(const ResolvedFieldEntry& other) {
- copy_from(other);
- }
-
- // Copy assignment operator
- ResolvedFieldEntry& operator=(const ResolvedFieldEntry& other) {
- copy_from(other);
- return *this;
- }
-
// Bit shift to get flags
// Note: Only two flags exists at the moment but more could be added
enum {
diff --git a/src/hotspot/share/oops/resolvedMethodEntry.cpp b/src/hotspot/share/oops/resolvedMethodEntry.cpp
index 2dc533dbee00a..bb96ca86012b5 100644
--- a/src/hotspot/share/oops/resolvedMethodEntry.cpp
+++ b/src/hotspot/share/oops/resolvedMethodEntry.cpp
@@ -23,9 +23,18 @@
*/
#include "cds/archiveBuilder.hpp"
+#include "cppstdlib/type_traits.hpp"
#include "oops/method.hpp"
#include "oops/resolvedMethodEntry.hpp"
+static_assert(std::is_trivially_copyable_v<ResolvedMethodEntry>);
+
+// Detect inadvertently introduced trailing padding.
+class ResolvedMethodEntryWithExtra : public ResolvedMethodEntry {
+ u1 _extra_field;
+};
+static_assert(sizeof(ResolvedMethodEntryWithExtra) > sizeof(ResolvedMethodEntry));
+
bool ResolvedMethodEntry::check_no_old_or_obsolete_entry() {
// return false if m refers to a non-deleted old or obsolete method
if (_method != nullptr) {
@@ -39,14 +48,10 @@ bool ResolvedMethodEntry::check_no_old_or_obsolete_entry() {
void ResolvedMethodEntry::reset_entry() {
if (has_resolved_references_index()) {
u2 saved_resolved_references_index = _entry_specific._resolved_references_index;
- u2 saved_cpool_index = _cpool_index;
- memset(this, 0, sizeof(*this));
+ *this = ResolvedMethodEntry(_cpool_index);
set_resolved_references_index(saved_resolved_references_index);
- _cpool_index = saved_cpool_index;
} else {
- u2 saved_cpool_index = _cpool_index;
- memset(this, 0, sizeof(*this));
- _cpool_index = saved_cpool_index;
+ *this = ResolvedMethodEntry(_cpool_index);
}
}
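
The WithExtra static_assert pattern introduced in both entry .cpp files deserves a note: deriving from the entry and adding a single byte detects reintroduced trailing padding, because ABIs that reuse the tail padding of non-POD bases would place the extra byte inside the gap and the two sizes would compare equal. A hedged stand-alone illustration (the field layout is invented for the example):

```cpp
#include <cstdint>

struct Entry {                 // explicitly padded, like the patched entries
  Entry() : _holder(nullptr), _index(0), _cpool_index(0),
            _tos_state(0), _flags(0), _padding(0) {}
  void*    _holder;            // 8 bytes on LP64
  uint16_t _index;
  uint16_t _cpool_index;
  uint8_t  _tos_state;
  uint8_t  _flags;
  uint16_t _padding;           // fills the tail gap deterministically
};

struct EntryWithExtra : Entry {
  uint8_t _extra_field;
};

// If Entry ended in padding, some ABIs could tuck _extra_field into it,
// making the two sizes equal and firing this assert.
static_assert(sizeof(EntryWithExtra) > sizeof(Entry),
              "Entry has trailing padding");
```
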
diff --git a/src/hotspot/share/oops/resolvedMethodEntry.hpp b/src/hotspot/share/oops/resolvedMethodEntry.hpp
index c95efb751e961..802cf252a6d3c 100644
--- a/src/hotspot/share/oops/resolvedMethodEntry.hpp
+++ b/src/hotspot/share/oops/resolvedMethodEntry.hpp
@@ -61,6 +61,9 @@
// pool entry and thus the same resolved method entry.
// The is_vfinal flag indicates method pointer for a final method or an index.
+// The explicit paddings are necessary for generating deterministic CDS archives. They prevent
+// the C++ compiler from potentially inserting random values in unused gaps.
+
class InstanceKlass;
class ResolvedMethodEntry {
friend class VMStructs;
@@ -70,6 +73,7 @@ class ResolvedMethodEntry {
InstanceKlass* _interface_klass; // for interface and static
u2 _resolved_references_index; // Index of resolved references array that holds the appendix oop for invokehandle
u2 _table_index; // vtable/itable index for virtual and interface calls
+ // The padding field is unused here, as the parent constructor zeroes the union.
} _entry_specific;
u2 _cpool_index; // Constant pool index
@@ -80,51 +84,36 @@ class ResolvedMethodEntry {
#ifdef ASSERT
bool _has_interface_klass;
bool _has_table_index;
+# ifdef _LP64
+ u2 _padding1;
+ u4 _padding2;
+# else
+ u1 _padding1;
+ u1 _padding2;
+# endif
#endif
- // See comments in resolvedFieldEntry.hpp about copy_from and padding.
- // We have unused padding on debug builds.
- void copy_from(const ResolvedMethodEntry& other) {
- _method = other._method;
- _entry_specific = other._entry_specific;
- _cpool_index = other._cpool_index;
- _number_of_parameters = other._number_of_parameters;
- _tos_state = other._tos_state;
- _flags = other._flags;
- _bytecode1 = other._bytecode1;
- _bytecode2 = other._bytecode2;
-#ifdef ASSERT
- _has_interface_klass = other._has_interface_klass;
- _has_table_index = other._has_table_index;
-#endif
- }
-
// Constructors
public:
ResolvedMethodEntry(u2 cpi) :
_method(nullptr),
+ _entry_specific{nullptr},
_cpool_index(cpi),
_number_of_parameters(0),
_tos_state(0),
_flags(0),
_bytecode1(0),
- _bytecode2(0) {
- _entry_specific._interface_klass = nullptr;
- DEBUG_ONLY(_has_interface_klass = false;)
- DEBUG_ONLY(_has_table_index = false;)
- }
+ _bytecode2(0)
+#ifdef ASSERT
+ , _has_interface_klass(false),
+ _has_table_index(false),
+ _padding1(0),
+ _padding2(0)
+#endif
+ {}
ResolvedMethodEntry() :
ResolvedMethodEntry(0) {}
- ResolvedMethodEntry(const ResolvedMethodEntry& other) {
- copy_from(other);
- }
-
- ResolvedMethodEntry& operator=(const ResolvedMethodEntry& other) {
- copy_from(other);
- return *this;
- }
-
// Bit shift to get flags
enum {
diff --git a/src/hotspot/share/opto/c2_globals.hpp b/src/hotspot/share/opto/c2_globals.hpp
index 0a4f231c49b36..2b2b4db47b134 100644
--- a/src/hotspot/share/opto/c2_globals.hpp
+++ b/src/hotspot/share/opto/c2_globals.hpp
@@ -428,7 +428,7 @@
"0=print nothing except PhasePrintLevel directives, " \
"6=all details printed. " \
"Level of detail of printouts can be set on a per-method level " \
- "as well by using CompileCommand=PrintPhaseLevel.") \
+ "as well by using CompileCommand=PhasePrintLevel.") \
range(-1, 6) \
\
develop(bool, PrintIdealGraph, false, \
diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp
index 6babc13e1b315..89b5e36b120f1 100644
--- a/src/hotspot/share/opto/compile.cpp
+++ b/src/hotspot/share/opto/compile.cpp
@@ -5233,7 +5233,7 @@ void Compile::end_method() {
#ifndef PRODUCT
bool Compile::should_print_phase(const int level) const {
- return PrintPhaseLevel > 0 && directive()->PhasePrintLevelOption >= level &&
+ return PrintPhaseLevel >= 0 && directive()->PhasePrintLevelOption >= level &&
_method != nullptr; // Do not print phases for stubs.
}
diff --git a/src/hotspot/share/opto/doCall.cpp b/src/hotspot/share/opto/doCall.cpp
index 754b0fa8d1c14..91bb743618b31 100644
--- a/src/hotspot/share/opto/doCall.cpp
+++ b/src/hotspot/share/opto/doCall.cpp
@@ -97,10 +97,9 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
ciMethod* orig_callee = caller->get_method_at_bci(bci);
- const bool is_virtual_or_interface = (bytecode == Bytecodes::_invokevirtual) ||
- (bytecode == Bytecodes::_invokeinterface) ||
- (orig_callee->intrinsic_id() == vmIntrinsics::_linkToVirtual) ||
- (orig_callee->intrinsic_id() == vmIntrinsics::_linkToInterface);
+ const bool is_virtual = (bytecode == Bytecodes::_invokevirtual) || (orig_callee->intrinsic_id() == vmIntrinsics::_linkToVirtual);
+ const bool is_interface = (bytecode == Bytecodes::_invokeinterface) || (orig_callee->intrinsic_id() == vmIntrinsics::_linkToInterface);
+ const bool is_virtual_or_interface = is_virtual || is_interface;
const bool check_access = !orig_callee->is_method_handle_intrinsic(); // method handle intrinsics don't perform access checks
@@ -339,17 +338,25 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// number of implementors for decl_interface is 0 or 1. If
// it's 0 then no class implements decl_interface and there's
// no point in inlining.
- if (call_does_dispatch && bytecode == Bytecodes::_invokeinterface) {
- ciInstanceKlass* declared_interface =
- caller->get_declared_method_holder_at_bci(bci)->as_instance_klass();
+ if (call_does_dispatch && is_interface) {
+ ciInstanceKlass* declared_interface = nullptr;
+ if (orig_callee->intrinsic_id() == vmIntrinsics::_linkToInterface) {
+ // MemberName doesn't keep information about resolved interface class (REFC) once
+ // resolution is over, but resolved method holder (DECC) can be used as a
+ // conservative approximation.
+ declared_interface = callee->holder();
+ } else {
+ assert(!orig_callee->is_method_handle_intrinsic(), "not allowed");
+ declared_interface = caller->get_declared_method_holder_at_bci(bci)->as_instance_klass();
+ }
+ assert(declared_interface->is_interface(), "required");
ciInstanceKlass* singleton = declared_interface->unique_implementor();
if (singleton != nullptr) {
assert(singleton != declared_interface, "not a unique implementor");
- assert(check_access, "required");
ciMethod* cha_monomorphic_target =
- callee->find_monomorphic_target(caller->holder(), declared_interface, singleton);
+ callee->find_monomorphic_target(caller->holder(), declared_interface, singleton, check_access);
if (cha_monomorphic_target != nullptr &&
cha_monomorphic_target->holder() != env()->Object_klass()) { // subtype check against Object is useless
@@ -372,7 +379,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
}
}
}
- } // call_does_dispatch && bytecode == Bytecodes::_invokeinterface
+ } // call_does_dispatch && is_interface
// Nothing claimed the intrinsic, we go with straight-forward inlining
// for already discovered intrinsic.
diff --git a/src/hotspot/share/opto/node.cpp b/src/hotspot/share/opto/node.cpp
index 93ded36363ec7..2452677caf354 100644
--- a/src/hotspot/share/opto/node.cpp
+++ b/src/hotspot/share/opto/node.cpp
@@ -1209,9 +1209,12 @@ bool Node::has_special_unique_user() const {
if (this->is_Store()) {
// Condition for back-to-back stores folding.
return n->Opcode() == op && n->in(MemNode::Memory) == this;
- } else if (this->is_Load() || this->is_DecodeN() || this->is_Phi()) {
+ } else if ((this->is_Load() || this->is_DecodeN() || this->is_Phi()) && n->Opcode() == Op_MemBarAcquire) {
// Condition for removing an unused LoadNode or DecodeNNode from the MemBarAcquire precedence input
- return n->Opcode() == Op_MemBarAcquire;
+ return true;
+ } else if (this->is_Load() && n->is_Move()) {
+ // Condition for MoveX2Y (LoadX mem) => LoadY mem
+ return true;
} else if (op == Op_AddL) {
// Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
return n->Opcode() == Op_ConvL2I && n->in(1) == this;
diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
index 1ef2ee9de0dd2..55ee7641a5f36 100644
--- a/src/hotspot/share/runtime/arguments.cpp
+++ b/src/hotspot/share/runtime/arguments.cpp
@@ -2483,6 +2483,9 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, JVMFlagOrigin
}
} else if (match_option(option, "-Xmaxjitcodesize", &tail) ||
match_option(option, "-XX:ReservedCodeCacheSize=", &tail)) {
+ if (match_option(option, "-Xmaxjitcodesize", &tail)) {
+ warning("Option -Xmaxjitcodesize was deprecated in JDK 26 and will likely be removed in a future release.");
+ }
julong long_ReservedCodeCacheSize = 0;
ArgsRange errcode = parse_memory_size(tail, &long_ReservedCodeCacheSize, 1);
diff --git a/src/hotspot/share/utilities/debug.cpp b/src/hotspot/share/utilities/debug.cpp
index 89c0a1ebc0810..de39fe32dc1d6 100644
--- a/src/hotspot/share/utilities/debug.cpp
+++ b/src/hotspot/share/utilities/debug.cpp
@@ -29,6 +29,7 @@
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
+#include "cppstdlib/new.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
@@ -63,7 +64,6 @@
#include "utilities/unsigned5.hpp"
#include "utilities/vmError.hpp"
-#include <new>
#include
#include
diff --git a/src/hotspot/share/utilities/deferredStatic.hpp b/src/hotspot/share/utilities/deferredStatic.hpp
index 56bdb9b8e6bbf..3a32f920fe81b 100644
--- a/src/hotspot/share/utilities/deferredStatic.hpp
+++ b/src/hotspot/share/utilities/deferredStatic.hpp
@@ -25,11 +25,10 @@
#ifndef SHARE_UTILITIES_DEFERREDSTATIC_HPP
#define SHARE_UTILITIES_DEFERREDSTATIC_HPP
+#include "cppstdlib/new.hpp"
#include "cppstdlib/type_traits.hpp"
#include "utilities/globalDefinitions.hpp"
-#include <new>
-
// The purpose of this class is to provide control over the initialization
// time for an object of type T with static storage duration. An instance of
// this class provides storage for an object, sized and aligned for T. The
diff --git a/src/hotspot/share/utilities/elfFile.cpp b/src/hotspot/share/utilities/elfFile.cpp
index 9ea19b38276ca..0b7713e9ca9ca 100644
--- a/src/hotspot/share/utilities/elfFile.cpp
+++ b/src/hotspot/share/utilities/elfFile.cpp
@@ -25,6 +25,7 @@
#if !defined(_WINDOWS) && !defined(__APPLE__)
+#include "cppstdlib/new.hpp"
#include "jvm_io.h"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
@@ -37,7 +38,6 @@
#include "utilities/ostream.hpp"
#include
-#include <new>
#include
#include
diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp
index 1910759b43497..3284fd3bd15be 100644
--- a/src/hotspot/share/utilities/globalDefinitions.hpp
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp
@@ -1386,4 +1386,25 @@ template inline constexpr bool DependentAlwaysFalse = false;
// handled.
bool IEEE_subnormal_handling_OK();
+//----------------------------------------------------------------------------------------------------
+// Forbid using the global allocator by HotSpot code.
+//
+// This is a subset of allocator and deallocator functions. These are
+// implicitly declared in all translation units, without needing to include
+// <new>; see C++17 6.7.4. This isn't even the full set of those; implicit
+// declarations involving std::align_val_t are not covered here, since that
+// type is defined in <new>. A translation unit that doesn't include <new> is
+// still likely to include this file. See cppstdlib/new.hpp for more details.
+#ifndef HOTSPOT_GTEST
+
+[[deprecated]] void* operator new(std::size_t);
+[[deprecated]] void operator delete(void*) noexcept;
+[[deprecated]] void operator delete(void*, std::size_t) noexcept;
+
+[[deprecated]] void* operator new[](std::size_t);
+[[deprecated]] void operator delete[](void*) noexcept;
+[[deprecated]] void operator delete[](void*, std::size_t) noexcept;
+
+#endif // HOTSPOT_GTEST
+
#endif // SHARE_UTILITIES_GLOBALDEFINITIONS_HPP
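
The effect of these [[deprecated]] declarations is that any HotSpot translation unit reaching for the global allocator gets a diagnostic at the call site, which the build can promote to an error; gtest code opts out via HOTSPOT_GTEST (wired up in CompileGtest.gmk earlier in this patch). A small sketch of the mechanism, with an illustrative message the real patch does not include:

```cpp
#include <cstddef>

// Declaration only; the linker still supplies the real allocator.
[[deprecated("HotSpot code should use os::malloc / CHeapObj instead")]]
void* operator new(std::size_t);

struct Foo { int x; };

Foo* make_foo() {
  return new Foo{1};  // warning: 'operator new' is deprecated
}
```
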
diff --git a/src/hotspot/share/utilities/lockFreeStack.hpp b/src/hotspot/share/utilities/lockFreeStack.hpp
index 43bc58fbc445b..3f63482a2681b 100644
--- a/src/hotspot/share/utilities/lockFreeStack.hpp
+++ b/src/hotspot/share/utilities/lockFreeStack.hpp
@@ -25,6 +25,7 @@
#ifndef SHARE_UTILITIES_LOCKFREESTACK_HPP
#define SHARE_UTILITIES_LOCKFREESTACK_HPP
+#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -34,11 +35,14 @@
// a result, there is no allocation involved in adding objects to the stack
// or removing them from the stack.
//
-// To be used in a LockFreeStack of objects of type T, an object of
-// type T must have a list entry member of type T* volatile, with an
-// non-member accessor function returning a pointer to that member. A
-// LockFreeStack is associated with the class of its elements and an
-// entry member from that class.
+// To be used in a LockFreeStack of objects of type T, an object of type T
+// must have a list entry member. A list entry member is a data member whose
+// type is either (1) Atomic<T*>, or (2) T* volatile. There must be a
+// non-member or static member function returning a pointer to that member,
+// which is used to provide access to it by a LockFreeStack. A LockFreeStack
+// is associated with the class of its elements and an entry member from that
+// class by being specialized on the element class and a pointer to the
+// function for accessing that entry member.
//
// An object can be in multiple stacks at the same time, so long as
// each stack uses a different entry member. That is, the class of the
@@ -52,12 +56,12 @@
//
// \tparam T is the class of the elements in the stack.
//
-// \tparam next_ptr is a function pointer. Applying this function to
+// \tparam next_accessor is a function pointer. Applying this function to
// an object of type T must return a pointer to the list entry member
// of the object associated with the LockFreeStack type.
-template<typename T, T* volatile* (*next_ptr)(T&)>
+template<typename T, auto next_accessor>
+template
class LockFreeStack {
- T* volatile _top;
+ Atomic _top;
void prepend_impl(T* first, T* last) {
T* cur = top();
@@ -65,12 +69,21 @@ class LockFreeStack {
do {
old = cur;
set_next(*last, cur);
- cur = AtomicAccess::cmpxchg(&_top, cur, first);
+ cur = _top.compare_exchange(cur, first);
} while (old != cur);
}
NONCOPYABLE(LockFreeStack);
+  template<typename NextAccessor>
+ static constexpr void use_atomic_access_impl(NextAccessor) {
+    static_assert(DependentAlwaysFalse<NextAccessor>, "Invalid next accessor");
+ }
+ static constexpr bool use_atomic_access_impl(T* volatile* (*)(T&)) { return true; }
+  static constexpr bool use_atomic_access_impl(Atomic<T*>* (*)(T&)) { return false; }
+
+ static constexpr bool use_atomic_access = use_atomic_access_impl(next_accessor);
+
public:
LockFreeStack() : _top(nullptr) {}
~LockFreeStack() { assert(empty(), "stack not empty"); }
@@ -89,7 +102,7 @@ class LockFreeStack {
new_top = next(*result);
}
// CAS even on empty pop, for consistent membar behavior.
- result = AtomicAccess::cmpxchg(&_top, result, new_top);
+ result = _top.compare_exchange(result, new_top);
} while (result != old);
if (result != nullptr) {
set_next(*result, nullptr);
@@ -101,7 +114,7 @@ class LockFreeStack {
// list of elements. Acts as a full memory barrier.
// postcondition: empty()
T* pop_all() {
- return AtomicAccess::xchg(&_top, (T*)nullptr);
+ return _top.exchange(nullptr);
}
// Atomically adds value to the top of this stack. Acts as a full
@@ -143,9 +156,9 @@ class LockFreeStack {
// Return true if the stack is empty.
bool empty() const { return top() == nullptr; }
- // Return the most recently pushed element, or nullptr if the stack is empty.
+ // Return the most recently pushed element, or null if the stack is empty.
// The returned element is not removed from the stack.
- T* top() const { return AtomicAccess::load(&_top); }
+ T* top() const { return _top.load_relaxed(); }
// Return the number of objects in the stack. There must be no concurrent
// pops while the length is being determined.
@@ -160,7 +173,11 @@ class LockFreeStack {
// Return the entry following value in the list used by the
// specialized LockFreeStack class.
static T* next(const T& value) {
-    return AtomicAccess::load(next_ptr(const_cast<T&>(value)));
+ if constexpr (use_atomic_access) {
+      return AtomicAccess::load(next_accessor(const_cast<T&>(value)));
+ } else {
+      return next_accessor(const_cast<T&>(value))->load_relaxed();
+ }
}
// Set the entry following value to new_next in the list used by the
@@ -168,7 +185,11 @@ class LockFreeStack {
// if value is in an instance of this specialization of LockFreeStack,
// there must be no concurrent push or pop operations on that stack.
static void set_next(T& value, T* new_next) {
- AtomicAccess::store(next_ptr(value), new_next);
+ if constexpr (use_atomic_access) {
+ AtomicAccess::store(next_accessor(value), new_next);
+ } else {
+ next_accessor(value)->store_relaxed(new_next);
+ }
}
};
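
To make the new accessor contract concrete: an element type publishes its entry member through a function returning either Atomic<T*>* or T* volatile*, and the stack is specialized on that function. The following stand-alone miniature reimplements just enough of the idea with std::atomic to compile and run; it is not the HotSpot class, and pop() assumes a single concurrent popper, mirroring the usage noted in oopStorage.cpp:

```cpp
#include <atomic>
#include <cassert>

// The stack is specialized on a function returning a pointer to the
// element's list entry member, as in the patched LockFreeStack.
template <typename T, std::atomic<T*>* (*next_accessor)(T&)>
class MiniStack {
  std::atomic<T*> _top{nullptr};
public:
  void push(T& value) {
    T* cur = _top.load(std::memory_order_relaxed);
    do {
      next_accessor(value)->store(cur, std::memory_order_relaxed);
    } while (!_top.compare_exchange_weak(cur, &value));
  }
  T* pop() {  // single-popper use only: no ABA protection here
    T* cur = _top.load(std::memory_order_relaxed);
    while (cur != nullptr &&
           !_top.compare_exchange_weak(
               cur, next_accessor(*cur)->load(std::memory_order_relaxed))) {
    }
    return cur;
  }
};

struct Item {
  int payload;
  std::atomic<Item*> _next{nullptr};  // the list entry member
};
std::atomic<Item*>* item_next(Item& item) { return &item._next; }

int main() {
  MiniStack<Item, &item_next> stack;
  Item a{1}, b{2};
  stack.push(a);
  stack.push(b);
  assert(stack.pop() == &b);
  assert(stack.pop() == &a);
  assert(stack.pop() == nullptr);
}
```
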
diff --git a/src/java.base/share/classes/java/lang/Character.java b/src/java.base/share/classes/java/lang/Character.java
index 72ff33651f961..d866202909cae 100644
--- a/src/java.base/share/classes/java/lang/Character.java
+++ b/src/java.base/share/classes/java/lang/Character.java
@@ -63,7 +63,7 @@
* from the Unicode Consortium at
* http://www.unicode.org.
*
- * Character information is based on the Unicode Standard, version 16.0.
+ * Character information is based on the Unicode Standard, version 17.0.
*
* The Java platform has supported different versions of the Unicode
* Standard over time. Upgrades to newer versions of the Unicode Standard
@@ -75,6 +75,8 @@
*     <th scope="col">Unicode version</th></tr>
* </thead>
* <tbody>
+ * <tr><th scope="row" style="text-align:left">Java SE 26</th>
+ *     <td>Unicode 17.0</td></tr>
* <tr><th scope="row" style="text-align:left">Java SE 24</th>
*     <td>Unicode 16.0</td></tr>
* <tr><th scope="row" style="text-align:left">Java SE 22</th>
*     <td>Unicode 15.1</td></tr>
@@ -745,7 +747,7 @@ public static final class UnicodeBlock extends Subset {
* It should be adjusted whenever the Unicode Character Database
* is upgraded.
*/
- private static final int NUM_ENTITIES = 782;
+ private static final int NUM_ENTITIES = 804;
private static Map<String, UnicodeBlock> map = HashMap.newHashMap(NUM_ENTITIES);
/**
@@ -3715,6 +3717,85 @@ private UnicodeBlock(String idName, String... aliases) {
"OL ONAL",
"OLONAL");
+ /**
+ * Constant for the "Sidetic" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock SIDETIC =
+ new UnicodeBlock("SIDETIC");
+
+ /**
+ * Constant for the "Sharada Supplement" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock SHARADA_SUPPLEMENT =
+ new UnicodeBlock("SHARADA_SUPPLEMENT",
+ "SHARADA SUPPLEMENT",
+ "SHARADASUPPLEMENT");
+
+ /**
+ * Constant for the "Tolong Siki" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock TOLONG_SIKI =
+ new UnicodeBlock("TOLONG_SIKI",
+ "TOLONG SIKI",
+ "TOLONGSIKI");
+
+ /**
+ * Constant for the "Beria Erfe" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock BERIA_ERFE =
+ new UnicodeBlock("BERIA_ERFE",
+ "BERIA ERFE",
+ "BERIAERFE");
+
+ /**
+ * Constant for the "Tangut Components Supplement" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock TANGUT_COMPONENTS_SUPPLEMENT =
+ new UnicodeBlock("TANGUT_COMPONENTS_SUPPLEMENT",
+ "TANGUT COMPONENTS SUPPLEMENT",
+ "TANGUTCOMPONENTSSUPPLEMENT");
+
+ /**
+ * Constant for the "Miscellaneous Symbols Supplement" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock MISCELLANEOUS_SYMBOLS_SUPPLEMENT =
+ new UnicodeBlock("MISCELLANEOUS_SYMBOLS_SUPPLEMENT",
+ "MISCELLANEOUS SYMBOLS SUPPLEMENT",
+ "MISCELLANEOUSSYMBOLSSUPPLEMENT");
+
+ /**
+ * Constant for the "Tai Yo" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock TAI_YO =
+ new UnicodeBlock("TAI_YO",
+ "TAI YO",
+ "TAIYO");
+
+ /**
+ * Constant for the "CJK Unified Ideographs Extension J" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock CJK_UNIFIED_IDEOGRAPHS_EXTENSION_J =
+ new UnicodeBlock("CJK_UNIFIED_IDEOGRAPHS_EXTENSION_J",
+ "CJK UNIFIED IDEOGRAPHS EXTENSION J",
+ "CJKUNIFIEDIDEOGRAPHSEXTENSIONJ");
+
+
private static final int[] blockStarts = {
0x0000, // 0000..007F; Basic Latin
0x0080, // 0080..00FF; Latin-1 Supplement
@@ -3916,7 +3997,8 @@ private UnicodeBlock(String idName, String... aliases) {
0x108E0, // 108E0..108FF; Hatran
0x10900, // 10900..1091F; Phoenician
0x10920, // 10920..1093F; Lydian
- 0x10940, // unassigned
+ 0x10940, // 10940..1095F; Sidetic
+ 0x10960, // unassigned
0x10980, // 10980..1099F; Meroitic Hieroglyphs
0x109A0, // 109A0..109FF; Meroitic Cursive
0x10A00, // 10A00..10A5F; Kharoshthi
@@ -3977,14 +4059,16 @@ private UnicodeBlock(String idName, String... aliases) {
0x11AB0, // 11AB0..11ABF; Unified Canadian Aboriginal Syllabics Extended-A
0x11AC0, // 11AC0..11AFF; Pau Cin Hau
0x11B00, // 11B00..11B5F; Devanagari Extended-A
- 0x11B60, // unassigned
+ 0x11B60, // 11B60..11B7F; Sharada Supplement
+ 0x11B80, // unassigned
0x11BC0, // 11BC0..11BFF; Sunuwar
0x11C00, // 11C00..11C6F; Bhaiksuki
0x11C70, // 11C70..11CBF; Marchen
0x11CC0, // unassigned
0x11D00, // 11D00..11D5F; Masaram Gondi
0x11D60, // 11D60..11DAF; Gunjala Gondi
- 0x11DB0, // unassigned
+ 0x11DB0, // 11DB0..11DEF; Tolong Siki
+ 0x11DF0, // unassigned
0x11EE0, // 11EE0..11EFF; Makasar
0x11F00, // 11F00..11F5F; Kawi
0x11F60, // unassigned
@@ -4011,7 +4095,8 @@ private UnicodeBlock(String idName, String... aliases) {
0x16D40, // 16D40..16D7F; Kirat Rai
0x16D80, // unassigned
0x16E40, // 16E40..16E9F; Medefaidrin
- 0x16EA0, // unassigned
+ 0x16EA0, // 16EA0..16EDF; Beria Erfe
+ 0x16EE0, // unassigned
0x16F00, // 16F00..16F9F; Miao
0x16FA0, // unassigned
0x16FE0, // 16FE0..16FFF; Ideographic Symbols and Punctuation
@@ -4019,7 +4104,8 @@ private UnicodeBlock(String idName, String... aliases) {
0x18800, // 18800..18AFF; Tangut Components
0x18B00, // 18B00..18CFF; Khitan Small Script
0x18D00, // 18D00..18D7F; Tangut Supplement
- 0x18D80, // unassigned
+ 0x18D80, // 18D80..18DFF; Tangut Components Supplement
+ 0x18E00, // unassigned
0x1AFF0, // 1AFF0..1AFFF; Kana Extended-B
0x1B000, // 1B000..1B0FF; Kana Supplement
0x1B100, // 1B100..1B12F; Kana Extended-A
@@ -4030,7 +4116,7 @@ private UnicodeBlock(String idName, String... aliases) {
0x1BCA0, // 1BCA0..1BCAF; Shorthand Format Controls
0x1BCB0, // unassigned
0x1CC00, // 1CC00..1CEBF; Symbols for Legacy Computing Supplement
- 0x1CEC0, // unassigned
+ 0x1CEC0, // 1CEC0..1CEFF; Miscellaneous Symbols Supplement
0x1CF00, // 1CF00..1CFCF; Znamenny Musical Notation
0x1CFD0, // unassigned
0x1D000, // 1D000..1D0FF; Byzantine Musical Symbols
@@ -4058,6 +4144,8 @@ private UnicodeBlock(String idName, String... aliases) {
0x1E500, // unassigned
0x1E5D0, // 1E5D0..1E5FF; Ol Onal
0x1E600, // unassigned
+ 0x1E6C0, // 1E6C0..1E6FF; Tai Yo
+ 0x1E700, // unassigned
0x1E7E0, // 1E7E0..1E7FF; Ethiopic Extended-B
0x1E800, // 1E800..1E8DF; Mende Kikakui
0x1E8E0, // unassigned
@@ -4098,7 +4186,8 @@ private UnicodeBlock(String idName, String... aliases) {
0x2FA20, // unassigned
0x30000, // 30000..3134F; CJK Unified Ideographs Extension G
0x31350, // 31350..323AF; CJK Unified Ideographs Extension H
- 0x323B0, // unassigned
+ 0x323B0, // 323B0..3347F; CJK Unified Ideographs Extension J
+ 0x33480, // unassigned
0xE0000, // E0000..E007F; Tags
0xE0080, // unassigned
0xE0100, // E0100..E01EF; Variation Selectors Supplement
@@ -4308,6 +4397,7 @@ private UnicodeBlock(String idName, String... aliases) {
HATRAN,
PHOENICIAN,
LYDIAN,
+ SIDETIC,
null,
MEROITIC_HIEROGLYPHS,
MEROITIC_CURSIVE,
@@ -4369,6 +4459,7 @@ private UnicodeBlock(String idName, String... aliases) {
UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS_EXTENDED_A,
PAU_CIN_HAU,
DEVANAGARI_EXTENDED_A,
+ SHARADA_SUPPLEMENT,
null,
SUNUWAR,
BHAIKSUKI,
@@ -4376,6 +4467,7 @@ private UnicodeBlock(String idName, String... aliases) {
null,
MASARAM_GONDI,
GUNJALA_GONDI,
+ TOLONG_SIKI,
null,
MAKASAR,
KAWI,
@@ -4403,6 +4495,7 @@ private UnicodeBlock(String idName, String... aliases) {
KIRAT_RAI,
null,
MEDEFAIDRIN,
+ BERIA_ERFE,
null,
MIAO,
null,
@@ -4411,6 +4504,7 @@ private UnicodeBlock(String idName, String... aliases) {
TANGUT_COMPONENTS,
KHITAN_SMALL_SCRIPT,
TANGUT_SUPPLEMENT,
+ TANGUT_COMPONENTS_SUPPLEMENT,
null,
KANA_EXTENDED_B,
KANA_SUPPLEMENT,
@@ -4422,7 +4516,7 @@ private UnicodeBlock(String idName, String... aliases) {
SHORTHAND_FORMAT_CONTROLS,
null,
SYMBOLS_FOR_LEGACY_COMPUTING_SUPPLEMENT,
- null,
+ MISCELLANEOUS_SYMBOLS_SUPPLEMENT,
ZNAMENNY_MUSICAL_NOTATION,
null,
BYZANTINE_MUSICAL_SYMBOLS,
@@ -4450,6 +4544,8 @@ private UnicodeBlock(String idName, String... aliases) {
null,
OL_ONAL,
null,
+ TAI_YO,
+ null,
ETHIOPIC_EXTENDED_B,
MENDE_KIKAKUI,
null,
@@ -4490,6 +4586,7 @@ private UnicodeBlock(String idName, String... aliases) {
null,
CJK_UNIFIED_IDEOGRAPHS_EXTENSION_G,
CJK_UNIFIED_IDEOGRAPHS_EXTENSION_H,
+ CJK_UNIFIED_IDEOGRAPHS_EXTENSION_J,
null,
TAGS,
null,
@@ -5547,6 +5644,30 @@ public static enum UnicodeScript {
*/
OL_ONAL,
+ /**
+ * Unicode script "Sidetic".
+ * @since 26
+ */
+ SIDETIC,
+
+ /**
+ * Unicode script "Tolong Siki".
+ * @since 26
+ */
+ TOLONG_SIKI,
+
+ /**
+ * Unicode script "Beria Erfe".
+ * @since 26
+ */
+ BERIA_ERFE,
+
+ /**
+ * Unicode script "Tai Yo".
+ * @since 26
+ */
+ TAI_YO,
+
/**
* Unicode script "Unknown".
*/
@@ -5648,9 +5769,7 @@ public static enum UnicodeScript {
0x085F, // 085F ; UNKNOWN
0x0860, // 0860..086A; SYRIAC
0x086B, // 086B..086F; UNKNOWN
- 0x0870, // 0870..088E; ARABIC
- 0x088F, // 088F ; UNKNOWN
- 0x0890, // 0890..0891; ARABIC
+ 0x0870, // 0870..0891; ARABIC
0x0892, // 0892..0896; UNKNOWN
0x0897, // 0897..08E1; ARABIC
0x08E2, // 08E2 ; COMMON
@@ -5825,8 +5944,8 @@ public static enum UnicodeScript {
0x0C55, // 0C55..0C56; TELUGU
0x0C57, // 0C57 ; UNKNOWN
0x0C58, // 0C58..0C5A; TELUGU
- 0x0C5B, // 0C5B..0C5C; UNKNOWN
- 0x0C5D, // 0C5D ; TELUGU
+ 0x0C5B, // 0C5B ; UNKNOWN
+ 0x0C5C, // 0C5C..0C5D; TELUGU
0x0C5E, // 0C5E..0C5F; UNKNOWN
0x0C60, // 0C60..0C63; TELUGU
0x0C64, // 0C64..0C65; UNKNOWN
@@ -5850,8 +5969,8 @@ public static enum UnicodeScript {
0x0CCA, // 0CCA..0CCD; KANNADA
0x0CCE, // 0CCE..0CD4; UNKNOWN
0x0CD5, // 0CD5..0CD6; KANNADA
- 0x0CD7, // 0CD7..0CDC; UNKNOWN
- 0x0CDD, // 0CDD..0CDE; KANNADA
+ 0x0CD7, // 0CD7..0CDB; UNKNOWN
+ 0x0CDC, // 0CDC..0CDE; KANNADA
0x0CDF, // 0CDF ; UNKNOWN
0x0CE0, // 0CE0..0CE3; KANNADA
0x0CE4, // 0CE4..0CE5; UNKNOWN
@@ -6062,8 +6181,10 @@ public static enum UnicodeScript {
0x1A9A, // 1A9A..1A9F; UNKNOWN
0x1AA0, // 1AA0..1AAD; TAI_THAM
0x1AAE, // 1AAE..1AAF; UNKNOWN
- 0x1AB0, // 1AB0..1ACE; INHERITED
- 0x1ACF, // 1ACF..1AFF; UNKNOWN
+ 0x1AB0, // 1AB0..1ADD; INHERITED
+ 0x1ADE, // 1ADE..1ADF; UNKNOWN
+ 0x1AE0, // 1AE0..1AEB; INHERITED
+ 0x1AEC, // 1AEC..1AFF; UNKNOWN
0x1B00, // 1B00..1B4C; BALINESE
0x1B4D, // 1B4D ; UNKNOWN
0x1B4E, // 1B4E..1B7F; BALINESE
@@ -6155,8 +6276,8 @@ public static enum UnicodeScript {
0x208F, // 208F ; UNKNOWN
0x2090, // 2090..209C; LATIN
0x209D, // 209D..209F; UNKNOWN
- 0x20A0, // 20A0..20C0; COMMON
- 0x20C1, // 20C1..20CF; UNKNOWN
+ 0x20A0, // 20A0..20C1; COMMON
+ 0x20C2, // 20C2..20CF; UNKNOWN
0x20D0, // 20D0..20F0; INHERITED
0x20F1, // 20F1..20FF; UNKNOWN
0x2100, // 2100..2125; COMMON
@@ -6179,9 +6300,7 @@ public static enum UnicodeScript {
0x2800, // 2800..28FF; BRAILLE
0x2900, // 2900..2B73; COMMON
0x2B74, // 2B74..2B75; UNKNOWN
- 0x2B76, // 2B76..2B95; COMMON
- 0x2B96, // 2B96 ; UNKNOWN
- 0x2B97, // 2B97..2BFF; COMMON
+ 0x2B76, // 2B76..2BFF; COMMON
0x2C00, // 2C00..2C5F; GLAGOLITIC
0x2C60, // 2C60..2C7F; LATIN
0x2C80, // 2C80..2CF3; COPTIC
@@ -6282,15 +6401,9 @@ public static enum UnicodeScript {
0xA700, // A700..A721; COMMON
0xA722, // A722..A787; LATIN
0xA788, // A788..A78A; COMMON
- 0xA78B, // A78B..A7CD; LATIN
- 0xA7CE, // A7CE..A7CF; UNKNOWN
- 0xA7D0, // A7D0..A7D1; LATIN
- 0xA7D2, // A7D2 ; UNKNOWN
- 0xA7D3, // A7D3 ; LATIN
- 0xA7D4, // A7D4 ; UNKNOWN
- 0xA7D5, // A7D5..A7DC; LATIN
- 0xA7DD, // A7DD..A7F1; UNKNOWN
- 0xA7F2, // A7F2..A7FF; LATIN
+ 0xA78B, // A78B..A7DC; LATIN
+ 0xA7DD, // A7DD..A7F0; UNKNOWN
+ 0xA7F1, // A7F1..A7FF; LATIN
0xA800, // A800..A82C; SYLOTI_NAGRI
0xA82D, // A82D..A82F; UNKNOWN
0xA830, // A830..A839; COMMON
@@ -6378,15 +6491,9 @@ public static enum UnicodeScript {
0xFB43, // FB43..FB44; HEBREW
0xFB45, // FB45 ; UNKNOWN
0xFB46, // FB46..FB4F; HEBREW
- 0xFB50, // FB50..FBC2; ARABIC
- 0xFBC3, // FBC3..FBD2; UNKNOWN
- 0xFBD3, // FBD3..FD3D; ARABIC
+ 0xFB50, // FB50..FD3D; ARABIC
0xFD3E, // FD3E..FD3F; COMMON
- 0xFD40, // FD40..FD8F; ARABIC
- 0xFD90, // FD90..FD91; UNKNOWN
- 0xFD92, // FD92..FDC7; ARABIC
- 0xFDC8, // FDC8..FDCE; UNKNOWN
- 0xFDCF, // FDCF ; ARABIC
+ 0xFD40, // FD40..FDCF; ARABIC
0xFDD0, // FDD0..FDEF; UNKNOWN
0xFDF0, // FDF0..FDFF; ARABIC
0xFE00, // FE00..FE0F; INHERITED
@@ -6555,7 +6662,8 @@ public static enum UnicodeScript {
0x10920, // 10920..10939; LYDIAN
0x1093A, // 1093A..1093E; UNKNOWN
0x1093F, // 1093F ; LYDIAN
- 0x10940, // 10940..1097F; UNKNOWN
+ 0x10940, // 10940..10959; SIDETIC
+ 0x1095A, // 1095A..1097F; UNKNOWN
0x10980, // 10980..1099F; MEROITIC_HIEROGLYPHS
0x109A0, // 109A0..109B7; MEROITIC_CURSIVE
0x109B8, // 109B8..109BB; UNKNOWN
@@ -6625,9 +6733,11 @@ public static enum UnicodeScript {
0x10EAE, // 10EAE..10EAF; UNKNOWN
0x10EB0, // 10EB0..10EB1; YEZIDI
0x10EB2, // 10EB2..10EC1; UNKNOWN
- 0x10EC2, // 10EC2..10EC4; ARABIC
- 0x10EC5, // 10EC5..10EFB; UNKNOWN
- 0x10EFC, // 10EFC..10EFF; ARABIC
+ 0x10EC2, // 10EC2..10EC7; ARABIC
+ 0x10EC8, // 10EC8..10ECF; UNKNOWN
+ 0x10ED0, // 10ED0..10ED8; ARABIC
+ 0x10ED9, // 10ED9..10EF9; UNKNOWN
+ 0x10EFA, // 10EFA..10EFF; ARABIC
0x10F00, // 10F00..10F27; OLD_SOGDIAN
0x10F28, // 10F28..10F2F; UNKNOWN
0x10F30, // 10F30..10F59; SOGDIAN
@@ -6797,7 +6907,9 @@ public static enum UnicodeScript {
0x11AC0, // 11AC0..11AF8; PAU_CIN_HAU
0x11AF9, // 11AF9..11AFF; UNKNOWN
0x11B00, // 11B00..11B09; DEVANAGARI
- 0x11B0A, // 11B0A..11BBF; UNKNOWN
+ 0x11B0A, // 11B0A..11B5F; UNKNOWN
+ 0x11B60, // 11B60..11B67; SHARADA
+ 0x11B68, // 11B68..11BBF; UNKNOWN
0x11BC0, // 11BC0..11BE1; SUNUWAR
0x11BE2, // 11BE2..11BEF; UNKNOWN
0x11BF0, // 11BF0..11BF9; SUNUWAR
@@ -6841,7 +6953,11 @@ public static enum UnicodeScript {
0x11D93, // 11D93..11D98; GUNJALA_GONDI
0x11D99, // 11D99..11D9F; UNKNOWN
0x11DA0, // 11DA0..11DA9; GUNJALA_GONDI
- 0x11DAA, // 11DAA..11EDF; UNKNOWN
+ 0x11DAA, // 11DAA..11DAF; UNKNOWN
+ 0x11DB0, // 11DB0..11DDB; TOLONG_SIKI
+ 0x11DDC, // 11DDC..11DDF; UNKNOWN
+ 0x11DE0, // 11DE0..11DE9; TOLONG_SIKI
+ 0x11DEA, // 11DEA..11EDF; UNKNOWN
0x11EE0, // 11EE0..11EF8; MAKASAR
0x11EF9, // 11EF9..11EFF; UNKNOWN
0x11F00, // 11F00..11F10; KAWI
@@ -6901,7 +7017,11 @@ public static enum UnicodeScript {
0x16D40, // 16D40..16D79; KIRAT_RAI
0x16D7A, // 16D7A..16E3F; UNKNOWN
0x16E40, // 16E40..16E9A; MEDEFAIDRIN
- 0x16E9B, // 16E9B..16EFF; UNKNOWN
+ 0x16E9B, // 16E9B..16E9F; UNKNOWN
+ 0x16EA0, // 16EA0..16EB8; BERIA_ERFE
+ 0x16EB9, // 16EB9..16EBA; UNKNOWN
+ 0x16EBB, // 16EBB..16ED3; BERIA_ERFE
+ 0x16ED4, // 16ED4..16EFF; UNKNOWN
0x16F00, // 16F00..16F4A; MIAO
0x16F4B, // 16F4B..16F4E; UNKNOWN
0x16F4F, // 16F4F..16F87; MIAO
@@ -6913,16 +7033,16 @@ public static enum UnicodeScript {
0x16FE2, // 16FE2..16FE3; HAN
0x16FE4, // 16FE4 ; KHITAN_SMALL_SCRIPT
0x16FE5, // 16FE5..16FEF; UNKNOWN
- 0x16FF0, // 16FF0..16FF1; HAN
- 0x16FF2, // 16FF2..16FFF; UNKNOWN
- 0x17000, // 17000..187F7; TANGUT
- 0x187F8, // 187F8..187FF; UNKNOWN
- 0x18800, // 18800..18AFF; TANGUT
+ 0x16FF0, // 16FF0..16FF6; HAN
+ 0x16FF7, // 16FF7..16FFF; UNKNOWN
+ 0x17000, // 17000..18AFF; TANGUT
0x18B00, // 18B00..18CD5; KHITAN_SMALL_SCRIPT
0x18CD6, // 18CD6..18CFE; UNKNOWN
0x18CFF, // 18CFF ; KHITAN_SMALL_SCRIPT
- 0x18D00, // 18D00..18D08; TANGUT
- 0x18D09, // 18D09..1AFEF; UNKNOWN
+ 0x18D00, // 18D00..18D1E; TANGUT
+ 0x18D1F, // 18D1F..18D7F; UNKNOWN
+ 0x18D80, // 18D80..18DF2; TANGUT
+ 0x18DF3, // 18DF3..1AFEF; UNKNOWN
0x1AFF0, // 1AFF0..1AFF3; KATAKANA
0x1AFF4, // 1AFF4 ; UNKNOWN
0x1AFF5, // 1AFF5..1AFFB; KATAKANA
@@ -6954,10 +7074,14 @@ public static enum UnicodeScript {
0x1BC9C, // 1BC9C..1BC9F; DUPLOYAN
0x1BCA0, // 1BCA0..1BCA3; COMMON
0x1BCA4, // 1BCA4..1CBFF; UNKNOWN
- 0x1CC00, // 1CC00..1CCF9; COMMON
- 0x1CCFA, // 1CCFA..1CCFF; UNKNOWN
+ 0x1CC00, // 1CC00..1CCFC; COMMON
+ 0x1CCFD, // 1CCFD..1CCFF; UNKNOWN
0x1CD00, // 1CD00..1CEB3; COMMON
- 0x1CEB4, // 1CEB4..1CEFF; UNKNOWN
+ 0x1CEB4, // 1CEB4..1CEB9; UNKNOWN
+ 0x1CEBA, // 1CEBA..1CED0; COMMON
+ 0x1CED1, // 1CED1..1CEDF; UNKNOWN
+ 0x1CEE0, // 1CEE0..1CEF0; COMMON
+ 0x1CEF1, // 1CEF1..1CEFF; UNKNOWN
0x1CF00, // 1CF00..1CF2D; INHERITED
0x1CF2E, // 1CF2E..1CF2F; UNKNOWN
0x1CF30, // 1CF30..1CF46; INHERITED
@@ -7072,7 +7196,13 @@ public static enum UnicodeScript {
0x1E5D0, // 1E5D0..1E5FA; OL_ONAL
0x1E5FB, // 1E5FB..1E5FE; UNKNOWN
0x1E5FF, // 1E5FF ; OL_ONAL
- 0x1E600, // 1E600..1E7DF; UNKNOWN
+ 0x1E600, // 1E600..1E6BF; UNKNOWN
+ 0x1E6C0, // 1E6C0..1E6DE; TAI_YO
+ 0x1E6DF, // 1E6DF ; UNKNOWN
+ 0x1E6E0, // 1E6E0..1E6F5; TAI_YO
+ 0x1E6F6, // 1E6F6..1E6FD; UNKNOWN
+ 0x1E6FE, // 1E6FE..1E6FF; TAI_YO
+ 0x1E700, // 1E700..1E7DF; UNKNOWN
0x1E7E0, // 1E7E0..1E7E6; ETHIOPIC
0x1E7E7, // 1E7E7 ; UNKNOWN
0x1E7E8, // 1E7E8..1E7EB; ETHIOPIC
@@ -7189,15 +7319,13 @@ public static enum UnicodeScript {
0x1F252, // 1F252..1F25F; UNKNOWN
0x1F260, // 1F260..1F265; COMMON
0x1F266, // 1F266..1F2FF; UNKNOWN
- 0x1F300, // 1F300..1F6D7; COMMON
- 0x1F6D8, // 1F6D8..1F6DB; UNKNOWN
+ 0x1F300, // 1F300..1F6D8; COMMON
+ 0x1F6D9, // 1F6D9..1F6DB; UNKNOWN
0x1F6DC, // 1F6DC..1F6EC; COMMON
0x1F6ED, // 1F6ED..1F6EF; UNKNOWN
0x1F6F0, // 1F6F0..1F6FC; COMMON
0x1F6FD, // 1F6FD..1F6FF; UNKNOWN
- 0x1F700, // 1F700..1F776; COMMON
- 0x1F777, // 1F777..1F77A; UNKNOWN
- 0x1F77B, // 1F77B..1F7D9; COMMON
+ 0x1F700, // 1F700..1F7D9; COMMON
0x1F7DA, // 1F7DA..1F7DF; UNKNOWN
0x1F7E0, // 1F7E0..1F7EB; COMMON
0x1F7EC, // 1F7EC..1F7EF; UNKNOWN
@@ -7216,35 +7344,37 @@ public static enum UnicodeScript {
0x1F8B0, // 1F8B0..1F8BB; COMMON
0x1F8BC, // 1F8BC..1F8BF; UNKNOWN
0x1F8C0, // 1F8C0..1F8C1; COMMON
- 0x1F8C2, // 1F8C2..1F8FF; UNKNOWN
- 0x1F900, // 1F900..1FA53; COMMON
- 0x1FA54, // 1FA54..1FA5F; UNKNOWN
+ 0x1F8C2, // 1F8C2..1F8CF; UNKNOWN
+ 0x1F8D0, // 1F8D0..1F8D8; COMMON
+ 0x1F8D9, // 1F8D9..1F8FF; UNKNOWN
+ 0x1F900, // 1F900..1FA57; COMMON
+ 0x1FA58, // 1FA58..1FA5F; UNKNOWN
0x1FA60, // 1FA60..1FA6D; COMMON
0x1FA6E, // 1FA6E..1FA6F; UNKNOWN
0x1FA70, // 1FA70..1FA7C; COMMON
0x1FA7D, // 1FA7D..1FA7F; UNKNOWN
- 0x1FA80, // 1FA80..1FA89; COMMON
- 0x1FA8A, // 1FA8A..1FA8E; UNKNOWN
- 0x1FA8F, // 1FA8F..1FAC6; COMMON
- 0x1FAC7, // 1FAC7..1FACD; UNKNOWN
- 0x1FACE, // 1FACE..1FADC; COMMON
+ 0x1FA80, // 1FA80..1FA8A; COMMON
+ 0x1FA8B, // 1FA8B..1FA8D; UNKNOWN
+ 0x1FA8E, // 1FA8E..1FAC6; COMMON
+ 0x1FAC7, // 1FAC7 ; UNKNOWN
+ 0x1FAC8, // 1FAC8 ; COMMON
+ 0x1FAC9, // 1FAC9..1FACC; UNKNOWN
+ 0x1FACD, // 1FACD..1FADC; COMMON
0x1FADD, // 1FADD..1FADE; UNKNOWN
- 0x1FADF, // 1FADF..1FAE9; COMMON
- 0x1FAEA, // 1FAEA..1FAEF; UNKNOWN
- 0x1FAF0, // 1FAF0..1FAF8; COMMON
+ 0x1FADF, // 1FADF..1FAEA; COMMON
+ 0x1FAEB, // 1FAEB..1FAEE; UNKNOWN
+ 0x1FAEF, // 1FAEF..1FAF8; COMMON
0x1FAF9, // 1FAF9..1FAFF; UNKNOWN
0x1FB00, // 1FB00..1FB92; COMMON
0x1FB93, // 1FB93 ; UNKNOWN
- 0x1FB94, // 1FB94..1FBF9; COMMON
- 0x1FBFA, // 1FBFA..1FFFF; UNKNOWN
+ 0x1FB94, // 1FB94..1FBFA; COMMON
+ 0x1FBFB, // 1FBFB..1FFFF; UNKNOWN
0x20000, // 20000..2A6DF; HAN
0x2A6E0, // 2A6E0..2A6FF; UNKNOWN
- 0x2A700, // 2A700..2B739; HAN
- 0x2B73A, // 2B73A..2B73F; UNKNOWN
- 0x2B740, // 2B740..2B81D; HAN
+ 0x2A700, // 2A700..2B81D; HAN
0x2B81E, // 2B81E..2B81F; UNKNOWN
- 0x2B820, // 2B820..2CEA1; HAN
- 0x2CEA2, // 2CEA2..2CEAF; UNKNOWN
+ 0x2B820, // 2B820..2CEAD; HAN
+ 0x2CEAE, // 2CEAE..2CEAF; UNKNOWN
0x2CEB0, // 2CEB0..2EBE0; HAN
0x2EBE1, // 2EBE1..2EBEF; UNKNOWN
0x2EBF0, // 2EBF0..2EE5D; HAN
@@ -7253,8 +7383,8 @@ public static enum UnicodeScript {
0x2FA1E, // 2FA1E..2FFFF; UNKNOWN
0x30000, // 30000..3134A; HAN
0x3134B, // 3134B..3134F; UNKNOWN
- 0x31350, // 31350..323AF; HAN
- 0x323B0, // 323B0..E0000; UNKNOWN
+ 0x31350, // 31350..33479; HAN
+ 0x3347A, // 3347A..E0000; UNKNOWN
0xE0001, // E0001 ; COMMON
0xE0002, // E0002..E001F; UNKNOWN
0xE0020, // E0020..E007F; COMMON
@@ -7359,9 +7489,7 @@ public static enum UnicodeScript {
UNKNOWN, // 085F
SYRIAC, // 0860..086A
UNKNOWN, // 086B..086F
- ARABIC, // 0870..088E
- UNKNOWN, // 088F
- ARABIC, // 0890..0891
+ ARABIC, // 0870..0891
UNKNOWN, // 0892..0896
ARABIC, // 0897..08E1
COMMON, // 08E2
@@ -7536,8 +7664,8 @@ public static enum UnicodeScript {
TELUGU, // 0C55..0C56
UNKNOWN, // 0C57
TELUGU, // 0C58..0C5A
- UNKNOWN, // 0C5B..0C5C
- TELUGU, // 0C5D
+ UNKNOWN, // 0C5B
+ TELUGU, // 0C5C..0C5D
UNKNOWN, // 0C5E..0C5F
TELUGU, // 0C60..0C63
UNKNOWN, // 0C64..0C65
@@ -7561,8 +7689,8 @@ public static enum UnicodeScript {
KANNADA, // 0CCA..0CCD
UNKNOWN, // 0CCE..0CD4
KANNADA, // 0CD5..0CD6
- UNKNOWN, // 0CD7..0CDC
- KANNADA, // 0CDD..0CDE
+ UNKNOWN, // 0CD7..0CDB
+ KANNADA, // 0CDC..0CDE
UNKNOWN, // 0CDF
KANNADA, // 0CE0..0CE3
UNKNOWN, // 0CE4..0CE5
@@ -7773,8 +7901,10 @@ public static enum UnicodeScript {
UNKNOWN, // 1A9A..1A9F
TAI_THAM, // 1AA0..1AAD
UNKNOWN, // 1AAE..1AAF
- INHERITED, // 1AB0..1ACE
- UNKNOWN, // 1ACF..1AFF
+ INHERITED, // 1AB0..1ADD
+ UNKNOWN, // 1ADE..1ADF
+ INHERITED, // 1AE0..1AEB
+ UNKNOWN, // 1AEC..1AFF
BALINESE, // 1B00..1B4C
UNKNOWN, // 1B4D
BALINESE, // 1B4E..1B7F
@@ -7866,8 +7996,8 @@ public static enum UnicodeScript {
UNKNOWN, // 208F
LATIN, // 2090..209C
UNKNOWN, // 209D..209F
- COMMON, // 20A0..20C0
- UNKNOWN, // 20C1..20CF
+ COMMON, // 20A0..20C1
+ UNKNOWN, // 20C2..20CF
INHERITED, // 20D0..20F0
UNKNOWN, // 20F1..20FF
COMMON, // 2100..2125
@@ -7890,9 +8020,7 @@ public static enum UnicodeScript {
BRAILLE, // 2800..28FF
COMMON, // 2900..2B73
UNKNOWN, // 2B74..2B75
- COMMON, // 2B76..2B95
- UNKNOWN, // 2B96
- COMMON, // 2B97..2BFF
+ COMMON, // 2B76..2BFF
GLAGOLITIC, // 2C00..2C5F
LATIN, // 2C60..2C7F
COPTIC, // 2C80..2CF3
@@ -7993,15 +8121,9 @@ public static enum UnicodeScript {
COMMON, // A700..A721
LATIN, // A722..A787
COMMON, // A788..A78A
- LATIN, // A78B..A7CD
- UNKNOWN, // A7CE..A7CF
- LATIN, // A7D0..A7D1
- UNKNOWN, // A7D2
- LATIN, // A7D3
- UNKNOWN, // A7D4
- LATIN, // A7D5..A7DC
- UNKNOWN, // A7DD..A7F1
- LATIN, // A7F2..A7FF
+ LATIN, // A78B..A7DC
+ UNKNOWN, // A7DD..A7F0
+ LATIN, // A7F1..A7FF
SYLOTI_NAGRI, // A800..A82C
UNKNOWN, // A82D..A82F
COMMON, // A830..A839
@@ -8089,15 +8211,9 @@ public static enum UnicodeScript {
HEBREW, // FB43..FB44
UNKNOWN, // FB45
HEBREW, // FB46..FB4F
- ARABIC, // FB50..FBC2
- UNKNOWN, // FBC3..FBD2
- ARABIC, // FBD3..FD3D
+ ARABIC, // FB50..FD3D
COMMON, // FD3E..FD3F
- ARABIC, // FD40..FD8F
- UNKNOWN, // FD90..FD91
- ARABIC, // FD92..FDC7
- UNKNOWN, // FDC8..FDCE
- ARABIC, // FDCF
+ ARABIC, // FD40..FDCF
UNKNOWN, // FDD0..FDEF
ARABIC, // FDF0..FDFF
INHERITED, // FE00..FE0F
@@ -8266,7 +8382,8 @@ public static enum UnicodeScript {
LYDIAN, // 10920..10939
UNKNOWN, // 1093A..1093E
LYDIAN, // 1093F
- UNKNOWN, // 10940..1097F
+ SIDETIC, // 10940..10959
+ UNKNOWN, // 1095A..1097F
MEROITIC_HIEROGLYPHS, // 10980..1099F
MEROITIC_CURSIVE, // 109A0..109B7
UNKNOWN, // 109B8..109BB
@@ -8336,9 +8453,11 @@ public static enum UnicodeScript {
UNKNOWN, // 10EAE..10EAF
YEZIDI, // 10EB0..10EB1
UNKNOWN, // 10EB2..10EC1
- ARABIC, // 10EC2..10EC4
- UNKNOWN, // 10EC5..10EFB
- ARABIC, // 10EFC..10EFF
+ ARABIC, // 10EC2..10EC7
+ UNKNOWN, // 10EC8..10ECF
+ ARABIC, // 10ED0..10ED8
+ UNKNOWN, // 10ED9..10EF9
+ ARABIC, // 10EFA..10EFF
OLD_SOGDIAN, // 10F00..10F27
UNKNOWN, // 10F28..10F2F
SOGDIAN, // 10F30..10F59
@@ -8508,7 +8627,9 @@ public static enum UnicodeScript {
PAU_CIN_HAU, // 11AC0..11AF8
UNKNOWN, // 11AF9..11AFF
DEVANAGARI, // 11B00..11B09
- UNKNOWN, // 11B0A..11BBF
+ UNKNOWN, // 11B0A..11B5F
+ SHARADA, // 11B60..11B67
+ UNKNOWN, // 11B68..11BBF
SUNUWAR, // 11BC0..11BE1
UNKNOWN, // 11BE2..11BEF
SUNUWAR, // 11BF0..11BF9
@@ -8552,7 +8673,11 @@ public static enum UnicodeScript {
GUNJALA_GONDI, // 11D93..11D98
UNKNOWN, // 11D99..11D9F
GUNJALA_GONDI, // 11DA0..11DA9
- UNKNOWN, // 11DAA..11EDF
+ UNKNOWN, // 11DAA..11DAF
+ TOLONG_SIKI, // 11DB0..11DDB
+ UNKNOWN, // 11DDC..11DDF
+ TOLONG_SIKI, // 11DE0..11DE9
+ UNKNOWN, // 11DEA..11EDF
MAKASAR, // 11EE0..11EF8
UNKNOWN, // 11EF9..11EFF
KAWI, // 11F00..11F10
@@ -8612,7 +8737,11 @@ public static enum UnicodeScript {
KIRAT_RAI, // 16D40..16D79
UNKNOWN, // 16D7A..16E3F
MEDEFAIDRIN, // 16E40..16E9A
- UNKNOWN, // 16E9B..16EFF
+ UNKNOWN, // 16E9B..16E9F
+ BERIA_ERFE, // 16EA0..16EB8
+ UNKNOWN, // 16EB9..16EBA
+ BERIA_ERFE, // 16EBB..16ED3
+ UNKNOWN, // 16ED4..16EFF
MIAO, // 16F00..16F4A
UNKNOWN, // 16F4B..16F4E
MIAO, // 16F4F..16F87
@@ -8624,16 +8753,16 @@ public static enum UnicodeScript {
HAN, // 16FE2..16FE3
KHITAN_SMALL_SCRIPT, // 16FE4
UNKNOWN, // 16FE5..16FEF
- HAN, // 16FF0..16FF1
- UNKNOWN, // 16FF2..16FFF
- TANGUT, // 17000..187F7
- UNKNOWN, // 187F8..187FF
- TANGUT, // 18800..18AFF
+ HAN, // 16FF0..16FF6
+ UNKNOWN, // 16FF7..16FFF
+ TANGUT, // 17000..18AFF
KHITAN_SMALL_SCRIPT, // 18B00..18CD5
UNKNOWN, // 18CD6..18CFE
KHITAN_SMALL_SCRIPT, // 18CFF
- TANGUT, // 18D00..18D08
- UNKNOWN, // 18D09..1AFEF
+ TANGUT, // 18D00..18D1E
+ UNKNOWN, // 18D1F..18D7F
+ TANGUT, // 18D80..18DF2
+ UNKNOWN, // 18DF3..1AFEF
KATAKANA, // 1AFF0..1AFF3
UNKNOWN, // 1AFF4
KATAKANA, // 1AFF5..1AFFB
@@ -8665,10 +8794,14 @@ public static enum UnicodeScript {
DUPLOYAN, // 1BC9C..1BC9F
COMMON, // 1BCA0..1BCA3
UNKNOWN, // 1BCA4..1CBFF
- COMMON, // 1CC00..1CCF9
- UNKNOWN, // 1CCFA..1CCFF
+ COMMON, // 1CC00..1CCFC
+ UNKNOWN, // 1CCFD..1CCFF
COMMON, // 1CD00..1CEB3
- UNKNOWN, // 1CEB4..1CEFF
+ UNKNOWN, // 1CEB4..1CEB9
+ COMMON, // 1CEBA..1CED0
+ UNKNOWN, // 1CED1..1CEDF
+ COMMON, // 1CEE0..1CEF0
+ UNKNOWN, // 1CEF1..1CEFF
INHERITED, // 1CF00..1CF2D
UNKNOWN, // 1CF2E..1CF2F
INHERITED, // 1CF30..1CF46
@@ -8783,7 +8916,13 @@ public static enum UnicodeScript {
OL_ONAL, // 1E5D0..1E5FA
UNKNOWN, // 1E5FB..1E5FE
OL_ONAL, // 1E5FF
- UNKNOWN, // 1E600..1E7DF
+ UNKNOWN, // 1E600..1E6BF
+ TAI_YO, // 1E6C0..1E6DE
+ UNKNOWN, // 1E6DF
+ TAI_YO, // 1E6E0..1E6F5
+ UNKNOWN, // 1E6F6..1E6FD
+ TAI_YO, // 1E6FE..1E6FF
+ UNKNOWN, // 1E700..1E7DF
ETHIOPIC, // 1E7E0..1E7E6
UNKNOWN, // 1E7E7
ETHIOPIC, // 1E7E8..1E7EB
@@ -8900,15 +9039,13 @@ public static enum UnicodeScript {
UNKNOWN, // 1F252..1F25F
COMMON, // 1F260..1F265
UNKNOWN, // 1F266..1F2FF
- COMMON, // 1F300..1F6D7
- UNKNOWN, // 1F6D8..1F6DB
+ COMMON, // 1F300..1F6D8
+ UNKNOWN, // 1F6D9..1F6DB
COMMON, // 1F6DC..1F6EC
UNKNOWN, // 1F6ED..1F6EF
COMMON, // 1F6F0..1F6FC
UNKNOWN, // 1F6FD..1F6FF
- COMMON, // 1F700..1F776
- UNKNOWN, // 1F777..1F77A
- COMMON, // 1F77B..1F7D9
+ COMMON, // 1F700..1F7D9
UNKNOWN, // 1F7DA..1F7DF
COMMON, // 1F7E0..1F7EB
UNKNOWN, // 1F7EC..1F7EF
@@ -8927,35 +9064,37 @@ public static enum UnicodeScript {
COMMON, // 1F8B0..1F8BB
UNKNOWN, // 1F8BC..1F8BF
COMMON, // 1F8C0..1F8C1
- UNKNOWN, // 1F8C2..1F8FF
- COMMON, // 1F900..1FA53
- UNKNOWN, // 1FA54..1FA5F
+ UNKNOWN, // 1F8C2..1F8CF
+ COMMON, // 1F8D0..1F8D8
+ UNKNOWN, // 1F8D9..1F8FF
+ COMMON, // 1F900..1FA57
+ UNKNOWN, // 1FA58..1FA5F
COMMON, // 1FA60..1FA6D
UNKNOWN, // 1FA6E..1FA6F
COMMON, // 1FA70..1FA7C
UNKNOWN, // 1FA7D..1FA7F
- COMMON, // 1FA80..1FA89
- UNKNOWN, // 1FA8A..1FA8E
- COMMON, // 1FA8F..1FAC6
- UNKNOWN, // 1FAC7..1FACD
- COMMON, // 1FACE..1FADC
+ COMMON, // 1FA80..1FA8A
+ UNKNOWN, // 1FA8B..1FA8D
+ COMMON, // 1FA8E..1FAC6
+ UNKNOWN, // 1FAC7
+ COMMON, // 1FAC8
+ UNKNOWN, // 1FAC9..1FACC
+ COMMON, // 1FACD..1FADC
UNKNOWN, // 1FADD..1FADE
- COMMON, // 1FADF..1FAE9
- UNKNOWN, // 1FAEA..1FAEF
- COMMON, // 1FAF0..1FAF8
+ COMMON, // 1FADF..1FAEA
+ UNKNOWN, // 1FAEB..1FAEE
+ COMMON, // 1FAEF..1FAF8
UNKNOWN, // 1FAF9..1FAFF
COMMON, // 1FB00..1FB92
UNKNOWN, // 1FB93
- COMMON, // 1FB94..1FBF9
- UNKNOWN, // 1FBFA..1FFFF
+ COMMON, // 1FB94..1FBFA
+ UNKNOWN, // 1FBFB..1FFFF
HAN, // 20000..2A6DF
UNKNOWN, // 2A6E0..2A6FF
- HAN, // 2A700..2B739
- UNKNOWN, // 2B73A..2B73F
- HAN, // 2B740..2B81D
+ HAN, // 2A700..2B81D
UNKNOWN, // 2B81E..2B81F
- HAN, // 2B820..2CEA1
- UNKNOWN, // 2CEA2..2CEAF
+ HAN, // 2B820..2CEAD
+ UNKNOWN, // 2CEAE..2CEAF
HAN, // 2CEB0..2EBE0
UNKNOWN, // 2EBE1..2EBEF
HAN, // 2EBF0..2EE5D
@@ -8964,8 +9103,8 @@ public static enum UnicodeScript {
UNKNOWN, // 2FA1E..2FFFF
HAN, // 30000..3134A
UNKNOWN, // 3134B..3134F
- HAN, // 31350..323AF
- UNKNOWN, // 323B0..E0000
+ HAN, // 31350..33479
+ UNKNOWN, // 3347A..E0000
COMMON, // E0001
UNKNOWN, // E0002..E001F
COMMON, // E0020..E007F
@@ -8989,6 +9128,7 @@ public static enum UnicodeScript {
aliases.put("BASS", BASSA_VAH);
aliases.put("BATK", BATAK);
aliases.put("BENG", BENGALI);
+ aliases.put("BERF", BERIA_ERFE);
aliases.put("BHKS", BHAIKSUKI);
aliases.put("BOPO", BOPOMOFO);
aliases.put("BRAH", BRAHMI);
@@ -9107,6 +9247,7 @@ public static enum UnicodeScript {
aliases.put("SHAW", SHAVIAN);
aliases.put("SHRD", SHARADA);
aliases.put("SIDD", SIDDHAM);
+ aliases.put("SIDT", SIDETIC);
aliases.put("SIND", KHUDAWADI);
aliases.put("SINH", SINHALA);
aliases.put("SOGD", SOGDIAN);
@@ -9124,6 +9265,7 @@ public static enum UnicodeScript {
aliases.put("TAML", TAMIL);
aliases.put("TANG", TANGUT);
aliases.put("TAVT", TAI_VIET);
+ aliases.put("TAYO", TAI_YO);
aliases.put("TELU", TELUGU);
aliases.put("TFNG", TIFINAGH);
aliases.put("TGLG", TAGALOG);
@@ -9133,6 +9275,7 @@ public static enum UnicodeScript {
aliases.put("TIRH", TIRHUTA);
aliases.put("TNSA", TANGSA);
aliases.put("TODR", TODHRI);
+ aliases.put("TOLS", TOLONG_SIKI);
aliases.put("TOTO", TOTO);
aliases.put("TUTG", TULU_TIGALARI);
aliases.put("UGAR", UGARITIC);
diff --git a/src/java.base/share/classes/java/lang/LazyConstant.java b/src/java.base/share/classes/java/lang/LazyConstant.java
new file mode 100644
index 0000000000000..34f3d754a10f3
--- /dev/null
+++ b/src/java.base/share/classes/java/lang/LazyConstant.java
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package java.lang;
+
+import jdk.internal.javac.PreviewFeature;
+import jdk.internal.lang.LazyConstantImpl;
+
+import java.io.Serializable;
+import java.util.*;
+import java.util.function.Function;
+import java.util.function.IntFunction;
+import java.util.function.Supplier;
+
+/**
+ * A lazy constant is a holder of contents that can be set at most once.
+ *
+ * A lazy constant is created using the factory method
+ * {@linkplain LazyConstant#of(Supplier) LazyConstant.of()}.
+ * When created, the lazy constant is not initialized, meaning it has no contents.
+ * The lazy constant (of type {@code T}) can then be initialized
+ * (and its contents retrieved) by calling {@linkplain #get() get()}. The first time
+ * {@linkplain #get() get()} is called, the underlying computing function
+ * (provided at construction) will be invoked and the result will be used to initialize
+ * the constant. Once a lazy constant is initialized, its contents can never change
+ * and will be retrieved over and over again upon subsequent {@linkplain #get() get()}
+ * invocations.
+ *
+ * Consider the following example where a lazy constant field "{@code logger}" holds
+ * an object of type {@code Logger}:
+ *
+ * {@snippet lang = java:
+ * public class Component {
+ *
+ * // Creates a new uninitialized lazy constant
+ * private final LazyConstant<Logger> logger =
+ * // @link substring="of" target="#of" :
+ * LazyConstant.of( () -> Logger.create(Component.class) );
+ *
+ * public void process() {
+ * logger.get().info("Process started");
+ * // ...
+ * }
+ * }
+ *}
+ *
+ * Initially, the lazy constant is not initialized. When {@code logger.get()}
+ * is first invoked, it evaluates the computing function and initializes the constant to
+ * the result; the result is then returned to the client. Hence, {@linkplain #get() get()}
+ * guarantees that the constant is initialized before it returns, barring
+ * any exceptions.
+ *
+ * Furthermore, {@linkplain #get() get()} guarantees that, out of several threads trying to
+ * invoke the computing function simultaneously, {@linkplain ##thread-safety only one is
+ * ever selected} for computation. This property is crucial as evaluation of the computing
+ * function may have side effects, for example, the call above to {@code Logger.create()}
+ * may result in storage resources being prepared.
+ *
+ *
+ * <h2 id="exception-handling">Exception handling</h2>
+ * If the computing function returns {@code null}, a {@linkplain NullPointerException}
+ * is thrown. Hence, a lazy constant can never hold a {@code null} value. Clients who
+ * want to use a nullable constant can wrap the value into an {@linkplain Optional} holder.
+ *
+ * If the computing function recursively invokes itself (directly or indirectly via
+ * the lazy constant), an {@linkplain IllegalStateException} is thrown, and the lazy
+ * constant is not initialized.
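To make the two failure modes above concrete, here is a minimal sketch (not part of the file; it assumes `--enable-preview` on a JDK build that includes `LazyConstant`; class and field names are illustrative):

```java
// Minimal sketch of the documented failure modes; assumes --enable-preview
// on a build containing LazyConstant. Names are illustrative only.
public class FailureModes {
    // The computing function (indirectly) reads the constant it initializes.
    static final LazyConstant<Integer> SELF =
            LazyConstant.of(() -> FailureModes.SELF.get() + 1);

    public static void main(String[] args) {
        LazyConstant<String> nullable = LazyConstant.of(() -> null);
        try {
            nullable.get();                 // computing function returned null
        } catch (NullPointerException e) {
            System.out.println(nullable.isInitialized()); // false
        }
        try {
            SELF.get();                     // recursive initialization
        } catch (IllegalStateException e) {
            System.out.println("recursion rejected");     // stays uninitialized
        }
    }
}
```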
+ *
+ *
+ * <h2 id="composing-lazy-constants">Composing lazy constants</h2>
+ * A lazy constant can depend on other lazy constants, forming a dependency graph
+ * that can be lazily computed but where access to individual elements can still be
+ * performant. In the following example, a single {@code Foo} and a {@code Bar}
+ * instance (that is dependent on the {@code Foo} instance) are lazily created, both of
+ * which are held by lazy constants:
+ *
+ * {@snippet lang = java:
+ * public final class DependencyUtil {
+ *
+ * private DependencyUtil() {}
+ *
+ * public static class Foo {
+ * // ...
+ * }
+ *
+ * public static class Bar {
+ * public Bar(Foo foo) {
+ * // ...
+ * }
+ * }
+ *
+ * private static final LazyConstant<Foo> FOO = LazyConstant.of( Foo::new );
+ * private static final LazyConstant<Bar> BAR = LazyConstant.of( () -> new Bar(FOO.get()) );
+ *
+ * public static Foo foo() {
+ * return FOO.get();
+ * }
+ *
+ * public static Bar bar() {
+ * return BAR.get();
+ * }
+ *
+ * }
+ *}
+ * Calling {@code BAR.get()} will create the {@code Bar} singleton if it is not already
+ * created. Upon such a creation, a dependent {@code Foo} will first be created if
+ * the {@code Foo} does not already exist.
+ *
+ * <h2 id="thread-safety">Thread Safety</h2>
+ * A lazy constant is guaranteed to be initialized atomically and at most once. If
+ * competing threads are racing to initialize a lazy constant, only one updating thread
+ * runs the computing function (which runs on the caller's thread and is hereafter denoted
+ * the computing thread), while the other threads are blocked until the constant
+ * is initialized, after which the other threads observe that the lazy constant is
+ * initialized, leave its contents unchanged, and never invoke the computing function.
+ *
+ * The invocation of the computing function and the resulting initialization of
+ * the constant {@linkplain java.util.concurrent##MemoryVisibility happens-before}
+ * the initialized constant's content is read. Hence, the initialized constant's content,
+ * including any {@code final} fields of any newly created objects, is safely published.
+ *
+ * Thread interruption does not cancel the initialization of a lazy constant. In other
+ * words, if the computing thread is interrupted, {@code LazyConstant::get} does not
+ * clear the interrupted thread's status, nor does it throw an {@linkplain InterruptedException}.
+ *
+ * If the computing function blocks indefinitely, other threads operating on this
+ * lazy constant may block indefinitely; no timeouts or cancellations are provided.
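The at-most-once guarantee described above can be exercised with a small race, sketched here under the same preview-build assumption:

```java
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative race, assuming --enable-preview on a build with LazyConstant:
// many threads call get(), but the computing function runs exactly once.
public class RaceDemo {
    static final AtomicInteger RUNS = new AtomicInteger();
    static final LazyConstant<String> VALUE =
            LazyConstant.of(() -> "computed by run #" + RUNS.incrementAndGet());

    public static void main(String[] args) throws InterruptedException {
        Thread[] threads = new Thread[8];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(() -> System.out.println(VALUE.get()));
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }
        System.out.println(RUNS.get()); // always 1: a single computing thread won
    }
}
```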
+ *
+ *
+ * The contents of a lazy constant can never change after the lazy constant has been
+ * initialized. Therefore, a JVM implementation may, for an initialized lazy constant,
+ * elide all future reads of that lazy constant's contents and instead use the contents
+ * that has been previously observed. We call this optimization constant folding.
+ * This is only possible if there is a direct reference from a {@code static final} field
+ * to a lazy constant or if there is a chain from a {@code static final} field -- via one
+ * or more trusted fields (i.e., {@code static final} fields,
+ * {@linkplain Record record} fields, or final instance fields in hidden classes) --
+ * to a lazy constant.
+ *
+ * <h2 id="miscellaneous">Miscellaneous</h2>
+ * Except for {@linkplain Object#equals(Object) equals(obj)} and
+ * {@linkplain #orElse(Object) orElse(other)} parameters, all method parameters
+ * must be non-null, or a {@link NullPointerException} will be thrown.
+ *
+ * @apiNote Once a lazy constant is initialized, its contents cannot ever be removed.
+ * This can be a source of an unintended memory leak. More specifically,
+ * a lazy constant {@linkplain java.lang.ref##reachability strongly references}
+ * its contents. Hence, the contents of a lazy constant will be reachable as long
+ * as the lazy constant itself is reachable.
+ *
+ * While it's possible to store an array inside a lazy constant, doing so will
+ * not result in improved access performance of the array elements. Instead, a
+ * {@linkplain List#ofLazy(int, IntFunction) lazy list} of arbitrary depth can
+ * be used, which provides constant components.
+ *
+ * The {@code LazyConstant} type is not {@link Serializable}.
+ *
+ * Use in static initializers may interact with class initialization order;
+ * cyclic initialization may result in initialization errors as described
+ * in section {@jls 12.4} of The Java Language Specification.
+ *
+ * @implNote
+ * A lazy constant is free to synchronize on itself. Hence, care must be
+ * taken when directly or indirectly synchronizing on a lazy constant.
+ * A lazy constant is unmodifiable but its contents may or may not be
+ * immutable (e.g., it may hold an {@linkplain ArrayList}).
+ *
+ * @param <T> type of the constant
+ *
+ * @since 26
+ *
+ * @see Optional
+ * @see Supplier
+ * @see List#ofLazy(int, IntFunction)
+ * @see Map#ofLazy(Set, Function)
+ * @jls 12.4 Initialization of Classes and Interfaces
+ * @jls 17.4.5 Happens-before Order
+ */
+@PreviewFeature(feature = PreviewFeature.Feature.LAZY_CONSTANTS)
+public sealed interface LazyConstant<T>
+        extends Supplier<T>
+        permits LazyConstantImpl {
+
+ /**
+ * {@return the contents of this lazy constant if initialized, otherwise,
+ * returns {@code other}}
+ *
+ * This method never triggers initialization of this lazy constant and will observe
+ * initialization by other threads atomically (i.e., it returns the contents
+ * if and only if the initialization has already completed).
+ *
+ * @param other value to return if the content is not initialized
+ * (can be {@code null})
+ */
+ T orElse(T other);
+
+ /**
+ * {@return the contents of this initialized constant. If not initialized, first
+ * computes and initializes this constant using the computing function}
+ *
+ * After this method returns successfully, the constant is guaranteed to be
+ * initialized.
+ *
+ * If the computing function throws, the throwable is relayed to the caller and
+ * the lazy constant remains uninitialized; a subsequent call to
+ * {@linkplain #get() get()} may then attempt the computation again.
+ */
+ T get();
+
+ /**
+ * {@return {@code true} if the constant is initialized, {@code false} otherwise}
+ *
+ * This method never triggers initialization of this lazy constant and will observe
+ * changes in the initialization state made by other threads atomically.
+ */
+ boolean isInitialized();
+
+ // Object methods
+
+ /**
+ * {@return {@code true} if this lazy constant is the same as the provided
+ * {@code obj}, {@code false} otherwise}
+ *
+ * In other words, equals compares the identity of this lazy constant and {@code obj}
+ * to determine equality. Hence, two lazy constants with the same contents are
+ * not equal.
+ *
+ * This method never triggers initialization of this lazy constant.
+ */
+ @Override
+ boolean equals(Object obj);
+
+ /**
+ * {@return the {@linkplain System#identityHashCode(Object) identity hash code} for
+ * this lazy constant}
+ *
+ * This method never triggers initialization of this lazy constant.
+ */
+ @Override
+ int hashCode();
+
+ /**
+ * {@return a string suitable for debugging}
+ *
+ * This method never triggers initialization of this lazy constant and will observe
+ * initialization by other threads atomically (i.e., it observes the
+ * contents if and only if the initialization has already completed).
+ *
+ * If this lazy constant is initialized, an implementation-dependent string
+ * containing the {@linkplain Object#toString()} of the
+ * contents will be returned; otherwise, an implementation-dependent string is
+ * returned that indicates this lazy constant is not yet initialized.
+ */
+ @Override
+ String toString();
+
+ // Factory
+
+ /**
+ * {@return a lazy constant whose contents is to be computed later via the provided
+ * {@code computingFunction}}
+ *
+ * The returned lazy constant strongly references the provided
+ * {@code computingFunction} at least until initialization completes successfully.
+ *
+ * If the provided computing function is already an instance of
+ * {@code LazyConstant}, the method is free to return the provided computing function
+ * directly.
+ *
+ * @implNote after initialization completes successfully, the computing function is
+ * no longer strongly referenced and becomes eligible for
+ * garbage collection.
+ *
+ * @param computingFunction in the form of a {@linkplain Supplier} to be used
+ * to initialize the constant
+ * @param <T> type of the constant
+ *
+ */
+    @SuppressWarnings("unchecked")
+    static <T> LazyConstant<T> of(Supplier<? extends T> computingFunction) {
+        Objects.requireNonNull(computingFunction);
+        if (computingFunction instanceof LazyConstant<? extends T> lc) {
+            return (LazyConstant<T>) lc;
+        }
+        return LazyConstantImpl.ofLazy(computingFunction);
+    }
+
+}
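For reviewers, a short end-to-end sketch of the API added by this file (hypothetical usage, not part of the patch; requires `--enable-preview` on a build containing it). Keeping the constant in a `static final` field also matches the constant-folding guidance in the class documentation:

```java
import java.util.List;

// Hypothetical usage sketch; not part of the patch.
public class Settings {
    private static final LazyConstant<List<String>> FLAGS =
            LazyConstant.of(() -> List.of("--fast", "--safe"));

    public static void main(String[] args) {
        System.out.println(FLAGS.isInitialized());   // false: nothing computed yet
        System.out.println(FLAGS.orElse(List.of())); // []: orElse never computes
        System.out.println(FLAGS.get());             // computes [--fast, --safe]
        System.out.println(FLAGS.isInitialized());   // true: initialized exactly once
    }
}
```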
diff --git a/src/java.base/share/classes/java/lang/StableValue.java b/src/java.base/share/classes/java/lang/StableValue.java
deleted file mode 100644
index 1815cb1a5b198..0000000000000
--- a/src/java.base/share/classes/java/lang/StableValue.java
+++ /dev/null
@@ -1,756 +0,0 @@
-/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package java.lang;
-
-import jdk.internal.access.SharedSecrets;
-import jdk.internal.javac.PreviewFeature;
-import jdk.internal.lang.stable.StableEnumFunction;
-import jdk.internal.lang.stable.StableFunction;
-import jdk.internal.lang.stable.StableIntFunction;
-import jdk.internal.lang.stable.StableSupplier;
-import jdk.internal.lang.stable.StableUtil;
-import jdk.internal.lang.stable.StableValueImpl;
-
-import java.io.Serializable;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Objects;
-import java.util.RandomAccess;
-import java.util.Set;
-import java.util.function.Function;
-import java.util.function.IntFunction;
-import java.util.function.Supplier;
-
-/**
- * A stable value is a holder of contents that can be set at most once.
- *
- * A {@code StableValue} is typically created using the factory method
- * {@linkplain StableValue#of() {@code StableValue.of()}}. When created this way,
- * the stable value is unset, which means it holds no contents.
- * Its contents, of type {@code T}, can be set by calling
- * {@linkplain #trySet(Object) trySet()}, {@linkplain #setOrThrow(Object) setOrThrow()},
- * or {@linkplain #orElseSet(Supplier) orElseSet()}. Once set, the contents
- * can never change and can be retrieved by calling {@linkplain #orElseThrow() orElseThrow()},
- * {@linkplain #orElse(Object) orElse()}, or {@linkplain #orElseSet(Supplier) orElseSet()}.
- *
- * Consider the following example where a stable value field "{@code logger}" is a
- * shallowly immutable holder of contents of type {@code Logger} and that is initially
- * created as unset, which means it holds no contents. Later in the example, the
- * state of the "{@code logger}" field is checked and if it is still unset,
- * the contents is set:
- *
- * {@snippet lang = java:
- * public class Component {
- *
- * // Creates a new unset stable value with no contents
- * // @link substring="of" target="#of" :
- * private final StableValue<Logger> logger = StableValue.of();
- *
- * private Logger getLogger() {
- * if (!logger.isSet()) {
- * logger.trySet(Logger.create(Component.class));
- * }
- * return logger.orElseThrow();
- * }
- *
- * public void process() {
- * getLogger().info("Process started");
- * // ...
- * }
- * }
- *}
- *
- * If {@code getLogger()} is called from several threads, several instances of
- * {@code Logger} might be created. However, the contents can only be set at most once,
- * meaning the first writer wins.
- *
- * In order to guarantee that, even under races, only one instance of {@code Logger} is
- * ever created, the {@linkplain #orElseSet(Supplier) orElseSet()} method can be used
- * instead, where the contents are lazily computed, and atomically set, via a
- * {@linkplain Supplier supplier}. In the example below, the supplier is provided in the
- * form of a lambda expression:
- *
- * {@snippet lang = java:
- * public class Component {
- *
- * // Creates a new unset stable value with no contents
- * // @link substring="of" target="#of" :
- * private final StableValue<Logger> logger = StableValue.of();
- *
- * private Logger getLogger() {
- * return logger.orElseSet( () -> Logger.create(Component.class) );
- * }
- *
- * public void process() {
- * getLogger().info("Process started");
- * // ...
- * }
- * }
- *}
- *
- * The {@code getLogger()} method calls {@code logger.orElseSet()} on the stable value to
- * retrieve its contents. If the stable value is unset, then {@code orElseSet()}
- * evaluates the given supplier, and sets the contents to the result; the result is then
- * returned to the client. In other words, {@code orElseSet()} guarantees that a
- * stable value's contents is set before it returns.
- *
- * Furthermore, {@code orElseSet()} guarantees that out of one or more suppliers provided,
- * only at most one is ever evaluated, and that one is only ever evaluated once,
- * even when {@code logger.orElseSet()} is invoked concurrently. This property is crucial
- * as evaluation of the supplier may have side effects, for example, the call above to
- * {@code Logger.create()} may result in storage resources being prepared.
- *
- *
- * <h2 id="stable-functions">Stable Functions</h2>
- * Stable values provide the foundation for higher-level functional abstractions. A
- * stable supplier is a supplier that computes a value and then caches it into
- * a backing stable value storage for subsequent use. A stable supplier is created via the
- * {@linkplain StableValue#supplier(Supplier) StableValue.supplier()} factory, by
- * providing an underlying {@linkplain Supplier} which is invoked when the stable supplier
- * is first accessed:
- *
- * {@snippet lang = java:
- * public class Component {
- *
- * private final Supplier<Logger> logger =
- * // @link substring="supplier" target="#supplier(Supplier)" :
- * StableValue.supplier( () -> Logger.getLogger(Component.class) );
- *
- * public void process() {
- * logger.get().info("Process started");
- * // ...
- * }
- * }
- *}
- * A stable supplier encapsulates access to its backing stable value storage. This means
- * that code inside {@code Component} can obtain the logger object directly from the
- * stable supplier, without having to go through an accessor method like {@code getLogger()}.
- *
- * A stable int function is a function that takes an {@code int} parameter and
- * uses it to compute a result that is then cached by the backing stable value storage
- * for that parameter value. A stable {@link IntFunction} is created via the
- * {@linkplain StableValue#intFunction(int, IntFunction) StableValue.intFunction()}
- * factory. Upon creation, the input range (i.e. {@code [0, size)}) is specified together
- * with an underlying {@linkplain IntFunction} which is invoked at most once per input
- * value. In effect, the stable int function will act like a cache for the underlying
- * {@linkplain IntFunction}:
- *
- * {@snippet lang = java:
- * final class PowerOf2Util {
- *
- * private PowerOf2Util() {}
- *
- * private static final int SIZE = 6;
- * private static final IntFunction<Integer> UNDERLYING_POWER_OF_TWO =
- * v -> 1 << v;
- *
- * private static final IntFunction<Integer> POWER_OF_TWO =
- * // @link substring="intFunction" target="#intFunction(int,IntFunction)" :
- * StableValue.intFunction(SIZE, UNDERLYING_POWER_OF_TWO);
- *
- * public static int powerOfTwo(int a) {
- * return POWER_OF_TWO.apply(a);
- * }
- * }
- *
- * int result = PowerOf2Util.powerOfTwo(4); // May eventually constant fold to 16 at runtime
- *
- *}
- * The {@code PowerOf2Util.powerOfTwo()} function is a partial function that only
- * allows a subset {@code [0, 5]} of the underlying function's {@code UNDERLYING_POWER_OF_TWO}
- * input range.
- *
- *
- * A stable function is a function that takes a parameter (of type {@code T}) and
- * uses it to compute a result (of type {@code R}) that is then cached by the backing
- * stable value storage for that parameter value. A stable function is created via the
- * {@linkplain StableValue#function(Set, Function) StableValue.function()} factory.
- * Upon creation, the input {@linkplain Set} is specified together with an underlying
- * {@linkplain Function} which is invoked at most once per input value. In effect, the
- * stable function will act like a cache for the underlying {@linkplain Function}:
- *
- * {@snippet lang = java:
- * class Log2Util {
- *
- * private Log2Util() {}
- *
- * private static final Set<Integer> KEYS =
- * Set.of(1, 2, 4, 8, 16, 32);
- * private static final UnaryOperator<Integer> UNDERLYING_LOG2 =
- * i -> 31 - Integer.numberOfLeadingZeros(i);
- *
- * private static final Function<Integer, Integer> LOG2 =
- * // @link substring="function" target="#function(Set,Function)" :
- * StableValue.function(KEYS, UNDERLYING_LOG2);
- *
- * public static int log2(int a) {
- * return LOG2.apply(a);
- * }
- *
- * }
- *
- * int result = Log2Util.log2(16); // May eventually constant fold to 4 at runtime
- *}
- *
- * The {@code Log2Util.log2()} function is a partial function that only allows
- * a subset {@code {1, 2, 4, 8, 16, 32}} of the underlying function's
- * {@code UNDERLYING_LOG2} input range.
- *
- * <h2 id="stable-collections">Stable Collections</h2>
- * Stable values can also be used as backing storage for
- * {@linkplain Collection##unmodifiable unmodifiable collections}. A stable list
- * is an unmodifiable list, backed by an array of stable values. The stable list elements
- * are computed when they are first accessed, using a provided {@linkplain IntFunction}:
- *
- * {@snippet lang = java:
- * final class PowerOf2Util {
- *
- * private PowerOf2Util() {}
- *
- * private static final int SIZE = 6;
- * private static final IntFunction UNDERLYING_POWER_OF_TWO =
- * v -> 1 << v;
- *
- * private static final List<Integer> POWER_OF_TWO =
- * // @link substring="list" target="#list(int,IntFunction)" :
- * StableValue.list(SIZE, UNDERLYING_POWER_OF_TWO);
- *
- * public static int powerOfTwo(int a) {
- * return POWER_OF_TWO.get(a);
- * }
- * }
- *
- * int result = PowerOf2Util.powerOfTwo(4); // May eventually constant fold to 16 at runtime
- *
- * }
- *
- * Similarly, a stable map is an unmodifiable map whose keys are known at
- * construction. The stable map values are computed when they are first accessed,
- * using a provided {@linkplain Function}:
- *
- * {@snippet lang = java:
- * class Log2Util {
- *
- * private Log2Util() {}
- *
- * private static final Set<Integer> KEYS =
- * Set.of(1, 2, 4, 8, 16, 32);
- * private static final UnaryOperator<Integer> UNDERLYING_LOG2 =
- * i -> 31 - Integer.numberOfLeadingZeros(i);
- *
- * private static final Map<Integer, Integer> LOG2 =
- * // @link substring="map" target="#map(Set,Function)" :
- * StableValue.map(KEYS, UNDERLYING_LOG2);
- *
- * public static int log2(int a) {
- * return LOG2.get(a);
- * }
- *
- * }
- *
- * int result = Log2Util.log2(16); // May eventually constant fold to 4 at runtime
- *
- *}
- *
- * <h2 id="composing-stable-values">Composing stable values</h2>
- * A stable value can depend on other stable values, forming a dependency graph
- * that can be lazily computed but where access to individual elements can still be
- * performant. In the following example, a single {@code Foo} and a {@code Bar}
- * instance (that is dependent on the {@code Foo} instance) are lazily created, both of
- * which are held by stable values:
- * {@snippet lang = java:
- * public final class DependencyUtil {
- *
- * private DependencyUtil() {}
- *
- * public static class Foo {
- * // ...
- * }
- *
- * public static class Bar {
- * public Bar(Foo foo) {
- * // ...
- * }
- * }
- *
- * private static final Supplier<Foo> FOO = StableValue.supplier(Foo::new);
- * private static final Supplier<Bar> BAR = StableValue.supplier(() -> new Bar(FOO.get()));
- *
- * public static Foo foo() {
- * return FOO.get();
- * }
- *
- * public static Bar bar() {
- * return BAR.get();
- * }
- *
- * }
- *}
- * Calling {@code bar()} will create the {@code Bar} singleton if it is not already
- * created. Upon such a creation, the dependent {@code Foo} will first be created if
- * the {@code Foo} does not already exist.
- *
- * Another example, which has a more complex dependency graph, is to compute the
- * Fibonacci sequence lazily:
- * {@snippet lang = java:
- * public final class Fibonacci {
- *
- * private Fibonacci() {}
- *
- * private static final int MAX_SIZE_INT = 46;
- *
- * private static final IntFunction<Integer> FIB =
- * StableValue.intFunction(MAX_SIZE_INT, Fibonacci::fib);
- *
- * public static int fib(int n) {
- * return n < 2
- * ? n
- * : FIB.apply(n - 1) + FIB.apply(n - 2);
- * }
- *
- * }
- *}
- * Both {@code FIB} and {@code Fibonacci::fib} recurse into each other. Because the
- * stable int function {@code FIB} caches intermediate results, the initial
- * computational complexity is reduced from exponential to linear compared to a
- * traditional non-caching recursive fibonacci method. Once computed, the VM is free to
- * constant-fold expressions like {@code Fibonacci.fib(5)}.
- *
- * The fibonacci example above is a directed acyclic graph (i.e.,
- * it has no circular dependencies and is therefore a dependency tree):
- *{@snippet lang=text :
- *
- * ___________fib(5)____________
- * / \
- * ____fib(4)____ ____fib(3)____
- * / \ / \
- * fib(3) fib(2) fib(2) fib(1)
- * / \ / \ / \
- * fib(2) fib(1) fib(1) fib(0) fib(1) fib(0)
- *}
- *
- * If there are circular dependencies in a dependency graph, a stable value will
- * eventually throw an {@linkplain IllegalStateException} upon referencing elements in
- * a circularity.
- *
- *
- * <h2 id="thread-safety">Thread Safety</h2>
- * The contents of a stable value is guaranteed to be set at most once. If competing
- * threads are racing to set a stable value, only one update succeeds, while the other
- * updates are blocked until the stable value is set, whereafter the other updates
- * observe that the stable value is set and leave the stable value unchanged.
- *
- * The at-most-once write operation on a stable value that succeeds
- * (e.g. {@linkplain #trySet(Object) trySet()})
- * {@linkplain java.util.concurrent##MemoryVisibility happens-before}
- * any successful read operation (e.g. {@linkplain #orElseThrow()}).
- * A successful write operation can be either:
- * <ul>
- *     <li>a {@link #trySet(Object)} that returns {@code true},</li>
- *     <li>a {@link #setOrThrow(Object)} that does not throw, or</li>
- *     <li>an {@link #orElseSet(Supplier)} that successfully runs the supplier</li>
- * </ul>
- * A successful read operation can be either:
- * <ul>
- *     <li>a {@link #orElseThrow()} that does not throw,</li>
- *     <li>a {@link #orElse(Object) orElse(other)} that does not return the {@code other} value,</li>
- *     <li>an {@link #orElseSet(Supplier)} that does not {@code throw}, or</li>
- *     <li>an {@link #isSet()} that returns {@code true}</li>
- * </ul>
- *
- * The method {@link #orElseSet(Supplier)} guarantees that the provided
- * {@linkplain Supplier} is invoked successfully at most once, even under race.
- * Invocations of {@link #orElseSet(Supplier)} form a total order of zero or
- * more exceptional invocations followed by zero (if the contents were already set) or one
- * successful invocation. Since stable functions and stable collections are built on top
- * of the same principles as {@linkplain StableValue#orElseSet(Supplier) orElseSet()} they
- * too are thread safe and guarantee at-most-once-per-input invocation.
- *
- *
- * As the contents of a stable value can never change after it has been set, a JVM
- * implementation may, for a set stable value, elide all future reads of that
- * stable value, and instead directly use any contents that it has previously observed.
- * This is true if the reference to the stable value is a constant (e.g. in cases where
- * the stable value itself is stored in a {@code static final} field). Stable functions
- * and collections are built on top of StableValue. As such, they might also be eligible
- * for the same JVM optimizations as for StableValue.
- *
- * @implSpec Implementing classes of {@code StableValue} are free to synchronize on
- * {@code this}; consequently, clients should avoid synchronizing
- * (directly or indirectly) on a {@code StableValue}, as doing so
- * may lead to deadlock.
- *
- * Except for a {@code StableValue}'s contents itself,
- * an {@linkplain #orElse(Object) orElse(other)} parameter, and
- * an {@linkplain #equals(Object) equals(obj)} parameter; all
- * method parameters must be non-null or a {@link NullPointerException}
- * will be thrown.
- *
- * @implNote A {@code StableValue} is mainly intended to be a non-public field in
- * a class and is usually neither exposed directly via accessors nor passed as
- * a method parameter.
- *
- * Stable functions and collections make reasonable efforts to provide
- * {@link Object#toString()} operations that do not trigger evaluation
- * of the internal stable values when called.
- * Stable collections have {@link Object#equals(Object)} operations that try
- * to minimize evaluation of the internal stable values when called.
- *
- * As objects can be set via stable values but never removed, this can be a
- * source of unintended memory leaks. A stable value's contents are
- * {@linkplain java.lang.ref##reachability strongly reachable}.
- * Be advised that reachable stable values will hold their set contents until
- * the stable value itself is collected.
- *
- * A {@code StableValue} that has a type parameter {@code T} that is an array
- * type (of arbitrary rank) will only allow the JVM to treat the
- * array reference as a stable value but not its components.
- * Instead, {@linkplain #list(int, IntFunction) a stable list} of arbitrary
- * depth can be used, which provides stable components. More generally, a
- * stable value can hold other stable values of arbitrary depth and still
- * provide transitive constantness.
- *
- * Stable values, functions, and collections are not {@link Serializable}.
- *
- * @param <T> type of the contents
- *
- * @since 25
- */
-@PreviewFeature(feature = PreviewFeature.Feature.STABLE_VALUES)
-public sealed interface StableValue<T>
- permits StableValueImpl {
-
- /**
- * Tries to set the contents of this StableValue to the provided {@code contents}.
- * The contents of this StableValue can only be set once, implying this method only
- * returns {@code true} once.
- *
- * When this method returns, the contents of this StableValue is always set.
- *
- * @return {@code true} if the contents of this StableValue was set to the
- * provided {@code contents}, {@code false} otherwise
- * @param contents to set
- * @throws IllegalStateException if a supplier invoked by {@link #orElseSet(Supplier)}
- * recursively attempts to set this stable value by calling this method
- * directly or indirectly.
- */
- boolean trySet(T contents);
-
- /**
- * {@return the contents if set, otherwise, returns the provided {@code other} value}
- *
- * @param other to return if the contents is not set
- */
- T orElse(T other);
-
- /**
- * {@return the contents if set, otherwise, throws {@code NoSuchElementException}}
- *
- * @throws NoSuchElementException if no contents is set
- */
- T orElseThrow();
-
- /**
- * {@return {@code true} if the contents is set, {@code false} otherwise}
- */
- boolean isSet();
-
- /**
- * {@return the contents; if unset, first attempts to compute and set the
- * contents using the provided {@code supplier}}
- *
- * The provided {@code supplier} is guaranteed to be invoked at most once if it
- * completes without throwing an exception. If this method is invoked several times
- * with different suppliers, only one of them will be invoked provided it completes
- * without throwing an exception.
- *
- * If the supplier throws an (unchecked) exception, the exception is rethrown and no
- * contents is set. The most common usage is to construct a new object serving
- * as a lazily computed value or memoized result, as in:
- *
- * {@snippet lang=java:
- * Value v = stable.orElseSet(Value::new);
- * }
- *
- * When this method returns successfully, the contents is always set.
- *
- * The provided {@code supplier} will only be invoked once even if invoked from
- * several threads unless the {@code supplier} throws an exception.
- *
- * @param supplier to be used for computing the contents, if not previously set
- * @throws IllegalStateException if the provided {@code supplier} recursively
- * attempts to set this stable value.
- */
- T orElseSet(Supplier<? extends T> supplier);
-
- /**
- * Sets the contents of this StableValue to the provided {@code contents}, or, if
- * already set, throws {@code IllegalStateException}.
- *
- * When this method returns (or throws an exception), the contents is always set.
- *
- * @param contents to set
- * @throws IllegalStateException if the contents was already set
- * @throws IllegalStateException if a supplier invoked by {@link #orElseSet(Supplier)}
- * recursively attempts to set this stable value by calling this method
- * directly or indirectly.
- */
- void setOrThrow(T contents);
-
- // Object methods
-
- /**
- * {@return {@code true} if {@code this == obj}, {@code false} otherwise}
- *
- * @param obj to check for equality
- */
- boolean equals(Object obj);
-
- /**
- * {@return the {@linkplain System#identityHashCode(Object) identity hash code} of
- * {@code this} object}
- */
- int hashCode();
-
- // Factories
-
- /**
- * {@return a new unset stable value}
- *
- * An unset stable value has no contents.
- *
- * @param <T> type of the contents
- */
-    static <T> StableValue<T> of() {
-        return StableValueImpl.of();
-    }
-
- /**
- * {@return a new pre-set stable value with the provided {@code contents}}
- *
- * @param contents to set
- * @param <T> type of the contents
- */
-    static <T> StableValue<T> of(T contents) {
-        final StableValue<T> stableValue = StableValue.of();
-        stableValue.trySet(contents);
-        return stableValue;
-    }
-
- /**
- * {@return a new stable supplier}
- *
- * The returned {@linkplain Supplier supplier} is a caching supplier that records
- * the value of the provided {@code underlying} supplier upon being first accessed via
- * the returned supplier's {@linkplain Supplier#get() get()} method.
- *
- * The provided {@code underlying} supplier is guaranteed to be successfully invoked
- * at most once even in a multi-threaded environment. Competing threads invoking the
- * returned supplier's {@linkplain Supplier#get() get()} method when a value is
- * already under computation will block until a value is computed or an exception is
- * thrown by the computing thread. The competing threads will then observe the newly
- * computed value (if any) and will then never execute the {@code underlying} supplier.
- *
- * If the provided {@code underlying} supplier throws an exception, it is rethrown
- * to the initial caller and no contents is recorded.
- *
- * If the provided {@code underlying} supplier recursively calls the returned
- * supplier, an {@linkplain IllegalStateException} will be thrown.
- *
- * @param underlying supplier used to compute a cached value
- * @param <T> the type of results supplied by the returned supplier
- */
-    static <T> Supplier<T> supplier(Supplier<? extends T> underlying) {
-        Objects.requireNonNull(underlying);
-        return StableSupplier.of(underlying);
-    }
-
- /**
- * {@return a new stable {@linkplain IntFunction}}
- *
- * The returned function is a caching function that, for each allowed {@code int}
- * input, records the values of the provided {@code underlying}
- * function upon being first accessed via the returned function's
- * {@linkplain IntFunction#apply(int) apply()} method. If the returned function is
- * invoked with an input that is not in the range {@code [0, size)}, an
- * {@link IllegalArgumentException} will be thrown.
- *
- * The provided {@code underlying} function is guaranteed to be successfully invoked
- * at most once per allowed input, even in a multi-threaded environment. Competing
- * threads invoking the returned function's
- * {@linkplain IntFunction#apply(int) apply()} method when a value is already under
- * computation will block until a value is computed or an exception is thrown by
- * the computing thread.
- *
- * If invoking the provided {@code underlying} function throws an exception, it is
- * rethrown to the initial caller and no contents is recorded.
- *
- * If the provided {@code underlying} function recursively calls the returned
- * function for the same input, an {@linkplain IllegalStateException} will
- * be thrown.
- *
- * @param size the upper bound of the range {@code [0, size)} indicating
- * the allowed inputs
- * @param underlying {@code IntFunction} used to compute cached values
- * @param <R> the type of results delivered by the returned IntFunction
- * @throws IllegalArgumentException if the provided {@code size} is negative.
- */
- static <R> IntFunction<R> intFunction(int size,
- IntFunction<? extends R> underlying) {
- StableUtil.assertSizeNonNegative(size);
- Objects.requireNonNull(underlying);
- return StableIntFunction.of(size, underlying);
- }
-
- /**
- * {@return a new stable {@linkplain Function}}
- *
- * The returned function is a caching function that, for each allowed
- * input in the given set of {@code inputs}, records the values of the provided
- * {@code underlying} function upon being first accessed via the returned function's
- * {@linkplain Function#apply(Object) apply()} method. If the returned function is
- * invoked with an input that is not in {@code inputs}, an {@link IllegalArgumentException}
- * will be thrown.
- *
- * The provided {@code underlying} function is guaranteed to be successfully invoked
- * at most once per allowed input, even in a multi-threaded environment. Competing
- * threads invoking the returned function's {@linkplain Function#apply(Object) apply()}
- * method when a value is already under computation will block until a value is
- * computed or an exception is thrown by the computing thread.
- *
- * If invoking the provided {@code underlying} function throws an exception, it is
- * rethrown to the initial caller and no contents is recorded.
- *
- * If the provided {@code underlying} function recursively calls the returned
- * function for the same input, an {@linkplain IllegalStateException} will
- * be thrown.
- *
- * @param inputs the set of (non-null) allowed input values
- * @param underlying {@code Function} used to compute cached values
- * @param <T> the type of the input to the returned Function
- * @param <R> the type of results delivered by the returned Function
- * @throws NullPointerException if the provided set of {@code inputs} contains a
- * {@code null} element.
- */
- static <T, R> Function<T, R> function(Set<? extends T> inputs,
- Function<? super T, ? extends R> underlying) {
- Objects.requireNonNull(inputs);
- // Checking that the Set of inputs does not contain a `null` value is made in the
- // implementing classes.
- Objects.requireNonNull(underlying);
- return inputs instanceof EnumSet<?> && !inputs.isEmpty()
- ? StableEnumFunction.of(inputs, underlying)
- : StableFunction.of(inputs, underlying);
- }
-
- /**
- * {@return a new stable list with the provided {@code size}}
- *
- * The returned list is an {@linkplain Collection##unmodifiable unmodifiable} list
- * with the provided {@code size}. The list's elements are computed via the
- * provided {@code mapper} when they are first accessed
- * (e.g. via {@linkplain List#get(int) List::get}).
- *
- * The provided {@code mapper} function is guaranteed to be successfully invoked
- * at most once per list index, even in a multi-threaded environment. Competing
- * threads accessing an element already under computation will block until an element
- * is computed or an exception is thrown by the computing thread.
- *
- * If invoking the provided {@code mapper} function throws an exception, it
- * is rethrown to the initial caller and no value for the element is recorded.
- *
- * Any {@link List#subList(int, int) subList} or {@link List#reversed()} views
- * of the returned list are also stable.
- *
- * The returned list and its {@link List#subList(int, int) subList} or
- * {@link List#reversed()} views implement the {@link RandomAccess} interface.
- *
- * The returned list is unmodifiable and does not implement the
- * {@linkplain Collection##optional-operation optional operations} in the
- * {@linkplain List} interface.
- *
- * If the provided {@code mapper} recursively calls the returned list for the
- * same index, an {@linkplain IllegalStateException} will be thrown.
- *
- * @param size the size of the returned list
- * @param mapper to invoke whenever an element is first accessed
- * (may return {@code null})
- * @param <E> the type of elements in the returned list
- * @throws IllegalArgumentException if the provided {@code size} is negative.
- */
- static <E> List<E> list(int size,
- IntFunction<? extends E> mapper) {
- StableUtil.assertSizeNonNegative(size);
- Objects.requireNonNull(mapper);
- return SharedSecrets.getJavaUtilCollectionAccess().stableList(size, mapper);
- }
-
- /**
- * {@return a new stable map with the provided {@code keys}}
- *
- * The returned map is an {@linkplain Collection##unmodifiable unmodifiable} map whose
- * keys are known at construction. The map's values are computed via the provided
- * {@code mapper} when they are first accessed
- * (e.g. via {@linkplain Map#get(Object) Map::get}).
- *
- * The provided {@code mapper} function is guaranteed to be successfully invoked
- * at most once per key, even in a multi-threaded environment. Competing
- * threads accessing a value already under computation will block until an element
- * is computed or an exception is thrown by the computing thread.
- *
- * If invoking the provided {@code mapper} function throws an exception, it
- * is rethrown to the initial caller and no value associated with the provided key
- * is recorded.
- *
- * Any {@link Map#values()} or {@link Map#entrySet()} views of the returned map are
- * also stable.
- *
- * The returned map is unmodifiable and does not implement the
- * {@linkplain Collection##optional-operations optional operations} in the
- * {@linkplain Map} interface.
- *
- * If the provided {@code mapper} recursively calls the returned map for
- * the same key, an {@linkplain IllegalStateException} will be thrown.
- *
- * @param keys the (non-null) keys in the returned map
- * @param mapper to invoke whenever an associated value is first accessed
- * (may return {@code null})
- * @param <K> the type of keys maintained by the returned map
- * @param <V> the type of mapped values in the returned map
- * @throws NullPointerException if the provided set of {@code inputs} contains a
- * {@code null} element.
- */
- static <K, V> Map<K, V> map(Set<K> keys,
- Function<? super K, ? extends V> mapper) {
- Objects.requireNonNull(keys);
- // Checking that the Set of keys does not contain a `null` value is made in the
- // implementing class.
- Objects.requireNonNull(mapper);
- return SharedSecrets.getJavaUtilCollectionAccess().stableMap(keys, mapper);
- }
-
-}
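The factories deleted here were the public entry points whose contract the javadoc above describes. A minimal sketch of the caching behavior call sites relied on (this compiles only against a pre-patch tree, since this patch removes `StableValue`; the lambda and output are illustrative):

```java
import java.util.function.Supplier;

public class StableSupplierSketch {
    public static void main(String[] args) {
        // Per the javadoc above: the underlying supplier is successfully
        // invoked at most once, even when several threads race on get().
        Supplier<String> cached = StableValue.supplier(() -> {
            System.out.println("computed once");
            return "value";
        });
        cached.get(); // runs the underlying supplier and records the result
        cached.get(); // returns the recorded result; supplier is not re-run
    }
}
```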
diff --git a/src/java.base/share/classes/java/lang/invoke/VarHandles.java b/src/java.base/share/classes/java/lang/invoke/VarHandles.java
index c97d44ba5d3e1..8c3e123f39a69 100644
--- a/src/java.base/share/classes/java/lang/invoke/VarHandles.java
+++ b/src/java.base/share/classes/java/lang/invoke/VarHandles.java
@@ -166,12 +166,15 @@ else if (type == double.class) {
static Field getFieldFromReceiverAndOffset(Class<?> receiverType,
long offset,
Class<?> fieldType) {
- for (Field f : receiverType.getDeclaredFields()) {
- if (Modifier.isStatic(f.getModifiers())) continue;
-
- if (offset == UNSAFE.objectFieldOffset(f)) {
- assert f.getType() == fieldType;
- return f;
+ // The receiver may be a referenced class different from the declaring class
+ for (var declaringClass = receiverType; declaringClass != null; declaringClass = declaringClass.getSuperclass()) {
+ for (Field f : declaringClass.getDeclaredFields()) {
+ if (Modifier.isStatic(f.getModifiers())) continue;
+
+ if (offset == UNSAFE.objectFieldOffset(f)) {
+ assert f.getType() == fieldType;
+ return f;
+ }
}
}
throw new InternalError("Field not found at offset");
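The loop above now walks the superclass chain because `getDeclaredFields()` reports only fields declared directly on a class: a field offset resolved against a subclass receiver was previously never found when the field lives on a superclass. A self-contained illustration (class names hypothetical):

```java
import java.lang.reflect.Field;

public class DeclaredFieldsDemo {
    static class Base { int x; }
    static class Derived extends Base { }

    public static void main(String[] args) {
        // Derived declares no instance fields of its own; x lives on Base.
        System.out.println(Derived.class.getDeclaredFields().length); // 0

        // Walking the superclass chain, as the patched loop does, finds x:
        for (Class<?> c = Derived.class; c != null; c = c.getSuperclass()) {
            for (Field f : c.getDeclaredFields()) {
                System.out.println("found " + f.getName() + " on " + c.getSimpleName());
            }
        }
    }
}
```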
diff --git a/src/java.base/share/classes/java/lang/reflect/Executable.java b/src/java.base/share/classes/java/lang/reflect/Executable.java
index a22d0fa80768b..4f32d33048d83 100644
--- a/src/java.base/share/classes/java/lang/reflect/Executable.java
+++ b/src/java.base/share/classes/java/lang/reflect/Executable.java
@@ -431,7 +431,7 @@ private Parameter[] synthesizeAllParams() {
// modifiers? Probably not in the general case, since
// we'd have no way of knowing about them, but there
// may be specific cases.
- out[i] = new Parameter("arg" + i, 0, this, i);
+ out[i] = new Parameter(null, 0, this, i);
return out;
}
diff --git a/src/java.base/share/classes/java/lang/reflect/Parameter.java b/src/java.base/share/classes/java/lang/reflect/Parameter.java
index b8a57a9790b73..d4a53e193a9d1 100644
--- a/src/java.base/share/classes/java/lang/reflect/Parameter.java
+++ b/src/java.base/share/classes/java/lang/reflect/Parameter.java
@@ -55,7 +55,7 @@ public final class Parameter implements AnnotatedElement {
* absent, however, then {@code Executable} uses this constructor
* to synthesize them.
*
- * @param name The name of the parameter.
+ * @param name The name of the parameter, or {@code null} if absent
* @param modifiers The modifier flags for the parameter.
* @param executable The executable which defines this parameter.
* @param index The index of the parameter.
@@ -104,7 +104,7 @@ public int hashCode() {
* to the class file.
*/
public boolean isNamePresent() {
- return executable.hasRealParameterData() && name != null;
+ return name != null;
}
/**
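With these two hunks a synthesized `Parameter` stores `null` rather than a fabricated `"argN"` string, and `isNamePresent()` reduces to a null check. Observable behavior should be unchanged, because `Parameter.getName()` is specified to fall back to a synthesized `argN` name when no real name is present:

```java
import java.lang.reflect.Method;
import java.lang.reflect.Parameter;

public class ParameterNameDemo {
    public static void main(String[] args) throws Exception {
        Method m = String.class.getMethod("substring", int.class);
        Parameter p = m.getParameters()[0];

        // java.base is compiled without -parameters, so no real name is stored:
        System.out.println(p.isNamePresent()); // expected: false
        System.out.println(p.getName());       // synthesized fallback: "arg0"
    }
}
```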
diff --git a/src/java.base/share/classes/java/lang/runtime/SwitchBootstraps.java b/src/java.base/share/classes/java/lang/runtime/SwitchBootstraps.java
index 99716baf43963..30b6df0073ec2 100644
--- a/src/java.base/share/classes/java/lang/runtime/SwitchBootstraps.java
+++ b/src/java.base/share/classes/java/lang/runtime/SwitchBootstraps.java
@@ -777,7 +777,7 @@ private static String typeSwitchClassName(Class<?> targetClass) {
return name + "$$TypeSwitch";
}
- // this method should be in sync with com.sun.tools.javac.code.Types.checkUnconditionallyExactPrimitives
+ // this method should be in sync with com.sun.tools.javac.code.Types.isUnconditionallyExactTypeBased
private static boolean unconditionalExactnessCheck(Class<?> selectorType, Class<?> targetType) {
Wrapper selectorWrapper = Wrapper.forBasicType(selectorType);
Wrapper targetWrapper = Wrapper.forBasicType(targetType);
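For context, `unconditionalExactnessCheck` decides whether a primitive conversion in a pattern can never lose information, in which case the generated type switch needs no runtime test. A hedged sketch of the language-level behavior it backs (primitive patterns in `switch` are a preview feature, so this needs `--enable-preview` on a sufficiently recent JDK):

```java
public class ExactnessDemo {
    // int -> long is unconditionally exact: every int value fits in a long,
    // so this single pattern matches all inputs and no default is required.
    static String describe(int selector) {
        return switch (selector) {
            case long l -> "widened to " + l;
        };
    }

    public static void main(String[] args) {
        System.out.println(describe(42)); // widened to 42
    }
}
```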
diff --git a/src/java.base/share/classes/java/nio/charset/Charset.java b/src/java.base/share/classes/java/nio/charset/Charset.java
index 1eb3c9c209411..736ed4f12d548 100644
--- a/src/java.base/share/classes/java/nio/charset/Charset.java
+++ b/src/java.base/share/classes/java/nio/charset/Charset.java
@@ -25,7 +25,6 @@
package java.nio.charset;
-import jdk.internal.misc.ThreadTracker;
import jdk.internal.misc.VM;
import jdk.internal.util.StaticProperty;
import jdk.internal.vm.annotation.Stable;
@@ -41,7 +40,6 @@
import java.util.Locale;
import java.util.Map;
import java.util.NoSuchElementException;
-import java.util.Objects;
import java.util.ServiceLoader;
import java.util.Set;
import java.util.SortedMap;
@@ -426,7 +424,7 @@ public Charset call() {
}
/* The extended set of charsets */
- private static final Supplier<List<CharsetProvider>> EXTENDED_PROVIDERS = StableValue.supplier(
+ private static final LazyConstant<List<CharsetProvider>> EXTENDED_PROVIDERS = LazyConstant.of(
new Supplier<>() { public List<CharsetProvider> get() { return extendedProviders0(); }});
private static List<CharsetProvider> extendedProviders0() {
@@ -617,7 +615,7 @@ public static SortedMap<String, Charset> availableCharsets() {
return Collections.unmodifiableSortedMap(m);
}
- private static final Supplier<Charset> defaultCharset = StableValue.supplier(
+ private static final LazyConstant<Charset> defaultCharset = LazyConstant.of(
new Supplier<>() { public Charset get() { return defaultCharset0(); }});
private static Charset defaultCharset0() {
@@ -658,7 +656,7 @@ public static Charset defaultCharset() {
@Stable
private final String[] aliases;
@Stable
- private final Supplier<Set<String>> aliasSet = StableValue.supplier(
+ private final LazyConstant<Set<String>> aliasSet = LazyConstant.of(
new Supplier<>() { public Set<String> get() { return Set.of(aliases); }});
/**
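The `Charset` hunks show the mechanical shape of this migration: a caching `Supplier` built with `StableValue.supplier(...)` becomes a `LazyConstant` built with `LazyConstant.of(...)`. The diff shows only declarations, so the accessor below is an assumption; this stand-in (not the JDK class) illustrates the compute-once semantics such a constant must provide:

```java
import java.util.function.Supplier;

// Hypothetical stand-in for the semantics of a lazily computed constant:
// the underlying supplier runs at most once; the result is cached forever.
final class LazyConstantSketch<T> {
    private final Supplier<? extends T> underlying;
    private volatile boolean set;
    private T value;

    private LazyConstantSketch(Supplier<? extends T> underlying) {
        this.underlying = underlying;
    }

    static <T> LazyConstantSketch<T> of(Supplier<? extends T> underlying) {
        return new LazyConstantSketch<>(underlying);
    }

    T get() {
        if (!set) {                      // fast path: already computed
            synchronized (this) {
                if (!set) {              // re-check under the lock
                    value = underlying.get();
                    set = true;          // volatile write publishes value
                }
            }
        }
        return value;
    }
}
```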
diff --git a/src/java.base/share/classes/java/text/CollationElementIterator.java b/src/java.base/share/classes/java/text/CollationElementIterator.java
index 5469b95b11e8f..46fe80e1bda4d 100644
--- a/src/java.base/share/classes/java/text/CollationElementIterator.java
+++ b/src/java.base/share/classes/java/text/CollationElementIterator.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1996, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -661,7 +661,7 @@ private int nextContractChar(int ch)
// (the Normalizer is cloned here so that the seeking we do in the next loop
// won't affect our real position in the text)
- NormalizerBase tempText = (NormalizerBase)text.clone();
+ NormalizerBase tempText = text.clone();
// extract the next maxLength characters in the string (we have to do this using the
// Normalizer to ensure that our offsets correspond to those the rest of the
@@ -732,7 +732,7 @@ private int prevContractChar(int ch)
pair = list.lastElement();
int maxLength = pair.entryName.length();
- NormalizerBase tempText = (NormalizerBase)text.clone();
+ NormalizerBase tempText = text.clone();
tempText.next();
key.setLength(0);
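Dropping the `(NormalizerBase)` casts presumes that `NormalizerBase` declares a covariant `clone()` override (not shown in this excerpt, so treated here as an assumption). The general pattern:

```java
// Covariant clone(): overriding Object.clone() with a narrower return type
// lets callers drop the cast.
class Cursor implements Cloneable {
    int position;

    @Override
    public Cursor clone() {
        try {
            return (Cursor) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new AssertionError(e); // unreachable: Cursor is Cloneable
        }
    }
}
```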
diff --git a/src/java.base/share/classes/java/util/ArrayList.java b/src/java.base/share/classes/java/util/ArrayList.java
index c00b130a553a2..53e818b99c51f 100644
--- a/src/java.base/share/classes/java/util/ArrayList.java
+++ b/src/java.base/share/classes/java/util/ArrayList.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -750,9 +750,17 @@ public void clear() {
* @throws NullPointerException if the specified collection is null
*/
public boolean addAll(Collection<? extends E> c) {
- Object[] a = c.toArray();
+ Object[] a;
+ int numNew;
+ if (c.getClass() == ArrayList.class) {
+ ArrayList<?> src = (ArrayList<?>) c;
+ a = src.elementData;
+ numNew = src.size;
+ } else {
+ a = c.toArray();
+ numNew = a.length;
+ }
modCount++;
- int numNew = a.length;
if (numNew == 0)
return false;
Object[] elementData;
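Two details of the fast path above are worth spelling out. The `c.getClass() == ArrayList.class` test is an exact-class check rather than `instanceof`, because a subclass could override accessors and make the direct `elementData`/`size` read unsound. And since an `ArrayList`'s backing array may be longer than its logical size, the element count must come from `src.size`, not the array length:

```java
import java.util.ArrayList;
import java.util.List;

public class AddAllDemo {
    public static void main(String[] args) {
        // Capacity 100 but logical size 1: the backing array is mostly empty,
        // so a correct bulk copy must use size, not elementData.length.
        ArrayList<String> src = new ArrayList<>(100);
        src.add("a");

        List<String> dst = new ArrayList<>();
        dst.addAll(src);         // src is exactly ArrayList: fast path applies
        System.out.println(dst); // [a]
    }
}
```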
diff --git a/src/java.base/share/classes/java/util/Collections.java b/src/java.base/share/classes/java/util/Collections.java
index c48dbd8cf6c13..316458d6f9091 100644
--- a/src/java.base/share/classes/java/util/Collections.java
+++ b/src/java.base/share/classes/java/util/Collections.java
@@ -5253,6 +5253,20 @@ public boolean removeIf(Predicate<? super E> filter) {
public int hashCode() {
return Objects.hashCode(element);
}
+ @Override
+ public Object[] toArray() {
+ return new Object[] {element};
+ }
+ @Override
+ @SuppressWarnings("unchecked")
+ public <T> T[] toArray(T[] a) {
+ if (a.length < 1)
+ a = (T[])Array.newInstance(a.getClass().getComponentType(), 1);
+ a[0] = (T)element;
+ if (a.length > 1)
+ a[1] = null;
+ return a;
+ }
}
/**
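The new overrides follow the `Collection.toArray(T[])` contract: when the supplied array is big enough it is reused, and the slot immediately after the last element is set to `null`:

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.Set;

public class SingletonToArrayDemo {
    public static void main(String[] args) {
        Set<String> s = Collections.singleton("x");
        String[] dst = {"a", "b", "c"};

        String[] r = s.toArray(dst);            // dst is reused, not copied
        System.out.println(r == dst);           // true
        System.out.println(Arrays.toString(r)); // [x, null, c]
    }
}
```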
diff --git a/src/java.base/share/classes/java/util/Currency.java b/src/java.base/share/classes/java/util/Currency.java
index febae04a77b6c..b254bae32a123 100644
--- a/src/java.base/share/classes/java/util/Currency.java
+++ b/src/java.base/share/classes/java/util/Currency.java
@@ -142,8 +142,8 @@ public final class Currency implements Serializable {
// class data: instance map
private static ConcurrentMap<String, Currency> instances = new ConcurrentHashMap<>(7);
- private static final Supplier<Set<Currency>> available =
- StableValue.supplier(Currency::computeAllCurrencies);
+ private static final LazyConstant<Set<Currency>> available =
+ LazyConstant.of(Currency::computeAllCurrencies);
// Class data: currency data obtained from currency.data file.
// Purpose:
diff --git a/src/java.base/share/classes/java/util/ImmutableCollections.java b/src/java.base/share/classes/java/util/ImmutableCollections.java
index 1dd7808da20f7..abc48ff5ed995 100644
--- a/src/java.base/share/classes/java/util/ImmutableCollections.java
+++ b/src/java.base/share/classes/java/util/ImmutableCollections.java
@@ -36,18 +36,12 @@
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;
-import java.util.function.IntFunction;
import java.util.function.Predicate;
-import java.util.function.Supplier;
import java.util.function.UnaryOperator;
import jdk.internal.access.JavaUtilCollectionAccess;
import jdk.internal.access.SharedSecrets;
-import jdk.internal.lang.stable.StableUtil;
-import jdk.internal.lang.stable.StableValueImpl;
import jdk.internal.misc.CDS;
-import jdk.internal.util.ArraysSupport;
-import jdk.internal.vm.annotation.ForceInline;
import jdk.internal.vm.annotation.Stable;
/**
@@ -135,14 +129,6 @@ public <E> List<E> listFromTrustedArray(Object[] array) {
public <E> List<E> listFromTrustedArrayNullsAllowed(Object[] array) {
return ImmutableCollections.listFromTrustedArrayNullsAllowed(array);
}
- public <E> List<E> stableList(int size, IntFunction<? extends E> mapper) {
- // A stable list is not Serializable, so we cannot return `List.of()` if `size == 0`
- return new StableList<>(size, mapper);
- }
- public <K, V> Map<K, V> stableMap(Set<K> keys, Function<? super K, ? extends V> mapper) {
- // A stable map is not Serializable, so we cannot return `Map.of()` if `keys.isEmpty()`
- return new StableMap<>(keys, mapper);
- }
});
}
}
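The deleted `stableList`/`stableMap` entries were part of the `JavaUtilCollectionAccess` bridge, by which code outside `java.util` reaches package-private implementations through an accessor registered in `SharedSecrets`. A minimal sketch of that idiom (all names hypothetical):

```java
import java.util.List;

// Hypothetical mini-version of the SharedSecrets idiom: the implementing
// class registers an accessor once; other packages call through it instead
// of using reflection on package-private types.
interface DemoCollectionAccess {
    <E> List<E> copyOf(List<E> source);
}

final class DemoSecrets {
    private static volatile DemoCollectionAccess access;

    static void set(DemoCollectionAccess a) { access = a; }
    static DemoCollectionAccess get() { return access; }
}

public class SharedSecretsSketch {
    public static void main(String[] args) {
        // Normally done in a static initializer of the implementing class:
        DemoSecrets.set(new DemoCollectionAccess() {
            public <E> List<E> copyOf(List<E> source) { return List.copyOf(source); }
        });
        System.out.println(DemoSecrets.get().copyOf(List.of(1, 2, 3)));
    }
}
```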
@@ -450,7 +436,7 @@ public void add(E e) {
}
}
- static sealed class SubList<E> extends AbstractImmutableList<E>
+ static final class SubList<E> extends AbstractImmutableList<E>
implements RandomAccess {
@Stable
@@ -462,8 +448,10 @@ static sealed class SubList<E> extends AbstractImmutableList<E>
@Stable
final int size;
- private SubList(AbstractImmutableList<E> root, int offset, int size) {
- assert root instanceof List12 || root instanceof ListN || root instanceof StableList;
+ SubList(AbstractImmutableList<E> root, int offset, int size) {
+ assert root instanceof List12
+ || root instanceof ListN
+ || root instanceof LazyCollections.LazyList;
this.root = root;
this.offset = offset;
this.size = size;
@@ -795,187 +783,6 @@ public int lastIndexOf(Object o) {
}
}
- @FunctionalInterface
- interface HasStableDelegates<E> {
- StableValueImpl<E>[] delegates();
- }
-
- @jdk.internal.ValueBased
- static final class StableList<E>
- extends AbstractImmutableList<E>
- implements HasStableDelegates<E> {
-
- @Stable
- private final IntFunction<? extends E> mapper;
- @Stable
- final StableValueImpl<E>[] delegates;
-
- StableList(int size, IntFunction<? extends E> mapper) {
- this.mapper = mapper;
- this.delegates = StableUtil.array(size);
- }
-
- @Override public boolean isEmpty() { return delegates.length == 0;}
- @Override public int size() { return delegates.length; }
- @Override public Object[] toArray() { return copyInto(new Object[size()]); }
-
- @ForceInline
- @Override
- public E get(int i) {
- final StableValueImpl<E> delegate;
- try {
- delegate = delegates[i];
- } catch (ArrayIndexOutOfBoundsException aioobe) {
- throw new IndexOutOfBoundsException(i);
- }
- return delegate.orElseSet(new Supplier<E>() {
- @Override public E get() { return mapper.apply(i); }});
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public <T> T[] toArray(T[] a) {
- final int size = delegates.length;
- if (a.length < size) {
- // Make a new array of a's runtime type, but my contents:
- T[] n = (T[])Array.newInstance(a.getClass().getComponentType(), size);
- return copyInto(n);
- }
- copyInto(a);
- if (a.length > size) {
- a[size] = null; // null-terminate
- }
- return a;
- }
-
- @Override
- public int indexOf(Object o) {
- final int size = size();
- for (int i = 0; i < size; i++) {
- if (Objects.equals(o, get(i))) {
- return i;
- }
- }
- return -1;
- }
-
- @Override
- public int lastIndexOf(Object o) {
- for (int i = size() - 1; i >= 0; i--) {
- if (Objects.equals(o, get(i))) {
- return i;
- }
- }
- return -1;
- }
-
- @SuppressWarnings("unchecked")
- private <T> T[] copyInto(Object[] a) {
- final int len = delegates.length;
- for (int i = 0; i < len; i++) {
- a[i] = get(i);
- }
- return (T[]) a;
- }
-
- @Override
- public List<E> reversed() {
- return new StableReverseOrderListView<>(this);
- }
-
- @Override
- public List<E> subList(int fromIndex, int toIndex) {
- subListRangeCheck(fromIndex, toIndex, size());
- return StableSubList.fromStableList(this, fromIndex, toIndex);
- }
-
- @Override
- public String toString() {
- return StableUtil.renderElements(this, "StableCollection", delegates);
- }
-
- @Override
- public StableValueImpl<E>[] delegates() {
- return delegates;
- }
-
- private static final class StableSubList<E> extends SubList<E>
- implements HasStableDelegates<E> {
-
- private StableSubList(AbstractImmutableList<E> root, int offset, int size) {
- super(root, offset, size);
- }
-
- @Override
- public List<E> reversed() {
- return new StableReverseOrderListView<>(this);
- }
-
- @Override
- public List<E> subList(int fromIndex, int toIndex) {
- subListRangeCheck(fromIndex, toIndex, size());
- return StableSubList.fromStableSubList(this, fromIndex, toIndex);
- }
-
- @Override
- public String toString() {
- return StableUtil.renderElements(this, "StableCollection", delegates());
- }
-
- @Override
- boolean allowNulls() {
- return true;
- }
-
- @Override
- public StableValueImpl<E>[] delegates() {
- @SuppressWarnings("unchecked")
- final var rootDelegates = ((HasStableDelegates<E>) root).delegates();
- return Arrays.copyOfRange(rootDelegates, offset, offset + size);
- }
-
- static <E> SubList<E> fromStableList(StableList<E> list, int fromIndex, int toIndex) {
- return new StableSubList<>(list, fromIndex, toIndex - fromIndex);
- }
-
- static <E> SubList<E> fromStableSubList(StableSubList<E> parent, int fromIndex, int toIndex) {
- return new StableSubList<>(parent.root, parent.offset + fromIndex, toIndex - fromIndex);
- }
-
- }
-
- private static final class StableReverseOrderListView<E>
- extends ReverseOrderListView.Rand<E>
- implements HasStableDelegates<E> {
-
- private StableReverseOrderListView(List<E> base) {
- super(base, false);
- }
-
- // This method does not evaluate the elements
- @Override
- public String toString() {
- return StableUtil.renderElements(this, "StableCollection", delegates());
- }
-
- @Override
- public List<E> subList(int fromIndex, int toIndex) {
- final int size = base.size();
- subListRangeCheck(fromIndex, toIndex, size);
- return new StableReverseOrderListView<>(base.subList(size - toIndex, size - fromIndex));
- }
-
- @Override
- public StableValueImpl<E>[] delegates() {
- @SuppressWarnings("unchecked")
- final var baseDelegates = ((HasStableDelegates<E>) base).delegates();
- return ArraysSupport.reverse(
- Arrays.copyOf(baseDelegates, baseDelegates.length));
- }
- }
-
- }
-
// ---------- Set Implementations ----------
@jdk.internal.ValueBased
@@ -1614,187 +1421,6 @@ private Object writeReplace() {
}
}
- static final class StableMap<K, V>
- extends AbstractImmutableMap<K, V> {
-
- @Stable
- private final Function<? super K, ? extends V> mapper;
- @Stable
- private final Map<K, StableValueImpl<V>> delegate;
-
- StableMap(Set<K> keys, Function<? super K, ? extends V> mapper) {
- this.mapper = mapper;
- this.delegate = StableUtil.map(keys);
- }
-
- @Override public boolean containsKey(Object o) { return delegate.containsKey(o); }
- @Override public int size() { return delegate.size(); }
- @Override public Set<Map.Entry<K, V>> entrySet() { return StableMapEntrySet.of(this); }
-
- @ForceInline
- @Override
- public V get(Object key) {
- return getOrDefault(key, null);
- }
-
- @ForceInline
- @Override
- public V getOrDefault(Object key, V defaultValue) {
- final StableValueImpl<V> stable = delegate.get(key);
- if (stable == null) {
- return defaultValue;
- }
- @SuppressWarnings("unchecked")
- final K k = (K) key;
- return stable.orElseSet(new Supplier<V>() {
- @Override public V get() { return mapper.apply(k); }});
- }
-
- @jdk.internal.ValueBased
- static final class StableMapEntrySet<K, V> extends AbstractImmutableSet<Map.Entry<K, V>> {
-
- // Use a separate field for the outer class in order to facilitate
- // a @Stable annotation.
- @Stable
- private final StableMap<K, V> outer;
-
- @Stable
- private final Set<Map.Entry<K, StableValueImpl<V>>> delegateEntrySet;
-
- private StableMapEntrySet(StableMap<K, V> outer) {
- this.outer = outer;
- this.delegateEntrySet = outer.delegate.entrySet();
- }
-
- @Override public Iterator<Map.Entry<K, V>> iterator() { return LazyMapIterator.of(this); }
- @Override public int size() { return delegateEntrySet.size(); }
- @Override public int hashCode() { return outer.hashCode(); }
-
- @Override
- public String toString() {
- return StableUtil.renderMappings(this, "StableCollection", delegateEntrySet, false);
- }
-
- // For @ValueBased
- private static <K, V> StableMapEntrySet<K, V> of(StableMap<K, V> outer) {
- return new StableMapEntrySet<>(outer);
- }
-
- @jdk.internal.ValueBased
- static final class LazyMapIterator<K, V> implements Iterator<Map.Entry<K, V>> {
-
- // Use a separate field for the outer class in order to facilitate
- // a @Stable annotation.
- @Stable
- private final StableMapEntrySet<K, V> outer;
-
- @Stable
- private final Iterator<Map.Entry<K, StableValueImpl<V>>> delegateIterator;
-
- private LazyMapIterator(StableMapEntrySet<K, V> outer) {
- this.outer = outer;
- this.delegateIterator = outer.delegateEntrySet.iterator();
- }
-
- @Override public boolean hasNext() { return delegateIterator.hasNext(); }
-
- @Override
- public Entry<K, V> next() {
- final Map.Entry<K, StableValueImpl<V>> inner = delegateIterator.next();
- final K k = inner.getKey();
- return new StableEntry<>(k, inner.getValue(), new Supplier<V>() {
- @Override public V get() { return outer.outer.mapper.apply(k); }});
- }
-
- @Override
- public void forEachRemaining(Consumer<? super Map.Entry<K, V>> action) {
- final Consumer<? super Map.Entry<K, StableValueImpl<V>>> innerAction =
- new Consumer<>() {
- @Override
- public void accept(Entry<K, StableValueImpl<V>> inner) {
- final K k = inner.getKey();
- action.accept(new StableEntry<>(k, inner.getValue(), new Supplier<V>() {
- @Override public V get() { return outer.outer.mapper.apply(k); }}));
- }
- };
- delegateIterator.forEachRemaining(innerAction);
- }
-
- // For @ValueBased
- private static <K, V> LazyMapIterator<K, V> of(StableMapEntrySet<K, V> outer) {
- return new LazyMapIterator<>(outer);
- }
-
- }
- }
-
- private record StableEntry<K, V>(K getKey, // trick
- StableValueImpl<V> stableValue,
- Supplier<? extends V> supplier) implements Map.Entry<K, V> {
-
- @Override public V setValue(V value) { throw uoe(); }
- @Override public V getValue() { return stableValue.orElseSet(supplier); }
- @Override public int hashCode() { return hash(getKey()) ^ hash(getValue()); }
- @Override public String toString() { return getKey() + "=" + stableValue.toString(); }
- @Override public boolean equals(Object o) {
- return o instanceof Map.Entry<?, ?> e
- && Objects.equals(getKey(), e.getKey())
- // Invoke `getValue()` as late as possible to avoid evaluation
- && Objects.equals(getValue(), e.getValue());
- }
-
- private int hash(Object obj) { return (obj == null) ? 0 : obj.hashCode(); }
- }
-
- @Override
- public Collection<V> values() {
- return StableMapValues.of(this);
- }
-
- @jdk.internal.ValueBased
- static final class StableMapValues<V> extends AbstractImmutableCollection<V> {
-
- // Use a separate field for the outer class in order to facilitate
- // a @Stable annotation.
- @Stable
- private final StableMap<?, V> outer;
-
- private StableMapValues(StableMap<?, V> outer) {
- this.outer = outer;
- }
-
- @Override public Iterator<V> iterator() { return outer.new ValueIterator(); }
- @Override public int size() { return outer.size(); }
- @Override public boolean isEmpty() { return outer.isEmpty();}
- @Override public boolean contains(Object v) { return outer.containsValue(v); }
-
- private static final IntFunction<StableValueImpl<?>[]> GENERATOR = new IntFunction<StableValueImpl<?>[]>() {
- @Override
- public StableValueImpl<?>[] apply(int len) {
- return new StableValueImpl<?>[len];
- }
- };
-
- @Override
- public String toString() {
- final StableValueImpl<?>[] values = outer.delegate.values().toArray(GENERATOR);
- return StableUtil.renderElements(this, "StableCollection", values);
- }
-
- // For @ValueBased
- private static StableMapValues
|---|