diff --git a/make/autoconf/flags-cflags.m4 b/make/autoconf/flags-cflags.m4 index e9959196acb62..f8fbe14cc38d3 100644 --- a/make/autoconf/flags-cflags.m4 +++ b/make/autoconf/flags-cflags.m4 @@ -51,6 +51,14 @@ AC_DEFUN([FLAGS_SETUP_SHARED_LIBS], SET_SHARED_LIBRARY_NAME='-Wl,-install_name,@rpath/[$]1' SET_SHARED_LIBRARY_MAPFILE='-Wl,-exported_symbols_list,[$]1' + elif test "x$OPENJDK_TARGET_OS" = xaix; then + # Linking is different on aix + SHARED_LIBRARY_FLAGS="-shared -Wl,-bM:SRE -Wl,-bnoentry" + SET_EXECUTABLE_ORIGIN="" + SET_SHARED_LIBRARY_ORIGIN='' + SET_SHARED_LIBRARY_NAME='' + SET_SHARED_LIBRARY_MAPFILE='' + else # Default works for linux, might work on other platforms as well. SHARED_LIBRARY_FLAGS='-shared' @@ -206,7 +214,6 @@ AC_DEFUN([FLAGS_SETUP_WARNINGS], WARNINGS_ENABLE_ALL="-Wall -Wextra -Wformat=2 $WARNINGS_ENABLE_ADDITIONAL" DISABLED_WARNINGS="unknown-warning-option unused-parameter unused" - ;; xlc) @@ -284,9 +291,15 @@ AC_DEFUN([FLAGS_SETUP_OPTIMIZATION], C_O_FLAG_NONE="${C_O_FLAG_NONE} ${DISABLE_FORTIFY_CFLAGS}" fi elif test "x$TOOLCHAIN_TYPE" = xclang; then - C_O_FLAG_HIGHEST_JVM="-O3" - C_O_FLAG_HIGHEST="-O3" - C_O_FLAG_HI="-O3" + if test "x$OPENJDK_TARGET_OS" = xaix; then + C_O_FLAG_HIGHEST_JVM="-O3 -finline-functions" + C_O_FLAG_HIGHEST="-O3 -finline-functions" + C_O_FLAG_HI="-O3 -finline-functions" + else + C_O_FLAG_HIGHEST_JVM="-O3" + C_O_FLAG_HIGHEST="-O3" + C_O_FLAG_HI="-O3" + fi C_O_FLAG_NORM="-O2" C_O_FLAG_DEBUG_JVM="-O0" C_O_FLAG_SIZE="-Os" @@ -458,6 +471,9 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER], # so for debug we build with '-qpic=large -bbigtoc'. DEBUG_CFLAGS_JVM="-qpic=large" fi + if test "x$TOOLCHAIN_TYPE" = xclang && test "x$OPENJDK_TARGET_OS" = xaix; then + DEBUG_CFLAGS_JVM="-fpic -mcmodel=large" + fi fi if test "x$DEBUG_LEVEL" != xrelease; then @@ -493,6 +509,12 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER], -fvisibility=hidden -fno-strict-aliasing -fno-omit-frame-pointer" fi + if test "x$TOOLCHAIN_TYPE" = xclang && test "x$OPENJDK_TARGET_OS" = xaix; then + # clang compiler on aix needs -ffunction-sections + TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -ffunction-sections -ftls-model -fno-math-errno -fstack-protector" + TOOLCHAIN_CFLAGS_JDK="-ffunction-sections -fsigned-char -fstack-protector" + fi + if test "x$TOOLCHAIN_TYPE" = xgcc; then TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -fstack-protector" TOOLCHAIN_CFLAGS_JDK="-pipe -fstack-protector" @@ -601,6 +623,9 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER], if test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang; then PICFLAG="-fPIC" PIEFLAG="-fPIE" + elif test "x$TOOLCHAIN_TYPE" = xclang && test "x$OPENJDK_TARGET_OS" = xaix; then + JVM_PICFLAG="-fpic -mcmodel=large -Wl,-bbigtoc" + JDK_PICFLAG="-fpic" elif test "x$TOOLCHAIN_TYPE" = xxlc; then # '-qpic' defaults to 'qpic=small'. This means that the compiler generates only # one instruction for accessing the TOC. If the TOC grows larger than 64K, the linker @@ -746,6 +771,9 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_CPU_DEP], $1_CFLAGS_CPU_JDK="${$1_CFLAGS_CPU_JDK} -fno-omit-frame-pointer" fi fi + if test "x$OPENJDK_TARGET_OS" = xaix; then + $1_CFLAGS_CPU="-mcpu=pwr8" + fi elif test "x$TOOLCHAIN_TYPE" = xxlc; then if test "x$FLAGS_CPU" = xppc64; then diff --git a/make/autoconf/flags-ldflags.m4 b/make/autoconf/flags-ldflags.m4 index c1e7218ca2c49..e5c345fdef16d 100644 --- a/make/autoconf/flags-ldflags.m4 +++ b/make/autoconf/flags-ldflags.m4 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -75,7 +75,11 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER], elif test "x$TOOLCHAIN_TYPE" = xclang; then BASIC_LDFLAGS_JVM_ONLY="-mno-omit-leaf-frame-pointer -mstack-alignment=16 \ -fPIC" - + if test "x$OPENJDK_TARGET_OS" = xaix; then + BASIC_LDFLAGS="-Wl,-b64 -Wl,-brtl -Wl,-bnorwexec -Wl,-bnolibpath -Wl,-bnoexpall \ + -Wl,-bernotok -Wl,-bdatapsize:64k -Wl,-btextpsize:64k -Wl,-bstackpsize:64k" + BASIC_LDFLAGS_JVM_ONLY="$BASIC_LDFLAGS_JVM_ONLY -Wl,-lC_r -Wl,-bbigtoc" + fi elif test "x$TOOLCHAIN_TYPE" = xxlc; then BASIC_LDFLAGS="-b64 -brtl -bnorwexec -bnolibpath -bnoexpall -bernotok -btextpsize:64K \ -bdatapsize:64K -bstackpsize:64K" @@ -88,7 +92,8 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER], BASIC_LDFLAGS_JVM_ONLY="-opt:icf,8 -subsystem:windows" fi - if test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang; then + if (test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang) \ + && test "x$OPENJDK_TARGET_OS" != xaix; then if test -n "$HAS_NOEXECSTACK"; then BASIC_LDFLAGS="$BASIC_LDFLAGS -Wl,-z,noexecstack" fi @@ -116,6 +121,14 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER], if test "x$DEBUG_LEVEL" != xrelease; then DEBUGLEVEL_LDFLAGS_JVM_ONLY="$DEBUGLEVEL_LDFLAGS_JVM_ONLY -bbigtoc" fi + + elif test "x$TOOLCHAIN_TYPE" = xclang && test "x$OPENJDK_TARGET_OS" = xaix; then + # We need '-fpic' or '-fpic -mcmodel=large -Wl,-bbigtoc' if the TOC overflows. + # Hotspot now overflows its 64K TOC (currently only for debug), + # so we build with '-fpic -mcmodel=large -Wl,-bbigtoc'. + if test "x$DEBUG_LEVEL" != xrelease; then + DEBUGLEVEL_LDFLAGS_JVM_ONLY="$DEBUGLEVEL_LDFLAGS_JVM_ONLY -Wl,-bbigtoc" + fi fi # Setup LDFLAGS for linking executables diff --git a/make/autoconf/toolchain.m4 b/make/autoconf/toolchain.m4 index 1ae0902e3f311..c89aab11d4bcb 100644 --- a/make/autoconf/toolchain.m4 +++ b/make/autoconf/toolchain.m4 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -40,7 +40,7 @@ VALID_TOOLCHAINS_all="gcc clang xlc microsoft" # These toolchains are valid on different platforms VALID_TOOLCHAINS_linux="gcc clang" VALID_TOOLCHAINS_macosx="clang" -VALID_TOOLCHAINS_aix="xlc" +VALID_TOOLCHAINS_aix="xlc clang" VALID_TOOLCHAINS_windows="microsoft" # Toolchain descriptions @@ -234,6 +234,25 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETERMINE_TOOLCHAIN_TYPE], # First toolchain type in the list is the default DEFAULT_TOOLCHAIN=${VALID_TOOLCHAINS%% *} + # On AIX the default toolchain depends on the installed (found) compiler + # xlclang++ -> xlc toolchain + # ibm-clang++_r -> clang toolchain + # The compiler is searched on the PATH and TOOLCHAIN_PATH + # xlclang++ has precedence over ibm-clang++_r if both are installed + if test "x$OPENJDK_TARGET_OS" = xaix; then + DEFAULT_TOOLCHAIN="clang" + if test "x$TOOLCHAIN_PATH" != x; then + if test -e ${TOOLCHAIN_PATH}/xlclang++; then + DEFAULT_TOOLCHAIN="xlc" + fi + else + UTIL_LOOKUP_PROGS(XLCLANG_TEST_PATH, xlclang++) + if test "x$XLCLANG_TEST_PATH" != x; then + DEFAULT_TOOLCHAIN="xlc" + fi + fi + fi + if test "x$with_toolchain_type" = xlist; then # List all toolchains AC_MSG_NOTICE([The following toolchains are valid on this platform:]) @@ -263,22 +282,40 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETERMINE_TOOLCHAIN_TYPE], if test "x$TOOLCHAIN_PATH" != x; then XLC_TEST_PATH=${TOOLCHAIN_PATH}/ fi - - XLCLANG_VERSION_OUTPUT=`${XLC_TEST_PATH}xlclang++ -qversion 2>&1 | $HEAD -n 1` - $ECHO "$XLCLANG_VERSION_OUTPUT" | $GREP "IBM XL C/C++ for AIX" > /dev/null - if test $? -eq 0; then - AC_MSG_NOTICE([xlclang++ output: $XLCLANG_VERSION_OUTPUT]) + if test "x$TOOLCHAIN_TYPE" = xclang; then + TOOLCHAIN_DESCRIPTION_clang="IBM Open XL C/C++" + XLCLANG_VERSION_OUTPUT=`${XLC_TEST_PATH}ibm-clang++_r --version 2>&1 | $HEAD -n 1` + $ECHO "$XLCLANG_VERSION_OUTPUT" | $GREP "IBM Open XL C/C++ for AIX" > /dev/null + if test $? -eq 0; then + AC_MSG_NOTICE([ibm-clang++_r output: $XLCLANG_VERSION_OUTPUT]) + else + AC_MSG_ERROR([ibm-clang++_r version output check failed, output: $XLCLANG_VERSION_OUTPUT]) + fi else - AC_MSG_ERROR([xlclang++ version output check failed, output: $XLCLANG_VERSION_OUTPUT]) + XLCLANG_VERSION_OUTPUT=`${XLC_TEST_PATH}xlclang++ -qversion 2>&1 | $HEAD -n 1` + $ECHO "$XLCLANG_VERSION_OUTPUT" | $GREP "IBM XL C/C++ for AIX" > /dev/null + if test $? 
-eq 0; then + AC_MSG_NOTICE([xlclang++ output: $XLCLANG_VERSION_OUTPUT]) + else + AC_MSG_ERROR([xlclang++ version output check failed, output: $XLCLANG_VERSION_OUTPUT]) + fi fi fi - TOOLCHAIN_CC_BINARY_clang="clang" + if test "x$OPENJDK_TARGET_OS" = xaix; then + TOOLCHAIN_CC_BINARY_clang="ibm-clang_r" + else + TOOLCHAIN_CC_BINARY_clang="clang" + fi TOOLCHAIN_CC_BINARY_gcc="gcc" TOOLCHAIN_CC_BINARY_microsoft="cl" TOOLCHAIN_CC_BINARY_xlc="xlclang" - TOOLCHAIN_CXX_BINARY_clang="clang++" + if test "x$OPENJDK_TARGET_OS" = xaix; then + TOOLCHAIN_CXX_BINARY_clang="ibm-clang++_r" + else + TOOLCHAIN_CXX_BINARY_clang="clang++" + fi TOOLCHAIN_CXX_BINARY_gcc="g++" TOOLCHAIN_CXX_BINARY_microsoft="cl" TOOLCHAIN_CXX_BINARY_xlc="xlclang++" @@ -966,7 +1003,11 @@ AC_DEFUN_ONCE([TOOLCHAIN_MISC_CHECKS], # Setup hotspot lecagy names for toolchains HOTSPOT_TOOLCHAIN_TYPE=$TOOLCHAIN_TYPE if test "x$TOOLCHAIN_TYPE" = xclang; then - HOTSPOT_TOOLCHAIN_TYPE=gcc + if test "x$OPENJDK_TARGET_OS" = xaix; then + HOTSPOT_TOOLCHAIN_TYPE=xlc + else + HOTSPOT_TOOLCHAIN_TYPE=gcc + fi elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then HOTSPOT_TOOLCHAIN_TYPE=visCPP fi diff --git a/make/hotspot/gensrc/GensrcAdlc.gmk b/make/hotspot/gensrc/GensrcAdlc.gmk index 5c8989e3efd3d..427a0dfd34b79 100644 --- a/make/hotspot/gensrc/GensrcAdlc.gmk +++ b/make/hotspot/gensrc/GensrcAdlc.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2013, 2022, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -37,8 +37,13 @@ ifeq ($(call check-jvm-feature, compiler2), true) ifeq ($(call isBuildOs, linux), true) ADLC_CFLAGS := -fno-exceptions -DLINUX else ifeq ($(call isBuildOs, aix), true) - ADLC_LDFLAGS += -q64 - ADLC_CFLAGS := -qnortti -qeh -q64 -DAIX + ifeq ($(TOOLCHAIN_TYPE), clang) + ADLC_LDFLAGS += -m64 + ADLC_CFLAGS := -fno-rtti -fexceptions -ffunction-sections -m64 -DAIX -mcpu=pwr8 + else + ADLC_LDFLAGS += -q64 + ADLC_CFLAGS := -qnortti -qeh -q64 -DAIX + endif else ifeq ($(call isBuildOs, windows), true) ADLC_LDFLAGS += -nologo ADLC_CFLAGS := -nologo -EHsc @@ -167,6 +172,8 @@ ifeq ($(call check-jvm-feature, compiler2), true) ifeq ($(call check-jvm-feature, zgc), true) AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \ + $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/x/x_$(HOTSPOT_TARGET_CPU).ad \ + $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/x/x_$(HOTSPOT_TARGET_CPU_ARCH).ad \ $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/z/z_$(HOTSPOT_TARGET_CPU).ad \ $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/z/z_$(HOTSPOT_TARGET_CPU_ARCH).ad \ ))) diff --git a/make/hotspot/lib/JvmFeatures.gmk b/make/hotspot/lib/JvmFeatures.gmk index 1e24475ea464a..cbe60fde20522 100644 --- a/make/hotspot/lib/JvmFeatures.gmk +++ b/make/hotspot/lib/JvmFeatures.gmk @@ -149,6 +149,7 @@ endif ifneq ($(call check-jvm-feature, zgc), true) JVM_CFLAGS_FEATURES += -DINCLUDE_ZGC=0 JVM_EXCLUDE_PATTERNS += gc/z + JVM_EXCLUDE_PATTERNS += gc/x endif ifneq ($(call check-jvm-feature, shenandoahgc), true) diff --git a/make/hotspot/lib/JvmOverrideFiles.gmk b/make/hotspot/lib/JvmOverrideFiles.gmk index 377018cdb518b..b50d6f8bb36d5 100644 --- a/make/hotspot/lib/JvmOverrideFiles.gmk +++ b/make/hotspot/lib/JvmOverrideFiles.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. 
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -109,7 +109,11 @@ else ifeq ($(call isTargetOs, macosx), true) endif else ifeq ($(call isTargetOs, aix), true) - BUILD_LIBJVM_synchronizer.cpp_CXXFLAGS := -qnoinline + ifeq ($(TOOLCHAIN_TYPE), clang) + BUILD_LIBJVM_synchronizer.cpp_CXXFLAGS := -fno-inline + else + BUILD_LIBJVM_synchronizer.cpp_CXXFLAGS := -qnoinline + endif BUILD_LIBJVM_sharedRuntimeTrans.cpp_CXXFLAGS := $(CXX_O_FLAG_NONE) # Disable aggressive optimizations for functions in sharedRuntimeTrig.cpp # and sharedRuntimeTrans.cpp on ppc64. diff --git a/make/test/BuildTestLib.gmk b/make/test/BuildTestLib.gmk index b9f03209df404..f1574988b6fee 100644 --- a/make/test/BuildTestLib.gmk +++ b/make/test/BuildTestLib.gmk @@ -36,7 +36,7 @@ TEST_LIB_SUPPORT := $(SUPPORT_OUTPUTDIR)/test/lib $(eval $(call SetupJavaCompilation, BUILD_WB_JAR, \ TARGET_RELEASE := $(TARGET_RELEASE_NEWJDK_UPGRADED), \ - SRC := $(TEST_LIB_SOURCE_DIR)/jdk/test/whitebox/parser, \ + SRC := $(TEST_LIB_SOURCE_DIR)/jdk/test/whitebox/, \ BIN := $(TEST_LIB_SUPPORT)/wb_classes, \ JAR := $(TEST_LIB_SUPPORT)/wb.jar, \ DISABLED_WARNINGS := deprecation removal preview, \ @@ -53,7 +53,13 @@ $(eval $(call SetupJavaCompilation, BUILD_TEST_LIB_JAR, \ HEADERS := $(TEST_LIB_SUPPORT)/test-lib_headers, \ JAR := $(TEST_LIB_SUPPORT)/test-lib.jar, \ DISABLED_WARNINGS := try deprecation rawtypes unchecked serial cast removal preview, \ - JAVAC_FLAGS := --enable-preview, \ + JAVAC_FLAGS := --add-exports java.base/sun.security.util=ALL-UNNAMED \ + --add-exports java.base/jdk.internal.classfile=ALL-UNNAMED \ + --add-exports java.base/jdk.internal.classfile.attribute=ALL-UNNAMED \ + --add-exports java.base/jdk.internal.classfile.constantpool=ALL-UNNAMED \ + --add-exports java.base/jdk.internal.classfile.java.lang.constant=ALL-UNNAMED \ + --add-exports java.base/jdk.internal.module=ALL-UNNAMED \ + --enable-preview, \ )) TARGETS += $(BUILD_TEST_LIB_JAR) diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad index a7c6ddd792c4c..b31e5d0df6b6f 100644 --- a/src/hotspot/cpu/aarch64/aarch64.ad +++ b/src/hotspot/cpu/aarch64/aarch64.ad @@ -17105,14 +17105,17 @@ instruct string_compareUU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI %} instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2, - iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, - iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr) + iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, + iRegINoSp tmp3, iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, + vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr) %{ predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU); match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2))); effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, - TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr); - format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %} + TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, + TEMP vtmp0, TEMP vtmp1, KILL cr); + format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU) " + "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %} ins_encode %{ __ string_indexof($str1$$Register, $str2$$Register, @@ -17126,14 +17129,17 @@ instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 %} instruct string_indexofLL(iRegP_R1 str1, 
iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2, - iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, - iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr) + iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, + iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, + vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr) %{ predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL); match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2))); effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, - TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr); - format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %} + TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, + TEMP vtmp0, TEMP vtmp1, KILL cr); + format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL) " + "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %} ins_encode %{ __ string_indexof($str1$$Register, $str2$$Register, @@ -17147,14 +17153,17 @@ instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 %} instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2, - iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, - iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr) + iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,iRegINoSp tmp3, + iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, + vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr) %{ predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL); match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2))); effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, - TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr); - format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %} + TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, + TEMP tmp6, TEMP vtmp0, TEMP vtmp1, KILL cr); + format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL) " + "# KILL $str1 cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %} ins_encode %{ __ string_indexof($str1$$Register, $str2$$Register, @@ -17168,14 +17177,15 @@ instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 %} instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, - immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, - iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) + immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, + iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) %{ predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU); match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2))); effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr); - format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %} + format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU) " + "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %} ins_encode %{ int icnt2 = (int)$int_cnt2$$constant; @@ -17189,14 +17199,15 @@ instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, %} instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, - immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, - iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) + immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, + iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) %{ 
predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL); match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2))); effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr); - format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %} + format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL) " + "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %} ins_encode %{ int icnt2 = (int)$int_cnt2$$constant; @@ -17210,14 +17221,15 @@ instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, %} instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, - immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, - iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) + immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, + iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) %{ predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL); match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2))); effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr); - format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %} + format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL) " + "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %} ins_encode %{ int icnt2 = (int)$int_cnt2$$constant; @@ -17334,13 +17346,17 @@ instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt, instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result, iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3, + vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3, + vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7, iRegP_R10 tmp, rFlagsReg cr) %{ predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL); match(Set result (AryEq ary1 ary2)); - effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr); + effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, + TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5, + TEMP vtmp6, TEMP vtmp7, KILL cr); - format %{ "Array Equals $ary1,ary2 -> $result // KILL $tmp" %} + format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %} ins_encode %{ address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, @@ -17355,13 +17371,17 @@ instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result, instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result, iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3, + vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3, + vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7, iRegP_R10 tmp, rFlagsReg cr) %{ predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU); match(Set result (AryEq ary1 ary2)); - effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr); + effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3, + TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5, + TEMP vtmp6, TEMP vtmp7, KILL cr); - format %{ "Array Equals $ary1,ary2 -> $result // KILL $tmp" %} + format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %} ins_encode %{ address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, @@ 
-17391,36 +17411,39 @@ instruct count_positives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg // fast char[] to byte[] compression instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len, - vRegD_V0 tmp1, vRegD_V1 tmp2, - vRegD_V2 tmp3, vRegD_V3 tmp4, + vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, + vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5, iRegI_R0 result, rFlagsReg cr) %{ match(Set result (StrCompressedCopy src (Binary dst len))); - effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, + effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5, USE_KILL src, USE_KILL dst, USE len, KILL cr); - format %{ "String Compress $src,$dst,$len -> $result // KILL $src,$dst" %} + format %{ "String Compress $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %} ins_encode %{ __ char_array_compress($src$$Register, $dst$$Register, $len$$Register, - $result$$Register, - $tmp1$$FloatRegister, $tmp2$$FloatRegister, - $tmp3$$FloatRegister, $tmp4$$FloatRegister); + $result$$Register, $vtmp0$$FloatRegister, $vtmp1$$FloatRegister, + $vtmp2$$FloatRegister, $vtmp3$$FloatRegister, + $vtmp4$$FloatRegister, $vtmp5$$FloatRegister); %} ins_pipe(pipe_slow); %} // fast byte[] to char[] inflation -instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len, - vRegD_V0 tmp1, vRegD_V1 tmp2, vRegD_V2 tmp3, iRegP_R3 tmp4, rFlagsReg cr) +instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len, iRegP_R3 tmp, + vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3, + vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, rFlagsReg cr) %{ match(Set dummy (StrInflatedCopy src (Binary dst len))); - effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr); + effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, + TEMP vtmp4, TEMP vtmp5, TEMP vtmp6, TEMP tmp, + USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr); - format %{ "String Inflate $src,$dst // KILL $tmp1, $tmp2" %} + format %{ "String Inflate $src,$dst # KILL $tmp $src $dst $len V0-V6 cr" %} ins_encode %{ address tpc = __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register, - $tmp1$$FloatRegister, $tmp2$$FloatRegister, - $tmp3$$FloatRegister, $tmp4$$Register); + $vtmp0$$FloatRegister, $vtmp1$$FloatRegister, + $vtmp2$$FloatRegister, $tmp$$Register); if (tpc == NULL) { ciEnv::current()->record_failure("CodeCache is full"); return; @@ -17431,41 +17454,43 @@ instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len // encode char[] to byte[] in ISO_8859_1 instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len, - vRegD_V0 vtmp0, vRegD_V1 vtmp1, - vRegD_V2 vtmp2, vRegD_V3 vtmp3, + vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, + vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5, iRegI_R0 result, rFlagsReg cr) %{ predicate(!((EncodeISOArrayNode*)n)->is_ascii()); match(Set result (EncodeISOArray src (Binary dst len))); - effect(USE_KILL src, USE_KILL dst, USE len, - KILL vtmp0, KILL vtmp1, KILL vtmp2, KILL vtmp3, KILL cr); + effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1, + KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr); - format %{ "Encode ISO array $src,$dst,$len -> $result" %} + format %{ "Encode ISO array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %} ins_encode %{ __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register, $result$$Register, false, $vtmp0$$FloatRegister, $vtmp1$$FloatRegister, - $vtmp2$$FloatRegister, $vtmp3$$FloatRegister); + 
$vtmp2$$FloatRegister, $vtmp3$$FloatRegister, + $vtmp4$$FloatRegister, $vtmp5$$FloatRegister); %} ins_pipe(pipe_class_memory); %} instruct encode_ascii_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len, - vRegD_V0 vtmp0, vRegD_V1 vtmp1, - vRegD_V2 vtmp2, vRegD_V3 vtmp3, + vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, + vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5, iRegI_R0 result, rFlagsReg cr) %{ predicate(((EncodeISOArrayNode*)n)->is_ascii()); match(Set result (EncodeISOArray src (Binary dst len))); - effect(USE_KILL src, USE_KILL dst, USE len, - KILL vtmp0, KILL vtmp1, KILL vtmp2, KILL vtmp3, KILL cr); + effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1, + KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr); - format %{ "Encode ASCII array $src,$dst,$len -> $result" %} + format %{ "Encode ASCII array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %} ins_encode %{ __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register, $result$$Register, true, $vtmp0$$FloatRegister, $vtmp1$$FloatRegister, - $vtmp2$$FloatRegister, $vtmp3$$FloatRegister); + $vtmp2$$FloatRegister, $vtmp3$$FloatRegister, + $vtmp4$$FloatRegister, $vtmp5$$FloatRegister); %} ins_pipe(pipe_class_memory); %} diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp index 46ff431bd8837..a53d83296459d 100644 --- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp @@ -228,7 +228,7 @@ class Instruction_aarch64 { static void spatch(address a, int msb, int lsb, int64_t val) { int nbits = msb - lsb + 1; int64_t chk = val >> (nbits - 1); - guarantee (chk == -1 || chk == 0, "Field too big for insn"); + guarantee (chk == -1 || chk == 0, "Field too big for insn at " INTPTR_FORMAT, p2i(a)); unsigned uval = val; unsigned mask = checked_cast(right_n_bits(nbits)); uval &= mask; diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp index dc19c72fd11e6..4bf7fee936bac 100644 --- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp @@ -1010,7 +1010,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch __ decode_heap_oop(dest->as_register()); } - if (!UseZGC) { + if (!(UseZGC && !ZGenerational)) { // Load barrier has not yet been applied, so ZGC can't verify the oop here __ verify_oop(dest->as_register()); } diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp index e96621ae2d378..dbe64f8f9ca74 100644 --- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp @@ -46,6 +46,7 @@ typedef void (MacroAssembler::* chr_insn)(Register Rt, const Address &adr); // Search for str1 in str2 and return index or -1 +// Clobbers: rscratch1, rscratch2, rflags. May also clobber v0-v1, when icnt1==-1. void C2_MacroAssembler::string_indexof(Register str2, Register str1, Register cnt2, Register cnt1, Register tmp1, Register tmp2, diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp index d79705579b639..5169a510154ab 100644 --- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,17 +32,21 @@ #include "memory/resourceArea.hpp" #include "runtime/frame.inline.hpp" #include "runtime/javaThread.hpp" -#include "runtime/sharedRuntime.hpp" #include "runtime/registerMap.hpp" +#include "runtime/sharedRuntime.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" +#include "utilities/formatBuffer.hpp" +#if INCLUDE_JVMCI +#include "jvmci/jvmciRuntime.hpp" +#endif static int slow_path_size(nmethod* nm) { // The slow path code is out of line with C2 return nm->is_compiled_by_c2() ? 0 : 6; } -// This is the offset of the entry barrier from where the frame is completed. +// This is the offset of the entry barrier relative to where the frame is completed. // If any code changes between the end of the verified entry where the entry // barrier resides, and the completion of the frame, then // NativeNMethodCmpBarrier::verify() will immediately complain when it does @@ -62,58 +66,80 @@ static int entry_barrier_offset(nmethod* nm) { return 0; } -class NativeNMethodBarrier: public NativeInstruction { - address instruction_address() const { return addr_at(0); } +class NativeNMethodBarrier { + address _instruction_address; + int* _guard_addr; + nmethod* _nm; + + address instruction_address() const { return _instruction_address; } + + int *guard_addr() { + return _guard_addr; + } int local_guard_offset(nmethod* nm) { // It's the last instruction return (-entry_barrier_offset(nm)) - 4; } - int *guard_addr(nmethod* nm) { - if (nm->is_compiled_by_c2()) { - // With c2 compiled code, the guard is out-of-line in a stub - // We find it using the RelocIterator. - RelocIterator iter(nm); - while (iter.next()) { - if (iter.type() == relocInfo::entry_guard_type) { - entry_guard_Relocation* const reloc = iter.entry_guard_reloc(); - return reinterpret_cast(reloc->addr()); +public: + NativeNMethodBarrier(nmethod* nm): _nm(nm) { +#if INCLUDE_JVMCI + if (nm->is_compiled_by_jvmci()) { + address pc = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset(); + RelocIterator iter(nm, pc, pc + 4); + guarantee(iter.next(), "missing relocs"); + guarantee(iter.type() == relocInfo::section_word_type, "unexpected reloc"); + + _guard_addr = (int*) iter.section_word_reloc()->target(); + _instruction_address = pc; + } else +#endif + { + _instruction_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm); + if (nm->is_compiled_by_c2()) { + // With c2 compiled code, the guard is out-of-line in a stub + // We find it using the RelocIterator. 
+ RelocIterator iter(nm); + while (iter.next()) { + if (iter.type() == relocInfo::entry_guard_type) { + entry_guard_Relocation* const reloc = iter.entry_guard_reloc(); + _guard_addr = reinterpret_cast(reloc->addr()); + return; + } + } + ShouldNotReachHere(); } + _guard_addr = reinterpret_cast(instruction_address() + local_guard_offset(nm)); } - ShouldNotReachHere(); - } - return reinterpret_cast(instruction_address() + local_guard_offset(nm)); } -public: - int get_value(nmethod* nm) { - return Atomic::load_acquire(guard_addr(nm)); + int get_value() { + return Atomic::load_acquire(guard_addr()); } - void set_value(nmethod* nm, int value) { - Atomic::release_store(guard_addr(nm), value); + void set_value(int value) { + Atomic::release_store(guard_addr(), value); } - void verify() const; -}; - -// Store the instruction bitmask, bits and name for checking the barrier. -struct CheckInsn { - uint32_t mask; - uint32_t bits; - const char *name; + bool check_barrier(err_msg& msg) const; + void verify() const { + err_msg msg("%s", ""); + assert(check_barrier(msg), "%s", msg.buffer()); + } }; // The first instruction of the nmethod entry barrier is an ldr (literal) // instruction. Verify that it's really there, so the offsets are not skewed. -void NativeNMethodBarrier::verify() const { +bool NativeNMethodBarrier::check_barrier(err_msg& msg) const { uint32_t* addr = (uint32_t*) instruction_address(); uint32_t inst = *addr; if ((inst & 0xff000000) != 0x18000000) { - tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", (intptr_t)addr, inst); - fatal("not an ldr (literal) instruction."); + msg.print("Nmethod entry barrier did not start with ldr (literal) as expected. " + "Addr: " PTR_FORMAT " Code: " UINT32_FORMAT, p2i(addr), inst); + return false; } + return true; } @@ -156,13 +182,6 @@ void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) { new_frame->pc = SharedRuntime::get_handle_wrong_method_stub(); } -static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) { - address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm); - NativeNMethodBarrier* barrier = reinterpret_cast(barrier_address); - debug_only(barrier->verify()); - return barrier; -} - void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) { if (!supports_entry_barrier(nm)) { return; @@ -179,8 +198,8 @@ void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) { bs_asm->increment_patching_epoch(); } - NativeNMethodBarrier* barrier = native_nmethod_barrier(nm); - barrier->set_value(nm, value); + NativeNMethodBarrier barrier(nm); + barrier.set_value(value); } int BarrierSetNMethod::guard_value(nmethod* nm) { @@ -188,6 +207,13 @@ int BarrierSetNMethod::guard_value(nmethod* nm) { return disarmed_guard_value(); } - NativeNMethodBarrier* barrier = native_nmethod_barrier(nm); - return barrier->get_value(nm); + NativeNMethodBarrier barrier(nm); + return barrier.get_value(); +} + +#if INCLUDE_JVMCI +bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) { + NativeNMethodBarrier barrier(nm); + return barrier.check_barrier(msg); } +#endif diff --git a/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.cpp new file mode 100644 index 0000000000000..5c891e8c170fb --- /dev/null +++ b/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.cpp @@ -0,0 +1,462 @@ +/* + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "code/codeBlob.hpp" +#include "code/vmreg.inline.hpp" +#include "gc/x/xBarrier.inline.hpp" +#include "gc/x/xBarrierSet.hpp" +#include "gc/x/xBarrierSetAssembler.hpp" +#include "gc/x/xBarrierSetRuntime.hpp" +#include "gc/x/xThreadLocalData.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/sharedRuntime.hpp" +#include "utilities/macros.hpp" +#ifdef COMPILER1 +#include "c1/c1_LIRAssembler.hpp" +#include "c1/c1_MacroAssembler.hpp" +#include "gc/x/c1/xBarrierSetC1.hpp" +#endif // COMPILER1 +#ifdef COMPILER2 +#include "gc/x/c2/xBarrierSetC2.hpp" +#endif // COMPILER2 + +#ifdef PRODUCT +#define BLOCK_COMMENT(str) /* nothing */ +#else +#define BLOCK_COMMENT(str) __ block_comment(str) +#endif + +#undef __ +#define __ masm-> + +void XBarrierSetAssembler::load_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + Register dst, + Address src, + Register tmp1, + Register tmp2) { + if (!XBarrierSet::barrier_needed(decorators, type)) { + // Barrier not needed + BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2); + return; + } + + assert_different_registers(rscratch1, rscratch2, src.base()); + assert_different_registers(rscratch1, rscratch2, dst); + + Label done; + + // Load bad mask into scratch register. + __ ldr(rscratch1, address_bad_mask_from_thread(rthread)); + __ lea(rscratch2, src); + __ ldr(dst, src); + + // Test reference against bad mask. If mask bad, then we need to fix it up. + __ tst(dst, rscratch1); + __ br(Assembler::EQ, done); + + __ enter(/*strip_ret_addr*/true); + + __ push_call_clobbered_registers_except(RegSet::of(dst)); + + if (c_rarg0 != dst) { + __ mov(c_rarg0, dst); + } + __ mov(c_rarg1, rscratch2); + + __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); + + // Make sure dst has the return value. + if (dst != r0) { + __ mov(dst, r0); + } + + __ pop_call_clobbered_registers_except(RegSet::of(dst)); + __ leave(); + + __ bind(done); +} + +#ifdef ASSERT + +void XBarrierSetAssembler::store_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + Address dst, + Register val, + Register tmp1, + Register tmp2, + Register tmp3) { + // Verify value + if (is_reference_type(type)) { + // Note that src could be noreg, which means we + // are storing null and can skip verification. + if (val != noreg) { + Label done; + + // tmp1, tmp2 and tmp3 are often set to noreg. 
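+ // Only rscratch1 is clobbered by the bad-mask check, so save and restore just that register.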
+ RegSet savedRegs = RegSet::of(rscratch1); + __ push(savedRegs, sp); + + __ ldr(rscratch1, address_bad_mask_from_thread(rthread)); + __ tst(val, rscratch1); + __ br(Assembler::EQ, done); + __ stop("Verify oop store failed"); + __ should_not_reach_here(); + __ bind(done); + __ pop(savedRegs, sp); + } + } + + // Store value + BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, noreg); +} + +#endif // ASSERT + +void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, + DecoratorSet decorators, + bool is_oop, + Register src, + Register dst, + Register count, + RegSet saved_regs) { + if (!is_oop) { + // Barrier not needed + return; + } + + BLOCK_COMMENT("XBarrierSetAssembler::arraycopy_prologue {"); + + assert_different_registers(src, count, rscratch1); + + __ push(saved_regs, sp); + + if (count == c_rarg0) { + if (src == c_rarg1) { + // exactly backwards!! + __ mov(rscratch1, c_rarg0); + __ mov(c_rarg0, c_rarg1); + __ mov(c_rarg1, rscratch1); + } else { + __ mov(c_rarg1, count); + __ mov(c_rarg0, src); + } + } else { + __ mov(c_rarg0, src); + __ mov(c_rarg1, count); + } + + __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2); + + __ pop(saved_regs, sp); + + BLOCK_COMMENT("} XBarrierSetAssembler::arraycopy_prologue"); +} + +void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, + Register jni_env, + Register robj, + Register tmp, + Label& slowpath) { + BLOCK_COMMENT("XBarrierSetAssembler::try_resolve_jobject_in_native {"); + + assert_different_registers(jni_env, robj, tmp); + + // Resolve jobject + BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath); + + // The Address offset is too large to direct load - -784. Our range is +127, -128. + __ mov(tmp, (int64_t)(in_bytes(XThreadLocalData::address_bad_mask_offset()) - + in_bytes(JavaThread::jni_environment_offset()))); + + // Load address bad mask + __ add(tmp, jni_env, tmp); + __ ldr(tmp, Address(tmp)); + + // Check address bad mask + __ tst(robj, tmp); + __ br(Assembler::NE, slowpath); + + BLOCK_COMMENT("} XBarrierSetAssembler::try_resolve_jobject_in_native"); +} + +#ifdef COMPILER1 + +#undef __ +#define __ ce->masm()-> + +void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, + LIR_Opr ref) const { + assert_different_registers(rscratch1, rthread, ref->as_register()); + + __ ldr(rscratch1, address_bad_mask_from_thread(rthread)); + __ tst(ref->as_register(), rscratch1); +} + +void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, + XLoadBarrierStubC1* stub) const { + // Stub entry + __ bind(*stub->entry()); + + Register ref = stub->ref()->as_register(); + Register ref_addr = noreg; + Register tmp = noreg; + + if (stub->tmp()->is_valid()) { + // Load address into tmp register + ce->leal(stub->ref_addr(), stub->tmp()); + ref_addr = tmp = stub->tmp()->as_pointer_register(); + } else { + // Address already in register + ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register(); + } + + assert_different_registers(ref, ref_addr, noreg); + + // Save r0 unless it is the result or tmp register + // Set up SP to accommodate parameters and maybe r0.. 
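+ // SP must stay 16-byte aligned on AArch64, hence the 16/32 byte adjustments.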
+ if (ref != r0 && tmp != r0) { + __ sub(sp, sp, 32); + __ str(r0, Address(sp, 16)); + } else { + __ sub(sp, sp, 16); + } + + // Setup arguments and call runtime stub + ce->store_parameter(ref_addr, 1); + ce->store_parameter(ref, 0); + + __ far_call(stub->runtime_stub()); + + // Verify result + __ verify_oop(r0); + + // Move result into place + if (ref != r0) { + __ mov(ref, r0); + } + + // Restore r0 unless it is the result or tmp register + if (ref != r0 && tmp != r0) { + __ ldr(r0, Address(sp, 16)); + __ add(sp, sp, 32); + } else { + __ add(sp, sp, 16); + } + + // Stub exit + __ b(*stub->continuation()); +} + +#undef __ +#define __ sasm-> + +void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, + DecoratorSet decorators) const { + __ prologue("zgc_load_barrier stub", false); + + __ push_call_clobbered_registers_except(RegSet::of(r0)); + + // Setup arguments + __ load_parameter(0, c_rarg0); + __ load_parameter(1, c_rarg1); + + __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); + + __ pop_call_clobbered_registers_except(RegSet::of(r0)); + + __ epilogue(); +} +#endif // COMPILER1 + +#ifdef COMPILER2 + +OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) { + if (!OptoReg::is_reg(opto_reg)) { + return OptoReg::Bad; + } + + const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); + if (vm_reg->is_FloatRegister()) { + return opto_reg & ~1; + } + + return opto_reg; +} + +#undef __ +#define __ _masm-> + +class XSaveLiveRegisters { +private: + MacroAssembler* const _masm; + RegSet _gp_regs; + FloatRegSet _fp_regs; + PRegSet _p_regs; + +public: + void initialize(XLoadBarrierStubC2* stub) { + // Record registers that needs to be saved/restored + RegMaskIterator rmi(stub->live()); + while (rmi.has_next()) { + const OptoReg::Name opto_reg = rmi.next(); + if (OptoReg::is_reg(opto_reg)) { + const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); + if (vm_reg->is_Register()) { + _gp_regs += RegSet::of(vm_reg->as_Register()); + } else if (vm_reg->is_FloatRegister()) { + _fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister()); + } else if (vm_reg->is_PRegister()) { + _p_regs += PRegSet::of(vm_reg->as_PRegister()); + } else { + fatal("Unknown register type"); + } + } + } + + // Remove C-ABI SOE registers, scratch regs and _ref register that will be updated + _gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9, stub->ref()); + } + + XSaveLiveRegisters(MacroAssembler* masm, XLoadBarrierStubC2* stub) : + _masm(masm), + _gp_regs(), + _fp_regs(), + _p_regs() { + + // Figure out what registers to save/restore + initialize(stub); + + // Save registers + __ push(_gp_regs, sp); + __ push_fp(_fp_regs, sp); + __ push_p(_p_regs, sp); + } + + ~XSaveLiveRegisters() { + // Restore registers + __ pop_p(_p_regs, sp); + __ pop_fp(_fp_regs, sp); + + // External runtime call may clobber ptrue reg + __ reinitialize_ptrue(); + + __ pop(_gp_regs, sp); + } +}; + +#undef __ +#define __ _masm-> + +class XSetupArguments { +private: + MacroAssembler* const _masm; + const Register _ref; + const Address _ref_addr; + +public: + XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) : + _masm(masm), + _ref(stub->ref()), + _ref_addr(stub->ref_addr()) { + + // Setup arguments + if (_ref_addr.base() == noreg) { + // No self healing + if (_ref != c_rarg0) { + __ mov(c_rarg0, _ref); + } + __ mov(c_rarg1, 0); + } else { + // Self healing + if (_ref == c_rarg0) { + // _ref is already at correct place + __ lea(c_rarg1, _ref_addr); 
+ } else if (_ref != c_rarg1) { + // _ref is in wrong place, but not in c_rarg1, so fix it first + __ lea(c_rarg1, _ref_addr); + __ mov(c_rarg0, _ref); + } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) { + assert(_ref == c_rarg1, "Mov ref first, vacating c_rarg0"); + __ mov(c_rarg0, _ref); + __ lea(c_rarg1, _ref_addr); + } else { + assert(_ref == c_rarg1, "Need to vacate c_rarg1 and _ref_addr is using c_rarg0"); + if (_ref_addr.base() == c_rarg0 || _ref_addr.index() == c_rarg0) { + __ mov(rscratch2, c_rarg1); + __ lea(c_rarg1, _ref_addr); + __ mov(c_rarg0, rscratch2); + } else { + ShouldNotReachHere(); + } + } + } + } + + ~XSetupArguments() { + // Transfer result + if (_ref != r0) { + __ mov(_ref, r0); + } + } +}; + +#undef __ +#define __ masm-> + +void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const { + BLOCK_COMMENT("XLoadBarrierStubC2"); + + // Stub entry + __ bind(*stub->entry()); + + { + XSaveLiveRegisters save_live_registers(masm, stub); + XSetupArguments setup_arguments(masm, stub); + __ mov(rscratch1, stub->slow_path()); + __ blr(rscratch1); + } + // Stub exit + __ b(*stub->continuation()); +} + +#endif // COMPILER2 + +#undef __ +#define __ masm-> + +void XBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) { + // Check if mask is good. + // verifies that XAddressBadMask & r0 == 0 + __ ldr(tmp2, Address(rthread, XThreadLocalData::address_bad_mask_offset())); + __ andr(tmp1, obj, tmp2); + __ cbnz(tmp1, error); + + BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error); +} + +#undef __ diff --git a/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.hpp new file mode 100644 index 0000000000000..8c1e9521757b4 --- /dev/null +++ b/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.hpp @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef CPU_AARCH64_GC_X_XBARRIERSETASSEMBLER_AARCH64_HPP +#define CPU_AARCH64_GC_X_XBARRIERSETASSEMBLER_AARCH64_HPP + +#include "code/vmreg.hpp" +#include "oops/accessDecorators.hpp" +#ifdef COMPILER2 +#include "opto/optoreg.hpp" +#endif // COMPILER2 + +#ifdef COMPILER1 +class LIR_Assembler; +class LIR_Opr; +class StubAssembler; +#endif // COMPILER1 + +#ifdef COMPILER2 +class Node; +#endif // COMPILER2 + +#ifdef COMPILER1 +class XLoadBarrierStubC1; +#endif // COMPILER1 + +#ifdef COMPILER2 +class XLoadBarrierStubC2; +#endif // COMPILER2 + +class XBarrierSetAssembler : public XBarrierSetAssemblerBase { +public: + virtual void load_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + Register dst, + Address src, + Register tmp1, + Register tmp2); + +#ifdef ASSERT + virtual void store_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + Address dst, + Register val, + Register tmp1, + Register tmp2, + Register tmp3); +#endif // ASSERT + + virtual void arraycopy_prologue(MacroAssembler* masm, + DecoratorSet decorators, + bool is_oop, + Register src, + Register dst, + Register count, + RegSet saved_regs); + + virtual void try_resolve_jobject_in_native(MacroAssembler* masm, + Register jni_env, + Register robj, + Register tmp, + Label& slowpath); + + virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; } + +#ifdef COMPILER1 + void generate_c1_load_barrier_test(LIR_Assembler* ce, + LIR_Opr ref) const; + + void generate_c1_load_barrier_stub(LIR_Assembler* ce, + XLoadBarrierStubC1* stub) const; + + void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, + DecoratorSet decorators) const; +#endif // COMPILER1 + +#ifdef COMPILER2 + OptoReg::Name refine_register(const Node* node, + OptoReg::Name opto_reg); + + void generate_c2_load_barrier_stub(MacroAssembler* masm, + XLoadBarrierStubC2* stub) const; +#endif // COMPILER2 + + void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error); +}; + +#endif // CPU_AARCH64_GC_X_XBARRIERSETASSEMBLER_AARCH64_HPP diff --git a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.cpp similarity index 98% rename from src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp rename to src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.cpp index 5ee7c2b231c3f..6204f21270374 100644 --- a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.cpp @@ -24,7 +24,7 @@ #include "precompiled.hpp" #include "gc/shared/gcLogPrecious.hpp" #include "gc/shared/gc_globals.hpp" -#include "gc/z/zGlobals.hpp" +#include "gc/x/xGlobals.hpp" #include "runtime/globals.hpp" #include "runtime/os.hpp" #include "utilities/globalDefinitions.hpp" @@ -196,15 +196,15 @@ static size_t probe_valid_max_address_bit() { #endif // LINUX } -size_t ZPlatformAddressOffsetBits() { +size_t XPlatformAddressOffsetBits() { const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1; const size_t max_address_offset_bits = valid_max_address_offset_bits - 3; const size_t min_address_offset_bits = max_address_offset_bits - 2; - const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio); + const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio); const size_t address_offset_bits = log2i_exact(address_offset); return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); } -size_t 
ZPlatformAddressMetadataShift() { - return ZPlatformAddressOffsetBits(); +size_t XPlatformAddressMetadataShift() { + return XPlatformAddressOffsetBits(); } diff --git a/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.hpp new file mode 100644 index 0000000000000..870b0d74d5766 --- /dev/null +++ b/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.hpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef CPU_AARCH64_GC_X_XGLOBALS_AARCH64_HPP +#define CPU_AARCH64_GC_X_XGLOBALS_AARCH64_HPP + +const size_t XPlatformHeapViews = 3; +const size_t XPlatformCacheLineSize = 64; + +size_t XPlatformAddressOffsetBits(); +size_t XPlatformAddressMetadataShift(); + +#endif // CPU_AARCH64_GC_X_XGLOBALS_AARCH64_HPP diff --git a/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad b/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad new file mode 100644 index 0000000000000..a8ef3ce9f13d6 --- /dev/null +++ b/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad @@ -0,0 +1,243 @@ +// +// Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. +// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +// +// This code is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License version 2 only, as +// published by the Free Software Foundation. +// +// This code is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// version 2 for more details (a copy is included in the LICENSE file that +// accompanied this code). +// +// You should have received a copy of the GNU General Public License version +// 2 along with this work; if not, write to the Free Software Foundation, +// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +// +// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +// or visit www.oracle.com if you need additional information or have any +// questions. 
+// + +source_hpp %{ + +#include "gc/shared/gc_globals.hpp" +#include "gc/x/c2/xBarrierSetC2.hpp" +#include "gc/x/xThreadLocalData.hpp" + +%} + +source %{ + +static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) { + if (barrier_data == XLoadBarrierElided) { + return; + } + XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data); + __ ldr(tmp, Address(rthread, XThreadLocalData::address_bad_mask_offset())); + __ andr(tmp, tmp, ref); + __ cbnz(tmp, *stub->entry()); + __ bind(*stub->continuation()); +} + +static void x_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) { + XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong); + __ b(*stub->entry()); + __ bind(*stub->continuation()); +} + +%} + +// Load Pointer +instruct xLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr) +%{ + match(Set dst (LoadP mem)); + predicate(UseZGC && !ZGenerational && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() != 0)); + effect(TEMP dst, KILL cr); + + ins_cost(4 * INSN_COST); + + format %{ "ldr $dst, $mem" %} + + ins_encode %{ + const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); + __ ldr($dst$$Register, ref_addr); + x_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, barrier_data()); + %} + + ins_pipe(iload_reg_mem); +%} + +// Load Pointer Volatile +instruct xLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg cr) +%{ + match(Set dst (LoadP mem)); + predicate(UseZGC && !ZGenerational && needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0); + effect(TEMP dst, KILL cr); + + ins_cost(VOLATILE_REF_COST); + + format %{ "ldar $dst, $mem\t" %} + + ins_encode %{ + __ ldar($dst$$Register, $mem$$Register); + x_load_barrier(_masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, barrier_data()); + %} + + ins_pipe(pipe_serial); +%} + +instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ + match(Set res (CompareAndSwapP mem (Binary oldval newval))); + match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); + predicate(UseZGC && !ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong); + effect(KILL cr, TEMP_DEF res); + + ins_cost(2 * VOLATILE_REF_COST); + + format %{ "cmpxchg $mem, $oldval, $newval\n\t" + "cset $res, EQ" %} + + ins_encode %{ + guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + false /* acquire */, true /* release */, false /* weak */, rscratch2); + __ cset($res$$Register, Assembler::EQ); + if (barrier_data() != XLoadBarrierElided) { + Label good; + __ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset())); + __ andr(rscratch1, rscratch1, rscratch2); + __ cbz(rscratch1, good); + x_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + false /* acquire */, true /* release */, false /* weak */, rscratch2); + __ cset($res$$Register, Assembler::EQ); + __ bind(good); + } + %} + + ins_pipe(pipe_slow); +%} + +instruct xCompareAndSwapPAcq(iRegINoSp res, indirect 
mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ + match(Set res (CompareAndSwapP mem (Binary oldval newval))); + match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); + predicate(UseZGC && !ZGenerational && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)); + effect(KILL cr, TEMP_DEF res); + + ins_cost(2 * VOLATILE_REF_COST); + + format %{ "cmpxchg $mem, $oldval, $newval\n\t" + "cset $res, EQ" %} + + ins_encode %{ + guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + true /* acquire */, true /* release */, false /* weak */, rscratch2); + __ cset($res$$Register, Assembler::EQ); + if (barrier_data() != XLoadBarrierElided) { + Label good; + __ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset())); + __ andr(rscratch1, rscratch1, rscratch2); + __ cbz(rscratch1, good); + x_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */ ); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + true /* acquire */, true /* release */, false /* weak */, rscratch2); + __ cset($res$$Register, Assembler::EQ); + __ bind(good); + } + %} + + ins_pipe(pipe_slow); +%} + +instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ + match(Set res (CompareAndExchangeP mem (Binary oldval newval))); + predicate(UseZGC && !ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong); + effect(TEMP_DEF res, KILL cr); + + ins_cost(2 * VOLATILE_REF_COST); + + format %{ "cmpxchg $res = $mem, $oldval, $newval" %} + + ins_encode %{ + guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + false /* acquire */, true /* release */, false /* weak */, $res$$Register); + if (barrier_data() != XLoadBarrierElided) { + Label good; + __ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset())); + __ andr(rscratch1, rscratch1, $res$$Register); + __ cbz(rscratch1, good); + x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + false /* acquire */, true /* release */, false /* weak */, $res$$Register); + __ bind(good); + } + %} + + ins_pipe(pipe_slow); +%} + +instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ + match(Set res (CompareAndExchangeP mem (Binary oldval newval))); + predicate(UseZGC && !ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong); + effect(TEMP_DEF res, KILL cr); + + ins_cost(2 * VOLATILE_REF_COST); + + format %{ "cmpxchg $res = $mem, $oldval, $newval" %} + + ins_encode %{ + guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + true /* acquire */, true /* release */, false /* weak */, $res$$Register); + if (barrier_data() != XLoadBarrierElided) { + Label good; + __ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset())); + __ andr(rscratch1, rscratch1, $res$$Register); + __ cbz(rscratch1, good); + x_load_barrier_slow_path(_masm, this, Address($mem$$Register), 
$res$$Register /* ref */, rscratch1 /* tmp */); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + true /* acquire */, true /* release */, false /* weak */, $res$$Register); + __ bind(good); + } + %} + + ins_pipe(pipe_slow); +%} + +instruct xGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ + match(Set prev (GetAndSetP mem newv)); + predicate(UseZGC && !ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); + effect(TEMP_DEF prev, KILL cr); + + ins_cost(2 * VOLATILE_REF_COST); + + format %{ "atomic_xchg $prev, $newv, [$mem]" %} + + ins_encode %{ + __ atomic_xchg($prev$$Register, $newv$$Register, $mem$$Register); + x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data()); + %} + + ins_pipe(pipe_serial); +%} + +instruct xGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ + match(Set prev (GetAndSetP mem newv)); + predicate(UseZGC && !ZGenerational && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() != 0)); + effect(TEMP_DEF prev, KILL cr); + + ins_cost(VOLATILE_REF_COST); + + format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %} + + ins_encode %{ + __ atomic_xchgal($prev$$Register, $newv$$Register, $mem$$Register); + x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data()); + %} + ins_pipe(pipe_serial); +%} diff --git a/src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.cpp new file mode 100644 index 0000000000000..6c3cea73d1a79 --- /dev/null +++ b/src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.cpp @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/z/zAddress.hpp" +#include "gc/z/zBarrierSetAssembler.hpp" +#include "gc/z/zGlobals.hpp" +#include "runtime/globals.hpp" +#include "runtime/os.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/powerOfTwo.hpp" + +#ifdef LINUX +#include <sys/mman.h> +#endif // LINUX + +// Default value if probing is not implemented for a certain platform: 128TB +static const size_t DEFAULT_MAX_ADDRESS_BIT = 47; +// Minimum value returned, if probing fails: 64GB +static const size_t MINIMUM_MAX_ADDRESS_BIT = 36; + +static size_t probe_valid_max_address_bit() { +#ifdef LINUX + size_t max_address_bit = 0; + const size_t page_size = os::vm_page_size(); + for (size_t i = DEFAULT_MAX_ADDRESS_BIT; i > MINIMUM_MAX_ADDRESS_BIT; --i) { + const uintptr_t base_addr = ((uintptr_t) 1U) << i; + if (msync((void*)base_addr, page_size, MS_ASYNC) == 0) { + // msync succeeded, the address is valid, and maybe even already mapped. + max_address_bit = i; + break; + } + if (errno != ENOMEM) { + // Some error occurred. This should never happen, but msync + // has some undefined behavior, hence ignore this bit. +#ifdef ASSERT + fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno)); +#else // ASSERT + log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno)); +#endif // ASSERT + continue; + } + // Since msync failed with ENOMEM, the page might not be mapped. + // Try to map it, to see if the address is valid. + void* const result_addr = mmap((void*) base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0); + if (result_addr != MAP_FAILED) { + munmap(result_addr, page_size); + } + if ((uintptr_t) result_addr == base_addr) { + // address is valid + max_address_bit = i; + break; + } + } + if (max_address_bit == 0) { + // probing failed, allocate a very high page and take that bit as the maximum + const uintptr_t high_addr = ((uintptr_t) 1U) << DEFAULT_MAX_ADDRESS_BIT; + void* const result_addr = mmap((void*) high_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0); + if (result_addr != MAP_FAILED) { + max_address_bit = BitsPerSize_t - count_leading_zeros((size_t) result_addr) - 1; + munmap(result_addr, page_size); + } + } + log_info_p(gc, init)("Probing address space for the highest valid bit: " SIZE_FORMAT, max_address_bit); + return MAX2(max_address_bit, MINIMUM_MAX_ADDRESS_BIT); +#else // LINUX + return DEFAULT_MAX_ADDRESS_BIT; +#endif // LINUX +} + +size_t ZPlatformAddressOffsetBits() { + const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1; + const size_t max_address_offset_bits = valid_max_address_offset_bits - 3; + const size_t min_address_offset_bits = max_address_offset_bits - 2; + const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio); + const size_t address_offset_bits = log2i_exact(address_offset); + return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); +} + +size_t ZPlatformAddressHeapBaseShift() { + return ZPlatformAddressOffsetBits(); +} + +void ZGlobalsPointers::pd_set_good_masks() { + BarrierSetAssembler::clear_patching_epoch(); +} diff --git a/src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.hpp new file mode 100644 index 0000000000000..430ee53ebfbfc --- /dev/null +++ 
b/src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.hpp @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef CPU_AARCH64_GC_Z_ZADDRESS_AARCH64_HPP +#define CPU_AARCH64_GC_Z_ZADDRESS_AARCH64_HPP + +#include "utilities/globalDefinitions.hpp" + +const size_t ZPointerLoadShift = 16; + +size_t ZPlatformAddressOffsetBits(); +size_t ZPlatformAddressHeapBaseShift(); + +#endif // CPU_AARCH64_GC_Z_ZADDRESS_AARCH64_HPP diff --git a/src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.inline.hpp b/src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.inline.hpp new file mode 100644 index 0000000000000..1102254a037d5 --- /dev/null +++ b/src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.inline.hpp @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef CPU_AARCH64_GC_Z_ZADDRESS_AARCH64_INLINE_HPP +#define CPU_AARCH64_GC_Z_ZADDRESS_AARCH64_INLINE_HPP + +#include "utilities/globalDefinitions.hpp" + +inline uintptr_t ZPointer::remap_bits(uintptr_t colored) { + return (colored ^ ZPointerRemappedMask) & ZPointerRemappedMask; +} + +inline constexpr int ZPointer::load_shift_lookup(uintptr_t value) { + return ZPointerLoadShift; +} + +#endif // CPU_AARCH64_GC_Z_ZADDRESS_AARCH64_INLINE_HPP diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp index 6f4d201a27af8..2f5f356337d3f 100644 --- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp @@ -25,12 +25,16 @@ #include "asm/macroAssembler.inline.hpp" #include "code/codeBlob.hpp" #include "code/vmreg.inline.hpp" +#include "gc/z/zAddress.hpp" #include "gc/z/zBarrier.inline.hpp" #include "gc/z/zBarrierSet.hpp" #include "gc/z/zBarrierSetAssembler.hpp" #include "gc/z/zBarrierSetRuntime.hpp" #include "gc/z/zThreadLocalData.hpp" #include "memory/resourceArea.hpp" +#include "nativeInst_aarch64.hpp" +#include "runtime/icache.hpp" +#include "runtime/jniHandles.hpp" #include "runtime/sharedRuntime.hpp" #include "utilities/macros.hpp" #ifdef COMPILER1 @@ -40,6 +44,7 @@ #endif // COMPILER1 #ifdef COMPILER2 #include "gc/z/c2/zBarrierSetC2.hpp" +#include "opto/output.hpp" #endif // COMPILER2 #ifdef PRODUCT @@ -51,6 +56,52 @@ #undef __ #define __ masm-> +// Helper for saving and restoring registers across a runtime call that does +// not have any live vector registers. +class ZRuntimeCallSpill { +private: + MacroAssembler* _masm; + Register _result; + + void save() { + MacroAssembler* masm = _masm; + + __ enter(true /* strip_ret_addr */); + if (_result != noreg) { + __ push_call_clobbered_registers_except(RegSet::of(_result)); + } else { + __ push_call_clobbered_registers(); + } + } + + void restore() { + MacroAssembler* masm = _masm; + + if (_result != noreg) { + // Make sure _result has the return value. + if (_result != r0) { + __ mov(_result, r0); + } + + __ pop_call_clobbered_registers_except(RegSet::of(_result)); + } else { + __ pop_call_clobbered_registers(); + } + __ leave(); + } + +public: + ZRuntimeCallSpill(MacroAssembler* masm, Register result) + : _masm(masm), + _result(result) { + save(); + } + + ~ZRuntimeCallSpill() { + restore(); + } +}; + void ZBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, @@ -64,78 +115,329 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm, return; } - assert_different_registers(rscratch1, rscratch2, src.base()); - assert_different_registers(rscratch1, rscratch2, dst); + assert_different_registers(tmp1, tmp2, src.base(), noreg); + assert_different_registers(tmp1, tmp2, src.index()); + assert_different_registers(tmp1, tmp2, dst, noreg); + assert_different_registers(tmp2, rscratch1); Label done; + Label uncolor; // Load bad mask into scratch register. - __ ldr(rscratch1, address_bad_mask_from_thread(rthread)); - __ lea(rscratch2, src); - __ ldr(dst, src); + const bool on_non_strong = + (decorators & ON_WEAK_OOP_REF) != 0 || + (decorators & ON_PHANTOM_OOP_REF) != 0; + + if (on_non_strong) { + __ ldr(tmp1, mark_bad_mask_from_thread(rthread)); + } else { + __ ldr(tmp1, load_bad_mask_from_thread(rthread)); + } + + __ lea(tmp2, src); + __ ldr(dst, tmp2); // Test reference against bad mask. If mask bad, then we need to fix it up. 
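+ // With generational ZGC the mask to test against depends on the access strength: weak and phantom loads use the mark-bad mask loaded above, all other loads use the load-bad mask.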
- __ tst(dst, rscratch1); - __ br(Assembler::EQ, done); + __ tst(dst, tmp1); + __ br(Assembler::EQ, uncolor); - __ enter(/*strip_ret_addr*/true); + { + // Call VM + ZRuntimeCallSpill rcs(masm, dst); - __ push_call_clobbered_registers_except(RegSet::of(dst)); + if (c_rarg0 != dst) { + __ mov(c_rarg0, dst); + } + __ mov(c_rarg1, tmp2); - if (c_rarg0 != dst) { - __ mov(c_rarg0, dst); + __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); } - __ mov(c_rarg1, rscratch2); - __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); + // Slow-path has already uncolored + __ b(done); - // Make sure dst has the return value. - if (dst != r0) { - __ mov(dst, r0); - } + __ bind(uncolor); - __ pop_call_clobbered_registers_except(RegSet::of(dst)); - __ leave(); + // Remove the color bits + __ lsr(dst, dst, ZPointerLoadShift); __ bind(done); } -#ifdef ASSERT +void ZBarrierSetAssembler::store_barrier_fast(MacroAssembler* masm, + Address ref_addr, + Register rnew_zaddress, + Register rnew_zpointer, + Register rtmp, + bool in_nmethod, + bool is_atomic, + Label& medium_path, + Label& medium_path_continuation) const { + assert_different_registers(ref_addr.base(), rnew_zpointer, rtmp); + assert_different_registers(ref_addr.index(), rnew_zpointer, rtmp); + assert_different_registers(rnew_zaddress, rnew_zpointer, rtmp); + + if (in_nmethod) { + if (is_atomic) { + __ ldrh(rtmp, ref_addr); + // Atomic operations must ensure that the contents of memory are store-good before + // an atomic operation can execute. + // A not relocatable object could have spurious raw null pointers in its fields after + // getting promoted to the old generation. + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBeforeMov); + __ movzw(rnew_zpointer, barrier_Relocation::unpatched); + __ cmpw(rtmp, rnew_zpointer); + } else { + __ ldr(rtmp, ref_addr); + // Stores on relocatable objects never need to deal with raw null pointers in fields. + // Raw null pointers may only exist in the young generation, as they get pruned when + // the object is relocated to old. And no pre-write barrier needs to perform any action + // in the young generation. 
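+ // The store-bad mask is patched into the movzw immediate below; a non-zero tst result means the pre-value is not store-good and the medium path is taken.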
+ __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreBadBeforeMov); + __ movzw(rnew_zpointer, barrier_Relocation::unpatched); + __ tst(rtmp, rnew_zpointer); + } + __ br(Assembler::NE, medium_path); + __ bind(medium_path_continuation); + assert_different_registers(rnew_zaddress, rnew_zpointer); + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBeforeMov); + __ movzw(rnew_zpointer, barrier_Relocation::unpatched); + __ orr(rnew_zpointer, rnew_zpointer, rnew_zaddress, Assembler::LSL, ZPointerLoadShift); + } else { + assert(!is_atomic, "atomics outside of nmethods not supported"); + __ lea(rtmp, ref_addr); + __ ldr(rtmp, rtmp); + __ ldr(rnew_zpointer, Address(rthread, ZThreadLocalData::store_bad_mask_offset())); + __ tst(rtmp, rnew_zpointer); + __ br(Assembler::NE, medium_path); + __ bind(medium_path_continuation); + if (rnew_zaddress == noreg) { + __ eor(rnew_zpointer, rnew_zpointer, rnew_zpointer); + } else { + __ mov(rnew_zpointer, rnew_zaddress); + } + + // Load the current good shift, and add the color bits + __ lsl(rnew_zpointer, rnew_zpointer, ZPointerLoadShift); + __ ldr(rtmp, Address(rthread, ZThreadLocalData::store_good_mask_offset())); + __ orr(rnew_zpointer, rnew_zpointer, rtmp); + } +} + +static void store_barrier_buffer_add(MacroAssembler* masm, + Address ref_addr, + Register tmp1, + Register tmp2, + Label& slow_path) { + Address buffer(rthread, ZThreadLocalData::store_barrier_buffer_offset()); + assert_different_registers(ref_addr.base(), ref_addr.index(), tmp1, tmp2); + + __ ldr(tmp1, buffer); + + // Combined pointer bump and check if the buffer is disabled or full + __ ldr(tmp2, Address(tmp1, ZStoreBarrierBuffer::current_offset())); + __ cmp(tmp2, (uint8_t)0); + __ br(Assembler::EQ, slow_path); + + // Bump the pointer + __ sub(tmp2, tmp2, sizeof(ZStoreBarrierEntry)); + __ str(tmp2, Address(tmp1, ZStoreBarrierBuffer::current_offset())); + + // Compute the buffer entry address + __ lea(tmp2, Address(tmp2, ZStoreBarrierBuffer::buffer_offset())); + __ add(tmp2, tmp2, tmp1); + + // Compute and log the store address + __ lea(tmp1, ref_addr); + __ str(tmp1, Address(tmp2, in_bytes(ZStoreBarrierEntry::p_offset()))); + + // Load and log the prev value + __ ldr(tmp1, tmp1); + __ str(tmp1, Address(tmp2, in_bytes(ZStoreBarrierEntry::prev_offset()))); +} + +void ZBarrierSetAssembler::store_barrier_medium(MacroAssembler* masm, + Address ref_addr, + Register rtmp1, + Register rtmp2, + Register rtmp3, + bool is_native, + bool is_atomic, + Label& medium_path_continuation, + Label& slow_path, + Label& slow_path_continuation) const { + assert_different_registers(ref_addr.base(), ref_addr.index(), rtmp1, rtmp2); + + // The reason to end up in the medium path is that the pre-value was not 'good'. + + if (is_native) { + __ b(slow_path); + __ bind(slow_path_continuation); + __ b(medium_path_continuation); + } else if (is_atomic) { + // Atomic accesses can get to the medium fast path because the value was a + // raw null value. If it was not null, then there is no doubt we need to take a slow path. + __ lea(rtmp2, ref_addr); + __ ldr(rtmp1, rtmp2); + __ cbnz(rtmp1, slow_path); + + // If we get this far, we know there is a young raw null value in the field. 
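+ // Try to upgrade the raw null to a store-good colored null with a weak CAS; if the CAS loses a race, fall back to the slow path.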
+ __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBeforeMov); + __ movzw(rtmp1, barrier_Relocation::unpatched); + __ cmpxchg(rtmp2, zr, rtmp1, + Assembler::xword, + false /* acquire */, false /* release */, true /* weak */, + rtmp3); + __ br(Assembler::NE, slow_path); + + __ bind(slow_path_continuation); + __ b(medium_path_continuation); + } else { + // A non-atomic relocatable object won't get to the medium fast path due to a + // raw null in the young generation. We only get here because the field is bad. + // In this path we don't need any self healing, so we can avoid a runtime call + // most of the time by buffering the store barrier to be applied lazily. + store_barrier_buffer_add(masm, + ref_addr, + rtmp1, + rtmp2, + slow_path); + __ bind(slow_path_continuation); + __ b(medium_path_continuation); + } +} void ZBarrierSetAssembler::store_at(MacroAssembler* masm, - DecoratorSet decorators, - BasicType type, - Address dst, - Register val, - Register tmp1, - Register tmp2, - Register tmp3) { - // Verify value - if (is_reference_type(type)) { - // Note that src could be noreg, which means we - // are storing null and can skip verification. - if (val != noreg) { - Label done; - - // tmp1, tmp2 and tmp3 are often set to noreg. - RegSet savedRegs = RegSet::of(rscratch1); - __ push(savedRegs, sp); - - __ ldr(rscratch1, address_bad_mask_from_thread(rthread)); - __ tst(val, rscratch1); - __ br(Assembler::EQ, done); - __ stop("Verify oop store failed"); - __ should_not_reach_here(); - __ bind(done); - __ pop(savedRegs, sp); + DecoratorSet decorators, + BasicType type, + Address dst, + Register val, + Register tmp1, + Register tmp2, + Register tmp3) { + if (!ZBarrierSet::barrier_needed(decorators, type)) { + BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3); + return; + } + + bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; + + assert_different_registers(val, tmp1, dst.base(), dst.index()); + + if (dest_uninitialized) { + if (val == noreg) { + __ eor(tmp1, tmp1, tmp1); + } else { + __ mov(tmp1, val); } + // Add the color bits + __ lsl(tmp1, tmp1, ZPointerLoadShift); + __ ldr(tmp2, Address(rthread, ZThreadLocalData::store_good_mask_offset())); + __ orr(tmp1, tmp2, tmp1); + } else { + Label done; + Label medium; + Label medium_continuation; + Label slow; + Label slow_continuation; + store_barrier_fast(masm, dst, val, tmp1, tmp2, false, false, medium, medium_continuation); + __ b(done); + __ bind(medium); + store_barrier_medium(masm, + dst, + tmp1, + tmp2, + noreg /* tmp3 */, + false /* is_native */, + false /* is_atomic */, + medium_continuation, + slow, + slow_continuation); + + __ bind(slow); + { + // Call VM + ZRuntimeCallSpill rcs(masm, noreg); + __ lea(c_rarg0, dst); + __ MacroAssembler::call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr(), 1); + } + + __ b(slow_continuation); + __ bind(done); } // Store value - BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, noreg); + BarrierSetAssembler::store_at(masm, decorators, type, dst, tmp1, tmp2, tmp3, noreg); } -#endif // ASSERT +static FloatRegister z_copy_load_bad_vreg = v17; +static FloatRegister z_copy_store_good_vreg = v18; +static FloatRegister z_copy_store_bad_vreg = v19; + +static void load_wide_arraycopy_masks(MacroAssembler* masm) { + __ lea(rscratch1, ExternalAddress((address)&ZPointerVectorLoadBadMask)); + __ ldrq(z_copy_load_bad_vreg, Address(rscratch1, 0)); + __ lea(rscratch1, 
ExternalAddress((address)&ZPointerVectorStoreBadMask)); + __ ldrq(z_copy_store_bad_vreg, Address(rscratch1, 0)); + __ lea(rscratch1, ExternalAddress((address)&ZPointerVectorStoreGoodMask)); + __ ldrq(z_copy_store_good_vreg, Address(rscratch1, 0)); +} + +class ZCopyRuntimeCallSpill { +private: + MacroAssembler* _masm; + Register _result; + + void save() { + MacroAssembler* masm = _masm; + + __ enter(true /* strip_ret_addr */); + if (_result != noreg) { + __ push(__ call_clobbered_gp_registers() - RegSet::of(_result), sp); + } else { + __ push(__ call_clobbered_gp_registers(), sp); + } + int neonSize = wordSize * 2; + __ sub(sp, sp, 4 * neonSize); + __ st1(v0, v1, v2, v3, Assembler::T16B, Address(sp, 0)); + __ sub(sp, sp, 4 * neonSize); + __ st1(v4, v5, v6, v7, Assembler::T16B, Address(sp, 0)); + __ sub(sp, sp, 4 * neonSize); + __ st1(v16, v17, v18, v19, Assembler::T16B, Address(sp, 0)); + } + + void restore() { + MacroAssembler* masm = _masm; + + int neonSize = wordSize * 2; + __ ld1(v16, v17, v18, v19, Assembler::T16B, Address(sp, 0)); + __ add(sp, sp, 4 * neonSize); + __ ld1(v4, v5, v6, v7, Assembler::T16B, Address(sp, 0)); + __ add(sp, sp, 4 * neonSize); + __ ld1(v0, v1, v2, v3, Assembler::T16B, Address(sp, 0)); + __ add(sp, sp, 4 * neonSize); + if (_result != noreg) { + if (_result != r0) { + __ mov(_result, r0); + } + __ pop(__ call_clobbered_gp_registers() - RegSet::of(_result), sp); + } else { + __ pop(__ call_clobbered_gp_registers(), sp); + } + __ leave(); + } + +public: + ZCopyRuntimeCallSpill(MacroAssembler* masm, Register result) + : _masm(masm), + _result(result) { + save(); + } + + ~ZCopyRuntimeCallSpill() { + restore(); + } +}; void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, @@ -151,30 +453,338 @@ void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {"); - assert_different_registers(src, count, rscratch1); + load_wide_arraycopy_masks(masm); + + BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue"); +} + +static void copy_load_barrier(MacroAssembler* masm, + Register ref, + Address src, + Register tmp) { + Label done; + + __ ldr(tmp, Address(rthread, ZThreadLocalData::load_bad_mask_offset())); + + // Test reference against bad mask. If mask bad, then we need to fix it up. + __ tst(ref, tmp); + __ br(Assembler::EQ, done); + + { + // Call VM + ZCopyRuntimeCallSpill rcs(masm, ref); + + __ lea(c_rarg1, src); + + if (c_rarg0 != ref) { + __ mov(c_rarg0, ref); + } + + __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(IN_HEAP | ON_STRONG_OOP_REF), 2); + } + + // Slow-path has uncolored; revert + __ lsl(ref, ref, ZPointerLoadShift); + + __ bind(done); +} + +static void copy_load_barrier(MacroAssembler* masm, + FloatRegister ref, + Address src, + Register tmp1, + Register tmp2, + FloatRegister vec_tmp) { + Label done; + + // Test reference against bad mask. If mask bad, then we need to fix it up. 
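+ // Both oops in the vector are tested at once: umaxv reduces the 16 bytes to their maximum, so the compare below only sees zero when no lane has bad bits set.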
+ __ andr(vec_tmp, Assembler::T16B, ref, z_copy_load_bad_vreg); + __ umaxv(vec_tmp, Assembler::T16B, vec_tmp); + __ fcmpd(vec_tmp, 0.0); + __ br(Assembler::EQ, done); + + __ umov(tmp2, ref, Assembler::D, 0); + copy_load_barrier(masm, tmp2, Address(src.base(), src.offset() + 0), tmp1); + __ mov(ref, __ D, 0, tmp2); + + __ umov(tmp2, ref, Assembler::D, 1); + copy_load_barrier(masm, tmp2, Address(src.base(), src.offset() + 8), tmp1); + __ mov(ref, __ D, 1, tmp2); + + __ bind(done); +} + +static void copy_store_barrier(MacroAssembler* masm, + Register pre_ref, + Register new_ref, + Address src, + Register tmp1, + Register tmp2) { + Label done; + Label slow; + + // Test reference against bad mask. If mask bad, then we need to fix it up. + __ ldr(tmp1, Address(rthread, ZThreadLocalData::store_bad_mask_offset())); + __ tst(pre_ref, tmp1); + __ br(Assembler::EQ, done); + + store_barrier_buffer_add(masm, src, tmp1, tmp2, slow); + __ b(done); + + __ bind(slow); + { + // Call VM + ZCopyRuntimeCallSpill rcs(masm, noreg); + + __ lea(c_rarg0, src); + + __ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr(), 1); + } + + __ bind(done); + + if (new_ref != noreg) { + // Set store-good color, replacing whatever color was there before + __ ldr(tmp1, Address(rthread, ZThreadLocalData::store_good_mask_offset())); + __ bfi(new_ref, tmp1, 0, 16); + } +} + +static void copy_store_barrier(MacroAssembler* masm, + FloatRegister pre_ref, + FloatRegister new_ref, + Address src, + Register tmp1, + Register tmp2, + Register tmp3, + FloatRegister vec_tmp) { + Label done; + + // Test reference against bad mask. If mask bad, then we need to fix it up. + __ andr(vec_tmp, Assembler::T16B, pre_ref, z_copy_store_bad_vreg); + __ umaxv(vec_tmp, Assembler::T16B, vec_tmp); + __ fcmpd(vec_tmp, 0.0); + __ br(Assembler::EQ, done); + + // Extract the 2 oops from the pre_ref vector register + __ umov(tmp2, pre_ref, Assembler::D, 0); + copy_store_barrier(masm, tmp2, noreg, Address(src.base(), src.offset() + 0), tmp1, tmp3); + + __ umov(tmp2, pre_ref, Assembler::D, 1); + copy_store_barrier(masm, tmp2, noreg, Address(src.base(), src.offset() + 8), tmp1, tmp3); + + __ bind(done); + + // Remove any bad colors + __ bic(new_ref, Assembler::T16B, new_ref, z_copy_store_bad_vreg); + // Add good colors + __ orr(new_ref, Assembler::T16B, new_ref, z_copy_store_good_vreg); +} + +class ZAdjustAddress { +private: + MacroAssembler* _masm; + Address _addr; + int _pre_adjustment; + int _post_adjustment; + + void pre() { + if (_pre_adjustment != 0) { + _masm->add(_addr.base(), _addr.base(), _addr.offset()); + } + } + + void post() { + if (_post_adjustment != 0) { + _masm->add(_addr.base(), _addr.base(), _addr.offset()); + } + } + +public: + ZAdjustAddress(MacroAssembler* masm, Address addr) : + _masm(masm), + _addr(addr), + _pre_adjustment(addr.getMode() == Address::pre ? addr.offset() : 0), + _post_adjustment(addr.getMode() == Address::post ? addr.offset() : 0) { + pre(); + } - __ push(saved_regs, sp); + ~ZAdjustAddress() { + post(); + } - if (count == c_rarg0) { - if (src == c_rarg1) { - // exactly backwards!! 
- __ mov(rscratch1, c_rarg0); - __ mov(c_rarg0, c_rarg1); - __ mov(c_rarg1, rscratch1); + Address address() { + if (_pre_adjustment != 0 || _post_adjustment != 0) { + return Address(_addr.base(), 0); } else { - __ mov(c_rarg1, count); - __ mov(c_rarg0, src); + return Address(_addr.base(), _addr.offset()); } + } +}; + +void ZBarrierSetAssembler::copy_load_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + Register dst1, + Register dst2, + Address src, + Register tmp) { + if (!is_reference_type(type)) { + BarrierSetAssembler::copy_load_at(masm, decorators, type, bytes, dst1, dst2, src, noreg); + return; + } + + ZAdjustAddress adjust(masm, src); + src = adjust.address(); + + BarrierSetAssembler::copy_load_at(masm, decorators, type, bytes, dst1, dst2, src, noreg); + + if (bytes == 8) { + copy_load_barrier(masm, dst1, src, tmp); + } else if (bytes == 16) { + copy_load_barrier(masm, dst1, Address(src.base(), src.offset() + 0), tmp); + copy_load_barrier(masm, dst2, Address(src.base(), src.offset() + 8), tmp); } else { - __ mov(c_rarg0, src); - __ mov(c_rarg1, count); + ShouldNotReachHere(); + } + if ((decorators & ARRAYCOPY_CHECKCAST) != 0) { + __ lsr(dst1, dst1, ZPointerLoadShift); + } +} + +void ZBarrierSetAssembler::copy_store_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + Address dst, + Register src1, + Register src2, + Register tmp1, + Register tmp2, + Register tmp3) { + if (!is_reference_type(type)) { + BarrierSetAssembler::copy_store_at(masm, decorators, type, bytes, dst, src1, src2, noreg, noreg, noreg); + return; } - __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2); + ZAdjustAddress adjust(masm, dst); + dst = adjust.address(); - __ pop(saved_regs, sp); + if ((decorators & ARRAYCOPY_CHECKCAST) != 0) { + __ lsl(src1, src1, ZPointerLoadShift); + } - BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue"); + bool is_dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; + + if (is_dest_uninitialized) { + __ ldr(tmp1, Address(rthread, ZThreadLocalData::store_good_mask_offset())); + if (bytes == 8) { + __ bfi(src1, tmp1, 0, 16); + } else if (bytes == 16) { + __ bfi(src1, tmp1, 0, 16); + __ bfi(src2, tmp1, 0, 16); + } else { + ShouldNotReachHere(); + } + } else { + // Store barrier pre values and color new values + if (bytes == 8) { + __ ldr(tmp1, dst); + copy_store_barrier(masm, tmp1, src1, dst, tmp2, tmp3); + } else if (bytes == 16) { + Address dst1(dst.base(), dst.offset() + 0); + Address dst2(dst.base(), dst.offset() + 8); + + __ ldr(tmp1, dst1); + copy_store_barrier(masm, tmp1, src1, dst1, tmp2, tmp3); + + __ ldr(tmp1, dst2); + copy_store_barrier(masm, tmp1, src2, dst2, tmp2, tmp3); + } else { + ShouldNotReachHere(); + } + } + + // Store new values + BarrierSetAssembler::copy_store_at(masm, decorators, type, bytes, dst, src1, src2, noreg, noreg, noreg); +} + +void ZBarrierSetAssembler::copy_load_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + FloatRegister dst1, + FloatRegister dst2, + Address src, + Register tmp1, + Register tmp2, + FloatRegister vec_tmp) { + if (!is_reference_type(type)) { + BarrierSetAssembler::copy_load_at(masm, decorators, type, bytes, dst1, dst2, src, noreg, noreg, fnoreg); + return; + } + + ZAdjustAddress adjust(masm, src); + src = adjust.address(); + + BarrierSetAssembler::copy_load_at(masm, decorators, type, bytes, dst1, dst2, src, noreg, noreg, fnoreg); + + if (bytes == 32) { + copy_load_barrier(masm, dst1, 
Address(src.base(), src.offset() + 0), tmp1, tmp2, vec_tmp); + copy_load_barrier(masm, dst2, Address(src.base(), src.offset() + 16), tmp1, tmp2, vec_tmp); + } else { + ShouldNotReachHere(); + } +} + +void ZBarrierSetAssembler::copy_store_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + Address dst, + FloatRegister src1, + FloatRegister src2, + Register tmp1, + Register tmp2, + Register tmp3, + FloatRegister vec_tmp1, + FloatRegister vec_tmp2, + FloatRegister vec_tmp3) { + if (!is_reference_type(type)) { + BarrierSetAssembler::copy_store_at(masm, decorators, type, bytes, dst, src1, src2, noreg, noreg, noreg, fnoreg, fnoreg, fnoreg); + return; + } + + bool is_dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; + + ZAdjustAddress adjust(masm, dst); + dst = adjust.address(); + + if (is_dest_uninitialized) { + if (bytes == 32) { + __ bic(src1, Assembler::T16B, src1, z_copy_store_bad_vreg); + __ orr(src1, Assembler::T16B, src1, z_copy_store_good_vreg); + __ bic(src2, Assembler::T16B, src2, z_copy_store_bad_vreg); + __ orr(src2, Assembler::T16B, src2, z_copy_store_good_vreg); + } else { + ShouldNotReachHere(); + } + } else { + // Load pre values + BarrierSetAssembler::copy_load_at(masm, decorators, type, bytes, vec_tmp1, vec_tmp2, dst, noreg, noreg, fnoreg); + + // Store barrier pre values and color new values + if (bytes == 32) { + copy_store_barrier(masm, vec_tmp1, src1, Address(dst.base(), dst.offset() + 0), tmp1, tmp2, tmp3, vec_tmp3); + copy_store_barrier(masm, vec_tmp2, src2, Address(dst.base(), dst.offset() + 16), tmp1, tmp2, tmp3, vec_tmp3); + } else { + ShouldNotReachHere(); + } + } + + // Store new values + BarrierSetAssembler::copy_store_at(masm, decorators, type, bytes, dst, src1, src2, noreg, noreg, noreg, fnoreg, fnoreg, fnoreg); } void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, @@ -184,37 +794,141 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Label& slowpath) { BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {"); - assert_different_registers(jni_env, robj, tmp); + Label done, tagged, weak_tagged, uncolor; + + // Test for tag + __ tst(robj, JNIHandles::tag_mask); + __ br(Assembler::NE, tagged); - // Resolve jobject - BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath); + // Resolve local handle + __ ldr(robj, robj); + __ b(done); - // The Address offset is too large to direct load - -784. Our range is +127, -128. 
- __ mov(tmp, (int64_t)(in_bytes(ZThreadLocalData::address_bad_mask_offset()) - - in_bytes(JavaThread::jni_environment_offset()))); + __ bind(tagged); - // Load address bad mask - __ add(tmp, jni_env, tmp); - __ ldr(tmp, Address(tmp)); + // Test for weak tag + __ tst(robj, JNIHandles::TypeTag::weak_global); + __ br(Assembler::NE, weak_tagged); - // Check address bad mask + // Resolve global handle + __ ldr(robj, Address(robj, -JNIHandles::TypeTag::global)); + __ lea(tmp, load_bad_mask_from_jni_env(jni_env)); + __ ldr(tmp, tmp); __ tst(robj, tmp); __ br(Assembler::NE, slowpath); + __ b(uncolor); + + __ bind(weak_tagged); + + // Resolve weak handle + __ ldr(robj, Address(robj, -JNIHandles::TypeTag::weak_global)); + __ lea(tmp, mark_bad_mask_from_jni_env(jni_env)); + __ ldr(tmp, tmp); + __ tst(robj, tmp); + __ br(Assembler::NE, slowpath); + + __ bind(uncolor); + + // Uncolor + __ lsr(robj, robj, ZPointerLoadShift); + + __ bind(done); BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native"); } +static uint16_t patch_barrier_relocation_value(int format) { + switch (format) { + case ZBarrierRelocationFormatLoadGoodBeforeTbX: + return (uint16_t)exact_log2(ZPointerRemapped); + + case ZBarrierRelocationFormatMarkBadBeforeMov: + return (uint16_t)ZPointerMarkBadMask; + + case ZBarrierRelocationFormatStoreGoodBeforeMov: + return (uint16_t)ZPointerStoreGoodMask; + + case ZBarrierRelocationFormatStoreBadBeforeMov: + return (uint16_t)ZPointerStoreBadMask; + + default: + ShouldNotReachHere(); + return 0; + } +} + +static void change_immediate(uint32_t& instr, uint32_t imm, uint32_t start, uint32_t end) { + uint32_t imm_mask = ((1u << start) - 1u) ^ ((1u << (end + 1)) - 1u); + instr &= ~imm_mask; + instr |= imm << start; +} + +void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format) { + const uint16_t value = patch_barrier_relocation_value(format); + uint32_t* const patch_addr = (uint32_t*)addr; + + switch (format) { + case ZBarrierRelocationFormatLoadGoodBeforeTbX: + change_immediate(*patch_addr, value, 19, 23); + break; + case ZBarrierRelocationFormatStoreGoodBeforeMov: + case ZBarrierRelocationFormatMarkBadBeforeMov: + case ZBarrierRelocationFormatStoreBadBeforeMov: + change_immediate(*patch_addr, value, 5, 20); + break; + default: + ShouldNotReachHere(); + } + + OrderAccess::fence(); + ICache::invalidate_word((address)patch_addr); +} + #ifdef COMPILER1 #undef __ #define __ ce->masm()-> -void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, - LIR_Opr ref) const { - assert_different_registers(rscratch1, rthread, ref->as_register()); +static void z_uncolor(LIR_Assembler* ce, LIR_Opr ref) { + __ lsr(ref->as_register(), ref->as_register(), ZPointerLoadShift); +} + +static void z_color(LIR_Assembler* ce, LIR_Opr ref) { + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBeforeMov); + __ movzw(rscratch2, barrier_Relocation::unpatched); + __ orr(ref->as_register(), rscratch2, ref->as_register(), Assembler::LSL, ZPointerLoadShift); +} + +void ZBarrierSetAssembler::generate_c1_uncolor(LIR_Assembler* ce, LIR_Opr ref) const { + z_uncolor(ce, ref); +} + +void ZBarrierSetAssembler::generate_c1_color(LIR_Assembler* ce, LIR_Opr ref) const { + z_color(ce, ref); +} - __ ldr(rscratch1, address_bad_mask_from_thread(rthread)); - __ tst(ref->as_register(), rscratch1); +void ZBarrierSetAssembler::generate_c1_load_barrier(LIR_Assembler* ce, + LIR_Opr ref, + ZLoadBarrierStubC1* stub, + bool on_non_strong) const { + + if (on_non_strong) { + // Test 
against MarkBad mask + assert_different_registers(rscratch1, rthread, ref->as_register()); + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadBeforeMov); + __ movzw(rscratch1, barrier_Relocation::unpatched); + __ tst(ref->as_register(), rscratch1); + __ br(Assembler::NE, *stub->entry()); + z_uncolor(ce, ref); + } else { + Label good; + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeTbX); + __ tbz(ref->as_register(), barrier_Relocation::unpatched, good); + __ b(*stub->entry()); + __ bind(good); + z_uncolor(ce, ref); + } + __ bind(*stub->continuation()); } void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, @@ -272,6 +986,57 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, __ b(*stub->continuation()); } +void ZBarrierSetAssembler::generate_c1_store_barrier(LIR_Assembler* ce, + LIR_Address* addr, + LIR_Opr new_zaddress, + LIR_Opr new_zpointer, + ZStoreBarrierStubC1* stub) const { + Register rnew_zaddress = new_zaddress->as_register(); + Register rnew_zpointer = new_zpointer->as_register(); + + store_barrier_fast(ce->masm(), + ce->as_Address(addr), + rnew_zaddress, + rnew_zpointer, + rscratch2, + true, + stub->is_atomic(), + *stub->entry(), + *stub->continuation()); +} + +void ZBarrierSetAssembler::generate_c1_store_barrier_stub(LIR_Assembler* ce, + ZStoreBarrierStubC1* stub) const { + // Stub entry + __ bind(*stub->entry()); + Label slow; + Label slow_continuation; + store_barrier_medium(ce->masm(), + ce->as_Address(stub->ref_addr()->as_address_ptr()), + rscratch2, + stub->new_zpointer()->as_register(), + rscratch1, + false /* is_native */, + stub->is_atomic(), + *stub->continuation(), + slow, + slow_continuation); + + __ bind(slow); + + __ lea(stub->new_zpointer()->as_register(), ce->as_Address(stub->ref_addr()->as_address_ptr())); + + __ sub(sp, sp, 16); + // Setup arguments and call runtime stub + assert(stub->new_zpointer()->is_valid(), "invariant"); + ce->store_parameter(stub->new_zpointer()->as_register(), 0); + __ far_call(stub->runtime_stub()); + __ add(sp, sp, 16); + + // Stub exit + __ b(slow_continuation); +} + #undef __ #define __ sasm-> @@ -291,6 +1056,27 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* __ epilogue(); } + +void ZBarrierSetAssembler::generate_c1_store_barrier_runtime_stub(StubAssembler* sasm, + bool self_healing) const { + __ prologue("zgc_store_barrier stub", false); + + __ push_call_clobbered_registers(); + + // Setup arguments + __ load_parameter(0, c_rarg0); + + if (self_healing) { + __ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr(), 1); + } else { + __ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr(), 1); + } + + __ pop_call_clobbered_registers(); + + __ epilogue(); +} + #endif // COMPILER1 #ifdef COMPILER2 @@ -319,7 +1105,7 @@ class ZSaveLiveRegisters { PRegSet _p_regs; public: - void initialize(ZLoadBarrierStubC2* stub) { + void initialize(ZBarrierStubC2* stub) { // Record registers that needs to be saved/restored RegMaskIterator rmi(stub->live()); while (rmi.has_next()) { @@ -339,10 +1125,14 @@ class ZSaveLiveRegisters { } // Remove C-ABI SOE registers, scratch regs and _ref register that will be updated - _gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9, stub->ref()); + if (stub->result() != noreg) { + _gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9, stub->result()); + } else { + _gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9); 
+ } } - ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) : + ZSaveLiveRegisters(MacroAssembler* masm, ZBarrierStubC2* stub) : _masm(masm), _gp_regs(), _fp_regs(), @@ -429,10 +1219,13 @@ class ZSetupArguments { #define __ masm-> void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const { + Assembler::InlineSkippedInstructionsCounter skipped_counter(masm); BLOCK_COMMENT("ZLoadBarrierStubC2"); // Stub entry - __ bind(*stub->entry()); + if (!Compile::current()->output()->in_scratch_emit_size()) { + __ bind(*stub->entry()); + } { ZSaveLiveRegisters save_live_registers(masm, stub); @@ -444,19 +1237,221 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z __ b(*stub->continuation()); } +void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm, ZStoreBarrierStubC2* stub) const { + Assembler::InlineSkippedInstructionsCounter skipped_counter(masm); + BLOCK_COMMENT("ZStoreBarrierStubC2"); + + // Stub entry + __ bind(*stub->entry()); + + Label slow; + Label slow_continuation; + store_barrier_medium(masm, + stub->ref_addr(), + stub->new_zpointer(), + rscratch1, + rscratch2, + stub->is_native(), + stub->is_atomic(), + *stub->continuation(), + slow, + slow_continuation); + + __ bind(slow); + + { + ZSaveLiveRegisters save_live_registers(masm, stub); + __ lea(c_rarg0, stub->ref_addr()); + + if (stub->is_native()) { + __ lea(rscratch1, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr())); + } else if (stub->is_atomic()) { + __ lea(rscratch1, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr())); + } else { + __ lea(rscratch1, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr())); + } + __ blr(rscratch1); + } + + // Stub exit + __ b(slow_continuation); +} + +// Only handles forward branch jumps, target_offset >= branch_offset +static bool aarch64_test_and_branch_reachable(int branch_offset, int target_offset) { + assert(branch_offset >= 0, "branch to stub offsets must be positive"); + assert(target_offset >= 0, "offset in stubs section must be positive"); + assert(target_offset >= branch_offset, "forward branches only, branch_offset -> target_offset"); + + const int test_and_branch_delta_limit = 32 * K; + + const int test_and_branch_to_trampoline_delta = target_offset - branch_offset; + + return test_and_branch_to_trampoline_delta < test_and_branch_delta_limit; +} + +ZLoadBarrierStubC2Aarch64::ZLoadBarrierStubC2Aarch64(const MachNode* node, Address ref_addr, Register ref, int offset) + : ZLoadBarrierStubC2(node, ref_addr, ref), _test_and_branch_reachable_entry(), _offset(offset), _deferred_emit(false), _test_and_branch_reachable(false) { + PhaseOutput* const output = Compile::current()->output(); + if (output->in_scratch_emit_size()) { + return; + } + const int code_size = output->buffer_sizing_data()->_code; + const int offset_code = _offset; + // Assumption that the stub can always be reached from a branch immediate. 
(128 M Product, 2 M Debug) + // Same assumption is made in z_aarch64.ad + const int trampoline_offset = trampoline_stubs_count() * NativeInstruction::instruction_size; + _test_and_branch_reachable = aarch64_test_and_branch_reachable(offset_code, code_size + trampoline_offset); + if (_test_and_branch_reachable) { + inc_trampoline_stubs_count(); + } +} + +int ZLoadBarrierStubC2Aarch64::get_stub_size() { + PhaseOutput* const output = Compile::current()->output(); + assert(!output->in_scratch_emit_size(), "only used when emitting stubs"); + BufferBlob* const blob = output->scratch_buffer_blob(); + CodeBuffer cb(blob->content_begin(), (address)output->scratch_locs_memory() - blob->content_begin()); + MacroAssembler masm(&cb); + output->set_in_scratch_emit_size(true); + ZLoadBarrierStubC2::emit_code(masm); + output->set_in_scratch_emit_size(false); + return cb.insts_size(); +} + +ZLoadBarrierStubC2Aarch64* ZLoadBarrierStubC2Aarch64::create(const MachNode* node, Address ref_addr, Register ref, int offset) { + ZLoadBarrierStubC2Aarch64* const stub = new (Compile::current()->comp_arena()) ZLoadBarrierStubC2Aarch64(node, ref_addr, ref, offset); + register_stub(stub); + return stub; +} + +#undef __ +#define __ masm. + +void ZLoadBarrierStubC2Aarch64::emit_code(MacroAssembler& masm) { + PhaseOutput* const output = Compile::current()->output(); + const int branch_offset = _offset; + const int target_offset = __ offset(); + + // Deferred emission, emit actual stub + if (_deferred_emit) { + ZLoadBarrierStubC2::emit_code(masm); + return; + } + _deferred_emit = true; + + // No trampoline used, defer emission to after trampolines + if (!_test_and_branch_reachable) { + register_stub(this); + return; + } + + // Current assumption is that the barrier stubs are the first stubs emitted after the actual code + assert(stubs_start_offset() <= output->buffer_sizing_data()->_code, "stubs are assumed to be emitted directly after code and code_size is a hard limit on where it can start"); + + __ bind(_test_and_branch_reachable_entry); + + // Next branch's offset is unknown, but is > branch_offset + const int next_branch_offset = branch_offset + NativeInstruction::instruction_size; + // If emitting the stub directly does not interfere with emission of the next trampoline then do it to avoid a double jump. 
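+ // The check is conservative: it assumes the next branch sits immediately after this one and accounts for the full emitted size of this stub.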
+ if (aarch64_test_and_branch_reachable(next_branch_offset, target_offset + get_stub_size())) { + // The next potential trampoline will still be reachable even if we emit the whole stub + ZLoadBarrierStubC2::emit_code(masm); + } else { + // Emit trampoline and defer actual stub to the end + assert(aarch64_test_and_branch_reachable(branch_offset, target_offset), "trampoline should be reachable"); + __ b(*ZLoadBarrierStubC2::entry()); + register_stub(this); + } +} + +bool ZLoadBarrierStubC2Aarch64::is_test_and_branch_reachable() { + return _test_and_branch_reachable; +} + +Label* ZLoadBarrierStubC2Aarch64::entry() { + if (_test_and_branch_reachable) { + return &_test_and_branch_reachable_entry; + } + return ZBarrierStubC2::entry(); +} + +ZStoreBarrierStubC2Aarch64::ZStoreBarrierStubC2Aarch64(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic) + : ZStoreBarrierStubC2(node, ref_addr, new_zaddress, new_zpointer, is_native, is_atomic), _deferred_emit(false) {} + +ZStoreBarrierStubC2Aarch64* ZStoreBarrierStubC2Aarch64::create(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic) { + ZStoreBarrierStubC2Aarch64* const stub = new (Compile::current()->comp_arena()) ZStoreBarrierStubC2Aarch64(node, ref_addr, new_zaddress, new_zpointer, is_native, is_atomic); + register_stub(stub); + return stub; +} + +void ZStoreBarrierStubC2Aarch64::emit_code(MacroAssembler& masm) { + if (_deferred_emit) { + ZStoreBarrierStubC2::emit_code(masm); + return; + } + // Defer emission of store barriers so that trampolines are emitted first + _deferred_emit = true; + register_stub(this); +} + +#undef __ + #endif // COMPILER2 #undef __ #define __ masm-> void ZBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) { - // Check if mask is good. - // verifies that ZAddressBadMask & r0 == 0 - __ ldr(tmp2, Address(rthread, ZThreadLocalData::address_bad_mask_offset())); - __ andr(tmp1, obj, tmp2); - __ cbnz(tmp1, error); + // C1 calls verfy_oop in the middle of barriers, before they have been uncolored + // and after being colored. Therefore, we must deal with colored oops as well. + Label done; + Label check_oop; + Label check_zaddress; + int color_bits = ZPointerRemappedShift + ZPointerRemappedBits; + + uintptr_t shifted_base_start_mask = (UCONST64(1) << (ZAddressHeapBaseShift + color_bits + 1)) - 1; + uintptr_t shifted_base_end_mask = (UCONST64(1) << (ZAddressHeapBaseShift + 1)) - 1; + uintptr_t shifted_base_mask = shifted_base_start_mask ^ shifted_base_end_mask; + + uintptr_t shifted_address_end_mask = (UCONST64(1) << (color_bits + 1)) - 1; + uintptr_t shifted_address_mask = shifted_address_end_mask ^ (uintptr_t)CONST64(-1); + + __ get_nzcv(tmp2); + + // Check colored null + __ mov(tmp1, shifted_address_mask); + __ tst(tmp1, obj); + __ br(Assembler::EQ, done); + + // Check for zpointer + __ mov(tmp1, shifted_base_mask); + __ tst(tmp1, obj); + __ br(Assembler::EQ, check_oop); + + // Uncolor presumed zpointer + __ lsr(obj, obj, ZPointerLoadShift); + + __ b(check_zaddress); + + __ bind(check_oop); + + // make sure klass is 'reasonable', which is not zero. 
+ __ load_klass(tmp1, obj); // get klass + __ tst(tmp1, tmp1); + __ br(Assembler::EQ, error); // if klass is null it is broken + + __ bind(check_zaddress); + // Check if the oop is in the right area of memory + __ mov(tmp1, (intptr_t) Universe::verify_oop_mask()); + __ andr(tmp1, tmp1, obj); + __ mov(obj, (intptr_t) Universe::verify_oop_bits()); + __ cmp(tmp1, obj); + __ br(Assembler::NE, error); + + __ bind(done); - BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error); + __ set_nzcv(tmp2); } #undef __ diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp index c852dac3a4df5..00714e5c0c04b 100644 --- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,21 +27,32 @@ #include "code/vmreg.hpp" #include "oops/accessDecorators.hpp" #ifdef COMPILER2 +#include "gc/z/c2/zBarrierSetC2.hpp" #include "opto/optoreg.hpp" #endif // COMPILER2 #ifdef COMPILER1 +class LIR_Address; class LIR_Assembler; class LIR_Opr; class StubAssembler; class ZLoadBarrierStubC1; +class ZStoreBarrierStubC1; #endif // COMPILER1 #ifdef COMPILER2 +class MachNode; class Node; -class ZLoadBarrierStubC2; #endif // COMPILER2 +// ZBarrierRelocationFormatLoadGoodBeforeTbX is used for both tbnz and tbz +// They are patched in the same way, their immediate value has the same +// structure +const int ZBarrierRelocationFormatLoadGoodBeforeTbX = 0; +const int ZBarrierRelocationFormatMarkBadBeforeMov = 1; +const int ZBarrierRelocationFormatStoreGoodBeforeMov = 2; +const int ZBarrierRelocationFormatStoreBadBeforeMov = 3; + class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { public: virtual void load_at(MacroAssembler* masm, @@ -52,7 +63,27 @@ class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { Register tmp1, Register tmp2); -#ifdef ASSERT + void store_barrier_fast(MacroAssembler* masm, + Address ref_addr, + Register rnew_zaddress, + Register rnew_zpointer, + Register rtmp, + bool in_nmethod, + bool is_atomic, + Label& medium_path, + Label& medium_path_continuation) const; + + void store_barrier_medium(MacroAssembler* masm, + Address ref_addr, + Register rtmp1, + Register rtmp2, + Register rtmp3, + bool is_native, + bool is_atomic, + Label& medium_path_continuation, + Label& slow_path, + Label& slow_path_continuation) const; + virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, @@ -61,7 +92,6 @@ class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { Register tmp1, Register tmp2, Register tmp3); -#endif // ASSERT virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, @@ -71,23 +101,89 @@ class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { Register count, RegSet saved_regs); + virtual void copy_load_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + Register dst1, + Register dst2, + Address src, + Register tmp); + + virtual void copy_store_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + Address dst, + Register src1, + Register src2, + Register tmp1, + Register tmp2, + Register tmp3); 
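+ // The FloatRegister overloads below handle the 32-byte copies done through NEON registers; the Register overloads above cover the 8- and 16-byte cases.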
+ + virtual void copy_load_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + FloatRegister dst1, + FloatRegister dst2, + Address src, + Register tmp1, + Register tmp2, + FloatRegister vec_tmp); + + virtual void copy_store_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + Address dst, + FloatRegister src1, + FloatRegister src2, + Register tmp1, + Register tmp2, + Register tmp3, + FloatRegister vec_tmp1, + FloatRegister vec_tmp2, + FloatRegister vec_tmp3); + virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env, Register robj, Register tmp, Label& slowpath); - virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; } + virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_instruction_and_data_patch; } + + void patch_barrier_relocation(address addr, int format); + + void patch_barriers() {} #ifdef COMPILER1 - void generate_c1_load_barrier_test(LIR_Assembler* ce, - LIR_Opr ref) const; + void generate_c1_color(LIR_Assembler* ce, LIR_Opr ref) const; + void generate_c1_uncolor(LIR_Assembler* ce, LIR_Opr ref) const; + + void generate_c1_load_barrier(LIR_Assembler* ce, + LIR_Opr ref, + ZLoadBarrierStubC1* stub, + bool on_non_strong) const; void generate_c1_load_barrier_stub(LIR_Assembler* ce, ZLoadBarrierStubC1* stub) const; void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) const; + + void generate_c1_store_barrier(LIR_Assembler* ce, + LIR_Address* addr, + LIR_Opr new_zaddress, + LIR_Opr new_zpointer, + ZStoreBarrierStubC1* stub) const; + + void generate_c1_store_barrier_stub(LIR_Assembler* ce, + ZStoreBarrierStubC1* stub) const; + + void generate_c1_store_barrier_runtime_stub(StubAssembler* sasm, + bool self_healing) const; #endif // COMPILER1 #ifdef COMPILER2 @@ -96,9 +192,103 @@ class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { void generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const; + void generate_c2_store_barrier_stub(MacroAssembler* masm, + ZStoreBarrierStubC2* stub) const; #endif // COMPILER2 void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error); }; +#ifdef COMPILER2 + +// Load barriers on aarch64 are implemented with a test-and-branch immediate instruction. +// This immediate has a max delta of 32K. Because of this the branch is implemented with +// a small jump, as follows: +// __ tbz(ref, barrier_Relocation::unpatched, good); +// __ b(*stub->entry()); +// __ bind(good); +// +// If we can guarantee that the *stub->entry() label is within 32K we can replace the above +// code with: +// __ tbnz(ref, barrier_Relocation::unpatched, *stub->entry()); +// +// From the branch shortening part of PhaseOutput we get a pessimistic code size that the code +// will not grow beyond. +// +// The stubs objects are created and registered when the load barriers are emitted. The decision +// between emitting the long branch or the test and branch is done at this point and uses the +// pessimistic code size from branch shortening. +// +// After the code has been emitted the barrier set will emit all the stubs. When the stubs are +// emitted we know the real code size. Because of this the trampoline jump can be skipped in +// favour of emitting the stub directly if it does not interfere with the next trampoline stub. 
+// (In the paragraph above, "interfere" is meant with respect to the test-and-branch distance.)
+//
+// The algorithm for emitting the load barrier branches and stubs now has three versions,
+// depending on the distance between the barrier and the stub.
+// Version 1: Not reachable with a test-and-branch immediate
+// Version 2: Reachable with a test-and-branch immediate via trampoline
+// Version 3: Reachable with a test-and-branch immediate without trampoline
+//
+// +--------------------- Code ----------------------+
+// | ***                                              |
+// | b(stub1)                                         | (Version 1)
+// | ***                                              |
+// | tbnz(ref, barrier_Relocation::unpatched, tramp)  | (Version 2)
+// | ***                                              |
+// | tbnz(ref, barrier_Relocation::unpatched, stub3)  | (Version 3)
+// | ***                                              |
+// +--------------------- Stub ----------------------+
+// | tramp: b(stub2)                                  | (Trampoline slot)
+// | stub3:                                           |
+// |   * Stub Code*                                   |
+// | stub1:                                           |
+// |   * Stub Code*                                   |
+// | stub2:                                           |
+// |   * Stub Code*                                   |
+// +-------------------------------------------------+
+//
+// Version 1: Is emitted if the pessimistic distance between the branch instruction and the current
+//            trampoline slot cannot fit in a test-and-branch immediate.
+//
+// Version 2: Is emitted if the distance between the branch instruction and the current trampoline
+//            slot can fit in a test-and-branch immediate, but emitting the stub directly would
+//            interfere with the next trampoline.
+//
+// Version 3: Same as version 2, but emitting the stub directly (skipping the trampoline) does not
+//            interfere with the next trampoline.
+//
+class ZLoadBarrierStubC2Aarch64 : public ZLoadBarrierStubC2 {
+private:
+  Label _test_and_branch_reachable_entry;
+  const int _offset;
+  bool _deferred_emit;
+  bool _test_and_branch_reachable;
+
+  ZLoadBarrierStubC2Aarch64(const MachNode* node, Address ref_addr, Register ref, int offset);
+
+  int get_stub_size();
+public:
+  static ZLoadBarrierStubC2Aarch64* create(const MachNode* node, Address ref_addr, Register ref, int offset);
+
+  virtual void emit_code(MacroAssembler& masm);
+  bool is_test_and_branch_reachable();
+  Label* entry();
+};
+
+
+class ZStoreBarrierStubC2Aarch64 : public ZStoreBarrierStubC2 {
+private:
+  bool _deferred_emit;
+
+  ZStoreBarrierStubC2Aarch64(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic);
+
+public:
+  static ZStoreBarrierStubC2Aarch64* create(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic);
+
+  virtual void emit_code(MacroAssembler& masm);
+};
+
+#endif // COMPILER2
+
 #endif // CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP
diff --git a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp
index 6ced6afe02a19..b5f2f8525c603 100644
--- a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -24,10 +24,8 @@ #ifndef CPU_AARCH64_GC_Z_ZGLOBALS_AARCH64_HPP #define CPU_AARCH64_GC_Z_ZGLOBALS_AARCH64_HPP -const size_t ZPlatformHeapViews = 3; -const size_t ZPlatformCacheLineSize = 64; +#include "utilities/globalDefinitions.hpp" -size_t ZPlatformAddressOffsetBits(); -size_t ZPlatformAddressMetadataShift(); +const size_t ZPlatformCacheLineSize = 64; #endif // CPU_AARCH64_GC_Z_ZGLOBALS_AARCH64_HPP diff --git a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad index bd1c2cc9f938a..8c698635ad0f5 100644 --- a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad +++ b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad @@ -1,5 +1,5 @@ // -// Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -31,21 +31,70 @@ source_hpp %{ source %{ -static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) { - if (barrier_data == ZLoadBarrierElided) { +#include "gc/z/zBarrierSetAssembler.hpp" + +static void z_color(MacroAssembler& _masm, const MachNode* node, Register dst, Register src) { + assert_different_registers(src, dst); + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBeforeMov); + __ movzw(dst, barrier_Relocation::unpatched); + __ orr(dst, dst, src, Assembler::LSL, ZPointerLoadShift); +} + +static void z_uncolor(MacroAssembler& _masm, const MachNode* node, Register ref) { + __ lsr(ref, ref, ZPointerLoadShift); +} + +static void z_keep_alive_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) { + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadBeforeMov); + __ movzw(tmp, barrier_Relocation::unpatched); + __ tst(ref, tmp); + ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref); + __ br(Assembler::NE, *stub->entry()); + z_uncolor(_masm, node, ref); + __ bind(*stub->continuation()); +} + +static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) { + Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm); + const bool on_non_strong = + ((node->barrier_data() & ZBarrierWeak) != 0) || + ((node->barrier_data() & ZBarrierPhantom) != 0); + + if (on_non_strong) { + z_keep_alive_load_barrier(_masm, node, ref_addr, ref, tmp); return; } - ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data); - __ ldr(tmp, Address(rthread, ZThreadLocalData::address_bad_mask_offset())); - __ andr(tmp, tmp, ref); - __ cbnz(tmp, *stub->entry()); + + if (node->barrier_data() == ZBarrierElided) { + z_uncolor(_masm, node, ref); + return; + } + + ZLoadBarrierStubC2Aarch64* const stub = ZLoadBarrierStubC2Aarch64::create(node, ref_addr, ref, __ offset()); + if (stub->is_test_and_branch_reachable()) { + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeTbX); + __ tbnz(ref, barrier_Relocation::unpatched, *stub->entry()); + } else { + Label good; + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeTbX); + __ tbz(ref, barrier_Relocation::unpatched, good); + __ b(*stub->entry()); + __ bind(good); + } + z_uncolor(_masm, node, ref); 
__ bind(*stub->continuation()); } -static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) { - ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, ZLoadBarrierStrong); - __ b(*stub->entry()); - __ bind(*stub->continuation()); +static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, Register tmp, bool is_atomic) { + Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm); + if (node->barrier_data() == ZBarrierElided) { + z_color(_masm, node, rnew_zpointer, rnew_zaddress); + } else { + bool is_native = (node->barrier_data() & ZBarrierNative) != 0; + ZStoreBarrierStubC2Aarch64* const stub = ZStoreBarrierStubC2Aarch64::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic); + ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler(); + bs_asm->store_barrier_fast(&_masm, ref_addr, rnew_zaddress, rnew_zpointer, tmp, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation()); + } } %} @@ -54,7 +103,7 @@ static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr) %{ match(Set dst (LoadP mem)); - predicate(UseZGC && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() != 0)); + predicate(UseZGC && ZGenerational && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0); effect(TEMP dst, KILL cr); ins_cost(4 * INSN_COST); @@ -64,7 +113,7 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr) ins_encode %{ const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); __ ldr($dst$$Register, ref_addr); - z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, barrier_data()); + z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch1); %} ins_pipe(iload_reg_mem); @@ -74,7 +123,7 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr) instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg cr) %{ match(Set dst (LoadP mem)); - predicate(UseZGC && needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0); + predicate(UseZGC && ZGenerational && needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0); effect(TEMP dst, KILL cr); ins_cost(VOLATILE_REF_COST); @@ -82,18 +131,53 @@ instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg format %{ "ldar $dst, $mem\t" %} ins_encode %{ + const Address ref_addr = Address($mem$$Register); __ ldar($dst$$Register, $mem$$Register); - z_load_barrier(_masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, barrier_data()); + z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch1); + %} + + ins_pipe(pipe_serial); +%} + +// Store Pointer +instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr) +%{ + predicate(UseZGC && ZGenerational && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0); + match(Set mem (StoreP mem src)); + effect(TEMP tmp, KILL cr); + + ins_cost(125); // XXX + format %{ "movq $mem, $src\t# ptr" %} + ins_encode %{ + const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); + z_store_barrier(_masm, this, ref_addr, $src$$Register, $tmp$$Register, rscratch2, false /* is_atomic */); + __ str($tmp$$Register, ref_addr); %} + ins_pipe(pipe_serial); +%} +// Store Pointer Volatile 
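Before the volatile variant below, it may help to see what z_color and z_uncolor (defined in the source block above) compute. A conceptual sketch in plain C++, where the shift amount and the store-good bit pattern are stand-ins for ZPointerLoadShift and the immediate patched in by barrier_Relocation at runtime:

  #include <cstdint>

  static const int      kPointerLoadShift = 16;     // stand-in for ZPointerLoadShift
  static const uint16_t kStoreGoodBits    = 0x0004; // stand-in for the patched movzw immediate

  // z_color: movzw(dst, <patched>) followed by orr(dst, dst, src, LSL, ZPointerLoadShift)
  uint64_t color(uint64_t zaddress)   { return (zaddress << kPointerLoadShift) | kStoreGoodBits; }

  // z_uncolor: lsr(ref, ref, ZPointerLoadShift)
  uint64_t uncolor(uint64_t zpointer) { return zpointer >> kPointerLoadShift; }

This is why, in zStoreP above, the colored value is built in $tmp and it is $tmp, not $src, that gets stored.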
+instruct zStorePVolatile(indirect mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr) +%{ + predicate(UseZGC && ZGenerational && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0); + match(Set mem (StoreP mem src)); + effect(TEMP tmp, KILL cr); + + ins_cost(125); // XXX + format %{ "movq $mem, $src\t# ptr" %} + ins_encode %{ + const Address ref_addr = Address($mem$$Register); + z_store_barrier(_masm, this, ref_addr, $src$$Register, $tmp$$Register, rscratch2, false /* is_atomic */); + __ stlr($tmp$$Register, $mem$$Register); + %} ins_pipe(pipe_serial); %} -instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ +instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{ match(Set res (CompareAndSwapP mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); - effect(KILL cr, TEMP_DEF res); + predicate(UseZGC && ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); + effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr); ins_cost(2 * VOLATILE_REF_COST); @@ -102,108 +186,83 @@ instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva ins_encode %{ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, - false /* acquire */, true /* release */, false /* weak */, rscratch2); + Address ref_addr($mem$$Register); + z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */); + z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register); + __ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::xword, + false /* acquire */, true /* release */, false /* weak */, noreg); __ cset($res$$Register, Assembler::EQ); - if (barrier_data() != ZLoadBarrierElided) { - Label good; - __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset())); - __ andr(rscratch1, rscratch1, rscratch2); - __ cbz(rscratch1, good); - z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, - false /* acquire */, true /* release */, false /* weak */, rscratch2); - __ cset($res$$Register, Assembler::EQ); - __ bind(good); - } %} ins_pipe(pipe_slow); %} -instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ +instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{ match(Set res (CompareAndSwapP mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)); - effect(KILL cr, TEMP_DEF res); + predicate(UseZGC && ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); + effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr); ins_cost(2 * VOLATILE_REF_COST); - format %{ "cmpxchg $mem, $oldval, $newval\n\t" - "cset $res, EQ" %} + format %{ "cmpxchg $mem, $oldval, $newval\n\t" + "cset $res, EQ" %} ins_encode %{ 
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, - true /* acquire */, true /* release */, false /* weak */, rscratch2); + Address ref_addr($mem$$Register); + z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */); + z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register); + __ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::xword, + true /* acquire */, true /* release */, false /* weak */, noreg); __ cset($res$$Register, Assembler::EQ); - if (barrier_data() != ZLoadBarrierElided) { - Label good; - __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset())); - __ andr(rscratch1, rscratch1, rscratch2); - __ cbz(rscratch1, good); - z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */ ); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, - true /* acquire */, true /* release */, false /* weak */, rscratch2); - __ cset($res$$Register, Assembler::EQ); - __ bind(good); - } %} ins_pipe(pipe_slow); %} -instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ + +instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{ match(Set res (CompareAndExchangeP mem (Binary oldval newval))); - predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); - effect(TEMP_DEF res, KILL cr); + predicate(UseZGC && ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); + effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr); ins_cost(2 * VOLATILE_REF_COST); - format %{ "cmpxchg $res = $mem, $oldval, $newval" %} + format %{ "cmpxchg $mem, $oldval, $newval\n\t" + "cset $res, EQ" %} ins_encode %{ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + Address ref_addr($mem$$Register); + z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */); + z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register); + __ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::xword, false /* acquire */, true /* release */, false /* weak */, $res$$Register); - if (barrier_data() != ZLoadBarrierElided) { - Label good; - __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset())); - __ andr(rscratch1, rscratch1, $res$$Register); - __ cbz(rscratch1, good); - z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, - false /* acquire */, true /* release */, false /* weak */, $res$$Register); - __ bind(good); - } + z_uncolor(_masm, this, $res$$Register); %} ins_pipe(pipe_slow); %} -instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ +instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{ match(Set res (CompareAndExchangeP mem (Binary oldval newval))); - predicate(UseZGC && 
needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); - effect(TEMP_DEF res, KILL cr); + predicate(UseZGC && ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); + effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr); ins_cost(2 * VOLATILE_REF_COST); - format %{ "cmpxchg $res = $mem, $oldval, $newval" %} + format %{ "cmpxchg $mem, $oldval, $newval\n\t" + "cset $res, EQ" %} ins_encode %{ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + Address ref_addr($mem$$Register); + z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */); + z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register); + __ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::xword, true /* acquire */, true /* release */, false /* weak */, $res$$Register); - if (barrier_data() != ZLoadBarrierElided) { - Label good; - __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset())); - __ andr(rscratch1, rscratch1, $res$$Register); - __ cbz(rscratch1, good); - z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, - true /* acquire */, true /* release */, false /* weak */, $res$$Register); - __ bind(good); - } + z_uncolor(_masm, this, $res$$Register); %} ins_pipe(pipe_slow); @@ -211,16 +270,17 @@ instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ match(Set prev (GetAndSetP mem newv)); - predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); - effect(TEMP_DEF prev, KILL cr); + predicate(UseZGC && ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); + effect(TEMP prev, KILL cr); ins_cost(2 * VOLATILE_REF_COST); format %{ "atomic_xchg $prev, $newv, [$mem]" %} ins_encode %{ - __ atomic_xchg($prev$$Register, $newv$$Register, $mem$$Register); - z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data()); + z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, rscratch2, true /* is_atomic */); + __ atomic_xchg($prev$$Register, $prev$$Register, $mem$$Register); + z_uncolor(_masm, this, $prev$$Register); %} ins_pipe(pipe_serial); @@ -228,16 +288,18 @@ instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ match(Set prev (GetAndSetP mem newv)); - predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() != 0)); - effect(TEMP_DEF prev, KILL cr); + predicate(UseZGC && ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); + effect(TEMP prev, KILL cr); - ins_cost(VOLATILE_REF_COST); + ins_cost(2 * VOLATILE_REF_COST); - format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %} + format %{ "atomic_xchg $prev, $newv, [$mem]" %} ins_encode %{ - __ atomic_xchgal($prev$$Register, $newv$$Register, $mem$$Register); - z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data()); + z_store_barrier(_masm, this, 
Address($mem$$Register), $newv$$Register, $prev$$Register, rscratch2, true /* is_atomic */); + __ atomic_xchgal($prev$$Register, $prev$$Register, $mem$$Register); + z_uncolor(_masm, this, $prev$$Register); %} + ins_pipe(pipe_serial); %} diff --git a/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp b/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp index 622145ad155d6..94f26b6d062c6 100644 --- a/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp @@ -58,8 +58,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false; #define SUPPORT_RESERVED_STACK_AREA -#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false - #if defined(__APPLE__) || defined(_WIN64) #define R18_RESERVED #define R18_RESERVED_ONLY(code) code diff --git a/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp b/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp index 6ffb9aa724394..16741299f8a6c 100644 --- a/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp @@ -122,27 +122,28 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei } void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &cbuf, methodHandle& method, jint pc_offset, JVMCI_TRAPS) { + NativeCall* call = NULL; switch (_next_call_type) { case INLINE_INVOKE: - break; + return; case INVOKEVIRTUAL: case INVOKEINTERFACE: { assert(!method->is_static(), "cannot call static method with invokeinterface"); - NativeCall* call = nativeCall_at(_instructions->start() + pc_offset); + call = nativeCall_at(_instructions->start() + pc_offset); _instructions->relocate(call->instruction_address(), virtual_call_Relocation::spec(_invoke_mark_pc)); call->trampoline_jump(cbuf, SharedRuntime::get_resolve_virtual_call_stub(), JVMCI_CHECK); break; } case INVOKESTATIC: { assert(method->is_static(), "cannot call non-static method with invokestatic"); - NativeCall* call = nativeCall_at(_instructions->start() + pc_offset); + call = nativeCall_at(_instructions->start() + pc_offset); _instructions->relocate(call->instruction_address(), relocInfo::static_call_type); call->trampoline_jump(cbuf, SharedRuntime::get_resolve_static_call_stub(), JVMCI_CHECK); break; } case INVOKESPECIAL: { assert(!method->is_static(), "cannot call static method with invokespecial"); - NativeCall* call = nativeCall_at(_instructions->start() + pc_offset); + call = nativeCall_at(_instructions->start() + pc_offset); _instructions->relocate(call->instruction_address(), relocInfo::opt_virtual_call_type); call->trampoline_jump(cbuf, SharedRuntime::get_resolve_opt_virtual_call_stub(), JVMCI_CHECK); break; @@ -151,6 +152,15 @@ void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &cbuf, methodHandle& metho JVMCI_ERROR("invalid _next_call_type value"); break; } + if (Continuations::enabled()) { + // Check for proper post_call_nop + NativePostCallNop* nop = nativePostCallNop_at(call->next_instruction_address()); + if (nop == NULL) { + JVMCI_ERROR("missing post call nop at offset %d", pc_offset); + } else { + _instructions->relocate(call->next_instruction_address(), relocInfo::post_call_nop_type); + } + } } void CodeInstaller::pd_relocate_poll(address pc, jint mark, JVMCI_TRAPS) { diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp index ef2fe7cef8c97..6374562903804 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp 
@@ -5008,6 +5008,8 @@ address MacroAssembler::count_positives(Register ary1, Register len, Register re return pc(); } +// Clobbers: rscratch1, rscratch2, rflags +// May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals) address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, Register tmp4, Register tmp5, Register result, Register cnt1, int elem_size) { @@ -5557,10 +5559,12 @@ void MacroAssembler::fill_words(Register base, Register cnt, Register value) // Using 'umaxv' in the ASCII-case comes with a small penalty but does // avoid additional bloat. // +// Clobbers: src, dst, res, rscratch1, rscratch2, rflags void MacroAssembler::encode_iso_array(Register src, Register dst, Register len, Register res, bool ascii, FloatRegister vtmp0, FloatRegister vtmp1, - FloatRegister vtmp2, FloatRegister vtmp3) + FloatRegister vtmp2, FloatRegister vtmp3, + FloatRegister vtmp4, FloatRegister vtmp5) { Register cnt = res; Register max = rscratch1; @@ -5579,8 +5583,8 @@ void MacroAssembler::encode_iso_array(Register src, Register dst, br(LT, DONE_32); ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64))); // Extract lower bytes. - FloatRegister vlo0 = v4; - FloatRegister vlo1 = v5; + FloatRegister vlo0 = vtmp4; + FloatRegister vlo1 = vtmp5; uzp1(vlo0, T16B, vtmp0, vtmp1); uzp1(vlo1, T16B, vtmp2, vtmp3); // Merge bits... @@ -5653,6 +5657,7 @@ void MacroAssembler::encode_iso_array(Register src, Register dst, } // Inflate byte[] array to char[]. +// Clobbers: src, dst, len, rflags, rscratch1, v0-v6 address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, FloatRegister vtmp1, FloatRegister vtmp2, FloatRegister vtmp3, Register tmp4) { @@ -5761,8 +5766,9 @@ address MacroAssembler::byte_array_inflate(Register src, Register dst, Register void MacroAssembler::char_array_compress(Register src, Register dst, Register len, Register res, FloatRegister tmp0, FloatRegister tmp1, - FloatRegister tmp2, FloatRegister tmp3) { - encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3); + FloatRegister tmp2, FloatRegister tmp3, + FloatRegister tmp4, FloatRegister tmp5) { + encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5); // Adjust result: res == len ? 
len : 0 cmp(len, res); csel(res, res, zr, EQ); diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp index 7e58720727cdd..6b45be8ce43ad 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp @@ -1393,12 +1393,14 @@ class MacroAssembler: public Assembler { void char_array_compress(Register src, Register dst, Register len, Register res, FloatRegister vtmp0, FloatRegister vtmp1, - FloatRegister vtmp2, FloatRegister vtmp3); + FloatRegister vtmp2, FloatRegister vtmp3, + FloatRegister vtmp4, FloatRegister vtmp5); void encode_iso_array(Register src, Register dst, Register len, Register res, bool ascii, FloatRegister vtmp0, FloatRegister vtmp1, - FloatRegister vtmp2, FloatRegister vtmp3); + FloatRegister vtmp2, FloatRegister vtmp3, + FloatRegister vtmp4, FloatRegister vtmp5); void fast_log(FloatRegister vtmp0, FloatRegister vtmp1, FloatRegister vtmp2, FloatRegister vtmp3, FloatRegister vtmp4, FloatRegister vtmp5, diff --git a/src/hotspot/cpu/aarch64/relocInfo_aarch64.hpp b/src/hotspot/cpu/aarch64/relocInfo_aarch64.hpp index 904fe08142744..47cfc86d17f7d 100644 --- a/src/hotspot/cpu/aarch64/relocInfo_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/relocInfo_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -33,7 +33,8 @@ // the two lowest offset bits can always be discarded. offset_unit = 4, // Must be at least 1 for RelocInfo::narrow_oop_in_const. - format_width = 1 + // Must be at least 2 for ZGC GC barrier patching. + format_width = 2 }; public: diff --git a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp index 389bb0d7d0e88..ed3602892988a 100644 --- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp @@ -5151,6 +5151,7 @@ class StubGenerator: public StubCodeGenerator { // result = r0 - return value. 
Already contains "false" // cnt1 = r10 - amount of elements left to check, reduced by wordSize // r3-r5 are reserved temporary registers + // Clobbers: v0-v7 when UseSIMDForArrayEquals, rscratch1, rscratch2 address generate_large_array_equals() { Register a1 = r1, a2 = r2, result = r0, cnt1 = r10, tmp1 = rscratch1, tmp2 = rscratch2, tmp3 = r3, tmp4 = r4, tmp5 = r5, tmp6 = r11, @@ -5734,6 +5735,8 @@ class StubGenerator: public StubCodeGenerator { // R2 = cnt1 // R3 = str1 // R4 = cnt2 + // Clobbers: rscratch1, rscratch2, v0, v1, rflags + // // This generic linear code use few additional ideas, which makes it faster: // 1) we can safely keep at least 1st register of pattern(since length >= 8) // in order to skip initial loading(help in systems with 1 ld pipeline) @@ -6048,6 +6051,7 @@ class StubGenerator: public StubCodeGenerator { // R3 = len >> 3 // V0 = 0 // v1 = loaded 8 bytes + // Clobbers: r0, r1, r3, rscratch1, rflags, v0-v6 address generate_large_byte_array_inflate() { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", "large_byte_array_inflate"); diff --git a/src/hotspot/cpu/aarch64/stubRoutines_aarch64.hpp b/src/hotspot/cpu/aarch64/stubRoutines_aarch64.hpp index fc93e14c080ff..76ea4cad7832f 100644 --- a/src/hotspot/cpu/aarch64/stubRoutines_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/stubRoutines_aarch64.hpp @@ -38,8 +38,8 @@ enum platform_dependent_constants { // simply increase sizes if too small (assembler will crash if too small) _initial_stubs_code_size = 10000, _continuation_stubs_code_size = 2000, - _compiler_stubs_code_size = 30000, - _final_stubs_code_size = 20000 + _compiler_stubs_code_size = 30000 ZGC_ONLY(+10000), + _final_stubs_code_size = 20000 ZGC_ONLY(+60000) }; class aarch64 { diff --git a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp index 53001b9f12477..59826782e99be 100644 --- a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp +++ b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp @@ -30,7 +30,6 @@ #include "gc/shared/collectedHeap.hpp" #include "gc/shared/tlab_globals.hpp" #include "interpreter/interpreter.hpp" -#include "logging/log.hpp" #include "oops/arrayOop.hpp" #include "oops/markWord.hpp" #include "runtime/basicLock.hpp" @@ -215,7 +214,6 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions"); if (LockingMode == LM_LIGHTWEIGHT) { - log_trace(fastlock)("C1_MacroAssembler::lock fast"); Register t1 = disp_hdr; // Needs saving, probably Register t2 = hdr; // blow @@ -277,7 +275,6 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_ assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions"); if (LockingMode == LM_LIGHTWEIGHT) { - log_trace(fastlock)("C1_MacroAssembler::unlock fast"); ldr(obj, Address(disp_hdr, obj_offset)); diff --git a/src/hotspot/cpu/arm/c2_MacroAssembler_arm.cpp b/src/hotspot/cpu/arm/c2_MacroAssembler_arm.cpp index ae4c42fc88765..f887f5d889cc8 100644 --- a/src/hotspot/cpu/arm/c2_MacroAssembler_arm.cpp +++ b/src/hotspot/cpu/arm/c2_MacroAssembler_arm.cpp @@ -25,7 +25,6 @@ #include "precompiled.hpp" #include "asm/assembler.hpp" #include "asm/assembler.inline.hpp" -#include "logging/log.hpp" #include "opto/c2_MacroAssembler.hpp" #include "runtime/basicLock.hpp" @@ -93,7 +92,6 @@ void C2_MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratc } if (LockingMode == LM_LIGHTWEIGHT) { - 
log_trace(fastlock)("C2_MacroAssembler::lock fast"); fast_lock_2(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */, 1 /* savemask (save t1) */, done); @@ -144,7 +142,6 @@ void C2_MacroAssembler::fast_unlock(Register Roop, Register Rbox, Register Rscra Label done; if (LockingMode == LM_LIGHTWEIGHT) { - log_trace(fastlock)("C2_MacroAssembler::unlock fast"); fast_unlock_2(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */, 1 /* savemask (save t1) */, done); diff --git a/src/hotspot/cpu/arm/interp_masm_arm.cpp b/src/hotspot/cpu/arm/interp_masm_arm.cpp index 9b1902cae8bfa..2d8149c331073 100644 --- a/src/hotspot/cpu/arm/interp_masm_arm.cpp +++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp @@ -911,7 +911,6 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) { } if (LockingMode == LM_LIGHTWEIGHT) { - log_trace(fastlock)("InterpreterMacroAssembler lock fast"); fast_lock_2(Robj, R0 /* t1 */, Rmark /* t2 */, Rtemp /* t3 */, 0 /* savemask */, slow_case); b(done); } else if (LockingMode == LM_LEGACY) { @@ -1026,8 +1025,6 @@ void InterpreterMacroAssembler::unlock_object(Register Rlock) { if (LockingMode == LM_LIGHTWEIGHT) { - log_trace(fastlock)("InterpreterMacroAssembler unlock fast"); - // Check for non-symmetric locking. This is allowed by the spec and the interpreter // must handle it. ldr(Rtemp, Address(Rthread, JavaThread::lock_stack_top_offset())); diff --git a/src/hotspot/cpu/ppc/assembler_ppc.hpp b/src/hotspot/cpu/ppc/assembler_ppc.hpp index 094d894d2da3c..afb0164d3d334 100644 --- a/src/hotspot/cpu/ppc/assembler_ppc.hpp +++ b/src/hotspot/cpu/ppc/assembler_ppc.hpp @@ -237,10 +237,12 @@ class Assembler : public AbstractAssembler { enum opcdxos_masks { XL_FORM_OPCODE_MASK = (63u << OPCODE_SHIFT) | (1023u << 1), + ANDI_OPCODE_MASK = (63u << OPCODE_SHIFT), ADDI_OPCODE_MASK = (63u << OPCODE_SHIFT), ADDIS_OPCODE_MASK = (63u << OPCODE_SHIFT), BXX_OPCODE_MASK = (63u << OPCODE_SHIFT), BCXX_OPCODE_MASK = (63u << OPCODE_SHIFT), + CMPLI_OPCODE_MASK = (63u << OPCODE_SHIFT), // trap instructions TDI_OPCODE_MASK = (63u << OPCODE_SHIFT), TWI_OPCODE_MASK = (63u << OPCODE_SHIFT), @@ -1478,6 +1480,9 @@ class Assembler : public AbstractAssembler { static bool is_addis(int x) { return ADDIS_OPCODE == (x & ADDIS_OPCODE_MASK); } + static bool is_andi(int x) { + return ANDI_OPCODE == (x & ANDI_OPCODE_MASK); + } static bool is_bxx(int x) { return BXX_OPCODE == (x & BXX_OPCODE_MASK); } @@ -1502,6 +1507,9 @@ class Assembler : public AbstractAssembler { static bool is_bclr(int x) { return BCLR_OPCODE == (x & XL_FORM_OPCODE_MASK); } + static bool is_cmpli(int x) { + return CMPLI_OPCODE == (x & CMPLI_OPCODE_MASK); + } static bool is_li(int x) { return is_addi(x) && inv_ra_field(x)==0; } diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp index 4f48f7efe508f..ce347fe66d974 100644 --- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp @@ -2686,7 +2686,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { // Obj may not be an oop. if (op->code() == lir_lock) { MonitorEnterStub* stub = (MonitorEnterStub*)op->stub(); - if (!UseHeavyMonitors) { + if (LockingMode != LM_MONITOR) { assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); // Add debug info for NullPointerException only if one is possible. 
if (op->info() != nullptr) { @@ -2712,7 +2712,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { } } else { assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock"); - if (!UseHeavyMonitors) { + if (LockingMode != LM_MONITOR) { assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); __ unlock_object(hdr, obj, lock, *op->stub()->entry()); } else { diff --git a/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.cpp new file mode 100644 index 0000000000000..b83994ee8de94 --- /dev/null +++ b/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.cpp @@ -0,0 +1,585 @@ +/* + * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2022 SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "asm/register.hpp" +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "code/codeBlob.hpp" +#include "code/vmreg.inline.hpp" +#include "gc/x/xBarrier.inline.hpp" +#include "gc/x/xBarrierSet.hpp" +#include "gc/x/xBarrierSetAssembler.hpp" +#include "gc/x/xBarrierSetRuntime.hpp" +#include "gc/x/xThreadLocalData.hpp" +#include "memory/resourceArea.hpp" +#include "register_ppc.hpp" +#include "runtime/sharedRuntime.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/macros.hpp" +#ifdef COMPILER1 +#include "c1/c1_LIRAssembler.hpp" +#include "c1/c1_MacroAssembler.hpp" +#include "gc/x/c1/xBarrierSetC1.hpp" +#endif // COMPILER1 +#ifdef COMPILER2 +#include "gc/x/c2/xBarrierSetC2.hpp" +#endif // COMPILER2 + +#undef __ +#define __ masm-> + +void XBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register base, RegisterOrConstant ind_or_offs, Register dst, + Register tmp1, Register tmp2, + MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null) { + __ block_comment("load_at (zgc) {"); + + // Check whether a special gc barrier is required for this particular load + // (e.g. 
whether it's a reference load or not) + if (!XBarrierSet::barrier_needed(decorators, type)) { + BarrierSetAssembler::load_at(masm, decorators, type, base, ind_or_offs, dst, + tmp1, tmp2, preservation_level, L_handle_null); + return; + } + + if (ind_or_offs.is_register()) { + assert_different_registers(base, ind_or_offs.as_register(), tmp1, tmp2, R0, noreg); + assert_different_registers(dst, ind_or_offs.as_register(), tmp1, tmp2, R0, noreg); + } else { + assert_different_registers(base, tmp1, tmp2, R0, noreg); + assert_different_registers(dst, tmp1, tmp2, R0, noreg); + } + + /* ==== Load the pointer using the standard implementation for the actual heap access + and the decompression of compressed pointers ==== */ + // Result of 'load_at' (standard implementation) will be written back to 'dst'. + // As 'base' is required for the C-call, it must be reserved in case of a register clash. + Register saved_base = base; + if (base == dst) { + __ mr(tmp2, base); + saved_base = tmp2; + } + + BarrierSetAssembler::load_at(masm, decorators, type, base, ind_or_offs, dst, + tmp1, noreg, preservation_level, L_handle_null); + + /* ==== Check whether pointer is dirty ==== */ + Label skip_barrier; + + // Load bad mask into scratch register. + __ ld(tmp1, (intptr_t) XThreadLocalData::address_bad_mask_offset(), R16_thread); + + // The color bits of the to-be-tested pointer do not have to be equivalent to the 'bad_mask' testing bits. + // A pointer is classified as dirty if any of the color bits that also match the bad mask is set. + // Conversely, it follows that the logical AND of the bad mask and the pointer must be zero + // if the pointer is not dirty. + // Only dirty pointers must be processed by this barrier, so we can skip it in case the latter condition holds true. + __ and_(tmp1, tmp1, dst); + __ beq(CCR0, skip_barrier); + + /* ==== Invoke barrier ==== */ + int nbytes_save = 0; + + const bool needs_frame = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR; + const bool preserve_gp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS; + const bool preserve_fp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS; + + const bool preserve_R3 = dst != R3_ARG1; + + if (needs_frame) { + if (preserve_gp_registers) { + nbytes_save = (preserve_fp_registers + ? MacroAssembler::num_volatile_gp_regs + MacroAssembler::num_volatile_fp_regs + : MacroAssembler::num_volatile_gp_regs) * BytesPerWord; + nbytes_save -= preserve_R3 ? 
0 : BytesPerWord; + __ save_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3); + } + + __ save_LR_CR(tmp1); + __ push_frame_reg_args(nbytes_save, tmp1); + } + + // Setup arguments + if (saved_base != R3_ARG1) { + __ mr_if_needed(R3_ARG1, dst); + __ add(R4_ARG2, ind_or_offs, saved_base); + } else if (dst != R4_ARG2) { + __ add(R4_ARG2, ind_or_offs, saved_base); + __ mr(R3_ARG1, dst); + } else { + __ add(R0, ind_or_offs, saved_base); + __ mr(R3_ARG1, dst); + __ mr(R4_ARG2, R0); + } + + __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators)); + + Register result = R3_RET; + if (needs_frame) { + __ pop_frame(); + __ restore_LR_CR(tmp1); + + if (preserve_R3) { + __ mr(R0, R3_RET); + result = R0; + } + + if (preserve_gp_registers) { + __ restore_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3); + } + } + __ mr_if_needed(dst, result); + + __ bind(skip_barrier); + __ block_comment("} load_at (zgc)"); +} + +#ifdef ASSERT +// The Z store barrier only verifies the pointers it is operating on and is thus a sole debugging measure. +void XBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register base, RegisterOrConstant ind_or_offs, Register val, + Register tmp1, Register tmp2, Register tmp3, + MacroAssembler::PreservationLevel preservation_level) { + __ block_comment("store_at (zgc) {"); + + // If the 'val' register is 'noreg', the to-be-stored value is a null pointer. + if (is_reference_type(type) && val != noreg) { + __ ld(tmp1, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread); + __ and_(tmp1, tmp1, val); + __ asm_assert_eq("Detected dirty pointer on the heap in Z store barrier"); + } + + // Store value + BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, preservation_level); + + __ block_comment("} store_at (zgc)"); +} +#endif // ASSERT + +void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, DecoratorSet decorators, BasicType component_type, + Register src, Register dst, Register count, + Register preserve1, Register preserve2) { + __ block_comment("arraycopy_prologue (zgc) {"); + + /* ==== Check whether a special gc barrier is required for this particular load ==== */ + if (!is_reference_type(component_type)) { + return; + } + + Label skip_barrier; + + // Fast path: Array is of length zero + __ cmpdi(CCR0, count, 0); + __ beq(CCR0, skip_barrier); + + /* ==== Ensure register sanity ==== */ + Register tmp_R11 = R11_scratch1; + + assert_different_registers(src, dst, count, tmp_R11, noreg); + if (preserve1 != noreg) { + // Not technically required, but unlikely being intended. 
+ assert_different_registers(preserve1, preserve2); + } + + /* ==== Invoke barrier (slowpath) ==== */ + int nbytes_save = 0; + + { + assert(!noreg->is_volatile(), "sanity"); + + if (preserve1->is_volatile()) { + __ std(preserve1, -BytesPerWord * ++nbytes_save, R1_SP); + } + + if (preserve2->is_volatile() && preserve1 != preserve2) { + __ std(preserve2, -BytesPerWord * ++nbytes_save, R1_SP); + } + + __ std(src, -BytesPerWord * ++nbytes_save, R1_SP); + __ std(dst, -BytesPerWord * ++nbytes_save, R1_SP); + __ std(count, -BytesPerWord * ++nbytes_save, R1_SP); + + __ save_LR_CR(tmp_R11); + __ push_frame_reg_args(nbytes_save, tmp_R11); + } + + // XBarrierSetRuntime::load_barrier_on_oop_array_addr(src, count) + if (count == R3_ARG1) { + if (src == R4_ARG2) { + // Arguments are provided in reverse order + __ mr(tmp_R11, count); + __ mr(R3_ARG1, src); + __ mr(R4_ARG2, tmp_R11); + } else { + __ mr(R4_ARG2, count); + __ mr(R3_ARG1, src); + } + } else { + __ mr_if_needed(R3_ARG1, src); + __ mr_if_needed(R4_ARG2, count); + } + + __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_array_addr()); + + __ pop_frame(); + __ restore_LR_CR(tmp_R11); + + { + __ ld(count, -BytesPerWord * nbytes_save--, R1_SP); + __ ld(dst, -BytesPerWord * nbytes_save--, R1_SP); + __ ld(src, -BytesPerWord * nbytes_save--, R1_SP); + + if (preserve2->is_volatile() && preserve1 != preserve2) { + __ ld(preserve2, -BytesPerWord * nbytes_save--, R1_SP); + } + + if (preserve1->is_volatile()) { + __ ld(preserve1, -BytesPerWord * nbytes_save--, R1_SP); + } + } + + __ bind(skip_barrier); + + __ block_comment("} arraycopy_prologue (zgc)"); +} + +void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env, + Register obj, Register tmp, Label& slowpath) { + __ block_comment("try_resolve_jobject_in_native (zgc) {"); + + assert_different_registers(jni_env, obj, tmp); + + // Resolve the pointer using the standard implementation for weak tag handling and pointer verification. + BarrierSetAssembler::try_resolve_jobject_in_native(masm, dst, jni_env, obj, tmp, slowpath); + + // Check whether pointer is dirty. + __ ld(tmp, + in_bytes(XThreadLocalData::address_bad_mask_offset() - JavaThread::jni_environment_offset()), + jni_env); + + __ and_(tmp, obj, tmp); + __ bne(CCR0, slowpath); + + __ block_comment("} try_resolve_jobject_in_native (zgc)"); +} + +#undef __ + +#ifdef COMPILER1 +#define __ ce->masm()-> + +// Code emitted by LIR node "LIR_OpXLoadBarrierTest" which in turn is emitted by XBarrierSetC1::load_barrier. +// The actual compare and branch instructions are represented as stand-alone LIR nodes. +void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, + LIR_Opr ref) const { + __ block_comment("load_barrier_test (zgc) {"); + + __ ld(R0, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread); + __ andr(R0, R0, ref->as_pointer_register()); + __ cmpdi(CCR5 /* as mandated by LIR node */, R0, 0); + + __ block_comment("} load_barrier_test (zgc)"); +} + +// Code emitted by code stub "XLoadBarrierStubC1" which in turn is emitted by XBarrierSetC1::load_barrier. +// Invokes the runtime stub which is defined just below. 
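The test emitted by generate_c1_load_barrier_test above (and by load_at earlier in this file) reduces to a single AND against the thread-local bad mask. A conceptual model in plain C++, with the mask treated as an opaque per-thread value maintained by the collector (this helper is illustrative and does not exist in the patch):

  #include <cstdint>

  // A reference is "dirty" and must be healed by the slow path if any of its
  // color bits overlap the currently-bad color bits (see the and_/beq sequences above).
  bool needs_load_barrier(uint64_t ref, uint64_t address_bad_mask) {
    return (ref & address_bad_mask) != 0;
  }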
+void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, + XLoadBarrierStubC1* stub) const { + __ block_comment("c1_load_barrier_stub (zgc) {"); + + __ bind(*stub->entry()); + + /* ==== Determine relevant data registers and ensure register sanity ==== */ + Register ref = stub->ref()->as_register(); + Register ref_addr = noreg; + + // Determine reference address + if (stub->tmp()->is_valid()) { + // 'tmp' register is given, so address might have an index or a displacement. + ce->leal(stub->ref_addr(), stub->tmp()); + ref_addr = stub->tmp()->as_pointer_register(); + } else { + // 'tmp' register is not given, so address must have neither an index nor a displacement. + // The address' base register is thus usable as-is. + assert(stub->ref_addr()->as_address_ptr()->disp() == 0, "illegal displacement"); + assert(!stub->ref_addr()->as_address_ptr()->index()->is_valid(), "illegal index"); + + ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register(); + } + + assert_different_registers(ref, ref_addr, R0, noreg); + + /* ==== Invoke stub ==== */ + // Pass arguments via stack. The stack pointer will be bumped by the stub. + __ std(ref, (intptr_t) -1 * BytesPerWord, R1_SP); + __ std(ref_addr, (intptr_t) -2 * BytesPerWord, R1_SP); + + __ load_const_optimized(R0, stub->runtime_stub()); + __ call_stub(R0); + + // The runtime stub passes the result via the R0 register, overriding the previously-loaded stub address. + __ mr_if_needed(ref, R0); + __ b(*stub->continuation()); + + __ block_comment("} c1_load_barrier_stub (zgc)"); +} + +#undef __ +#define __ sasm-> + +// Code emitted by runtime code stub which in turn is emitted by XBarrierSetC1::generate_c1_runtime_stubs. +void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, + DecoratorSet decorators) const { + __ block_comment("c1_load_barrier_runtime_stub (zgc) {"); + + const int stack_parameters = 2; + const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_parameters) * BytesPerWord; + + __ save_volatile_gprs(R1_SP, -nbytes_save); + __ save_LR_CR(R0); + + // Load arguments back again from the stack. 
+ __ ld(R3_ARG1, (intptr_t) -1 * BytesPerWord, R1_SP); // ref + __ ld(R4_ARG2, (intptr_t) -2 * BytesPerWord, R1_SP); // ref_addr + + __ push_frame_reg_args(nbytes_save, R0); + + __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators)); + + __ verify_oop(R3_RET, "Bad pointer after barrier invocation"); + __ mr(R0, R3_RET); + + __ pop_frame(); + __ restore_LR_CR(R3_RET); + __ restore_volatile_gprs(R1_SP, -nbytes_save); + + __ blr(); + + __ block_comment("} c1_load_barrier_runtime_stub (zgc)"); +} + +#undef __ +#endif // COMPILER1 + +#ifdef COMPILER2 + +OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) const { + if (!OptoReg::is_reg(opto_reg)) { + return OptoReg::Bad; + } + + VMReg vm_reg = OptoReg::as_VMReg(opto_reg); + if ((vm_reg->is_Register() || vm_reg ->is_FloatRegister()) && (opto_reg & 1) != 0) { + return OptoReg::Bad; + } + + return opto_reg; +} + +#define __ _masm-> + +class XSaveLiveRegisters { + MacroAssembler* _masm; + RegMask _reg_mask; + Register _result_reg; + int _frame_size; + + public: + XSaveLiveRegisters(MacroAssembler *masm, XLoadBarrierStubC2 *stub) + : _masm(masm), _reg_mask(stub->live()), _result_reg(stub->ref()) { + + const int register_save_size = iterate_over_register_mask(ACTION_COUNT_ONLY) * BytesPerWord; + _frame_size = align_up(register_save_size, frame::alignment_in_bytes) + + frame::native_abi_reg_args_size; + + __ save_LR_CR(R0); + __ push_frame(_frame_size, R0); + + iterate_over_register_mask(ACTION_SAVE, _frame_size); + } + + ~XSaveLiveRegisters() { + iterate_over_register_mask(ACTION_RESTORE, _frame_size); + + __ addi(R1_SP, R1_SP, _frame_size); + __ restore_LR_CR(R0); + } + + private: + enum IterationAction : int { + ACTION_SAVE, + ACTION_RESTORE, + ACTION_COUNT_ONLY + }; + + int iterate_over_register_mask(IterationAction action, int offset = 0) { + int reg_save_index = 0; + RegMaskIterator live_regs_iterator(_reg_mask); + + while(live_regs_iterator.has_next()) { + const OptoReg::Name opto_reg = live_regs_iterator.next(); + + // Filter out stack slots (spilled registers, i.e., stack-allocated registers). + if (!OptoReg::is_reg(opto_reg)) { + continue; + } + + const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); + if (vm_reg->is_Register()) { + Register std_reg = vm_reg->as_Register(); + + // '_result_reg' will hold the end result of the operation. Its content must thus not be preserved. + if (std_reg == _result_reg) { + continue; + } + + if (std_reg->encoding() >= R2->encoding() && std_reg->encoding() <= R12->encoding()) { + reg_save_index++; + + if (action == ACTION_SAVE) { + _masm->std(std_reg, offset - reg_save_index * BytesPerWord, R1_SP); + } else if (action == ACTION_RESTORE) { + _masm->ld(std_reg, offset - reg_save_index * BytesPerWord, R1_SP); + } else { + assert(action == ACTION_COUNT_ONLY, "Sanity"); + } + } + } else if (vm_reg->is_FloatRegister()) { + FloatRegister fp_reg = vm_reg->as_FloatRegister(); + if (fp_reg->encoding() >= F0->encoding() && fp_reg->encoding() <= F13->encoding()) { + reg_save_index++; + + if (action == ACTION_SAVE) { + _masm->stfd(fp_reg, offset - reg_save_index * BytesPerWord, R1_SP); + } else if (action == ACTION_RESTORE) { + _masm->lfd(fp_reg, offset - reg_save_index * BytesPerWord, R1_SP); + } else { + assert(action == ACTION_COUNT_ONLY, "Sanity"); + } + } + } else if (vm_reg->is_ConditionRegister()) { + // NOP. 
Conditions registers are covered by save_LR_CR + } else if (vm_reg->is_VectorSRegister()) { + assert(SuperwordUseVSX, "or should not reach here"); + VectorSRegister vs_reg = vm_reg->as_VectorSRegister(); + if (vs_reg->encoding() >= VSR32->encoding() && vs_reg->encoding() <= VSR51->encoding()) { + reg_save_index += 2; + + Register spill_addr = R0; + if (action == ACTION_SAVE) { + _masm->addi(spill_addr, R1_SP, offset - reg_save_index * BytesPerWord); + _masm->stxvd2x(vs_reg, spill_addr); + } else if (action == ACTION_RESTORE) { + _masm->addi(spill_addr, R1_SP, offset - reg_save_index * BytesPerWord); + _masm->lxvd2x(vs_reg, spill_addr); + } else { + assert(action == ACTION_COUNT_ONLY, "Sanity"); + } + } + } else { + if (vm_reg->is_SpecialRegister()) { + fatal("Special registers are unsupported. Found register %s", vm_reg->name()); + } else { + fatal("Register type is not known"); + } + } + } + + return reg_save_index; + } +}; + +#undef __ +#define __ _masm-> + +class XSetupArguments { + MacroAssembler* const _masm; + const Register _ref; + const Address _ref_addr; + + public: + XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) : + _masm(masm), + _ref(stub->ref()), + _ref_addr(stub->ref_addr()) { + + // Desired register/argument configuration: + // _ref: R3_ARG1 + // _ref_addr: R4_ARG2 + + // '_ref_addr' can be unspecified. In that case, the barrier will not heal the reference. + if (_ref_addr.base() == noreg) { + assert_different_registers(_ref, R0, noreg); + + __ mr_if_needed(R3_ARG1, _ref); + __ li(R4_ARG2, 0); + } else { + assert_different_registers(_ref, _ref_addr.base(), R0, noreg); + assert(!_ref_addr.index()->is_valid(), "reference addresses must not contain an index component"); + + if (_ref != R4_ARG2) { + // Calculate address first as the address' base register might clash with R4_ARG2 + __ addi(R4_ARG2, _ref_addr.base(), _ref_addr.disp()); + __ mr_if_needed(R3_ARG1, _ref); + } else if (_ref_addr.base() != R3_ARG1) { + __ mr(R3_ARG1, _ref); + __ addi(R4_ARG2, _ref_addr.base(), _ref_addr.disp()); // Clobbering _ref + } else { + // Arguments are provided in inverse order (i.e. _ref == R4_ARG2, _ref_addr == R3_ARG1) + __ mr(R0, _ref); + __ addi(R4_ARG2, _ref_addr.base(), _ref_addr.disp()); + __ mr(R3_ARG1, R0); + } + } + } +}; + +#undef __ +#define __ masm-> + +void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const { + __ block_comment("generate_c2_load_barrier_stub (zgc) {"); + + __ bind(*stub->entry()); + + Register ref = stub->ref(); + Address ref_addr = stub->ref_addr(); + + assert_different_registers(ref, ref_addr.base()); + + { + XSaveLiveRegisters save_live_registers(masm, stub); + XSetupArguments setup_arguments(masm, stub); + + __ call_VM_leaf(stub->slow_path()); + __ mr_if_needed(ref, R3_RET); + } + + __ b(*stub->continuation()); + + __ block_comment("} generate_c2_load_barrier_stub (zgc)"); +} + +#undef __ +#endif // COMPILER2 diff --git a/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.hpp b/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.hpp new file mode 100644 index 0000000000000..14c338dd212c6 --- /dev/null +++ b/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.hpp @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2022 SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef CPU_PPC_GC_X_XBARRIERSETASSEMBLER_PPC_HPP +#define CPU_PPC_GC_X_XBARRIERSETASSEMBLER_PPC_HPP + +#include "code/vmreg.hpp" +#include "oops/accessDecorators.hpp" +#ifdef COMPILER2 +#include "opto/optoreg.hpp" +#endif // COMPILER2 + +#ifdef COMPILER1 +class LIR_Assembler; +class LIR_Opr; +class StubAssembler; +#endif // COMPILER1 + +#ifdef COMPILER2 +class Node; +#endif // COMPILER2 + +#ifdef COMPILER1 +class XLoadBarrierStubC1; +#endif // COMPILER1 + +#ifdef COMPILER2 +class XLoadBarrierStubC2; +#endif // COMPILER2 + +class XBarrierSetAssembler : public XBarrierSetAssemblerBase { +public: + virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register base, RegisterOrConstant ind_or_offs, Register dst, + Register tmp1, Register tmp2, + MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null = NULL); + +#ifdef ASSERT + virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register base, RegisterOrConstant ind_or_offs, Register val, + Register tmp1, Register tmp2, Register tmp3, + MacroAssembler::PreservationLevel preservation_level); +#endif // ASSERT + + virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count, + Register preserve1, Register preserve2); + + virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env, + Register obj, Register tmp, Label& slowpath); + + virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; } + +#ifdef COMPILER1 + void generate_c1_load_barrier_test(LIR_Assembler* ce, + LIR_Opr ref) const; + + void generate_c1_load_barrier_stub(LIR_Assembler* ce, + XLoadBarrierStubC1* stub) const; + + void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, + DecoratorSet decorators) const; +#endif // COMPILER1 + +#ifdef COMPILER2 + OptoReg::Name refine_register(const Node* node, OptoReg::Name opto_reg) const; + + void generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const; +#endif // COMPILER2 +}; + +#endif // CPU_PPC_GC_X_XBARRIERSETASSEMBLER_PPC_HPP diff --git a/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.cpp b/src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.cpp similarity index 97% rename from src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.cpp rename to src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.cpp index fbf6d13dc8f62..3218a765fc703 100644 --- a/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.cpp +++ b/src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.cpp @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include 
"gc/shared/gcLogPrecious.hpp" #include "gc/shared/gc_globals.hpp" -#include "gc/z/zGlobals.hpp" +#include "gc/x/xGlobals.hpp" #include "runtime/globals.hpp" #include "runtime/os.hpp" #include "utilities/globalDefinitions.hpp" @@ -154,7 +154,7 @@ static unsigned int probe_valid_max_address_bit(size_t init_bit, size_t min_bit) // It should thus be a "close enough" approximation to the real virtual memory address space limit. // // This recovery strategy is only applied in production builds. - // In debug builds, an assertion in 'ZPlatformAddressOffsetBits' will bail out the VM to indicate that + // In debug builds, an assertion in 'XPlatformAddressOffsetBits' will bail out the VM to indicate that // the assumed address space is no longer up-to-date. if (last_allocatable_address != MAP_FAILED) { const unsigned int bitpos = BitsPerSize_t - count_leading_zeros((size_t) last_allocatable_address) - 1; @@ -184,7 +184,7 @@ static unsigned int probe_valid_max_address_bit(size_t init_bit, size_t min_bit) #endif // LINUX } -size_t ZPlatformAddressOffsetBits() { +size_t XPlatformAddressOffsetBits() { const static unsigned int valid_max_address_offset_bits = probe_valid_max_address_bit(DEFAULT_MAX_ADDRESS_BIT, MINIMUM_MAX_ADDRESS_BIT) + 1; assert(valid_max_address_offset_bits >= MINIMUM_MAX_ADDRESS_BIT, @@ -192,12 +192,12 @@ size_t ZPlatformAddressOffsetBits() { const size_t max_address_offset_bits = valid_max_address_offset_bits - 3; const size_t min_address_offset_bits = max_address_offset_bits - 2; - const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio); + const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio); const size_t address_offset_bits = log2i_exact(address_offset); return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); } -size_t ZPlatformAddressMetadataShift() { - return ZPlatformAddressOffsetBits(); +size_t XPlatformAddressMetadataShift() { + return XPlatformAddressOffsetBits(); } diff --git a/src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.hpp b/src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.hpp new file mode 100644 index 0000000000000..be88b05b02a82 --- /dev/null +++ b/src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.hpp @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021 SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef CPU_PPC_GC_X_XGLOBALS_PPC_HPP +#define CPU_PPC_GC_X_XGLOBALS_PPC_HPP + +#include "globalDefinitions_ppc.hpp" + +const size_t XPlatformHeapViews = 3; +const size_t XPlatformCacheLineSize = DEFAULT_CACHE_LINE_SIZE; + +size_t XPlatformAddressOffsetBits(); +size_t XPlatformAddressMetadataShift(); + +#endif // CPU_PPC_GC_X_XGLOBALS_PPC_HPP diff --git a/src/hotspot/cpu/ppc/gc/x/x_ppc.ad b/src/hotspot/cpu/ppc/gc/x/x_ppc.ad new file mode 100644 index 0000000000000..dd46b46a3a316 --- /dev/null +++ b/src/hotspot/cpu/ppc/gc/x/x_ppc.ad @@ -0,0 +1,298 @@ +// +// Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2021 SAP SE. All rights reserved. +// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +// +// This code is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License version 2 only, as +// published by the Free Software Foundation. +// +// This code is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// version 2 for more details (a copy is included in the LICENSE file that +// accompanied this code). +// +// You should have received a copy of the GNU General Public License version +// 2 along with this work; if not, write to the Free Software Foundation, +// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +// +// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +// or visit www.oracle.com if you need additional information or have any +// questions. +// + +source_hpp %{ + +#include "gc/shared/gc_globals.hpp" +#include "gc/x/c2/xBarrierSetC2.hpp" +#include "gc/x/xThreadLocalData.hpp" + +%} + +source %{ + +static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, + Register tmp, uint8_t barrier_data) { + if (barrier_data == XLoadBarrierElided) { + return; + } + + XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data); + __ ld(tmp, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread); + __ and_(tmp, tmp, ref); + __ bne_far(CCR0, *stub->entry(), MacroAssembler::bc_far_optimize_on_relocate); + __ bind(*stub->continuation()); +} + +static void x_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, + Register tmp) { + XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong); + __ b(*stub->entry()); + __ bind(*stub->continuation()); +} + +static void x_compare_and_swap(MacroAssembler& _masm, const MachNode* node, + Register res, Register mem, Register oldval, Register newval, + Register tmp_xchg, Register tmp_mask, + bool weak, bool acquire) { + // z-specific load barrier requires strong CAS operations. + // Weak CAS operations are thus only emitted if the barrier is elided. + __ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem, + MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, NULL, true, + weak && node->barrier_data() == XLoadBarrierElided); + + if (node->barrier_data() != XLoadBarrierElided) { + Label skip_barrier; + + __ ld(tmp_mask, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread); + __ and_(tmp_mask, tmp_mask, tmp_xchg); + __ beq(CCR0, skip_barrier); + + // CAS must have failed because pointer in memory is bad. 
+ x_load_barrier_slow_path(_masm, node, Address(mem), tmp_xchg, res /* used as tmp */); + + __ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem, + MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, NULL, true, weak); + + __ bind(skip_barrier); + } + + if (acquire) { + if (support_IRIW_for_not_multiple_copy_atomic_cpu) { + // Uses the isync instruction as an acquire barrier. + // This exploits the compare and the branch in the z load barrier (load, compare and branch, isync). + __ isync(); + } else { + __ sync(); + } + } +} + +static void x_compare_and_exchange(MacroAssembler& _masm, const MachNode* node, + Register res, Register mem, Register oldval, Register newval, Register tmp, + bool weak, bool acquire) { + // z-specific load barrier requires strong CAS operations. + // Weak CAS operations are thus only emitted if the barrier is elided. + __ cmpxchgd(CCR0, res, oldval, newval, mem, + MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, NULL, true, + weak && node->barrier_data() == XLoadBarrierElided); + + if (node->barrier_data() != XLoadBarrierElided) { + Label skip_barrier; + __ ld(tmp, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread); + __ and_(tmp, tmp, res); + __ beq(CCR0, skip_barrier); + + x_load_barrier_slow_path(_masm, node, Address(mem), res, tmp); + + __ cmpxchgd(CCR0, res, oldval, newval, mem, + MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, NULL, true, weak); + + __ bind(skip_barrier); + } + + if (acquire) { + if (support_IRIW_for_not_multiple_copy_atomic_cpu) { + // Uses the isync instruction as an acquire barrier. + // This exploits the compare and the branch in the z load barrier (load, compare and branch, isync). + __ isync(); + } else { + __ sync(); + } + } +} + +%} + +instruct xLoadP(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0) +%{ + match(Set dst (LoadP mem)); + effect(TEMP_DEF dst, TEMP tmp, KILL cr0); + ins_cost(MEMORY_REF_COST); + + predicate((UseZGC && !ZGenerational && n->as_Load()->barrier_data() != 0) + && (n->as_Load()->is_unordered() || followed_by_acquire(n))); + + format %{ "LD $dst, $mem" %} + ins_encode %{ + assert($mem$$index == 0, "sanity"); + __ ld($dst$$Register, $mem$$disp, $mem$$base$$Register); + x_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data()); + %} + ins_pipe(pipe_class_default); +%} + +// Load Pointer Volatile +instruct xLoadP_acq(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0) +%{ + match(Set dst (LoadP mem)); + effect(TEMP_DEF dst, TEMP tmp, KILL cr0); + ins_cost(3 * MEMORY_REF_COST); + + // Predicate on instruction order is implicitly present due to the predicate of the cheaper zLoadP operation + predicate(UseZGC && !ZGenerational && n->as_Load()->barrier_data() != 0); + + format %{ "LD acq $dst, $mem" %} + ins_encode %{ + __ ld($dst$$Register, $mem$$disp, $mem$$base$$Register); + x_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data()); + + // Uses the isync instruction as an acquire barrier. + // This exploits the compare and the branch in the z load barrier (load, compare and branch, isync). 
+ __ isync(); + %} + ins_pipe(pipe_class_default); +%} + +instruct xCompareAndSwapP(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, + iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{ + match(Set res (CompareAndSwapP mem (Binary oldval newval))); + effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0); + + predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong) + && (((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*) n)->order() != MemNode::seqcst)); + + format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %} + ins_encode %{ + x_compare_and_swap(_masm, this, + $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, + $tmp_xchg$$Register, $tmp_mask$$Register, + false /* weak */, false /* acquire */); + %} + ins_pipe(pipe_class_default); +%} + +instruct xCompareAndSwapP_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, + iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{ + match(Set res (CompareAndSwapP mem (Binary oldval newval))); + effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0); + + predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong) + && (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*) n)->order() == MemNode::seqcst)); + + format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %} + ins_encode %{ + x_compare_and_swap(_masm, this, + $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, + $tmp_xchg$$Register, $tmp_mask$$Register, + false /* weak */, true /* acquire */); + %} + ins_pipe(pipe_class_default); +%} + +instruct xCompareAndSwapPWeak(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, + iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{ + match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); + effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0); + + predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong) + && ((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*) n)->order() != MemNode::seqcst); + + format %{ "weak CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %} + ins_encode %{ + x_compare_and_swap(_masm, this, + $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, + $tmp_xchg$$Register, $tmp_mask$$Register, + true /* weak */, false /* acquire */); + %} + ins_pipe(pipe_class_default); +%} + +instruct xCompareAndSwapPWeak_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, + iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{ + match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); + effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0); + + predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong) + && (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*) n)->order() == MemNode::seqcst)); + + format %{ "weak CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %} + ins_encode %{ + x_compare_and_swap(_masm, this, + $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, + $tmp_xchg$$Register, $tmp_mask$$Register, + true /* weak */, true /* acquire */); + %} + ins_pipe(pipe_class_default); +%} + +instruct xCompareAndExchangeP(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, + iRegPdst tmp, flagsRegCR0 cr0) %{ + match(Set res (CompareAndExchangeP mem (Binary oldval 
newval))); + effect(TEMP_DEF res, TEMP tmp, KILL cr0); + + predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong) + && ( + ((CompareAndSwapNode*)n)->order() != MemNode::acquire + && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst + )); + + format %{ "CMPXCHG $res, $mem, $oldval, $newval; as ptr; ptr" %} + ins_encode %{ + x_compare_and_exchange(_masm, this, + $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register, + false /* weak */, false /* acquire */); + %} + ins_pipe(pipe_class_default); +%} + +instruct xCompareAndExchangeP_acq(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, + iRegPdst tmp, flagsRegCR0 cr0) %{ + match(Set res (CompareAndExchangeP mem (Binary oldval newval))); + effect(TEMP_DEF res, TEMP tmp, KILL cr0); + + predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong) + && ( + ((CompareAndSwapNode*)n)->order() == MemNode::acquire + || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst + )); + + format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as ptr; ptr" %} + ins_encode %{ + x_compare_and_exchange(_masm, this, + $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register, + false /* weak */, true /* acquire */); + %} + ins_pipe(pipe_class_default); +%} + +instruct xGetAndSetP(iRegPdst res, iRegPdst mem, iRegPsrc newval, iRegPdst tmp, flagsRegCR0 cr0) %{ + match(Set res (GetAndSetP mem newval)); + effect(TEMP_DEF res, TEMP tmp, KILL cr0); + + predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() != 0); + + format %{ "GetAndSetP $res, $mem, $newval" %} + ins_encode %{ + __ getandsetd($res$$Register, $newval$$Register, $mem$$Register, MacroAssembler::cmpxchgx_hint_atomic_update()); + x_load_barrier(_masm, this, Address(noreg, (intptr_t) 0), $res$$Register, $tmp$$Register, barrier_data()); + + if (support_IRIW_for_not_multiple_copy_atomic_cpu) { + __ isync(); + } else { + __ sync(); + } + %} + ins_pipe(pipe_class_default); +%} diff --git a/src/hotspot/cpu/ppc/gc/z/zAddress_ppc.cpp b/src/hotspot/cpu/ppc/gc/z/zAddress_ppc.cpp new file mode 100644 index 0000000000000..136fd7a8ad1cd --- /dev/null +++ b/src/hotspot/cpu/ppc/gc/z/zAddress_ppc.cpp @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zGlobals.hpp" +#include "runtime/globals.hpp" +#include "runtime/os.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/powerOfTwo.hpp" + +#ifdef LINUX +#include <sys/mman.h> +#endif // LINUX + +// Default value if probing is not implemented for a certain platform: 128TB +static const size_t DEFAULT_MAX_ADDRESS_BIT = 47; +// Minimum value returned, if probing fails: 64GB +static const size_t MINIMUM_MAX_ADDRESS_BIT = 36; + +static size_t probe_valid_max_address_bit() { +#ifdef LINUX + size_t max_address_bit = 0; + const size_t page_size = os::vm_page_size(); + for (size_t i = DEFAULT_MAX_ADDRESS_BIT; i > MINIMUM_MAX_ADDRESS_BIT; --i) { + const uintptr_t base_addr = ((uintptr_t) 1U) << i; + if (msync((void*)base_addr, page_size, MS_ASYNC) == 0) { + // msync succeeded, the address is valid, and maybe even already mapped. + max_address_bit = i; + break; + } + if (errno != ENOMEM) { + // Some error occurred. This should never happen, but msync + // has some undefined behavior, hence ignore this bit. +#ifdef ASSERT + fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno)); +#else // ASSERT + log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno)); +#endif // ASSERT + continue; + } + // Since msync failed with ENOMEM, the page might not be mapped. + // Try to map it, to see if the address is valid. + void* const result_addr = mmap((void*) base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0); + if (result_addr != MAP_FAILED) { + munmap(result_addr, page_size); + } + if ((uintptr_t) result_addr == base_addr) { + // address is valid + max_address_bit = i; + break; + } + } + if (max_address_bit == 0) { + // probing failed, allocate a very high page and take that bit as the maximum + const uintptr_t high_addr = ((uintptr_t) 1U) << DEFAULT_MAX_ADDRESS_BIT; + void* const result_addr = mmap((void*) high_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0); + if (result_addr != MAP_FAILED) { + max_address_bit = BitsPerSize_t - count_leading_zeros((size_t) result_addr) - 1; + munmap(result_addr, page_size); + } + } + log_info_p(gc, init)("Probing address space for the highest valid bit: " SIZE_FORMAT, max_address_bit); + return MAX2(max_address_bit, MINIMUM_MAX_ADDRESS_BIT); +#else // LINUX + return DEFAULT_MAX_ADDRESS_BIT; +#endif // LINUX +} + +size_t ZPlatformAddressOffsetBits() { + const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1; + const size_t max_address_offset_bits = valid_max_address_offset_bits - 3; + const size_t min_address_offset_bits = max_address_offset_bits - 2; + const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio); + const size_t address_offset_bits = log2i_exact(address_offset); + return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); +} + +size_t ZPlatformAddressHeapBaseShift() { + return ZPlatformAddressOffsetBits(); +} + +void ZGlobalsPointers::pd_set_good_masks() { +} diff --git a/src/hotspot/cpu/ppc/gc/z/zAddress_ppc.hpp b/src/hotspot/cpu/ppc/gc/z/zAddress_ppc.hpp new file mode 100644 index 0000000000000..ffaadca4d8280 --- /dev/null +++ b/src/hotspot/cpu/ppc/gc/z/zAddress_ppc.hpp @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates.
All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef CPU_PPC_GC_Z_ZADDRESS_PPC_HPP +#define CPU_PPC_GC_Z_ZADDRESS_PPC_HPP + +#include "utilities/globalDefinitions.hpp" + +const size_t ZPointerLoadShift = 16; + +size_t ZPlatformAddressOffsetBits(); +size_t ZPlatformAddressHeapBaseShift(); + +#endif // CPU_PPC_GC_Z_ZADDRESS_PPC_HPP diff --git a/src/hotspot/cpu/ppc/gc/z/zAddress_ppc.inline.hpp b/src/hotspot/cpu/ppc/gc/z/zAddress_ppc.inline.hpp new file mode 100644 index 0000000000000..438f4f5663c32 --- /dev/null +++ b/src/hotspot/cpu/ppc/gc/z/zAddress_ppc.inline.hpp @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef CPU_PPC_GC_Z_ZADDRESS_PPC_INLINE_HPP +#define CPU_PPC_GC_Z_ZADDRESS_PPC_INLINE_HPP + +#include "utilities/globalDefinitions.hpp" + +inline uintptr_t ZPointer::remap_bits(uintptr_t colored) { + return colored & ZPointerRemappedMask; +} + +inline constexpr int ZPointer::load_shift_lookup(uintptr_t value) { + return ZPointerLoadShift; +} + +#endif // CPU_PPC_GC_Z_ZADDRESS_PPC_INLINE_HPP diff --git a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp index 84042505089aa..9ca0ee0ecf629 100644 --- a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp @@ -22,11 +22,12 @@ * questions. 
*/ -#include "asm/register.hpp" #include "precompiled.hpp" #include "asm/macroAssembler.inline.hpp" +#include "asm/register.hpp" #include "code/codeBlob.hpp" #include "code/vmreg.inline.hpp" +#include "gc/z/zAddress.hpp" #include "gc/z/zBarrier.inline.hpp" #include "gc/z/zBarrierSet.hpp" #include "gc/z/zBarrierSetAssembler.hpp" @@ -34,6 +35,7 @@ #include "gc/z/zThreadLocalData.hpp" #include "memory/resourceArea.hpp" #include "register_ppc.hpp" +#include "runtime/jniHandles.hpp" #include "runtime/sharedRuntime.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" @@ -44,11 +46,76 @@ #endif // COMPILER1 #ifdef COMPILER2 #include "gc/z/c2/zBarrierSetC2.hpp" +#include "opto/output.hpp" #endif // COMPILER2 #undef __ #define __ masm-> +// Helper for saving and restoring registers across a runtime call that does +// not have any live vector registers. +class ZRuntimeCallSpill { + MacroAssembler* _masm; + Register _result; + bool _needs_frame, _preserve_gp_registers, _preserve_fp_registers; + int _nbytes_save; + + void save() { + MacroAssembler* masm = _masm; + + if (_needs_frame) { + if (_preserve_gp_registers) { + bool preserve_R3 = _result != R3_ARG1; + _nbytes_save = (MacroAssembler::num_volatile_gp_regs + + (_preserve_fp_registers ? MacroAssembler::num_volatile_fp_regs : 0) + - (preserve_R3 ? 0 : 1) + ) * BytesPerWord; + __ save_volatile_gprs(R1_SP, -_nbytes_save, _preserve_fp_registers, preserve_R3); + } + + __ save_LR_CR(R0); + __ push_frame_reg_args(_nbytes_save, R0); + } + } + + void restore() { + MacroAssembler* masm = _masm; + + Register result = R3_RET; + if (_needs_frame) { + __ pop_frame(); + __ restore_LR_CR(R0); + + if (_preserve_gp_registers) { + bool restore_R3 = _result != R3_ARG1; + if (restore_R3 && _result != noreg) { + __ mr(R0, R3_RET); + result = R0; + } + __ restore_volatile_gprs(R1_SP, -_nbytes_save, _preserve_fp_registers, restore_R3); + } + } + if (_result != noreg) { + __ mr_if_needed(_result, result); + } + } + +public: + ZRuntimeCallSpill(MacroAssembler* masm, Register result, MacroAssembler::PreservationLevel preservation_level) + : _masm(masm), + _result(result), + _needs_frame(preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR), + _preserve_gp_registers(preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS), + _preserve_fp_registers(preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS), + _nbytes_save(0) { + save(); + } + ~ZRuntimeCallSpill() { + restore(); + } +}; + + void ZBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register base, RegisterOrConstant ind_or_offs, Register dst, Register tmp1, Register tmp2, @@ -81,14 +148,21 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators saved_base = tmp2; } - BarrierSetAssembler::load_at(masm, decorators, type, base, ind_or_offs, dst, - tmp1, noreg, preservation_level, L_handle_null); + __ ld(dst, ind_or_offs, base); /* ==== Check whether pointer is dirty ==== */ - Label skip_barrier; + Label done, uncolor; + + const bool on_non_strong = + (decorators & ON_WEAK_OOP_REF) != 0 || + (decorators & ON_PHANTOM_OOP_REF) != 0; // Load bad mask into scratch register. 
- __ ld(tmp1, (intptr_t) ZThreadLocalData::address_bad_mask_offset(), R16_thread); + if (on_non_strong) { + __ ld(tmp1, in_bytes(ZThreadLocalData::mark_bad_mask_offset()), R16_thread); + } else { + __ ld(tmp1, in_bytes(ZThreadLocalData::load_bad_mask_offset()), R16_thread); + } // The color bits of the to-be-tested pointer do not have to be equivalent to the 'bad_mask' testing bits. // A pointer is classified as dirty if any of the color bits that also match the bad mask is set. @@ -96,66 +170,195 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators // if the pointer is not dirty. // Only dirty pointers must be processed by this barrier, so we can skip it in case the latter condition holds true. __ and_(tmp1, tmp1, dst); - __ beq(CCR0, skip_barrier); + __ beq(CCR0, uncolor); /* ==== Invoke barrier ==== */ - int nbytes_save = 0; + { + ZRuntimeCallSpill rcs(masm, dst, preservation_level); + + // Setup arguments + if (saved_base != R3_ARG1 && ind_or_offs.register_or_noreg() != R3_ARG1) { + __ mr_if_needed(R3_ARG1, dst); + __ add(R4_ARG2, ind_or_offs, saved_base); + } else if (dst != R4_ARG2) { + __ add(R4_ARG2, ind_or_offs, saved_base); + __ mr(R3_ARG1, dst); + } else { + __ add(R0, ind_or_offs, saved_base); + __ mr(R3_ARG1, dst); + __ mr(R4_ARG2, R0); + } - const bool needs_frame = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR; - const bool preserve_gp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS; - const bool preserve_fp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS; + __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators)); + } - const bool preserve_R3 = dst != R3_ARG1; + // Slow-path has already uncolored + if (L_handle_null != nullptr) { + __ cmpdi(CCR0, dst, 0); + __ beq(CCR0, *L_handle_null); + } + __ b(done); - if (needs_frame) { - if (preserve_gp_registers) { - nbytes_save = (preserve_fp_registers - ? MacroAssembler::num_volatile_gp_regs + MacroAssembler::num_volatile_fp_regs - : MacroAssembler::num_volatile_gp_regs) * BytesPerWord; - nbytes_save -= preserve_R3 ? 
0 : BytesPerWord; - __ save_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3); - } + __ bind(uncolor); + if (L_handle_null == nullptr) { + __ srdi(dst, dst, ZPointerLoadShift); + } else { + __ srdi_(dst, dst, ZPointerLoadShift); + __ beq(CCR0, *L_handle_null); + } - __ save_LR_CR(tmp1); - __ push_frame_reg_args(nbytes_save, tmp1); + __ bind(done); + __ block_comment("} load_at (zgc)"); +} + +static void load_least_significant_16_oop_bits(MacroAssembler* masm, Register dst, RegisterOrConstant ind_or_offs, Register base) { + assert_different_registers(dst, base); +#ifndef VM_LITTLE_ENDIAN + const int BE_offset = 6; + if (ind_or_offs.is_register()) { + __ addi(dst, ind_or_offs.as_register(), BE_offset); + __ lhzx(dst, base, dst); + } else { + __ lhz(dst, ind_or_offs.as_constant() + BE_offset, base); } +#else + __ lhz(dst, ind_or_offs, base); +#endif +} - // Setup arguments - if (saved_base != R3_ARG1) { - __ mr_if_needed(R3_ARG1, dst); - __ add(R4_ARG2, ind_or_offs, saved_base); - } else if (dst != R4_ARG2) { - __ add(R4_ARG2, ind_or_offs, saved_base); - __ mr(R3_ARG1, dst); +static void emit_store_fast_path_check(MacroAssembler* masm, Register base, RegisterOrConstant ind_or_offs, bool is_atomic, Label& medium_path) { + if (is_atomic) { + assert(ZPointerLoadShift + LogMinObjAlignmentInBytes >= 16, "or replace following code"); + load_least_significant_16_oop_bits(masm, R0, ind_or_offs, base); + // Atomic operations must ensure that the contents of memory are store-good before + // an atomic operation can execute. + // A not relocatable object could have spurious raw null pointers in its fields after + // getting promoted to the old generation. + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBits); + __ cmplwi(CCR0, R0, barrier_Relocation::unpatched); } else { - __ add(R0, ind_or_offs, saved_base); - __ mr(R3_ARG1, dst); - __ mr(R4_ARG2, R0); + __ ld(R0, ind_or_offs, base); + // Stores on relocatable objects never need to deal with raw null pointers in fields. + // Raw null pointers may only exist in the young generation, as they get pruned when + // the object is relocated to old. And no pre-write barrier needs to perform any action + // in the young generation. + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreBadMask); + __ andi_(R0, R0, barrier_Relocation::unpatched); } + __ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), medium_path); +} - __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators)); +void ZBarrierSetAssembler::store_barrier_fast(MacroAssembler* masm, + Register ref_base, + RegisterOrConstant ind_or_offset, + Register rnew_zaddress, + Register rnew_zpointer, + bool in_nmethod, + bool is_atomic, + Label& medium_path, + Label& medium_path_continuation) const { + assert_different_registers(ref_base, rnew_zpointer); + assert_different_registers(ind_or_offset.register_or_noreg(), rnew_zpointer); + assert_different_registers(rnew_zaddress, rnew_zpointer); + + if (in_nmethod) { + emit_store_fast_path_check(masm, ref_base, ind_or_offset, is_atomic, medium_path); + __ bind(medium_path_continuation); + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBits); + __ li(rnew_zpointer, barrier_Relocation::unpatched); // Load color bits. + if (rnew_zaddress == noreg) { // noreg encodes null. + if (ZPointerLoadShift >= 16) { + __ rldicl(rnew_zpointer, rnew_zpointer, 0, 64 - ZPointerLoadShift); // Clear sign extension from li. 
+ } + } + } else { + __ ld(R0, ind_or_offset, ref_base); + __ ld(rnew_zpointer, in_bytes(ZThreadLocalData::store_bad_mask_offset()), R16_thread); + __ and_(R0, R0, rnew_zpointer); + __ bne(CCR0, medium_path); + __ bind(medium_path_continuation); + __ ld(rnew_zpointer, in_bytes(ZThreadLocalData::store_good_mask_offset()), R16_thread); + } + if (rnew_zaddress != noreg) { // noreg encodes null. + __ rldimi(rnew_zpointer, rnew_zaddress, ZPointerLoadShift, 0); // Insert shifted pointer. + } +} - Register result = R3_RET; - if (needs_frame) { - __ pop_frame(); - __ restore_LR_CR(tmp1); +static void store_barrier_buffer_add(MacroAssembler* masm, + Register ref_base, + RegisterOrConstant ind_or_offs, + Register tmp1, + Label& slow_path) { + __ ld(tmp1, in_bytes(ZThreadLocalData::store_barrier_buffer_offset()), R16_thread); + + // Combined pointer bump and check if the buffer is disabled or full + __ ld(R0, in_bytes(ZStoreBarrierBuffer::current_offset()), tmp1); + __ addic_(R0, R0, -(int)sizeof(ZStoreBarrierEntry)); + __ blt(CCR0, slow_path); + __ std(R0, in_bytes(ZStoreBarrierBuffer::current_offset()), tmp1); + + // Entry is at ZStoreBarrierBuffer (tmp1) + buffer_offset + scaled index (R0) + __ add(tmp1, tmp1, R0); + + // Compute and log the store address + Register store_addr = ref_base; + if (!ind_or_offs.is_constant() || ind_or_offs.as_constant() != 0) { + __ add(R0, ind_or_offs, ref_base); + store_addr = R0; + } + __ std(store_addr, in_bytes(ZStoreBarrierBuffer::buffer_offset()) + in_bytes(ZStoreBarrierEntry::p_offset()), tmp1); - if (preserve_R3) { - __ mr(R0, R3_RET); - result = R0; - } + // Load and log the prev value + __ ld(R0, ind_or_offs, ref_base); + __ std(R0, in_bytes(ZStoreBarrierBuffer::buffer_offset()) + in_bytes(ZStoreBarrierEntry::prev_offset()), tmp1); +} - if (preserve_gp_registers) { - __ restore_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3); +void ZBarrierSetAssembler::store_barrier_medium(MacroAssembler* masm, + Register ref_base, + RegisterOrConstant ind_or_offs, + Register tmp, + bool is_atomic, + Label& medium_path_continuation, + Label& slow_path) const { + assert_different_registers(ref_base, tmp, R0); + + // The reason to end up in the medium path is that the pre-value was not 'good'. + + if (is_atomic) { + // Atomic accesses can get to the medium fast path because the value was a + // raw null value. If it was not null, then there is no doubt we need to take a slow path. + __ ld(tmp, ind_or_offs, ref_base); + __ cmpdi(CCR0, tmp, 0); + __ bne(CCR0, slow_path); + + // If we get this far, we know there is a young raw null value in the field. + // Try to self-heal null values for atomic accesses + bool need_restore = false; + if (!ind_or_offs.is_constant() || ind_or_offs.as_constant() != 0) { + __ add(ref_base, ind_or_offs, ref_base); + need_restore = true; + } + __ ld(R0, in_bytes(ZThreadLocalData::store_good_mask_offset()), R16_thread); + __ cmpxchgd(CCR0, tmp, (intptr_t)0, R0, ref_base, + MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update()); + if (need_restore) { + __ subf(ref_base, ind_or_offs, ref_base); } + __ bne(CCR0, slow_path); + } else { + // A non-atomic relocatable object won't get to the medium fast path due to a + // raw null in the young generation. We only get here because the field is bad. + // In this path we don't need any self healing, so we can avoid a runtime call + // most of the time by buffering the store barrier to be applied lazily. 
+ store_barrier_buffer_add(masm, + ref_base, + ind_or_offs, + tmp, + slow_path); } - __ mr_if_needed(dst, result); - - __ bind(skip_barrier); - __ block_comment("} load_at (zgc)"); + __ b(medium_path_continuation); } -#ifdef ASSERT // The Z store barrier only verifies the pointers it is operating on and is thus a sole debugging measure. void ZBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register base, RegisterOrConstant ind_or_offs, Register val, @@ -163,124 +366,267 @@ void ZBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorator MacroAssembler::PreservationLevel preservation_level) { __ block_comment("store_at (zgc) {"); - // If the 'val' register is 'noreg', the to-be-stored value is a null pointer. - if (is_reference_type(type) && val != noreg) { - __ ld(tmp1, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread); - __ and_(tmp1, tmp1, val); - __ asm_assert_eq("Detected dirty pointer on the heap in Z store barrier"); - } + bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; + + if (is_reference_type(type)) { + assert_different_registers(base, val, tmp1, tmp2, tmp3); + + if (dest_uninitialized) { + // tmp1 = (val << ZPointerLoadShift) | store_good_mask + __ ld(tmp1, in_bytes(ZThreadLocalData::store_good_mask_offset()), R16_thread); + if (val != noreg) { // noreg encodes null. + __ rldimi(tmp1, val, ZPointerLoadShift, 0); + } + } else { + Label done; + Label medium; + Label medium_continuation; // bound in store_barrier_fast + Label slow; + + store_barrier_fast(masm, base, ind_or_offs, val, tmp1, false, false, medium, medium_continuation); + __ b(done); + __ bind(medium); + store_barrier_medium(masm, base, ind_or_offs, tmp1, false, medium_continuation, slow); + __ bind(slow); + { + ZRuntimeCallSpill rcs(masm, noreg, preservation_level); + __ add(R3_ARG1, ind_or_offs, base); + __ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr(), R3_ARG1); + } + __ b(medium_continuation); - // Store value - BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, preservation_level); + __ bind(done); + } + BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, tmp1, tmp2, tmp3, noreg, preservation_level); + } else { + BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, preservation_level); + } __ block_comment("} store_at (zgc)"); } -#endif // ASSERT -void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, DecoratorSet decorators, BasicType component_type, +/* arraycopy */ +const Register _load_bad_mask = R6, _store_bad_mask = R7, _store_good_mask = R8; + +void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, DecoratorSet decorators, BasicType type, Register src, Register dst, Register count, Register preserve1, Register preserve2) { - __ block_comment("arraycopy_prologue (zgc) {"); + bool is_checkcast_copy = (decorators & ARRAYCOPY_CHECKCAST) != 0, + dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; - /* ==== Check whether a special gc barrier is required for this particular load ==== */ - if (!is_reference_type(component_type)) { + if (!ZBarrierSet::barrier_needed(decorators, type) || is_checkcast_copy) { + // Barrier not needed return; } - Label skip_barrier; + __ block_comment("arraycopy_prologue (zgc) {"); - // Fast path: Array is of length zero - __ cmpdi(CCR0, count, 0); - __ beq(CCR0, skip_barrier); + load_copy_masks(masm, 
_load_bad_mask, _store_bad_mask, _store_good_mask, dest_uninitialized); - /* ==== Ensure register sanity ==== */ - Register tmp_R11 = R11_scratch1; + __ block_comment("} arraycopy_prologue (zgc)"); +} - assert_different_registers(src, dst, count, tmp_R11, noreg); - if (preserve1 != noreg) { - // Not technically required, but unlikely being intended. - assert_different_registers(preserve1, preserve2); +void ZBarrierSetAssembler::load_copy_masks(MacroAssembler* masm, + Register load_bad_mask, + Register store_bad_mask, + Register store_good_mask, + bool dest_uninitialized) const { + __ ld(load_bad_mask, in_bytes(ZThreadLocalData::load_bad_mask_offset()), R16_thread); + __ ld(store_good_mask, in_bytes(ZThreadLocalData::store_good_mask_offset()), R16_thread); + if (dest_uninitialized) { + DEBUG_ONLY( __ li(store_bad_mask, -1); ) + } else { + __ ld(store_bad_mask, in_bytes(ZThreadLocalData::store_bad_mask_offset()), R16_thread); } - - /* ==== Invoke barrier (slowpath) ==== */ - int nbytes_save = 0; - +} +void ZBarrierSetAssembler::copy_load_at_fast(MacroAssembler* masm, + Register zpointer, + Register addr, + Register load_bad_mask, + Label& slow_path, + Label& continuation) const { + __ ldx(zpointer, addr); + __ and_(R0, zpointer, load_bad_mask); + __ bne(CCR0, slow_path); + __ bind(continuation); +} +void ZBarrierSetAssembler::copy_load_at_slow(MacroAssembler* masm, + Register zpointer, + Register addr, + Register tmp, + Label& slow_path, + Label& continuation) const { + __ align(32); + __ bind(slow_path); + __ mfctr(tmp); // preserve loop counter { - assert(!noreg->is_volatile(), "sanity"); - - if (preserve1->is_volatile()) { - __ std(preserve1, -BytesPerWord * ++nbytes_save, R1_SP); - } - - if (preserve2->is_volatile() && preserve1 != preserve2) { - __ std(preserve2, -BytesPerWord * ++nbytes_save, R1_SP); - } - - __ std(src, -BytesPerWord * ++nbytes_save, R1_SP); - __ std(dst, -BytesPerWord * ++nbytes_save, R1_SP); - __ std(count, -BytesPerWord * ++nbytes_save, R1_SP); - - __ save_LR_CR(tmp_R11); - __ push_frame_reg_args(nbytes_save, tmp_R11); + ZRuntimeCallSpill rcs(masm, R0, MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS); + assert(zpointer != R4_ARG2, "or change argument setup"); + __ mr_if_needed(R4_ARG2, addr); + __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(), zpointer, R4_ARG2); } - - // ZBarrierSetRuntime::load_barrier_on_oop_array_addr(src, count) - if (count == R3_ARG1) { - if (src == R4_ARG2) { - // Arguments are provided in reverse order - __ mr(tmp_R11, count); - __ mr(R3_ARG1, src); - __ mr(R4_ARG2, tmp_R11); - } else { - __ mr(R4_ARG2, count); - __ mr(R3_ARG1, src); + __ sldi(zpointer, R0, ZPointerLoadShift); // Slow-path has uncolored; revert + __ mtctr(tmp); // restore loop counter + __ b(continuation); +} +void ZBarrierSetAssembler::copy_store_at_fast(MacroAssembler* masm, + Register zpointer, + Register addr, + Register store_bad_mask, + Register store_good_mask, + Label& medium_path, + Label& continuation, + bool dest_uninitialized) const { + if (!dest_uninitialized) { + __ ldx(R0, addr); + __ and_(R0, R0, store_bad_mask); + __ bne(CCR0, medium_path); + __ bind(continuation); + } + __ rldimi(zpointer, store_good_mask, 0, 64 - ZPointerLoadShift); // Replace color bits. 
+ __ stdx(zpointer, addr); +} +void ZBarrierSetAssembler::copy_store_at_slow(MacroAssembler* masm, + Register addr, + Register tmp, + Label& medium_path, + Label& continuation, + bool dest_uninitialized) const { + if (!dest_uninitialized) { + Label slow_path; + __ align(32); + __ bind(medium_path); + store_barrier_medium(masm, addr, (intptr_t)0, tmp, false, continuation, slow_path); + __ bind(slow_path); + __ mfctr(tmp); // preserve loop counter + { + ZRuntimeCallSpill rcs(masm, noreg, MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS); + __ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr(), addr); } - } else { - __ mr_if_needed(R3_ARG1, src); - __ mr_if_needed(R4_ARG2, count); + __ mtctr(tmp); // restore loop counter + __ b(continuation); } +} - __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_array_addr()); - - __ pop_frame(); - __ restore_LR_CR(tmp_R11); +// Arguments for generated stub: +// from: R3_ARG1 +// to: R4_ARG2 +// count: R5_ARG3 (int >= 0) +void ZBarrierSetAssembler::generate_disjoint_oop_copy(MacroAssembler* masm, bool dest_uninitialized) { + const Register zpointer = R2, tmp = R9; + Label done, loop, load_bad, load_good, store_bad, store_good; + __ cmpdi(CCR0, R5_ARG3, 0); + __ beq(CCR0, done); + __ mtctr(R5_ARG3); + + __ align(32); + __ bind(loop); + copy_load_at_fast(masm, zpointer, R3_ARG1, _load_bad_mask, load_bad, load_good); + copy_store_at_fast(masm, zpointer, R4_ARG2, _store_bad_mask, _store_good_mask, store_bad, store_good, dest_uninitialized); + __ addi(R3_ARG1, R3_ARG1, 8); + __ addi(R4_ARG2, R4_ARG2, 8); + __ bdnz(loop); + + __ bind(done); + __ li(R3_RET, 0); + __ blr(); - { - __ ld(count, -BytesPerWord * nbytes_save--, R1_SP); - __ ld(dst, -BytesPerWord * nbytes_save--, R1_SP); - __ ld(src, -BytesPerWord * nbytes_save--, R1_SP); + copy_load_at_slow(masm, zpointer, R3_ARG1, tmp, load_bad, load_good); + copy_store_at_slow(masm, R4_ARG2, tmp, store_bad, store_good, dest_uninitialized); +} - if (preserve2->is_volatile() && preserve1 != preserve2) { - __ ld(preserve2, -BytesPerWord * nbytes_save--, R1_SP); - } +void ZBarrierSetAssembler::generate_conjoint_oop_copy(MacroAssembler* masm, bool dest_uninitialized) { + const Register zpointer = R2, tmp = R9; + Label done, loop, load_bad, load_good, store_bad, store_good; + __ sldi_(R0, R5_ARG3, 3); + __ beq(CCR0, done); + __ mtctr(R5_ARG3); + // Point behind last elements and copy backwards. + __ add(R3_ARG1, R3_ARG1, R0); + __ add(R4_ARG2, R4_ARG2, R0); + + __ align(32); + __ bind(loop); + __ addi(R3_ARG1, R3_ARG1, -8); + __ addi(R4_ARG2, R4_ARG2, -8); + copy_load_at_fast(masm, zpointer, R3_ARG1, _load_bad_mask, load_bad, load_good); + copy_store_at_fast(masm, zpointer, R4_ARG2, _store_bad_mask, _store_good_mask, store_bad, store_good, dest_uninitialized); + __ bdnz(loop); + + __ bind(done); + __ li(R3_RET, 0); + __ blr(); - if (preserve1->is_volatile()) { - __ ld(preserve1, -BytesPerWord * nbytes_save--, R1_SP); - } - } + copy_load_at_slow(masm, zpointer, R3_ARG1, tmp, load_bad, load_good); + copy_store_at_slow(masm, R4_ARG2, tmp, store_bad, store_good, dest_uninitialized); +} - __ bind(skip_barrier); - __ block_comment("} arraycopy_prologue (zgc)"); +// Verify a colored pointer. +void ZBarrierSetAssembler::check_oop(MacroAssembler *masm, Register obj, const char* msg) { + if (!VerifyOops) { + return; + } + Label done, skip_uncolor; + // Skip (colored) null. + __ srdi_(R0, obj, ZPointerLoadShift); + __ beq(CCR0, done); + + // Check if ZAddressHeapBase << ZPointerLoadShift is set. 
If so, we need to uncolor. + __ rldicl_(R0, obj, 64 - ZAddressHeapBaseShift - ZPointerLoadShift, 63); + __ mr(R0, obj); + __ beq(CCR0, skip_uncolor); + __ srdi(R0, obj, ZPointerLoadShift); + __ bind(skip_uncolor); + + __ verify_oop(R0, msg); + __ bind(done); } + void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env, Register obj, Register tmp, Label& slowpath) { __ block_comment("try_resolve_jobject_in_native (zgc) {"); - assert_different_registers(jni_env, obj, tmp); + Label done, tagged, weak_tagged, check_color; + Address load_bad_mask = load_bad_mask_from_jni_env(jni_env), + mark_bad_mask = mark_bad_mask_from_jni_env(jni_env); + + // Test for tag + __ andi_(tmp, obj, JNIHandles::tag_mask); + __ bne(CCR0, tagged); + + // Resolve local handle + __ ld(dst, 0, obj); + __ b(done); + + __ bind(tagged); + + // Test for weak tag + __ andi_(tmp, obj, JNIHandles::TypeTag::weak_global); + __ clrrdi(dst, obj, JNIHandles::tag_size); // Untag. + __ bne(CCR0, weak_tagged); - // Resolve the pointer using the standard implementation for weak tag handling and pointer verification. - BarrierSetAssembler::try_resolve_jobject_in_native(masm, dst, jni_env, obj, tmp, slowpath); + // Resolve global handle + __ ld(dst, 0, dst); + __ ld(tmp, load_bad_mask.disp(), load_bad_mask.base()); + __ b(check_color); - // Check whether pointer is dirty. - __ ld(tmp, - in_bytes(ZThreadLocalData::address_bad_mask_offset() - JavaThread::jni_environment_offset()), - jni_env); + __ bind(weak_tagged); - __ and_(tmp, obj, tmp); + // Resolve weak handle + __ ld(dst, 0, dst); + __ ld(tmp, mark_bad_mask.disp(), mark_bad_mask.base()); + + __ bind(check_color); + __ and_(tmp, tmp, dst); __ bne(CCR0, slowpath); + // Uncolor + __ srdi(dst, dst, ZPointerLoadShift); + + __ bind(done); + __ block_comment("} try_resolve_jobject_in_native (zgc)"); } @@ -289,17 +635,40 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, R #ifdef COMPILER1 #define __ ce->masm()-> -// Code emitted by LIR node "LIR_OpZLoadBarrierTest" which in turn is emitted by ZBarrierSetC1::load_barrier. -// The actual compare and branch instructions are represented as stand-alone LIR nodes. -void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, - LIR_Opr ref) const { - __ block_comment("load_barrier_test (zgc) {"); +static void z_uncolor(LIR_Assembler* ce, LIR_Opr ref) { + Register r = ref->as_register(); + __ srdi(r, r, ZPointerLoadShift); +} + +static void check_color(LIR_Assembler* ce, LIR_Opr ref, bool on_non_strong) { + int relocFormat = on_non_strong ? 
ZBarrierRelocationFormatMarkBadMask + : ZBarrierRelocationFormatLoadBadMask; + __ relocate(barrier_Relocation::spec(), relocFormat); + __ andi_(R0, ref->as_register(), barrier_Relocation::unpatched); +} - __ ld(R0, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread); - __ andr(R0, R0, ref->as_pointer_register()); - __ cmpdi(CCR5 /* as mandated by LIR node */, R0, 0); +static void z_color(LIR_Assembler* ce, LIR_Opr ref) { + __ sldi(ref->as_register(), ref->as_register(), ZPointerLoadShift); + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBits); + __ ori(ref->as_register(), ref->as_register(), barrier_Relocation::unpatched); +} + +void ZBarrierSetAssembler::generate_c1_uncolor(LIR_Assembler* ce, LIR_Opr ref) const { + z_uncolor(ce, ref); +} + +void ZBarrierSetAssembler::generate_c1_color(LIR_Assembler* ce, LIR_Opr ref) const { + z_color(ce, ref); +} - __ block_comment("} load_barrier_test (zgc)"); +void ZBarrierSetAssembler::generate_c1_load_barrier(LIR_Assembler* ce, + LIR_Opr ref, + ZLoadBarrierStubC1* stub, + bool on_non_strong) const { + check_color(ce, ref, on_non_strong); + __ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *stub->entry()); + z_uncolor(ce, ref); + __ bind(*stub->continuation()); } // Code emitted by code stub "ZLoadBarrierStubC1" which in turn is emitted by ZBarrierSetC1::load_barrier. @@ -332,19 +701,77 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, /* ==== Invoke stub ==== */ // Pass arguments via stack. The stack pointer will be bumped by the stub. - __ std(ref, (intptr_t) -1 * BytesPerWord, R1_SP); - __ std(ref_addr, (intptr_t) -2 * BytesPerWord, R1_SP); + __ std(ref, -1 * BytesPerWord, R1_SP); + __ std(ref_addr, -2 * BytesPerWord, R1_SP); - __ load_const_optimized(R0, stub->runtime_stub()); + __ load_const_optimized(R0, stub->runtime_stub(), /* temp */ ref); __ call_stub(R0); // The runtime stub passes the result via the R0 register, overriding the previously-loaded stub address. - __ mr_if_needed(ref, R0); + __ mr(ref, R0); __ b(*stub->continuation()); __ block_comment("} c1_load_barrier_stub (zgc)"); } +void ZBarrierSetAssembler::generate_c1_store_barrier(LIR_Assembler* ce, + LIR_Address* addr, + LIR_Opr new_zaddress, + LIR_Opr new_zpointer, + ZStoreBarrierStubC1* stub) const { + Register rnew_zaddress = new_zaddress->as_register(); + Register rnew_zpointer = new_zpointer->as_register(); + + Register rbase = addr->base()->as_pointer_register(); + RegisterOrConstant ind_or_offs = (addr->index()->is_illegal()) + ? (RegisterOrConstant)addr->disp() + : (RegisterOrConstant)addr->index()->as_pointer_register(); + + store_barrier_fast(ce->masm(), + rbase, + ind_or_offs, + rnew_zaddress, + rnew_zpointer, + true, + stub->is_atomic(), + *stub->entry(), + *stub->continuation()); +} + +void ZBarrierSetAssembler::generate_c1_store_barrier_stub(LIR_Assembler* ce, + ZStoreBarrierStubC1* stub) const { + // Stub entry + __ bind(*stub->entry()); + + Label slow; + + LIR_Address* addr = stub->ref_addr()->as_address_ptr(); + assert(addr->index()->is_illegal() || addr->disp() == 0, "can't have both"); + Register rbase = addr->base()->as_pointer_register(); + RegisterOrConstant ind_or_offs = (addr->index()->is_illegal()) + ? 
(RegisterOrConstant)addr->disp() + : (RegisterOrConstant)addr->index()->as_pointer_register(); + Register new_zpointer = stub->new_zpointer()->as_register(); + + store_barrier_medium(ce->masm(), + rbase, + ind_or_offs, + new_zpointer, // temp + stub->is_atomic(), + *stub->continuation(), + slow); + + __ bind(slow); + + __ load_const_optimized(/*stub address*/ new_zpointer, stub->runtime_stub(), R0); + __ add(R0, ind_or_offs, rbase); // pass store address in R0 + __ mtctr(new_zpointer); + __ bctrl(); + + // Stub exit + __ b(*stub->continuation()); +} + #undef __ #define __ sasm-> @@ -360,8 +787,8 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* __ save_LR_CR(R0); // Load arguments back again from the stack. - __ ld(R3_ARG1, (intptr_t) -1 * BytesPerWord, R1_SP); // ref - __ ld(R4_ARG2, (intptr_t) -2 * BytesPerWord, R1_SP); // ref_addr + __ ld(R3_ARG1, -1 * BytesPerWord, R1_SP); // ref + __ ld(R4_ARG2, -2 * BytesPerWord, R1_SP); // ref_addr __ push_frame_reg_args(nbytes_save, R0); @@ -379,6 +806,32 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* __ block_comment("} c1_load_barrier_runtime_stub (zgc)"); } +void ZBarrierSetAssembler::generate_c1_store_barrier_runtime_stub(StubAssembler* sasm, + bool self_healing) const { + __ block_comment("c1_store_barrier_runtime_stub (zgc) {"); + + const int nbytes_save = MacroAssembler::num_volatile_regs * BytesPerWord; + __ save_volatile_gprs(R1_SP, -nbytes_save); + __ mr(R3_ARG1, R0); // store address + + __ save_LR_CR(R0); + __ push_frame_reg_args(nbytes_save, R0); + + if (self_healing) { + __ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr()); + } else { + __ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr()); + } + + __ pop_frame(); + __ restore_LR_CR(R3_RET); + __ restore_volatile_gprs(R1_SP, -nbytes_save); + + __ blr(); + + __ block_comment("} c1_store_barrier_runtime_stub (zgc)"); +} + #undef __ #endif // COMPILER1 @@ -406,8 +859,8 @@ class ZSaveLiveRegisters { int _frame_size; public: - ZSaveLiveRegisters(MacroAssembler *masm, ZLoadBarrierStubC2 *stub) - : _masm(masm), _reg_mask(stub->live()), _result_reg(stub->ref()) { + ZSaveLiveRegisters(MacroAssembler *masm, ZBarrierStubC2 *stub) + : _masm(masm), _reg_mask(stub->live()), _result_reg(stub->result()) { const int register_save_size = iterate_over_register_mask(ACTION_COUNT_ONLY) * BytesPerWord; _frame_size = align_up(register_save_size, frame::alignment_in_bytes) @@ -559,6 +1012,7 @@ class ZSetupArguments { #define __ masm-> void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const { + Assembler::InlineSkippedInstructionsCounter skipped_counter(masm); __ block_comment("generate_c2_load_barrier_stub (zgc) {"); __ bind(*stub->entry()); @@ -581,5 +1035,79 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z __ block_comment("} generate_c2_load_barrier_stub (zgc)"); } +void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm, ZStoreBarrierStubC2* stub) const { + Assembler::InlineSkippedInstructionsCounter skipped_counter(masm); + __ block_comment("ZStoreBarrierStubC2"); + + // Stub entry + __ bind(*stub->entry()); + + Label slow; + + Address addr = stub->ref_addr(); + Register rbase = addr.base(); + RegisterOrConstant ind_or_offs = (addr.index() == noreg) + ? 
(RegisterOrConstant)addr.disp() + : (RegisterOrConstant)addr.index(); + + if (!stub->is_native()) { + store_barrier_medium(masm, + rbase, + ind_or_offs, + stub->new_zpointer(), + stub->is_atomic(), + *stub->continuation(), + slow); + } + + __ bind(slow); + { + ZSaveLiveRegisters save_live_registers(masm, stub); + __ add(R3_ARG1, ind_or_offs, rbase); + if (stub->is_native()) { + __ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr(), R3_ARG1); + } else if (stub->is_atomic()) { + __ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr(), R3_ARG1); + } else { + __ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr(), R3_ARG1); + } + } + + // Stub exit + __ b(*stub->continuation()); +} + #undef __ #endif // COMPILER2 + +static uint16_t patch_barrier_relocation_value(int format) { + switch (format) { + case ZBarrierRelocationFormatLoadBadMask: + return (uint16_t)ZPointerLoadBadMask; + case ZBarrierRelocationFormatMarkBadMask: + return (uint16_t)ZPointerMarkBadMask; + case ZBarrierRelocationFormatStoreGoodBits: + return (uint16_t)ZPointerStoreGoodMask; + case ZBarrierRelocationFormatStoreBadMask: + return (uint16_t)ZPointerStoreBadMask; + default: + ShouldNotReachHere(); + return 0; + } +} + +void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format) { +#ifdef ASSERT + int inst = *(int*)addr; + if (format == ZBarrierRelocationFormatStoreGoodBits) { + assert(Assembler::is_li(inst) || Assembler::is_ori(inst) || Assembler::is_cmpli(inst), + "unexpected instruction 0x%04x", inst); + // Note: li uses sign extend, but these bits will get cleared by rldimi. + } else { + assert(Assembler::is_andi(inst), "unexpected instruction 0x%04x", inst); + } +#endif + // Patch the signed/unsigned 16 bit immediate field of the instruction. 
+ *(uint16_t*)(addr BIG_ENDIAN_ONLY(+2)) = patch_barrier_relocation_value(format); + ICache::ppc64_flush_icache_bytes(addr, BytesPerInstWord); +} diff --git a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp index 808edca6939ca..7aca78db7d0b3 100644 --- a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp +++ b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp @@ -32,17 +32,27 @@ #endif // COMPILER2 #ifdef COMPILER1 +class CodeStub; +class LIR_Address; class LIR_Assembler; class LIR_Opr; class StubAssembler; class ZLoadBarrierStubC1; +class ZStoreBarrierStubC1; #endif // COMPILER1 #ifdef COMPILER2 +class MachNode; class Node; class ZLoadBarrierStubC2; +class ZStoreBarrierStubC2; #endif // COMPILER2 +const int ZBarrierRelocationFormatLoadBadMask = 0; +const int ZBarrierRelocationFormatMarkBadMask = 1; +const int ZBarrierRelocationFormatStoreGoodBits = 2; +const int ZBarrierRelocationFormatStoreBadMask = 3; + class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { public: virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, @@ -50,12 +60,10 @@ class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { Register tmp1, Register tmp2, MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null = nullptr); -#ifdef ASSERT virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register base, RegisterOrConstant ind_or_offs, Register val, Register tmp1, Register tmp2, Register tmp3, MacroAssembler::PreservationLevel preservation_level); -#endif // ASSERT virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register src, Register dst, Register count, @@ -64,24 +72,102 @@ class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env, Register obj, Register tmp, Label& slowpath); - virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; } + virtual void check_oop(MacroAssembler *masm, Register obj, const char* msg); -#ifdef COMPILER1 - void generate_c1_load_barrier_test(LIR_Assembler* ce, - LIR_Opr ref) const; + virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_instruction_and_data_patch; } +#ifdef COMPILER1 void generate_c1_load_barrier_stub(LIR_Assembler* ce, ZLoadBarrierStubC1* stub) const; void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) const; + + void generate_c1_color(LIR_Assembler* ce, LIR_Opr ref) const; + void generate_c1_uncolor(LIR_Assembler* ce, LIR_Opr ref) const; + + void generate_c1_load_barrier(LIR_Assembler* ce, + LIR_Opr ref, + ZLoadBarrierStubC1* stub, + bool on_non_strong) const; + + void generate_c1_store_barrier(LIR_Assembler* ce, + LIR_Address* addr, + LIR_Opr new_zaddress, + LIR_Opr new_zpointer, + ZStoreBarrierStubC1* stub) const; + + void generate_c1_store_barrier_stub(LIR_Assembler* ce, + ZStoreBarrierStubC1* stub) const; + + void generate_c1_store_barrier_runtime_stub(StubAssembler* sasm, + bool self_healing) const; #endif // COMPILER1 #ifdef COMPILER2 OptoReg::Name refine_register(const Node* node, OptoReg::Name opto_reg) const; void generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const; + + void generate_c2_store_barrier_stub(MacroAssembler* masm, ZStoreBarrierStubC2* stub) const; #endif // COMPILER2 + + void 
store_barrier_fast(MacroAssembler* masm, + Register ref_base, + RegisterOrConstant ind_or_offset, + Register rnew_persistent, + Register rnew_transient, + bool in_nmethod, + bool is_atomic, + Label& medium_path, + Label& medium_path_continuation) const; + + void store_barrier_medium(MacroAssembler* masm, + Register ref_base, + RegisterOrConstant ind_or_offs, + Register tmp, + bool is_atomic, + Label& medium_path_continuation, + Label& slow_path) const; + + void load_copy_masks(MacroAssembler* masm, + Register load_bad_mask, + Register store_bad_mask, + Register store_good_mask, + bool dest_uninitialized) const; + void copy_load_at_fast(MacroAssembler* masm, + Register zpointer, + Register addr, + Register load_bad_mask, + Label& slow_path, + Label& continuation) const; + void copy_load_at_slow(MacroAssembler* masm, + Register zpointer, + Register addr, + Register tmp, + Label& slow_path, + Label& continuation) const; + void copy_store_at_fast(MacroAssembler* masm, + Register zpointer, + Register addr, + Register store_bad_mask, + Register store_good_mask, + Label& medium_path, + Label& continuation, + bool dest_uninitialized) const; + void copy_store_at_slow(MacroAssembler* masm, + Register addr, + Register tmp, + Label& medium_path, + Label& continuation, + bool dest_uninitialized) const; + + void generate_disjoint_oop_copy(MacroAssembler* masm, bool dest_uninitialized); + void generate_conjoint_oop_copy(MacroAssembler* masm, bool dest_uninitialized); + + void patch_barrier_relocation(address addr, int format); + + void patch_barriers() {} }; -#endif // CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP +#endif // CPU_PPC_GC_Z_ZBARRIERSETASSEMBLER_PPC_HPP diff --git a/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.hpp b/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.hpp index 85d112b437aa3..81c72747ac8cc 100644 --- a/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.hpp +++ b/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -26,10 +26,7 @@ #define CPU_PPC_GC_Z_ZGLOBALS_PPC_HPP #include "globalDefinitions_ppc.hpp" -const size_t ZPlatformHeapViews = 3; -const size_t ZPlatformCacheLineSize = DEFAULT_CACHE_LINE_SIZE; -size_t ZPlatformAddressOffsetBits(); -size_t ZPlatformAddressMetadataShift(); +const size_t ZPlatformCacheLineSize = DEFAULT_CACHE_LINE_SIZE; #endif // CPU_PPC_GC_Z_ZGLOBALS_PPC_HPP diff --git a/src/hotspot/cpu/ppc/gc/z/z_ppc.ad b/src/hotspot/cpu/ppc/gc/z/z_ppc.ad index a8ce64ed1d9c1..777e5a785a79d 100644 --- a/src/hotspot/cpu/ppc/gc/z/z_ppc.ad +++ b/src/hotspot/cpu/ppc/gc/z/z_ppc.ad @@ -1,5 +1,5 @@ // -// Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. // Copyright (c) 2021 SAP SE. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
// @@ -32,51 +32,73 @@ source_hpp %{ source %{ -static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, - Register tmp, uint8_t barrier_data) { - if (barrier_data == ZLoadBarrierElided) { - return; - } +#include "gc/z/zBarrierSetAssembler.hpp" - ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data); - __ ld(tmp, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread); - __ and_(tmp, tmp, ref); - __ bne_far(CCR0, *stub->entry(), MacroAssembler::bc_far_optimize_on_relocate); - __ bind(*stub->continuation()); +static void z_color(MacroAssembler& _masm, Register dst, Register src) { + assert_different_registers(dst, src); + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBits); + __ li(dst, barrier_Relocation::unpatched); // Load color bits. + if (src == noreg) { // noreg encodes null. + if (ZPointerLoadShift >= 16) { + __ rldicl(dst, dst, 0, 64 - ZPointerLoadShift); // Clear sign extension from li. + } + } else { + __ rldimi(dst, src, ZPointerLoadShift, 0); // Insert shifted pointer. + } } -static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, - Register tmp) { - ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, ZLoadBarrierStrong); - __ b(*stub->entry()); - __ bind(*stub->continuation()); +static void z_uncolor(MacroAssembler& _masm, Register ref) { + __ srdi(ref, ref, ZPointerLoadShift); } -static void z_compare_and_swap(MacroAssembler& _masm, const MachNode* node, - Register res, Register mem, Register oldval, Register newval, - Register tmp_xchg, Register tmp_mask, - bool weak, bool acquire) { - // z-specific load barrier requires strong CAS operations. - // Weak CAS operations are thus only emitted if the barrier is elided. - __ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem, - MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, NULL, true, - weak && node->barrier_data() == ZLoadBarrierElided); +static void check_color(MacroAssembler& _masm, Register ref, bool on_non_strong) { + int relocFormat = on_non_strong ? ZBarrierRelocationFormatMarkBadMask + : ZBarrierRelocationFormatLoadBadMask; + __ relocate(barrier_Relocation::spec(), relocFormat); + __ andi_(R0, ref, barrier_Relocation::unpatched); +} - if (node->barrier_data() != ZLoadBarrierElided) { - Label skip_barrier; +static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref) { + Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm); + if (node->barrier_data() == ZBarrierElided) { + z_uncolor(_masm, ref); + } else { + const bool on_non_strong = + ((node->barrier_data() & ZBarrierWeak) != 0) || + ((node->barrier_data() & ZBarrierPhantom) != 0); - __ ld(tmp_mask, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread); - __ and_(tmp_mask, tmp_mask, tmp_xchg); - __ beq(CCR0, skip_barrier); + check_color(_masm, ref, on_non_strong); - // CAS must have failed because pointer in memory is bad. 
- z_load_barrier_slow_path(_masm, node, Address(mem), tmp_xchg, res /* used as tmp */); + ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref); + __ bne_far(CCR0, *stub->entry(), MacroAssembler::bc_far_optimize_on_relocate); - __ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem, - MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, NULL, true, weak); + z_uncolor(_masm, ref); + __ bind(*stub->continuation()); + } +} - __ bind(skip_barrier); +static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Register ref_base, intptr_t disp, Register rnew_zaddress, Register rnew_zpointer, bool is_atomic) { + Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm); + if (node->barrier_data() == ZBarrierElided) { + z_color(_masm, rnew_zpointer, rnew_zaddress); + } else { + bool is_native = (node->barrier_data() & ZBarrierNative) != 0; + ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, Address(ref_base, disp), rnew_zaddress, rnew_zpointer, is_native, is_atomic); + ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler(); + bs_asm->store_barrier_fast(&_masm, ref_base, disp, rnew_zaddress, rnew_zpointer, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation()); } +} + +static void z_compare_and_swap(MacroAssembler& _masm, const MachNode* node, + Register res, Register mem, Register oldval, Register newval, + Register tmp1, Register tmp2, bool acquire) { + + Register rold_zpointer = tmp1, rnew_zpointer = tmp2; + z_store_barrier(_masm, node, mem, 0, newval, rnew_zpointer, true /* is_atomic */); + z_color(_masm, rold_zpointer, oldval); + __ cmpxchgd(CCR0, R0, rold_zpointer, rnew_zpointer, mem, + MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, nullptr, true, + false /* we could support weak, but benefit is questionable */); if (acquire) { if (support_IRIW_for_not_multiple_copy_atomic_cpu) { @@ -90,27 +112,16 @@ static void z_compare_and_swap(MacroAssembler& _masm, const MachNode* node, } static void z_compare_and_exchange(MacroAssembler& _masm, const MachNode* node, - Register res, Register mem, Register oldval, Register newval, Register tmp, - bool weak, bool acquire) { - // z-specific load barrier requires strong CAS operations. - // Weak CAS operations are thus only emitted if the barrier is elided. 
- __ cmpxchgd(CCR0, res, oldval, newval, mem, - MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, NULL, true, - weak && node->barrier_data() == ZLoadBarrierElided); - - if (node->barrier_data() != ZLoadBarrierElided) { - Label skip_barrier; - __ ld(tmp, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread); - __ and_(tmp, tmp, res); - __ beq(CCR0, skip_barrier); - - z_load_barrier_slow_path(_masm, node, Address(mem), res, tmp); - - __ cmpxchgd(CCR0, res, oldval, newval, mem, - MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, NULL, true, weak); - - __ bind(skip_barrier); - } + Register res, Register mem, Register oldval, Register newval, + Register tmp, bool acquire) { + + Register rold_zpointer = R0, rnew_zpointer = tmp; + z_store_barrier(_masm, node, mem, 0, newval, rnew_zpointer, true /* is_atomic */); + z_color(_masm, rold_zpointer, oldval); + __ cmpxchgd(CCR0, res, rold_zpointer, rnew_zpointer, mem, + MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, nullptr, true, + false /* we could support weak, but benefit is questionable */); + z_uncolor(_masm, res); if (acquire) { if (support_IRIW_for_not_multiple_copy_atomic_cpu) { @@ -125,114 +136,110 @@ static void z_compare_and_exchange(MacroAssembler& _masm, const MachNode* node, %} -instruct zLoadP(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0) +instruct zLoadP(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0) %{ match(Set dst (LoadP mem)); - effect(TEMP_DEF dst, TEMP tmp, KILL cr0); + effect(TEMP_DEF dst, KILL cr0); ins_cost(MEMORY_REF_COST); - predicate((UseZGC && n->as_Load()->barrier_data() != 0) + predicate((UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0) && (n->as_Load()->is_unordered() || followed_by_acquire(n))); format %{ "LD $dst, $mem" %} ins_encode %{ assert($mem$$index == 0, "sanity"); __ ld($dst$$Register, $mem$$disp, $mem$$base$$Register); - z_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data()); + z_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register); %} ins_pipe(pipe_class_default); %} // Load Pointer Volatile -instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0) +instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0) %{ match(Set dst (LoadP mem)); - effect(TEMP_DEF dst, TEMP tmp, KILL cr0); + effect(TEMP_DEF dst, KILL cr0); ins_cost(3 * MEMORY_REF_COST); // Predicate on instruction order is implicitly present due to the predicate of the cheaper zLoadP operation - predicate(UseZGC && n->as_Load()->barrier_data() != 0); + predicate(UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0); format %{ "LD acq $dst, $mem" %} ins_encode %{ __ ld($dst$$Register, $mem$$disp, $mem$$base$$Register); - z_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data()); + z_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register); // Uses the isync instruction as an acquire barrier. // This exploits the compare and the branch in the z load barrier (load, compare and branch, isync). 
+ if (barrier_data() == ZBarrierElided) __ twi_0($dst$$Register); __ isync(); %} ins_pipe(pipe_class_default); %} -instruct zCompareAndSwapP(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, - iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{ - match(Set res (CompareAndSwapP mem (Binary oldval newval))); - effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0); - - predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong) - && (((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*) n)->order() != MemNode::seqcst)); - - format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %} +// Store Pointer +instruct zStoreP(memoryAlg4 mem, iRegPsrc src, iRegPdst tmp, flagsRegCR0 cr0) +%{ + predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0); + match(Set mem (StoreP mem src)); + effect(TEMP tmp, KILL cr0); + ins_cost(2 * MEMORY_REF_COST); + format %{ "std $mem, $src\t# ptr" %} ins_encode %{ - z_compare_and_swap(_masm, this, - $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, - $tmp_xchg$$Register, $tmp_mask$$Register, - false /* weak */, false /* acquire */); + z_store_barrier(_masm, this, $mem$$base$$Register, $mem$$disp, $src$$Register, $tmp$$Register, false /* is_atomic */); + __ std($tmp$$Register, $mem$$disp, $mem$$base$$Register); %} ins_pipe(pipe_class_default); %} -instruct zCompareAndSwapP_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, - iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{ - match(Set res (CompareAndSwapP mem (Binary oldval newval))); - effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0); - - predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong) - && (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*) n)->order() == MemNode::seqcst)); - - format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %} +instruct zStorePNull(memoryAlg4 mem, immP_0 zero, iRegPdst tmp, flagsRegCR0 cr0) +%{ + predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0); + match(Set mem (StoreP mem zero)); + effect(TEMP tmp, KILL cr0); + ins_cost(MEMORY_REF_COST); + format %{ "std $mem, null\t# ptr" %} ins_encode %{ - z_compare_and_swap(_masm, this, - $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, - $tmp_xchg$$Register, $tmp_mask$$Register, - false /* weak */, true /* acquire */); + z_store_barrier(_masm, this, $mem$$base$$Register, $mem$$disp, noreg, $tmp$$Register, false /* is_atomic */); + __ std($tmp$$Register, $mem$$disp, $mem$$base$$Register); %} ins_pipe(pipe_class_default); %} -instruct zCompareAndSwapPWeak(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, - iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{ +instruct zCompareAndSwapP(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, + iRegPdst tmp1, iRegPdst tmp2, flagsRegCR0 cr0) %{ + match(Set res (CompareAndSwapP mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0); + effect(TEMP_DEF res, TEMP tmp1, TEMP tmp2, KILL cr0); - predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong) - && ((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*) n)->order() != MemNode::seqcst); + predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0) + && (((CompareAndSwapNode*)n)->order() != MemNode::acquire && 
((CompareAndSwapNode*) n)->order() != MemNode::seqcst)); - format %{ "weak CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %} + format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %} ins_encode %{ z_compare_and_swap(_masm, this, $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, - $tmp_xchg$$Register, $tmp_mask$$Register, - true /* weak */, false /* acquire */); + $tmp1$$Register, $tmp2$$Register, + false /* acquire */); %} ins_pipe(pipe_class_default); %} -instruct zCompareAndSwapPWeak_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, - iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{ +instruct zCompareAndSwapP_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, + iRegPdst tmp1, iRegPdst tmp2, flagsRegCR0 cr0) %{ + match(Set res (CompareAndSwapP mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0); + effect(TEMP_DEF res, TEMP tmp1, TEMP tmp2, KILL cr0); - predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong) + predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0) && (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*) n)->order() == MemNode::seqcst)); - format %{ "weak CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %} + format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %} ins_encode %{ z_compare_and_swap(_masm, this, $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, - $tmp_xchg$$Register, $tmp_mask$$Register, - true /* weak */, true /* acquire */); + $tmp1$$Register, $tmp2$$Register, + true /* acquire */); %} ins_pipe(pipe_class_default); %} @@ -242,7 +249,7 @@ instruct zCompareAndExchangeP(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegP match(Set res (CompareAndExchangeP mem (Binary oldval newval))); effect(TEMP_DEF res, TEMP tmp, KILL cr0); - predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong) + predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0) && ( ((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst @@ -252,7 +259,7 @@ instruct zCompareAndExchangeP(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegP ins_encode %{ z_compare_and_exchange(_masm, this, $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register, - false /* weak */, false /* acquire */); + false /* acquire */); %} ins_pipe(pipe_class_default); %} @@ -262,7 +269,7 @@ instruct zCompareAndExchangeP_acq(iRegPdst res, iRegPdst mem, iRegPsrc oldval, i match(Set res (CompareAndExchangeP mem (Binary oldval newval))); effect(TEMP_DEF res, TEMP tmp, KILL cr0); - predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong) + predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0) && ( ((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst @@ -272,7 +279,7 @@ instruct zCompareAndExchangeP_acq(iRegPdst res, iRegPdst mem, iRegPsrc oldval, i ins_encode %{ z_compare_and_exchange(_masm, this, $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register, - false /* weak */, true /* acquire */); + true /* acquire */); %} ins_pipe(pipe_class_default); %} @@ -281,12 +288,14 @@ instruct zGetAndSetP(iRegPdst res, iRegPdst mem, iRegPsrc newval, iRegPdst tmp, match(Set res (GetAndSetP mem newval)); 
effect(TEMP_DEF res, TEMP tmp, KILL cr0); - predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0); format %{ "GetAndSetP $res, $mem, $newval" %} ins_encode %{ - __ getandsetd($res$$Register, $newval$$Register, $mem$$Register, MacroAssembler::cmpxchgx_hint_atomic_update()); - z_load_barrier(_masm, this, Address(noreg, (intptr_t) 0), $res$$Register, $tmp$$Register, barrier_data()); + Register rnew_zpointer = $tmp$$Register, result = $res$$Register; + z_store_barrier(_masm, this, $mem$$Register, 0, $newval$$Register, rnew_zpointer, true /* is_atomic */); + __ getandsetd(result, rnew_zpointer, $mem$$Register, MacroAssembler::cmpxchgx_hint_atomic_update()); + z_uncolor(_masm, result); if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ isync(); diff --git a/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp b/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp index c14d05d284a65..8ac5c39b8312e 100644 --- a/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp +++ b/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp @@ -59,6 +59,4 @@ const bool CCallingConventionRequiresIntsAsLongs = true; // Define the condition to use this -XX flag. #define USE_POLL_BIT_ONLY UseSIGTRAP -#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false - #endif // CPU_PPC_GLOBALDEFINITIONS_PPC_HPP diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp index 8dc0b627d930a..cfe69cffa82f1 100644 --- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp +++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp @@ -921,7 +921,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state, // object - Address of the object to be locked. // void InterpreterMacroAssembler::lock_object(Register monitor, Register object) { - if (UseHeavyMonitors) { + if (LockingMode == LM_MONITOR) { call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor); } else { // template code: @@ -1037,7 +1037,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) { // // Throw IllegalMonitorException if object is not locked by current thread. void InterpreterMacroAssembler::unlock_object(Register monitor) { - if (UseHeavyMonitors) { + if (LockingMode == LM_MONITOR) { call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor); } else { diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp index 3ca2ed3ff69b2..db268481427f2 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp @@ -2658,7 +2658,7 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register andi_(temp, displaced_header, markWord::monitor_value); bne(CCR0, object_has_monitor); - if (!UseHeavyMonitors) { + if (LockingMode != LM_MONITOR) { // Set displaced_header to be (markWord of object | UNLOCK_VALUE). ori(displaced_header, displaced_header, markWord::unlocked_value); @@ -2776,7 +2776,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe } #endif - if (!UseHeavyMonitors) { + if (LockingMode != LM_MONITOR) { // Find the lock address and load the displaced header from the stack. 
ld(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box); @@ -2792,7 +2792,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe andi_(R0, current_header, markWord::monitor_value); bne(CCR0, object_has_monitor); - if (!UseHeavyMonitors) { + if (LockingMode != LM_MONITOR) { // Check if it is still a light weight lock, this is is true if we see // the stack address of the basicLock in the markWord of the object. // Cmpxchg sets flag to cmpd(current_header, box). diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp index 69972bf9eac68..f81d49684c992 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp @@ -354,7 +354,7 @@ inline void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorat Register tmp1, Register tmp2, Register tmp3, MacroAssembler::PreservationLevel preservation_level) { assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL | - ON_UNKNOWN_OOP_REF)) == 0, "unsupported decorator"); + ON_UNKNOWN_OOP_REF | IS_DEST_UNINITIALIZED)) == 0, "unsupported decorator"); BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); bool as_raw = (decorators & AS_RAW) != 0; decorators = AccessInternal::decorator_fixup(decorators, type); diff --git a/src/hotspot/cpu/ppc/relocInfo_ppc.hpp b/src/hotspot/cpu/ppc/relocInfo_ppc.hpp index f3c14c800c30e..e7a23d2de9144 100644 --- a/src/hotspot/cpu/ppc/relocInfo_ppc.hpp +++ b/src/hotspot/cpu/ppc/relocInfo_ppc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2018 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -39,7 +39,8 @@ format_width = 0 #else // Except narrow oops in 64-bits VM. - format_width = 1 + // Must be at least 2 for ZGC GC barrier patching. + format_width = 2 #endif }; diff --git a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp index a20baaee5c865..ad81348e7faeb 100644 --- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp +++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp @@ -47,6 +47,10 @@ #include "runtime/vm_version.hpp" #include "utilities/align.hpp" #include "utilities/powerOfTwo.hpp" +#if INCLUDE_ZGC +#include "gc/x/xBarrierSetAssembler.hpp" +#include "gc/z/zBarrierSetAssembler.hpp" +#endif // Declaration and definition of StubGenerator (no .hpp file). // For a more detailed description of the stub routine structure @@ -61,9 +65,9 @@ #endif #if defined(ABI_ELFv2) -#define STUB_ENTRY(name) StubRoutines::name() +#define STUB_ENTRY(name) StubRoutines::name #else -#define STUB_ENTRY(name) ((FunctionDescriptor*)StubRoutines::name())->entry() +#define STUB_ENTRY(name) ((FunctionDescriptor*)StubRoutines::name)->entry() #endif class StubGenerator: public StubCodeGenerator { @@ -1182,8 +1186,8 @@ class StubGenerator: public StubCodeGenerator { Register tmp3 = R8_ARG6; address nooverlap_target = aligned ? - STUB_ENTRY(arrayof_jbyte_disjoint_arraycopy) : - STUB_ENTRY(jbyte_disjoint_arraycopy); + STUB_ENTRY(arrayof_jbyte_disjoint_arraycopy()) : + STUB_ENTRY(jbyte_disjoint_arraycopy()); array_overlap_test(nooverlap_target, 0); // Do reverse copy. 
We assume the case of actual overlap is rare enough @@ -1454,8 +1458,8 @@ class StubGenerator: public StubCodeGenerator { Register tmp3 = R8_ARG6; address nooverlap_target = aligned ? - STUB_ENTRY(arrayof_jshort_disjoint_arraycopy) : - STUB_ENTRY(jshort_disjoint_arraycopy); + STUB_ENTRY(arrayof_jshort_disjoint_arraycopy()) : + STUB_ENTRY(jshort_disjoint_arraycopy()); array_overlap_test(nooverlap_target, 1); @@ -1767,8 +1771,8 @@ class StubGenerator: public StubCodeGenerator { address start = __ function_entry(); assert_positive_int(R5_ARG3); address nooverlap_target = aligned ? - STUB_ENTRY(arrayof_jint_disjoint_arraycopy) : - STUB_ENTRY(jint_disjoint_arraycopy); + STUB_ENTRY(arrayof_jint_disjoint_arraycopy()) : + STUB_ENTRY(jint_disjoint_arraycopy()); array_overlap_test(nooverlap_target, 2); { @@ -2024,8 +2028,8 @@ class StubGenerator: public StubCodeGenerator { address start = __ function_entry(); assert_positive_int(R5_ARG3); address nooverlap_target = aligned ? - STUB_ENTRY(arrayof_jlong_disjoint_arraycopy) : - STUB_ENTRY(jlong_disjoint_arraycopy); + STUB_ENTRY(arrayof_jlong_disjoint_arraycopy()) : + STUB_ENTRY(jlong_disjoint_arraycopy()); array_overlap_test(nooverlap_target, 3); { @@ -2054,8 +2058,10 @@ class StubGenerator: public StubCodeGenerator { address start = __ function_entry(); assert_positive_int(R5_ARG3); address nooverlap_target = aligned ? - STUB_ENTRY(arrayof_oop_disjoint_arraycopy) : - STUB_ENTRY(oop_disjoint_arraycopy); + STUB_ENTRY(arrayof_oop_disjoint_arraycopy(dest_uninitialized)) : + STUB_ENTRY(oop_disjoint_arraycopy(dest_uninitialized)); + + array_overlap_test(nooverlap_target, UseCompressedOops ? 2 : 3); DecoratorSet decorators = IN_HEAP | IS_ARRAY; if (dest_uninitialized) { @@ -2069,10 +2075,14 @@ class StubGenerator: public StubCodeGenerator { bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg); if (UseCompressedOops) { - array_overlap_test(nooverlap_target, 2); generate_conjoint_int_copy_core(aligned); } else { - array_overlap_test(nooverlap_target, 3); +#if INCLUDE_ZGC + if (UseZGC && ZGenerational) { + ZBarrierSetAssembler *zbs = (ZBarrierSetAssembler*)bs; + zbs->generate_conjoint_oop_copy(_masm, dest_uninitialized); + } else +#endif generate_conjoint_long_copy_core(aligned); } @@ -2110,6 +2120,12 @@ class StubGenerator: public StubCodeGenerator { if (UseCompressedOops) { generate_disjoint_int_copy_core(aligned); } else { +#if INCLUDE_ZGC + if (UseZGC && ZGenerational) { + ZBarrierSetAssembler *zbs = (ZBarrierSetAssembler*)bs; + zbs->generate_disjoint_oop_copy(_masm, dest_uninitialized); + } else +#endif generate_disjoint_long_copy_core(aligned); } @@ -2222,6 +2238,13 @@ class StubGenerator: public StubCodeGenerator { __ stw(R10_oop, R8_offset, R4_to); } else { __ bind(store_null); +#if INCLUDE_ZGC + if (UseZGC && ZGenerational) { + __ store_heap_oop(R10_oop, R8_offset, R4_to, R11_scratch1, R12_tmp, noreg, + MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS, + dest_uninitialized ? 
IS_DEST_UNINITIALIZED : 0); + } else +#endif __ std(R10_oop, R8_offset, R4_to); } @@ -2231,6 +2254,14 @@ class StubGenerator: public StubCodeGenerator { // ======== loop entry is here ======== __ bind(load_element); +#if INCLUDE_ZGC + if (UseZGC && ZGenerational) { + __ load_heap_oop(R10_oop, R8_offset, R3_from, + R11_scratch1, R12_tmp, + MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS, + 0, &store_null); + } else +#endif __ load_heap_oop(R10_oop, R8_offset, R3_from, R11_scratch1, R12_tmp, MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS, @@ -3136,18 +3167,18 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", true); StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", - STUB_ENTRY(jbyte_arraycopy), - STUB_ENTRY(jshort_arraycopy), - STUB_ENTRY(jint_arraycopy), - STUB_ENTRY(jlong_arraycopy)); + STUB_ENTRY(jbyte_arraycopy()), + STUB_ENTRY(jshort_arraycopy()), + STUB_ENTRY(jint_arraycopy()), + STUB_ENTRY(jlong_arraycopy())); StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", - STUB_ENTRY(jbyte_arraycopy), - STUB_ENTRY(jshort_arraycopy), - STUB_ENTRY(jint_arraycopy), - STUB_ENTRY(oop_arraycopy), - STUB_ENTRY(oop_disjoint_arraycopy), - STUB_ENTRY(jlong_arraycopy), - STUB_ENTRY(checkcast_arraycopy)); + STUB_ENTRY(jbyte_arraycopy()), + STUB_ENTRY(jshort_arraycopy()), + STUB_ENTRY(jint_arraycopy()), + STUB_ENTRY(oop_arraycopy()), + STUB_ENTRY(oop_disjoint_arraycopy()), + STUB_ENTRY(jlong_arraycopy()), + STUB_ENTRY(checkcast_arraycopy())); // fill routines #ifdef COMPILER2 diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp index 161a8a7376f15..b514f8e115ea6 100644 --- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp @@ -858,7 +858,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch __ decode_heap_oop(dest->as_register()); } - if (!UseZGC) { + if (!(UseZGC && !ZGenerational)) { // Load barrier has not yet been applied, so ZGC can't verify the oop here __ verify_oop(dest->as_register()); } @@ -1264,10 +1264,13 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { if (UseCompressedOops) { Register tmp1 = op->tmp1()->as_register(); assert(op->tmp1()->is_valid(), "must be"); + Register tmp2 = op->tmp2()->as_register(); + assert(op->tmp2()->is_valid(), "must be"); + __ encode_heap_oop(tmp1, cmpval); cmpval = tmp1; - __ encode_heap_oop(t1, newval); - newval = t1; + __ encode_heap_oop(tmp2, newval); + newval = tmp2; caswu(addr, newval, cmpval); } else { casl(addr, newval, cmpval); @@ -1277,6 +1280,11 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { } else { casl(addr, newval, cmpval); } + + if (op->result_opr()->is_valid()) { + assert(op->result_opr()->is_register(), "need a register"); + __ mv(as_reg(op->result_opr()), t0); // cas result in t0, and 0 for success + } } void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) { diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp index 69b6f200f4b11..e65a7b72c4910 100644 --- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp @@ -135,7 +135,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_ if (LockingMode == LM_LIGHTWEIGHT) { 
ld(hdr, Address(obj, oopDesc::mark_offset_in_bytes())); - andi(t0, hdr, markWord::monitor_value); + test_bit(t0, hdr, exact_log2(markWord::monitor_value)); bnez(t0, slow_case, /* is_far */ true); fast_unlock(obj, hdr, t0, t1, slow_case); } else if (LockingMode == LM_LEGACY) { diff --git a/src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp b/src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp index b8e61ff3a1df9..3c5e780f40991 100644 --- a/src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp +++ b/src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp @@ -36,6 +36,9 @@ #include "runtime/registerMap.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" +#if INCLUDE_JVMCI +#include "jvmci/jvmciRuntime.hpp" +#endif static int slow_path_size(nmethod* nm) { // The slow path code is out of line with C2. @@ -57,40 +60,67 @@ static int entry_barrier_offset(nmethod* nm) { return 0; } -class NativeNMethodBarrier: public NativeInstruction { - address instruction_address() const { return addr_at(0); } +class NativeNMethodBarrier { + address _instruction_address; + int* _guard_addr; + nmethod* _nm; + + address instruction_address() const { return _instruction_address; } + + int *guard_addr() { + return _guard_addr; + } int local_guard_offset(nmethod* nm) { // It's the last instruction return (-entry_barrier_offset(nm)) - 4; } - int *guard_addr(nmethod* nm) { - if (nm->is_compiled_by_c2()) { - // With c2 compiled code, the guard is out-of-line in a stub - // We find it using the RelocIterator. - RelocIterator iter(nm); - while (iter.next()) { - if (iter.type() == relocInfo::entry_guard_type) { - entry_guard_Relocation* const reloc = iter.entry_guard_reloc(); - return reinterpret_cast<int*>(reloc->addr()); +public: + NativeNMethodBarrier(nmethod* nm): _nm(nm) { +#if INCLUDE_JVMCI + if (nm->is_compiled_by_jvmci()) { + address pc = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset(); + RelocIterator iter(nm, pc, pc + 4); + guarantee(iter.next(), "missing relocs"); + guarantee(iter.type() == relocInfo::section_word_type, "unexpected reloc"); + + _guard_addr = (int*) iter.section_word_reloc()->target(); + _instruction_address = pc; + } else +#endif + { + _instruction_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm); + if (nm->is_compiled_by_c2()) { + // With c2 compiled code, the guard is out-of-line in a stub + // We find it using the RelocIterator. + RelocIterator iter(nm); + while (iter.next()) { + if (iter.type() == relocInfo::entry_guard_type) { + entry_guard_Relocation* const reloc = iter.entry_guard_reloc(); + _guard_addr = reinterpret_cast<int*>(reloc->addr()); + return; + } + } + ShouldNotReachHere(); } + _guard_addr = reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm)); } - ShouldNotReachHere(); - } - return reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm)); } -public: - int get_value(nmethod* nm) { - return Atomic::load_acquire(guard_addr(nm)); + int get_value() { + return Atomic::load_acquire(guard_addr()); } - void set_value(nmethod* nm, int value) { - Atomic::release_store(guard_addr(nm), value); + void set_value(int value) { + Atomic::release_store(guard_addr(), value); } - void verify() const; + bool check_barrier(err_msg& msg) const; + void verify() const { + err_msg msg("%s", ""); + assert(check_barrier(msg), "%s", msg.buffer()); + } }; // Store the instruction bitmask, bits and name for checking the barrier.
@@ -112,16 +142,17 @@ static const struct CheckInsn barrierInsn[] = { // The encodings must match the instructions emitted by // BarrierSetAssembler::nmethod_entry_barrier. The matching ignores the specific // register numbers and immediate values in the encoding. -void NativeNMethodBarrier::verify() const { +bool NativeNMethodBarrier::check_barrier(err_msg& msg) const { intptr_t addr = (intptr_t) instruction_address(); for(unsigned int i = 0; i < sizeof(barrierInsn)/sizeof(struct CheckInsn); i++ ) { uint32_t inst = *((uint32_t*) addr); if ((inst & barrierInsn[i].mask) != barrierInsn[i].bits) { - tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", addr, inst); - fatal("not an %s instruction.", barrierInsn[i].name); + msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x not an %s instruction", addr, inst, barrierInsn[i].name); + return false; } addr += 4; } + return true; } @@ -164,13 +195,6 @@ void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) { new_frame->pc = SharedRuntime::get_handle_wrong_method_stub(); } -static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) { - address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm); - NativeNMethodBarrier* barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address); - debug_only(barrier->verify()); - return barrier; -} - void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) { if (!supports_entry_barrier(nm)) { return; } @@ -187,8 +211,8 @@ void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) { bs_asm->increment_patching_epoch(); } - NativeNMethodBarrier* barrier = native_nmethod_barrier(nm); - barrier->set_value(nm, value); + NativeNMethodBarrier barrier(nm); + barrier.set_value(value); } int BarrierSetNMethod::guard_value(nmethod* nm) { @@ -196,6 +220,13 @@ int BarrierSetNMethod::guard_value(nmethod* nm) { return disarmed_guard_value(); } - NativeNMethodBarrier* barrier = native_nmethod_barrier(nm); - return barrier->get_value(nm); + NativeNMethodBarrier barrier(nm); + return barrier.get_value(); +} + +#if INCLUDE_JVMCI +bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) { + NativeNMethodBarrier barrier(nm); + return barrier.check_barrier(msg); } +#endif diff --git a/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp new file mode 100644 index 0000000000000..c48eac944c129 --- /dev/null +++ b/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp @@ -0,0 +1,458 @@ +/* + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "code/codeBlob.hpp" +#include "code/vmreg.inline.hpp" +#include "gc/x/xBarrier.inline.hpp" +#include "gc/x/xBarrierSet.hpp" +#include "gc/x/xBarrierSetAssembler.hpp" +#include "gc/x/xBarrierSetRuntime.hpp" +#include "gc/x/xThreadLocalData.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/sharedRuntime.hpp" +#include "utilities/macros.hpp" +#ifdef COMPILER1 +#include "c1/c1_LIRAssembler.hpp" +#include "c1/c1_MacroAssembler.hpp" +#include "gc/x/c1/xBarrierSetC1.hpp" +#endif // COMPILER1 +#ifdef COMPILER2 +#include "gc/x/c2/xBarrierSetC2.hpp" +#endif // COMPILER2 + +#ifdef PRODUCT +#define BLOCK_COMMENT(str) /* nothing */ +#else +#define BLOCK_COMMENT(str) __ block_comment(str) +#endif + +#undef __ +#define __ masm-> + +void XBarrierSetAssembler::load_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + Register dst, + Address src, + Register tmp1, + Register tmp2) { + if (!XBarrierSet::barrier_needed(decorators, type)) { + // Barrier not needed + BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2); + return; + } + + assert_different_registers(t1, src.base()); + assert_different_registers(t0, t1, dst); + + Label done; + + // Load bad mask into temp register. + __ la(t0, src); + __ ld(t1, address_bad_mask_from_thread(xthread)); + __ ld(dst, Address(t0)); + + // Test reference against bad mask. If mask bad, then we need to fix it up. + __ andr(t1, dst, t1); + __ beqz(t1, done); + + __ enter(); + + __ push_call_clobbered_registers_except(RegSet::of(dst)); + + if (c_rarg0 != dst) { + __ mv(c_rarg0, dst); + } + + __ mv(c_rarg1, t0); + + __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); + + // Make sure dst has the return value. + if (dst != x10) { + __ mv(dst, x10); + } + + __ pop_call_clobbered_registers_except(RegSet::of(dst)); + __ leave(); + + __ bind(done); +} + +#ifdef ASSERT + +void XBarrierSetAssembler::store_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + Address dst, + Register val, + Register tmp1, + Register tmp2, + Register tmp3) { + // Verify value + if (is_reference_type(type)) { + // Note that src could be noreg, which means we + // are storing null and can skip verification. + if (val != noreg) { + Label done; + + // tmp1, tmp2 and tmp3 are often set to noreg. + RegSet savedRegs = RegSet::of(t0); + __ push_reg(savedRegs, sp); + + __ ld(t0, address_bad_mask_from_thread(xthread)); + __ andr(t0, val, t0); + __ beqz(t0, done); + __ stop("Verify oop store failed"); + __ should_not_reach_here(); + __ bind(done); + __ pop_reg(savedRegs, sp); + } + } + + // Store value + BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, noreg); +} + +#endif // ASSERT + +void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, + DecoratorSet decorators, + bool is_oop, + Register src, + Register dst, + Register count, + RegSet saved_regs) { + if (!is_oop) { + // Barrier not needed + return; + } + + BLOCK_COMMENT("XBarrierSetAssembler::arraycopy_prologue {"); + + assert_different_registers(src, count, t0); + + __ push_reg(saved_regs, sp); + + if (count == c_rarg0 && src == c_rarg1) { + // exactly backwards!! 
+ __ xorr(c_rarg0, c_rarg0, c_rarg1); + __ xorr(c_rarg1, c_rarg0, c_rarg1); + __ xorr(c_rarg0, c_rarg0, c_rarg1); + } else { + __ mv(c_rarg0, src); + __ mv(c_rarg1, count); + } + + __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2); + + __ pop_reg(saved_regs, sp); + + BLOCK_COMMENT("} XBarrierSetAssembler::arraycopy_prologue"); +} + +void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, + Register jni_env, + Register robj, + Register tmp, + Label& slowpath) { + BLOCK_COMMENT("XBarrierSetAssembler::try_resolve_jobject_in_native {"); + + assert_different_registers(jni_env, robj, tmp); + + // Resolve jobject + BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath); + + // Compute the offset of address bad mask from the field of jni_environment + long int bad_mask_relative_offset = (long int) (in_bytes(XThreadLocalData::address_bad_mask_offset()) - + in_bytes(JavaThread::jni_environment_offset())); + + // Load the address bad mask + __ ld(tmp, Address(jni_env, bad_mask_relative_offset)); + + // Check address bad mask + __ andr(tmp, robj, tmp); + __ bnez(tmp, slowpath); + + BLOCK_COMMENT("} XBarrierSetAssembler::try_resolve_jobject_in_native"); +} + +#ifdef COMPILER2 + +OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) { + if (!OptoReg::is_reg(opto_reg)) { + return OptoReg::Bad; + } + + const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); + if (vm_reg->is_FloatRegister()) { + return opto_reg & ~1; + } + + return opto_reg; +} + +#undef __ +#define __ _masm-> + +class XSaveLiveRegisters { +private: + MacroAssembler* const _masm; + RegSet _gp_regs; + FloatRegSet _fp_regs; + VectorRegSet _vp_regs; + +public: + void initialize(XLoadBarrierStubC2* stub) { + // Record registers that needs to be saved/restored + RegMaskIterator rmi(stub->live()); + while (rmi.has_next()) { + const OptoReg::Name opto_reg = rmi.next(); + if (OptoReg::is_reg(opto_reg)) { + const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); + if (vm_reg->is_Register()) { + _gp_regs += RegSet::of(vm_reg->as_Register()); + } else if (vm_reg->is_FloatRegister()) { + _fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister()); + } else if (vm_reg->is_VectorRegister()) { + const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~(VectorRegister::max_slots_per_register - 1)); + _vp_regs += VectorRegSet::of(vm_reg_base->as_VectorRegister()); + } else { + fatal("Unknown register type"); + } + } + } + + // Remove C-ABI SOE registers, tmp regs and _ref register that will be updated + _gp_regs -= RegSet::range(x18, x27) + RegSet::of(x2) + RegSet::of(x8, x9) + RegSet::of(x5, stub->ref()); + } + + XSaveLiveRegisters(MacroAssembler* masm, XLoadBarrierStubC2* stub) : + _masm(masm), + _gp_regs(), + _fp_regs(), + _vp_regs() { + // Figure out what registers to save/restore + initialize(stub); + + // Save registers + __ push_reg(_gp_regs, sp); + __ push_fp(_fp_regs, sp); + __ push_v(_vp_regs, sp); + } + + ~XSaveLiveRegisters() { + // Restore registers + __ pop_v(_vp_regs, sp); + __ pop_fp(_fp_regs, sp); + __ pop_reg(_gp_regs, sp); + } +}; + +class XSetupArguments { +private: + MacroAssembler* const _masm; + const Register _ref; + const Address _ref_addr; + +public: + XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) : + _masm(masm), + _ref(stub->ref()), + _ref_addr(stub->ref_addr()) { + + // Setup arguments + if (_ref_addr.base() == noreg) { + // No self healing + if (_ref != c_rarg0) { + __ mv(c_rarg0, _ref); + } + __ mv(c_rarg1, 
zr); + } else { + // Self healing + if (_ref == c_rarg0) { + // _ref is already at correct place + __ la(c_rarg1, _ref_addr); + } else if (_ref != c_rarg1) { + // _ref is in wrong place, but not in c_rarg1, so fix it first + __ la(c_rarg1, _ref_addr); + __ mv(c_rarg0, _ref); + } else if (_ref_addr.base() != c_rarg0) { + assert(_ref == c_rarg1, "Mov ref first, vacating c_rarg0"); + __ mv(c_rarg0, _ref); + __ la(c_rarg1, _ref_addr); + } else { + assert(_ref == c_rarg1, "Need to vacate c_rarg1 and _ref_addr is using c_rarg0"); + if (_ref_addr.base() == c_rarg0) { + __ mv(t1, c_rarg1); + __ la(c_rarg1, _ref_addr); + __ mv(c_rarg0, t1); + } else { + ShouldNotReachHere(); + } + } + } + } + + ~XSetupArguments() { + // Transfer result + if (_ref != x10) { + __ mv(_ref, x10); + } + } +}; + +#undef __ +#define __ masm-> + +void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const { + BLOCK_COMMENT("XLoadBarrierStubC2"); + + // Stub entry + __ bind(*stub->entry()); + + { + XSaveLiveRegisters save_live_registers(masm, stub); + XSetupArguments setup_arguments(masm, stub); + + Address target(stub->slow_path()); + __ relocate(target.rspec(), [&] { + int32_t offset; + __ la_patchable(t0, target, offset); + __ jalr(x1, t0, offset); + }); + } + + // Stub exit + __ j(*stub->continuation()); +} + +#endif // COMPILER2 + +#ifdef COMPILER1 +#undef __ +#define __ ce->masm()-> + +void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, + LIR_Opr ref) const { + assert_different_registers(xthread, ref->as_register(), t1); + __ ld(t1, address_bad_mask_from_thread(xthread)); + __ andr(t1, t1, ref->as_register()); +} + +void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, + XLoadBarrierStubC1* stub) const { + // Stub entry + __ bind(*stub->entry()); + + Register ref = stub->ref()->as_register(); + Register ref_addr = noreg; + Register tmp = noreg; + + if (stub->tmp()->is_valid()) { + // Load address into tmp register + ce->leal(stub->ref_addr(), stub->tmp()); + ref_addr = tmp = stub->tmp()->as_pointer_register(); + } else { + // Address already in register + ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register(); + } + + assert_different_registers(ref, ref_addr, noreg); + + // Save x10 unless it is the result or tmp register + // Set up SP to accommodate parameters and maybe x10. 
+ if (ref != x10 && tmp != x10) { + __ sub(sp, sp, 32); + __ sd(x10, Address(sp, 16)); + } else { + __ sub(sp, sp, 16); + } + + // Setup arguments and call runtime stub + ce->store_parameter(ref_addr, 1); + ce->store_parameter(ref, 0); + + __ far_call(stub->runtime_stub()); + + // Verify result + __ verify_oop(x10); + + + // Move result into place + if (ref != x10) { + __ mv(ref, x10); + } + + // Restore x10 unless it is the result or tmp register + if (ref != x10 && tmp != x10) { + __ ld(x10, Address(sp, 16)); + __ add(sp, sp, 32); + } else { + __ add(sp, sp, 16); + } + + // Stub exit + __ j(*stub->continuation()); +} + +#undef __ +#define __ sasm-> + +void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, + DecoratorSet decorators) const { + __ prologue("zgc_load_barrier stub", false); + + __ push_call_clobbered_registers_except(RegSet::of(x10)); + + // Setup arguments + __ load_parameter(0, c_rarg0); + __ load_parameter(1, c_rarg1); + + __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); + + __ pop_call_clobbered_registers_except(RegSet::of(x10)); + + __ epilogue(); +} + +#endif // COMPILER1 + +#undef __ +#define __ masm-> + +void XBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) { + // Check if mask is good. + // verifies that XAddressBadMask & obj == 0 + __ ld(tmp2, Address(xthread, XThreadLocalData::address_bad_mask_offset())); + __ andr(tmp1, obj, tmp2); + __ bnez(tmp1, error); + + BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error); +} + +#undef __ diff --git a/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.hpp b/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.hpp new file mode 100644 index 0000000000000..cbf5077999bfb --- /dev/null +++ b/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.hpp @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef CPU_RISCV_GC_X_XBARRIERSETASSEMBLER_RISCV_HPP +#define CPU_RISCV_GC_X_XBARRIERSETASSEMBLER_RISCV_HPP + +#include "code/vmreg.hpp" +#include "oops/accessDecorators.hpp" +#ifdef COMPILER2 +#include "opto/optoreg.hpp" +#endif // COMPILER2 + +#ifdef COMPILER1 +class LIR_Assembler; +class LIR_Opr; +class StubAssembler; +#endif // COMPILER1 + +#ifdef COMPILER2 +class Node; +#endif // COMPILER2 + +#ifdef COMPILER1 +class XLoadBarrierStubC1; +#endif // COMPILER1 + +#ifdef COMPILER2 +class XLoadBarrierStubC2; +#endif // COMPILER2 + +class XBarrierSetAssembler : public XBarrierSetAssemblerBase { +public: + virtual void load_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + Register dst, + Address src, + Register tmp1, + Register tmp2); + +#ifdef ASSERT + virtual void store_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + Address dst, + Register val, + Register tmp1, + Register tmp2, + Register tmp3); +#endif // ASSERT + + virtual void arraycopy_prologue(MacroAssembler* masm, + DecoratorSet decorators, + bool is_oop, + Register src, + Register dst, + Register count, + RegSet saved_regs); + + virtual void try_resolve_jobject_in_native(MacroAssembler* masm, + Register jni_env, + Register robj, + Register tmp, + Label& slowpath); + + virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; } + +#ifdef COMPILER1 + void generate_c1_load_barrier_test(LIR_Assembler* ce, + LIR_Opr ref) const; + + void generate_c1_load_barrier_stub(LIR_Assembler* ce, + XLoadBarrierStubC1* stub) const; + + void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, + DecoratorSet decorators) const; +#endif // COMPILER1 + +#ifdef COMPILER2 + OptoReg::Name refine_register(const Node* node, + OptoReg::Name opto_reg); + + void generate_c2_load_barrier_stub(MacroAssembler* masm, + XLoadBarrierStubC2* stub) const; +#endif // COMPILER2 + + void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error); +}; + +#endif // CPU_RISCV_GC_X_XBARRIERSETASSEMBLER_RISCV_HPP diff --git a/src/hotspot/cpu/riscv/gc/z/zGlobals_riscv.cpp b/src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.cpp similarity index 98% rename from src/hotspot/cpu/riscv/gc/z/zGlobals_riscv.cpp rename to src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.cpp index ddf88da646a27..602dab5674738 100644 --- a/src/hotspot/cpu/riscv/gc/z/zGlobals_riscv.cpp +++ b/src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.cpp @@ -26,7 +26,7 @@ #include "precompiled.hpp" #include "gc/shared/gcLogPrecious.hpp" #include "gc/shared/gc_globals.hpp" -#include "gc/z/zGlobals.hpp" +#include "gc/x/xGlobals.hpp" #include "runtime/globals.hpp" #include "runtime/os.hpp" #include "utilities/globalDefinitions.hpp" @@ -198,15 +198,15 @@ static size_t probe_valid_max_address_bit() { #endif // LINUX } -size_t ZPlatformAddressOffsetBits() { +size_t XPlatformAddressOffsetBits() { const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1; const size_t max_address_offset_bits = valid_max_address_offset_bits - 3; const size_t min_address_offset_bits = max_address_offset_bits - 2; - const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio); + const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio); const size_t address_offset_bits = log2i_exact(address_offset); return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); } -size_t ZPlatformAddressMetadataShift() { - return 
ZPlatformAddressOffsetBits(); +size_t XPlatformAddressMetadataShift() { + return XPlatformAddressOffsetBits(); } diff --git a/src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.hpp b/src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.hpp new file mode 100644 index 0000000000000..836dc7aac0d1d --- /dev/null +++ b/src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.hpp @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_RISCV_GC_X_XGLOBALS_RISCV_HPP +#define CPU_RISCV_GC_X_XGLOBALS_RISCV_HPP + +const size_t XPlatformHeapViews = 3; +const size_t XPlatformCacheLineSize = 64; + +size_t XPlatformAddressOffsetBits(); +size_t XPlatformAddressMetadataShift(); + +#endif // CPU_RISCV_GC_X_XGLOBALS_RISCV_HPP diff --git a/src/hotspot/cpu/riscv/gc/x/x_riscv64.ad b/src/hotspot/cpu/riscv/gc/x/x_riscv64.ad new file mode 100644 index 0000000000000..73d337bc4845e --- /dev/null +++ b/src/hotspot/cpu/riscv/gc/x/x_riscv64.ad @@ -0,0 +1,233 @@ +// +// Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +// +// This code is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License version 2 only, as +// published by the Free Software Foundation. +// +// This code is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// version 2 for more details (a copy is included in the LICENSE file that +// accompanied this code). +// +// You should have received a copy of the GNU General Public License version +// 2 along with this work; if not, write to the Free Software Foundation, +// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +// +// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +// or visit www.oracle.com if you need additional information or have any +// questions. 
+// + +source_hpp %{ + +#include "gc/shared/gc_globals.hpp" +#include "gc/x/c2/xBarrierSetC2.hpp" +#include "gc/x/xThreadLocalData.hpp" + +%} + +source %{ + +static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, int barrier_data) { + if (barrier_data == XLoadBarrierElided) { + return; + } + XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data); + __ ld(tmp, Address(xthread, XThreadLocalData::address_bad_mask_offset())); + __ andr(tmp, tmp, ref); + __ bnez(tmp, *stub->entry(), true /* far */); + __ bind(*stub->continuation()); +} + +static void x_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) { + XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong); + __ j(*stub->entry()); + __ bind(*stub->continuation()); +} + +%} + +// Load Pointer +instruct xLoadP(iRegPNoSp dst, memory mem) +%{ + match(Set dst (LoadP mem)); + predicate(UseZGC && !ZGenerational && (n->as_Load()->barrier_data() != 0)); + effect(TEMP dst); + + ins_cost(4 * DEFAULT_COST); + + format %{ "ld $dst, $mem, #@zLoadP" %} + + ins_encode %{ + const Address ref_addr (as_Register($mem$$base), $mem$$disp); + __ ld($dst$$Register, ref_addr); + x_load_barrier(_masm, this, ref_addr, $dst$$Register, t0 /* tmp */, barrier_data()); + %} + + ins_pipe(iload_reg_mem); +%} + +instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ + match(Set res (CompareAndSwapP mem (Binary oldval newval))); + match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); + predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong); + effect(KILL cr, TEMP_DEF res); + + ins_cost(2 * VOLATILE_REF_COST); + + format %{ "cmpxchg $mem, $oldval, $newval, #@zCompareAndSwapP\n\t" + "mv $res, $res == $oldval" %} + + ins_encode %{ + Label failed; + guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, + Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register, + true /* result_as_bool */); + __ beqz($res$$Register, failed); + __ mv(t0, $oldval$$Register); + __ bind(failed); + if (barrier_data() != XLoadBarrierElided) { + Label good; + __ ld(t1, Address(xthread, XThreadLocalData::address_bad_mask_offset()), t1 /* tmp */); + __ andr(t1, t1, t0); + __ beqz(t1, good); + x_load_barrier_slow_path(_masm, this, Address($mem$$Register), t0 /* ref */, t1 /* tmp */); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, + Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register, + true /* result_as_bool */); + __ bind(good); + } + %} + + ins_pipe(pipe_slow); +%} + +instruct xCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ + match(Set res (CompareAndSwapP mem (Binary oldval newval))); + match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); + predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)); + effect(KILL cr, TEMP_DEF res); + + ins_cost(2 * VOLATILE_REF_COST); + + format %{ "cmpxchg $mem, $oldval, $newval, #@zCompareAndSwapPAcq\n\t" + "mv $res, $res == $oldval" %} + + ins_encode %{ + Label failed; + guarantee($mem$$index == -1 && 
$mem$$disp == 0, "impossible encoding"); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, + Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register, + true /* result_as_bool */); + __ beqz($res$$Register, failed); + __ mv(t0, $oldval$$Register); + __ bind(failed); + if (barrier_data() != XLoadBarrierElided) { + Label good; + __ ld(t1, Address(xthread, XThreadLocalData::address_bad_mask_offset()), t1 /* tmp */); + __ andr(t1, t1, t0); + __ beqz(t1, good); + x_load_barrier_slow_path(_masm, this, Address($mem$$Register), t0 /* ref */, t1 /* tmp */); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, + Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register, + true /* result_as_bool */); + __ bind(good); + } + %} + + ins_pipe(pipe_slow); +%} + +instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval) %{ + match(Set res (CompareAndExchangeP mem (Binary oldval newval))); + predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong); + effect(TEMP_DEF res); + + ins_cost(2 * VOLATILE_REF_COST); + + format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangeP" %} + + ins_encode %{ + guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, + Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register); + if (barrier_data() != XLoadBarrierElided) { + Label good; + __ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset())); + __ andr(t0, t0, $res$$Register); + __ beqz(t0, good); + x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, t0 /* tmp */); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, + Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register); + __ bind(good); + } + %} + + ins_pipe(pipe_slow); +%} + +instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval) %{ + match(Set res (CompareAndExchangeP mem (Binary oldval newval))); + predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong); + effect(TEMP_DEF res); + + ins_cost(2 * VOLATILE_REF_COST); + + format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangePAcq" %} + + ins_encode %{ + guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, + Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register); + if (barrier_data() != XLoadBarrierElided) { + Label good; + __ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset())); + __ andr(t0, t0, $res$$Register); + __ beqz(t0, good); + x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, t0 /* tmp */); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, + Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register); + __ bind(good); + } + %} + + ins_pipe(pipe_slow); +%} + +instruct xGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ + match(Set prev (GetAndSetP mem newv)); + predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); + effect(TEMP_DEF prev, KILL cr); 
+ + ins_cost(2 * VOLATILE_REF_COST); + + format %{ "atomic_xchg $prev, $newv, [$mem], #@zGetAndSetP" %} + + ins_encode %{ + __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base)); + x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, t0 /* tmp */, barrier_data()); + %} + + ins_pipe(pipe_serial); +%} + +instruct xGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ + match(Set prev (GetAndSetP mem newv)); + predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() != 0)); + effect(TEMP_DEF prev, KILL cr); + + ins_cost(VOLATILE_REF_COST); + + format %{ "atomic_xchg_acq $prev, $newv, [$mem], #@zGetAndSetPAcq" %} + + ins_encode %{ + __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base)); + x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, t0 /* tmp */, barrier_data()); + %} + ins_pipe(pipe_serial); +%} diff --git a/src/hotspot/cpu/riscv/gc/z/zAddress_riscv.cpp b/src/hotspot/cpu/riscv/gc/z/zAddress_riscv.cpp new file mode 100644 index 0000000000000..ef13676b02ed8 --- /dev/null +++ b/src/hotspot/cpu/riscv/gc/z/zAddress_riscv.cpp @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/z/zAddress.hpp" +#include "gc/z/zBarrierSetAssembler.hpp" +#include "gc/z/zGlobals.hpp" +#include "runtime/globals.hpp" +#include "runtime/os.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/powerOfTwo.hpp" + +#ifdef LINUX +#include <sys/mman.h> +#endif // LINUX + +// Default value if probe is not implemented for a certain platform: 128TB +static const size_t DEFAULT_MAX_ADDRESS_BIT = 47; +// Minimum value returned, if probing fails: 64GB +static const size_t MINIMUM_MAX_ADDRESS_BIT = 36; + +static size_t probe_valid_max_address_bit() { +#ifdef LINUX + size_t max_address_bit = 0; + const size_t page_size = os::vm_page_size(); + for (size_t i = DEFAULT_MAX_ADDRESS_BIT; i > MINIMUM_MAX_ADDRESS_BIT; --i) { + const uintptr_t base_addr = ((uintptr_t) 1U) << i; + if (msync((void*)base_addr, page_size, MS_ASYNC) == 0) { + // msync succeeded, the address is valid, and maybe even already mapped. + max_address_bit = i; + break; + } + if (errno != ENOMEM) { + // Some error occurred. This should never happen, but msync + // has some undefined behavior, hence ignore this bit.
+#ifdef ASSERT + fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno)); +#else // ASSERT + log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno)); +#endif // ASSERT + continue; + } + // Since msync failed with ENOMEM, the page might not be mapped. + // Try to map it, to see if the address is valid. + void* const result_addr = mmap((void*) base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0); + if (result_addr != MAP_FAILED) { + munmap(result_addr, page_size); + } + if ((uintptr_t) result_addr == base_addr) { + // address is valid + max_address_bit = i; + break; + } + } + if (max_address_bit == 0) { + // probing failed, allocate a very high page and take that bit as the maximum + const uintptr_t high_addr = ((uintptr_t) 1U) << DEFAULT_MAX_ADDRESS_BIT; + void* const result_addr = mmap((void*) high_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0); + if (result_addr != MAP_FAILED) { + max_address_bit = BitsPerSize_t - count_leading_zeros((size_t) result_addr) - 1; + munmap(result_addr, page_size); + } + } + log_info_p(gc, init)("Probing address space for the highest valid bit: " SIZE_FORMAT, max_address_bit); + return MAX2(max_address_bit, MINIMUM_MAX_ADDRESS_BIT); +#else // LINUX + return DEFAULT_MAX_ADDRESS_BIT; +#endif // LINUX +} + +size_t ZPlatformAddressOffsetBits() { + const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1; + const size_t max_address_offset_bits = valid_max_address_offset_bits - 3; + const size_t min_address_offset_bits = max_address_offset_bits - 2; + const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio); + const size_t address_offset_bits = log2i_exact(address_offset); + return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); +} + +size_t ZPlatformAddressHeapBaseShift() { + return ZPlatformAddressOffsetBits(); +} + +void ZGlobalsPointers::pd_set_good_masks() { + BarrierSetAssembler::clear_patching_epoch(); +} diff --git a/src/hotspot/cpu/riscv/gc/z/zAddress_riscv.hpp b/src/hotspot/cpu/riscv/gc/z/zAddress_riscv.hpp new file mode 100644 index 0000000000000..0ca8faf1464b8 --- /dev/null +++ b/src/hotspot/cpu/riscv/gc/z/zAddress_riscv.hpp @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef CPU_RISCV_GC_Z_ZADDRESS_RISCV_HPP +#define CPU_RISCV_GC_Z_ZADDRESS_RISCV_HPP + +#include "utilities/globalDefinitions.hpp" + +const size_t ZPointerLoadShift = 16; + +size_t ZPlatformAddressOffsetBits(); +size_t ZPlatformAddressHeapBaseShift(); + +#endif // CPU_RISCV_GC_Z_ZADDRESS_RISCV_HPP diff --git a/src/hotspot/cpu/riscv/gc/z/zAddress_riscv.inline.hpp b/src/hotspot/cpu/riscv/gc/z/zAddress_riscv.inline.hpp new file mode 100644 index 0000000000000..8992eadeb468a --- /dev/null +++ b/src/hotspot/cpu/riscv/gc/z/zAddress_riscv.inline.hpp @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef CPU_RISCV_GC_Z_ZADDRESS_RISCV_INLINE_HPP +#define CPU_RISCV_GC_Z_ZADDRESS_RISCV_INLINE_HPP + +#include "utilities/globalDefinitions.hpp" + +inline uintptr_t ZPointer::remap_bits(uintptr_t colored) { + return colored & ZPointerRemappedMask; +} + +inline constexpr int ZPointer::load_shift_lookup(uintptr_t value) { + return ZPointerLoadShift; +} + +#endif // CPU_RISCV_GC_Z_ZADDRESS_RISCV_INLINE_HPP diff --git a/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp index ee2844e01809d..000a60478460f 100644 --- a/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. + * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,13 +27,16 @@ #include "asm/macroAssembler.inline.hpp" #include "code/codeBlob.hpp" #include "code/vmreg.inline.hpp" +#include "gc/z/zAddress.hpp" #include "gc/z/zBarrier.inline.hpp" #include "gc/z/zBarrierSet.hpp" #include "gc/z/zBarrierSetAssembler.hpp" #include "gc/z/zBarrierSetRuntime.hpp" #include "gc/z/zThreadLocalData.hpp" #include "memory/resourceArea.hpp" +#include "runtime/jniHandles.hpp" #include "runtime/sharedRuntime.hpp" +#include "utilities/debug.hpp" #include "utilities/macros.hpp" #ifdef COMPILER1 #include "c1/c1_LIRAssembler.hpp" @@ -42,6 +45,7 @@ #endif // COMPILER1 #ifdef COMPILER2 #include "gc/z/c2/zBarrierSetC2.hpp" +#include "opto/output.hpp" #endif // COMPILER2 #ifdef PRODUCT @@ -53,6 +57,52 @@ #undef __ #define __ masm-> +// Helper for saving and restoring registers across a runtime call that does +// not have any live vector registers. +class ZRuntimeCallSpill { +private: + MacroAssembler* _masm; + Register _result; + + void save() { + MacroAssembler* masm = _masm; + + __ enter(); + if (_result != noreg) { + __ push_call_clobbered_registers_except(RegSet::of(_result)); + } else { + __ push_call_clobbered_registers(); + } + } + + void restore() { + MacroAssembler* masm = _masm; + + if (_result != noreg) { + // Make sure _result has the return value. + if (_result != x10) { + __ mv(_result, x10); + } + + __ pop_call_clobbered_registers_except(RegSet::of(_result)); + } else { + __ pop_call_clobbered_registers(); + } + __ leave(); + } + +public: + ZRuntimeCallSpill(MacroAssembler* masm, Register result) + : _masm(masm), + _result(result) { + save(); + } + + ~ZRuntimeCallSpill() { + restore(); + } +}; + void ZBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, @@ -66,44 +116,197 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm, return; } - assert_different_registers(t1, src.base()); - assert_different_registers(t0, t1, dst); + assert_different_registers(tmp1, tmp2, src.base(), noreg); + assert_different_registers(tmp1, tmp2, dst, noreg); + assert_different_registers(tmp2, t0); Label done; + Label uncolor; + + // Load bad mask into scratch register. + const bool on_non_strong = + (decorators & ON_WEAK_OOP_REF) != 0 || + (decorators & ON_PHANTOM_OOP_REF) != 0; - // Load bad mask into temp register. - __ la(t0, src); - __ ld(t1, address_bad_mask_from_thread(xthread)); - __ ld(dst, Address(t0)); + if (on_non_strong) { + __ ld(tmp1, mark_bad_mask_from_thread(xthread)); + } else { + __ ld(tmp1, load_bad_mask_from_thread(xthread)); + } + + __ la(tmp2, src); + __ ld(dst, tmp2); // Test reference against bad mask. If mask bad, then we need to fix it up. - __ andr(t1, dst, t1); - __ beqz(t1, done); + __ andr(tmp1, dst, tmp1); + __ beqz(tmp1, uncolor); - __ enter(); + { + // Call VM + ZRuntimeCallSpill rsc(masm, dst); - __ push_call_clobbered_registers_except(RegSet::of(dst)); + if (c_rarg0 != dst) { + __ mv(c_rarg0, dst); + } + __ mv(c_rarg1, tmp2); - if (c_rarg0 != dst) { - __ mv(c_rarg0, dst); + __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); } - __ mv(c_rarg1, t0); + // Slow-path has already uncolored + __ j(done); - __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); + __ bind(uncolor); + + // Remove the color bits + __ srli(dst, dst, ZPointerLoadShift); - // Make sure dst has the return value. 
- if (dst != x10) { - __ mv(dst, x10); + __ bind(done); +} + +void ZBarrierSetAssembler::store_barrier_fast(MacroAssembler* masm, + Address ref_addr, + Register rnew_zaddress, + Register rnew_zpointer, + Register rtmp, + bool in_nmethod, + bool is_atomic, + Label& medium_path, + Label& medium_path_continuation) const { + assert_different_registers(ref_addr.base(), rnew_zpointer, rtmp); + assert_different_registers(rnew_zaddress, rnew_zpointer, rtmp); + + if (in_nmethod) { + if (is_atomic) { + __ lhu(rtmp, ref_addr); + // Atomic operations must ensure that the contents of memory are store-good before + // an atomic operation can execute. + // A non-relocatable object could have spurious raw null pointers in its fields after + // getting promoted to the old generation. + __ relocate(barrier_Relocation::spec(), [&] { + __ li16u(rnew_zpointer, barrier_Relocation::unpatched); + }, ZBarrierRelocationFormatStoreGoodBits); + __ bne(rtmp, rnew_zpointer, medium_path, true /* is_far */); + } else { + __ ld(rtmp, ref_addr); + // Stores on relocatable objects never need to deal with raw null pointers in fields. + // Raw null pointers may only exist in the young generation, as they get pruned when + // the object is relocated to old. And no pre-write barrier needs to perform any action + // in the young generation. + __ relocate(barrier_Relocation::spec(), [&] { + __ li16u(rnew_zpointer, barrier_Relocation::unpatched); + }, ZBarrierRelocationFormatStoreBadMask); + __ andr(rtmp, rtmp, rnew_zpointer); + __ bnez(rtmp, medium_path, true /* is_far */); + } + __ bind(medium_path_continuation); + __ relocate(barrier_Relocation::spec(), [&] { + __ li16u(rtmp, barrier_Relocation::unpatched); + }, ZBarrierRelocationFormatStoreGoodBits); + __ slli(rnew_zpointer, rnew_zaddress, ZPointerLoadShift); + __ orr(rnew_zpointer, rnew_zpointer, rtmp); + } else { + assert(!is_atomic, "atomic outside of nmethods not supported"); + __ la(rtmp, ref_addr); + __ ld(rtmp, rtmp); + __ ld(rnew_zpointer, Address(xthread, ZThreadLocalData::store_bad_mask_offset())); + __ andr(rtmp, rtmp, rnew_zpointer); + __ bnez(rtmp, medium_path, true /* is_far */); + __ bind(medium_path_continuation); + if (rnew_zaddress == noreg) { + __ mv(rnew_zpointer, zr); + } else { + __ mv(rnew_zpointer, rnew_zaddress); + } + + // Load the current good shift, and add the color bits + __ slli(rnew_zpointer, rnew_zpointer, ZPointerLoadShift); + __ ld(rtmp, Address(xthread, ZThreadLocalData::store_good_mask_offset())); + __ orr(rnew_zpointer, rnew_zpointer, rtmp); + } +} - __ pop_call_clobbered_registers_except(RegSet::of(dst)); - __ leave(); +static void store_barrier_buffer_add(MacroAssembler* masm, + Address ref_addr, + Register tmp1, + Register tmp2, + Label& slow_path) { + Address buffer(xthread, ZThreadLocalData::store_barrier_buffer_offset()); + assert_different_registers(ref_addr.base(), tmp1, tmp2); - __ bind(done); + __ ld(tmp1, buffer); + + // Combined pointer bump and check if the buffer is disabled or full + __ ld(tmp2, Address(tmp1, ZStoreBarrierBuffer::current_offset())); + __ beqz(tmp2, slow_path); + + // Bump the pointer + __ sub(tmp2, tmp2, sizeof(ZStoreBarrierEntry)); + __ sd(tmp2, Address(tmp1, ZStoreBarrierBuffer::current_offset())); + + // Compute the buffer entry address + __ la(tmp2, Address(tmp2, ZStoreBarrierBuffer::buffer_offset())); + __ add(tmp2, tmp2, tmp1); + + // Compute and log the store address + __ la(tmp1, ref_addr); + __ sd(tmp1, Address(tmp2, in_bytes(ZStoreBarrierEntry::p_offset()))); + + // Load and log the prev value + __ 
ld(tmp1, tmp1); + __ sd(tmp1, Address(tmp2, in_bytes(ZStoreBarrierEntry::prev_offset()))); } -#ifdef ASSERT +void ZBarrierSetAssembler::store_barrier_medium(MacroAssembler* masm, + Address ref_addr, + Register rtmp1, + Register rtmp2, + Register rtmp3, + bool is_native, + bool is_atomic, + Label& medium_path_continuation, + Label& slow_path, + Label& slow_path_continuation) const { + assert_different_registers(ref_addr.base(), rtmp1, rtmp2, rtmp3); + + // The reason to end up in the medium path is that the pre-value was not 'good'. + if (is_native) { + __ j(slow_path); + __ bind(slow_path_continuation); + __ j(medium_path_continuation); + } else if (is_atomic) { + // Atomic accesses can get to the medium fast path because the value was a + // raw null value. If it was not null, then there is no doubt we need to take a slow path. + + __ la(rtmp2, ref_addr); + __ ld(rtmp1, rtmp2); + __ bnez(rtmp1, slow_path); + + // If we get this far, we know there is a young raw null value in the field. + __ relocate(barrier_Relocation::spec(), [&] { + __ li16u(rtmp1, barrier_Relocation::unpatched); + }, ZBarrierRelocationFormatStoreGoodBits); + __ cmpxchg_weak(rtmp2, zr, rtmp1, + Assembler::int64, + Assembler::relaxed /* acquire */, Assembler::relaxed /* release */, + rtmp3); + __ beqz(rtmp3, slow_path); + __ bind(slow_path_continuation); + __ j(medium_path_continuation); + } else { + // A non-atomic relocatable object won't get to the medium fast path due to a + // raw null in the young generation. We only get here because the field is bad. + // In this path we don't need any self healing, so we can avoid a runtime call + // most of the time by buffering the store barrier to be applied lazily. + store_barrier_buffer_add(masm, + ref_addr, + rtmp1, + rtmp2, + slow_path); + __ bind(slow_path_continuation); + __ j(medium_path_continuation); + } +} void ZBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, @@ -113,32 +316,103 @@ void ZBarrierSetAssembler::store_at(MacroAssembler* masm, Register tmp1, Register tmp2, Register tmp3) { - // Verify value - if (is_reference_type(type)) { - // Note that src could be noreg, which means we - // are storing null and can skip verification. - if (val != noreg) { - Label done; - - // tmp1, tmp2 and tmp3 are often set to noreg.
- RegSet savedRegs = RegSet::of(t0); - __ push_reg(savedRegs, sp); - - __ ld(t0, address_bad_mask_from_thread(xthread)); - __ andr(t0, val, t0); - __ beqz(t0, done); - __ stop("Verify oop store failed"); - __ should_not_reach_here(); - __ bind(done); - __ pop_reg(savedRegs, sp); + if (!ZBarrierSet::barrier_needed(decorators, type)) { + BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3); + return; + } + + bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; + + assert_different_registers(val, tmp1, dst.base()); + + if (dest_uninitialized) { + if (val == noreg) { + __ mv(tmp1, zr); + } else { + __ mv(tmp1, val); + } + // Add the color bits + __ slli(tmp1, tmp1, ZPointerLoadShift); + __ ld(tmp2, Address(xthread, ZThreadLocalData::store_good_mask_offset())); + __ orr(tmp1, tmp2, tmp1); + } else { + Label done; + Label medium; + Label medium_continuation; + Label slow; + Label slow_continuation; + store_barrier_fast(masm, dst, val, tmp1, tmp2, false, false, medium, medium_continuation); + + __ j(done); + __ bind(medium); + store_barrier_medium(masm, + dst, + tmp1, + tmp2, + noreg /* tmp3 */, + false /* is_native */, + false /* is_atomic */, + medium_continuation, + slow, + slow_continuation); + + __ bind(slow); + { + // Call VM + ZRuntimeCallSpill rcs(masm, noreg); + __ la(c_rarg0, dst); + __ MacroAssembler::call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr(), 1); } + + __ j(slow_continuation); + __ bind(done); } // Store value - BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, noreg); + BarrierSetAssembler::store_at(masm, decorators, type, dst, tmp1, tmp2, tmp3, noreg); } -#endif // ASSERT +class ZCopyRuntimeCallSpill { +private: + MacroAssembler* _masm; + Register _result; + + void save() { + MacroAssembler* masm = _masm; + + __ enter(); + if (_result != noreg) { + __ push_call_clobbered_registers_except(RegSet::of(_result)); + } else { + __ push_call_clobbered_registers(); + } + } + + void restore() { + MacroAssembler* masm = _masm; + + if (_result != noreg) { + if (_result != x10) { + __ mv(_result, x10); + } + __ pop_call_clobbered_registers_except(RegSet::of(_result)); + } else { + __ pop_call_clobbered_registers(); + } + __ leave(); + } + +public: + ZCopyRuntimeCallSpill(MacroAssembler* masm, Register result) + : _masm(masm), + _result(result) { + save(); + } + + ~ZCopyRuntimeCallSpill() { + restore(); + } +}; void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, @@ -147,32 +421,137 @@ void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, Register dst, Register count, RegSet saved_regs) { - if (!is_oop) { - // Barrier not needed +} + +static void copy_load_barrier(MacroAssembler* masm, + Register ref, + Address src, + Register tmp) { + Label done; + + __ ld(tmp, Address(xthread, ZThreadLocalData::load_bad_mask_offset())); + + // Test reference against bad mask. 
If mask bad, then we need to fix it up + __ andr(tmp, ref, tmp); + __ beqz(tmp, done); + + { + // Call VM + ZCopyRuntimeCallSpill rsc(masm, ref); + + __ la(c_rarg1, src); + + if (c_rarg0 != ref) { + __ mv(c_rarg0, ref); + } + + __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(IN_HEAP | ON_STRONG_OOP_REF), 2); + } + + // Slow-path has uncolored; revert + __ slli(ref, ref, ZPointerLoadShift); + + __ bind(done); +} + +static void copy_store_barrier(MacroAssembler* masm, + Register pre_ref, + Register new_ref, + Address src, + Register tmp1, + Register tmp2) { + Label done; + Label slow; + + // Test reference against bad mask. If mask bad, then we need to fix it up. + __ ld(tmp1, Address(xthread, ZThreadLocalData::store_bad_mask_offset())); + __ andr(tmp1, pre_ref, tmp1); + __ beqz(tmp1, done); + + store_barrier_buffer_add(masm, src, tmp1, tmp2, slow); + __ j(done); + + __ bind(slow); + { + // Call VM + ZCopyRuntimeCallSpill rcs(masm, noreg); + + __ la(c_rarg0, src); + + __ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr(), 1); + } + + __ bind(done); + + if (new_ref != noreg) { + // Set store-good color, replacing whatever color was there before + __ ld(tmp1, Address(xthread, ZThreadLocalData::store_good_mask_offset())); + __ srli(new_ref, new_ref, 16); + __ slli(new_ref, new_ref, 16); + __ orr(new_ref, new_ref, tmp1); + } +} + +void ZBarrierSetAssembler::copy_load_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + Register dst, + Address src, + Register tmp) { + if (!is_reference_type(type)) { + BarrierSetAssembler::copy_load_at(masm, decorators, type, bytes, dst, src, noreg); return; } - BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {"); + BarrierSetAssembler::copy_load_at(masm, decorators, type, bytes, dst, src, noreg); - assert_different_registers(src, count, t0); + assert(bytes == 8, "unsupported copy step"); + copy_load_barrier(masm, dst, src, tmp); - __ push_reg(saved_regs, sp); + if ((decorators & ARRAYCOPY_CHECKCAST) != 0) { + __ srli(dst, dst, ZPointerLoadShift); + } +} - if (count == c_rarg0 && src == c_rarg1) { - // exactly backwards!! 
- __ xorr(c_rarg0, c_rarg0, c_rarg1); - __ xorr(c_rarg1, c_rarg0, c_rarg1); - __ xorr(c_rarg0, c_rarg0, c_rarg1); - } else { - __ mv(c_rarg0, src); - __ mv(c_rarg1, count); +void ZBarrierSetAssembler::copy_store_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + Address dst, + Register src, + Register tmp1, + Register tmp2, + Register tmp3) { + if (!is_reference_type(type)) { + BarrierSetAssembler::copy_store_at(masm, decorators, type, bytes, dst, src, noreg, noreg, noreg); + return; + } + + if ((decorators & ARRAYCOPY_CHECKCAST) != 0) { + __ slli(src, src, ZPointerLoadShift); } - __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2); + bool is_dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; + + assert(bytes == 8, "unsupported copy step"); + if (is_dest_uninitialized) { + __ ld(tmp1, Address(xthread, ZThreadLocalData::store_good_mask_offset())); + __ srli(src, src, 16); + __ slli(src, src, 16); + __ orr(src, src, tmp1); + } else { + // Store barrier pre values and color new values + __ ld(tmp1, dst); + copy_store_barrier(masm, tmp1, src, dst, tmp2, tmp3); + } - __ pop_reg(saved_regs, sp); + // Store new values + BarrierSetAssembler::copy_store_at(masm, decorators, type, bytes, dst, src, noreg, noreg, noreg); +} - BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue"); +bool ZBarrierSetAssembler::supports_rvv_arraycopy() { + return false; } void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, @@ -182,25 +561,86 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Label& slowpath) { BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {"); - assert_different_registers(jni_env, robj, tmp); + Label done, tagged, weak_tagged, uncolor; + + // Test for tag + __ andi(tmp, robj, JNIHandles::tag_mask); + __ bnez(tmp, tagged); - // Resolve jobject - BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath); + // Resolve local handle + __ ld(robj, robj); + __ j(done); - // Compute the offset of address bad mask from the field of jni_environment - long int bad_mask_relative_offset = (long int) (in_bytes(ZThreadLocalData::address_bad_mask_offset()) - - in_bytes(JavaThread::jni_environment_offset())); + __ bind(tagged); - // Load the address bad mask - __ ld(tmp, Address(jni_env, bad_mask_relative_offset)); + // Test for weak tag + __ andi(tmp, robj, JNIHandles::TypeTag::weak_global); + __ bnez(tmp, weak_tagged); - // Check address bad mask + // Resolve global handle + __ ld(robj, Address(robj, -JNIHandles::TypeTag::global)); + __ la(tmp, load_bad_mask_from_jni_env(jni_env)); + __ ld(tmp, tmp); __ andr(tmp, robj, tmp); __ bnez(tmp, slowpath); + __ j(uncolor); + + __ bind(weak_tagged); + + // Resolve weak handle + __ ld(robj, Address(robj, -JNIHandles::TypeTag::weak_global)); + __ la(tmp, mark_bad_mask_from_jni_env(jni_env)); + __ ld(tmp, tmp); + __ andr(tmp, robj, tmp); + __ bnez(tmp, slowpath); + + __ bind(uncolor); + + // Uncolor + __ srli(robj, robj, ZPointerLoadShift); + + __ bind(done); BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native"); } +static uint16_t patch_barrier_relocation_value(int format) { + switch (format) { + case ZBarrierRelocationFormatLoadBadMask: + return (uint16_t)ZPointerLoadBadMask; + case ZBarrierRelocationFormatMarkBadMask: + return (uint16_t)ZPointerMarkBadMask; + case ZBarrierRelocationFormatStoreGoodBits: + return (uint16_t)ZPointerStoreGoodMask; + case 
ZBarrierRelocationFormatStoreBadMask: + return (uint16_t)ZPointerStoreBadMask; + + default: + ShouldNotReachHere(); + return 0; + } +} + +void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format) { + const uint16_t value = patch_barrier_relocation_value(format); + + int bytes; + switch (format) { + case ZBarrierRelocationFormatLoadBadMask: + case ZBarrierRelocationFormatMarkBadMask: + case ZBarrierRelocationFormatStoreGoodBits: + case ZBarrierRelocationFormatStoreBadMask: + assert(NativeInstruction::is_li16u_at(addr), "invalid zgc barrier"); + bytes = MacroAssembler::pd_patch_instruction_size(addr, (address)(uintptr_t)value); + break; + default: + ShouldNotReachHere(); + } + + // A full fence is generated before icache_flush by default in invalidate_word + ICache::invalidate_range(addr, bytes); +} + #ifdef COMPILER2 OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) { @@ -227,7 +667,7 @@ class ZSaveLiveRegisters { VectorRegSet _vp_regs; public: - void initialize(ZLoadBarrierStubC2* stub) { + void initialize(ZBarrierStubC2* stub) { // Record registers that needs to be saved/restored RegMaskIterator rmi(stub->live()); while (rmi.has_next()) { @@ -248,10 +688,14 @@ class ZSaveLiveRegisters { } // Remove C-ABI SOE registers, tmp regs and _ref register that will be updated - _gp_regs -= RegSet::range(x18, x27) + RegSet::of(x2) + RegSet::of(x8, x9) + RegSet::of(x5, stub->ref()); + if (stub->result() != noreg) { + _gp_regs -= RegSet::range(x18, x27) + RegSet::of(x2) + RegSet::of(x8, x9) + RegSet::of(x5, stub->result()); + } else { + _gp_regs -= RegSet::range(x18, x27) + RegSet::of(x2, x5) + RegSet::of(x8, x9); + } } - ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) : + ZSaveLiveRegisters(MacroAssembler* masm, ZBarrierStubC2* stub) : _masm(masm), _gp_regs(), _fp_regs(), @@ -333,35 +777,111 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z BLOCK_COMMENT("ZLoadBarrierStubC2"); // Stub entry - __ bind(*stub->entry()); + if (!Compile::current()->output()->in_scratch_emit_size()) { + __ bind(*stub->entry()); + } { ZSaveLiveRegisters save_live_registers(masm, stub); ZSetupArguments setup_arguments(masm, stub); - - Address target(stub->slow_path()); - __ relocate(target.rspec(), [&] { - int32_t offset; - __ la_patchable(t0, target, offset); - __ jalr(x1, t0, offset); - }); + __ mv(t0, stub->slow_path()); + __ jalr(t0); } // Stub exit __ j(*stub->continuation()); } +void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm, ZStoreBarrierStubC2* stub) const { + BLOCK_COMMENT("ZStoreBarrierStubC2"); + + // Stub entry + __ bind(*stub->entry()); + + Label slow; + Label slow_continuation; + store_barrier_medium(masm, + stub->ref_addr(), + stub->new_zpointer(), + t1, + t0, + stub->is_native(), + stub->is_atomic(), + *stub->continuation(), + slow, + slow_continuation); + + __ bind(slow); + + { + ZSaveLiveRegisters save_live_registers(masm, stub); + __ la(c_rarg0, stub->ref_addr()); + + if (stub->is_native()) { + __ la(t0, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr())); + } else if (stub->is_atomic()) { + __ la(t0, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr())); + } else { + __ la(t0, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr())); + } + __ jalr(t0); + } + + // Stub exit + __ j(slow_continuation); +} + +#undef __ + #endif // COMPILER2 #ifdef COMPILER1 
#undef __ #define __ ce->masm()-> -void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, - LIR_Opr ref) const { - assert_different_registers(xthread, ref->as_register(), t1); - __ ld(t1, address_bad_mask_from_thread(xthread)); - __ andr(t1, t1, ref->as_register()); +static void z_color(LIR_Assembler* ce, LIR_Opr ref) { + __ relocate(barrier_Relocation::spec(), [&] { + __ li16u(t1, barrier_Relocation::unpatched); + }, ZBarrierRelocationFormatStoreGoodBits); + __ slli(ref->as_register(), ref->as_register(), ZPointerLoadShift); + __ orr(ref->as_register(), ref->as_register(), t1); +} + +static void z_uncolor(LIR_Assembler* ce, LIR_Opr ref) { + __ srli(ref->as_register(), ref->as_register(), ZPointerLoadShift); +} + +static void check_color(LIR_Assembler* ce, LIR_Opr ref, bool on_non_strong) { + assert_different_registers(t0, xthread, ref->as_register()); + int format = on_non_strong ? ZBarrierRelocationFormatMarkBadMask + : ZBarrierRelocationFormatLoadBadMask; + Label good; + __ relocate(barrier_Relocation::spec(), [&] { + __ li16u(t0, barrier_Relocation::unpatched); + }, format); + __ andr(t0, ref->as_register(), t0); +} + +void ZBarrierSetAssembler::generate_c1_color(LIR_Assembler* ce, LIR_Opr ref) const { + z_color(ce, ref); +} + +void ZBarrierSetAssembler::generate_c1_uncolor(LIR_Assembler* ce, LIR_Opr ref) const { + z_uncolor(ce, ref); +} + +void ZBarrierSetAssembler::generate_c1_load_barrier(LIR_Assembler* ce, + LIR_Opr ref, + ZLoadBarrierStubC1* stub, + bool on_non_strong) const { + Label good; + check_color(ce, ref, on_non_strong); + __ beqz(t0, good); + __ j(*stub->entry()); + + __ bind(good); + z_uncolor(ce, ref); + __ bind(*stub->continuation()); } void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, @@ -382,42 +902,41 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register(); } - assert_different_registers(ref, ref_addr, noreg); - - // Save x10 unless it is the result or tmp register - // Set up SP to accommodate parameters and maybe x10. - if (ref != x10 && tmp != x10) { - __ sub(sp, sp, 32); - __ sd(x10, Address(sp, 16)); - } else { - __ sub(sp, sp, 16); - } - - // Setup arguments and call runtime stub - ce->store_parameter(ref_addr, 1); - ce->store_parameter(ref, 0); - - __ far_call(stub->runtime_stub()); - - // Verify result - __ verify_oop(x10); - - - // Move result into place - if (ref != x10) { - __ mv(ref, x10); - } - - // Restore x10 unless it is the result or tmp register - if (ref != x10 && tmp != x10) { - __ ld(x10, Address(sp, 16)); - __ add(sp, sp, 32); - } else { - __ add(sp, sp, 16); - } - - // Stub exit - __ j(*stub->continuation()); + assert_different_registers(ref, ref_addr, noreg); + + // Save x10 unless it is the result or tmp register + // Set up SP to accommodate parameters and maybe x10. 
+ if (ref != x10 && tmp != x10) { + __ sub(sp, sp, 32); + __ sd(x10, Address(sp, 16)); + } else { + __ sub(sp, sp, 16); + } + + // Setup arguments and call runtime stub + ce->store_parameter(ref_addr, 1); + ce->store_parameter(ref, 0); + + __ far_call(stub->runtime_stub()); + + // Verify result + __ verify_oop(x10); + + // Move result into place + if (ref != x10) { + __ mv(ref, x10); + } + + // Restore x10 unless it is the result or tmp register + if (ref != x10 && tmp != x10) { + __ ld(x10, Address(sp, 16)); + __ addi(sp, sp, 32); + } else { + __ addi(sp, sp, 16); + } + + // Stub exit + __ j(*stub->continuation()); } #undef __ @@ -440,19 +959,131 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* __ epilogue(); } +void ZBarrierSetAssembler::generate_c1_store_barrier_runtime_stub(StubAssembler* sasm, + bool self_healing) const { + __ prologue("zgc_store_barrier stub", false); + + __ push_call_clobbered_registers(); + + // Setup arguments + __ load_parameter(0, c_rarg0); + + if (self_healing) { + __ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr(), 1); + } else { + __ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr(), 1); + } + + __ pop_call_clobbered_registers(); + + __ epilogue(); +} + +#undef __ +#define __ ce->masm()-> + +void ZBarrierSetAssembler::generate_c1_store_barrier(LIR_Assembler* ce, + LIR_Address* addr, + LIR_Opr new_zaddress, + LIR_Opr new_zpointer, + ZStoreBarrierStubC1* stub) const { + Register rnew_zaddress = new_zaddress->as_register(); + Register rnew_zpointer = new_zpointer->as_register(); + + store_barrier_fast(ce->masm(), + ce->as_Address(addr), + rnew_zaddress, + rnew_zpointer, + t1, + true, + stub->is_atomic(), + *stub->entry(), + *stub->continuation()); +} + +void ZBarrierSetAssembler::generate_c1_store_barrier_stub(LIR_Assembler* ce, + ZStoreBarrierStubC1* stub) const { + // Stub entry + __ bind(*stub->entry()); + Label slow; + Label slow_continuation; + store_barrier_medium(ce->masm(), + ce->as_Address(stub->ref_addr()->as_address_ptr()), + t1, + stub->new_zpointer()->as_register(), + stub->tmp()->as_pointer_register(), + false /* is_native */, + stub->is_atomic(), + *stub->continuation(), + slow, + slow_continuation); + + __ bind(slow); + + __ la(stub->new_zpointer()->as_register(), ce->as_Address(stub->ref_addr()->as_address_ptr())); + + __ sub(sp, sp, 16); + //Setup arguments and call runtime stub + assert(stub->new_zpointer()->is_valid(), "invariant"); + ce->store_parameter(stub->new_zpointer()->as_register(), 0); + __ far_call(stub->runtime_stub()); + __ addi(sp, sp, 16); + + // Stub exit + __ j(slow_continuation); +} + +#undef __ + #endif // COMPILER1 #undef __ #define __ masm-> void ZBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) { - // Check if mask is good. - // verifies that ZAddressBadMask & obj == 0 - __ ld(tmp2, Address(xthread, ZThreadLocalData::address_bad_mask_offset())); - __ andr(tmp1, obj, tmp2); - __ bnez(tmp1, error); + // C1 calls verify_oop in the middle of barriers, before they have been uncolored + // and after being colored. Therefore, we must deal with colored oops as well. 
+ Label done; + Label check_oop; + Label check_zaddress; + int color_bits = ZPointerRemappedShift + ZPointerRemappedBits; + + uintptr_t shifted_base_start_mask = (UCONST64(1) << (ZAddressHeapBaseShift + color_bits + 1)) - 1; + uintptr_t shifted_base_end_mask = (UCONST64(1) << (ZAddressHeapBaseShift + 1)) - 1; + uintptr_t shifted_base_mask = shifted_base_start_mask ^ shifted_base_end_mask; + + uintptr_t shifted_address_end_mask = (UCONST64(1) << (color_bits + 1)) - 1; + uintptr_t shifted_address_mask = shifted_base_end_mask ^ (uintptr_t)CONST64(-1); + + // Check colored null + __ mv(tmp1, shifted_address_mask); + __ andr(tmp1, tmp1, obj); + __ beqz(tmp1, done); + + // Check for zpointer + __ mv(tmp1, shifted_base_mask); + __ andr(tmp1, tmp1, obj); + __ beqz(tmp1, check_oop); - BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error); + // Uncolor presumed zpointer + __ srli(obj, obj, ZPointerLoadShift); + + __ j(check_zaddress); + + __ bind(check_oop); + + // Make sure klass is 'reasonable', which is not zero + __ load_klass(tmp1, obj, tmp2); + __ beqz(tmp1, error); + + __ bind(check_zaddress); + // Check if the oop is the right area of memory + __ mv(tmp1, (intptr_t) Universe::verify_oop_mask()); + __ andr(tmp1, tmp1, obj); + __ mv(obj, (intptr_t) Universe::verify_oop_bits()); + __ bne(tmp1, obj, error); + + __ bind(done); } #undef __ diff --git a/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.hpp b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.hpp index c7f29684a6499..4597949d189ca 100644 --- a/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.hpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,22 +28,33 @@ #include "code/vmreg.hpp" #include "oops/accessDecorators.hpp" +#ifdef COMPILER1 +#include "c1/c1_LIR.hpp" +#endif // COMPILER1 #ifdef COMPILER2 +#include "gc/z/c2/zBarrierSetC2.hpp" #include "opto/optoreg.hpp" #endif // COMPILER2 #ifdef COMPILER1 +class LIR_Address; class LIR_Assembler; class LIR_Opr; class StubAssembler; class ZLoadBarrierStubC1; +class ZStoreBarrierStubC1; #endif // COMPILER1 #ifdef COMPILER2 +class MachNode; class Node; -class ZLoadBarrierStubC2; #endif // COMPILER2 +const int ZBarrierRelocationFormatLoadBadMask = 0; +const int ZBarrierRelocationFormatMarkBadMask = 1; +const int ZBarrierRelocationFormatStoreGoodBits = 2; +const int ZBarrierRelocationFormatStoreBadMask = 3; + class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { public: virtual void load_at(MacroAssembler* masm, @@ -54,7 +65,27 @@ class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { Register tmp1, Register tmp2); -#ifdef ASSERT + void store_barrier_fast(MacroAssembler* masm, + Address ref_addr, + Register rnew_zaddress, + Register rnew_zpointer, + Register rtmp, + bool in_nmethod, + bool is_atomic, + Label& medium_path, + Label& medium_path_continuation) const; + + void store_barrier_medium(MacroAssembler* masm, + Address ref_addr, + Register rtmp1, + Register rtmp2, + Register rtmp3, + bool is_native, + bool is_atomic, + Label& medium_path_continuation, + Label& slow_path, + Label& slow_path_continuation) const; + virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, @@ -63,7 +94,6 @@ class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { Register tmp1, Register tmp2, Register tmp3); -#endif // ASSERT virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, @@ -73,23 +103,66 @@ class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { Register count, RegSet saved_regs); + virtual void copy_load_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + Register dst, + Address src, + Register tmp); + + virtual void copy_store_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + Address dst, + Register src, + Register tmp1, + Register tmp2, + Register tmp3); + + virtual bool supports_rvv_arraycopy(); + virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env, Register robj, Register tmp, Label& slowpath); - virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; } + virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_instruction_and_data_patch; } + + void patch_barrier_relocation(address addr, int format); + + void patch_barriers() {} #ifdef COMPILER1 + void generate_c1_color(LIR_Assembler* ce, LIR_Opr ref) const; + void generate_c1_uncolor(LIR_Assembler* ce, LIR_Opr ref) const; + void generate_c1_load_barrier_test(LIR_Assembler* ce, LIR_Opr ref) const; + void generate_c1_load_barrier(LIR_Assembler* ce, + LIR_Opr ref, + ZLoadBarrierStubC1* stub, + bool on_non_strong) const; void generate_c1_load_barrier_stub(LIR_Assembler* ce, ZLoadBarrierStubC1* stub) const; void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) const; + + void generate_c1_store_barrier(LIR_Assembler* ce, + LIR_Address* addr, + LIR_Opr new_zaddress, + LIR_Opr new_zpointer, + ZStoreBarrierStubC1* stub) const; + + void generate_c1_store_barrier_stub(LIR_Assembler* 
ce, + ZStoreBarrierStubC1* stub) const; + + void generate_c1_store_barrier_runtime_stub(StubAssembler* sasm, + bool self_healing) const; #endif // COMPILER1 #ifdef COMPILER2 @@ -98,6 +171,8 @@ class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { void generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const; + void generate_c2_store_barrier_stub(MacroAssembler* masm, + ZStoreBarrierStubC2* stub) const; #endif // COMPILER2 void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error); diff --git a/src/hotspot/cpu/riscv/gc/z/zGlobals_riscv.hpp b/src/hotspot/cpu/riscv/gc/z/zGlobals_riscv.hpp index bd72753b88229..3f90ef07f27cf 100644 --- a/src/hotspot/cpu/riscv/gc/z/zGlobals_riscv.hpp +++ b/src/hotspot/cpu/riscv/gc/z/zGlobals_riscv.hpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,10 +26,8 @@ #ifndef CPU_RISCV_GC_Z_ZGLOBALS_RISCV_HPP #define CPU_RISCV_GC_Z_ZGLOBALS_RISCV_HPP -const size_t ZPlatformHeapViews = 3; -const size_t ZPlatformCacheLineSize = 64; +#include "utilities/globalDefinitions.hpp" -size_t ZPlatformAddressOffsetBits(); -size_t ZPlatformAddressMetadataShift(); +const size_t ZPlatformCacheLineSize = 64; #endif // CPU_RISCV_GC_Z_ZGLOBALS_RISCV_HPP diff --git a/src/hotspot/cpu/riscv/gc/z/z_riscv64.ad b/src/hotspot/cpu/riscv/gc/z/z_riscv64.ad index 6b6f87814a56e..1429f4ad52043 100644 --- a/src/hotspot/cpu/riscv/gc/z/z_riscv64.ad +++ b/src/hotspot/cpu/riscv/gc/z/z_riscv64.ad @@ -1,6 +1,6 @@ // -// Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. -// Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. +// Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -31,31 +31,69 @@ source_hpp %{ %} source %{ +#include "gc/z/zBarrierSetAssembler.hpp" -static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, int barrier_data) { - if (barrier_data == ZLoadBarrierElided) { +static void z_color(MacroAssembler& _masm, const MachNode* node, Register dst, Register src, Register tmp) { + assert_different_registers(dst, tmp); + + __ relocate(barrier_Relocation::spec(), [&] { + __ li16u(tmp, barrier_Relocation::unpatched); + }, ZBarrierRelocationFormatStoreGoodBits); + __ slli(dst, src, ZPointerLoadShift); + __ orr(dst, dst, tmp); +} + +static void z_uncolor(MacroAssembler& _masm, const MachNode* node, Register ref) { + __ srli(ref, ref, ZPointerLoadShift); +} + +static void check_color(MacroAssembler& _masm, Register ref, bool on_non_strong, Register result) { + int format = on_non_strong ? 
ZBarrierRelocationFormatMarkBadMask + : ZBarrierRelocationFormatLoadBadMask; + __ relocate(barrier_Relocation::spec(), [&] { + __ li16u(result, barrier_Relocation::unpatched); + }, format); + __ andr(result, ref, result); +} + +static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) { + const bool on_non_strong = + ((node->barrier_data() & ZBarrierWeak) != 0) || + ((node->barrier_data() & ZBarrierPhantom) != 0); + + if (node->barrier_data() == ZBarrierElided) { + z_uncolor(_masm, node, ref); return; } - ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data); - __ ld(tmp, Address(xthread, ZThreadLocalData::address_bad_mask_offset())); - __ andr(tmp, tmp, ref); - __ bnez(tmp, *stub->entry(), true /* far */); - __ bind(*stub->continuation()); -} -static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) { - ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, ZLoadBarrierStrong); + ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref); + Label good; + check_color(_masm, ref, on_non_strong, tmp); + __ beqz(tmp, good); __ j(*stub->entry()); + + __ bind(good); + z_uncolor(_masm, node, ref); __ bind(*stub->continuation()); } +static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, Register tmp, bool is_atomic) { + if (node->barrier_data() == ZBarrierElided) { + z_color(_masm, node, rnew_zpointer, rnew_zaddress, t0); + } else { + bool is_native = (node->barrier_data() & ZBarrierNative) != 0; + ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic); + ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler(); + bs_asm->store_barrier_fast(&_masm, ref_addr, rnew_zaddress, rnew_zpointer, tmp, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation()); + } +} %} // Load Pointer instruct zLoadP(iRegPNoSp dst, memory mem) %{ match(Set dst (LoadP mem)); - predicate(UseZGC && (n->as_Load()->barrier_data() != 0)); + predicate(UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0); effect(TEMP dst); ins_cost(4 * DEFAULT_COST); @@ -63,19 +101,36 @@ instruct zLoadP(iRegPNoSp dst, memory mem) format %{ "ld $dst, $mem, #@zLoadP" %} ins_encode %{ - const Address ref_addr (as_Register($mem$$base), $mem$$disp); + const Address ref_addr(as_Register($mem$$base), $mem$$disp); __ ld($dst$$Register, ref_addr); - z_load_barrier(_masm, this, ref_addr, $dst$$Register, t0 /* tmp */, barrier_data()); + z_load_barrier(_masm, this, ref_addr, $dst$$Register, t0); %} ins_pipe(iload_reg_mem); %} -instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ +// Store Pointer +instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr) +%{ + predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0); + match(Set mem (StoreP mem src)); + effect(TEMP tmp, KILL cr); + + ins_cost(125); // XXX + format %{ "sd $mem, $src\t# ptr" %} + ins_encode %{ + const Address ref_addr(as_Register($mem$$base), $mem$$disp); + z_store_barrier(_masm, this, ref_addr, $src$$Register, $tmp$$Register, t1, false /* is_atomic */); + __ sd($tmp$$Register, ref_addr); + %} + ins_pipe(pipe_serial); +%} + +instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp 
oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{ match(Set res (CompareAndSwapP mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); - effect(KILL cr, TEMP_DEF res); + predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); + effect(TEMP oldval_tmp, TEMP newval_tmp, KILL cr, TEMP_DEF res); ins_cost(2 * VOLATILE_REF_COST); @@ -83,35 +138,21 @@ instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva "mv $res, $res == $oldval" %} ins_encode %{ - Label failed; - guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, - Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register, - true /* result_as_bool */); - __ beqz($res$$Register, failed); - __ mv(t0, $oldval$$Register); - __ bind(failed); - if (barrier_data() != ZLoadBarrierElided) { - Label good; - __ ld(t1, Address(xthread, ZThreadLocalData::address_bad_mask_offset()), t1 /* tmp */); - __ andr(t1, t1, t0); - __ beqz(t1, good); - z_load_barrier_slow_path(_masm, this, Address($mem$$Register), t0 /* ref */, t1 /* tmp */); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, - Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register, - true /* result_as_bool */); - __ bind(good); - } + guarantee($mem$$disp == 0, "impossible encoding"); + Address ref_addr($mem$$Register); + z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, t0); + z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, t1, true /* is_atomic */); + __ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register, true /* result_as_bool */); %} ins_pipe(pipe_slow); %} -instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ +instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{ match(Set res (CompareAndSwapP mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - predicate(UseZGC && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)); - effect(KILL cr, TEMP_DEF res); + predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); + effect(TEMP oldval_tmp, TEMP newval_tmp, KILL cr, TEMP_DEF res); ins_cost(2 * VOLATILE_REF_COST); @@ -119,81 +160,53 @@ instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne "mv $res, $res == $oldval" %} ins_encode %{ - Label failed; - guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, - Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register, - true /* result_as_bool */); - __ beqz($res$$Register, failed); - __ mv(t0, $oldval$$Register); - __ bind(failed); - if (barrier_data() != ZLoadBarrierElided) { - Label good; - __ ld(t1, Address(xthread, ZThreadLocalData::address_bad_mask_offset()), t1 /* tmp */); - __ andr(t1, t1, t0); - __ beqz(t1, good); - z_load_barrier_slow_path(_masm, 
this, Address($mem$$Register), t0 /* ref */, t1 /* tmp */); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, - Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register, - true /* result_as_bool */); - __ bind(good); - } + guarantee($mem$$disp == 0, "impossible encoding"); + Address ref_addr($mem$$Register); + z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, t0); + z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, t1, true /* is_atomic */); + __ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register, true /* result_as_bool */); %} ins_pipe(pipe_slow); %} -instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval) %{ +instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{ match(Set res (CompareAndExchangeP mem (Binary oldval newval))); - predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); - effect(TEMP_DEF res); + predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); + effect(TEMP oldval_tmp, TEMP newval_tmp, KILL cr, TEMP_DEF res); ins_cost(2 * VOLATILE_REF_COST); format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangeP" %} ins_encode %{ - guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, - Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register); - if (barrier_data() != ZLoadBarrierElided) { - Label good; - __ ld(t0, Address(xthread, ZThreadLocalData::address_bad_mask_offset())); - __ andr(t0, t0, $res$$Register); - __ beqz(t0, good); - z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, t0 /* tmp */); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, - Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register); - __ bind(good); - } + guarantee($mem$$disp == 0, "impossible encoding"); + Address ref_addr($mem$$Register); + z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, t0); + z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, t1, true /* is_atomic */); + __ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register); + z_uncolor(_masm, this, $res$$Register); %} ins_pipe(pipe_slow); %} -instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval) %{ +instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{ match(Set res (CompareAndExchangeP mem (Binary oldval newval))); - predicate(UseZGC && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); - effect(TEMP_DEF res); + predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); + effect(TEMP oldval_tmp, TEMP newval_tmp, KILL cr, TEMP_DEF res); ins_cost(2 * VOLATILE_REF_COST); format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangePAcq" %} ins_encode %{ - guarantee($mem$$index == -1 && 
$mem$$disp == 0, "impossible encoding"); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, - Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register); - if (barrier_data() != ZLoadBarrierElided) { - Label good; - __ ld(t0, Address(xthread, ZThreadLocalData::address_bad_mask_offset())); - __ andr(t0, t0, $res$$Register); - __ beqz(t0, good); - z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, t0 /* tmp */); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, - Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register); - __ bind(good); - } + guarantee($mem$$disp == 0, "impossible encoding"); + Address ref_addr($mem$$Register); + z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, t0); + z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, t1, true /* is_atomic */); + __ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register); + z_uncolor(_masm, this, $res$$Register); %} ins_pipe(pipe_slow); @@ -201,7 +214,7 @@ instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ match(Set prev (GetAndSetP mem newv)); - predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); effect(TEMP_DEF prev, KILL cr); ins_cost(2 * VOLATILE_REF_COST); @@ -209,8 +222,9 @@ instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ format %{ "atomic_xchg $prev, $newv, [$mem], #@zGetAndSetP" %} ins_encode %{ - __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base)); - z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, t0 /* tmp */, barrier_data()); + z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, t1, true /* is_atomic */); + __ atomic_xchg($prev$$Register, $prev$$Register, $mem$$Register); + z_uncolor(_masm, this, $prev$$Register); %} ins_pipe(pipe_serial); @@ -218,16 +232,17 @@ instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ match(Set prev (GetAndSetP mem newv)); - predicate(UseZGC && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() != 0)); + predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); effect(TEMP_DEF prev, KILL cr); - ins_cost(VOLATILE_REF_COST); + ins_cost(2 * VOLATILE_REF_COST); format %{ "atomic_xchg_acq $prev, $newv, [$mem], #@zGetAndSetPAcq" %} ins_encode %{ - __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base)); - z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, t0 /* tmp */, barrier_data()); + z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, t1, true /* is_atomic */); + __ atomic_xchgal($prev$$Register, $prev$$Register, $mem$$Register); + z_uncolor(_masm, this, $prev$$Register); %} ins_pipe(pipe_serial); %} diff --git a/src/hotspot/cpu/riscv/globalDefinitions_riscv.hpp b/src/hotspot/cpu/riscv/globalDefinitions_riscv.hpp index 504768c2409df..2a8cff71c55ac 100644 --- 
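
The zGetAndSetP/zGetAndSetPAcq forms above follow the same shape as the other generational-ZGC store patterns in this file: color the new value (z_store_barrier), perform the atomic operation on the colored word, then uncolor the previous value before handing it back to compiled code. As a rough stand-alone model of that idea (not HotSpot code; the 16-bit color width is an assumption chosen only because check_color() materializes 16-bit masks with li16u, and the real bit layout lives elsewhere in the ZGC sources):

#include <cassert>
#include <cstdint>

// Toy model of a "colored" pointer: the untagged address sits in the upper
// bits and metadata ("color") bits live in the low bits.
constexpr int kColorBits = 16;   // assumption, for illustration only

uint64_t color(uint64_t raw_addr, uint64_t good_color) {
  return (raw_addr << kColorBits) | good_color;   // role of z_color before a store
}

uint64_t uncolor(uint64_t colored) {
  return colored >> kColorBits;                   // role of z_uncolor after a load
}

bool needs_slow_path(uint64_t colored, uint64_t bad_mask) {
  return (colored & bad_mask) != 0;               // role of check_color: and + branch
}

int main() {
  const uint64_t addr = 0x00007f0000001000ull;
  const uint64_t p = color(addr, /*store-good color*/ 0x0004);
  assert(uncolor(p) == addr);
  assert(!needs_slow_path(p, /*bad mask*/ 0x0002));  // colored as good: fast path
  assert(needs_slow_path(p, /*bad mask*/ 0x0004));   // same bits now bad: take the stub
  return 0;
}
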
a/src/hotspot/cpu/riscv/globalDefinitions_riscv.hpp +++ b/src/hotspot/cpu/riscv/globalDefinitions_riscv.hpp @@ -47,8 +47,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false; #define SUPPORT_RESERVED_STACK_AREA -#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false - #define USE_POINTERS_TO_REGISTER_IMPL_ARRAY #define DEFAULT_CACHE_LINE_SIZE 64 diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp index 093a762cc06d6..dffb3738048e9 100644 --- a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp +++ b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp @@ -922,7 +922,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) bne(tmp1, obj_reg, slow_case); ld(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); - andi(t0, header_reg, markWord::monitor_value); + test_bit(t0, header_reg, exact_log2(markWord::monitor_value)); bnez(t0, slow_case); fast_unlock(obj_reg, header_reg, swap_reg, t0, slow_case); j(count); diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp index c47cc1c967731..b04aebf80dd2a 100644 --- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp @@ -563,8 +563,8 @@ void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp beqz(value, done); // Use null as-is. // Test for tag. - andi(t0, value, JNIHandles::tag_mask); - bnez(t0, tagged); + andi(tmp1, value, JNIHandles::tag_mask); + bnez(tmp1, tagged); // Resolve local handle access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2); @@ -573,12 +573,14 @@ void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp bind(tagged); // Test for jweak tag. - test_bit(t0, value, exact_log2(JNIHandles::TypeTag::weak_global)); - bnez(t0, weak_tagged); + STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1); + test_bit(tmp1, value, exact_log2(JNIHandles::TypeTag::weak_global)); + bnez(tmp1, weak_tagged); // Resolve global handle access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); + verify_oop(value); j(done); bind(weak_tagged); @@ -598,9 +600,10 @@ void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Regis #ifdef ASSERT { + STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10); Label valid_global_tag; - test_bit(t0, value, exact_log2(JNIHandles::TypeTag::global)); // Test for global tag. - bnez(t0, valid_global_tag); + test_bit(tmp1, value, exact_log2(JNIHandles::TypeTag::global)); // Test for global tag. 
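
The resolve_jobject changes above rely on the tag stored in the low bits of a jobject: the new STATIC_ASSERTs pin weak_global to 0b1 and global to 0b10, and a global handle is dereferenced at Address(value, -JNIHandles::TypeTag::global) so the tag cancels out. A small stand-alone sketch of that tagging scheme, assuming a two-bit tag mask and with helper names invented for illustration:

#include <cassert>
#include <cstdint>

// Tag values as pinned by the STATIC_ASSERTs above; kTagMask is an assumption.
constexpr uintptr_t kWeakGlobalTag = 0b01;
constexpr uintptr_t kGlobalTag     = 0b10;
constexpr uintptr_t kTagMask       = 0b11;

bool is_tagged(uintptr_t handle)      { return (handle & kTagMask) != 0; }       // local if false
bool is_weak_global(uintptr_t handle) { return (handle & kWeakGlobalTag) != 0; } // the test_bit check

// A global handle is (slot address | tag), so subtracting the tag recovers the
// slot address; that is why the load above uses a negative displacement.
uintptr_t global_slot(uintptr_t handle) { return handle - kGlobalTag; }

int main() {
  const uintptr_t slot = 0x1000;            // pretend handle-storage slot address
  const uintptr_t h = slot | kGlobalTag;    // what a global jobject value looks like
  assert(is_tagged(h) && !is_weak_global(h));
  assert(global_slot(h) == slot);
  return 0;
}
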
+ bnez(tmp1, valid_global_tag); stop("non global jobject using resolve_global_jobject"); bind(valid_global_tag); } @@ -755,6 +758,11 @@ void MacroAssembler::la(Register Rd, Label &label) { wrap_label(Rd, label, &MacroAssembler::la); } +void MacroAssembler::li16u(Register Rd, int32_t imm) { + lui(Rd, imm << 12); + srli(Rd, Rd, 12); +} + void MacroAssembler::li32(Register Rd, int32_t imm) { // int32_t is in range 0x8000 0000 ~ 0x7fff ffff, and imm[31] is the sign bit int64_t upper = imm, lower = imm; @@ -1404,6 +1412,11 @@ static int patch_imm_in_li64(address branch, address target) { return LI64_INSTRUCTIONS_NUM * NativeInstruction::instruction_size; } +static int patch_imm_in_li16u(address branch, int32_t target) { + Assembler::patch(branch, 31, 12, target & 0xfffff); // patch lui only + return NativeInstruction::instruction_size; +} + int MacroAssembler::patch_imm_in_li32(address branch, int32_t target) { const int LI32_INSTRUCTIONS_NUM = 2; // lui + addiw int64_t upper = (intptr_t)target; @@ -1493,6 +1506,9 @@ int MacroAssembler::pd_patch_instruction_size(address branch, address target) { } else if (NativeInstruction::is_li32_at(branch)) { // li32 int64_t imm = (intptr_t)target; return patch_imm_in_li32(branch, (int32_t)imm); + } else if (NativeInstruction::is_li16u_at(branch)) { + int64_t imm = (intptr_t)target; + return patch_imm_in_li16u(branch, (int32_t)imm); } else { #ifdef ASSERT tty->print_cr("pd_patch_instruction_size: instruction 0x%x at " INTPTR_FORMAT " could not be patched!\n", @@ -2426,6 +2442,10 @@ void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acqui void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, Label &succeed, Label *fail) { + assert_different_registers(addr, tmp); + assert_different_registers(newv, tmp); + assert_different_registers(oldv, tmp); + // oldv holds comparison value // newv holds value to write in exchange // addr identifies memory word to compare against/update @@ -2612,6 +2632,9 @@ void MacroAssembler::cmpxchg(Register addr, Register expected, Assembler::Aqrl acquire, Assembler::Aqrl release, Register result, bool result_as_bool) { assert(size != int8 && size != int16, "unsupported operand size"); + assert_different_registers(addr, t0); + assert_different_registers(expected, t0); + assert_different_registers(new_val, t0); Label retry_load, done, ne_done; bind(retry_load); @@ -2644,6 +2667,10 @@ void MacroAssembler::cmpxchg_weak(Register addr, Register expected, enum operand_size size, Assembler::Aqrl acquire, Assembler::Aqrl release, Register result) { + assert_different_registers(addr, t0); + assert_different_registers(expected, t0); + assert_different_registers(new_val, t0); + Label fail, done; load_reserved(addr, size, acquire); bne(t0, expected, fail); diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp index e6a286d4ea3ad..143e71413f1c9 100644 --- a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp @@ -689,6 +689,7 @@ class MacroAssembler: public Assembler { void la(Register Rd, const address dest); void la(Register Rd, const Address &adr); + void li16u(Register Rd, int32_t imm); void li32(Register Rd, int32_t imm); void li64(Register Rd, int64_t imm); void li (Register Rd, int64_t imm); // optimized load immediate diff --git a/src/hotspot/cpu/riscv/nativeInst_riscv.cpp b/src/hotspot/cpu/riscv/nativeInst_riscv.cpp index a0a946fb5fec9..ee91ceae80938 100644 --- 
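
li16u above materializes a zero-extended 16-bit constant with a lui/srli pair, and patch_imm_in_li16u later rewrites only the lui field (bits 31..12 of the first instruction). That is what lets the ZGC code in riscv_z.ad emit li16u(result, barrier_Relocation::unpatched) and patch the real 16-bit barrier mask in afterwards without touching the srli. A quick check of the round trip in plain C++, simulating the two instructions rather than calling any HotSpot API:

#include <cassert>
#include <cstdint>

// Simulate "lui rd, imm" (which writes sext(imm20 << 12)) followed by
// "srli rd, rd, 12" on RV64.
uint64_t li16u_value(uint32_t imm) {
  const int64_t after_lui = (int64_t)(int32_t)(imm << 12);  // lui result, sign-extended
  return (uint64_t)after_lui >> 12;                         // logical shift right by 12
}

int main() {
  // Every 16-bit value round-trips unchanged, so re-targeting the constant
  // only needs the 20-bit lui field patched (target & 0xfffff), as above.
  const uint32_t vals[] = {0x0000u, 0x0001u, 0x00ffu, 0xabcdu, 0xffffu};
  for (uint32_t v : vals) {
    assert(li16u_value(v) == v);
  }
  return 0;
}
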
a/src/hotspot/cpu/riscv/nativeInst_riscv.cpp +++ b/src/hotspot/cpu/riscv/nativeInst_riscv.cpp @@ -1,7 +1,7 @@ /* * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. - * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. + * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -97,6 +97,12 @@ bool NativeInstruction::is_movptr_at(address instr) { check_movptr_data_dependency(instr); } +bool NativeInstruction::is_li16u_at(address instr) { + return is_lui_at(instr) && // lui + is_srli_at(instr + instruction_size) && // srli + check_li16u_data_dependency(instr); +} + bool NativeInstruction::is_li32_at(address instr) { return is_lui_at(instr) && // lui is_addiw_at(instr + instruction_size) && // addiw diff --git a/src/hotspot/cpu/riscv/nativeInst_riscv.hpp b/src/hotspot/cpu/riscv/nativeInst_riscv.hpp index 8534b873d9a40..df69b33c3640e 100644 --- a/src/hotspot/cpu/riscv/nativeInst_riscv.hpp +++ b/src/hotspot/cpu/riscv/nativeInst_riscv.hpp @@ -1,7 +1,7 @@ /* * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved. - * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. + * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -81,6 +81,14 @@ class NativeInstruction { static bool is_addiw_to_zr_at(address instr) { assert_cond(instr != nullptr); return is_addiw_at(instr) && extract_rd(instr) == zr; } static bool is_lui_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0110111; } static bool is_lui_to_zr_at(address instr) { assert_cond(instr != nullptr); return is_lui_at(instr) && extract_rd(instr) == zr; } + + static bool is_srli_at(address instr) { + assert_cond(instr != nullptr); + return extract_opcode(instr) == 0b0010011 && + extract_funct3(instr) == 0b101 && + Assembler::extract(((unsigned*)instr)[0], 31, 26) == 0b000000; + } + static bool is_slli_shift_at(address instr, uint32_t shift) { assert_cond(instr != nullptr); return (extract_opcode(instr) == 0b0010011 && // opcode field @@ -153,6 +161,17 @@ class NativeInstruction { extract_rs1(addi4) == extract_rd(addi4); } + // the instruction sequence of li16u is as below: + // lui + // srli + static bool check_li16u_data_dependency(address instr) { + address lui = instr; + address srli = lui + instruction_size; + + return extract_rs1(srli) == extract_rd(lui) && + extract_rs1(srli) == extract_rd(srli); + } + // the instruction sequence of li32 is as below: // lui // addiw @@ -186,6 +205,7 @@ class NativeInstruction { } static bool is_movptr_at(address instr); + static bool is_li16u_at(address instr); static bool is_li32_at(address instr); static bool is_li64_at(address instr); static bool is_pc_relative_at(address branch); diff --git a/src/hotspot/cpu/riscv/relocInfo_riscv.hpp b/src/hotspot/cpu/riscv/relocInfo_riscv.hpp index 840ed935d88b7..8f2478849447d 100644 --- a/src/hotspot/cpu/riscv/relocInfo_riscv.hpp +++ b/src/hotspot/cpu/riscv/relocInfo_riscv.hpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. 
- * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,8 @@ // Relocations are byte-aligned. offset_unit = 1, // Must be at least 1 for RelocInfo::narrow_oop_in_const. - format_width = 1 + // Must be at least 2 for ZGC GC barrier patching. + format_width = 2 }; public: diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad index ab2d15d459b5d..25b3786cf0712 100644 --- a/src/hotspot/cpu/riscv/riscv.ad +++ b/src/hotspot/cpu/riscv/riscv.ad @@ -2663,7 +2663,7 @@ encode %{ // If the owner is anonymous, we need to fix it -- in an outline stub. Register tmp2 = disp_hdr; __ ld(tmp2, Address(tmp, ObjectMonitor::owner_offset_in_bytes())); - __ andi(t0, tmp2, (int64_t)ObjectMonitor::ANONYMOUS_OWNER); + __ test_bit(t0, tmp2, exact_log2(ObjectMonitor::ANONYMOUS_OWNER)); C2HandleAnonOMOwnerStub* stub = new (Compile::current()->comp_arena()) C2HandleAnonOMOwnerStub(tmp, tmp2); Compile::current()->output()->add_stub(stub); __ bnez(t0, stub->entry(), /* is_far */ true); @@ -5338,15 +5338,15 @@ instruct storeN(iRegN src, memory mem) ins_pipe(istore_reg_mem); %} -instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem) +instruct storeImmN0(immN0 zero, memory mem) %{ match(Set mem (StoreN mem zero)); ins_cost(STORE_COST); - format %{ "sw rheapbase, $mem\t# compressed ptr (rheapbase==0), #@storeImmN0" %} + format %{ "sw zr, $mem\t# compressed ptr, #@storeImmN0" %} ins_encode %{ - __ sw(as_Register($heapbase$$reg), Address(as_Register($mem$$base), $mem$$disp)); + __ sw(zr, Address(as_Register($mem$$base), $mem$$disp)); %} ins_pipe(istore_reg_mem); @@ -10206,7 +10206,7 @@ instruct stringU_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch, effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP_DEF result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr); - format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result" %} + format %{ "StringUTF16 IndexOf char[] $str1, $cnt1, $ch -> $result" %} ins_encode %{ __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register, $result$$Register, $tmp1$$Register, $tmp2$$Register, @@ -10225,7 +10225,7 @@ instruct stringL_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch, effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP_DEF result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr); - format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result" %} + format %{ "StringLatin1 IndexOf char[] $str1, $cnt1, $ch -> $result" %} ins_encode %{ __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register, $result$$Register, $tmp1$$Register, $tmp2$$Register, diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp index 643550f80a604..70fcde9445c45 100644 --- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp +++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp @@ -1828,7 +1828,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, } else { assert(LockingMode == LM_LIGHTWEIGHT, ""); __ ld(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes())); - __ andi(t0, old_hdr, markWord::monitor_value); + __ test_bit(t0, old_hdr, exact_log2(markWord::monitor_value)); __ bnez(t0, slow_path_unlock); __ 
fast_unlock(obj_reg, old_hdr, swap_reg, t0, slow_path_unlock); __ decrement(Address(xthread, JavaThread::held_monitor_count_offset())); diff --git a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp index d1c76beb3f9a4..ab6c3e8feb00a 100644 --- a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp +++ b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp @@ -51,9 +51,6 @@ #ifdef COMPILER2 #include "opto/runtime.hpp" #endif -#if INCLUDE_ZGC -#include "gc/z/zThreadLocalData.hpp" -#endif // Declaration and definition of StubGenerator (no .hpp file). // For a more detailed description of the stub routine structure @@ -957,7 +954,11 @@ class StubGenerator: public StubCodeGenerator { Label same_aligned; Label copy_big, copy32_loop, copy8_loop, copy_small, done; - __ beqz(count, done); + // The size of copy32_loop body increases significantly with ZGC GC barriers. + // Need conditional far branches to reach a point beyond the loop in this case. + bool is_far = UseZGC && ZGenerational; + + __ beqz(count, done, is_far); __ slli(cnt, count, exact_log2(granularity)); if (is_backwards) { __ add(src, s, cnt); @@ -971,15 +972,15 @@ class StubGenerator: public StubCodeGenerator { __ addi(t0, cnt, -32); __ bgez(t0, copy32_loop); __ addi(t0, cnt, -8); - __ bgez(t0, copy8_loop); + __ bgez(t0, copy8_loop, is_far); __ j(copy_small); } else { __ mv(t0, 16); - __ blt(cnt, t0, copy_small); + __ blt(cnt, t0, copy_small, is_far); __ xorr(t0, src, dst); __ andi(t0, t0, 0b111); - __ bnez(t0, copy_small); + __ bnez(t0, copy_small, is_far); __ bind(same_aligned); __ andi(t0, src, 0b111); @@ -995,26 +996,27 @@ class StubGenerator: public StubCodeGenerator { __ addi(dst, dst, step); } __ addi(cnt, cnt, -granularity); - __ beqz(cnt, done); + __ beqz(cnt, done, is_far); __ j(same_aligned); __ bind(copy_big); __ mv(t0, 32); - __ blt(cnt, t0, copy8_loop); + __ blt(cnt, t0, copy8_loop, is_far); } + __ bind(copy32_loop); if (is_backwards) { __ addi(src, src, -wordSize * 4); __ addi(dst, dst, -wordSize * 4); } // we first load 32 bytes, then write it, so the direction here doesn't matter - bs_asm->copy_load_at(_masm, decorators, type, 8, tmp3, Address(src), gct1); - bs_asm->copy_load_at(_masm, decorators, type, 8, tmp4, Address(src, 8), gct1); + bs_asm->copy_load_at(_masm, decorators, type, 8, tmp3, Address(src), gct1); + bs_asm->copy_load_at(_masm, decorators, type, 8, tmp4, Address(src, 8), gct1); bs_asm->copy_load_at(_masm, decorators, type, 8, tmp5, Address(src, 16), gct1); bs_asm->copy_load_at(_masm, decorators, type, 8, tmp6, Address(src, 24), gct1); - bs_asm->copy_store_at(_masm, decorators, type, 8, Address(dst), tmp3, gct1, gct2, gct3); - bs_asm->copy_store_at(_masm, decorators, type, 8, Address(dst, 8), tmp4, gct1, gct2, gct3); + bs_asm->copy_store_at(_masm, decorators, type, 8, Address(dst), tmp3, gct1, gct2, gct3); + bs_asm->copy_store_at(_masm, decorators, type, 8, Address(dst, 8), tmp4, gct1, gct2, gct3); bs_asm->copy_store_at(_masm, decorators, type, 8, Address(dst, 16), tmp5, gct1, gct2, gct3); bs_asm->copy_store_at(_masm, decorators, type, 8, Address(dst, 24), tmp6, gct1, gct2, gct3); @@ -3731,7 +3733,7 @@ class StubGenerator: public StubCodeGenerator { framesize // inclusive of return address }; - const int insts_size = 512; + const int insts_size = 1024; const int locs_size = 64; CodeBuffer code(name, insts_size, locs_size); @@ -3970,7 +3972,7 @@ class StubGenerator: public StubCodeGenerator { framesize // inclusive of return address }; - int insts_size = 512; + int insts_size 
= 1024; int locs_size = 64; CodeBuffer code("jfr_write_checkpoint", insts_size, locs_size); OopMapSet* oop_maps = new OopMapSet(); diff --git a/src/hotspot/cpu/riscv/stubRoutines_riscv.hpp b/src/hotspot/cpu/riscv/stubRoutines_riscv.hpp index cd2dc90c765de..1a177a0c1adbc 100644 --- a/src/hotspot/cpu/riscv/stubRoutines_riscv.hpp +++ b/src/hotspot/cpu/riscv/stubRoutines_riscv.hpp @@ -1,7 +1,7 @@ /* * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. - * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. + * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,8 +39,8 @@ enum platform_dependent_constants { // simply increase sizes if too small (assembler will crash if too small) _initial_stubs_code_size = 19000, _continuation_stubs_code_size = 2000, - _compiler_stubs_code_size = 28000, - _final_stubs_code_size = 28000 + _compiler_stubs_code_size = 128000, + _final_stubs_code_size = 128000 }; class riscv { diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp index 892255f710cdf..f79d647b48c9b 100644 --- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp @@ -2722,7 +2722,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { Register obj = op->obj_opr()->as_register(); // May not be an oop. Register hdr = op->hdr_opr()->as_register(); Register lock = op->lock_opr()->as_register(); - if (UseHeavyMonitors) { + if (LockingMode == LM_MONITOR) { if (op->info() != nullptr) { add_debug_info_for_null_check_here(op->info()); __ null_check(obj); diff --git a/src/hotspot/cpu/s390/globalDefinitions_s390.hpp b/src/hotspot/cpu/s390/globalDefinitions_s390.hpp index e8494b423cc4b..99906bb369e54 100644 --- a/src/hotspot/cpu/s390/globalDefinitions_s390.hpp +++ b/src/hotspot/cpu/s390/globalDefinitions_s390.hpp @@ -46,6 +46,4 @@ const bool CCallingConventionRequiresIntsAsLongs = true; #define SUPPORT_RESERVED_STACK_AREA -#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false - #endif // CPU_S390_GLOBALDEFINITIONS_S390_HPP diff --git a/src/hotspot/cpu/s390/interp_masm_s390.cpp b/src/hotspot/cpu/s390/interp_masm_s390.cpp index 576f69e7e4d29..76239221a0d20 100644 --- a/src/hotspot/cpu/s390/interp_masm_s390.cpp +++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp @@ -982,7 +982,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state, // object - Address of the object to be locked. void InterpreterMacroAssembler::lock_object(Register monitor, Register object) { - if (UseHeavyMonitors) { + if (LockingMode == LM_MONITOR) { call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor); return; } @@ -1086,7 +1086,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) { // Throw IllegalMonitorException if object is not locked by current thread. 
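
The s390 C1 and interpreter changes above swap the old UseHeavyMonitors test for LockingMode == LM_MONITOR, matching the LockingMode == LM_LIGHTWEIGHT assertion already visible in the riscv native wrapper earlier in this patch. The overall dispatch has three branches; LM_LEGACY and the numeric values below are assumptions for illustration, since only LM_MONITOR and LM_LIGHTWEIGHT appear in this diff:

// Sketch of the three-way locking-mode dispatch that replaces UseHeavyMonitors.
enum LockingModes { LM_MONITOR = 0, LM_LEGACY = 1, LM_LIGHTWEIGHT = 2 };  // values assumed

const char* lock_strategy(int locking_mode) {
  switch (locking_mode) {
    case LM_MONITOR:     return "always inflate: call the runtime monitorenter/monitorexit path";
    case LM_LEGACY:      return "stack locking via a displaced mark word";
    case LM_LIGHTWEIGHT: return "CAS the mark word; fast_unlock with a slow path on contention";
    default:             return "unknown";
  }
}
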
void InterpreterMacroAssembler::unlock_object(Register monitor, Register object) { - if (UseHeavyMonitors) { + if (LockingMode == LM_MONITOR) { call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor); return; } diff --git a/src/hotspot/cpu/x86/assembler_x86.hpp b/src/hotspot/cpu/x86/assembler_x86.hpp index c45a1580cb93d..d551442ecf2db 100644 --- a/src/hotspot/cpu/x86/assembler_x86.hpp +++ b/src/hotspot/cpu/x86/assembler_x86.hpp @@ -822,6 +822,7 @@ class Assembler : public AbstractAssembler { // These are all easily abused and hence protected + public: // 32BIT ONLY SECTION #ifndef _LP64 // Make these disappear in 64bit mode since they would never be correct @@ -843,6 +844,7 @@ class Assembler : public AbstractAssembler { void mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec); #endif // _LP64 + protected: // These are unique in that we are ensured by the caller that the 32bit // relative in these instructions will always be able to reach the potentially // 64bit address described by entry. Since they can take a 64bit address they diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp index 88b45516fdbdd..db70f77c8c215 100644 --- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp @@ -1350,8 +1350,8 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch } #endif - // Load barrier has not yet been applied, so ZGC can't verify the oop here - if (!UseZGC) { + if (!(UseZGC && !ZGenerational)) { + // Load barrier has not yet been applied, so ZGC can't verify the oop here __ verify_oop(dest->as_register()); } } diff --git a/src/hotspot/cpu/x86/gc/shared/barrierSetNMethod_x86.cpp b/src/hotspot/cpu/x86/gc/shared/barrierSetNMethod_x86.cpp index 648d5fbabe93b..24d6b002b31a5 100644 --- a/src/hotspot/cpu/x86/gc/shared/barrierSetNMethod_x86.cpp +++ b/src/hotspot/cpu/x86/gc/shared/barrierSetNMethod_x86.cpp @@ -32,7 +32,11 @@ #include "runtime/sharedRuntime.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" +#include "utilities/formatBuffer.hpp" #include "utilities/macros.hpp" +#if INCLUDE_JVMCI +#include "jvmci/jvmciRuntime.hpp" +#endif class NativeNMethodCmpBarrier: public NativeInstruction { public: @@ -58,55 +62,63 @@ class NativeNMethodCmpBarrier: public NativeInstruction { jint get_immediate() const { return int_at(imm_offset); } void set_immediate(jint imm) { set_int_at(imm_offset, imm); } - void verify() const; + bool check_barrier(err_msg& msg) const; + void verify() const { +#ifdef ASSERT + err_msg msg("%s", ""); + assert(check_barrier(msg), "%s", msg.buffer()); +#endif + } }; #ifdef _LP64 -void NativeNMethodCmpBarrier::verify() const { +bool NativeNMethodCmpBarrier::check_barrier(err_msg& msg) const { + // Only require 4 byte alignment if (((uintptr_t) instruction_address()) & 0x3) { - fatal("Not properly aligned"); + msg.print("Addr: " INTPTR_FORMAT " not properly aligned", p2i(instruction_address())); + return false; } int prefix = ubyte_at(0); if (prefix != instruction_rex_prefix) { - tty->print_cr("Addr: " INTPTR_FORMAT " Prefix: 0x%x", p2i(instruction_address()), - prefix); - fatal("not a cmp barrier"); + msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x expected 0x%x", p2i(instruction_address()), prefix, instruction_rex_prefix); + return false; } int inst = ubyte_at(1); if (inst != instruction_code) { - tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()), - inst); - fatal("not a cmp 
barrier"); + msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x expected 0x%x", p2i(instruction_address()), inst, instruction_code); + return false; } int modrm = ubyte_at(2); if (modrm != instruction_modrm) { - tty->print_cr("Addr: " INTPTR_FORMAT " mod/rm: 0x%x", p2i(instruction_address()), - modrm); - fatal("not a cmp barrier"); + msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x expected mod/rm 0x%x", p2i(instruction_address()), modrm, instruction_modrm); + return false; } + return true; } #else -void NativeNMethodCmpBarrier::verify() const { +bool NativeNMethodCmpBarrier::check_barrier(err_msg& msg) const { if (((uintptr_t) instruction_address()) & 0x3) { - fatal("Not properly aligned"); + msg.print("Addr: " INTPTR_FORMAT " not properly aligned", p2i(instruction_address())); + return false; } int inst = ubyte_at(0); if (inst != instruction_code) { - tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()), + msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()), inst); - fatal("not a cmp barrier"); + return false; } int modrm = ubyte_at(1); if (modrm != instruction_modrm) { - tty->print_cr("Addr: " INTPTR_FORMAT " mod/rm: 0x%x", p2i(instruction_address()), + msg.print("Addr: " INTPTR_FORMAT " mod/rm: 0x%x", p2i(instruction_address()), modrm); - fatal("not a cmp barrier"); + return false; } + return true; } #endif // _LP64 @@ -170,9 +182,18 @@ static const int entry_barrier_offset(nmethod* nm) { } static NativeNMethodCmpBarrier* native_nmethod_barrier(nmethod* nm) { - address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm); + address barrier_address; +#if INCLUDE_JVMCI + if (nm->is_compiled_by_jvmci()) { + barrier_address = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset(); + } else +#endif + { + barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm); + } + NativeNMethodCmpBarrier* barrier = reinterpret_cast(barrier_address); - debug_only(barrier->verify()); + barrier->verify(); return barrier; } @@ -193,3 +214,11 @@ int BarrierSetNMethod::guard_value(nmethod* nm) { NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm); return cmp->get_immediate(); } + + +#if INCLUDE_JVMCI +bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) { + NativeNMethodCmpBarrier* barrier = native_nmethod_barrier(nm); + return barrier->check_barrier(msg); +} +#endif diff --git a/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.cpp new file mode 100644 index 0000000000000..38129a9fc81e5 --- /dev/null +++ b/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.cpp @@ -0,0 +1,713 @@ +/* + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "code/codeBlob.hpp" +#include "code/vmreg.inline.hpp" +#include "gc/x/xBarrier.inline.hpp" +#include "gc/x/xBarrierSet.hpp" +#include "gc/x/xBarrierSetAssembler.hpp" +#include "gc/x/xBarrierSetRuntime.hpp" +#include "gc/x/xThreadLocalData.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/sharedRuntime.hpp" +#include "utilities/macros.hpp" +#ifdef COMPILER1 +#include "c1/c1_LIRAssembler.hpp" +#include "c1/c1_MacroAssembler.hpp" +#include "gc/x/c1/xBarrierSetC1.hpp" +#endif // COMPILER1 +#ifdef COMPILER2 +#include "gc/x/c2/xBarrierSetC2.hpp" +#endif // COMPILER2 + +#ifdef PRODUCT +#define BLOCK_COMMENT(str) /* nothing */ +#else +#define BLOCK_COMMENT(str) __ block_comment(str) +#endif + +#undef __ +#define __ masm-> + +static void call_vm(MacroAssembler* masm, + address entry_point, + Register arg0, + Register arg1) { + // Setup arguments + if (arg1 == c_rarg0) { + if (arg0 == c_rarg1) { + __ xchgptr(c_rarg1, c_rarg0); + } else { + __ movptr(c_rarg1, arg1); + __ movptr(c_rarg0, arg0); + } + } else { + if (arg0 != c_rarg0) { + __ movptr(c_rarg0, arg0); + } + if (arg1 != c_rarg1) { + __ movptr(c_rarg1, arg1); + } + } + + // Call VM + __ MacroAssembler::call_VM_leaf_base(entry_point, 2); +} + +void XBarrierSetAssembler::load_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + Register dst, + Address src, + Register tmp1, + Register tmp_thread) { + if (!XBarrierSet::barrier_needed(decorators, type)) { + // Barrier not needed + BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); + return; + } + + BLOCK_COMMENT("XBarrierSetAssembler::load_at {"); + + // Allocate scratch register + Register scratch = tmp1; + if (tmp1 == noreg) { + scratch = r12; + __ push(scratch); + } + + assert_different_registers(dst, scratch); + + Label done; + + // + // Fast Path + // + + // Load address + __ lea(scratch, src); + + // Load oop at address + __ movptr(dst, Address(scratch, 0)); + + // Test address bad mask + __ testptr(dst, address_bad_mask_from_thread(r15_thread)); + __ jcc(Assembler::zero, done); + + // + // Slow path + // + + // Save registers + __ push(rax); + __ push(rcx); + __ push(rdx); + __ push(rdi); + __ push(rsi); + __ push(r8); + __ push(r9); + __ push(r10); + __ push(r11); + + // We may end up here from generate_native_wrapper, then the method may have + // floats as arguments, and we must spill them before calling the VM runtime + // leaf. From the interpreter all floats are passed on the stack. 
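
The call_vm helper above has to move two arbitrary registers into c_rarg0/c_rarg1 without the first move clobbering the second source: when the sources are exactly swapped it uses xchgptr, and otherwise it orders the moves so the occupied argument register is overwritten last. The same hazard and fix modelled on plain values, with registers reduced to array slots (purely illustrative):

#include <cassert>
#include <utility>

// Slot 0 plays c_rarg0 and slot 1 plays c_rarg1.  Put arg0's value in slot 0
// and arg1's value in slot 1 without losing either, mirroring call_vm above.
void setup_args(int regs[], int arg0, int arg1) {
  if (arg1 == 0) {                    // arg1 already lives in c_rarg0
    if (arg0 == 1) {
      std::swap(regs[0], regs[1]);    // fully swapped: exchange, like xchgptr
    } else {
      regs[1] = regs[arg1];           // save arg1 into c_rarg1 first ...
      regs[0] = regs[arg0];           // ... then c_rarg0 is safe to overwrite
    }
  } else {
    if (arg0 != 0) regs[0] = regs[arg0];   // safe: arg1 is not in c_rarg0
    if (arg1 != 1) regs[1] = regs[arg1];
  }
}

int main() {
  int regs[4] = {10, 11, 12, 13};                           // current register contents
  setup_args(regs, /*arg0 in reg*/ 1, /*arg1 in reg*/ 0);   // the fully swapped case
  assert(regs[0] == 11 && regs[1] == 10);
  return 0;
}
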
+ assert(Argument::n_float_register_parameters_j == 8, "Assumption"); + const int xmm_size = wordSize * 2; + const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j; + __ subptr(rsp, xmm_spill_size); + __ movdqu(Address(rsp, xmm_size * 7), xmm7); + __ movdqu(Address(rsp, xmm_size * 6), xmm6); + __ movdqu(Address(rsp, xmm_size * 5), xmm5); + __ movdqu(Address(rsp, xmm_size * 4), xmm4); + __ movdqu(Address(rsp, xmm_size * 3), xmm3); + __ movdqu(Address(rsp, xmm_size * 2), xmm2); + __ movdqu(Address(rsp, xmm_size * 1), xmm1); + __ movdqu(Address(rsp, xmm_size * 0), xmm0); + + // Call VM + call_vm(masm, XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch); + + __ movdqu(xmm0, Address(rsp, xmm_size * 0)); + __ movdqu(xmm1, Address(rsp, xmm_size * 1)); + __ movdqu(xmm2, Address(rsp, xmm_size * 2)); + __ movdqu(xmm3, Address(rsp, xmm_size * 3)); + __ movdqu(xmm4, Address(rsp, xmm_size * 4)); + __ movdqu(xmm5, Address(rsp, xmm_size * 5)); + __ movdqu(xmm6, Address(rsp, xmm_size * 6)); + __ movdqu(xmm7, Address(rsp, xmm_size * 7)); + __ addptr(rsp, xmm_spill_size); + + __ pop(r11); + __ pop(r10); + __ pop(r9); + __ pop(r8); + __ pop(rsi); + __ pop(rdi); + __ pop(rdx); + __ pop(rcx); + + if (dst == rax) { + __ addptr(rsp, wordSize); + } else { + __ movptr(dst, rax); + __ pop(rax); + } + + __ bind(done); + + // Restore scratch register + if (tmp1 == noreg) { + __ pop(scratch); + } + + BLOCK_COMMENT("} XBarrierSetAssembler::load_at"); +} + +#ifdef ASSERT + +void XBarrierSetAssembler::store_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + Address dst, + Register src, + Register tmp1, + Register tmp2, + Register tmp3) { + BLOCK_COMMENT("XBarrierSetAssembler::store_at {"); + + // Verify oop store + if (is_reference_type(type)) { + // Note that src could be noreg, which means we + // are storing null and can skip verification. 
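
XBarrierSetAssembler::load_at above and the debug-only store_at verification that follows both come down to the same primitive: test the oop against the thread-local address bad mask and take the slow path (or fail verification) if any masked bit is set. A self-contained model of that test; the bit positions and the phase example are assumptions, since the real metadata layout is defined by the collector and not shown in this patch:

#include <cassert>
#include <cstdint>

// Illustrative only: in the multi-mapped (non-generational) scheme the high
// metadata bits of an oop record which view it was loaded through, and the
// per-thread bad mask selects the views that are stale for the current phase.
constexpr uint64_t kMetadataMarked0  = 1ull << 42;   // assumed bit positions
constexpr uint64_t kMetadataMarked1  = 1ull << 43;
constexpr uint64_t kMetadataRemapped = 1ull << 44;

bool is_good(uint64_t oop, uint64_t bad_mask) {
  return (oop & bad_mask) == 0;        // the testptr / jcc(zero, done) fast path
}

int main() {
  // Example phase: only the "remapped" view is good, the marked views are bad.
  const uint64_t bad_mask  = kMetadataMarked0 | kMetadataMarked1;
  const uint64_t good_oop  = kMetadataRemapped | 0x1000;
  const uint64_t stale_oop = kMetadataMarked0  | 0x1000;
  assert(is_good(good_oop, bad_mask));     // proceed without the barrier call
  assert(!is_good(stale_oop, bad_mask));   // would branch to the slow path
  return 0;
}
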
+ if (src != noreg) { + Label done; + __ testptr(src, address_bad_mask_from_thread(r15_thread)); + __ jcc(Assembler::zero, done); + __ stop("Verify oop store failed"); + __ should_not_reach_here(); + __ bind(done); + } + } + + // Store value + BarrierSetAssembler::store_at(masm, decorators, type, dst, src, tmp1, tmp2, tmp3); + + BLOCK_COMMENT("} XBarrierSetAssembler::store_at"); +} + +#endif // ASSERT + +void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + Register src, + Register dst, + Register count) { + if (!XBarrierSet::barrier_needed(decorators, type)) { + // Barrier not needed + return; + } + + BLOCK_COMMENT("XBarrierSetAssembler::arraycopy_prologue {"); + + // Save registers + __ pusha(); + + // Call VM + call_vm(masm, XBarrierSetRuntime::load_barrier_on_oop_array_addr(), src, count); + + // Restore registers + __ popa(); + + BLOCK_COMMENT("} XBarrierSetAssembler::arraycopy_prologue"); +} + +void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, + Register jni_env, + Register obj, + Register tmp, + Label& slowpath) { + BLOCK_COMMENT("XBarrierSetAssembler::try_resolve_jobject_in_native {"); + + // Resolve jobject + BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath); + + // Test address bad mask + __ testptr(obj, address_bad_mask_from_jni_env(jni_env)); + __ jcc(Assembler::notZero, slowpath); + + BLOCK_COMMENT("} XBarrierSetAssembler::try_resolve_jobject_in_native"); +} + +#ifdef COMPILER1 + +#undef __ +#define __ ce->masm()-> + +void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, + LIR_Opr ref) const { + __ testptr(ref->as_register(), address_bad_mask_from_thread(r15_thread)); +} + +void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, + XLoadBarrierStubC1* stub) const { + // Stub entry + __ bind(*stub->entry()); + + Register ref = stub->ref()->as_register(); + Register ref_addr = noreg; + Register tmp = noreg; + + if (stub->tmp()->is_valid()) { + // Load address into tmp register + ce->leal(stub->ref_addr(), stub->tmp()); + ref_addr = tmp = stub->tmp()->as_pointer_register(); + } else { + // Address already in register + ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register(); + } + + assert_different_registers(ref, ref_addr, noreg); + + // Save rax unless it is the result or tmp register + if (ref != rax && tmp != rax) { + __ push(rax); + } + + // Setup arguments and call runtime stub + __ subptr(rsp, 2 * BytesPerWord); + ce->store_parameter(ref_addr, 1); + ce->store_parameter(ref, 0); + __ call(RuntimeAddress(stub->runtime_stub())); + __ addptr(rsp, 2 * BytesPerWord); + + // Verify result + __ verify_oop(rax); + + // Move result into place + if (ref != rax) { + __ movptr(ref, rax); + } + + // Restore rax unless it is the result or tmp register + if (ref != rax && tmp != rax) { + __ pop(rax); + } + + // Stub exit + __ jmp(*stub->continuation()); +} + +#undef __ +#define __ sasm-> + +void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, + DecoratorSet decorators) const { + // Enter and save registers + __ enter(); + __ save_live_registers_no_oop_map(true /* save_fpu_registers */); + + // Setup arguments + __ load_parameter(1, c_rarg1); + __ load_parameter(0, c_rarg0); + + // Call VM + __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1); + + // Restore registers and return + __ 
restore_live_registers_except_rax(true /* restore_fpu_registers */); + __ leave(); + __ ret(0); +} + +#endif // COMPILER1 + +#ifdef COMPILER2 + +OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) { + if (!OptoReg::is_reg(opto_reg)) { + return OptoReg::Bad; + } + + const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); + if (vm_reg->is_XMMRegister()) { + opto_reg &= ~15; + switch (node->ideal_reg()) { + case Op_VecX: + opto_reg |= 2; + break; + case Op_VecY: + opto_reg |= 4; + break; + case Op_VecZ: + opto_reg |= 8; + break; + default: + opto_reg |= 1; + break; + } + } + + return opto_reg; +} + +// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel +extern void vec_spill_helper(CodeBuffer *cbuf, bool is_load, + int stack_offset, int reg, uint ireg, outputStream* st); + +#undef __ +#define __ _masm-> + +class XSaveLiveRegisters { +private: + struct XMMRegisterData { + XMMRegister _reg; + int _size; + + // Used by GrowableArray::find() + bool operator == (const XMMRegisterData& other) { + return _reg == other._reg; + } + }; + + MacroAssembler* const _masm; + GrowableArray _gp_registers; + GrowableArray _opmask_registers; + GrowableArray _xmm_registers; + int _spill_size; + int _spill_offset; + + static int xmm_compare_register_size(XMMRegisterData* left, XMMRegisterData* right) { + if (left->_size == right->_size) { + return 0; + } + + return (left->_size < right->_size) ? -1 : 1; + } + + static int xmm_slot_size(OptoReg::Name opto_reg) { + // The low order 4 bytes denote what size of the XMM register is live + return (opto_reg & 15) << 3; + } + + static uint xmm_ideal_reg_for_size(int reg_size) { + switch (reg_size) { + case 8: + return Op_VecD; + case 16: + return Op_VecX; + case 32: + return Op_VecY; + case 64: + return Op_VecZ; + default: + fatal("Invalid register size %d", reg_size); + return 0; + } + } + + bool xmm_needs_vzeroupper() const { + return _xmm_registers.is_nonempty() && _xmm_registers.at(0)._size > 16; + } + + void xmm_register_save(const XMMRegisterData& reg_data) { + const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg()); + const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size); + _spill_offset -= reg_data._size; + vec_spill_helper(__ code(), false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty); + } + + void xmm_register_restore(const XMMRegisterData& reg_data) { + const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg()); + const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size); + vec_spill_helper(__ code(), true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty); + _spill_offset += reg_data._size; + } + + void gp_register_save(Register reg) { + _spill_offset -= 8; + __ movq(Address(rsp, _spill_offset), reg); + } + + void opmask_register_save(KRegister reg) { + _spill_offset -= 8; + __ kmov(Address(rsp, _spill_offset), reg); + } + + void gp_register_restore(Register reg) { + __ movq(reg, Address(rsp, _spill_offset)); + _spill_offset += 8; + } + + void opmask_register_restore(KRegister reg) { + __ kmov(reg, Address(rsp, _spill_offset)); + _spill_offset += 8; + } + + void initialize(XLoadBarrierStubC2* stub) { + // Create mask of caller saved registers that need to + // be saved/restored if live + RegMask caller_saved; + caller_saved.Insert(OptoReg::as_OptoReg(rax->as_VMReg())); + caller_saved.Insert(OptoReg::as_OptoReg(rcx->as_VMReg())); + caller_saved.Insert(OptoReg::as_OptoReg(rdx->as_VMReg())); + 
caller_saved.Insert(OptoReg::as_OptoReg(rsi->as_VMReg())); + caller_saved.Insert(OptoReg::as_OptoReg(rdi->as_VMReg())); + caller_saved.Insert(OptoReg::as_OptoReg(r8->as_VMReg())); + caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg())); + caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg())); + caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg())); + caller_saved.Remove(OptoReg::as_OptoReg(stub->ref()->as_VMReg())); + + // Create mask of live registers + RegMask live = stub->live(); + if (stub->tmp() != noreg) { + live.Insert(OptoReg::as_OptoReg(stub->tmp()->as_VMReg())); + } + + int gp_spill_size = 0; + int opmask_spill_size = 0; + int xmm_spill_size = 0; + + // Record registers that needs to be saved/restored + RegMaskIterator rmi(live); + while (rmi.has_next()) { + const OptoReg::Name opto_reg = rmi.next(); + const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); + + if (vm_reg->is_Register()) { + if (caller_saved.Member(opto_reg)) { + _gp_registers.append(vm_reg->as_Register()); + gp_spill_size += 8; + } + } else if (vm_reg->is_KRegister()) { + // All opmask registers are caller saved, thus spill the ones + // which are live. + if (_opmask_registers.find(vm_reg->as_KRegister()) == -1) { + _opmask_registers.append(vm_reg->as_KRegister()); + opmask_spill_size += 8; + } + } else if (vm_reg->is_XMMRegister()) { + // We encode in the low order 4 bits of the opto_reg, how large part of the register is live + const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~15); + const int reg_size = xmm_slot_size(opto_reg); + const XMMRegisterData reg_data = { vm_reg_base->as_XMMRegister(), reg_size }; + const int reg_index = _xmm_registers.find(reg_data); + if (reg_index == -1) { + // Not previously appended + _xmm_registers.append(reg_data); + xmm_spill_size += reg_size; + } else { + // Previously appended, update size + const int reg_size_prev = _xmm_registers.at(reg_index)._size; + if (reg_size > reg_size_prev) { + _xmm_registers.at_put(reg_index, reg_data); + xmm_spill_size += reg_size - reg_size_prev; + } + } + } else { + fatal("Unexpected register type"); + } + } + + // Sort by size, largest first + _xmm_registers.sort(xmm_compare_register_size); + + // On Windows, the caller reserves stack space for spilling register arguments + const int arg_spill_size = frame::arg_reg_save_area_bytes; + + // Stack pointer must be 16 bytes aligned for the call + _spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size + opmask_spill_size + arg_spill_size, 16); + } + +public: + XSaveLiveRegisters(MacroAssembler* masm, XLoadBarrierStubC2* stub) : + _masm(masm), + _gp_registers(), + _opmask_registers(), + _xmm_registers(), + _spill_size(0), + _spill_offset(0) { + + // + // Stack layout after registers have been spilled: + // + // | ... | original rsp, 16 bytes aligned + // ------------------ + // | zmm0 high | + // | ... | + // | zmm0 low | 16 bytes aligned + // | ... | + // | ymm1 high | + // | ... | + // | ymm1 low | 16 bytes aligned + // | ... | + // | xmmN high | + // | ... | + // | xmmN low | 8 bytes aligned + // | reg0 | 8 bytes aligned + // | reg1 | + // | ... 
| + // | regN | new rsp, if 16 bytes aligned + // | | else new rsp, 16 bytes aligned + // ------------------ + // + + // Figure out what registers to save/restore + initialize(stub); + + // Allocate stack space + if (_spill_size > 0) { + __ subptr(rsp, _spill_size); + } + + // Save XMM/YMM/ZMM registers + for (int i = 0; i < _xmm_registers.length(); i++) { + xmm_register_save(_xmm_registers.at(i)); + } + + if (xmm_needs_vzeroupper()) { + __ vzeroupper(); + } + + // Save general purpose registers + for (int i = 0; i < _gp_registers.length(); i++) { + gp_register_save(_gp_registers.at(i)); + } + + // Save opmask registers + for (int i = 0; i < _opmask_registers.length(); i++) { + opmask_register_save(_opmask_registers.at(i)); + } + } + + ~XSaveLiveRegisters() { + // Restore opmask registers + for (int i = _opmask_registers.length() - 1; i >= 0; i--) { + opmask_register_restore(_opmask_registers.at(i)); + } + + // Restore general purpose registers + for (int i = _gp_registers.length() - 1; i >= 0; i--) { + gp_register_restore(_gp_registers.at(i)); + } + + __ vzeroupper(); + + // Restore XMM/YMM/ZMM registers + for (int i = _xmm_registers.length() - 1; i >= 0; i--) { + xmm_register_restore(_xmm_registers.at(i)); + } + + // Free stack space + if (_spill_size > 0) { + __ addptr(rsp, _spill_size); + } + } +}; + +class XSetupArguments { +private: + MacroAssembler* const _masm; + const Register _ref; + const Address _ref_addr; + +public: + XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) : + _masm(masm), + _ref(stub->ref()), + _ref_addr(stub->ref_addr()) { + + // Setup arguments + if (_ref_addr.base() == noreg) { + // No self healing + if (_ref != c_rarg0) { + __ movq(c_rarg0, _ref); + } + __ xorq(c_rarg1, c_rarg1); + } else { + // Self healing + if (_ref == c_rarg0) { + __ lea(c_rarg1, _ref_addr); + } else if (_ref != c_rarg1) { + __ lea(c_rarg1, _ref_addr); + __ movq(c_rarg0, _ref); + } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) { + __ movq(c_rarg0, _ref); + __ lea(c_rarg1, _ref_addr); + } else { + __ xchgq(c_rarg0, c_rarg1); + if (_ref_addr.base() == c_rarg0) { + __ lea(c_rarg1, Address(c_rarg1, _ref_addr.index(), _ref_addr.scale(), _ref_addr.disp())); + } else if (_ref_addr.index() == c_rarg0) { + __ lea(c_rarg1, Address(_ref_addr.base(), c_rarg1, _ref_addr.scale(), _ref_addr.disp())); + } else { + ShouldNotReachHere(); + } + } + } + } + + ~XSetupArguments() { + // Transfer result + if (_ref != rax) { + __ movq(_ref, rax); + } + } +}; + +#undef __ +#define __ masm-> + +void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const { + BLOCK_COMMENT("XLoadBarrierStubC2"); + + // Stub entry + __ bind(*stub->entry()); + + { + XSaveLiveRegisters save_live_registers(masm, stub); + XSetupArguments setup_arguments(masm, stub); + __ call(RuntimeAddress(stub->slow_path())); + } + + // Stub exit + __ jmp(*stub->continuation()); +} + +#endif // COMPILER2 + +#undef __ +#define __ masm-> + +void XBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) { + // Check if metadata bits indicate a bad oop + __ testptr(obj, Address(r15_thread, XThreadLocalData::address_bad_mask_offset())); + __ jcc(Assembler::notZero, error); + BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error); +} + +#undef __ diff --git a/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.hpp new file mode 100644 index 
0000000000000..52034ab786ec2 --- /dev/null +++ b/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.hpp @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef CPU_X86_GC_X_XBARRIERSETASSEMBLER_X86_HPP +#define CPU_X86_GC_X_XBARRIERSETASSEMBLER_X86_HPP + +#include "code/vmreg.hpp" +#include "oops/accessDecorators.hpp" +#ifdef COMPILER2 +#include "opto/optoreg.hpp" +#endif // COMPILER2 + +class MacroAssembler; + +#ifdef COMPILER1 +class LIR_Assembler; +class LIR_Opr; +class StubAssembler; +#endif // COMPILER1 + +#ifdef COMPILER2 +class Node; +#endif // COMPILER2 + +#ifdef COMPILER1 +class XLoadBarrierStubC1; +#endif // COMPILER1 + +#ifdef COMPILER2 +class XLoadBarrierStubC2; +#endif // COMPILER2 + +class XBarrierSetAssembler : public XBarrierSetAssemblerBase { +public: + virtual void load_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + Register dst, + Address src, + Register tmp1, + Register tmp_thread); + +#ifdef ASSERT + virtual void store_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + Address dst, + Register src, + Register tmp1, + Register tmp2, + Register tmp3); +#endif // ASSERT + + virtual void arraycopy_prologue(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + Register src, + Register dst, + Register count); + + virtual void try_resolve_jobject_in_native(MacroAssembler* masm, + Register jni_env, + Register obj, + Register tmp, + Label& slowpath); + +#ifdef COMPILER1 + void generate_c1_load_barrier_test(LIR_Assembler* ce, + LIR_Opr ref) const; + + void generate_c1_load_barrier_stub(LIR_Assembler* ce, + XLoadBarrierStubC1* stub) const; + + void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, + DecoratorSet decorators) const; +#endif // COMPILER1 + +#ifdef COMPILER2 + OptoReg::Name refine_register(const Node* node, + OptoReg::Name opto_reg); + + void generate_c2_load_barrier_stub(MacroAssembler* masm, + XLoadBarrierStubC2* stub) const; +#endif // COMPILER2 + + void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error); +}; + +#endif // CPU_X86_GC_X_XBARRIERSETASSEMBLER_X86_HPP diff --git a/src/hotspot/cpu/x86/gc/z/zGlobals_x86.cpp b/src/hotspot/cpu/x86/gc/x/xGlobals_x86.cpp similarity index 97% rename from src/hotspot/cpu/x86/gc/z/zGlobals_x86.cpp rename to src/hotspot/cpu/x86/gc/x/xGlobals_x86.cpp index 875eb8855ddca..baa99ddd60db7 100644 --- a/src/hotspot/cpu/x86/gc/z/zGlobals_x86.cpp +++ b/src/hotspot/cpu/x86/gc/x/xGlobals_x86.cpp @@ -23,7 
+23,7 @@ #include "precompiled.hpp" #include "gc/shared/gc_globals.hpp" -#include "gc/z/zGlobals.hpp" +#include "gc/x/xGlobals.hpp" #include "runtime/globals.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/powerOfTwo.hpp" @@ -136,14 +136,14 @@ // * 63-48 Fixed (16-bits, always zero) // -size_t ZPlatformAddressOffsetBits() { +size_t XPlatformAddressOffsetBits() { const size_t min_address_offset_bits = 42; // 4TB const size_t max_address_offset_bits = 44; // 16TB - const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio); + const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio); const size_t address_offset_bits = log2i_exact(address_offset); return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); } -size_t ZPlatformAddressMetadataShift() { - return ZPlatformAddressOffsetBits(); +size_t XPlatformAddressMetadataShift() { + return XPlatformAddressOffsetBits(); } diff --git a/src/hotspot/cpu/x86/gc/x/xGlobals_x86.hpp b/src/hotspot/cpu/x86/gc/x/xGlobals_x86.hpp new file mode 100644 index 0000000000000..dd00d4ddadcf1 --- /dev/null +++ b/src/hotspot/cpu/x86/gc/x/xGlobals_x86.hpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef CPU_X86_GC_X_XGLOBALS_X86_HPP +#define CPU_X86_GC_X_XGLOBALS_X86_HPP + +const size_t XPlatformHeapViews = 3; +const size_t XPlatformCacheLineSize = 64; + +size_t XPlatformAddressOffsetBits(); +size_t XPlatformAddressMetadataShift(); + +#endif // CPU_X86_GC_X_XGLOBALS_X86_HPP diff --git a/src/hotspot/cpu/x86/gc/x/x_x86_64.ad b/src/hotspot/cpu/x86/gc/x/x_x86_64.ad new file mode 100644 index 0000000000000..c33a994a4b87a --- /dev/null +++ b/src/hotspot/cpu/x86/gc/x/x_x86_64.ad @@ -0,0 +1,158 @@ +// +// Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. +// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +// +// This code is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License version 2 only, as +// published by the Free Software Foundation. +// +// This code is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// version 2 for more details (a copy is included in the LICENSE file that +// accompanied this code). 
+// +// You should have received a copy of the GNU General Public License version +// 2 along with this work; if not, write to the Free Software Foundation, +// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +// +// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +// or visit www.oracle.com if you need additional information or have any +// questions. +// + +source_hpp %{ + +#include "gc/shared/gc_globals.hpp" +#include "gc/x/c2/xBarrierSetC2.hpp" +#include "gc/x/xThreadLocalData.hpp" + +%} + +source %{ + +#include "c2_intelJccErratum_x86.hpp" + +static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) { + if (barrier_data == XLoadBarrierElided) { + return; + } + XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data); + { + IntelJccErratumAlignment intel_alignment(_masm, 10 /* jcc_size */); + __ testptr(ref, Address(r15_thread, XThreadLocalData::address_bad_mask_offset())); + __ jcc(Assembler::notZero, *stub->entry()); + } + __ bind(*stub->continuation()); +} + +static void x_load_barrier_cmpxchg(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, Label& good) { + XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong); + { + IntelJccErratumAlignment intel_alignment(_masm, 10 /* jcc_size */); + __ testptr(ref, Address(r15_thread, XThreadLocalData::address_bad_mask_offset())); + __ jcc(Assembler::zero, good); + } + { + IntelJccErratumAlignment intel_alignment(_masm, 5 /* jcc_size */); + __ jmp(*stub->entry()); + } + __ bind(*stub->continuation()); +} + +static void x_cmpxchg_common(MacroAssembler& _masm, const MachNode* node, Register mem_reg, Register newval, Register tmp) { + // Compare value (oldval) is in rax + const Address mem = Address(mem_reg, 0); + + if (node->barrier_data() != XLoadBarrierElided) { + __ movptr(tmp, rax); + } + + __ lock(); + __ cmpxchgptr(newval, mem); + + if (node->barrier_data() != XLoadBarrierElided) { + Label good; + x_load_barrier_cmpxchg(_masm, node, mem, rax, tmp, good); + __ movptr(rax, tmp); + __ lock(); + __ cmpxchgptr(newval, mem); + __ bind(good); + } +} + +%} + +// Load Pointer +instruct xLoadP(rRegP dst, memory mem, rFlagsReg cr) +%{ + predicate(UseZGC && !ZGenerational && n->as_Load()->barrier_data() != 0); + match(Set dst (LoadP mem)); + effect(KILL cr, TEMP dst); + + ins_cost(125); + + format %{ "movq $dst, $mem" %} + + ins_encode %{ + __ movptr($dst$$Register, $mem$$Address); + x_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, barrier_data()); + %} + + ins_pipe(ialu_reg_mem); +%} + +instruct xCompareAndExchangeP(indirect mem, rax_RegP oldval, rRegP newval, rRegP tmp, rFlagsReg cr) %{ + match(Set oldval (CompareAndExchangeP mem (Binary oldval newval))); + predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong); + effect(KILL cr, TEMP tmp); + + format %{ "lock\n\t" + "cmpxchgq $newval, $mem" %} + + ins_encode %{ + precond($oldval$$Register == rax); + x_cmpxchg_common(_masm, this, $mem$$Register, $newval$$Register, $tmp$$Register); + %} + + ins_pipe(pipe_cmpxchg); +%} + +instruct xCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rFlagsReg cr, rax_RegP oldval) %{ + match(Set res (CompareAndSwapP mem (Binary oldval newval))); + match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); + predicate(UseZGC && 
!ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong); + effect(KILL cr, KILL oldval, TEMP tmp); + + format %{ "lock\n\t" + "cmpxchgq $newval, $mem\n\t" + "sete $res\n\t" + "movzbl $res, $res" %} + + ins_encode %{ + precond($oldval$$Register == rax); + x_cmpxchg_common(_masm, this, $mem$$Register, $newval$$Register, $tmp$$Register); + if (barrier_data() != XLoadBarrierElided) { + __ cmpptr($tmp$$Register, rax); + } + __ setb(Assembler::equal, $res$$Register); + __ movzbl($res$$Register, $res$$Register); + %} + + ins_pipe(pipe_cmpxchg); +%} + +instruct xXChgP(indirect mem, rRegP newval, rFlagsReg cr) %{ + match(Set newval (GetAndSetP mem newval)); + predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() != 0); + effect(KILL cr); + + format %{ "xchgq $newval, $mem" %} + + ins_encode %{ + __ xchgptr($newval$$Register, Address($mem$$Register, 0)); + x_load_barrier(_masm, this, Address(noreg, 0), $newval$$Register, noreg /* tmp */, barrier_data()); + %} + + ins_pipe(pipe_cmpxchg); +%} diff --git a/src/hotspot/cpu/x86/gc/z/zAddress_x86.cpp b/src/hotspot/cpu/x86/gc/z/zAddress_x86.cpp new file mode 100644 index 0000000000000..ed177f37e0d45 --- /dev/null +++ b/src/hotspot/cpu/x86/gc/z/zAddress_x86.cpp @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zGlobals.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/powerOfTwo.hpp" + +size_t ZPointerLoadShift; + +size_t ZPlatformAddressOffsetBits() { + const size_t min_address_offset_bits = 42; // 4TB + const size_t max_address_offset_bits = 44; // 16TB + const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio); + const size_t address_offset_bits = log2i_exact(address_offset); + return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); +} + +size_t ZPlatformAddressHeapBaseShift() { + return ZPlatformAddressOffsetBits(); +} + +void ZGlobalsPointers::pd_set_good_masks() { + ZPointerLoadShift = ZPointer::load_shift_lookup(ZPointerLoadGoodMask); +} diff --git a/src/hotspot/cpu/x86/gc/z/zAddress_x86.hpp b/src/hotspot/cpu/x86/gc/z/zAddress_x86.hpp new file mode 100644 index 0000000000000..e71333f6068a4 --- /dev/null +++ b/src/hotspot/cpu/x86/gc/z/zAddress_x86.hpp @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef CPU_X86_GC_Z_ZADDRESS_X86_HPP +#define CPU_X86_GC_Z_ZADDRESS_X86_HPP + +#include "utilities/globalDefinitions.hpp" + +extern size_t ZPointerLoadShift; + +size_t ZPlatformAddressOffsetBits(); +size_t ZPlatformAddressHeapBaseShift(); + +#endif // CPU_X86_GC_Z_ZADDRESS_X86_HPP diff --git a/src/hotspot/cpu/x86/gc/z/zAddress_x86.inline.hpp b/src/hotspot/cpu/x86/gc/z/zAddress_x86.inline.hpp new file mode 100644 index 0000000000000..e0be06395946a --- /dev/null +++ b/src/hotspot/cpu/x86/gc/z/zAddress_x86.inline.hpp @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef CPU_X86_GC_Z_ZADDRESS_X86_INLINE_HPP +#define CPU_X86_GC_Z_ZADDRESS_X86_INLINE_HPP + +#include "utilities/globalDefinitions.hpp" + +inline uintptr_t ZPointer::remap_bits(uintptr_t colored) { + return colored & ZPointerRemappedMask; +} + +inline constexpr int ZPointer::load_shift_lookup(uintptr_t value) { + const size_t index = load_shift_lookup_index(value); + assert(index == 0 || is_power_of_2(index), "Incorrect load shift: " SIZE_FORMAT, index); + return ZPointerLoadShiftTable[index]; +} + +#endif // CPU_X86_GC_Z_ZADDRESS_X86_INLINE_HPP diff --git a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp index 0f4e62341a572..a5600c45d4849 100644 --- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp @@ -25,12 +25,15 @@ #include "asm/macroAssembler.inline.hpp" #include "code/codeBlob.hpp" #include "code/vmreg.inline.hpp" +#include "compiler/compileTask.hpp" +#include "gc/z/zAddress.hpp" #include "gc/z/zBarrier.inline.hpp" #include "gc/z/zBarrierSet.hpp" #include "gc/z/zBarrierSetAssembler.hpp" #include "gc/z/zBarrierSetRuntime.hpp" #include "gc/z/zThreadLocalData.hpp" #include "memory/resourceArea.hpp" +#include "runtime/jniHandles.hpp" #include "runtime/sharedRuntime.hpp" #include "utilities/macros.hpp" #ifdef COMPILER1 @@ -39,7 +42,9 @@ #include "gc/z/c1/zBarrierSetC1.hpp" #endif // COMPILER1 #ifdef COMPILER2 +#include "c2_intelJccErratum_x86.hpp" #include "gc/z/c2/zBarrierSetC2.hpp" +#include "opto/output.hpp" #endif // COMPILER2 #ifdef PRODUCT @@ -51,6 +56,142 @@ #undef __ #define __ masm-> +ZBarrierSetAssembler::ZBarrierSetAssembler() + : _load_bad_relocations(), + _store_bad_relocations(), + _store_good_relocations() { +} + +enum class ZXMMSpillMode { + none, + avx128, + avx256 +}; + +// Helper for saving and restoring registers across a runtime call that does +// not have any live vector registers. 
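+// The constructor spills the caller-saved GP registers and, depending on the requested
+// ZXMMSpillMode, the first eight XMM argument registers; the destructor restores them
+// and leaves the runtime call's return value in the requested result register (if any).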
+class ZRuntimeCallSpill { +private: + const ZXMMSpillMode _xmm_spill_mode; + const int _xmm_size; + const int _xmm_spill_size; + MacroAssembler* _masm; + Register _result; + + void save() { + MacroAssembler* masm = _masm; + __ push(rax); + __ push(rcx); + __ push(rdx); + __ push(rdi); + __ push(rsi); + __ push(r8); + __ push(r9); + __ push(r10); + __ push(r11); + + if (_xmm_spill_size != 0) { + __ subptr(rsp, _xmm_spill_size); + if (_xmm_spill_mode == ZXMMSpillMode::avx128) { + __ movdqu(Address(rsp, _xmm_size * 7), xmm7); + __ movdqu(Address(rsp, _xmm_size * 6), xmm6); + __ movdqu(Address(rsp, _xmm_size * 5), xmm5); + __ movdqu(Address(rsp, _xmm_size * 4), xmm4); + __ movdqu(Address(rsp, _xmm_size * 3), xmm3); + __ movdqu(Address(rsp, _xmm_size * 2), xmm2); + __ movdqu(Address(rsp, _xmm_size * 1), xmm1); + __ movdqu(Address(rsp, _xmm_size * 0), xmm0); + } else { + assert(_xmm_spill_mode == ZXMMSpillMode::avx256, "AVX support ends at avx256"); + __ vmovdqu(Address(rsp, _xmm_size * 7), xmm7); + __ vmovdqu(Address(rsp, _xmm_size * 6), xmm6); + __ vmovdqu(Address(rsp, _xmm_size * 5), xmm5); + __ vmovdqu(Address(rsp, _xmm_size * 4), xmm4); + __ vmovdqu(Address(rsp, _xmm_size * 3), xmm3); + __ vmovdqu(Address(rsp, _xmm_size * 2), xmm2); + __ vmovdqu(Address(rsp, _xmm_size * 1), xmm1); + __ vmovdqu(Address(rsp, _xmm_size * 0), xmm0); + } + } + } + + void restore() { + MacroAssembler* masm = _masm; + if (_xmm_spill_size != 0) { + if (_xmm_spill_mode == ZXMMSpillMode::avx128) { + __ movdqu(xmm0, Address(rsp, _xmm_size * 0)); + __ movdqu(xmm1, Address(rsp, _xmm_size * 1)); + __ movdqu(xmm2, Address(rsp, _xmm_size * 2)); + __ movdqu(xmm3, Address(rsp, _xmm_size * 3)); + __ movdqu(xmm4, Address(rsp, _xmm_size * 4)); + __ movdqu(xmm5, Address(rsp, _xmm_size * 5)); + __ movdqu(xmm6, Address(rsp, _xmm_size * 6)); + __ movdqu(xmm7, Address(rsp, _xmm_size * 7)); + } else { + assert(_xmm_spill_mode == ZXMMSpillMode::avx256, "AVX support ends at avx256"); + __ vmovdqu(xmm0, Address(rsp, _xmm_size * 0)); + __ vmovdqu(xmm1, Address(rsp, _xmm_size * 1)); + __ vmovdqu(xmm2, Address(rsp, _xmm_size * 2)); + __ vmovdqu(xmm3, Address(rsp, _xmm_size * 3)); + __ vmovdqu(xmm4, Address(rsp, _xmm_size * 4)); + __ vmovdqu(xmm5, Address(rsp, _xmm_size * 5)); + __ vmovdqu(xmm6, Address(rsp, _xmm_size * 6)); + __ vmovdqu(xmm7, Address(rsp, _xmm_size * 7)); + } + __ addptr(rsp, _xmm_spill_size); + } + + __ pop(r11); + __ pop(r10); + __ pop(r9); + __ pop(r8); + __ pop(rsi); + __ pop(rdi); + __ pop(rdx); + __ pop(rcx); + if (_result == noreg) { + __ pop(rax); + } else if (_result == rax) { + __ addptr(rsp, wordSize); + } else { + __ movptr(_result, rax); + __ pop(rax); + } + } + + static int compute_xmm_size(ZXMMSpillMode spill_mode) { + switch (spill_mode) { + case ZXMMSpillMode::none: + return 0; + case ZXMMSpillMode::avx128: + return wordSize * 2; + case ZXMMSpillMode::avx256: + return wordSize * 4; + default: + ShouldNotReachHere(); + return 0; + } + } + +public: + ZRuntimeCallSpill(MacroAssembler* masm, Register result, ZXMMSpillMode spill_mode) + : _xmm_spill_mode(spill_mode), + _xmm_size(compute_xmm_size(spill_mode)), + _xmm_spill_size(_xmm_size * Argument::n_float_register_parameters_j), + _masm(masm), + _result(result) { + // We may end up here from generate_native_wrapper, then the method may have + // floats as arguments, and we must spill them before calling the VM runtime + // leaf. From the interpreter all floats are passed on the stack. 
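+    // The assert below pins down that count: exactly eight XMM registers (xmm0-xmm7)
+    // can carry Java float arguments, matching the spill code above.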
+ assert(Argument::n_float_register_parameters_j == 8, "Assumption"); + save(); + } + + ~ZRuntimeCallSpill() { + restore(); + } +}; + static void call_vm(MacroAssembler* masm, address entry_point, Register arg0, @@ -101,6 +242,7 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm, assert_different_registers(dst, scratch); Label done; + Label uncolor; // // Fast Path @@ -112,69 +254,43 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm, // Load oop at address __ movptr(dst, Address(scratch, 0)); + const bool on_non_strong = + (decorators & ON_WEAK_OOP_REF) != 0 || + (decorators & ON_PHANTOM_OOP_REF) != 0; + // Test address bad mask - __ testptr(dst, address_bad_mask_from_thread(r15_thread)); - __ jcc(Assembler::zero, done); + if (on_non_strong) { + __ testptr(dst, mark_bad_mask_from_thread(r15_thread)); + } else { + __ testptr(dst, load_bad_mask_from_thread(r15_thread)); + } + + __ jcc(Assembler::zero, uncolor); // // Slow path // - // Save registers - __ push(rax); - __ push(rcx); - __ push(rdx); - __ push(rdi); - __ push(rsi); - __ push(r8); - __ push(r9); - __ push(r10); - __ push(r11); - - // We may end up here from generate_native_wrapper, then the method may have - // floats as arguments, and we must spill them before calling the VM runtime - // leaf. From the interpreter all floats are passed on the stack. - assert(Argument::n_float_register_parameters_j == 8, "Assumption"); - const int xmm_size = wordSize * 2; - const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j; - __ subptr(rsp, xmm_spill_size); - __ movdqu(Address(rsp, xmm_size * 7), xmm7); - __ movdqu(Address(rsp, xmm_size * 6), xmm6); - __ movdqu(Address(rsp, xmm_size * 5), xmm5); - __ movdqu(Address(rsp, xmm_size * 4), xmm4); - __ movdqu(Address(rsp, xmm_size * 3), xmm3); - __ movdqu(Address(rsp, xmm_size * 2), xmm2); - __ movdqu(Address(rsp, xmm_size * 1), xmm1); - __ movdqu(Address(rsp, xmm_size * 0), xmm0); + { + // Call VM + ZRuntimeCallSpill rcs(masm, dst, ZXMMSpillMode::avx128); + call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch); + } + + // Slow-path has already uncolored + __ jmp(done); - // Call VM - call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch); - - __ movdqu(xmm0, Address(rsp, xmm_size * 0)); - __ movdqu(xmm1, Address(rsp, xmm_size * 1)); - __ movdqu(xmm2, Address(rsp, xmm_size * 2)); - __ movdqu(xmm3, Address(rsp, xmm_size * 3)); - __ movdqu(xmm4, Address(rsp, xmm_size * 4)); - __ movdqu(xmm5, Address(rsp, xmm_size * 5)); - __ movdqu(xmm6, Address(rsp, xmm_size * 6)); - __ movdqu(xmm7, Address(rsp, xmm_size * 7)); - __ addptr(rsp, xmm_spill_size); - - __ pop(r11); - __ pop(r10); - __ pop(r9); - __ pop(r8); - __ pop(rsi); - __ pop(rdi); - __ pop(rdx); - __ pop(rcx); - - if (dst == rax) { - __ addptr(rsp, wordSize); + __ bind(uncolor); + + __ movptr(scratch, rcx); // Save rcx because shrq needs shift in rcx + __ movptr(rcx, ExternalAddress((address)&ZPointerLoadShift)); + if (dst == rcx) { + // Dst was rcx which is saved in scratch because shrq needs rcx for shift + __ shrq(scratch); } else { - __ movptr(dst, rax); - __ pop(rax); + __ shrq(dst); } + __ movptr(rcx, scratch); // restore rcx __ bind(done); @@ -186,7 +302,208 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm, BLOCK_COMMENT("} ZBarrierSetAssembler::load_at"); } -#ifdef ASSERT +static void emit_store_fast_path_check(MacroAssembler* masm, Address ref_addr, bool is_atomic, Label& medium_path) { + if 
(is_atomic) { + // Atomic operations must ensure that the contents of memory are store-good before + // an atomic operation can execute. + // A not relocatable object could have spurious raw null pointers in its fields after + // getting promoted to the old generation. + __ cmpw(ref_addr, barrier_Relocation::unpatched); + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodAfterCmp); + } else { + // Stores on relocatable objects never need to deal with raw null pointers in fields. + // Raw null pointers may only exist in the young generation, as they get pruned when + // the object is relocated to old. And no pre-write barrier needs to perform any action + // in the young generation. + __ Assembler::testl(ref_addr, barrier_Relocation::unpatched); + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreBadAfterTest); + } + __ jcc(Assembler::notEqual, medium_path); +} + +#ifdef COMPILER2 +static int store_fast_path_check_size(MacroAssembler* masm, Address ref_addr, bool is_atomic, Label& medium_path) { + if (!VM_Version::has_intel_jcc_erratum()) { + return 0; + } + int size = 0; + bool in_scratch_emit_size = masm->code_section()->scratch_emit(); + if (!in_scratch_emit_size) { + // Temporarily register as scratch buffer so that relocations don't register + masm->code_section()->set_scratch_emit(); + } + // First emit the code, to measure its size + address insts_end = masm->code_section()->end(); + // The dummy medium path label is bound after the code emission. This ensures + // full size of the generated jcc, which is what the real barrier will have + // as well, as it also binds after the emission of the barrier. + Label dummy_medium_path; + emit_store_fast_path_check(masm, ref_addr, is_atomic, dummy_medium_path); + address emitted_end = masm->code_section()->end(); + size = (int)(intptr_t)(emitted_end - insts_end); + __ bind(dummy_medium_path); + if (!in_scratch_emit_size) { + // Potentially restore scratchyness + masm->code_section()->clear_scratch_emit(); + } + // Roll back code, now that we know the size + masm->code_section()->set_end(insts_end); + return size; +} +#endif + +static void emit_store_fast_path_check_c2(MacroAssembler* masm, Address ref_addr, bool is_atomic, Label& medium_path) { +#ifdef COMPILER2 + // This is a JCC erratum mitigation wrapper for calling the inner check + int size = store_fast_path_check_size(masm, ref_addr, is_atomic, medium_path); + // Emit JCC erratum mitigation nops with the right size + IntelJccErratumAlignment(*masm, size); + // Emit the JCC erratum mitigation guarded code + emit_store_fast_path_check(masm, ref_addr, is_atomic, medium_path); +#endif +} + +static bool is_c2_compilation() { + CompileTask* task = ciEnv::current()->task(); + return task != nullptr && is_c2_compile(task->comp_level()); +} + +void ZBarrierSetAssembler::store_barrier_fast(MacroAssembler* masm, + Address ref_addr, + Register rnew_zaddress, + Register rnew_zpointer, + bool in_nmethod, + bool is_atomic, + Label& medium_path, + Label& medium_path_continuation) const { + assert_different_registers(ref_addr.base(), rnew_zpointer); + assert_different_registers(ref_addr.index(), rnew_zpointer); + assert_different_registers(rnew_zaddress, rnew_zpointer); + + if (in_nmethod) { + if (is_c2_compilation()) { + emit_store_fast_path_check_c2(masm, ref_addr, is_atomic, medium_path); + } else { + emit_store_fast_path_check(masm, ref_addr, is_atomic, medium_path); + } + __ bind(medium_path_continuation); + if (rnew_zaddress != noreg) { + // noreg means 
null; no need to color + __ movptr(rnew_zpointer, rnew_zaddress); + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeShl); + __ shlq(rnew_zpointer, barrier_Relocation::unpatched); + __ orq_imm32(rnew_zpointer, barrier_Relocation::unpatched); + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodAfterOr); + } + } else { + __ movzwq(rnew_zpointer, ref_addr); + __ testq(rnew_zpointer, Address(r15_thread, ZThreadLocalData::store_bad_mask_offset())); + __ jcc(Assembler::notEqual, medium_path); + __ bind(medium_path_continuation); + if (rnew_zaddress == noreg) { + __ xorptr(rnew_zpointer, rnew_zpointer); + } else { + __ movptr(rnew_zpointer, rnew_zaddress); + } + assert_different_registers(rcx, rnew_zpointer); + __ push(rcx); + __ movptr(rcx, ExternalAddress((address)&ZPointerLoadShift)); + __ shlq(rnew_zpointer); + __ pop(rcx); + __ orq(rnew_zpointer, Address(r15_thread, ZThreadLocalData::store_good_mask_offset())); + } +} + +static void store_barrier_buffer_add(MacroAssembler* masm, + Address ref_addr, + Register tmp1, + Label& slow_path) { + Address buffer(r15_thread, ZThreadLocalData::store_barrier_buffer_offset()); + + __ movptr(tmp1, buffer); + + // Combined pointer bump and check if the buffer is disabled or full + __ cmpptr(Address(tmp1, ZStoreBarrierBuffer::current_offset()), 0); + __ jcc(Assembler::equal, slow_path); + + Register tmp2 = r15_thread; + __ push(tmp2); + + // Bump the pointer + __ movq(tmp2, Address(tmp1, ZStoreBarrierBuffer::current_offset())); + __ subq(tmp2, sizeof(ZStoreBarrierEntry)); + __ movq(Address(tmp1, ZStoreBarrierBuffer::current_offset()), tmp2); + + // Compute the buffer entry address + __ lea(tmp2, Address(tmp1, tmp2, Address::times_1, ZStoreBarrierBuffer::buffer_offset())); + + // Compute and log the store address + __ lea(tmp1, ref_addr); + __ movptr(Address(tmp2, in_bytes(ZStoreBarrierEntry::p_offset())), tmp1); + + // Load and log the prev value + __ movptr(tmp1, Address(tmp1, 0)); + __ movptr(Address(tmp2, in_bytes(ZStoreBarrierEntry::prev_offset())), tmp1); + + __ pop(tmp2); +} + +void ZBarrierSetAssembler::store_barrier_medium(MacroAssembler* masm, + Address ref_addr, + Register tmp, + bool is_native, + bool is_atomic, + Label& medium_path_continuation, + Label& slow_path, + Label& slow_path_continuation) const { + assert_different_registers(ref_addr.base(), tmp); + + // The reason to end up in the medium path is that the pre-value was not 'good'. + + if (is_native) { + __ jmp(slow_path); + __ bind(slow_path_continuation); + __ jmp(medium_path_continuation); + } else if (is_atomic) { + // Atomic accesses can get to the medium fast path because the value was a + // raw null value. If it was not null, then there is no doubt we need to take a slow path. + __ cmpptr(ref_addr, 0); + __ jcc(Assembler::notEqual, slow_path); + + // If we get this far, we know there is a young raw null value in the field. + // Try to self-heal null values for atomic accesses + __ push(rax); + __ push(rbx); + __ push(rcx); + + __ lea(rcx, ref_addr); + __ xorq(rax, rax); + __ movptr(rbx, Address(r15, ZThreadLocalData::store_good_mask_offset())); + + __ lock(); + __ cmpxchgq(rbx, Address(rcx, 0)); + + __ pop(rcx); + __ pop(rbx); + __ pop(rax); + + __ jcc(Assembler::notEqual, slow_path); + + __ bind(slow_path_continuation); + __ jmp(medium_path_continuation); + } else { + // A non-atomic relocatable object won't get to the medium fast path due to a + // raw null in the young generation. We only get here because the field is bad. 
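+    // (In other words, the pre-value in the field does not carry the current store-good color.)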
+ // In this path we don't need any self healing, so we can avoid a runtime call + // most of the time by buffering the store barrier to be applied lazily. + store_barrier_buffer_add(masm, + ref_addr, + tmp, + slow_path); + __ bind(slow_path_continuation); + __ jmp(medium_path_continuation); + } +} void ZBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, @@ -198,27 +515,378 @@ void ZBarrierSetAssembler::store_at(MacroAssembler* masm, Register tmp3) { BLOCK_COMMENT("ZBarrierSetAssembler::store_at {"); - // Verify oop store + bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; + if (is_reference_type(type)) { - // Note that src could be noreg, which means we - // are storing null and can skip verification. - if (src != noreg) { + assert_different_registers(src, tmp1, dst.base(), dst.index()); + + if (dest_uninitialized) { + assert_different_registers(rcx, tmp1); + if (src == noreg) { + __ xorq(tmp1, tmp1); + } else { + __ movptr(tmp1, src); + } + __ push(rcx); + __ movptr(rcx, ExternalAddress((address)&ZPointerLoadShift)); + __ shlq(tmp1); + __ pop(rcx); + __ orq(tmp1, Address(r15_thread, ZThreadLocalData::store_good_mask_offset())); + } else { Label done; - __ testptr(src, address_bad_mask_from_thread(r15_thread)); - __ jcc(Assembler::zero, done); - __ stop("Verify oop store failed"); - __ should_not_reach_here(); + Label medium; + Label medium_continuation; + Label slow; + Label slow_continuation; + store_barrier_fast(masm, dst, src, tmp1, false, false, medium, medium_continuation); + __ jmp(done); + __ bind(medium); + store_barrier_medium(masm, + dst, + tmp1, + false /* is_native */, + false /* is_atomic */, + medium_continuation, + slow, + slow_continuation); + + __ bind(slow); + { + // Call VM + ZRuntimeCallSpill rcs(masm, noreg, ZXMMSpillMode::avx128); + __ leaq(c_rarg0, dst); + __ MacroAssembler::call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr(), c_rarg0); + } + + __ jmp(slow_continuation); __ bind(done); } + + // Store value + BarrierSetAssembler::store_at(masm, decorators, type, dst, tmp1, noreg, noreg, noreg); + } else { + BarrierSetAssembler::store_at(masm, decorators, type, dst, src, noreg, noreg, noreg); + } + + BLOCK_COMMENT("} ZBarrierSetAssembler::store_at"); +} + +bool ZBarrierSetAssembler::supports_avx3_masked_arraycopy() { + return false; +} + +static void load_arraycopy_masks(MacroAssembler* masm) { + // xmm2: load_bad_mask + // xmm3: store_bad_mask + // xmm4: store_good_mask + if (UseAVX >= 2) { + __ lea(r10, ExternalAddress((address)&ZPointerVectorLoadBadMask)); + __ vmovdqu(xmm2, Address(r10, 0)); + __ lea(r10, ExternalAddress((address)&ZPointerVectorStoreBadMask)); + __ vmovdqu(xmm3, Address(r10, 0)); + __ lea(r10, ExternalAddress((address)&ZPointerVectorStoreGoodMask)); + __ vmovdqu(xmm4, Address(r10, 0)); + } else { + __ lea(r10, ExternalAddress((address)&ZPointerVectorLoadBadMask)); + __ movdqu(xmm2, Address(r10, 0)); + __ lea(r10, ExternalAddress((address)&ZPointerVectorStoreBadMask)); + __ movdqu(xmm3, Address(r10, 0)); + __ lea(r10, ExternalAddress((address)&ZPointerVectorStoreGoodMask)); + __ movdqu(xmm4, Address(r10, 0)); } +} + +static ZXMMSpillMode compute_arraycopy_spill_mode() { + if (UseAVX >= 2) { + return ZXMMSpillMode::avx256; + } else { + return ZXMMSpillMode::avx128; + } +} + +void ZBarrierSetAssembler::copy_load_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + Register dst, + Address src, + Register tmp) { + if 
(!is_reference_type(type)) { + BarrierSetAssembler::copy_load_at(masm, decorators, type, bytes, dst, src, tmp); + return; + } + + Label load_done; + + // Load oop at address + __ movptr(dst, src); + + // Test address bad mask + __ Assembler::testl(dst, (int32_t)(uint32_t)ZPointerLoadBadMask); + _load_bad_relocations.append(__ code_section()->end()); + __ jcc(Assembler::zero, load_done); + + { + // Call VM + ZRuntimeCallSpill rcs(masm, dst, compute_arraycopy_spill_mode()); + __ leaq(c_rarg1, src); + call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_store_good_addr(), dst, c_rarg1); + } + + __ bind(load_done); + + // Remove metadata bits so that the store side (vectorized or non-vectorized) can + // inject the store-good color with an or instruction. + __ andq(dst, _zpointer_address_mask); + + if ((decorators & ARRAYCOPY_CHECKCAST) != 0) { + // The checkcast arraycopy needs to be able to dereference the oops in order to perform a typechecks. + assert(tmp != rcx, "Surprising choice of temp register"); + __ movptr(tmp, rcx); + __ movptr(rcx, ExternalAddress((address)&ZPointerLoadShift)); + __ shrq(dst); + __ movptr(rcx, tmp); + } +} + +void ZBarrierSetAssembler::copy_store_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + Address dst, + Register src, + Register tmp) { + if (!is_reference_type(type)) { + BarrierSetAssembler::copy_store_at(masm, decorators, type, bytes, dst, src, tmp); + return; + } + + bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; + + if (!dest_uninitialized) { + Label store; + Label store_bad; + __ Assembler::testl(dst, (int32_t)(uint32_t)ZPointerStoreBadMask); + _store_bad_relocations.append(__ code_section()->end()); + __ jcc(Assembler::zero, store); + + store_barrier_buffer_add(masm, dst, tmp, store_bad); + __ jmp(store); + + __ bind(store_bad); + { + // Call VM + ZRuntimeCallSpill rcs(masm, noreg, compute_arraycopy_spill_mode()); + __ leaq(c_rarg0, dst); + __ MacroAssembler::call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr(), c_rarg0); + } + + __ bind(store); + } + + if ((decorators & ARRAYCOPY_CHECKCAST) != 0) { + assert(tmp != rcx, "Surprising choice of temp register"); + __ movptr(tmp, rcx); + __ movptr(rcx, ExternalAddress((address)&ZPointerLoadShift)); + __ shlq(src); + __ movptr(rcx, tmp); + } + + // Color + __ orq_imm32(src, (int32_t)(uint32_t)ZPointerStoreGoodMask); + _store_good_relocations.append(__ code_section()->end()); // Store value - BarrierSetAssembler::store_at(masm, decorators, type, dst, src, tmp1, tmp2, tmp3); + __ movptr(dst, src); +} - BLOCK_COMMENT("} ZBarrierSetAssembler::store_at"); +void ZBarrierSetAssembler::copy_load_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + XMMRegister dst, + Address src, + Register tmp, + XMMRegister xmm_tmp) { + if (!is_reference_type(type)) { + BarrierSetAssembler::copy_load_at(masm, decorators, type, bytes, dst, src, tmp, xmm_tmp); + return; + } + Address src0(src.base(), src.index(), src.scale(), src.disp() + 0); + Address src1(src.base(), src.index(), src.scale(), src.disp() + 8); + Address src2(src.base(), src.index(), src.scale(), src.disp() + 16); + Address src3(src.base(), src.index(), src.scale(), src.disp() + 24); + + // Registers set up in the prologue: + // xmm2: load_bad_mask + // xmm3: store_bad_mask + // xmm4: store_good_mask + + if (bytes == 16) { + Label done; + Label fallback; + + if (UseAVX >= 1) { + // Load source vector + __ movdqu(dst, src); + // 
Check source load-good + __ movdqu(xmm_tmp, dst); + __ ptest(xmm_tmp, xmm2); + __ jcc(Assembler::notZero, fallback); + + // Remove bad metadata bits + __ vpandn(dst, xmm3, dst, Assembler::AVX_128bit); + __ jmp(done); + } + + __ bind(fallback); + + __ subptr(rsp, wordSize * 2); + + ZBarrierSetAssembler::copy_load_at(masm, decorators, type, 8, tmp, src0, noreg); + __ movq(Address(rsp, 0), tmp); + ZBarrierSetAssembler::copy_load_at(masm, decorators, type, 8, tmp, src1, noreg); + __ movq(Address(rsp, 8), tmp); + + __ movdqu(dst, Address(rsp, 0)); + __ addptr(rsp, wordSize * 2); + + __ bind(done); + } else if (bytes == 32) { + Label done; + Label fallback; + assert(UseAVX >= 2, "Assume that UseAVX >= 2"); + + // Load source vector + __ vmovdqu(dst, src); + // Check source load-good + __ vmovdqu(xmm_tmp, dst); + __ vptest(xmm_tmp, xmm2, Assembler::AVX_256bit); + __ jcc(Assembler::notZero, fallback); + + // Remove bad metadata bits so that the store can colour the pointers with an or instruction. + // This makes the fast path and slow path formats look the same, in the sense that they don't + // have any of the store bad bits. + __ vpandn(dst, xmm3, dst, Assembler::AVX_256bit); + __ jmp(done); + + __ bind(fallback); + + __ subptr(rsp, wordSize * 4); + + ZBarrierSetAssembler::copy_load_at(masm, decorators, type, 8, tmp, src0, noreg); + __ movq(Address(rsp, 0), tmp); + ZBarrierSetAssembler::copy_load_at(masm, decorators, type, 8, tmp, src1, noreg); + __ movq(Address(rsp, 8), tmp); + ZBarrierSetAssembler::copy_load_at(masm, decorators, type, 8, tmp, src2, noreg); + __ movq(Address(rsp, 16), tmp); + ZBarrierSetAssembler::copy_load_at(masm, decorators, type, 8, tmp, src3, noreg); + __ movq(Address(rsp, 24), tmp); + + __ vmovdqu(dst, Address(rsp, 0)); + __ addptr(rsp, wordSize * 4); + + __ bind(done); + } } -#endif // ASSERT +void ZBarrierSetAssembler::copy_store_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + Address dst, + XMMRegister src, + Register tmp1, + Register tmp2, + XMMRegister xmm_tmp) { + if (!is_reference_type(type)) { + BarrierSetAssembler::copy_store_at(masm, decorators, type, bytes, dst, src, tmp1, tmp2, xmm_tmp); + return; + } + Address dst0(dst.base(), dst.index(), dst.scale(), dst.disp() + 0); + Address dst1(dst.base(), dst.index(), dst.scale(), dst.disp() + 8); + Address dst2(dst.base(), dst.index(), dst.scale(), dst.disp() + 16); + Address dst3(dst.base(), dst.index(), dst.scale(), dst.disp() + 24); + + bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; + + // Registers set up in the prologue: + // xmm2: load_bad_mask + // xmm3: store_bad_mask + // xmm4: store_good_mask + + if (bytes == 16) { + Label done; + Label fallback; + + if (UseAVX >= 1) { + if (!dest_uninitialized) { + // Load destination vector + __ movdqu(xmm_tmp, dst); + // Check destination store-good + __ ptest(xmm_tmp, xmm3); + __ jcc(Assembler::notZero, fallback); + } + + // Color source + __ por(src, xmm4); + // Store source in destination + __ movdqu(dst, src); + __ jmp(done); + } + + __ bind(fallback); + + __ subptr(rsp, wordSize * 2); + __ movdqu(Address(rsp, 0), src); + + __ movq(tmp1, Address(rsp, 0)); + ZBarrierSetAssembler::copy_store_at(masm, decorators, type, 8, dst0, tmp1, tmp2); + __ movq(tmp1, Address(rsp, 8)); + ZBarrierSetAssembler::copy_store_at(masm, decorators, type, 8, dst1, tmp1, tmp2); + + __ addptr(rsp, wordSize * 2); + + __ bind(done); + } else if (bytes == 32) { + Label done; + Label fallback; + assert(UseAVX >= 2, "Assume UseAVX >= 
2"); + + if (!dest_uninitialized) { + // Load destination vector + __ vmovdqu(xmm_tmp, dst); + // Check destination store-good + __ vptest(xmm_tmp, xmm3, Assembler::AVX_256bit); + __ jcc(Assembler::notZero, fallback); + } + + // Color source + __ vpor(src, src, xmm4, Assembler::AVX_256bit); + + // Store colored source in destination + __ vmovdqu(dst, src); + __ jmp(done); + + __ bind(fallback); + + __ subptr(rsp, wordSize * 4); + __ vmovdqu(Address(rsp, 0), src); + + __ movq(tmp1, Address(rsp, 0)); + ZBarrierSetAssembler::copy_store_at(masm, decorators, type, 8, dst0, tmp1, tmp2); + __ movq(tmp1, Address(rsp, 8)); + ZBarrierSetAssembler::copy_store_at(masm, decorators, type, 8, dst1, tmp1, tmp2); + __ movq(tmp1, Address(rsp, 16)); + ZBarrierSetAssembler::copy_store_at(masm, decorators, type, 8, dst2, tmp1, tmp2); + __ movq(tmp1, Address(rsp, 24)); + ZBarrierSetAssembler::copy_store_at(masm, decorators, type, 8, dst3, tmp1, tmp2); + + __ addptr(rsp, wordSize * 4); + + __ bind(done); + } +} void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, @@ -233,14 +901,7 @@ void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {"); - // Save registers - __ pusha(); - - // Call VM - call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), src, count); - - // Restore registers - __ popa(); + load_arraycopy_masks(masm); BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue"); } @@ -252,12 +913,51 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Label& slowpath) { BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {"); - // Resolve jobject - BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath); + Label done, tagged, weak_tagged, uncolor; - // Test address bad mask - __ testptr(obj, address_bad_mask_from_jni_env(jni_env)); + // Test for tag + __ testptr(obj, JNIHandles::tag_mask); + __ jcc(Assembler::notZero, tagged); + + // Resolve local handle + __ movptr(obj, Address(obj, 0)); + __ jmp(done); + + __ bind(tagged); + + // Test for weak tag + __ testptr(obj, JNIHandles::TypeTag::weak_global); + __ jcc(Assembler::notZero, weak_tagged); + + // Resolve global handle + __ movptr(obj, Address(obj, -JNIHandles::TypeTag::global)); + __ testptr(obj, load_bad_mask_from_jni_env(jni_env)); __ jcc(Assembler::notZero, slowpath); + __ jmp(uncolor); + + __ bind(weak_tagged); + + // Resolve weak handle + __ movptr(obj, Address(obj, -JNIHandles::TypeTag::weak_global)); + __ testptr(obj, mark_bad_mask_from_jni_env(jni_env)); + __ jcc(Assembler::notZero, slowpath); + + __ bind(uncolor); + + // Uncolor + if (obj == rcx) { + __ movptr(tmp, obj); + __ movptr(rcx, ExternalAddress((address)&ZPointerLoadShift)); + __ shrq(tmp); + __ movptr(obj, tmp); + } else { + __ push(rcx); + __ movptr(rcx, ExternalAddress((address)&ZPointerLoadShift)); + __ shrq(obj); + __ pop(rcx); + } + + __ bind(done); BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native"); } @@ -267,9 +967,45 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, #undef __ #define __ ce->masm()-> -void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, - LIR_Opr ref) const { - __ testptr(ref->as_register(), address_bad_mask_from_thread(r15_thread)); +static void z_uncolor(LIR_Assembler* ce, LIR_Opr ref) { + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeShl); + __ 
shrq(ref->as_register(), barrier_Relocation::unpatched); +} + +static void z_color(LIR_Assembler* ce, LIR_Opr ref) { + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeShl); + __ shlq(ref->as_register(), barrier_Relocation::unpatched); + __ orq_imm32(ref->as_register(), barrier_Relocation::unpatched); + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodAfterOr); +} + +void ZBarrierSetAssembler::generate_c1_uncolor(LIR_Assembler* ce, LIR_Opr ref) const { + z_uncolor(ce, ref); +} + +void ZBarrierSetAssembler::generate_c1_color(LIR_Assembler* ce, LIR_Opr ref) const { + z_color(ce, ref); +} + +void ZBarrierSetAssembler::generate_c1_load_barrier(LIR_Assembler* ce, + LIR_Opr ref, + ZLoadBarrierStubC1* stub, + bool on_non_strong) const { + if (on_non_strong) { + // Test against MarkBad mask + __ Assembler::testl(ref->as_register(), barrier_Relocation::unpatched); + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadAfterTest); + + // Slow path if not zero + __ jcc(Assembler::notZero, *stub->entry()); + // Fast path: convert to colorless + z_uncolor(ce, ref); + } else { + // Convert to colorless and fast path test + z_uncolor(ce, ref); + __ jcc(Assembler::above, *stub->entry()); + } + __ bind(*stub->continuation()); } void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, @@ -281,6 +1017,9 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, Register ref_addr = noreg; Register tmp = noreg; + // The fast-path shift destroyed the oop - need to re-read it + __ movptr(ref, ce->as_Address(stub->ref_addr()->as_address_ptr())); + if (stub->tmp()->is_valid()) { // Load address into tmp register ce->leal(stub->ref_addr(), stub->tmp()); @@ -321,6 +1060,55 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, __ jmp(*stub->continuation()); } +void ZBarrierSetAssembler::generate_c1_store_barrier(LIR_Assembler* ce, + LIR_Address* addr, + LIR_Opr new_zaddress, + LIR_Opr new_zpointer, + ZStoreBarrierStubC1* stub) const { + Register rnew_zaddress = new_zaddress->as_register(); + Register rnew_zpointer = new_zpointer->as_register(); + + Register rbase = addr->base()->as_pointer_register(); + store_barrier_fast(ce->masm(), + ce->as_Address(addr), + rnew_zaddress, + rnew_zpointer, + true, + stub->is_atomic(), + *stub->entry(), + *stub->continuation()); +} + +void ZBarrierSetAssembler::generate_c1_store_barrier_stub(LIR_Assembler* ce, + ZStoreBarrierStubC1* stub) const { + // Stub entry + __ bind(*stub->entry()); + + Label slow; + Label slow_continuation; + store_barrier_medium(ce->masm(), + ce->as_Address(stub->ref_addr()->as_address_ptr()), + rscratch1, + false /* is_native */, + stub->is_atomic(), + *stub->continuation(), + slow, + slow_continuation); + + __ bind(slow); + + ce->leal(stub->ref_addr(), stub->new_zpointer()); + + // Setup arguments and call runtime stub + __ subptr(rsp, 2 * BytesPerWord); + ce->store_parameter(stub->new_zpointer()->as_pointer_register(), 0); + __ call(RuntimeAddress(stub->runtime_stub())); + __ addptr(rsp, 2 * BytesPerWord); + + // Stub exit + __ jmp(slow_continuation); +} + #undef __ #define __ sasm-> @@ -343,6 +1131,28 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* __ ret(0); } +void ZBarrierSetAssembler::generate_c1_store_barrier_runtime_stub(StubAssembler* sasm, + bool self_healing) const { + // Enter and save registers + __ enter(); + __ save_live_registers_no_oop_map(true /* save_fpu_registers */); + 
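+  // The field address arrives as C1 stub parameter 0 and is forwarded to the
+  // ZBarrierSetRuntime store barrier selected below.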
+ // Setup arguments + __ load_parameter(0, c_rarg0); + + // Call VM + if (self_healing) { + __ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr(), c_rarg0); + } else { + __ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr(), c_rarg0); + } + + // Restore registers and return + __ restore_live_registers(true /* restore_fpu_registers */); + __ leave(); + __ ret(0); +} + #endif // COMPILER1 #ifdef COMPILER2 @@ -467,7 +1277,7 @@ class ZSaveLiveRegisters { _spill_offset += 8; } - void initialize(ZLoadBarrierStubC2* stub) { + void initialize(ZBarrierStubC2* stub) { // Create mask of caller saved registers that need to // be saved/restored if live RegMask caller_saved; @@ -480,13 +1290,13 @@ class ZSaveLiveRegisters { caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg())); caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg())); caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg())); - caller_saved.Remove(OptoReg::as_OptoReg(stub->ref()->as_VMReg())); + + if (stub->result() != noreg) { + caller_saved.Remove(OptoReg::as_OptoReg(stub->result()->as_VMReg())); + } // Create mask of live registers RegMask live = stub->live(); - if (stub->tmp() != noreg) { - live.Insert(OptoReg::as_OptoReg(stub->tmp()->as_VMReg())); - } int gp_spill_size = 0; int opmask_spill_size = 0; @@ -544,7 +1354,7 @@ class ZSaveLiveRegisters { } public: - ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) : + ZSaveLiveRegisters(MacroAssembler* masm, ZBarrierStubC2* stub) : _masm(masm), _gp_registers(), _opmask_registers(), @@ -683,11 +1493,15 @@ class ZSetupArguments { #define __ masm-> void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const { + Assembler::InlineSkippedInstructionsCounter skipped_counter(masm); BLOCK_COMMENT("ZLoadBarrierStubC2"); // Stub entry __ bind(*stub->entry()); + // The fast-path shift destroyed the oop - need to re-read it + __ movptr(stub->ref(), stub->ref_addr()); + { ZSaveLiveRegisters save_live_registers(masm, stub); ZSetupArguments setup_arguments(masm, stub); @@ -698,16 +1512,187 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z __ jmp(*stub->continuation()); } +void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm, ZStoreBarrierStubC2* stub) const { + Assembler::InlineSkippedInstructionsCounter skipped_counter(masm); + BLOCK_COMMENT("ZStoreBarrierStubC2"); + + // Stub entry + __ bind(*stub->entry()); + + Label slow; + Label slow_continuation; + store_barrier_medium(masm, + stub->ref_addr(), + stub->new_zpointer(), + stub->is_native(), + stub->is_atomic(), + *stub->continuation(), + slow, + slow_continuation); + + __ bind(slow); + + { + ZSaveLiveRegisters save_live_registers(masm, stub); + __ lea(c_rarg0, stub->ref_addr()); + + if (stub->is_native()) { + __ call(RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr())); + } else if (stub->is_atomic()) { + __ call(RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr())); + } else { + __ call(RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr())); + } + } + + // Stub exit + __ jmp(slow_continuation); +} + +#undef __ #endif // COMPILER2 +static int patch_barrier_relocation_offset(int format) { + switch (format) { + case ZBarrierRelocationFormatLoadGoodBeforeShl: + return 3; + + case ZBarrierRelocationFormatStoreGoodAfterCmp: + return -2; + + case 
ZBarrierRelocationFormatLoadBadAfterTest: + case ZBarrierRelocationFormatMarkBadAfterTest: + case ZBarrierRelocationFormatStoreBadAfterTest: + case ZBarrierRelocationFormatStoreGoodAfterOr: + return -4; + case ZBarrierRelocationFormatStoreGoodAfterMov: + return -3; + + default: + ShouldNotReachHere(); + return 0; + } +} + +static uint16_t patch_barrier_relocation_value(int format) { + switch (format) { + case ZBarrierRelocationFormatLoadGoodBeforeShl: + return (uint16_t)ZPointerLoadShift; + + case ZBarrierRelocationFormatMarkBadAfterTest: + return (uint16_t)ZPointerMarkBadMask; + + case ZBarrierRelocationFormatLoadBadAfterTest: + return (uint16_t)ZPointerLoadBadMask; + + case ZBarrierRelocationFormatStoreGoodAfterCmp: + case ZBarrierRelocationFormatStoreGoodAfterOr: + case ZBarrierRelocationFormatStoreGoodAfterMov: + return (uint16_t)ZPointerStoreGoodMask; + + case ZBarrierRelocationFormatStoreBadAfterTest: + return (uint16_t)ZPointerStoreBadMask; + + default: + ShouldNotReachHere(); + return 0; + } +} + +void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format) { + const int offset = patch_barrier_relocation_offset(format); + const uint16_t value = patch_barrier_relocation_value(format); + uint8_t* const patch_addr = (uint8_t*)addr + offset; + if (format == ZBarrierRelocationFormatLoadGoodBeforeShl) { + *patch_addr = (uint8_t)value; + } else { + *(uint16_t*)patch_addr = value; + } +} + +void ZBarrierSetAssembler::patch_barriers() { + for (int i = 0; i < _load_bad_relocations.length(); ++i) { + address addr = _load_bad_relocations.at(i); + patch_barrier_relocation(addr, ZBarrierRelocationFormatLoadBadAfterTest); + } + for (int i = 0; i < _store_bad_relocations.length(); ++i) { + address addr = _store_bad_relocations.at(i); + patch_barrier_relocation(addr, ZBarrierRelocationFormatStoreBadAfterTest); + } + for (int i = 0; i < _store_good_relocations.length(); ++i) { + address addr = _store_good_relocations.at(i); + patch_barrier_relocation(addr, ZBarrierRelocationFormatStoreGoodAfterOr); + } +} + + #undef __ #define __ masm-> + void ZBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) { - // Check if metadata bits indicate a bad oop - __ testptr(obj, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset())); + // C1 calls verfy_oop in the middle of barriers, before they have been uncolored + // and after being colored. Therefore, we must deal with colored oops as well. 
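+  // The code below first filters out colored nulls, uncolors presumed zpointers using the
+  // load-shift lookup table, and then verifies the resulting address against the oop mask.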
+ Label done; + Label check_oop; + Label check_zaddress; + int color_bits = ZPointerRemappedShift + ZPointerRemappedBits; + + uintptr_t shifted_base_start_mask = (UCONST64(1) << (ZAddressHeapBaseShift + color_bits + 1)) - 1; + uintptr_t shifted_base_end_mask = (UCONST64(1) << (ZAddressHeapBaseShift + 1)) - 1; + uintptr_t shifted_base_mask = shifted_base_start_mask ^ shifted_base_end_mask; + + uintptr_t shifted_address_end_mask = (UCONST64(1) << (color_bits + 1)) - 1; + uintptr_t shifted_address_mask = shifted_address_end_mask ^ (uintptr_t)CONST64(-1); + + // Check colored null + __ mov64(tmp1, shifted_address_mask); + __ testptr(tmp1, obj); + __ jcc(Assembler::zero, done); + + // Check for zpointer + __ mov64(tmp1, shifted_base_mask); + __ testptr(tmp1, obj); + __ jcc(Assembler::zero, check_oop); + + // Lookup shift + __ movq(tmp1, obj); + __ mov64(tmp2, shifted_address_end_mask); + __ andq(tmp1, tmp2); + __ shrq(tmp1, ZPointerRemappedShift); + __ andq(tmp1, (1 << ZPointerRemappedBits) - 1); + __ lea(tmp2, ExternalAddress((address)&ZPointerLoadShiftTable)); + + // Uncolor presumed zpointer + assert(obj != rcx, "bad choice of register"); + if (rcx != tmp1 && rcx != tmp2) { + __ push(rcx); + } + __ movl(rcx, Address(tmp2, tmp1, Address::times_4, 0)); + __ shrq(obj); + if (rcx != tmp1 && rcx != tmp2) { + __ pop(rcx); + } + + __ jmp(check_zaddress); + + __ bind(check_oop); + + // make sure klass is 'reasonable', which is not zero. + __ load_klass(tmp1, obj, tmp2); // get klass + __ testptr(tmp1, tmp1); + __ jcc(Assembler::zero, error); // if klass is null it is broken + + __ bind(check_zaddress); + // Check if the oop is in the right area of memory + __ movptr(tmp1, obj); + __ movptr(tmp2, (intptr_t) Universe::verify_oop_mask()); + __ andptr(tmp1, tmp2); + __ movptr(tmp2, (intptr_t) Universe::verify_oop_bits()); + __ cmpptr(tmp1, tmp2); __ jcc(Assembler::notZero, error); - BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error); + + __ bind(done); } #undef __ diff --git a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp index 2c5887361eb31..752cff0125d21 100644 --- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,19 +33,41 @@ class MacroAssembler; #ifdef COMPILER1 +class CodeStub; +class LIR_Address; class LIR_Assembler; class LIR_Opr; class StubAssembler; class ZLoadBarrierStubC1; +class ZStoreBarrierStubC1; #endif // COMPILER1 #ifdef COMPILER2 +class MachNode; class Node; class ZLoadBarrierStubC2; +class ZStoreBarrierStubC2; #endif // COMPILER2 +const int ZBarrierRelocationFormatLoadGoodBeforeShl = 0; +const int ZBarrierRelocationFormatLoadBadAfterTest = 1; +const int ZBarrierRelocationFormatMarkBadAfterTest = 2; +const int ZBarrierRelocationFormatStoreGoodAfterCmp = 3; +const int ZBarrierRelocationFormatStoreBadAfterTest = 4; +const int ZBarrierRelocationFormatStoreGoodAfterOr = 5; +const int ZBarrierRelocationFormatStoreGoodAfterMov = 6; + class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { +private: + GrowableArrayCHeap _load_bad_relocations; + GrowableArrayCHeap _store_bad_relocations; + GrowableArrayCHeap _store_good_relocations; + public: + static const int32_t _zpointer_address_mask = 0xFFFF0000; + + ZBarrierSetAssembler(); + virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, @@ -54,7 +76,6 @@ class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { Register tmp1, Register tmp_thread); -#ifdef ASSERT virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, @@ -63,7 +84,43 @@ class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { Register tmp1, Register tmp2, Register tmp3); -#endif // ASSERT + + virtual bool supports_avx3_masked_arraycopy(); + + virtual void copy_load_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + Register dst, + Address src, + Register tmp); + + virtual void copy_store_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + Address dst, + Register src, + Register tmp); + + virtual void copy_load_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + XMMRegister dst, + Address src, + Register tmp, + XMMRegister xmm_tmp); + + virtual void copy_store_at(MacroAssembler* masm, + DecoratorSet decorators, + BasicType type, + size_t bytes, + Address dst, + XMMRegister src, + Register tmp1, + Register tmp2, + XMMRegister xmm_tmp); virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, @@ -79,8 +136,25 @@ class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { Label& slowpath); #ifdef COMPILER1 - void generate_c1_load_barrier_test(LIR_Assembler* ce, - LIR_Opr ref) const; + void generate_c1_color(LIR_Assembler* ce, LIR_Opr ref) const; + void generate_c1_uncolor(LIR_Assembler* ce, LIR_Opr ref) const; + + void generate_c1_store_barrier(LIR_Assembler* ce, + LIR_Address* addr, + LIR_Opr new_zaddress, + LIR_Opr new_zpointer, + ZStoreBarrierStubC1* stub) const; + + void generate_c1_store_barrier_stub(LIR_Assembler* ce, + ZStoreBarrierStubC1* stub) const; + + void generate_c1_store_barrier_runtime_stub(StubAssembler* sasm, + bool self_healing) const; + + void generate_c1_load_barrier(LIR_Assembler* ce, + LIR_Opr ref, + ZLoadBarrierStubC1* stub, + bool on_non_strong) const; void generate_c1_load_barrier_stub(LIR_Assembler* ce, ZLoadBarrierStubC1* stub) const; @@ -95,8 +169,32 @@ class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { void generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const; + void generate_c2_store_barrier_stub(MacroAssembler* masm, + 
ZStoreBarrierStubC2* stub) const; #endif // COMPILER2 + void store_barrier_fast(MacroAssembler* masm, + Address ref_addr, + Register rnew_persistent, + Register rnew_transient, + bool in_nmethod, + bool is_atomic, + Label& medium_path, + Label& medium_path_continuation) const; + + void store_barrier_medium(MacroAssembler* masm, + Address ref_addr, + Register tmp, + bool is_native, + bool is_atomic, + Label& medium_path_continuation, + Label& slow_path, + Label& slow_path_continuation) const; + + void patch_barrier_relocation(address addr, int format); + + void patch_barriers(); + void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error); }; diff --git a/src/hotspot/cpu/x86/gc/z/zGlobals_x86.hpp b/src/hotspot/cpu/x86/gc/z/zGlobals_x86.hpp index 51b29b2a32cbf..5ee5aef9baf59 100644 --- a/src/hotspot/cpu/x86/gc/z/zGlobals_x86.hpp +++ b/src/hotspot/cpu/x86/gc/z/zGlobals_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,10 +24,6 @@ #ifndef CPU_X86_GC_Z_ZGLOBALS_X86_HPP #define CPU_X86_GC_Z_ZGLOBALS_X86_HPP -const size_t ZPlatformHeapViews = 3; const size_t ZPlatformCacheLineSize = 64; -size_t ZPlatformAddressOffsetBits(); -size_t ZPlatformAddressMetadataShift(); - #endif // CPU_X86_GC_Z_ZGLOBALS_X86_HPP diff --git a/src/hotspot/cpu/x86/gc/z/z_x86_64.ad b/src/hotspot/cpu/x86/gc/z/z_x86_64.ad index 44f3f221faef1..adb0f3ab8ebf0 100644 --- a/src/hotspot/cpu/x86/gc/z/z_x86_64.ad +++ b/src/hotspot/cpu/x86/gc/z/z_x86_64.ad @@ -1,5 +1,5 @@ // -// Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
// // This code is free software; you can redistribute it and/or modify it @@ -32,52 +32,66 @@ source_hpp %{ source %{ #include "c2_intelJccErratum_x86.hpp" +#include "gc/z/zBarrierSetAssembler.hpp" -static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) { - if (barrier_data == ZLoadBarrierElided) { - return; - } - ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data); - { - IntelJccErratumAlignment intel_alignment(_masm, 10 /* jcc_size */); - __ testptr(ref, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset())); - __ jcc(Assembler::notZero, *stub->entry()); - } - __ bind(*stub->continuation()); +static void z_color(MacroAssembler& _masm, const MachNode* node, Register ref) { + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeShl); + __ shlq(ref, barrier_Relocation::unpatched); + __ orq_imm32(ref, barrier_Relocation::unpatched); + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodAfterOr); } -static void z_load_barrier_cmpxchg(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, Label& good) { - ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, ZLoadBarrierStrong); - { - IntelJccErratumAlignment intel_alignment(_masm, 10 /* jcc_size */); - __ testptr(ref, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset())); - __ jcc(Assembler::zero, good); - } - { - IntelJccErratumAlignment intel_alignment(_masm, 5 /* jcc_size */); - __ jmp(*stub->entry()); - } +static void z_uncolor(MacroAssembler& _masm, const MachNode* node, Register ref) { + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeShl); + __ shrq(ref, barrier_Relocation::unpatched); +} + +static void z_keep_alive_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref) { + __ Assembler::testl(ref, barrier_Relocation::unpatched); + __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadAfterTest); + + ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref); + __ jcc(Assembler::notEqual, *stub->entry()); + + z_uncolor(_masm, node, ref); + __ bind(*stub->continuation()); } -static void z_cmpxchg_common(MacroAssembler& _masm, const MachNode* node, Register mem_reg, Register newval, Register tmp) { - // Compare value (oldval) is in rax - const Address mem = Address(mem_reg, 0); +static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref) { + Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm); + const bool on_non_strong = + ((node->barrier_data() & ZBarrierWeak) != 0) || + ((node->barrier_data() & ZBarrierPhantom) != 0); - if (node->barrier_data() != ZLoadBarrierElided) { - __ movptr(tmp, rax); + if (on_non_strong) { + z_keep_alive_load_barrier(_masm, node, ref_addr, ref); + return; } - __ lock(); - __ cmpxchgptr(newval, mem); + z_uncolor(_masm, node, ref); + if (node->barrier_data() == ZBarrierElided) { + return; + } + ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref); + IntelJccErratumAlignment(_masm, 6); + __ jcc(Assembler::above, *stub->entry()); + __ bind(*stub->continuation()); +} - if (node->barrier_data() != ZLoadBarrierElided) { - Label good; - z_load_barrier_cmpxchg(_masm, node, mem, rax, tmp, good); - __ movptr(rax, tmp); - __ lock(); - __ cmpxchgptr(newval, mem); - __ 
bind(good); +static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, bool is_atomic) { + Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm); + if (node->barrier_data() == ZBarrierElided) { + if (rnew_zaddress != noreg) { + // noreg means null; no need to color + __ movptr(rnew_zpointer, rnew_zaddress); + z_color(_masm, node, rnew_zpointer); + } + } else { + bool is_native = (node->barrier_data() & ZBarrierNative) != 0; + ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic); + ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler(); + bs_asm->store_barrier_fast(&_masm, ref_addr, rnew_zaddress, rnew_zpointer, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation()); } } @@ -86,9 +100,9 @@ static void z_cmpxchg_common(MacroAssembler& _masm, const MachNode* node, Regist // Load Pointer instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr) %{ - predicate(UseZGC && n->as_Load()->barrier_data() != 0); + predicate(UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0); match(Set dst (LoadP mem)); - effect(KILL cr, TEMP dst); + effect(TEMP dst, KILL cr); ins_cost(125); @@ -96,33 +110,91 @@ instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr) ins_encode %{ __ movptr($dst$$Register, $mem$$Address); - z_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, barrier_data()); + z_load_barrier(_masm, this, $mem$$Address, $dst$$Register); %} ins_pipe(ialu_reg_mem); %} -instruct zCompareAndExchangeP(indirect mem, rax_RegP oldval, rRegP newval, rRegP tmp, rFlagsReg cr) %{ +// Load Pointer and Null Check +instruct zLoadPNullCheck(rFlagsReg cr, memory op, immP0 zero) +%{ + predicate(UseZGC && ZGenerational && n->in(1)->as_Load()->barrier_data() != 0); + match(Set cr (CmpP (LoadP op) zero)); + + ins_cost(500); // XXX + format %{ "testq $op, 0xffffffffffff0000\t# ptr" %} + ins_encode %{ + // A null pointer will have all address bits 0. This mask sign extends + // all address bits, so we can test if the address is 0. + __ testq($op$$Address, ZBarrierSetAssembler::_zpointer_address_mask); + %} + ins_pipe(ialu_cr_reg_imm); +%} + +// Store Pointer +instruct zStoreP(memory mem, any_RegP src, rRegP tmp, rFlagsReg cr) +%{ + predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0); + match(Set mem (StoreP mem src)); + effect(TEMP tmp, KILL cr); + + ins_cost(125); // XXX + format %{ "movq $mem, $src\t# ptr" %} + ins_encode %{ + z_store_barrier(_masm, this, $mem$$Address, $src$$Register, $tmp$$Register, false /* is_atomic */); + __ movq($mem$$Address, $tmp$$Register); + %} + ins_pipe(ialu_mem_reg); +%} + +// Store Null Pointer +instruct zStorePNull(memory mem, immP0 zero, rRegP tmp, rFlagsReg cr) +%{ + predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0); + match(Set mem (StoreP mem zero)); + effect(TEMP tmp, KILL cr); + + ins_cost(125); // XXX + format %{ "movq $mem, 0\t# ptr" %} + ins_encode %{ + z_store_barrier(_masm, this, $mem$$Address, noreg, $tmp$$Register, false /* is_atomic */); + // Store a colored null - barrier code above does not need to color + __ movq($mem$$Address, barrier_Relocation::unpatched); + // The relocation cant be fully after the mov, as that is the beginning of a random subsequent + // instruction, which violates assumptions made by unrelated code. 
Hence the end() - 1 + __ code_section()->relocate(__ code_section()->end() - 1, barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodAfterMov); + %} + ins_pipe(ialu_mem_reg); +%} + +instruct zCompareAndExchangeP(indirect mem, no_rax_RegP newval, rRegP tmp, rax_RegP oldval, rFlagsReg cr) %{ match(Set oldval (CompareAndExchangeP mem (Binary oldval newval))); - predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); - effect(KILL cr, TEMP tmp); + predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0); + effect(TEMP tmp, KILL cr); format %{ "lock\n\t" "cmpxchgq $newval, $mem" %} ins_encode %{ - precond($oldval$$Register == rax); - z_cmpxchg_common(_masm, this, $mem$$Register, $newval$$Register, $tmp$$Register); + assert_different_registers($oldval$$Register, $mem$$Register); + assert_different_registers($oldval$$Register, $newval$$Register); + const Address mem_addr = Address($mem$$Register, 0); + z_store_barrier(_masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */); + z_color(_masm, this, $oldval$$Register); + __ lock(); + __ cmpxchgptr($tmp$$Register, mem_addr); + z_uncolor(_masm, this, $oldval$$Register); %} ins_pipe(pipe_cmpxchg); %} -instruct zCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rFlagsReg cr, rax_RegP oldval) %{ +instruct zCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rax_RegP oldval, rFlagsReg cr) %{ match(Set res (CompareAndSwapP mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); - effect(KILL cr, KILL oldval, TEMP tmp); + predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0); + effect(TEMP tmp, KILL oldval, KILL cr); format %{ "lock\n\t" "cmpxchgq $newval, $mem\n\t" @@ -130,11 +202,12 @@ instruct zCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rFla "movzbl $res, $res" %} ins_encode %{ - precond($oldval$$Register == rax); - z_cmpxchg_common(_masm, this, $mem$$Register, $newval$$Register, $tmp$$Register); - if (barrier_data() != ZLoadBarrierElided) { - __ cmpptr($tmp$$Register, rax); - } + assert_different_registers($oldval$$Register, $mem$$Register); + const Address mem_addr = Address($mem$$Register, 0); + z_store_barrier(_masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */); + z_color(_masm, this, $oldval$$Register); + __ lock(); + __ cmpxchgptr($tmp$$Register, mem_addr); __ setb(Assembler::equal, $res$$Register); __ movzbl($res$$Register, $res$$Register); %} @@ -142,16 +215,20 @@ instruct zCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rFla ins_pipe(pipe_cmpxchg); %} -instruct zXChgP(indirect mem, rRegP newval, rFlagsReg cr) %{ +instruct zXChgP(indirect mem, rRegP newval, rRegP tmp, rFlagsReg cr) %{ match(Set newval (GetAndSetP mem newval)); - predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0); - effect(KILL cr); + predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0); + effect(TEMP tmp, KILL cr); format %{ "xchgq $newval, $mem" %} ins_encode %{ - __ xchgptr($newval$$Register, Address($mem$$Register, 0)); - z_load_barrier(_masm, this, Address(noreg, 0), $newval$$Register, noreg /* tmp */, barrier_data()); + assert_different_registers($mem$$Register, $newval$$Register); + const Address mem_addr = Address($mem$$Register, 0); + z_store_barrier(_masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */); 
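    // The exchange below operates entirely on colored pointers: z_store_barrier has
    // already colored the new value into $tmp, the xchg swaps it with the colored old
    // value (which comes back in $newval), and z_uncolor then strips the color bits so
    // the caller receives a plain oop.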
+ __ movptr($newval$$Register, $tmp$$Register); + __ xchgptr($newval$$Register, mem_addr); + z_uncolor(_masm, this, $newval$$Register); %} ins_pipe(pipe_cmpxchg); diff --git a/src/hotspot/cpu/x86/globalDefinitions_x86.hpp b/src/hotspot/cpu/x86/globalDefinitions_x86.hpp index 77b38e45374e5..bdf9ac440d054 100644 --- a/src/hotspot/cpu/x86/globalDefinitions_x86.hpp +++ b/src/hotspot/cpu/x86/globalDefinitions_x86.hpp @@ -71,12 +71,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false; #define SUPPORT_RESERVED_STACK_AREA #endif -#if INCLUDE_JVMCI -#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS EnableJVMCI -#else -#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false -#endif - #define USE_POINTERS_TO_REGISTER_IMPL_ARRAY #endif // CPU_X86_GLOBALDEFINITIONS_X86_HPP diff --git a/src/hotspot/cpu/x86/jvmciCodeInstaller_x86.cpp b/src/hotspot/cpu/x86/jvmciCodeInstaller_x86.cpp index 3e59c458c2af6..eadd9ebda593a 100644 --- a/src/hotspot/cpu/x86/jvmciCodeInstaller_x86.cpp +++ b/src/hotspot/cpu/x86/jvmciCodeInstaller_x86.cpp @@ -185,6 +185,15 @@ void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &, methodHandle& method, j if (!call->is_displacement_aligned()) { JVMCI_ERROR("unaligned displacement for call at offset %d", pc_offset); } + if (Continuations::enabled()) { + // Check for proper post_call_nop + NativePostCallNop* nop = nativePostCallNop_at(call->next_instruction_address()); + if (nop == NULL) { + JVMCI_ERROR("missing post call nop at offset %d", pc_offset); + } else { + _instructions->relocate(call->next_instruction_address(), relocInfo::post_call_nop_type); + } + } } void CodeInstaller::pd_relocate_poll(address pc, jint mark, JVMCI_TRAPS) { diff --git a/src/hotspot/cpu/x86/relocInfo_x86.hpp b/src/hotspot/cpu/x86/relocInfo_x86.hpp index 621bf7ae7282f..d3f213a6686e1 100644 --- a/src/hotspot/cpu/x86/relocInfo_x86.hpp +++ b/src/hotspot/cpu/x86/relocInfo_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,8 +35,8 @@ #ifndef AMD64 format_width = 1 #else - // vs Assembler::narrow_oop_operand. - format_width = 2 + // vs Assembler::narrow_oop_operand and ZGC barrier encodings. 
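    // (The seven ZBarrierRelocationFormat* constants introduced in
    // zBarrierSetAssembler_x86.hpp range over 0..6, which no longer fits in a
    // 2-bit format field; hence the wider width below.)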
+ format_width = 3 #endif }; diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp index 489ea3c69cc5d..6cd1765151492 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp @@ -44,9 +44,6 @@ #if INCLUDE_JVMCI #include "jvmci/jvmci_globals.hpp" #endif -#if INCLUDE_ZGC -#include "gc/z/zThreadLocalData.hpp" -#endif #if INCLUDE_JFR #include "jfr/support/jfrIntrinsics.hpp" #endif diff --git a/src/hotspot/cpu/x86/stubRoutines_x86.hpp b/src/hotspot/cpu/x86/stubRoutines_x86.hpp index 3f94520b664e2..2038fdff5ae46 100644 --- a/src/hotspot/cpu/x86/stubRoutines_x86.hpp +++ b/src/hotspot/cpu/x86/stubRoutines_x86.hpp @@ -38,7 +38,7 @@ enum platform_dependent_constants { // AVX512 intrinsics add more code in 64-bit VM, // Windows have more code to save/restore registers _compiler_stubs_code_size = 20000 LP64_ONLY(+30000) WINDOWS_ONLY(+2000), - _final_stubs_code_size = 10000 LP64_ONLY(+20000) WINDOWS_ONLY(+2000) + _final_stubs_code_size = 10000 LP64_ONLY(+20000) WINDOWS_ONLY(+2000) ZGC_ONLY(+20000) }; class x86 { diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp index 6144e10e8e2b1..15efa6ed21dc4 100644 --- a/src/hotspot/cpu/x86/templateTable_x86.cpp +++ b/src/hotspot/cpu/x86/templateTable_x86.cpp @@ -26,6 +26,7 @@ #include "asm/macroAssembler.hpp" #include "compiler/disassembler.hpp" #include "gc/shared/collectedHeap.hpp" +#include "gc/shared/gc_globals.hpp" #include "gc/shared/tlab_globals.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad index 2168ee4252ae6..218cd46a1c4fd 100644 --- a/src/hotspot/cpu/x86/x86_64.ad +++ b/src/hotspot/cpu/x86/x86_64.ad @@ -3235,7 +3235,7 @@ operand immL_32bits() operand immI_Pow2M1() %{ predicate((n->get_int() > 0) - && is_power_of_2(n->get_int() + 1)); + && is_power_of_2((juint)n->get_int() + 1)); match(ConI); op_cost(20); @@ -12449,6 +12449,17 @@ instruct testI_reg_imm(rFlagsReg cr, rRegI src, immI con, immI_0 zero) ins_pipe(ialu_cr_reg_imm); %} +instruct testI_reg_reg(rFlagsReg cr, rRegI src1, rRegI src2, immI_0 zero) +%{ + match(Set cr (CmpI (AndI src1 src2) zero)); + + format %{ "testl $src1, $src2" %} + ins_encode %{ + __ testl($src1$$Register, $src2$$Register); + %} + ins_pipe(ialu_cr_reg_imm); +%} + instruct testI_reg_mem(rFlagsReg cr, rRegI src, memory mem, immI_0 zero) %{ match(Set cr (CmpI (AndI src (LoadI mem)) zero)); @@ -12765,6 +12776,17 @@ instruct testL_reg_imm(rFlagsReg cr, rRegL src, immL32 con, immL0 zero) ins_pipe(ialu_cr_reg_imm); %} +instruct testL_reg_reg(rFlagsReg cr, rRegL src1, rRegL src2, immL0 zero) +%{ + match(Set cr (CmpL (AndL src1 src2) zero)); + + format %{ "testq $src1, $src2\t# long" %} + ins_encode %{ + __ testq($src1$$Register, $src2$$Register); + %} + ins_pipe(ialu_cr_reg_imm); +%} + instruct testL_reg_mem(rFlagsReg cr, rRegL src, memory mem, immL0 zero) %{ match(Set cr (CmpL (AndL src (LoadL mem)) zero)); diff --git a/src/hotspot/cpu/zero/globalDefinitions_zero.hpp b/src/hotspot/cpu/zero/globalDefinitions_zero.hpp index 271d95ee72c08..b8f01f4045486 100644 --- a/src/hotspot/cpu/zero/globalDefinitions_zero.hpp +++ b/src/hotspot/cpu/zero/globalDefinitions_zero.hpp @@ -42,6 +42,4 @@ // 32-bit integer argument values are extended to 64 bits. 
const bool CCallingConventionRequiresIntsAsLongs = false; -#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false - #endif // CPU_ZERO_GLOBALDEFINITIONS_ZERO_HPP diff --git a/src/hotspot/os/aix/decoder_aix.hpp b/src/hotspot/os/aix/decoder_aix.hpp index 0389852e4cbf9..0d87ba87b9496 100644 --- a/src/hotspot/os/aix/decoder_aix.hpp +++ b/src/hotspot/os/aix/decoder_aix.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2013 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -29,9 +29,7 @@ // Provide simple AIXDecoder which enables decoding of C frames in VM. class AIXDecoder: public AbstractDecoder { public: - AIXDecoder() { - _decoder_status = no_error; - } + AIXDecoder() : AbstractDecoder(no_error) {} virtual ~AIXDecoder() {} virtual bool demangle(const char* symbol, char* buf, int buflen) { return false; } // use AixSymbols::get_function_name to demangle diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp index fe86adcc40a66..82e048aae23c1 100644 --- a/src/hotspot/os/aix/os_aix.cpp +++ b/src/hotspot/os/aix/os_aix.cpp @@ -245,6 +245,10 @@ static bool is_close_to_brk(address a) { return false; } +julong os::free_memory() { + return Aix::available_memory(); +} + julong os::available_memory() { return Aix::available_memory(); } @@ -743,7 +747,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, assert(thread->osthread() == nullptr, "caller responsible"); // Allocate the OSThread object. - OSThread* osthread = new OSThread(); + OSThread* osthread = new (std::nothrow) OSThread(); if (osthread == nullptr) { return false; } @@ -857,7 +861,7 @@ bool os::create_attached_thread(JavaThread* thread) { #endif // Allocate the OSThread object - OSThread* osthread = new OSThread(); + OSThread* osthread = new (std::nothrow) OSThread(); if (osthread == nullptr) { return false; diff --git a/src/hotspot/os/aix/os_aix.hpp b/src/hotspot/os/aix/os_aix.hpp index d4af6a6d60648..ecd181f8989d2 100644 --- a/src/hotspot/os/aix/os_aix.hpp +++ b/src/hotspot/os/aix/os_aix.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2013, 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -59,6 +59,7 @@ class os::Aix { static int _extshm; static julong available_memory(); + static julong free_memory(); static julong physical_memory() { return _physical_memory; } static void initialize_system_info(); diff --git a/src/hotspot/os/bsd/decoder_machO.hpp b/src/hotspot/os/bsd/decoder_machO.hpp index 5ceda782c6ee4..b5bb3d2d5ea2f 100644 --- a/src/hotspot/os/bsd/decoder_machO.hpp +++ b/src/hotspot/os/bsd/decoder_machO.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,11 +29,9 @@ #include "utilities/decoder.hpp" -// Just a placehold for now, a real implementation should derive -// from AbstractDecoder class MachODecoder : public AbstractDecoder { public: - MachODecoder() { } + MachODecoder() : AbstractDecoder(no_error) { } virtual ~MachODecoder() { } virtual bool demangle(const char* symbol, char* buf, int buflen); virtual bool decode(address pc, char* buf, int buflen, int* offset, diff --git a/src/hotspot/os/bsd/gc/x/xLargePages_bsd.cpp b/src/hotspot/os/bsd/gc/x/xLargePages_bsd.cpp new file mode 100644 index 0000000000000..1c82e83120881 --- /dev/null +++ b/src/hotspot/os/bsd/gc/x/xLargePages_bsd.cpp @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xLargePages.hpp" +#include "runtime/globals.hpp" + +void XLargePages::pd_initialize() { + if (UseLargePages) { + _state = Explicit; + } else { + _state = Disabled; + } +} diff --git a/src/hotspot/os/bsd/gc/x/xNUMA_bsd.cpp b/src/hotspot/os/bsd/gc/x/xNUMA_bsd.cpp new file mode 100644 index 0000000000000..b0e23a1716ad9 --- /dev/null +++ b/src/hotspot/os/bsd/gc/x/xNUMA_bsd.cpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/x/xNUMA.hpp" +#include "utilities/globalDefinitions.hpp" + +void XNUMA::pd_initialize() { + _enabled = false; +} + +uint32_t XNUMA::count() { + return 1; +} + +uint32_t XNUMA::id() { + return 0; +} + +uint32_t XNUMA::memory_id(uintptr_t addr) { + // NUMA support not enabled, assume everything belongs to node zero + return 0; +} diff --git a/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.cpp b/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.cpp new file mode 100644 index 0000000000000..2c64c3788d34d --- /dev/null +++ b/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.cpp @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/x/xErrno.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xLargePages.inline.hpp" +#include "gc/x/xPhysicalMemory.inline.hpp" +#include "gc/x/xPhysicalMemoryBacking_bsd.hpp" +#include "logging/log.hpp" +#include "runtime/globals.hpp" +#include "runtime/os.hpp" +#include "utilities/align.hpp" +#include "utilities/debug.hpp" + +#include +#include +#include +#include + +// The backing is represented by a reserved virtual address space, in which +// we commit and uncommit physical memory. Multi-mapping the different heap +// views is done by simply remapping the backing memory using mach_vm_remap(). + +static int vm_flags_superpage() { + if (!XLargePages::is_explicit()) { + return 0; + } + + const int page_size_in_megabytes = XGranuleSize >> 20; + return page_size_in_megabytes << VM_FLAGS_SUPERPAGE_SHIFT; +} + +static XErrno mremap(uintptr_t from_addr, uintptr_t to_addr, size_t size) { + mach_vm_address_t remap_addr = to_addr; + vm_prot_t remap_cur_prot; + vm_prot_t remap_max_prot; + + // Remap memory to an additional location + const kern_return_t res = mach_vm_remap(mach_task_self(), + &remap_addr, + size, + 0 /* mask */, + VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | vm_flags_superpage(), + mach_task_self(), + from_addr, + FALSE /* copy */, + &remap_cur_prot, + &remap_max_prot, + VM_INHERIT_COPY); + + return (res == KERN_SUCCESS) ? 
XErrno(0) : XErrno(EINVAL); +} + +XPhysicalMemoryBacking::XPhysicalMemoryBacking(size_t max_capacity) : + _base(0), + _initialized(false) { + + // Reserve address space for backing memory + _base = (uintptr_t)os::reserve_memory(max_capacity); + if (_base == 0) { + // Failed + log_error_pd(gc)("Failed to reserve address space for backing memory"); + return; + } + + // Successfully initialized + _initialized = true; +} + +bool XPhysicalMemoryBacking::is_initialized() const { + return _initialized; +} + +void XPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const { + // Does nothing +} + +bool XPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const { + assert(is_aligned(offset, os::vm_page_size()), "Invalid offset"); + assert(is_aligned(length, os::vm_page_size()), "Invalid length"); + + log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", + offset / M, (offset + length) / M, length / M); + + const uintptr_t addr = _base + offset; + const void* const res = mmap((void*)addr, length, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + if (res == MAP_FAILED) { + XErrno err; + log_error(gc)("Failed to commit memory (%s)", err.to_string()); + return false; + } + + // Success + return true; +} + +size_t XPhysicalMemoryBacking::commit(size_t offset, size_t length) const { + // Try to commit the whole region + if (commit_inner(offset, length)) { + // Success + return length; + } + + // Failed, try to commit as much as possible + size_t start = offset; + size_t end = offset + length; + + for (;;) { + length = align_down((end - start) / 2, XGranuleSize); + if (length == 0) { + // Done, don't commit more + return start - offset; + } + + if (commit_inner(start, length)) { + // Success, try commit more + start += length; + } else { + // Failed, try commit less + end -= length; + } + } +} + +size_t XPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const { + assert(is_aligned(offset, os::vm_page_size()), "Invalid offset"); + assert(is_aligned(length, os::vm_page_size()), "Invalid length"); + + log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", + offset / M, (offset + length) / M, length / M); + + const uintptr_t start = _base + offset; + const void* const res = mmap((void*)start, length, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0); + if (res == MAP_FAILED) { + XErrno err; + log_error(gc)("Failed to uncommit memory (%s)", err.to_string()); + return 0; + } + + return length; +} + +void XPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const { + const XErrno err = mremap(_base + offset, addr, size); + if (err) { + fatal("Failed to remap memory (%s)", err.to_string()); + } +} + +void XPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const { + // Note that we must keep the address space reservation intact and just detach + // the backing memory. For this reason we map a new anonymous, non-accessible + // and non-reserved page over the mapping instead of actually unmapping. 
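  // (A plain munmap() would punch a hole in the reserved range that an unrelated
  // mmap() elsewhere in the process could later claim, so the mapping is instead
  // overwritten in place with an inaccessible PROT_NONE mapping below.)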
+ const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0); + if (res == MAP_FAILED) { + XErrno err; + fatal("Failed to map memory (%s)", err.to_string()); + } +} diff --git a/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.hpp b/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.hpp new file mode 100644 index 0000000000000..8b4747026ff23 --- /dev/null +++ b/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.hpp @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef OS_BSD_GC_X_XPHYSICALMEMORYBACKING_BSD_HPP +#define OS_BSD_GC_X_XPHYSICALMEMORYBACKING_BSD_HPP + +class XPhysicalMemoryBacking { +private: + uintptr_t _base; + bool _initialized; + + bool commit_inner(size_t offset, size_t length) const; + +public: + XPhysicalMemoryBacking(size_t max_capacity); + + bool is_initialized() const; + + void warn_commit_limits(size_t max_capacity) const; + + size_t commit(size_t offset, size_t length) const; + size_t uncommit(size_t offset, size_t length) const; + + void map(uintptr_t addr, size_t size, uintptr_t offset) const; + void unmap(uintptr_t addr, size_t size) const; +}; + +#endif // OS_BSD_GC_X_XPHYSICALMEMORYBACKING_BSD_HPP diff --git a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp index ea9a54bcb39c8..218173a739a75 100644 --- a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp +++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ #include "precompiled.hpp" #include "gc/shared/gcLogPrecious.hpp" +#include "gc/z/zAddress.inline.hpp" #include "gc/z/zErrno.hpp" #include "gc/z/zGlobals.hpp" #include "gc/z/zLargePages.inline.hpp" @@ -97,14 +98,14 @@ void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const { // Does nothing } -bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const { - assert(is_aligned(offset, os::vm_page_size()), "Invalid offset"); +bool ZPhysicalMemoryBacking::commit_inner(zoffset offset, size_t length) const { + assert(is_aligned(untype(offset), os::vm_page_size()), "Invalid offset"); assert(is_aligned(length, os::vm_page_size()), "Invalid length"); log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - offset / M, (offset + length) / M, length / M); + untype(offset) / M, untype(offset) + length / M, length / M); - const uintptr_t addr = _base + offset; + const uintptr_t addr = _base + untype(offset); const void* const res = mmap((void*)addr, length, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); if (res == MAP_FAILED) { ZErrno err; @@ -116,7 +117,7 @@ bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const { return true; } -size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const { +size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) const { // Try to commit the whole region if (commit_inner(offset, length)) { // Success @@ -124,8 +125,8 @@ size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const { } // Failed, try to commit as much as possible - size_t start = offset; - size_t end = offset + length; + zoffset start = offset; + zoffset end = offset + length; for (;;) { length = align_down((end - start) / 2, ZGranuleSize); @@ -144,14 +145,14 @@ size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const { } } -size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const { - assert(is_aligned(offset, os::vm_page_size()), "Invalid offset"); +size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) const { + assert(is_aligned(untype(offset), os::vm_page_size()), "Invalid offset"); assert(is_aligned(length, os::vm_page_size()), "Invalid length"); log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - offset / M, (offset + length) / M, length / M); + untype(offset) / M, untype(offset) + length / M, length / M); - const uintptr_t start = _base + offset; + const uintptr_t start = _base + untype(offset); const void* const res = mmap((void*)start, length, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0); if (res == MAP_FAILED) { ZErrno err; @@ -162,18 +163,18 @@ size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const { return length; } -void ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const { - const ZErrno err = mremap(_base + offset, addr, size); +void ZPhysicalMemoryBacking::map(zaddress_unsafe addr, size_t size, zoffset offset) const { + const ZErrno err = mremap(_base + untype(offset), untype(addr), size); if (err) { fatal("Failed to remap memory (%s)", err.to_string()); } } -void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const { +void ZPhysicalMemoryBacking::unmap(zaddress_unsafe addr, size_t size) const { // Note that we must keep the address space 
reservation intact and just detach // the backing memory. For this reason we map a new anonymous, non-accessible // and non-reserved page over the mapping instead of actually unmapping. - const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0); + const void* const res = mmap((void*)untype(addr), size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0); if (res == MAP_FAILED) { ZErrno err; fatal("Failed to map memory (%s)", err.to_string()); diff --git a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp index ca61ec65eea90..d74de5375eecc 100644 --- a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp +++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,12 +24,14 @@ #ifndef OS_BSD_GC_Z_ZPHYSICALMEMORYBACKING_BSD_HPP #define OS_BSD_GC_Z_ZPHYSICALMEMORYBACKING_BSD_HPP +#include "gc/z/zAddress.hpp" + class ZPhysicalMemoryBacking { private: uintptr_t _base; bool _initialized; - bool commit_inner(size_t offset, size_t length) const; + bool commit_inner(zoffset offset, size_t length) const; public: ZPhysicalMemoryBacking(size_t max_capacity); @@ -38,11 +40,11 @@ class ZPhysicalMemoryBacking { void warn_commit_limits(size_t max_capacity) const; - size_t commit(size_t offset, size_t length) const; - size_t uncommit(size_t offset, size_t length) const; + size_t commit(zoffset offset, size_t length) const; + size_t uncommit(zoffset offset, size_t length) const; - void map(uintptr_t addr, size_t size, uintptr_t offset) const; - void unmap(uintptr_t addr, size_t size) const; + void map(zaddress_unsafe addr, size_t size, zoffset offset) const; + void unmap(zaddress_unsafe addr, size_t size) const; }; #endif // OS_BSD_GC_Z_ZPHYSICALMEMORYBACKING_BSD_HPP diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp index f4799e76a32a8..4e3d53ee94253 100644 --- a/src/hotspot/os/bsd/os_bsd.cpp +++ b/src/hotspot/os/bsd/os_bsd.cpp @@ -138,6 +138,10 @@ julong os::available_memory() { return Bsd::available_memory(); } +julong os::free_memory() { + return Bsd::available_memory(); +} + // available here means free julong os::Bsd::available_memory() { uint64_t available = physical_memory() >> 2; @@ -586,7 +590,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, assert(thread->osthread() == nullptr, "caller responsible"); // Allocate the OSThread object - OSThread* osthread = new OSThread(); + OSThread* osthread = new (std::nothrow) OSThread(); if (osthread == nullptr) { return false; } @@ -679,7 +683,7 @@ bool os::create_attached_thread(JavaThread* thread) { #endif // Allocate the OSThread object - OSThread* osthread = new OSThread(); + OSThread* osthread = new (std::nothrow) OSThread(); if (osthread == nullptr) { return false; @@ -2073,7 +2077,7 @@ uint os::processor_id() { // Assign processor id to APIC id processor_id = Atomic::cmpxchg(&processor_id_map[apic_id], processor_id_unassigned, processor_id_assigning); if (processor_id == processor_id_unassigned) { - processor_id = Atomic::fetch_and_add(&processor_id_next, 1) % os::processor_count(); + processor_id = Atomic::fetch_then_add(&processor_id_next, 1) 
% os::processor_count(); Atomic::store(&processor_id_map[apic_id], processor_id); } } diff --git a/src/hotspot/os/bsd/os_bsd.hpp b/src/hotspot/os/bsd/os_bsd.hpp index 87f4b69587f14..d34803e144bf2 100644 --- a/src/hotspot/os/bsd/os_bsd.hpp +++ b/src/hotspot/os/bsd/os_bsd.hpp @@ -46,6 +46,7 @@ class os::Bsd { static pthread_t _main_thread; static julong available_memory(); + static julong free_memory(); static julong physical_memory() { return _physical_memory; } static void initialize_system_info(); diff --git a/src/hotspot/os/linux/gc/x/xLargePages_linux.cpp b/src/hotspot/os/linux/gc/x/xLargePages_linux.cpp new file mode 100644 index 0000000000000..6ad956b1e63fe --- /dev/null +++ b/src/hotspot/os/linux/gc/x/xLargePages_linux.cpp @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xLargePages.hpp" +#include "runtime/globals.hpp" + +void XLargePages::pd_initialize() { + if (UseLargePages) { + if (UseTransparentHugePages) { + _state = Transparent; + } else { + _state = Explicit; + } + } else { + _state = Disabled; + } +} diff --git a/src/hotspot/os/linux/gc/x/xMountPoint_linux.cpp b/src/hotspot/os/linux/gc/x/xMountPoint_linux.cpp new file mode 100644 index 0000000000000..96c0f2f92dbd2 --- /dev/null +++ b/src/hotspot/os/linux/gc/x/xMountPoint_linux.cpp @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/x/xArray.inline.hpp" +#include "gc/x/xErrno.hpp" +#include "gc/x/xMountPoint_linux.hpp" +#include "runtime/globals.hpp" +#include "runtime/os.hpp" +#include "utilities/globalDefinitions.hpp" + +#include +#include + +// Mount information, see proc(5) for more details. +#define PROC_SELF_MOUNTINFO "/proc/self/mountinfo" + +XMountPoint::XMountPoint(const char* filesystem, const char** preferred_mountpoints) { + if (AllocateHeapAt != nullptr) { + // Use specified path + _path = os::strdup(AllocateHeapAt, mtGC); + } else { + // Find suitable path + _path = find_mountpoint(filesystem, preferred_mountpoints); + } +} + +XMountPoint::~XMountPoint() { + os::free(_path); + _path = nullptr; +} + +char* XMountPoint::get_mountpoint(const char* line, const char* filesystem) const { + char* line_mountpoint = nullptr; + char* line_filesystem = nullptr; + + // Parse line and return a newly allocated string containing the mount point if + // the line contains a matching filesystem and the mount point is accessible by + // the current user. + // sscanf, using %m, will return malloced memory. Need raw ::free, not os::free. + if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &line_mountpoint, &line_filesystem) != 2 || + strcmp(line_filesystem, filesystem) != 0 || + access(line_mountpoint, R_OK|W_OK|X_OK) != 0) { + // Not a matching or accessible filesystem + ALLOW_C_FUNCTION(::free, ::free(line_mountpoint);) + line_mountpoint = nullptr; + } + + ALLOW_C_FUNCTION(::free, ::free(line_filesystem);) + + return line_mountpoint; +} + +void XMountPoint::get_mountpoints(const char* filesystem, XArray* mountpoints) const { + FILE* fd = os::fopen(PROC_SELF_MOUNTINFO, "r"); + if (fd == nullptr) { + XErrno err; + log_error_p(gc)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string()); + return; + } + + char* line = nullptr; + size_t length = 0; + + while (getline(&line, &length, fd) != -1) { + char* const mountpoint = get_mountpoint(line, filesystem); + if (mountpoint != nullptr) { + mountpoints->append(mountpoint); + } + } + + // readline will return malloced memory. Need raw ::free, not os::free. 
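  // Minimal sketch of the getline(3) pattern used above, for illustration only:
  //   char*  line   = nullptr;
  //   size_t length = 0;
  //   while (getline(&line, &length, fd) != -1) { /* consume line */ }
  //   ::free(line);  // getline(3) reuses and grows a single buffer,
  //                  // so one raw free after the loop releases it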
+ ALLOW_C_FUNCTION(::free, ::free(line);) + fclose(fd); +} + +void XMountPoint::free_mountpoints(XArray* mountpoints) const { + XArrayIterator iter(mountpoints); + for (char* mountpoint; iter.next(&mountpoint);) { + ALLOW_C_FUNCTION(::free, ::free(mountpoint);) // *not* os::free + } + mountpoints->clear(); +} + +char* XMountPoint::find_preferred_mountpoint(const char* filesystem, + XArray* mountpoints, + const char** preferred_mountpoints) const { + // Find preferred mount point + XArrayIterator iter1(mountpoints); + for (char* mountpoint; iter1.next(&mountpoint);) { + for (const char** preferred = preferred_mountpoints; *preferred != nullptr; preferred++) { + if (!strcmp(mountpoint, *preferred)) { + // Preferred mount point found + return os::strdup(mountpoint, mtGC); + } + } + } + + // Preferred mount point not found + log_error_p(gc)("More than one %s filesystem found:", filesystem); + XArrayIterator iter2(mountpoints); + for (char* mountpoint; iter2.next(&mountpoint);) { + log_error_p(gc)(" %s", mountpoint); + } + + return nullptr; +} + +char* XMountPoint::find_mountpoint(const char* filesystem, const char** preferred_mountpoints) const { + char* path = nullptr; + XArray mountpoints; + + get_mountpoints(filesystem, &mountpoints); + + if (mountpoints.length() == 0) { + // No mount point found + log_error_p(gc)("Failed to find an accessible %s filesystem", filesystem); + } else if (mountpoints.length() == 1) { + // One mount point found + path = os::strdup(mountpoints.at(0), mtGC); + } else { + // More than one mount point found + path = find_preferred_mountpoint(filesystem, &mountpoints, preferred_mountpoints); + } + + free_mountpoints(&mountpoints); + + return path; +} + +const char* XMountPoint::get() const { + return _path; +} diff --git a/src/hotspot/os/linux/gc/x/xMountPoint_linux.hpp b/src/hotspot/os/linux/gc/x/xMountPoint_linux.hpp new file mode 100644 index 0000000000000..e0ca126e0667a --- /dev/null +++ b/src/hotspot/os/linux/gc/x/xMountPoint_linux.hpp @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef OS_LINUX_GC_X_XMOUNTPOINT_LINUX_HPP +#define OS_LINUX_GC_X_XMOUNTPOINT_LINUX_HPP + +#include "gc/x/xArray.hpp" +#include "memory/allocation.hpp" + +class XMountPoint : public StackObj { +private: + char* _path; + + char* get_mountpoint(const char* line, + const char* filesystem) const; + void get_mountpoints(const char* filesystem, + XArray* mountpoints) const; + void free_mountpoints(XArray* mountpoints) const; + char* find_preferred_mountpoint(const char* filesystem, + XArray* mountpoints, + const char** preferred_mountpoints) const; + char* find_mountpoint(const char* filesystem, + const char** preferred_mountpoints) const; + +public: + XMountPoint(const char* filesystem, const char** preferred_mountpoints); + ~XMountPoint(); + + const char* get() const; +}; + +#endif // OS_LINUX_GC_X_XMOUNTPOINT_LINUX_HPP diff --git a/src/hotspot/os/linux/gc/x/xNUMA_linux.cpp b/src/hotspot/os/linux/gc/x/xNUMA_linux.cpp new file mode 100644 index 0000000000000..0cc557dde6e86 --- /dev/null +++ b/src/hotspot/os/linux/gc/x/xNUMA_linux.cpp @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "gc/x/xCPU.inline.hpp" +#include "gc/x/xErrno.hpp" +#include "gc/x/xNUMA.hpp" +#include "gc/x/xSyscall_linux.hpp" +#include "os_linux.hpp" +#include "runtime/globals.hpp" +#include "runtime/os.hpp" +#include "utilities/debug.hpp" + +void XNUMA::pd_initialize() { + _enabled = UseNUMA; +} + +uint32_t XNUMA::count() { + if (!_enabled) { + // NUMA support not enabled + return 1; + } + + return os::Linux::numa_max_node() + 1; +} + +uint32_t XNUMA::id() { + if (!_enabled) { + // NUMA support not enabled + return 0; + } + + return os::Linux::get_node_by_cpu(XCPU::id()); +} + +uint32_t XNUMA::memory_id(uintptr_t addr) { + if (!_enabled) { + // NUMA support not enabled, assume everything belongs to node zero + return 0; + } + + uint32_t id = (uint32_t)-1; + + if (XSyscall::get_mempolicy((int*)&id, nullptr, 0, (void*)addr, MPOL_F_NODE | MPOL_F_ADDR) == -1) { + XErrno err; + fatal("Failed to get NUMA id for memory at " PTR_FORMAT " (%s)", addr, err.to_string()); + } + + assert(id < count(), "Invalid NUMA id"); + + return id; +} diff --git a/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.cpp b/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.cpp new file mode 100644 index 0000000000000..5db59741a5864 --- /dev/null +++ b/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.cpp @@ -0,0 +1,724 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. 
All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/x/xArray.inline.hpp" +#include "gc/x/xErrno.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xLargePages.inline.hpp" +#include "gc/x/xMountPoint_linux.hpp" +#include "gc/x/xNUMA.inline.hpp" +#include "gc/x/xPhysicalMemoryBacking_linux.hpp" +#include "gc/x/xSyscall_linux.hpp" +#include "logging/log.hpp" +#include "os_linux.hpp" +#include "runtime/init.hpp" +#include "runtime/os.hpp" +#include "runtime/safefetch.hpp" +#include "utilities/align.hpp" +#include "utilities/debug.hpp" +#include "utilities/growableArray.hpp" + +#include +#include +#include +#include +#include +#include +#include + +// +// Support for building on older Linux systems +// + +// memfd_create(2) flags +#ifndef MFD_CLOEXEC +#define MFD_CLOEXEC 0x0001U +#endif +#ifndef MFD_HUGETLB +#define MFD_HUGETLB 0x0004U +#endif +#ifndef MFD_HUGE_2MB +#define MFD_HUGE_2MB 0x54000000U +#endif + +// open(2) flags +#ifndef O_CLOEXEC +#define O_CLOEXEC 02000000 +#endif +#ifndef O_TMPFILE +#define O_TMPFILE (020000000 | O_DIRECTORY) +#endif + +// fallocate(2) flags +#ifndef FALLOC_FL_KEEP_SIZE +#define FALLOC_FL_KEEP_SIZE 0x01 +#endif +#ifndef FALLOC_FL_PUNCH_HOLE +#define FALLOC_FL_PUNCH_HOLE 0x02 +#endif + +// Filesystem types, see statfs(2) +#ifndef TMPFS_MAGIC +#define TMPFS_MAGIC 0x01021994 +#endif +#ifndef HUGETLBFS_MAGIC +#define HUGETLBFS_MAGIC 0x958458f6 +#endif + +// Filesystem names +#define XFILESYSTEM_TMPFS "tmpfs" +#define XFILESYSTEM_HUGETLBFS "hugetlbfs" + +// Proc file entry for max map mount +#define XFILENAME_PROC_MAX_MAP_COUNT "/proc/sys/vm/max_map_count" + +// Sysfs file for transparent huge page on tmpfs +#define XFILENAME_SHMEM_ENABLED "/sys/kernel/mm/transparent_hugepage/shmem_enabled" + +// Java heap filename +#define XFILENAME_HEAP "java_heap" + +// Preferred tmpfs mount points, ordered by priority +static const char* z_preferred_tmpfs_mountpoints[] = { + "/dev/shm", + "/run/shm", + nullptr +}; + +// Preferred hugetlbfs mount points, ordered by priority +static const char* z_preferred_hugetlbfs_mountpoints[] = { + "/dev/hugepages", + "/hugepages", + nullptr +}; + +static int z_fallocate_hugetlbfs_attempts = 3; +static bool z_fallocate_supported = true; + +XPhysicalMemoryBacking::XPhysicalMemoryBacking(size_t max_capacity) : + _fd(-1), + _filesystem(0), + _block_size(0), + _available(0), + _initialized(false) { + + // Create backing file + _fd = create_fd(XFILENAME_HEAP); + if (_fd == -1) { + return; + } + + // Truncate backing file + 
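  // ftruncate(2) sizes the backing file to max_capacity up front (space is still
  // committed lazily later); the call can be interrupted by a signal (EINTR), in
  // which case it is retried, while any other errno is reported as an
  // initialization failure below.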
while (ftruncate(_fd, max_capacity) == -1) { + if (errno != EINTR) { + XErrno err; + log_error_p(gc)("Failed to truncate backing file (%s)", err.to_string()); + return; + } + } + + // Get filesystem statistics + struct statfs buf; + if (fstatfs(_fd, &buf) == -1) { + XErrno err; + log_error_p(gc)("Failed to determine filesystem type for backing file (%s)", err.to_string()); + return; + } + + _filesystem = buf.f_type; + _block_size = buf.f_bsize; + _available = buf.f_bavail * _block_size; + + log_info_p(gc, init)("Heap Backing Filesystem: %s (" UINT64_FORMAT_X ")", + is_tmpfs() ? XFILESYSTEM_TMPFS : is_hugetlbfs() ? XFILESYSTEM_HUGETLBFS : "other", _filesystem); + + // Make sure the filesystem type matches requested large page type + if (XLargePages::is_transparent() && !is_tmpfs()) { + log_error_p(gc)("-XX:+UseTransparentHugePages can only be enabled when using a %s filesystem", + XFILESYSTEM_TMPFS); + return; + } + + if (XLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) { + log_error_p(gc)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel", + XFILESYSTEM_TMPFS); + return; + } + + if (XLargePages::is_explicit() && !is_hugetlbfs()) { + log_error_p(gc)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled " + "when using a %s filesystem", XFILESYSTEM_HUGETLBFS); + return; + } + + if (!XLargePages::is_explicit() && is_hugetlbfs()) { + log_error_p(gc)("-XX:+UseLargePages must be enabled when using a %s filesystem", + XFILESYSTEM_HUGETLBFS); + return; + } + + // Make sure the filesystem block size is compatible + if (XGranuleSize % _block_size != 0) { + log_error_p(gc)("Filesystem backing the heap has incompatible block size (" SIZE_FORMAT ")", + _block_size); + return; + } + + if (is_hugetlbfs() && _block_size != XGranuleSize) { + log_error_p(gc)("%s filesystem has unexpected block size " SIZE_FORMAT " (expected " SIZE_FORMAT ")", + XFILESYSTEM_HUGETLBFS, _block_size, XGranuleSize); + return; + } + + // Successfully initialized + _initialized = true; +} + +int XPhysicalMemoryBacking::create_mem_fd(const char* name) const { + assert(XGranuleSize == 2 * M, "Granule size must match MFD_HUGE_2MB"); + + // Create file name + char filename[PATH_MAX]; + snprintf(filename, sizeof(filename), "%s%s", name, XLargePages::is_explicit() ? ".hugetlb" : ""); + + // Create file + const int extra_flags = XLargePages::is_explicit() ? (MFD_HUGETLB | MFD_HUGE_2MB) : 0; + const int fd = XSyscall::memfd_create(filename, MFD_CLOEXEC | extra_flags); + if (fd == -1) { + XErrno err; + log_debug_p(gc, init)("Failed to create memfd file (%s)", + (XLargePages::is_explicit() && (err == EINVAL || err == ENODEV)) ? + "Hugepages (2M) not available" : err.to_string()); + return -1; + } + + log_info_p(gc, init)("Heap Backing File: /memfd:%s", filename); + + return fd; +} + +int XPhysicalMemoryBacking::create_file_fd(const char* name) const { + const char* const filesystem = XLargePages::is_explicit() + ? XFILESYSTEM_HUGETLBFS + : XFILESYSTEM_TMPFS; + const char** const preferred_mountpoints = XLargePages::is_explicit() + ? z_preferred_hugetlbfs_mountpoints + : z_preferred_tmpfs_mountpoints; + + // Find mountpoint + XMountPoint mountpoint(filesystem, preferred_mountpoints); + if (mountpoint.get() == nullptr) { + log_error_p(gc)("Use -XX:AllocateHeapAt to specify the path to a %s filesystem", filesystem); + return -1; + } + + // Try to create an anonymous file using the O_TMPFILE flag. Note that this + // flag requires kernel >= 3.11. 
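// Illustrative sketch (not part of the patch): probing O_TMPFILE support on a
// mount point in isolation, mirroring what create_file_fd() does just below.
// The errno interpretation follows the EINVAL check used there; kernels or
// filesystems without O_TMPFILE may also report EISDIR or EOPNOTSUPP. The
// fallback define and the /dev/shm path are only examples.
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

#ifndef O_TMPFILE
#define O_TMPFILE (020000000 | O_DIRECTORY)
#endif

static bool supports_o_tmpfile(const char* mountpoint) {
  const int fd = open(mountpoint, O_TMPFILE | O_EXCL | O_RDWR | O_CLOEXEC, S_IRUSR | S_IWUSR);
  if (fd == -1) {
    // EINVAL (or EISDIR/EOPNOTSUPP on some kernels) means O_TMPFILE is not usable here
    return false;
  }
  close(fd);
  return true;
}

int main() {
  printf("/dev/shm O_TMPFILE support: %s\n", supports_o_tmpfile("/dev/shm") ? "yes" : "no");
  return 0;
}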
If this fails we fall back to open/unlink. + const int fd_anon = os::open(mountpoint.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR); + if (fd_anon == -1) { + XErrno err; + log_debug_p(gc, init)("Failed to create anonymous file in %s (%s)", mountpoint.get(), + (err == EINVAL ? "Not supported" : err.to_string())); + } else { + // Get inode number for anonymous file + struct stat stat_buf; + if (fstat(fd_anon, &stat_buf) == -1) { + XErrno err; + log_error_pd(gc)("Failed to determine inode number for anonymous file (%s)", err.to_string()); + return -1; + } + + log_info_p(gc, init)("Heap Backing File: %s/#" UINT64_FORMAT, mountpoint.get(), (uint64_t)stat_buf.st_ino); + + return fd_anon; + } + + log_debug_p(gc, init)("Falling back to open/unlink"); + + // Create file name + char filename[PATH_MAX]; + snprintf(filename, sizeof(filename), "%s/%s.%d", mountpoint.get(), name, os::current_process_id()); + + // Create file + const int fd = os::open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR); + if (fd == -1) { + XErrno err; + log_error_p(gc)("Failed to create file %s (%s)", filename, err.to_string()); + return -1; + } + + // Unlink file + if (unlink(filename) == -1) { + XErrno err; + log_error_p(gc)("Failed to unlink file %s (%s)", filename, err.to_string()); + return -1; + } + + log_info_p(gc, init)("Heap Backing File: %s", filename); + + return fd; +} + +int XPhysicalMemoryBacking::create_fd(const char* name) const { + if (AllocateHeapAt == nullptr) { + // If the path is not explicitly specified, then we first try to create a memfd file + // instead of looking for a tmpfd/hugetlbfs mount point. Note that memfd_create() might + // not be supported at all (requires kernel >= 3.17), or it might not support large + // pages (requires kernel >= 4.14). If memfd_create() fails, then we try to create a + // file on an accessible tmpfs or hugetlbfs mount point. + const int fd = create_mem_fd(name); + if (fd != -1) { + return fd; + } + + log_debug_p(gc)("Falling back to searching for an accessible mount point"); + } + + return create_file_fd(name); +} + +bool XPhysicalMemoryBacking::is_initialized() const { + return _initialized; +} + +void XPhysicalMemoryBacking::warn_available_space(size_t max_capacity) const { + // Note that the available space on a tmpfs or a hugetlbfs filesystem + // will be zero if no size limit was specified when it was mounted. + if (_available == 0) { + // No size limit set, skip check + log_info_p(gc, init)("Available space on backing filesystem: N/A"); + return; + } + + log_info_p(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M", _available / M); + + // Warn if the filesystem doesn't currently have enough space available to hold + // the max heap size. The max heap size will be capped if we later hit this limit + // when trying to expand the heap. + if (_available < max_capacity) { + log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****"); + log_warning_p(gc)("Not enough space available on the backing filesystem to hold the current max Java heap"); + log_warning_p(gc)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem accordingly " + "(available", max_capacity / M); + log_warning_p(gc)("space is currently " SIZE_FORMAT "M). 
Continuing execution with the current filesystem " + "size could", _available / M); + log_warning_p(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to commit memory."); + } +} + +void XPhysicalMemoryBacking::warn_max_map_count(size_t max_capacity) const { + const char* const filename = XFILENAME_PROC_MAX_MAP_COUNT; + FILE* const file = os::fopen(filename, "r"); + if (file == nullptr) { + // Failed to open file, skip check + log_debug_p(gc, init)("Failed to open %s", filename); + return; + } + + size_t actual_max_map_count = 0; + const int result = fscanf(file, SIZE_FORMAT, &actual_max_map_count); + fclose(file); + if (result != 1) { + // Failed to read file, skip check + log_debug_p(gc, init)("Failed to read %s", filename); + return; + } + + // The required max map count is impossible to calculate exactly since subsystems + // other than ZGC are also creating memory mappings, and we have no control over that. + // However, ZGC tends to create the most mappings and dominate the total count. + // In the worst cases, ZGC will map each granule three times, i.e. once per heap view. + // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory. + const size_t required_max_map_count = (max_capacity / XGranuleSize) * 3 * 1.2; + if (actual_max_map_count < required_max_map_count) { + log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****"); + log_warning_p(gc)("The system limit on number of memory mappings per process might be too low for the given"); + log_warning_p(gc)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at", + max_capacity / M, filename); + log_warning_p(gc)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing execution " + "with the current", required_max_map_count, actual_max_map_count); + log_warning_p(gc)("limit could lead to a premature OutOfMemoryError being thrown, due to failure to map memory."); + } +} + +void XPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const { + // Warn if available space is too low + warn_available_space(max_capacity); + + // Warn if max map count is too low + warn_max_map_count(max_capacity); +} + +bool XPhysicalMemoryBacking::is_tmpfs() const { + return _filesystem == TMPFS_MAGIC; +} + +bool XPhysicalMemoryBacking::is_hugetlbfs() const { + return _filesystem == HUGETLBFS_MAGIC; +} + +bool XPhysicalMemoryBacking::tmpfs_supports_transparent_huge_pages() const { + // If the shmem_enabled file exists and is readable then we + // know the kernel supports transparent huge pages for tmpfs. + return access(XFILENAME_SHMEM_ENABLED, R_OK) == 0; +} + +XErrno XPhysicalMemoryBacking::fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const { + // On hugetlbfs, mapping a file segment will fail immediately, without + // the need to touch the mapped pages first, if there aren't enough huge + // pages available to back the mapping. + void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset); + if (addr == MAP_FAILED) { + // Failed + return errno; + } + + // Once mapped, the huge pages are only reserved. We need to touch them + // to associate them with the file segment. Note that we can not punch + // hole in file segments which only have reserved pages. + if (touch) { + char* const start = (char*)addr; + char* const end = start + length; + os::pretouch_memory(start, end, _block_size); + } + + // Unmap again. 
From now on, the huge pages that were mapped are allocated + // to this file. There's no risk of getting a SIGBUS when mapping and + // touching these pages again. + if (munmap(addr, length) == -1) { + // Failed + return errno; + } + + // Success + return 0; +} + +static bool safe_touch_mapping(void* addr, size_t length, size_t page_size) { + char* const start = (char*)addr; + char* const end = start + length; + + // Touching a mapping that can't be backed by memory will generate a + // SIGBUS. By using SafeFetch32 any SIGBUS will be safely caught and + // handled. On tmpfs, doing a fetch (rather than a store) is enough + // to cause backing pages to be allocated (there's no zero-page to + // worry about). + for (char *p = start; p < end; p += page_size) { + if (SafeFetch32((int*)p, -1) == -1) { + // Failed + return false; + } + } + + // Success + return true; +} + +XErrno XPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const { + // On tmpfs, we need to touch the mapped pages to figure out + // if there are enough pages available to back the mapping. + void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset); + if (addr == MAP_FAILED) { + // Failed + return errno; + } + + // Advise mapping to use transparent huge pages + os::realign_memory((char*)addr, length, XGranuleSize); + + // Touch the mapping (safely) to make sure it's backed by memory + const bool backed = safe_touch_mapping(addr, length, _block_size); + + // Unmap again. If successfully touched, the backing memory will + // be allocated to this file. There's no risk of getting a SIGBUS + // when mapping and touching these pages again. + if (munmap(addr, length) == -1) { + // Failed + return errno; + } + + // Success + return backed ? 0 : ENOMEM; +} + +XErrno XPhysicalMemoryBacking::fallocate_compat_pwrite(size_t offset, size_t length) const { + uint8_t data = 0; + + // Allocate backing memory by writing to each block + for (size_t pos = offset; pos < offset + length; pos += _block_size) { + if (pwrite(_fd, &data, sizeof(data), pos) == -1) { + // Failed + return errno; + } + } + + // Success + return 0; +} + +XErrno XPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) const { + // fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs + // since Linux 4.3. When fallocate(2) is not supported we emulate it using + // mmap/munmap (for hugetlbfs and tmpfs with transparent huge pages) or pwrite + // (for tmpfs without transparent huge pages and other filesystem types). + if (XLargePages::is_explicit()) { + return fallocate_compat_mmap_hugetlbfs(offset, length, false /* touch */); + } else if (XLargePages::is_transparent()) { + return fallocate_compat_mmap_tmpfs(offset, length); + } else { + return fallocate_compat_pwrite(offset, length); + } +} + +XErrno XPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) const { + const int mode = 0; // Allocate + const int res = XSyscall::fallocate(_fd, mode, offset, length); + if (res == -1) { + // Failed + return errno; + } + + // Success + return 0; +} + +XErrno XPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) const { + // Using compat mode is more efficient when allocating space on hugetlbfs. + // Note that allocating huge pages this way will only reserve them, and not + // associate them with segments of the file. We must guarantee that we at + // some point touch these segments, otherwise we can not punch hole in them. 
+ // Also note that we need to use compat mode when using transparent huge pages, + // since we need to use madvise(2) on the mapping before the page is allocated. + if (z_fallocate_supported && !XLargePages::is_enabled()) { + const XErrno err = fallocate_fill_hole_syscall(offset, length); + if (!err) { + // Success + return 0; + } + + if (err != ENOSYS && err != EOPNOTSUPP) { + // Failed + return err; + } + + // Not supported + log_debug_p(gc)("Falling back to fallocate() compatibility mode"); + z_fallocate_supported = false; + } + + return fallocate_fill_hole_compat(offset, length); +} + +XErrno XPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) const { + if (XLargePages::is_explicit()) { + // We can only punch hole in pages that have been touched. Non-touched + // pages are only reserved, and not associated with any specific file + // segment. We don't know which pages have been previously touched, so + // we always touch them here to guarantee that we can punch hole. + const XErrno err = fallocate_compat_mmap_hugetlbfs(offset, length, true /* touch */); + if (err) { + // Failed + return err; + } + } + + const int mode = FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE; + if (XSyscall::fallocate(_fd, mode, offset, length) == -1) { + // Failed + return errno; + } + + // Success + return 0; +} + +XErrno XPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) const { + // Try first half + const size_t offset0 = offset; + const size_t length0 = align_up(length / 2, _block_size); + const XErrno err0 = fallocate(punch_hole, offset0, length0); + if (err0) { + return err0; + } + + // Try second half + const size_t offset1 = offset0 + length0; + const size_t length1 = length - length0; + const XErrno err1 = fallocate(punch_hole, offset1, length1); + if (err1) { + return err1; + } + + // Success + return 0; +} + +XErrno XPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) const { + assert(is_aligned(offset, _block_size), "Invalid offset"); + assert(is_aligned(length, _block_size), "Invalid length"); + + const XErrno err = punch_hole ? fallocate_punch_hole(offset, length) : fallocate_fill_hole(offset, length); + if (err == EINTR && length > _block_size) { + // Calling fallocate(2) with a large length can take a long time to + // complete. When running profilers, such as VTune, this syscall will + // be constantly interrupted by signals. Expanding the file in smaller + // steps avoids this problem. + return split_and_fallocate(punch_hole, offset, length); + } + + return err; +} + +bool XPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const { + log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", + offset / M, (offset + length) / M, length / M); + +retry: + const XErrno err = fallocate(false /* punch_hole */, offset, length); + if (err) { + if (err == ENOSPC && !is_init_completed() && XLargePages::is_explicit() && z_fallocate_hugetlbfs_attempts-- > 0) { + // If we fail to allocate during initialization, due to lack of space on + // the hugetlbfs filesystem, then we wait and retry a few times before + // giving up. Otherwise there is a risk that running JVMs back-to-back + // will fail, since there is a delay between process termination and the + // huge pages owned by that process being returned to the huge page pool + // and made available for new allocations. 
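// Illustrative sketch (not part of the patch): inspecting the explicit huge
// page pool that the retry below is waiting on. The field name comes from the
// standard /proc/meminfo format; the helper name is made up for the example.
// For scale, a 16G heap of 2M granules needs 16G / 2M = 8192 huge pages.
#include <stdio.h>

static long huge_pages_free() {
  FILE* const f = fopen("/proc/meminfo", "r");
  if (f == nullptr) {
    return -1;
  }
  char line[128];
  long pages = -1;
  while (fgets(line, sizeof(line), f) != nullptr) {
    if (sscanf(line, "HugePages_Free: %ld", &pages) == 1) {
      break;
    }
  }
  fclose(f);
  return pages;
}

int main() {
  printf("HugePages_Free: %ld\n", huge_pages_free());
  return 0;
}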
+ log_debug_p(gc, init)("Failed to commit memory (%s), retrying", err.to_string()); + + // Wait and retry in one second, in the hope that huge pages will be + // available by then. + sleep(1); + goto retry; + } + + // Failed + log_error_p(gc)("Failed to commit memory (%s)", err.to_string()); + return false; + } + + // Success + return true; +} + +static int offset_to_node(size_t offset) { + const GrowableArray* mapping = os::Linux::numa_nindex_to_node(); + const size_t nindex = (offset >> XGranuleSizeShift) % mapping->length(); + return mapping->at((int)nindex); +} + +size_t XPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) const { + size_t committed = 0; + + // Commit one granule at a time, so that each granule + // can be allocated from a different preferred node. + while (committed < length) { + const size_t granule_offset = offset + committed; + + // Setup NUMA policy to allocate memory from a preferred node + os::Linux::numa_set_preferred(offset_to_node(granule_offset)); + + if (!commit_inner(granule_offset, XGranuleSize)) { + // Failed + break; + } + + committed += XGranuleSize; + } + + // Restore NUMA policy + os::Linux::numa_set_preferred(-1); + + return committed; +} + +size_t XPhysicalMemoryBacking::commit_default(size_t offset, size_t length) const { + // Try to commit the whole region + if (commit_inner(offset, length)) { + // Success + return length; + } + + // Failed, try to commit as much as possible + size_t start = offset; + size_t end = offset + length; + + for (;;) { + length = align_down((end - start) / 2, XGranuleSize); + if (length < XGranuleSize) { + // Done, don't commit more + return start - offset; + } + + if (commit_inner(start, length)) { + // Success, try commit more + start += length; + } else { + // Failed, try commit less + end -= length; + } + } +} + +size_t XPhysicalMemoryBacking::commit(size_t offset, size_t length) const { + if (XNUMA::is_enabled() && !XLargePages::is_explicit()) { + // To get granule-level NUMA interleaving when using non-large pages, + // we must explicitly interleave the memory at commit/fallocate time. + return commit_numa_interleaved(offset, length); + } + + return commit_default(offset, length); +} + +size_t XPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const { + log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", + offset / M, (offset + length) / M, length / M); + + const XErrno err = fallocate(true /* punch_hole */, offset, length); + if (err) { + log_error(gc)("Failed to uncommit memory (%s)", err.to_string()); + return 0; + } + + return length; +} + +void XPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const { + const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _fd, offset); + if (res == MAP_FAILED) { + XErrno err; + fatal("Failed to map memory (%s)", err.to_string()); + } +} + +void XPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const { + // Note that we must keep the address space reservation intact and just detach + // the backing memory. For this reason we map a new anonymous, non-accessible + // and non-reserved page over the mapping instead of actually unmapping. 
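  // The call below relies on MAP_FIXED to atomically replace the existing file
  // mapping at this address: PROT_NONE makes the range inaccessible,
  // MAP_NORESERVE avoids charging swap space for it, and MAP_ANONYMOUS with
  // MAP_PRIVATE detaches the range from _fd without leaving a hole in the
  // address space reservation.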
+ const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0); + if (res == MAP_FAILED) { + XErrno err; + fatal("Failed to map memory (%s)", err.to_string()); + } +} diff --git a/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.hpp b/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.hpp new file mode 100644 index 0000000000000..253a3f87ef427 --- /dev/null +++ b/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.hpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef OS_LINUX_GC_X_XPHYSICALMEMORYBACKING_LINUX_HPP +#define OS_LINUX_GC_X_XPHYSICALMEMORYBACKING_LINUX_HPP + +class XErrno; + +class XPhysicalMemoryBacking { +private: + int _fd; + size_t _size; + uint64_t _filesystem; + size_t _block_size; + size_t _available; + bool _initialized; + + void warn_available_space(size_t max_capacity) const; + void warn_max_map_count(size_t max_capacity) const; + + int create_mem_fd(const char* name) const; + int create_file_fd(const char* name) const; + int create_fd(const char* name) const; + + bool is_tmpfs() const; + bool is_hugetlbfs() const; + bool tmpfs_supports_transparent_huge_pages() const; + + XErrno fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const; + XErrno fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const; + XErrno fallocate_compat_pwrite(size_t offset, size_t length) const; + XErrno fallocate_fill_hole_compat(size_t offset, size_t length) const; + XErrno fallocate_fill_hole_syscall(size_t offset, size_t length) const; + XErrno fallocate_fill_hole(size_t offset, size_t length) const; + XErrno fallocate_punch_hole(size_t offset, size_t length) const; + XErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length) const; + XErrno fallocate(bool punch_hole, size_t offset, size_t length) const; + + bool commit_inner(size_t offset, size_t length) const; + size_t commit_numa_interleaved(size_t offset, size_t length) const; + size_t commit_default(size_t offset, size_t length) const; + +public: + XPhysicalMemoryBacking(size_t max_capacity); + + bool is_initialized() const; + + void warn_commit_limits(size_t max_capacity) const; + + size_t commit(size_t offset, size_t length) const; + size_t uncommit(size_t offset, size_t length) const; + + void map(uintptr_t addr, size_t size, uintptr_t offset) const; + void unmap(uintptr_t addr, size_t size) const; +}; + +#endif // OS_LINUX_GC_X_XPHYSICALMEMORYBACKING_LINUX_HPP diff --git 
a/src/hotspot/os/linux/gc/x/xSyscall_linux.cpp b/src/hotspot/os/linux/gc/x/xSyscall_linux.cpp new file mode 100644 index 0000000000000..6035eaae61bd1 --- /dev/null +++ b/src/hotspot/os/linux/gc/x/xSyscall_linux.cpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xSyscall_linux.hpp" +#include OS_CPU_HEADER(gc/x/xSyscall) + +#include + +int XSyscall::memfd_create(const char *name, unsigned int flags) { + return syscall(SYS_memfd_create, name, flags); +} + +int XSyscall::fallocate(int fd, int mode, size_t offset, size_t length) { + return syscall(SYS_fallocate, fd, mode, offset, length); +} + +long XSyscall::get_mempolicy(int* mode, unsigned long* nodemask, unsigned long maxnode, void* addr, unsigned long flags) { + return syscall(SYS_get_mempolicy, mode, nodemask, maxnode, addr, flags); +} diff --git a/src/hotspot/os/linux/gc/x/xSyscall_linux.hpp b/src/hotspot/os/linux/gc/x/xSyscall_linux.hpp new file mode 100644 index 0000000000000..f16d2b2ffdcc6 --- /dev/null +++ b/src/hotspot/os/linux/gc/x/xSyscall_linux.hpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef OS_LINUX_GC_X_XSYSCALL_LINUX_HPP +#define OS_LINUX_GC_X_XSYSCALL_LINUX_HPP + +#include "memory/allStatic.hpp" +#include "utilities/globalDefinitions.hpp" + +// Flags for get_mempolicy() +#ifndef MPOL_F_NODE +#define MPOL_F_NODE (1<<0) +#endif +#ifndef MPOL_F_ADDR +#define MPOL_F_ADDR (1<<1) +#endif + +class XSyscall : public AllStatic { +public: + static int memfd_create(const char* name, unsigned int flags); + static int fallocate(int fd, int mode, size_t offset, size_t length); + static long get_mempolicy(int* mode, unsigned long* nodemask, unsigned long maxnode, void* addr, unsigned long flags); +}; + +#endif // OS_LINUX_GC_X_XSYSCALL_LINUX_HPP diff --git a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp index f2fff5781763a..bed82b19148b8 100644 --- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp +++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp @@ -23,6 +23,7 @@ #include "precompiled.hpp" #include "gc/shared/gcLogPrecious.hpp" +#include "gc/z/zAddress.inline.hpp" #include "gc/z/zArray.inline.hpp" #include "gc/z/zErrno.hpp" #include "gc/z/zGlobals.hpp" @@ -385,11 +386,11 @@ bool ZPhysicalMemoryBacking::tmpfs_supports_transparent_huge_pages() const { return access(ZFILENAME_SHMEM_ENABLED, R_OK) == 0; } -ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const { +ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_hugetlbfs(zoffset offset, size_t length, bool touch) const { // On hugetlbfs, mapping a file segment will fail immediately, without // the need to touch the mapped pages first, if there aren't enough huge // pages available to back the mapping. - void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset); + void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, untype(offset)); if (addr == MAP_FAILED) { // Failed return errno; @@ -436,10 +437,10 @@ static bool safe_touch_mapping(void* addr, size_t length, size_t page_size) { return true; } -ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const { +ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(zoffset offset, size_t length) const { // On tmpfs, we need to touch the mapped pages to figure out // if there are enough pages available to back the mapping. - void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset); + void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, untype(offset)); if (addr == MAP_FAILED) { // Failed return errno; @@ -463,12 +464,12 @@ ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(size_t offset, size_t return backed ? 
0 : ENOMEM; } -ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(size_t offset, size_t length) const { +ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(zoffset offset, size_t length) const { uint8_t data = 0; // Allocate backing memory by writing to each block - for (size_t pos = offset; pos < offset + length; pos += _block_size) { - if (pwrite(_fd, &data, sizeof(data), pos) == -1) { + for (zoffset pos = offset; pos < offset + length; pos += _block_size) { + if (pwrite(_fd, &data, sizeof(data), untype(pos)) == -1) { // Failed return errno; } @@ -478,7 +479,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(size_t offset, size_t len return 0; } -ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) const { +ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(zoffset offset, size_t length) const { // fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs // since Linux 4.3. When fallocate(2) is not supported we emulate it using // mmap/munmap (for hugetlbfs and tmpfs with transparent huge pages) or pwrite @@ -492,9 +493,9 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t } } -ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) const { +ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(zoffset offset, size_t length) const { const int mode = 0; // Allocate - const int res = ZSyscall::fallocate(_fd, mode, offset, length); + const int res = ZSyscall::fallocate(_fd, mode, untype(offset), length); if (res == -1) { // Failed return errno; @@ -504,7 +505,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t return 0; } -ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) const { +ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(zoffset offset, size_t length) const { // Using compat mode is more efficient when allocating space on hugetlbfs. // Note that allocating huge pages this way will only reserve them, and not // associate them with segments of the file. We must guarantee that we at @@ -531,7 +532,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) return fallocate_fill_hole_compat(offset, length); } -ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) const { +ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(zoffset offset, size_t length) const { if (ZLargePages::is_explicit()) { // We can only punch hole in pages that have been touched. 
Non-touched // pages are only reserved, and not associated with any specific file @@ -545,7 +546,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length } const int mode = FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE; - if (ZSyscall::fallocate(_fd, mode, offset, length) == -1) { + if (ZSyscall::fallocate(_fd, mode, untype(offset), length) == -1) { // Failed return errno; } @@ -554,9 +555,9 @@ ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length return 0; } -ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) const { +ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, zoffset offset, size_t length) const { // Try first half - const size_t offset0 = offset; + const zoffset offset0 = offset; const size_t length0 = align_up(length / 2, _block_size); const ZErrno err0 = fallocate(punch_hole, offset0, length0); if (err0) { @@ -564,7 +565,7 @@ ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offse } // Try second half - const size_t offset1 = offset0 + length0; + const zoffset offset1 = offset0 + length0; const size_t length1 = length - length0; const ZErrno err1 = fallocate(punch_hole, offset1, length1); if (err1) { @@ -575,8 +576,8 @@ ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offse return 0; } -ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) const { - assert(is_aligned(offset, _block_size), "Invalid offset"); +ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, zoffset offset, size_t length) const { + assert(is_aligned(untype(offset), _block_size), "Invalid offset"); assert(is_aligned(length, _block_size), "Invalid length"); const ZErrno err = punch_hole ? fallocate_punch_hole(offset, length) : fallocate_fill_hole(offset, length); @@ -591,9 +592,9 @@ ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t return err; } -bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const { +bool ZPhysicalMemoryBacking::commit_inner(zoffset offset, size_t length) const { log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - offset / M, (offset + length) / M, length / M); + untype(offset) / M, untype(offset + length) / M, length / M); retry: const ZErrno err = fallocate(false /* punch_hole */, offset, length); @@ -622,19 +623,19 @@ bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const { return true; } -static int offset_to_node(size_t offset) { +static int offset_to_node(zoffset offset) { const GrowableArray* mapping = os::Linux::numa_nindex_to_node(); - const size_t nindex = (offset >> ZGranuleSizeShift) % mapping->length(); + const size_t nindex = (untype(offset) >> ZGranuleSizeShift) % mapping->length(); return mapping->at((int)nindex); } -size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) const { +size_t ZPhysicalMemoryBacking::commit_numa_interleaved(zoffset offset, size_t length) const { size_t committed = 0; // Commit one granule at a time, so that each granule // can be allocated from a different preferred node. 
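// Illustrative sketch (not part of the patch): the same granule-at-a-time
// preferred-node idiom expressed directly against libnuma, which the
// os::Linux::numa_* wrappers used below are built on. Link with -lnuma.
// The 2M granule size and the round-robin node choice are assumptions made
// only for this example.
#include <numa.h>
#include <string.h>

static void first_touch_interleaved(char* base, size_t length) {
  const size_t granule = 2 * 1024 * 1024;
  if (numa_available() == -1) {
    return; // No NUMA support, nothing to interleave
  }
  const int nodes = numa_num_configured_nodes();
  for (size_t done = 0; done + granule <= length; done += granule) {
    // Prefer a node for this granule, then first-touch it so the kernel
    // allocates its pages from that node.
    numa_set_preferred((int)((done / granule) % nodes));
    memset(base + done, 0, granule);
  }
  numa_set_preferred(-1); // Restore the default allocation policy
}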
while (committed < length) { - const size_t granule_offset = offset + committed; + const zoffset granule_offset = offset + committed; // Setup NUMA policy to allocate memory from a preferred node os::Linux::numa_set_preferred(offset_to_node(granule_offset)); @@ -653,7 +654,7 @@ size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t len return committed; } -size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) const { +size_t ZPhysicalMemoryBacking::commit_default(zoffset offset, size_t length) const { // Try to commit the whole region if (commit_inner(offset, length)) { // Success @@ -661,8 +662,8 @@ size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) cons } // Failed, try to commit as much as possible - size_t start = offset; - size_t end = offset + length; + zoffset start = offset; + zoffset end = offset + length; for (;;) { length = align_down((end - start) / 2, ZGranuleSize); @@ -681,7 +682,7 @@ size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) cons } } -size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const { +size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) const { if (ZNUMA::is_enabled() && !ZLargePages::is_explicit()) { // To get granule-level NUMA interleaving when using non-large pages, // we must explicitly interleave the memory at commit/fallocate time. @@ -691,9 +692,9 @@ size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const { return commit_default(offset, length); } -size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const { +size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) const { log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - offset / M, (offset + length) / M, length / M); + untype(offset) / M, untype(offset + length) / M, length / M); const ZErrno err = fallocate(true /* punch_hole */, offset, length); if (err) { @@ -704,19 +705,19 @@ size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const { return length; } -void ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const { - const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _fd, offset); +void ZPhysicalMemoryBacking::map(zaddress_unsafe addr, size_t size, zoffset offset) const { + const void* const res = mmap((void*)untype(addr), size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _fd, untype(offset)); if (res == MAP_FAILED) { ZErrno err; fatal("Failed to map memory (%s)", err.to_string()); } } -void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const { +void ZPhysicalMemoryBacking::unmap(zaddress_unsafe addr, size_t size) const { // Note that we must keep the address space reservation intact and just detach // the backing memory. For this reason we map a new anonymous, non-accessible // and non-reserved page over the mapping instead of actually unmapping. 
- const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0); + const void* const res = mmap((void*)untype(addr), size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0); if (res == MAP_FAILED) { ZErrno err; fatal("Failed to map memory (%s)", err.to_string()); diff --git a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp index 655c5a4209b8b..59c00ad01bf60 100644 --- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp +++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,8 @@ #ifndef OS_LINUX_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_HPP #define OS_LINUX_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_HPP +#include "gc/z/zAddress.hpp" + class ZErrno; class ZPhysicalMemoryBacking { @@ -46,19 +48,19 @@ class ZPhysicalMemoryBacking { bool is_hugetlbfs() const; bool tmpfs_supports_transparent_huge_pages() const; - ZErrno fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const; - ZErrno fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const; - ZErrno fallocate_compat_pwrite(size_t offset, size_t length) const; - ZErrno fallocate_fill_hole_compat(size_t offset, size_t length) const; - ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length) const; - ZErrno fallocate_fill_hole(size_t offset, size_t length) const; - ZErrno fallocate_punch_hole(size_t offset, size_t length) const; - ZErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length) const; - ZErrno fallocate(bool punch_hole, size_t offset, size_t length) const; + ZErrno fallocate_compat_mmap_hugetlbfs(zoffset offset, size_t length, bool touch) const; + ZErrno fallocate_compat_mmap_tmpfs(zoffset offset, size_t length) const; + ZErrno fallocate_compat_pwrite(zoffset offset, size_t length) const; + ZErrno fallocate_fill_hole_compat(zoffset offset, size_t length) const; + ZErrno fallocate_fill_hole_syscall(zoffset offset, size_t length) const; + ZErrno fallocate_fill_hole(zoffset offset, size_t length) const; + ZErrno fallocate_punch_hole(zoffset offset, size_t length) const; + ZErrno split_and_fallocate(bool punch_hole, zoffset offset, size_t length) const; + ZErrno fallocate(bool punch_hole, zoffset offset, size_t length) const; - bool commit_inner(size_t offset, size_t length) const; - size_t commit_numa_interleaved(size_t offset, size_t length) const; - size_t commit_default(size_t offset, size_t length) const; + bool commit_inner(zoffset offset, size_t length) const; + size_t commit_numa_interleaved(zoffset offset, size_t length) const; + size_t commit_default(zoffset offset, size_t length) const; public: ZPhysicalMemoryBacking(size_t max_capacity); @@ -67,11 +69,11 @@ class ZPhysicalMemoryBacking { void warn_commit_limits(size_t max_capacity) const; - size_t commit(size_t offset, size_t length) const; - size_t uncommit(size_t offset, size_t length) const; + size_t commit(zoffset offset, size_t length) const; + size_t uncommit(zoffset offset, size_t length) const; - void map(uintptr_t addr, size_t size, uintptr_t offset) const; - void unmap(uintptr_t addr, size_t size) const; + void map(zaddress_unsafe 
addr, size_t size, zoffset offset) const; + void unmap(zaddress_unsafe addr, size_t size) const; }; #endif // OS_LINUX_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_HPP diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp index b6d2721343cfc..2313f371db95d 100644 --- a/src/hotspot/os/linux/os_linux.cpp +++ b/src/hotspot/os/linux/os_linux.cpp @@ -216,15 +216,8 @@ static bool suppress_primordial_thread_resolution = false; // utility functions -julong os::available_memory() { - return Linux::available_memory(); -} - -julong os::Linux::available_memory() { - // values in struct sysinfo are "unsigned long" - struct sysinfo si; - julong avail_mem; - +julong os::Linux::available_memory_in_container() { + julong avail_mem = static_cast(-1L); if (OSContainer::is_containerized()) { jlong mem_limit = OSContainer::memory_limit_in_bytes(); jlong mem_usage; @@ -233,17 +226,59 @@ julong os::Linux::available_memory() { } if (mem_limit > 0 && mem_usage > 0) { avail_mem = mem_limit > mem_usage ? (julong)mem_limit - (julong)mem_usage : 0; - log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem); - return avail_mem; } } + return avail_mem; +} + +julong os::available_memory() { + return Linux::available_memory(); +} + +julong os::Linux::available_memory() { + julong avail_mem = available_memory_in_container(); + if (avail_mem != static_cast(-1L)) { + log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem); + return avail_mem; + } - sysinfo(&si); - avail_mem = (julong)si.freeram * si.mem_unit; + FILE *fp = os::fopen("/proc/meminfo", "r"); + if (fp != nullptr) { + char buf[80]; + do { + if (fscanf(fp, "MemAvailable: " JULONG_FORMAT " kB", &avail_mem) == 1) { + avail_mem *= K; + break; + } + } while (fgets(buf, sizeof(buf), fp) != nullptr); + fclose(fp); + } + if (avail_mem == static_cast(-1L)) { + avail_mem = free_memory(); + } log_trace(os)("available memory: " JULONG_FORMAT, avail_mem); return avail_mem; } +julong os::free_memory() { + return Linux::free_memory(); +} + +julong os::Linux::free_memory() { + // values in struct sysinfo are "unsigned long" + struct sysinfo si; + julong free_mem = available_memory_in_container(); + if (free_mem != static_cast(-1L)) { + log_trace(os)("free container memory: " JULONG_FORMAT, free_mem); + return free_mem; + } + + sysinfo(&si); + free_mem = (julong)si.freeram * si.mem_unit; + log_trace(os)("free memory: " JULONG_FORMAT, free_mem); + return free_mem; +} + julong os::physical_memory() { jlong phys_mem = 0; if (OSContainer::is_containerized()) { @@ -851,7 +886,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, assert(thread->osthread() == nullptr, "caller responsible"); // Allocate the OSThread object - OSThread* osthread = new OSThread(); + OSThread* osthread = new (std::nothrow) OSThread(); if (osthread == nullptr) { return false; } @@ -975,7 +1010,7 @@ bool os::create_attached_thread(JavaThread* thread) { #endif // Allocate the OSThread object - OSThread* osthread = new OSThread(); + OSThread* osthread = new (std::nothrow) OSThread(); if (osthread == nullptr) { return false; diff --git a/src/hotspot/os/linux/os_linux.hpp b/src/hotspot/os/linux/os_linux.hpp index e33a1af10725a..a7cf69f3164c8 100644 --- a/src/hotspot/os/linux/os_linux.hpp +++ b/src/hotspot/os/linux/os_linux.hpp @@ -51,12 +51,16 @@ class os::Linux { static size_t _default_large_page_size; + static julong available_memory_in_container(); + protected: static julong _physical_memory; static pthread_t _main_thread; static julong 
available_memory(); + static julong free_memory(); + static int active_processor_count(); static void initialize_system_info(); diff --git a/src/hotspot/os/posix/gc/x/xArguments_posix.cpp b/src/hotspot/os/posix/gc/x/xArguments_posix.cpp new file mode 100644 index 0000000000000..6df0a9bd07460 --- /dev/null +++ b/src/hotspot/os/posix/gc/x/xArguments_posix.cpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xArguments.hpp" + +bool XArguments::is_os_supported() { + return true; +} diff --git a/src/hotspot/os/posix/gc/x/xInitialize_posix.cpp b/src/hotspot/os/posix/gc/x/xInitialize_posix.cpp new file mode 100644 index 0000000000000..acf71e9890178 --- /dev/null +++ b/src/hotspot/os/posix/gc/x/xInitialize_posix.cpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xInitialize.hpp" + +void XInitialize::pd_initialize() { + // Does nothing +} diff --git a/src/hotspot/os/posix/gc/x/xUtils_posix.cpp b/src/hotspot/os/posix/gc/x/xUtils_posix.cpp new file mode 100644 index 0000000000000..eee3e5cfbe60d --- /dev/null +++ b/src/hotspot/os/posix/gc/x/xUtils_posix.cpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xUtils.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" + +#include + +uintptr_t XUtils::alloc_aligned(size_t alignment, size_t size) { + void* res = nullptr; + + // Use raw posix_memalign as long as we have no wrapper for it + ALLOW_C_FUNCTION(::posix_memalign, int rc = posix_memalign(&res, alignment, size);) + if (rc != 0) { + fatal("posix_memalign() failed"); + } + + memset(res, 0, size); + + return (uintptr_t)res; +} diff --git a/src/hotspot/os/posix/gc/x/xVirtualMemory_posix.cpp b/src/hotspot/os/posix/gc/x/xVirtualMemory_posix.cpp new file mode 100644 index 0000000000000..e2422eb0978fc --- /dev/null +++ b/src/hotspot/os/posix/gc/x/xVirtualMemory_posix.cpp @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/x/xAddress.inline.hpp" +#include "gc/x/xVirtualMemory.hpp" +#include "logging/log.hpp" + +#include +#include + +void XVirtualMemoryManager::pd_initialize_before_reserve() { + // Does nothing +} + +void XVirtualMemoryManager::pd_initialize_after_reserve() { + // Does nothing +} + +bool XVirtualMemoryManager::pd_reserve(uintptr_t addr, size_t size) { + const uintptr_t res = (uintptr_t)mmap((void*)addr, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0); + if (res == (uintptr_t)MAP_FAILED) { + // Failed to reserve memory + return false; + } + + if (res != addr) { + // Failed to reserve memory at the requested address + munmap((void*)res, size); + return false; + } + + // Success + return true; +} + +void XVirtualMemoryManager::pd_unreserve(uintptr_t addr, size_t size) { + const int res = munmap((void*)addr, size); + assert(res == 0, "Failed to unmap memory"); +} diff --git a/src/hotspot/os/posix/gc/z/zArguments_posix.cpp b/src/hotspot/os/posix/gc/z/zArguments_posix.cpp index fd91c6a7d7bc1..4e6d43b16e918 100644 --- a/src/hotspot/os/posix/gc/z/zArguments_posix.cpp +++ b/src/hotspot/os/posix/gc/z/zArguments_posix.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,6 @@ #include "precompiled.hpp" #include "gc/z/zArguments.hpp" -bool ZArguments::is_os_supported() const { +bool ZArguments::is_os_supported() { return true; } diff --git a/src/hotspot/os/posix/gc/z/zVirtualMemory_posix.cpp b/src/hotspot/os/posix/gc/z/zVirtualMemory_posix.cpp index 19dfed891daee..936e734e8ff07 100644 --- a/src/hotspot/os/posix/gc/z/zVirtualMemory_posix.cpp +++ b/src/hotspot/os/posix/gc/z/zVirtualMemory_posix.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -37,16 +37,16 @@ void ZVirtualMemoryManager::pd_initialize_after_reserve() { // Does nothing } -bool ZVirtualMemoryManager::pd_reserve(uintptr_t addr, size_t size) { - const uintptr_t res = (uintptr_t)mmap((void*)addr, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0); - if (res == (uintptr_t)MAP_FAILED) { +bool ZVirtualMemoryManager::pd_reserve(zaddress_unsafe addr, size_t size) { + void* const res = mmap((void*)untype(addr), size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0); + if (res == MAP_FAILED) { // Failed to reserve memory return false; } - if (res != addr) { + if (res != (void*)untype(addr)) { // Failed to reserve memory at the requested address - munmap((void*)res, size); + munmap(res, size); return false; } @@ -54,7 +54,7 @@ bool ZVirtualMemoryManager::pd_reserve(uintptr_t addr, size_t size) { return true; } -void ZVirtualMemoryManager::pd_unreserve(uintptr_t addr, size_t size) { - const int res = munmap((void*)addr, size); +void ZVirtualMemoryManager::pd_unreserve(zaddress_unsafe addr, size_t size) { + const int res = munmap((void*)untype(addr), size); assert(res == 0, "Failed to unmap memory"); } diff --git a/src/hotspot/os/windows/gc/x/xArguments_windows.cpp b/src/hotspot/os/windows/gc/x/xArguments_windows.cpp new file mode 100644 index 0000000000000..fc5f7eccb911f --- /dev/null +++ b/src/hotspot/os/windows/gc/x/xArguments_windows.cpp @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xArguments.hpp" +#include "gc/x/xSyscall_windows.hpp" + +bool XArguments::is_os_supported() { + return XSyscall::is_supported(); +} diff --git a/src/hotspot/os/windows/gc/x/xInitialize_windows.cpp b/src/hotspot/os/windows/gc/x/xInitialize_windows.cpp new file mode 100644 index 0000000000000..99f6432803326 --- /dev/null +++ b/src/hotspot/os/windows/gc/x/xInitialize_windows.cpp @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xInitialize.hpp" +#include "gc/x/xSyscall_windows.hpp" + +void XInitialize::pd_initialize() { + XSyscall::initialize(); +} diff --git a/src/hotspot/os/windows/gc/x/xLargePages_windows.cpp b/src/hotspot/os/windows/gc/x/xLargePages_windows.cpp new file mode 100644 index 0000000000000..20b3c4911fc68 --- /dev/null +++ b/src/hotspot/os/windows/gc/x/xLargePages_windows.cpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/x/xLargePages.hpp" +#include "gc/x/xSyscall_windows.hpp" +#include "runtime/globals.hpp" + +void XLargePages::pd_initialize() { + if (UseLargePages) { + if (XSyscall::is_large_pages_supported()) { + _state = Explicit; + return; + } + log_info_p(gc, init)("Shared large pages not supported on this OS version"); + } + + _state = Disabled; +} diff --git a/src/hotspot/os/windows/gc/x/xMapper_windows.cpp b/src/hotspot/os/windows/gc/x/xMapper_windows.cpp new file mode 100644 index 0000000000000..e69b6ec56e293 --- /dev/null +++ b/src/hotspot/os/windows/gc/x/xMapper_windows.cpp @@ -0,0 +1,310 @@ +/* + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
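XLargePages::pd_initialize() above is deliberately non-fatal: with -XX:+UseLargePages on a Windows build that lacks the newer section APIs it just logs and falls back to _state = Disabled. That state is what later selects the physical-memory backing (see select_impl() in xPhysicalMemoryBacking_windows.cpp further down). A hedged sketch of a consumer; only XLargePages::is_enabled() is taken from this patch, the helper itself is illustrative:

#include "gc/x/xLargePages.inline.hpp"

static bool use_awe_backing() {
  // Mirrors select_impl() later in the patch: AWE-backed large pages when the
  // state is Explicit, per-granule paging-file mappings otherwise.
  return XLargePages::is_enabled();
}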
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xMapper_windows.hpp" +#include "gc/x/xSyscall_windows.hpp" +#include "logging/log.hpp" +#include "utilities/debug.hpp" + +#include + +// Memory reservation, commit, views, and placeholders. +// +// To be able to up-front reserve address space for the heap views, and later +// multi-map the heap views to the same physical memory, without ever losing the +// reservation of the reserved address space, we use "placeholders". +// +// These placeholders block out the address space from being used by other parts +// of the process. To commit memory in this address space, the placeholder must +// be replaced by anonymous memory, or replaced by mapping a view against a +// paging file mapping. We use the later to support multi-mapping. +// +// We want to be able to dynamically commit and uncommit the physical memory of +// the heap (and also unmap ZPages), in granules of ZGranuleSize bytes. There is +// no way to grow and shrink the committed memory of a paging file mapping. +// Therefore, we create multiple granule-sized page file mappings. The memory is +// committed by creating a page file mapping, map a view against it, commit the +// memory, unmap the view. The memory will stay committed until all views are +// unmapped, and the paging file mapping handle is closed. +// +// When replacing a placeholder address space reservation with a mapped view +// against a paging file mapping, the virtual address space must exactly match +// an existing placeholder's address and size. Therefore we only deal with +// granule-sized placeholders at this layer. Higher layers that keep track of +// reserved available address space can (and will) coalesce placeholders, but +// they will be split before being used. + +#define fatal_error(msg, addr, size) \ + fatal(msg ": " PTR_FORMAT " " SIZE_FORMAT "M (%d)", \ + (addr), (size) / M, GetLastError()) + +uintptr_t XMapper::reserve(uintptr_t addr, size_t size) { + void* const res = XSyscall::VirtualAlloc2( + GetCurrentProcess(), // Process + (void*)addr, // BaseAddress + size, // Size + MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, // AllocationType + PAGE_NOACCESS, // PageProtection + nullptr, // ExtendedParameters + 0 // ParameterCount + ); + + // Caller responsible for error handling + return (uintptr_t)res; +} + +void XMapper::unreserve(uintptr_t addr, size_t size) { + const bool res = XSyscall::VirtualFreeEx( + GetCurrentProcess(), // hProcess + (void*)addr, // lpAddress + size, // dwSize + MEM_RELEASE // dwFreeType + ); + + if (!res) { + fatal_error("Failed to unreserve memory", addr, size); + } +} + +HANDLE XMapper::create_paging_file_mapping(size_t size) { + // Create mapping with SEC_RESERVE instead of SEC_COMMIT. + // + // We use MapViewOfFile3 for two different reasons: + // 1) When committing memory for the created paging file + // 2) When mapping a view of the memory created in (2) + // + // The non-platform code is only setup to deal with out-of-memory + // errors in (1). By using SEC_RESERVE, we prevent MapViewOfFile3 + // from failing because of "commit limit" checks. 
To actually commit + // memory in (1), a call to VirtualAlloc2 is done. + + HANDLE const res = XSyscall::CreateFileMappingW( + INVALID_HANDLE_VALUE, // hFile + nullptr, // lpFileMappingAttribute + PAGE_READWRITE | SEC_RESERVE, // flProtect + size >> 32, // dwMaximumSizeHigh + size & 0xFFFFFFFF, // dwMaximumSizeLow + nullptr // lpName + ); + + // Caller responsible for error handling + return res; +} + +bool XMapper::commit_paging_file_mapping(HANDLE file_handle, uintptr_t file_offset, size_t size) { + const uintptr_t addr = map_view_no_placeholder(file_handle, file_offset, size); + if (addr == 0) { + log_error(gc)("Failed to map view of paging file mapping (%d)", GetLastError()); + return false; + } + + const uintptr_t res = commit(addr, size); + if (res != addr) { + log_error(gc)("Failed to commit memory (%d)", GetLastError()); + } + + unmap_view_no_placeholder(addr, size); + + return res == addr; +} + +uintptr_t XMapper::map_view_no_placeholder(HANDLE file_handle, uintptr_t file_offset, size_t size) { + void* const res = XSyscall::MapViewOfFile3( + file_handle, // FileMapping + GetCurrentProcess(), // ProcessHandle + nullptr, // BaseAddress + file_offset, // Offset + size, // ViewSize + 0, // AllocationType + PAGE_NOACCESS, // PageProtection + nullptr, // ExtendedParameters + 0 // ParameterCount + ); + + // Caller responsible for error handling + return (uintptr_t)res; +} + +void XMapper::unmap_view_no_placeholder(uintptr_t addr, size_t size) { + const bool res = XSyscall::UnmapViewOfFile2( + GetCurrentProcess(), // ProcessHandle + (void*)addr, // BaseAddress + 0 // UnmapFlags + ); + + if (!res) { + fatal_error("Failed to unmap memory", addr, size); + } +} + +uintptr_t XMapper::commit(uintptr_t addr, size_t size) { + void* const res = XSyscall::VirtualAlloc2( + GetCurrentProcess(), // Process + (void*)addr, // BaseAddress + size, // Size + MEM_COMMIT, // AllocationType + PAGE_NOACCESS, // PageProtection + nullptr, // ExtendedParameters + 0 // ParameterCount + ); + + // Caller responsible for error handling + return (uintptr_t)res; +} + +HANDLE XMapper::create_and_commit_paging_file_mapping(size_t size) { + HANDLE const file_handle = create_paging_file_mapping(size); + if (file_handle == 0) { + log_error(gc)("Failed to create paging file mapping (%d)", GetLastError()); + return 0; + } + + const bool res = commit_paging_file_mapping(file_handle, 0 /* file_offset */, size); + if (!res) { + close_paging_file_mapping(file_handle); + return 0; + } + + return file_handle; +} + +void XMapper::close_paging_file_mapping(HANDLE file_handle) { + const bool res = CloseHandle( + file_handle // hObject + ); + + if (!res) { + fatal("Failed to close paging file handle (%d)", GetLastError()); + } +} + +HANDLE XMapper::create_shared_awe_section() { + MEM_EXTENDED_PARAMETER parameter = { 0 }; + parameter.Type = MemSectionExtendedParameterUserPhysicalFlags; + parameter.ULong64 = 0; + + HANDLE section = XSyscall::CreateFileMapping2( + INVALID_HANDLE_VALUE, // File + nullptr, // SecurityAttributes + SECTION_MAP_READ | SECTION_MAP_WRITE, // DesiredAccess + PAGE_READWRITE, // PageProtection + SEC_RESERVE | SEC_LARGE_PAGES, // AllocationAttributes + 0, // MaximumSize + nullptr, // Name + ¶meter, // ExtendedParameters + 1 // ParameterCount + ); + + if (section == nullptr) { + fatal("Could not create shared AWE section (%d)", GetLastError()); + } + + return section; +} + +uintptr_t XMapper::reserve_for_shared_awe(HANDLE awe_section, uintptr_t addr, size_t size) { + MEM_EXTENDED_PARAMETER parameter = { 0 }; + 
parameter.Type = MemExtendedParameterUserPhysicalHandle; + parameter.Handle = awe_section; + + void* const res = XSyscall::VirtualAlloc2( + GetCurrentProcess(), // Process + (void*)addr, // BaseAddress + size, // Size + MEM_RESERVE | MEM_PHYSICAL, // AllocationType + PAGE_READWRITE, // PageProtection + ¶meter, // ExtendedParameters + 1 // ParameterCount + ); + + // Caller responsible for error handling + return (uintptr_t)res; +} + +void XMapper::unreserve_for_shared_awe(uintptr_t addr, size_t size) { + bool res = VirtualFree( + (void*)addr, // lpAddress + 0, // dwSize + MEM_RELEASE // dwFreeType + ); + + if (!res) { + fatal("Failed to unreserve memory: " PTR_FORMAT " " SIZE_FORMAT "M (%d)", + addr, size / M, GetLastError()); + } +} + +void XMapper::split_placeholder(uintptr_t addr, size_t size) { + const bool res = VirtualFree( + (void*)addr, // lpAddress + size, // dwSize + MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER // dwFreeType + ); + + if (!res) { + fatal_error("Failed to split placeholder", addr, size); + } +} + +void XMapper::coalesce_placeholders(uintptr_t addr, size_t size) { + const bool res = VirtualFree( + (void*)addr, // lpAddress + size, // dwSize + MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS // dwFreeType + ); + + if (!res) { + fatal_error("Failed to coalesce placeholders", addr, size); + } +} + +void XMapper::map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size) { + void* const res = XSyscall::MapViewOfFile3( + file_handle, // FileMapping + GetCurrentProcess(), // ProcessHandle + (void*)addr, // BaseAddress + file_offset, // Offset + size, // ViewSize + MEM_REPLACE_PLACEHOLDER, // AllocationType + PAGE_READWRITE, // PageProtection + nullptr, // ExtendedParameters + 0 // ParameterCount + ); + + if (res == nullptr) { + fatal_error("Failed to map memory", addr, size); + } +} + +void XMapper::unmap_view_preserve_placeholder(uintptr_t addr, size_t size) { + const bool res = XSyscall::UnmapViewOfFile2( + GetCurrentProcess(), // ProcessHandle + (void*)addr, // BaseAddress + MEM_PRESERVE_PLACEHOLDER // UnmapFlags + ); + + if (!res) { + fatal_error("Failed to unmap memory", addr, size); + } +} diff --git a/src/hotspot/os/windows/gc/x/xMapper_windows.hpp b/src/hotspot/os/windows/gc/x/xMapper_windows.hpp new file mode 100644 index 0000000000000..0f266d3fab7c6 --- /dev/null +++ b/src/hotspot/os/windows/gc/x/xMapper_windows.hpp @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
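The comment block at the top of xMapper_windows.cpp describes the commit dance: create a granule-sized paging-file mapping with SEC_RESERVE, map a throwaway view, commit it with VirtualAlloc2, then unmap the view and keep only the handle. Condensed into a hedged sketch built from the XMapper primitives above (error handling and logging elided; the helper name is illustrative):

#include "gc/x/xMapper_windows.hpp"

static HANDLE commit_one_granule(size_t granule_size) {
  // create_and_commit_paging_file_mapping() wraps the whole sequence:
  //   CreateFileMappingW(..., PAGE_READWRITE | SEC_RESERVE, ...)  // no commit charge yet
  //   MapViewOfFile3(...)                                         // temporary view
  //   VirtualAlloc2(..., MEM_COMMIT, ...)                         // the actual commit
  //   UnmapViewOfFile2(...)                                       // view no longer needed
  // The physical memory stays committed for as long as the handle (or any
  // view mapped from it) is alive.
  return XMapper::create_and_commit_paging_file_mapping(granule_size);
}

// Multi-mapping that granule into an already reserved (placeholder) range:
//   XMapper::map_view_replace_placeholder(handle, 0 /* file_offset */, addr, granule_size);
// and giving it back while keeping the address space reserved:
//   XMapper::unmap_view_preserve_placeholder(addr, granule_size);
//   XMapper::close_paging_file_mapping(handle);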
+ */ + +#ifndef OS_WINDOWS_GC_X_XMAPPER_WINDOWS_HPP +#define OS_WINDOWS_GC_X_XMAPPER_WINDOWS_HPP + +#include "memory/allStatic.hpp" +#include "utilities/globalDefinitions.hpp" + +#include + +class XMapper : public AllStatic { +private: + // Create paging file mapping + static HANDLE create_paging_file_mapping(size_t size); + + // Commit paging file mapping + static bool commit_paging_file_mapping(HANDLE file_handle, uintptr_t file_offset, size_t size); + + // Map a view anywhere without a placeholder + static uintptr_t map_view_no_placeholder(HANDLE file_handle, uintptr_t file_offset, size_t size); + + // Unmap a view without preserving a placeholder + static void unmap_view_no_placeholder(uintptr_t addr, size_t size); + + // Commit memory covering the given virtual address range + static uintptr_t commit(uintptr_t addr, size_t size); + +public: + // Reserve memory with a placeholder + static uintptr_t reserve(uintptr_t addr, size_t size); + + // Unreserve memory + static void unreserve(uintptr_t addr, size_t size); + + // Create and commit paging file mapping + static HANDLE create_and_commit_paging_file_mapping(size_t size); + + // Close paging file mapping + static void close_paging_file_mapping(HANDLE file_handle); + + // Create a shared AWE section + static HANDLE create_shared_awe_section(); + + // Reserve memory attached to the shared AWE section + static uintptr_t reserve_for_shared_awe(HANDLE awe_section, uintptr_t addr, size_t size); + + // Unreserve memory attached to a shared AWE section + static void unreserve_for_shared_awe(uintptr_t addr, size_t size); + + // Split a placeholder + // + // A view can only replace an entire placeholder, so placeholders need to be + // split and coalesced to be the exact size of the new views. + // [addr, addr + size) needs to be a proper sub-placeholder of an existing + // placeholder. + static void split_placeholder(uintptr_t addr, size_t size); + + // Coalesce a placeholder + // + // [addr, addr + size) is the new placeholder. A sub-placeholder needs to + // exist within that range. + static void coalesce_placeholders(uintptr_t addr, size_t size); + + // Map a view of the file handle and replace the placeholder covering the + // given virtual address range + static void map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size); + + // Unmap the view and reinstate a placeholder covering the given virtual + // address range + static void unmap_view_preserve_placeholder(uintptr_t addr, size_t size); +}; + +#endif // OS_WINDOWS_GC_X_XMAPPER_WINDOWS_HPP diff --git a/src/hotspot/os/windows/gc/x/xNUMA_windows.cpp b/src/hotspot/os/windows/gc/x/xNUMA_windows.cpp new file mode 100644 index 0000000000000..47a84df962e92 --- /dev/null +++ b/src/hotspot/os/windows/gc/x/xNUMA_windows.cpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
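The split/coalesce comments in xMapper_windows.hpp pin down the invariant: a view can only replace a placeholder of exactly its own size, so the order of operations is always split-before-map and unmap-before-coalesce. A hedged sketch of that ordering for one granule carved out of a larger reservation (helper names, addresses and sizes are illustrative; assume [base, base + 3 * granule) is currently a single, unmapped placeholder):

#include "gc/x/xMapper_windows.hpp"

static void map_middle_granule(HANDLE handle, uintptr_t base, size_t granule) {
  // Carve granule-sized placeholders off the front until the target granule
  // is a placeholder of exactly the right size...
  XMapper::split_placeholder(base, granule);            // [base, +g) | [base+g, +3g)
  XMapper::split_placeholder(base + granule, granule);  // [base, +g) | [base+g, +2g) | [base+2g, +3g)
  // ...then the view can replace it.
  XMapper::map_view_replace_placeholder(handle, 0 /* file_offset */, base + granule, granule);
}

static void unmap_middle_granule(uintptr_t base, size_t granule) {
  // Unmapping reinstates a granule-sized placeholder; once every part of the
  // range is a bare placeholder again, it can be merged back into one.
  XMapper::unmap_view_preserve_placeholder(base + granule, granule);
  XMapper::coalesce_placeholders(base, 3 * granule);
}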
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xNUMA.hpp" + +void XNUMA::pd_initialize() { + _enabled = false; +} + +uint32_t XNUMA::count() { + return 1; +} + +uint32_t XNUMA::id() { + return 0; +} + +uint32_t XNUMA::memory_id(uintptr_t addr) { + // NUMA support not enabled, assume everything belongs to node zero + return 0; +} diff --git a/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.cpp b/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.cpp new file mode 100644 index 0000000000000..92d47dfb7c84c --- /dev/null +++ b/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.cpp @@ -0,0 +1,252 @@ +/* + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xGranuleMap.inline.hpp" +#include "gc/x/xLargePages.inline.hpp" +#include "gc/x/xMapper_windows.hpp" +#include "gc/x/xPhysicalMemoryBacking_windows.hpp" +#include "logging/log.hpp" +#include "runtime/globals.hpp" +#include "utilities/debug.hpp" + +class XPhysicalMemoryBackingImpl : public CHeapObj { +public: + virtual size_t commit(size_t offset, size_t size) = 0; + virtual size_t uncommit(size_t offset, size_t size) = 0; + virtual void map(uintptr_t addr, size_t size, size_t offset) const = 0; + virtual void unmap(uintptr_t addr, size_t size) const = 0; +}; + +// Implements small pages (paged) support using placeholder reservation. +// +// The backing commits and uncommits physical memory, that can be +// multi-mapped into the virtual address space. To support fine-graned +// committing and uncommitting, each XGranuleSize'd chunk is mapped to +// a separate paging file mapping. 
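The comment above spells out the granularity contract: the backing hands out physical memory in XGranuleSize chunks, each backed by its own paging-file mapping, and commit() reports how much it actually managed to commit. A hedged sketch of a hypothetical caller; only the XPhysicalMemoryBacking API is taken from the patch:

#include "gc/x/xPhysicalMemoryBacking_windows.hpp"

static size_t try_commit(XPhysicalMemoryBacking& backing, size_t offset, size_t length) {
  const size_t committed = backing.commit(offset, length);
  if (committed < length) {
    // Only [offset, offset + committed) is usable. The caller decides whether
    // to retry later, shrink the request, or report the allocation failure;
    // the backing itself treats a partial commit as a normal outcome.
  }
  return committed;
}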
+ +class XPhysicalMemoryBackingSmallPages : public XPhysicalMemoryBackingImpl { +private: + XGranuleMap _handles; + + HANDLE get_handle(uintptr_t offset) const { + HANDLE const handle = _handles.get(offset); + assert(handle != 0, "Should be set"); + return handle; + } + + void put_handle(uintptr_t offset, HANDLE handle) { + assert(handle != INVALID_HANDLE_VALUE, "Invalid handle"); + assert(_handles.get(offset) == 0, "Should be cleared"); + _handles.put(offset, handle); + } + + void clear_handle(uintptr_t offset) { + assert(_handles.get(offset) != 0, "Should be set"); + _handles.put(offset, 0); + } + +public: + XPhysicalMemoryBackingSmallPages(size_t max_capacity) : + XPhysicalMemoryBackingImpl(), + _handles(max_capacity) {} + + size_t commit(size_t offset, size_t size) { + for (size_t i = 0; i < size; i += XGranuleSize) { + HANDLE const handle = XMapper::create_and_commit_paging_file_mapping(XGranuleSize); + if (handle == 0) { + return i; + } + + put_handle(offset + i, handle); + } + + return size; + } + + size_t uncommit(size_t offset, size_t size) { + for (size_t i = 0; i < size; i += XGranuleSize) { + HANDLE const handle = get_handle(offset + i); + clear_handle(offset + i); + XMapper::close_paging_file_mapping(handle); + } + + return size; + } + + void map(uintptr_t addr, size_t size, size_t offset) const { + assert(is_aligned(offset, XGranuleSize), "Misaligned"); + assert(is_aligned(addr, XGranuleSize), "Misaligned"); + assert(is_aligned(size, XGranuleSize), "Misaligned"); + + for (size_t i = 0; i < size; i += XGranuleSize) { + HANDLE const handle = get_handle(offset + i); + XMapper::map_view_replace_placeholder(handle, 0 /* offset */, addr + i, XGranuleSize); + } + } + + void unmap(uintptr_t addr, size_t size) const { + assert(is_aligned(addr, XGranuleSize), "Misaligned"); + assert(is_aligned(size, XGranuleSize), "Misaligned"); + + for (size_t i = 0; i < size; i += XGranuleSize) { + XMapper::unmap_view_preserve_placeholder(addr + i, XGranuleSize); + } + } +}; + +// Implements Large Pages (locked) support using shared AWE physical memory. +// +// Shared AWE physical memory also works with small pages, but it has +// a few drawbacks that makes it a no-go to use it at this point: +// +// 1) It seems to use 8 bytes of committed memory per *reserved* memory. +// Given our scheme to use a large address space range this turns out to +// use too much memory. +// +// 2) It requires memory locking privileges, even for small pages. This +// has always been a requirement for large pages, and would be an extra +// restriction for usage with small pages. +// +// Note: The large pages size is tied to our XGranuleSize. 
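The AWE-backed implementation that follows indexes one flat ULONG_PTR array by granule: index = offset >> XGranuleSizeShift, npages = size >> XGranuleSizeShift, and a partial AllocateUserPhysicalPages() result is converted back to bytes with npages_res << XGranuleSizeShift. A worked example, assuming the usual 2M ZGC granule (XGranuleSizeShift == 21); the numbers are illustrative:

// commit(offset = 6M, size = 8M)
//   index      = 6M >> 21  = 3        // fourth slot of _page_array
//   npages     = 8M >> 21  = 4        // four granule-sized AWE entries requested
//
// If AllocateUserPhysicalPages() only delivers 2 of the 4 entries:
//   npages_res = 2
//   committed  = 2 << 21   = 4M       // commit() reports 4M of the 8M request
//
// uncommit(), map() and unmap() use the same shift, so _page_array[index]
// always refers to the entries for exactly that granule range.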
+ +extern HANDLE XAWESection; + +class XPhysicalMemoryBackingLargePages : public XPhysicalMemoryBackingImpl { +private: + ULONG_PTR* const _page_array; + + static ULONG_PTR* alloc_page_array(size_t max_capacity) { + const size_t npages = max_capacity / XGranuleSize; + const size_t array_size = npages * sizeof(ULONG_PTR); + + return (ULONG_PTR*)os::malloc(array_size, mtGC); + } + +public: + XPhysicalMemoryBackingLargePages(size_t max_capacity) : + XPhysicalMemoryBackingImpl(), + _page_array(alloc_page_array(max_capacity)) {} + + size_t commit(size_t offset, size_t size) { + const size_t index = offset >> XGranuleSizeShift; + const size_t npages = size >> XGranuleSizeShift; + + size_t npages_res = npages; + const bool res = AllocateUserPhysicalPages(XAWESection, &npages_res, &_page_array[index]); + if (!res) { + fatal("Failed to allocate physical memory " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)", + size / M, offset, GetLastError()); + } else { + log_debug(gc)("Allocated physical memory: " SIZE_FORMAT "M @ " PTR_FORMAT, size / M, offset); + } + + // AllocateUserPhysicalPages might not be able to allocate the requested amount of memory. + // The allocated number of pages are written in npages_res. + return npages_res << XGranuleSizeShift; + } + + size_t uncommit(size_t offset, size_t size) { + const size_t index = offset >> XGranuleSizeShift; + const size_t npages = size >> XGranuleSizeShift; + + size_t npages_res = npages; + const bool res = FreeUserPhysicalPages(XAWESection, &npages_res, &_page_array[index]); + if (!res) { + fatal("Failed to uncommit physical memory " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)", + size, offset, GetLastError()); + } + + return npages_res << XGranuleSizeShift; + } + + void map(uintptr_t addr, size_t size, size_t offset) const { + const size_t npages = size >> XGranuleSizeShift; + const size_t index = offset >> XGranuleSizeShift; + + const bool res = MapUserPhysicalPages((char*)addr, npages, &_page_array[index]); + if (!res) { + fatal("Failed to map view " PTR_FORMAT " " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)", + addr, size / M, offset, GetLastError()); + } + } + + void unmap(uintptr_t addr, size_t size) const { + const size_t npages = size >> XGranuleSizeShift; + + const bool res = MapUserPhysicalPages((char*)addr, npages, nullptr); + if (!res) { + fatal("Failed to unmap view " PTR_FORMAT " " SIZE_FORMAT "M (%d)", + addr, size / M, GetLastError()); + } + } +}; + +static XPhysicalMemoryBackingImpl* select_impl(size_t max_capacity) { + if (XLargePages::is_enabled()) { + return new XPhysicalMemoryBackingLargePages(max_capacity); + } + + return new XPhysicalMemoryBackingSmallPages(max_capacity); +} + +XPhysicalMemoryBacking::XPhysicalMemoryBacking(size_t max_capacity) : + _impl(select_impl(max_capacity)) {} + +bool XPhysicalMemoryBacking::is_initialized() const { + return true; +} + +void XPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const { + // Does nothing +} + +size_t XPhysicalMemoryBacking::commit(size_t offset, size_t length) { + log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", + offset / M, (offset + length) / M, length / M); + + return _impl->commit(offset, length); +} + +size_t XPhysicalMemoryBacking::uncommit(size_t offset, size_t length) { + log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", + offset / M, (offset + length) / M, length / M); + + return _impl->uncommit(offset, length); +} + +void XPhysicalMemoryBacking::map(uintptr_t addr, size_t size, 
size_t offset) const { + assert(is_aligned(offset, XGranuleSize), "Misaligned: " PTR_FORMAT, offset); + assert(is_aligned(addr, XGranuleSize), "Misaligned: " PTR_FORMAT, addr); + assert(is_aligned(size, XGranuleSize), "Misaligned: " PTR_FORMAT, size); + + _impl->map(addr, size, offset); +} + +void XPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const { + assert(is_aligned(addr, XGranuleSize), "Misaligned"); + assert(is_aligned(size, XGranuleSize), "Misaligned"); + + _impl->unmap(addr, size); +} diff --git a/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.hpp b/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.hpp new file mode 100644 index 0000000000000..d6e123f21e51a --- /dev/null +++ b/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.hpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef OS_WINDOWS_GC_X_XPHYSICALMEMORYBACKING_WINDOWS_HPP +#define OS_WINDOWS_GC_X_XPHYSICALMEMORYBACKING_WINDOWS_HPP + +#include "utilities/globalDefinitions.hpp" + +#include + +class XPhysicalMemoryBackingImpl; + +class XPhysicalMemoryBacking { +private: + XPhysicalMemoryBackingImpl* _impl; + +public: + XPhysicalMemoryBacking(size_t max_capacity); + + bool is_initialized() const; + + void warn_commit_limits(size_t max_capacity) const; + + size_t commit(size_t offset, size_t length); + size_t uncommit(size_t offset, size_t length); + + void map(uintptr_t addr, size_t size, size_t offset) const; + void unmap(uintptr_t addr, size_t size) const; +}; + +#endif // OS_WINDOWS_GC_X_XPHYSICALMEMORYBACKING_WINDOWS_HPP diff --git a/src/hotspot/os/windows/gc/x/xSyscall_windows.cpp b/src/hotspot/os/windows/gc/x/xSyscall_windows.cpp new file mode 100644 index 0000000000000..f22966a54898d --- /dev/null +++ b/src/hotspot/os/windows/gc/x/xSyscall_windows.cpp @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/x/xSyscall_windows.hpp" +#include "runtime/java.hpp" +#include "runtime/os.hpp" + +XSyscall::CreateFileMappingWFn XSyscall::CreateFileMappingW; +XSyscall::CreateFileMapping2Fn XSyscall::CreateFileMapping2; +XSyscall::VirtualAlloc2Fn XSyscall::VirtualAlloc2; +XSyscall::VirtualFreeExFn XSyscall::VirtualFreeEx; +XSyscall::MapViewOfFile3Fn XSyscall::MapViewOfFile3; +XSyscall::UnmapViewOfFile2Fn XSyscall::UnmapViewOfFile2; + +static void* lookup_kernelbase_library() { + const char* const name = "KernelBase"; + char ebuf[1024]; + void* const handle = os::dll_load(name, ebuf, sizeof(ebuf)); + if (handle == nullptr) { + log_error_p(gc)("Failed to load library: %s", name); + } + return handle; +} + +static void* lookup_kernelbase_symbol(const char* name) { + static void* const handle = lookup_kernelbase_library(); + if (handle == nullptr) { + return nullptr; + } + return os::dll_lookup(handle, name); +} + +static bool has_kernelbase_symbol(const char* name) { + return lookup_kernelbase_symbol(name) != nullptr; +} + +template +static void install_kernelbase_symbol(Fn*& fn, const char* name) { + fn = reinterpret_cast(lookup_kernelbase_symbol(name)); +} + +template +static void install_kernelbase_1803_symbol_or_exit(Fn*& fn, const char* name) { + install_kernelbase_symbol(fn, name); + if (fn == nullptr) { + log_error_p(gc)("Failed to lookup symbol: %s", name); + vm_exit_during_initialization("ZGC requires Windows version 1803 or later"); + } +} + +void XSyscall::initialize() { + // Required + install_kernelbase_1803_symbol_or_exit(CreateFileMappingW, "CreateFileMappingW"); + install_kernelbase_1803_symbol_or_exit(VirtualAlloc2, "VirtualAlloc2"); + install_kernelbase_1803_symbol_or_exit(VirtualFreeEx, "VirtualFreeEx"); + install_kernelbase_1803_symbol_or_exit(MapViewOfFile3, "MapViewOfFile3"); + install_kernelbase_1803_symbol_or_exit(UnmapViewOfFile2, "UnmapViewOfFile2"); + + // Optional - for large pages support + install_kernelbase_symbol(CreateFileMapping2, "CreateFileMapping2"); +} + +bool XSyscall::is_supported() { + // Available in Windows version 1803 and later + return has_kernelbase_symbol("VirtualAlloc2"); +} + +bool XSyscall::is_large_pages_supported() { + // Available in Windows version 1809 and later + return has_kernelbase_symbol("CreateFileMapping2"); +} diff --git a/src/hotspot/os/windows/gc/x/xSyscall_windows.hpp b/src/hotspot/os/windows/gc/x/xSyscall_windows.hpp new file mode 100644 index 0000000000000..89ba2573b10cc --- /dev/null +++ b/src/hotspot/os/windows/gc/x/xSyscall_windows.hpp @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
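XSyscall above resolves VirtualAlloc2, MapViewOfFile3 and friends at run time so the same binary still loads on Windows versions that do not export them, and turns a missing required symbol into a clean VM exit rather than a loader error. A standalone hedged sketch of the same pattern with plain Win32 calls (the patch itself goes through os::dll_load/os::dll_lookup; the helper name is illustrative):

#include <windows.h>

typedef PVOID (WINAPI *VirtualAlloc2Fn)(HANDLE, PVOID, SIZE_T, ULONG, ULONG,
                                        MEM_EXTENDED_PARAMETER*, ULONG);

static VirtualAlloc2Fn resolve_virtual_alloc2() {
  HMODULE const kernelbase = GetModuleHandleA("KernelBase.dll");
  if (kernelbase == nullptr) {
    return nullptr;
  }
  // Null before Windows 10 1803 - the same probe XSyscall::is_supported() uses.
  return reinterpret_cast<VirtualAlloc2Fn>(
      GetProcAddress(kernelbase, "VirtualAlloc2"));
}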
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef OS_WINDOWS_GC_X_XSYSCALL_WINDOWS_HPP +#define OS_WINDOWS_GC_X_XSYSCALL_WINDOWS_HPP + +#include "utilities/globalDefinitions.hpp" + +#include +#include + +class XSyscall { +private: + typedef HANDLE (*CreateFileMappingWFn)(HANDLE, LPSECURITY_ATTRIBUTES, DWORD, DWORD, DWORD, LPCWSTR); + typedef HANDLE (*CreateFileMapping2Fn)(HANDLE, LPSECURITY_ATTRIBUTES, ULONG, ULONG, ULONG, ULONG64, PCWSTR, PMEM_EXTENDED_PARAMETER, ULONG); + typedef PVOID (*VirtualAlloc2Fn)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, MEM_EXTENDED_PARAMETER*, ULONG); + typedef BOOL (*VirtualFreeExFn)(HANDLE, LPVOID, SIZE_T, DWORD); + typedef PVOID (*MapViewOfFile3Fn)(HANDLE, HANDLE, PVOID, ULONG64, SIZE_T, ULONG, ULONG, MEM_EXTENDED_PARAMETER*, ULONG); + typedef BOOL (*UnmapViewOfFile2Fn)(HANDLE, PVOID, ULONG); + +public: + static CreateFileMappingWFn CreateFileMappingW; + static CreateFileMapping2Fn CreateFileMapping2; + static VirtualAlloc2Fn VirtualAlloc2; + static VirtualFreeExFn VirtualFreeEx; + static MapViewOfFile3Fn MapViewOfFile3; + static UnmapViewOfFile2Fn UnmapViewOfFile2; + + static void initialize(); + + static bool is_supported(); + static bool is_large_pages_supported(); +}; + +#endif // OS_WINDOWS_GC_X_XSYSCALL_WINDOWS_HPP diff --git a/src/hotspot/os/windows/gc/x/xUtils_windows.cpp b/src/hotspot/os/windows/gc/x/xUtils_windows.cpp new file mode 100644 index 0000000000000..788da80834ab3 --- /dev/null +++ b/src/hotspot/os/windows/gc/x/xUtils_windows.cpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/x/xUtils.hpp" +#include "utilities/debug.hpp" + +#include + +uintptr_t XUtils::alloc_aligned(size_t alignment, size_t size) { + void* const res = _aligned_malloc(size, alignment); + + if (res == nullptr) { + fatal("_aligned_malloc failed"); + } + + memset(res, 0, size); + + return (uintptr_t)res; +} diff --git a/src/hotspot/os/windows/gc/x/xVirtualMemory_windows.cpp b/src/hotspot/os/windows/gc/x/xVirtualMemory_windows.cpp new file mode 100644 index 0000000000000..c9c891430fe14 --- /dev/null +++ b/src/hotspot/os/windows/gc/x/xVirtualMemory_windows.cpp @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xAddress.inline.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xLargePages.inline.hpp" +#include "gc/x/xMapper_windows.hpp" +#include "gc/x/xSyscall_windows.hpp" +#include "gc/x/xVirtualMemory.hpp" +#include "utilities/align.hpp" +#include "utilities/debug.hpp" + +class XVirtualMemoryManagerImpl : public CHeapObj { +public: + virtual void initialize_before_reserve() {} + virtual void initialize_after_reserve(XMemoryManager* manager) {} + virtual bool reserve(uintptr_t addr, size_t size) = 0; + virtual void unreserve(uintptr_t addr, size_t size) = 0; +}; + +// Implements small pages (paged) support using placeholder reservation. 
+class XVirtualMemoryManagerSmallPages : public XVirtualMemoryManagerImpl { +private: + class PlaceholderCallbacks : public AllStatic { + public: + static void split_placeholder(uintptr_t start, size_t size) { + XMapper::split_placeholder(XAddress::marked0(start), size); + XMapper::split_placeholder(XAddress::marked1(start), size); + XMapper::split_placeholder(XAddress::remapped(start), size); + } + + static void coalesce_placeholders(uintptr_t start, size_t size) { + XMapper::coalesce_placeholders(XAddress::marked0(start), size); + XMapper::coalesce_placeholders(XAddress::marked1(start), size); + XMapper::coalesce_placeholders(XAddress::remapped(start), size); + } + + static void split_into_placeholder_granules(uintptr_t start, size_t size) { + for (uintptr_t addr = start; addr < start + size; addr += XGranuleSize) { + split_placeholder(addr, XGranuleSize); + } + } + + static void coalesce_into_one_placeholder(uintptr_t start, size_t size) { + assert(is_aligned(size, XGranuleSize), "Must be granule aligned"); + + if (size > XGranuleSize) { + coalesce_placeholders(start, size); + } + } + + static void create_callback(const XMemory* area) { + assert(is_aligned(area->size(), XGranuleSize), "Must be granule aligned"); + coalesce_into_one_placeholder(area->start(), area->size()); + } + + static void destroy_callback(const XMemory* area) { + assert(is_aligned(area->size(), XGranuleSize), "Must be granule aligned"); + // Don't try split the last granule - VirtualFree will fail + split_into_placeholder_granules(area->start(), area->size() - XGranuleSize); + } + + static void shrink_from_front_callback(const XMemory* area, size_t size) { + assert(is_aligned(size, XGranuleSize), "Must be granule aligned"); + split_into_placeholder_granules(area->start(), size); + } + + static void shrink_from_back_callback(const XMemory* area, size_t size) { + assert(is_aligned(size, XGranuleSize), "Must be granule aligned"); + // Don't try split the last granule - VirtualFree will fail + split_into_placeholder_granules(area->end() - size, size - XGranuleSize); + } + + static void grow_from_front_callback(const XMemory* area, size_t size) { + assert(is_aligned(area->size(), XGranuleSize), "Must be granule aligned"); + coalesce_into_one_placeholder(area->start() - size, area->size() + size); + } + + static void grow_from_back_callback(const XMemory* area, size_t size) { + assert(is_aligned(area->size(), XGranuleSize), "Must be granule aligned"); + coalesce_into_one_placeholder(area->start(), area->size() + size); + } + + static void register_with(XMemoryManager* manager) { + // Each reserved virtual memory address area registered in _manager is + // exactly covered by a single placeholder. Callbacks are installed so + // that whenever a memory area changes, the corresponding placeholder + // is adjusted. + // + // The create and grow callbacks are called when virtual memory is + // returned to the memory manager. The new memory area is then covered + // by a new single placeholder. + // + // The destroy and shrink callbacks are called when virtual memory is + // allocated from the memory manager. The memory area is then is split + // into granule-sized placeholders. + // + // See comment in zMapper_windows.cpp explaining why placeholders are + // split into XGranuleSize sized placeholders. 
+ + XMemoryManager::Callbacks callbacks; + + callbacks._create = &create_callback; + callbacks._destroy = &destroy_callback; + callbacks._shrink_from_front = &shrink_from_front_callback; + callbacks._shrink_from_back = &shrink_from_back_callback; + callbacks._grow_from_front = &grow_from_front_callback; + callbacks._grow_from_back = &grow_from_back_callback; + + manager->register_callbacks(callbacks); + } + }; + + virtual void initialize_after_reserve(XMemoryManager* manager) { + PlaceholderCallbacks::register_with(manager); + } + + virtual bool reserve(uintptr_t addr, size_t size) { + const uintptr_t res = XMapper::reserve(addr, size); + + assert(res == addr || res == 0, "Should not reserve other memory than requested"); + return res == addr; + } + + virtual void unreserve(uintptr_t addr, size_t size) { + XMapper::unreserve(addr, size); + } +}; + +// Implements Large Pages (locked) support using shared AWE physical memory. + +// XPhysicalMemory layer needs access to the section +HANDLE XAWESection; + +class XVirtualMemoryManagerLargePages : public XVirtualMemoryManagerImpl { +private: + virtual void initialize_before_reserve() { + XAWESection = XMapper::create_shared_awe_section(); + } + + virtual bool reserve(uintptr_t addr, size_t size) { + const uintptr_t res = XMapper::reserve_for_shared_awe(XAWESection, addr, size); + + assert(res == addr || res == 0, "Should not reserve other memory than requested"); + return res == addr; + } + + virtual void unreserve(uintptr_t addr, size_t size) { + XMapper::unreserve_for_shared_awe(addr, size); + } +}; + +static XVirtualMemoryManagerImpl* _impl = nullptr; + +void XVirtualMemoryManager::pd_initialize_before_reserve() { + if (XLargePages::is_enabled()) { + _impl = new XVirtualMemoryManagerLargePages(); + } else { + _impl = new XVirtualMemoryManagerSmallPages(); + } + _impl->initialize_before_reserve(); +} + +void XVirtualMemoryManager::pd_initialize_after_reserve() { + _impl->initialize_after_reserve(&_manager); +} + +bool XVirtualMemoryManager::pd_reserve(uintptr_t addr, size_t size) { + return _impl->reserve(addr, size); +} + +void XVirtualMemoryManager::pd_unreserve(uintptr_t addr, size_t size) { + _impl->unreserve(addr, size); +} diff --git a/src/hotspot/os/windows/gc/z/zArguments_windows.cpp b/src/hotspot/os/windows/gc/z/zArguments_windows.cpp index b51f4df354e90..e10a06648f0e7 100644 --- a/src/hotspot/os/windows/gc/z/zArguments_windows.cpp +++ b/src/hotspot/os/windows/gc/z/zArguments_windows.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
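The callbacks registered in register_with() above keep the placeholder layout in lock-step with the memory manager: one placeholder per free area, granule-sized placeholders for everything handed out, each operation applied to all three heap views by PlaceholderCallbacks. A worked walkthrough, assuming XGranuleSize == 2M; the addresses are illustrative:

// Free area [A, A+6M) sits in the manager covered by ONE 6M placeholder.
//
// Allocating 4M from the front fires shrink_from_front_callback(area, 4M):
//   split_into_placeholder_granules(A, 4M)
//     -> split_placeholder(A,      2M)    // carve granule #1 (for all three views)
//     -> split_placeholder(A + 2M, 2M)    // carve granule #2
//   The remaining free area [A+4M, A+6M) stays one placeholder, and each carved
//   granule can now be replaced by a mapped view.
//
// Returning [A, A+4M) later fires grow_from_front_callback(area, 4M):
//   coalesce_into_one_placeholder(A, 6M)  // free area is one placeholder again
//
// destroy_callback() splits only area->size() - XGranuleSize on purpose ("Don't
// try split the last granule"): after the earlier granules are carved off, the
// tail is already exactly one granule-sized placeholder, and asking VirtualFree
// to "split" a whole placeholder would fail.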
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,6 @@ #include "gc/z/zArguments.hpp" #include "gc/z/zSyscall_windows.hpp" -bool ZArguments::is_os_supported() const { +bool ZArguments::is_os_supported() { return ZSyscall::is_supported(); } diff --git a/src/hotspot/os/windows/gc/z/zMapper_windows.cpp b/src/hotspot/os/windows/gc/z/zMapper_windows.cpp index 95a2dc07baedb..b2923a300e4bc 100644 --- a/src/hotspot/os/windows/gc/z/zMapper_windows.cpp +++ b/src/hotspot/os/windows/gc/z/zMapper_windows.cpp @@ -22,6 +22,7 @@ */ #include "precompiled.hpp" +#include "gc/z/zAddress.inline.hpp" #include "gc/z/zMapper_windows.hpp" #include "gc/z/zSyscall_windows.hpp" #include "logging/log.hpp" @@ -59,10 +60,10 @@ fatal(msg ": " PTR_FORMAT " " SIZE_FORMAT "M (%d)", \ (addr), (size) / M, GetLastError()) -uintptr_t ZMapper::reserve(uintptr_t addr, size_t size) { +zaddress_unsafe ZMapper::reserve(zaddress_unsafe addr, size_t size) { void* const res = ZSyscall::VirtualAlloc2( GetCurrentProcess(), // Process - (void*)addr, // BaseAddress + (void*)untype(addr), // BaseAddress size, // Size MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, // AllocationType PAGE_NOACCESS, // PageProtection @@ -71,19 +72,19 @@ uintptr_t ZMapper::reserve(uintptr_t addr, size_t size) { ); // Caller responsible for error handling - return (uintptr_t)res; + return to_zaddress_unsafe((uintptr_t)res); } -void ZMapper::unreserve(uintptr_t addr, size_t size) { +void ZMapper::unreserve(zaddress_unsafe addr, size_t size) { const bool res = ZSyscall::VirtualFreeEx( GetCurrentProcess(), // hProcess - (void*)addr, // lpAddress + (void*)untype(addr), // lpAddress size, // dwSize MEM_RELEASE // dwFreeType ); if (!res) { - fatal_error("Failed to unreserve memory", addr, size); + fatal_error("Failed to unreserve memory", untype(addr), size); } } @@ -223,14 +224,14 @@ HANDLE ZMapper::create_shared_awe_section() { return section; } -uintptr_t ZMapper::reserve_for_shared_awe(HANDLE awe_section, uintptr_t addr, size_t size) { +zaddress_unsafe ZMapper::reserve_for_shared_awe(HANDLE awe_section, zaddress_unsafe addr, size_t size) { MEM_EXTENDED_PARAMETER parameter = { 0 }; parameter.Type = MemExtendedParameterUserPhysicalHandle; parameter.Handle = awe_section; void* const res = ZSyscall::VirtualAlloc2( GetCurrentProcess(), // Process - (void*)addr, // BaseAddress + (void*)untype(addr), // BaseAddress size, // Size MEM_RESERVE | MEM_PHYSICAL, // AllocationType PAGE_READWRITE, // PageProtection @@ -239,25 +240,25 @@ uintptr_t ZMapper::reserve_for_shared_awe(HANDLE awe_section, uintptr_t addr, si ); // Caller responsible for error handling - return (uintptr_t)res; + return to_zaddress_unsafe((uintptr_t)res); } -void ZMapper::unreserve_for_shared_awe(uintptr_t addr, size_t size) { +void ZMapper::unreserve_for_shared_awe(zaddress_unsafe addr, size_t size) { bool res = VirtualFree( - (void*)addr, // lpAddress - 0, // dwSize - MEM_RELEASE // dwFreeType + (void*)untype(addr), // lpAddress + 0, // dwSize + MEM_RELEASE // dwFreeType ); if (!res) { fatal("Failed to unreserve memory: " PTR_FORMAT " " SIZE_FORMAT "M (%d)", - addr, size / M, GetLastError()); + untype(addr), size / M, GetLastError()); } } -void ZMapper::split_placeholder(uintptr_t addr, size_t size) { +void ZMapper::split_placeholder(zaddress_unsafe addr, size_t size) { const bool res = VirtualFree( - (void*)addr, // lpAddress + (void*)untype(addr), // lpAddress size, // dwSize MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER // dwFreeType ); @@ -267,9 +268,9 @@ void 
ZMapper::split_placeholder(uintptr_t addr, size_t size) { } } -void ZMapper::coalesce_placeholders(uintptr_t addr, size_t size) { +void ZMapper::coalesce_placeholders(zaddress_unsafe addr, size_t size) { const bool res = VirtualFree( - (void*)addr, // lpAddress + (void*)untype(addr), // lpAddress size, // dwSize MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS // dwFreeType ); @@ -279,11 +280,11 @@ void ZMapper::coalesce_placeholders(uintptr_t addr, size_t size) { } } -void ZMapper::map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size) { +void ZMapper::map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, zaddress_unsafe addr, size_t size) { void* const res = ZSyscall::MapViewOfFile3( file_handle, // FileMapping GetCurrentProcess(), // ProcessHandle - (void*)addr, // BaseAddress + (void*)untype(addr), // BaseAddress file_offset, // Offset size, // ViewSize MEM_REPLACE_PLACEHOLDER, // AllocationType @@ -297,10 +298,10 @@ void ZMapper::map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_of } } -void ZMapper::unmap_view_preserve_placeholder(uintptr_t addr, size_t size) { +void ZMapper::unmap_view_preserve_placeholder(zaddress_unsafe addr, size_t size) { const bool res = ZSyscall::UnmapViewOfFile2( GetCurrentProcess(), // ProcessHandle - (void*)addr, // BaseAddress + (void*)untype(addr), // BaseAddress MEM_PRESERVE_PLACEHOLDER // UnmapFlags ); diff --git a/src/hotspot/os/windows/gc/z/zMapper_windows.hpp b/src/hotspot/os/windows/gc/z/zMapper_windows.hpp index 3e47b470f5f28..e6efd6b9e1127 100644 --- a/src/hotspot/os/windows/gc/z/zMapper_windows.hpp +++ b/src/hotspot/os/windows/gc/z/zMapper_windows.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #ifndef OS_WINDOWS_GC_Z_ZMAPPER_WINDOWS_HPP #define OS_WINDOWS_GC_Z_ZMAPPER_WINDOWS_HPP +#include "gc/z/zAddress.hpp" #include "memory/allStatic.hpp" #include "utilities/globalDefinitions.hpp" @@ -48,10 +49,10 @@ class ZMapper : public AllStatic { public: // Reserve memory with a placeholder - static uintptr_t reserve(uintptr_t addr, size_t size); + static zaddress_unsafe reserve(zaddress_unsafe addr, size_t size); // Unreserve memory - static void unreserve(uintptr_t addr, size_t size); + static void unreserve(zaddress_unsafe addr, size_t size); // Create and commit paging file mapping static HANDLE create_and_commit_paging_file_mapping(size_t size); @@ -63,10 +64,10 @@ class ZMapper : public AllStatic { static HANDLE create_shared_awe_section(); // Reserve memory attached to the shared AWE section - static uintptr_t reserve_for_shared_awe(HANDLE awe_section, uintptr_t addr, size_t size); + static zaddress_unsafe reserve_for_shared_awe(HANDLE awe_section, zaddress_unsafe addr, size_t size); // Unreserve memory attached to a shared AWE section - static void unreserve_for_shared_awe(uintptr_t addr, size_t size); + static void unreserve_for_shared_awe(zaddress_unsafe addr, size_t size); // Split a placeholder // @@ -74,21 +75,21 @@ class ZMapper : public AllStatic { // split and coalesced to be the exact size of the new views. // [addr, addr + size) needs to be a proper sub-placeholder of an existing // placeholder. 
- static void split_placeholder(uintptr_t addr, size_t size); + static void split_placeholder(zaddress_unsafe addr, size_t size); // Coalesce a placeholder // // [addr, addr + size) is the new placeholder. A sub-placeholder needs to // exist within that range. - static void coalesce_placeholders(uintptr_t addr, size_t size); + static void coalesce_placeholders(zaddress_unsafe addr, size_t size); // Map a view of the file handle and replace the placeholder covering the // given virtual address range - static void map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size); + static void map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, zaddress_unsafe addr, size_t size); // Unmap the view and reinstate a placeholder covering the given virtual // address range - static void unmap_view_preserve_placeholder(uintptr_t addr, size_t size); + static void unmap_view_preserve_placeholder(zaddress_unsafe addr, size_t size); }; #endif // OS_WINDOWS_GC_Z_ZMAPPER_WINDOWS_HPP diff --git a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp index fc7105ad8a39c..3fed88f721840 100644 --- a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp +++ b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp @@ -22,6 +22,7 @@ */ #include "precompiled.hpp" +#include "gc/z/zAddress.inline.hpp" #include "gc/z/zGlobals.hpp" #include "gc/z/zGranuleMap.inline.hpp" #include "gc/z/zLargePages.inline.hpp" @@ -33,10 +34,10 @@ class ZPhysicalMemoryBackingImpl : public CHeapObj { public: - virtual size_t commit(size_t offset, size_t size) = 0; - virtual size_t uncommit(size_t offset, size_t size) = 0; - virtual void map(uintptr_t addr, size_t size, size_t offset) const = 0; - virtual void unmap(uintptr_t addr, size_t size) const = 0; + virtual size_t commit(zoffset offset, size_t size) = 0; + virtual size_t uncommit(zoffset offset, size_t size) = 0; + virtual void map(zaddress_unsafe addr, size_t size, zoffset offset) const = 0; + virtual void unmap(zaddress_unsafe addr, size_t size) const = 0; }; // Implements small pages (paged) support using placeholder reservation. 
@@ -50,19 +51,19 @@ class ZPhysicalMemoryBackingSmallPages : public ZPhysicalMemoryBackingImpl { private: ZGranuleMap _handles; - HANDLE get_handle(uintptr_t offset) const { + HANDLE get_handle(zoffset offset) const { HANDLE const handle = _handles.get(offset); assert(handle != 0, "Should be set"); return handle; } - void put_handle(uintptr_t offset, HANDLE handle) { + void put_handle(zoffset offset, HANDLE handle) { assert(handle != INVALID_HANDLE_VALUE, "Invalid handle"); assert(_handles.get(offset) == 0, "Should be cleared"); _handles.put(offset, handle); } - void clear_handle(uintptr_t offset) { + void clear_handle(zoffset offset) { assert(_handles.get(offset) != 0, "Should be set"); _handles.put(offset, 0); } @@ -72,7 +73,7 @@ class ZPhysicalMemoryBackingSmallPages : public ZPhysicalMemoryBackingImpl { ZPhysicalMemoryBackingImpl(), _handles(max_capacity) {} - size_t commit(size_t offset, size_t size) { + size_t commit(zoffset offset, size_t size) { for (size_t i = 0; i < size; i += ZGranuleSize) { HANDLE const handle = ZMapper::create_and_commit_paging_file_mapping(ZGranuleSize); if (handle == 0) { @@ -85,7 +86,7 @@ class ZPhysicalMemoryBackingSmallPages : public ZPhysicalMemoryBackingImpl { return size; } - size_t uncommit(size_t offset, size_t size) { + size_t uncommit(zoffset offset, size_t size) { for (size_t i = 0; i < size; i += ZGranuleSize) { HANDLE const handle = get_handle(offset + i); clear_handle(offset + i); @@ -95,9 +96,9 @@ class ZPhysicalMemoryBackingSmallPages : public ZPhysicalMemoryBackingImpl { return size; } - void map(uintptr_t addr, size_t size, size_t offset) const { - assert(is_aligned(offset, ZGranuleSize), "Misaligned"); - assert(is_aligned(addr, ZGranuleSize), "Misaligned"); + void map(zaddress_unsafe addr, size_t size, zoffset offset) const { + assert(is_aligned(untype(offset), ZGranuleSize), "Misaligned"); + assert(is_aligned(untype(addr), ZGranuleSize), "Misaligned"); assert(is_aligned(size, ZGranuleSize), "Misaligned"); for (size_t i = 0; i < size; i += ZGranuleSize) { @@ -106,12 +107,12 @@ class ZPhysicalMemoryBackingSmallPages : public ZPhysicalMemoryBackingImpl { } } - void unmap(uintptr_t addr, size_t size) const { - assert(is_aligned(addr, ZGranuleSize), "Misaligned"); + void unmap(zaddress_unsafe addr, size_t size) const { + assert(is_aligned(untype(addr), ZGranuleSize), "Misaligned"); assert(is_aligned(size, ZGranuleSize), "Misaligned"); for (size_t i = 0; i < size; i += ZGranuleSize) { - ZMapper::unmap_view_preserve_placeholder(addr + i, ZGranuleSize); + ZMapper::unmap_view_preserve_placeholder(to_zaddress_unsafe(untype(addr) + i), ZGranuleSize); } } }; @@ -149,17 +150,17 @@ class ZPhysicalMemoryBackingLargePages : public ZPhysicalMemoryBackingImpl { ZPhysicalMemoryBackingImpl(), _page_array(alloc_page_array(max_capacity)) {} - size_t commit(size_t offset, size_t size) { - const size_t index = offset >> ZGranuleSizeShift; + size_t commit(zoffset offset, size_t size) { + const size_t index = untype(offset) >> ZGranuleSizeShift; const size_t npages = size >> ZGranuleSizeShift; size_t npages_res = npages; const bool res = AllocateUserPhysicalPages(ZAWESection, &npages_res, &_page_array[index]); if (!res) { fatal("Failed to allocate physical memory " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)", - size / M, offset, GetLastError()); + size / M, untype(offset), GetLastError()); } else { - log_debug(gc)("Allocated physical memory: " SIZE_FORMAT "M @ " PTR_FORMAT, size / M, offset); + log_debug(gc)("Allocated physical memory: " SIZE_FORMAT "M @ " PTR_FORMAT, 
size / M, untype(offset)); } // AllocateUserPhysicalPages might not be able to allocate the requested amount of memory. @@ -167,35 +168,35 @@ class ZPhysicalMemoryBackingLargePages : public ZPhysicalMemoryBackingImpl { return npages_res << ZGranuleSizeShift; } - size_t uncommit(size_t offset, size_t size) { - const size_t index = offset >> ZGranuleSizeShift; + size_t uncommit(zoffset offset, size_t size) { + const size_t index = untype(offset) >> ZGranuleSizeShift; const size_t npages = size >> ZGranuleSizeShift; size_t npages_res = npages; const bool res = FreeUserPhysicalPages(ZAWESection, &npages_res, &_page_array[index]); if (!res) { fatal("Failed to uncommit physical memory " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)", - size, offset, GetLastError()); + size, untype(offset), GetLastError()); } return npages_res << ZGranuleSizeShift; } - void map(uintptr_t addr, size_t size, size_t offset) const { + void map(zaddress_unsafe addr, size_t size, zoffset offset) const { const size_t npages = size >> ZGranuleSizeShift; - const size_t index = offset >> ZGranuleSizeShift; + const size_t index = untype(offset) >> ZGranuleSizeShift; - const bool res = MapUserPhysicalPages((char*)addr, npages, &_page_array[index]); + const bool res = MapUserPhysicalPages((char*)untype(addr), npages, &_page_array[index]); if (!res) { fatal("Failed to map view " PTR_FORMAT " " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)", - addr, size / M, offset, GetLastError()); + untype(addr), size / M, untype(offset), GetLastError()); } } - void unmap(uintptr_t addr, size_t size) const { + void unmap(zaddress_unsafe addr, size_t size) const { const size_t npages = size >> ZGranuleSizeShift; - const bool res = MapUserPhysicalPages((char*)addr, npages, nullptr); + const bool res = MapUserPhysicalPages((char*)untype(addr), npages, nullptr); if (!res) { fatal("Failed to unmap view " PTR_FORMAT " " SIZE_FORMAT "M (%d)", addr, size / M, GetLastError()); @@ -222,30 +223,30 @@ void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const { // Does nothing } -size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) { +size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) { log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - offset / M, (offset + length) / M, length / M); + untype(offset) / M, (untype(offset) + length) / M, length / M); return _impl->commit(offset, length); } -size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) { +size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) { log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - offset / M, (offset + length) / M, length / M); + untype(offset) / M, (untype(offset) + length) / M, length / M); return _impl->uncommit(offset, length); } -void ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, size_t offset) const { - assert(is_aligned(offset, ZGranuleSize), "Misaligned: " PTR_FORMAT, offset); - assert(is_aligned(addr, ZGranuleSize), "Misaligned: " PTR_FORMAT, addr); +void ZPhysicalMemoryBacking::map(zaddress_unsafe addr, size_t size, zoffset offset) const { + assert(is_aligned(untype(offset), ZGranuleSize), "Misaligned: " PTR_FORMAT, untype(offset)); + assert(is_aligned(untype(addr), ZGranuleSize), "Misaligned: " PTR_FORMAT, addr); assert(is_aligned(size, ZGranuleSize), "Misaligned: " PTR_FORMAT, size); _impl->map(addr, size, offset); } -void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const { - 
assert(is_aligned(addr, ZGranuleSize), "Misaligned"); +void ZPhysicalMemoryBacking::unmap(zaddress_unsafe addr, size_t size) const { + assert(is_aligned(untype(addr), ZGranuleSize), "Misaligned"); assert(is_aligned(size, ZGranuleSize), "Misaligned"); _impl->unmap(addr, size); diff --git a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp index 3d3479c2a1a6c..b8b73519ab5a9 100644 --- a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp +++ b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #ifndef OS_WINDOWS_GC_Z_ZPHYSICALMEMORYBACKING_WINDOWS_HPP #define OS_WINDOWS_GC_Z_ZPHYSICALMEMORYBACKING_WINDOWS_HPP +#include "gc/z/zAddress.hpp" #include "utilities/globalDefinitions.hpp" #include @@ -41,11 +42,11 @@ class ZPhysicalMemoryBacking { void warn_commit_limits(size_t max_capacity) const; - size_t commit(size_t offset, size_t length); - size_t uncommit(size_t offset, size_t length); + size_t commit(zoffset offset, size_t length); + size_t uncommit(zoffset offset, size_t length); - void map(uintptr_t addr, size_t size, size_t offset) const; - void unmap(uintptr_t addr, size_t size) const; + void map(zaddress_unsafe addr, size_t size, zoffset offset) const; + void unmap(zaddress_unsafe addr, size_t size) const; }; #endif // OS_WINDOWS_GC_Z_ZPHYSICALMEMORYBACKING_WINDOWS_HPP diff --git a/src/hotspot/os/windows/gc/z/zVirtualMemory_windows.cpp b/src/hotspot/os/windows/gc/z/zVirtualMemory_windows.cpp index de31b49d94062..581296a5ec3f5 100644 --- a/src/hotspot/os/windows/gc/z/zVirtualMemory_windows.cpp +++ b/src/hotspot/os/windows/gc/z/zVirtualMemory_windows.cpp @@ -35,8 +35,8 @@ class ZVirtualMemoryManagerImpl : public CHeapObj { public: virtual void initialize_before_reserve() {} virtual void initialize_after_reserve(ZMemoryManager* manager) {} - virtual bool reserve(uintptr_t addr, size_t size) = 0; - virtual void unreserve(uintptr_t addr, size_t size) = 0; + virtual bool reserve(zaddress_unsafe addr, size_t size) = 0; + virtual void unreserve(zaddress_unsafe addr, size_t size) = 0; }; // Implements small pages (paged) support using placeholder reservation. 
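With a single heap view in this version of ZGC, the placeholder callbacks below apply each split/coalesce once, through ZOffset::address_unsafe(start), instead of once per marked0/marked1/remapped view as before. Underneath, these operations bottom out in the Windows placeholder API. A standalone, illustrative sketch of that mechanism (assuming a Windows 10 SDK; this is not the actual ZMapper code) could look like:

  #include <windows.h>
  #include <cassert>

  // Split one placeholder covering [addr, addr + total) into granule-sized
  // placeholders, then merge them back into a single placeholder.
  static void split_and_coalesce(char* addr, size_t total, size_t granule) {
    // Splitting off a sub-range keeps both pieces as placeholders. The last
    // granule is not split explicitly (matching the comment in the hunk
    // below: splitting the final remainder would fail).
    for (size_t i = 0; i + granule < total; i += granule) {
      BOOL ok = VirtualFree(addr + i, granule, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
      assert(ok && "placeholder split failed");
    }
    // Coalescing requires [addr, addr + total) to exactly cover adjacent placeholders.
    BOOL merged = VirtualFree(addr, total, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);
    assert(merged && "placeholder coalesce failed");
  }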
@@ -44,25 +44,21 @@ class ZVirtualMemoryManagerSmallPages : public ZVirtualMemoryManagerImpl { private: class PlaceholderCallbacks : public AllStatic { public: - static void split_placeholder(uintptr_t start, size_t size) { - ZMapper::split_placeholder(ZAddress::marked0(start), size); - ZMapper::split_placeholder(ZAddress::marked1(start), size); - ZMapper::split_placeholder(ZAddress::remapped(start), size); + static void split_placeholder(zoffset start, size_t size) { + ZMapper::split_placeholder(ZOffset::address_unsafe(start), size); } - static void coalesce_placeholders(uintptr_t start, size_t size) { - ZMapper::coalesce_placeholders(ZAddress::marked0(start), size); - ZMapper::coalesce_placeholders(ZAddress::marked1(start), size); - ZMapper::coalesce_placeholders(ZAddress::remapped(start), size); + static void coalesce_placeholders(zoffset start, size_t size) { + ZMapper::coalesce_placeholders(ZOffset::address_unsafe(start), size); } - static void split_into_placeholder_granules(uintptr_t start, size_t size) { - for (uintptr_t addr = start; addr < start + size; addr += ZGranuleSize) { - split_placeholder(addr, ZGranuleSize); + static void split_into_placeholder_granules(zoffset start, size_t size) { + for (uintptr_t addr = untype(start); addr < untype(start) + size; addr += ZGranuleSize) { + split_placeholder(to_zoffset(addr), ZGranuleSize); } } - static void coalesce_into_one_placeholder(uintptr_t start, size_t size) { + static void coalesce_into_one_placeholder(zoffset start, size_t size) { assert(is_aligned(size, ZGranuleSize), "Must be granule aligned"); if (size > ZGranuleSize) { @@ -89,12 +85,12 @@ class ZVirtualMemoryManagerSmallPages : public ZVirtualMemoryManagerImpl { static void shrink_from_back_callback(const ZMemory* area, size_t size) { assert(is_aligned(size, ZGranuleSize), "Must be granule aligned"); // Don't try split the last granule - VirtualFree will fail - split_into_placeholder_granules(area->end() - size, size - ZGranuleSize); + split_into_placeholder_granules(to_zoffset(untype(area->end()) - size), size - ZGranuleSize); } static void grow_from_front_callback(const ZMemory* area, size_t size) { assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned"); - coalesce_into_one_placeholder(area->start() - size, area->size() + size); + coalesce_into_one_placeholder(to_zoffset(untype(area->start()) - size), area->size() + size); } static void grow_from_back_callback(const ZMemory* area, size_t size) { @@ -136,14 +132,14 @@ class ZVirtualMemoryManagerSmallPages : public ZVirtualMemoryManagerImpl { PlaceholderCallbacks::register_with(manager); } - virtual bool reserve(uintptr_t addr, size_t size) { - const uintptr_t res = ZMapper::reserve(addr, size); + virtual bool reserve(zaddress_unsafe addr, size_t size) { + const zaddress_unsafe res = ZMapper::reserve(addr, size); - assert(res == addr || res == 0, "Should not reserve other memory than requested"); + assert(res == addr || untype(res) == 0, "Should not reserve other memory than requested"); return res == addr; } - virtual void unreserve(uintptr_t addr, size_t size) { + virtual void unreserve(zaddress_unsafe addr, size_t size) { ZMapper::unreserve(addr, size); } }; @@ -159,14 +155,14 @@ class ZVirtualMemoryManagerLargePages : public ZVirtualMemoryManagerImpl { ZAWESection = ZMapper::create_shared_awe_section(); } - virtual bool reserve(uintptr_t addr, size_t size) { - const uintptr_t res = ZMapper::reserve_for_shared_awe(ZAWESection, addr, size); + virtual bool reserve(zaddress_unsafe addr, size_t size) { + 
const zaddress_unsafe res = ZMapper::reserve_for_shared_awe(ZAWESection, addr, size); - assert(res == addr || res == 0, "Should not reserve other memory than requested"); + assert(res == addr || untype(res) == 0, "Should not reserve other memory than requested"); return res == addr; } - virtual void unreserve(uintptr_t addr, size_t size) { + virtual void unreserve(zaddress_unsafe addr, size_t size) { ZMapper::unreserve_for_shared_awe(addr, size); } }; @@ -186,10 +182,10 @@ void ZVirtualMemoryManager::pd_initialize_after_reserve() { _impl->initialize_after_reserve(&_manager); } -bool ZVirtualMemoryManager::pd_reserve(uintptr_t addr, size_t size) { +bool ZVirtualMemoryManager::pd_reserve(zaddress_unsafe addr, size_t size) { return _impl->reserve(addr, size); } -void ZVirtualMemoryManager::pd_unreserve(uintptr_t addr, size_t size) { +void ZVirtualMemoryManager::pd_unreserve(zaddress_unsafe addr, size_t size) { _impl->unreserve(addr, size); } diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp index 3a5dade91af3d..1969df3e98399 100644 --- a/src/hotspot/os/windows/os_windows.cpp +++ b/src/hotspot/os/windows/os_windows.cpp @@ -568,7 +568,7 @@ unsigned __stdcall os::win32::thread_native_entry(void* t) { static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) { // Allocate the OSThread object - OSThread* osthread = new OSThread(); + OSThread* osthread = new (std::nothrow) OSThread(); if (osthread == nullptr) return nullptr; // Initialize the JDK library's interrupt event. @@ -673,7 +673,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, unsigned thread_id; // Allocate the OSThread object - OSThread* osthread = new OSThread(); + OSThread* osthread = new (std::nothrow) OSThread(); if (osthread == nullptr) { return false; } @@ -830,6 +830,10 @@ julong os::available_memory() { return win32::available_memory(); } +julong os::free_memory() { + return win32::available_memory(); +} + julong os::win32::available_memory() { // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect // value if total memory is larger than 4GB diff --git a/src/hotspot/os/windows/os_windows.hpp b/src/hotspot/os/windows/os_windows.hpp index 197797078d7b8..937843593ffc7 100644 --- a/src/hotspot/os/windows/os_windows.hpp +++ b/src/hotspot/os/windows/os_windows.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,6 +63,7 @@ class os::win32 { return _processor_level; } static julong available_memory(); + static julong free_memory(); static julong physical_memory() { return _physical_memory; } // load dll from Windows system directory or Windows directory diff --git a/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp b/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp index 7f319ef713010..722dffc150d0d 100644 --- a/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp +++ b/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -95,17 +95,17 @@ inline void post_membar(atomic_memory_order order) { template struct Atomic::PlatformAdd { template - D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; + D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; template - D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const { - return add_and_fetch(dest, add_value, order) - add_value; + D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order order) const { + return add_then_fetch(dest, add_value, order) - add_value; } }; template<> template -inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, +inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); @@ -131,7 +131,7 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, template<> template -inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value, +inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); diff --git a/src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp b/src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp index 5a88b1a32f081..14c093de8dd98 100644 --- a/src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp +++ b/src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -36,7 +36,7 @@ template struct Atomic::PlatformAdd { template - D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { + D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { if (order == memory_order_relaxed) { return __atomic_add_fetch(dest, add_value, __ATOMIC_RELAXED); } else { @@ -47,8 +47,8 @@ struct Atomic::PlatformAdd { } template - D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const { - return add_and_fetch(dest, add_value, order) - add_value; + D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order order) const { + return add_then_fetch(dest, add_value, order) - add_value; } }; diff --git a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp index 292fc788b944d..77104194b0b78 100644 --- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp +++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,18 +30,18 @@ template struct Atomic::PlatformAdd { template - D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const; + D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const; template - D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { - return fetch_and_add(dest, add_value, order) + add_value; + D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { + return fetch_then_add(dest, add_value, order) + add_value; } }; template<> template -inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value, - atomic_memory_order /* order */) const { +inline D Atomic::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value, + atomic_memory_order /* order */) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); D old_value; @@ -96,8 +96,8 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, #ifdef AMD64 template<> template -inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value, - atomic_memory_order /* order */) const { +inline D Atomic::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value, + atomic_memory_order /* order */) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); D old_value; diff --git a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp index 913c9806b6d6e..b39f03fd0d2a7 100644 --- a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp +++ b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -75,7 +75,7 @@ static inline int m68k_compare_and_swap(int newval, } /* Atomically add an int to memory. */ -static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) { +static inline int m68k_add_then_fetch(int add_value, volatile int *ptr) { for (;;) { // Loop until success. @@ -136,7 +136,7 @@ static inline int arm_compare_and_swap(int newval, } /* Atomically add an int to memory. */ -static inline int arm_add_and_fetch(int add_value, volatile int *ptr) { +static inline int arm_add_then_fetch(int add_value, volatile int *ptr) { for (;;) { // Loop until a __kernel_cmpxchg succeeds. 
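The Atomic::PlatformAdd hunks in this and the following os_cpu files are a mechanical rename: add_and_fetch becomes add_then_fetch (returns the updated value) and fetch_and_add becomes fetch_then_add (returns the previous value), with each platform implementing one primitive and deriving the other by adding or subtracting add_value. A self-contained illustration of that pairing, written against std::atomic rather than the HotSpot templates:

  #include <atomic>
  #include <cstdint>

  // Returns the new value, like GCC's __atomic_add_fetch.
  inline int64_t add_then_fetch(std::atomic<int64_t>& dest, int64_t add_value) {
    return dest.fetch_add(add_value) + add_value;
  }

  // Returns the old value, like __atomic_fetch_add; derived from the other
  // primitive exactly the way the specializations below derive it.
  inline int64_t fetch_then_add(std::atomic<int64_t>& dest, int64_t add_value) {
    return add_then_fetch(dest, add_value) - add_value;
  }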
@@ -163,26 +163,26 @@ static inline int arm_lock_test_and_set(int newval, volatile int *ptr) { template struct Atomic::PlatformAdd { template - D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; + D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; template - D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const { - return add_and_fetch(dest, add_value, order) - add_value; + D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order order) const { + return add_then_fetch(dest, add_value, order) - add_value; } }; template<> template -inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); #ifdef ARM - return add_using_helper(arm_add_and_fetch, dest, add_value); + return add_using_helper(arm_add_then_fetch, dest, add_value); #else #ifdef M68K - return add_using_helper(m68k_add_and_fetch, dest, add_value); + return add_using_helper(m68k_add_then_fetch, dest, add_value); #else D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE); FULL_MEM_BARRIER; @@ -193,8 +193,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, template<> template -inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); diff --git a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp index 3208db5b4a6b7..fa1ab9524425a 100644 --- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp +++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -72,19 +72,19 @@ inline D atomic_fastcall(F stub, volatile D *dest, T1 arg1, T2 arg2) { template struct Atomic::PlatformAdd { template - D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const; + D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order order) const; template - D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { - D value = fetch_and_add(dest, add_value, order) + add_value; + D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { + D value = fetch_then_add(dest, add_value, order) + add_value; return value; } }; template<> template -inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D Atomic::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); aarch64_atomic_stub_t stub; @@ -99,8 +99,8 @@ inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value, template<> template -inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D Atomic::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); aarch64_atomic_stub_t stub; diff --git a/src/hotspot/os_cpu/linux_aarch64/gc/x/xSyscall_linux_aarch64.hpp b/src/hotspot/os_cpu/linux_aarch64/gc/x/xSyscall_linux_aarch64.hpp new file mode 100644 index 0000000000000..b4c49f477a677 --- /dev/null +++ b/src/hotspot/os_cpu/linux_aarch64/gc/x/xSyscall_linux_aarch64.hpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef OS_CPU_LINUX_AARCH64_GC_X_XSYSCALL_LINUX_AARCH64_HPP +#define OS_CPU_LINUX_AARCH64_GC_X_XSYSCALL_LINUX_AARCH64_HPP + +#include + +// +// Support for building on older Linux systems +// + +#ifndef SYS_memfd_create +#define SYS_memfd_create 279 +#endif +#ifndef SYS_fallocate +#define SYS_fallocate 47 +#endif + +#endif // OS_CPU_LINUX_AARCH64_GC_X_XSYSCALL_LINUX_AARCH64_HPP diff --git a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp index ca66514755ea8..814dbd9aab501 100644 --- a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp +++ b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -101,17 +101,17 @@ inline void Atomic::PlatformStore<8>::operator()(T volatile* dest, template struct Atomic::PlatformAdd { template - D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; + D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; template - D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const { - return add_and_fetch(dest, add_value, order) - add_value; + D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order order) const { + return add_then_fetch(dest, add_value, order) - add_value; } }; template<> template -inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, +inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); diff --git a/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp b/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp index ba21ed23f89a8..41f1d85a2f2de 100644 --- a/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp +++ b/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -95,18 +95,18 @@ inline void post_membar(atomic_memory_order order) { template struct Atomic::PlatformAdd { template - D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; + D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; template - D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const { - return add_and_fetch(dest, add_value, order) - add_value; + D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order order) const { + return add_then_fetch(dest, add_value, order) - add_value; } }; template<> template -inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); @@ -131,8 +131,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, template<> template -inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); diff --git a/src/hotspot/os_cpu/linux_ppc/gc/x/xSyscall_linux_ppc.hpp b/src/hotspot/os_cpu/linux_ppc/gc/x/xSyscall_linux_ppc.hpp new file mode 100644 index 0000000000000..22d51cd58f542 --- /dev/null +++ b/src/hotspot/os_cpu/linux_ppc/gc/x/xSyscall_linux_ppc.hpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021 SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef OS_CPU_LINUX_PPC_GC_X_XSYSCALL_LINUX_PPC_HPP +#define OS_CPU_LINUX_PPC_GC_X_XSYSCALL_LINUX_PPC_HPP + +#include + +// +// Support for building on older Linux systems +// + + +#ifndef SYS_memfd_create +#define SYS_memfd_create 360 +#endif +#ifndef SYS_fallocate +#define SYS_fallocate 309 +#endif + +#endif // OS_CPU_LINUX_PPC_GC_X_XSYSCALL_LINUX_PPC_HPP diff --git a/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp b/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp index 11cd9e1d72da8..393c245ec0278 100644 --- a/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp +++ b/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -36,7 +36,7 @@ template struct Atomic::PlatformAdd { template - D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { + D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { if (order != memory_order_relaxed) { FULL_MEM_BARRIER; } @@ -50,8 +50,8 @@ struct Atomic::PlatformAdd { } template - D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const { - return add_and_fetch(dest, add_value, order) - add_value; + D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order order) const { + return add_then_fetch(dest, add_value, order) - add_value; } }; diff --git a/src/hotspot/os_cpu/linux_riscv/gc/x/xSyscall_linux_riscv.hpp b/src/hotspot/os_cpu/linux_riscv/gc/x/xSyscall_linux_riscv.hpp new file mode 100644 index 0000000000000..bfd49b0bf4e4d --- /dev/null +++ b/src/hotspot/os_cpu/linux_riscv/gc/x/xSyscall_linux_riscv.hpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_LINUX_RISCV_GC_X_XSYSCALL_LINUX_RISCV_HPP +#define OS_CPU_LINUX_RISCV_GC_X_XSYSCALL_LINUX_RISCV_HPP + +#include + +// +// Support for building on older Linux systems +// + +#ifndef SYS_memfd_create +#define SYS_memfd_create 279 +#endif +#ifndef SYS_fallocate +#define SYS_fallocate 47 +#endif + +#endif // OS_CPU_LINUX_RISCV_GC_X_XSYSCALL_LINUX_RISCV_HPP diff --git a/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp b/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp index a29c41eb71369..3daa9d84deaf1 100644 --- a/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp +++ b/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -77,18 +77,18 @@ inline void z196_fast_sync() { template struct Atomic::PlatformAdd { template - D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; + D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; template - D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const { - return add_and_fetch(dest, add_value, order) - add_value; + D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order order) const { + return add_then_fetch(dest, add_value, order) - add_value; } }; template<> template -inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I inc, - atomic_memory_order order) const { +inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I inc, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); @@ -141,8 +141,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I inc, template<> template -inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I inc, - atomic_memory_order order) const { +inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I inc, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); diff --git a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp index e4bba0cfa5811..2e472a020683a 100644 --- a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp +++ b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,18 +30,18 @@ template struct Atomic::PlatformAdd { template - D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const; + D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order order) const; template - D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { - return fetch_and_add(dest, add_value, order) + add_value; + D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { + return fetch_then_add(dest, add_value, order) + add_value; } }; template<> template -inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D Atomic::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); D old_value; @@ -97,8 +97,8 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, template<> template -inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D Atomic::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); D old_value; diff --git a/src/hotspot/os_cpu/linux_x86/gc/x/xSyscall_linux_x86.hpp b/src/hotspot/os_cpu/linux_x86/gc/x/xSyscall_linux_x86.hpp new file mode 100644 index 0000000000000..2709b373b2812 --- /dev/null +++ b/src/hotspot/os_cpu/linux_x86/gc/x/xSyscall_linux_x86.hpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef OS_CPU_LINUX_X86_GC_X_XSYSCALL_LINUX_X86_HPP +#define OS_CPU_LINUX_X86_GC_X_XSYSCALL_LINUX_X86_HPP + +#include + +// +// Support for building on older Linux systems +// + +#ifndef SYS_memfd_create +#define SYS_memfd_create 319 +#endif +#ifndef SYS_fallocate +#define SYS_fallocate 285 +#endif + +#endif // OS_CPU_LINUX_X86_GC_X_XSYSCALL_LINUX_X86_HPP diff --git a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp index 05b6d07752475..eefa8d5d06201 100644 --- a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp +++ b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. 
* Copyright 2007, 2008, 2011, 2015, Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -33,18 +33,18 @@ template struct Atomic::PlatformAdd { template - D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; + D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; template - D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const { - return add_and_fetch(dest, add_value, order) - add_value; + D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order order) const { + return add_then_fetch(dest, add_value, order) - add_value; } }; template<> template -inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); @@ -55,8 +55,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, template<> template -inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); diff --git a/src/hotspot/os_cpu/windows_aarch64/atomic_windows_aarch64.hpp b/src/hotspot/os_cpu/windows_aarch64/atomic_windows_aarch64.hpp index 970b37450ca90..90fc8ecfba412 100644 --- a/src/hotspot/os_cpu/windows_aarch64/atomic_windows_aarch64.hpp +++ b/src/hotspot/os_cpu/windows_aarch64/atomic_windows_aarch64.hpp @@ -39,11 +39,11 @@ template struct Atomic::PlatformAdd { template - D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; + D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; template - D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const { - return add_and_fetch(dest, add_value, order) - add_value; + D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order order) const { + return add_then_fetch(dest, add_value, order) - add_value; } }; @@ -53,9 +53,9 @@ struct Atomic::PlatformAdd { #define DEFINE_INTRINSIC_ADD(IntrinsicName, IntrinsicType) \ template<> \ template \ - inline D Atomic::PlatformAdd::add_and_fetch(D volatile* dest, \ - I add_value, \ - atomic_memory_order order) const { \ + inline D Atomic::PlatformAdd::add_then_fetch(D volatile* dest, \ + I add_value, \ + atomic_memory_order order) const { \ STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(D)); \ return PrimitiveConversions::cast( \ IntrinsicName(reinterpret_cast(dest), \ diff --git a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp index 2f46c0cd89d99..aa0fef3b83a94 100644 --- a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp +++ b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -42,11 +42,11 @@ template<> inline void ScopedFence::postfix() { OrderAccess::fe template struct Atomic::PlatformAdd { template - D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; + D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; template - D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const { - return add_and_fetch(dest, add_value, order) - add_value; + D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order order) const { + return add_then_fetch(dest, add_value, order) - add_value; } }; @@ -56,9 +56,9 @@ struct Atomic::PlatformAdd { #define DEFINE_INTRINSIC_ADD(IntrinsicName, IntrinsicType) \ template<> \ template \ - inline D Atomic::PlatformAdd::add_and_fetch(D volatile* dest, \ - I add_value, \ - atomic_memory_order order) const { \ + inline D Atomic::PlatformAdd::add_then_fetch(D volatile* dest, \ + I add_value, \ + atomic_memory_order order) const { \ STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(D)); \ return PrimitiveConversions::cast( \ IntrinsicName(reinterpret_cast(dest), \ diff --git a/src/hotspot/share/asm/assembler.hpp b/src/hotspot/share/asm/assembler.hpp index 1593d3885797a..7629d368241c8 100644 --- a/src/hotspot/share/asm/assembler.hpp +++ b/src/hotspot/share/asm/assembler.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -242,6 +242,7 @@ class AbstractAssembler : public ResourceObj { }; friend class InstructionMark; + public: // count size of instructions which are skipped from inline heuristics class InlineSkippedInstructionsCounter: public StackObj { private: @@ -254,6 +255,8 @@ class AbstractAssembler : public ResourceObj { _assm->register_skipped(_assm->pc() - _start); } }; + + protected: #ifdef ASSERT // Make it return true on platforms which need to verify // instruction boundaries for some operations. diff --git a/src/hotspot/share/asm/codeBuffer.cpp b/src/hotspot/share/asm/codeBuffer.cpp index 8c8230bd9ae88..95effc5c5af64 100644 --- a/src/hotspot/share/asm/codeBuffer.cpp +++ b/src/hotspot/share/asm/codeBuffer.cpp @@ -323,7 +323,8 @@ void CodeSection::relocate(address at, RelocationHolder const& spec, int format) rtype == relocInfo::runtime_call_type || rtype == relocInfo::internal_word_type|| rtype == relocInfo::section_word_type || - rtype == relocInfo::external_word_type, + rtype == relocInfo::external_word_type|| + rtype == relocInfo::barrier_type, "code needs relocation information"); // leave behind an indication that we attempted a relocation DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress); diff --git a/src/hotspot/share/asm/codeBuffer.hpp b/src/hotspot/share/asm/codeBuffer.hpp index 029691c95fdd4..b4e41ba51780c 100644 --- a/src/hotspot/share/asm/codeBuffer.hpp +++ b/src/hotspot/share/asm/codeBuffer.hpp @@ -180,6 +180,7 @@ class CodeSection { // Mark scratch buffer. 
void set_scratch_emit() { _scratch_emit = true; } + void clear_scratch_emit() { _scratch_emit = false; } bool scratch_emit() { return _scratch_emit; } CodeBuffer* outer() const { return _outer; } diff --git a/src/hotspot/share/c1/c1_Canonicalizer.cpp b/src/hotspot/share/c1/c1_Canonicalizer.cpp index a19acff4d14d3..a358125b1c2b6 100644 --- a/src/hotspot/share/c1/c1_Canonicalizer.cpp +++ b/src/hotspot/share/c1/c1_Canonicalizer.cpp @@ -89,9 +89,9 @@ void Canonicalizer::do_Op2(Op2* x) { { jint a = x->x()->type()->as_IntConstant()->value(); jint b = x->y()->type()->as_IntConstant()->value(); switch (x->op()) { - case Bytecodes::_iadd: set_constant(a + b); return; - case Bytecodes::_isub: set_constant(a - b); return; - case Bytecodes::_imul: set_constant(a * b); return; + case Bytecodes::_iadd: set_constant(java_add(a, b)); return; + case Bytecodes::_isub: set_constant(java_subtract(a, b)); return; + case Bytecodes::_imul: set_constant(java_multiply(a, b)); return; case Bytecodes::_idiv: if (b != 0) { if (a == min_jint && b == -1) { @@ -335,9 +335,9 @@ void Canonicalizer::do_NegateOp(NegateOp* x) { ValueType* t = x->x()->type(); if (t->is_constant()) { switch (t->tag()) { - case intTag : set_constant(-t->as_IntConstant ()->value()); return; - case longTag : set_constant(-t->as_LongConstant ()->value()); return; - case floatTag : set_constant(-t->as_FloatConstant ()->value()); return; + case intTag : set_constant(java_negate(t->as_IntConstant()->value())); return; + case longTag : set_constant(java_negate(t->as_LongConstant()->value())); return; + case floatTag : set_constant(-t->as_FloatConstant()->value()); return; case doubleTag: set_constant(-t->as_DoubleConstant()->value()); return; default : ShouldNotReachHere(); } diff --git a/src/hotspot/share/c1/c1_LIR.cpp b/src/hotspot/share/c1/c1_LIR.cpp index a553af52a801a..bddc4c9cbbcf7 100644 --- a/src/hotspot/share/c1/c1_LIR.cpp +++ b/src/hotspot/share/c1/c1_LIR.cpp @@ -1143,8 +1143,14 @@ void LIR_List::set_cmp_oprs(LIR_Op* op) { op->as_Op4()->set_in_opr3(_cmp_opr1); op->as_Op4()->set_in_opr4(_cmp_opr2); break; + case lir_cas_long: + case lir_cas_obj: + case lir_cas_int: + _cmp_opr1 = op->as_OpCompareAndSwap()->result_opr(); + _cmp_opr2 = LIR_OprFact::intConst(0); + break; #if INCLUDE_ZGC - case lir_zloadbarrier_test: + case lir_xloadbarrier_test: _cmp_opr1 = FrameMap::as_opr(t1); _cmp_opr2 = LIR_OprFact::intConst(0); break; diff --git a/src/hotspot/share/c1/c1_LIR.hpp b/src/hotspot/share/c1/c1_LIR.hpp index a0987a23feb74..90f8bbb2d5b2c 100644 --- a/src/hotspot/share/c1/c1_LIR.hpp +++ b/src/hotspot/share/c1/c1_LIR.hpp @@ -1018,9 +1018,9 @@ enum LIR_Code { , lir_assert , end_opAssert #ifdef INCLUDE_ZGC - , begin_opZLoadBarrierTest - , lir_zloadbarrier_test - , end_opZLoadBarrierTest + , begin_opXLoadBarrierTest + , lir_xloadbarrier_test + , end_opXLoadBarrierTest #endif }; diff --git a/src/hotspot/share/c1/c1_LIRAssembler.hpp b/src/hotspot/share/c1/c1_LIRAssembler.hpp index 270a63d7cfcec..8c89cb0adfae6 100644 --- a/src/hotspot/share/c1/c1_LIRAssembler.hpp +++ b/src/hotspot/share/c1/c1_LIRAssembler.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -91,6 +91,7 @@ class LIR_Assembler: public CompilationResourceObj { void emit_stubs(CodeStubList* stub_list); + public: // addresses Address as_Address(LIR_Address* addr); Address as_Address_lo(LIR_Address* addr); @@ -104,6 +105,7 @@ class LIR_Assembler: public CompilationResourceObj { ImplicitNullCheckStub* add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo); ImplicitNullCheckStub* add_debug_info_for_null_check_here(CodeEmitInfo* info); + private: void breakpoint(); void push(LIR_Opr opr); void pop(LIR_Opr opr); diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp index 4ff2bf9199640..85fb44e2b383d 100644 --- a/src/hotspot/share/c1/c1_LIRGenerator.cpp +++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp @@ -843,8 +843,8 @@ void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** ex if (expected_type != NULL) { BasicType t = expected_type->element_type()->basic_type(); int element_size = type2aelembytes(t); - if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) && - ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) { + if (((arrayOopDesc::base_offset_in_bytes(t) + (uint)s_offs * element_size) % HeapWordSize == 0) && + ((arrayOopDesc::base_offset_in_bytes(t) + (uint)d_offs * element_size) % HeapWordSize == 0)) { flags &= ~LIR_OpArrayCopy::unaligned; } } diff --git a/src/hotspot/share/c1/c1_RangeCheckElimination.cpp b/src/hotspot/share/c1/c1_RangeCheckElimination.cpp index 361b7fd4e90ae..3da55f0001fe5 100644 --- a/src/hotspot/share/c1/c1_RangeCheckElimination.cpp +++ b/src/hotspot/share/c1/c1_RangeCheckElimination.cpp @@ -270,10 +270,11 @@ void RangeCheckEliminator::Visitor::do_ArithmeticOp(ArithmeticOp *ao) { Bound * bound = _rce->get_bound(y); if (bound->has_upper() && bound->has_lower()) { - int new_lower = bound->lower() + const_value; + // TODO: consider using __builtin_add_overflow jlong new_lowerl = ((jlong)bound->lower()) + const_value; - int new_upper = bound->upper() + const_value; + jint new_lower = low(new_lowerl); jlong new_upperl = ((jlong)bound->upper()) + const_value; + jint new_upper = low(new_upperl); if (((jlong)new_lower) == new_lowerl && ((jlong)new_upper == new_upperl)) { Bound *newBound = new Bound(new_lower, bound->lower_instr(), new_upper, bound->upper_instr()); diff --git a/src/hotspot/share/cds/dynamicArchive.cpp b/src/hotspot/share/cds/dynamicArchive.cpp index 0aa11ef8d5ba7..43e425c11c531 100644 --- a/src/hotspot/share/cds/dynamicArchive.cpp +++ b/src/hotspot/share/cds/dynamicArchive.cpp @@ -445,3 +445,12 @@ bool DynamicArchive::validate(FileMapInfo* dynamic_info) { return true; } + +void DynamicArchiveHeader::print(outputStream* st) { + ResourceMark rm; + + st->print_cr("- base_header_crc: 0x%08x", base_header_crc()); + for (int i = 0; i < NUM_CDS_REGIONS; i++) { + st->print_cr("- base_region_crc[%d]: 0x%08x", i, base_region_crc(i)); + } +} diff --git a/src/hotspot/share/cds/dynamicArchive.hpp b/src/hotspot/share/cds/dynamicArchive.hpp index 2cd03b5b4e75a..ef40d942d079b 100644 --- a/src/hotspot/share/cds/dynamicArchive.hpp +++ b/src/hotspot/share/cds/dynamicArchive.hpp @@ -54,6 +54,7 @@ class DynamicArchiveHeader : public FileMapHeader { assert(is_valid_region(i), "must be"); _base_region_crc[i] = c; } + void print(outputStream* st); }; class DynamicArchive : AllStatic { diff --git a/src/hotspot/share/cds/filemap.cpp 
b/src/hotspot/share/cds/filemap.cpp index 8e2a5b29ee8ff..999b18d70f805 100644 --- a/src/hotspot/share/cds/filemap.cpp +++ b/src/hotspot/share/cds/filemap.cpp @@ -2394,6 +2394,13 @@ FileMapRegion* FileMapInfo::last_core_region() const { return region_at(MetaspaceShared::ro); } +void FileMapInfo::print(outputStream* st) const { + header()->print(st); + if (!is_static()) { + dynamic_header()->print(st); + } +} + void FileMapHeader::set_as_offset(char* p, size_t *offset) { *offset = ArchiveBuilder::current()->any_to_offset((address)p); } diff --git a/src/hotspot/share/cds/filemap.hpp b/src/hotspot/share/cds/filemap.hpp index dce1ad94b6203..70ccb10e8a6dc 100644 --- a/src/hotspot/share/cds/filemap.hpp +++ b/src/hotspot/share/cds/filemap.hpp @@ -530,9 +530,7 @@ class FileMapInfo : public CHeapObj { return header()->region_at(i); } - void print(outputStream* st) { - header()->print(st); - } + void print(outputStream* st) const; const char* vm_version() { return header()->jvm_ident(); diff --git a/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp b/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp index 6b390a0507998..6880194009c42 100644 --- a/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp +++ b/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp @@ -56,7 +56,7 @@ void ClassLoaderDataGraph::inc_instance_classes(size_t count) { } void ClassLoaderDataGraph::dec_instance_classes(size_t count) { - size_t old_count = Atomic::fetch_and_add(&_num_instance_classes, -count, memory_order_relaxed); + size_t old_count = Atomic::fetch_then_add(&_num_instance_classes, -count, memory_order_relaxed); assert(old_count >= count, "Sanity"); } @@ -65,7 +65,7 @@ void ClassLoaderDataGraph::inc_array_classes(size_t count) { } void ClassLoaderDataGraph::dec_array_classes(size_t count) { - size_t old_count = Atomic::fetch_and_add(&_num_array_classes, -count, memory_order_relaxed); + size_t old_count = Atomic::fetch_then_add(&_num_array_classes, -count, memory_order_relaxed); assert(old_count >= count, "Sanity"); } diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp index 8db4c08c96638..a2a567c790333 100644 --- a/src/hotspot/share/classfile/stringTable.cpp +++ b/src/hotspot/share/classfile/stringTable.cpp @@ -116,6 +116,8 @@ static size_t _current_size = 0; static volatile size_t _items_count = 0; volatile bool _alt_hash = false; + +static bool _rehashed = false; static uint64_t _alt_hash_seed = 0; uintx hash_string(const jchar* s, int len, bool useAlt) { @@ -529,20 +531,46 @@ bool StringTable::do_rehash() { return true; } +bool StringTable::should_grow() { + return get_load_factor() > PREF_AVG_LIST_LEN && !_local_table->is_max_size_reached(); +} + +bool StringTable::rehash_table_expects_safepoint_rehashing() { + // No rehashing required + if (!needs_rehashing()) { + return false; + } + + // Grow instead of rehash + if (should_grow()) { + return false; + } + + // Already rehashed + if (_rehashed) { + return false; + } + + // Resizing in progress + if (!_local_table->is_safepoint_safe()) { + return false; + } + + return true; +} + void StringTable::rehash_table() { - static bool rehashed = false; log_debug(stringtable)("Table imbalanced, rehashing called."); // Grow instead of rehash. 
- if (get_load_factor() > PREF_AVG_LIST_LEN && - !_local_table->is_max_size_reached()) { + if (should_grow()) { log_debug(stringtable)("Choosing growing over rehashing."); trigger_concurrent_work(); _needs_rehashing = false; return; } // Already rehashed. - if (rehashed) { + if (_rehashed) { log_warning(stringtable)("Rehashing already done, still long lists."); trigger_concurrent_work(); _needs_rehashing = false; @@ -552,7 +580,7 @@ void StringTable::rehash_table() { _alt_hash_seed = AltHashing::compute_seed(); { if (do_rehash()) { - rehashed = true; + _rehashed = true; } else { log_info(stringtable)("Resizes in progress rehashing skipped."); } @@ -631,12 +659,11 @@ class VerifyCompStrings : StackObj { return java_lang_String::equals(a, b); } - ResizeableResourceHashtable _table; public: size_t _errors; - VerifyCompStrings() : _table(unsigned(_items_count / 8) + 1), _errors(0) {} + VerifyCompStrings() : _table(unsigned(_items_count / 8) + 1, 0 /* do not resize */), _errors(0) {} bool operator()(WeakHandle* val) { oop s = val->resolve(); if (s == nullptr) { diff --git a/src/hotspot/share/classfile/stringTable.hpp b/src/hotspot/share/classfile/stringTable.hpp index 44de539f53d3a..9549a8ce00984 100644 --- a/src/hotspot/share/classfile/stringTable.hpp +++ b/src/hotspot/share/classfile/stringTable.hpp @@ -96,6 +96,11 @@ class StringTable : public CHeapObj{ static oop intern(const char *utf8_string, TRAPS); // Rehash the string table if it gets out of balance +private: + static bool should_grow(); + +public: + static bool rehash_table_expects_safepoint_rehashing(); static void rehash_table(); static bool needs_rehashing() { return _needs_rehashing; } static inline void update_needs_rehash(bool rehash) { diff --git a/src/hotspot/share/classfile/symbolTable.cpp b/src/hotspot/share/classfile/symbolTable.cpp index f277aa5f99d13..61d5ba576b54a 100644 --- a/src/hotspot/share/classfile/symbolTable.cpp +++ b/src/hotspot/share/classfile/symbolTable.cpp @@ -103,6 +103,7 @@ static THREAD_LOCAL bool _lookup_shared_first = false; // Static arena for symbols that are not deallocated Arena* SymbolTable::_arena = nullptr; +static bool _rehashed = false; static uint64_t _alt_hash_seed = 0; static inline void log_trace_symboltable_helper(Symbol* sym, const char* msg) { @@ -805,13 +806,39 @@ bool SymbolTable::do_rehash() { return true; } +bool SymbolTable::should_grow() { + return get_load_factor() > PREF_AVG_LIST_LEN && !_local_table->is_max_size_reached(); +} + +bool SymbolTable::rehash_table_expects_safepoint_rehashing() { + // No rehashing required + if (!needs_rehashing()) { + return false; + } + + // Grow instead of rehash + if (should_grow()) { + return false; + } + + // Already rehashed + if (_rehashed) { + return false; + } + + // Resizing in progress + if (!_local_table->is_safepoint_safe()) { + return false; + } + + return true; +} + void SymbolTable::rehash_table() { - static bool rehashed = false; log_debug(symboltable)("Table imbalanced, rehashing called."); // Grow instead of rehash. - if (get_load_factor() > PREF_AVG_LIST_LEN && - !_local_table->is_max_size_reached()) { + if (should_grow()) { log_debug(symboltable)("Choosing growing over rehashing."); trigger_cleanup(); _needs_rehashing = false; @@ -819,7 +846,7 @@ void SymbolTable::rehash_table() { } // Already rehashed. 
- if (rehashed) { + if (_rehashed) { log_warning(symboltable)("Rehashing already done, still long lists."); trigger_cleanup(); _needs_rehashing = false; @@ -829,7 +856,7 @@ void SymbolTable::rehash_table() { _alt_hash_seed = AltHashing::compute_seed(); if (do_rehash()) { - rehashed = true; + _rehashed = true; } else { log_info(symboltable)("Resizes in progress rehashing skipped."); } diff --git a/src/hotspot/share/classfile/symbolTable.hpp b/src/hotspot/share/classfile/symbolTable.hpp index d3d279987c083..282dd574c683a 100644 --- a/src/hotspot/share/classfile/symbolTable.hpp +++ b/src/hotspot/share/classfile/symbolTable.hpp @@ -144,6 +144,11 @@ class SymbolTable : public AllStatic { static Symbol* new_permanent_symbol(const char* name); // Rehash the string table if it gets out of balance +private: + static bool should_grow(); + +public: + static bool rehash_table_expects_safepoint_rehashing(); static void rehash_table(); static bool needs_rehashing() { return _needs_rehashing; } static inline void update_needs_rehash(bool rehash) { diff --git a/src/hotspot/share/code/icBuffer.cpp b/src/hotspot/share/code/icBuffer.cpp index f9a3d345a3f42..a43d3678ad31b 100644 --- a/src/hotspot/share/code/icBuffer.cpp +++ b/src/hotspot/share/code/icBuffer.cpp @@ -161,6 +161,20 @@ void InlineCacheBuffer::refill_ic_stubs() { VMThread::execute(&ibf); } +bool InlineCacheBuffer::needs_update_inline_caches() { + // Stub removal + if (buffer()->number_of_stubs() > 0) { + return true; + } + + // Release pending CompiledICHolder + if (pending_icholder_count() > 0) { + return true; + } + + return false; +} + void InlineCacheBuffer::update_inline_caches() { if (buffer()->number_of_stubs() > 0) { if (TraceICBuffer) { diff --git a/src/hotspot/share/code/icBuffer.hpp b/src/hotspot/share/code/icBuffer.hpp index 135e11928f980..d385b99d59d42 100644 --- a/src/hotspot/share/code/icBuffer.hpp +++ b/src/hotspot/share/code/icBuffer.hpp @@ -167,6 +167,7 @@ class InlineCacheBuffer: public AllStatic { static bool contains(address instruction_address); // removes the ICStubs after backpatching + static bool needs_update_inline_caches(); static void update_inline_caches(); static void refill_ic_stubs(); diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp index 7e707b69b52f8..ded61f989487f 100644 --- a/src/hotspot/share/code/nmethod.cpp +++ b/src/hotspot/share/code/nmethod.cpp @@ -550,9 +550,7 @@ nmethod* nmethod::new_nmethod(const methodHandle& method, #if INCLUDE_JVMCI , char* speculations, int speculations_len, - int nmethod_mirror_index, - const char* nmethod_mirror_name, - FailedSpeculation** failed_speculations + JVMCINMethodData* jvmci_data #endif ) { @@ -561,7 +559,7 @@ nmethod* nmethod::new_nmethod(const methodHandle& method, // create nmethod nmethod* nm = nullptr; #if INCLUDE_JVMCI - int jvmci_data_size = !compiler->is_jvmci() ? 0 : JVMCINMethodData::compute_size(nmethod_mirror_name); + int jvmci_data_size = compiler->is_jvmci() ? 
jvmci_data->size() : 0; #endif int nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod)) @@ -588,17 +586,11 @@ nmethod* nmethod::new_nmethod(const methodHandle& method, #if INCLUDE_JVMCI , speculations, speculations_len, - jvmci_data_size + jvmci_data #endif ); if (nm != nullptr) { -#if INCLUDE_JVMCI - if (compiler->is_jvmci()) { - // Initialize the JVMCINMethodData object inlined into nm - nm->jvmci_nmethod_data()->initialize(nmethod_mirror_index, nmethod_mirror_name, failed_speculations); - } -#endif // To make dependency checking during class loading fast, record // the nmethod dependencies in the classes it is dependent on. // This allows the dependency checking code to simply walk the @@ -786,7 +778,7 @@ nmethod::nmethod( #if INCLUDE_JVMCI , char* speculations, int speculations_len, - int jvmci_data_size + JVMCINMethodData* jvmci_data #endif ) : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true), @@ -866,6 +858,7 @@ nmethod::nmethod( #if INCLUDE_JVMCI _speculations_offset = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize); _jvmci_data_offset = _speculations_offset + align_up(speculations_len, oopSize); + int jvmci_data_size = compiler->is_jvmci() ? jvmci_data->size() : 0; _nmethod_end_offset = _jvmci_data_offset + align_up(jvmci_data_size, oopSize); #else _nmethod_end_offset = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize); @@ -885,6 +878,13 @@ nmethod::nmethod( dependencies->copy_to(this); clear_unloading_state(); +#if INCLUDE_JVMCI + if (compiler->is_jvmci()) { + // Initialize the JVMCINMethodData object inlined into nm + jvmci_nmethod_data()->copy(jvmci_data); + } +#endif + Universe::heap()->register_nmethod(this); debug_only(Universe::heap()->verify_nmethod(this)); diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp index 669baff1b5195..2b4fc56ae673d 100644 --- a/src/hotspot/share/code/nmethod.hpp +++ b/src/hotspot/share/code/nmethod.hpp @@ -292,9 +292,9 @@ class nmethod : public CompiledMethod { AbstractCompiler* compiler, CompLevel comp_level #if INCLUDE_JVMCI - , char* speculations, - int speculations_len, - int jvmci_data_size + , char* speculations = nullptr, + int speculations_len = 0, + JVMCINMethodData* jvmci_data = nullptr #endif ); @@ -345,9 +345,7 @@ class nmethod : public CompiledMethod { #if INCLUDE_JVMCI , char* speculations = nullptr, int speculations_len = 0, - int nmethod_mirror_index = -1, - const char* nmethod_mirror_name = nullptr, - FailedSpeculation** failed_speculations = nullptr + JVMCINMethodData* jvmci_data = nullptr #endif ); diff --git a/src/hotspot/share/code/relocInfo.hpp b/src/hotspot/share/code/relocInfo.hpp index ef8027760d7fa..2972f30ce5603 100644 --- a/src/hotspot/share/code/relocInfo.hpp +++ b/src/hotspot/share/code/relocInfo.hpp @@ -275,6 +275,7 @@ class relocInfo { data_prefix_tag = 15, // tag for a prefix (carries data arguments) post_call_nop_type = 16, // A tag for post call nop relocations entry_guard_type = 17, // A tag for an nmethod entry barrier guard value + barrier_type = 18, // GC barrier data type_mask = 31 // A mask which selects only the above values }; @@ -316,6 +317,7 @@ class relocInfo { visitor(trampoline_stub) \ visitor(post_call_nop) \ visitor(entry_guard) \ + visitor(barrier) \ public: @@ -829,7 +831,6 @@ class Relocation { protected: short* data() const { return binding()->data(); } int datalen() const { return 
binding()->datalen(); } - int format() const { return binding()->format(); } public: // Make a filler relocation. @@ -841,6 +842,8 @@ class Relocation { // trivial, so this must not be virtual (and hence non-trivial). ~Relocation() = default; + int format() const { return binding()->format(); } + relocInfo::relocType type() const { return _rtype; } // Copy this relocation into holder. @@ -1078,6 +1081,26 @@ class metadata_Relocation : public DataRelocation { }; +class barrier_Relocation : public Relocation { + + public: + // The uninitialized value used before the relocation has been patched. + // Code assumes that the unpatched value is zero. + static const int16_t unpatched = 0; + + static RelocationHolder spec() { + return RelocationHolder::construct(); + } + + void copy_into(RelocationHolder& holder) const override; + + private: + friend class RelocIterator; + friend class RelocationHolder; + barrier_Relocation() : Relocation(relocInfo::barrier_type) { } +}; + + class virtual_call_Relocation : public CallRelocation { public: diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp index 0671edc739059..9c9483353a429 100644 --- a/src/hotspot/share/compiler/compileBroker.cpp +++ b/src/hotspot/share/compiler/compileBroker.cpp @@ -1011,7 +1011,7 @@ void CompileBroker::init_compiler_threads() { void CompileBroker::possibly_add_compiler_threads(JavaThread* THREAD) { - julong available_memory = os::available_memory(); + julong free_memory = os::free_memory(); // If SegmentedCodeCache is off, both values refer to the single heap (with type CodeBlobType::All). size_t available_cc_np = CodeCache::unallocated_capacity(CodeBlobType::MethodNonProfiled), available_cc_p = CodeCache::unallocated_capacity(CodeBlobType::MethodProfiled); @@ -1023,7 +1023,7 @@ void CompileBroker::possibly_add_compiler_threads(JavaThread* THREAD) { int old_c2_count = _compilers[1]->num_compiler_threads(); int new_c2_count = MIN4(_c2_count, _c2_compile_queue->size() / 2, - (int)(available_memory / (200*M)), + (int)(free_memory / (200*M)), (int)(available_cc_np / (128*K))); for (int i = old_c2_count; i < new_c2_count; i++) { @@ -1070,8 +1070,8 @@ void CompileBroker::possibly_add_compiler_threads(JavaThread* THREAD) { ThreadsListHandle tlh; // name() depends on the TLH. assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct)); stringStream msg; - msg.print("Added compiler thread %s (available memory: %dMB, available non-profiled code cache: %dMB)", - ct->name(), (int)(available_memory/M), (int)(available_cc_np/M)); + msg.print("Added compiler thread %s (free memory: %dMB, available non-profiled code cache: %dMB)", + ct->name(), (int)(free_memory/M), (int)(available_cc_np/M)); print_compiler_threads(msg); } } @@ -1081,7 +1081,7 @@ void CompileBroker::possibly_add_compiler_threads(JavaThread* THREAD) { int old_c1_count = _compilers[0]->num_compiler_threads(); int new_c1_count = MIN4(_c1_count, _c1_compile_queue->size() / 4, - (int)(available_memory / (100*M)), + (int)(free_memory / (100*M)), (int)(available_cc_p / (128*K))); for (int i = old_c1_count; i < new_c1_count; i++) { @@ -1093,8 +1093,8 @@ void CompileBroker::possibly_add_compiler_threads(JavaThread* THREAD) { ThreadsListHandle tlh; // name() depends on the TLH. 
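The heuristic for adding compiler threads is unchanged apart from feeding it os::free_memory() instead of os::available_memory(). A small self-contained sketch of the C2 arithmetic; only the 200*M and 128*K divisors come from the hunk above, the input values are invented for the example:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t M = 1024 * 1024, K = 1024;

  // Assumed example inputs, not real measurements.
  int      c2_count        = 8;            // configured maximum C2 threads
  int      c2_queue_size   = 10;           // compile queue length
  uint64_t free_memory     = 3 * 1024 * M; // 3 GB reported free
  uint64_t available_cc_np = 64 * M;       // free non-profiled code cache

  int new_c2_count = std::min(std::min(c2_count, c2_queue_size / 2),
                              std::min((int)(free_memory / (200 * M)),
                                       (int)(available_cc_np / (128 * K))));
  // min of 8, 5, 15 and 512 -> 5 threads in this example.
  std::printf("new_c2_count = %d\n", new_c2_count);
}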
assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct)); stringStream msg; - msg.print("Added compiler thread %s (available memory: %dMB, available profiled code cache: %dMB)", - ct->name(), (int)(available_memory/M), (int)(available_cc_p/M)); + msg.print("Added compiler thread %s (free memory: %dMB, available profiled code cache: %dMB)", + ct->name(), (int)(free_memory/M), (int)(available_cc_p/M)); print_compiler_threads(msg); } } diff --git a/src/hotspot/share/gc/g1/g1BatchedTask.cpp b/src/hotspot/share/gc/g1/g1BatchedTask.cpp index 9ab47e9c4c9d0..804ec06688054 100644 --- a/src/hotspot/share/gc/g1/g1BatchedTask.cpp +++ b/src/hotspot/share/gc/g1/g1BatchedTask.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,7 +41,7 @@ const char* G1AbstractSubTask::name() const { } bool G1BatchedTask::try_claim_serial_task(int& task) { - task = Atomic::fetch_and_add(&_num_serial_tasks_done, 1); + task = Atomic::fetch_then_add(&_num_serial_tasks_done, 1); return task < _serial_tasks.length(); } diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp index f4b4938769b48..ac74da114edee 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -2624,8 +2624,8 @@ void G1CollectedHeap::set_humongous_stats(uint num_humongous_total, uint num_hum } bool G1CollectedHeap::should_sample_collection_set_candidates() const { - G1CollectionSetCandidates* candidates = G1CollectedHeap::heap()->collection_set()->candidates(); - return candidates != nullptr && candidates->num_remaining() > 0; + const G1CollectionSetCandidates* candidates = collection_set()->candidates(); + return !candidates->is_empty(); } void G1CollectedHeap::set_collection_set_candidates_stats(G1MonotonicArenaMemoryStats& stats) { diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp index 73f31d3f969b3..e1b95bf616df9 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp @@ -918,6 +918,8 @@ class G1CollectedHeap : public CollectedHeap { const G1CollectionSet* collection_set() const { return &_collection_set; } G1CollectionSet* collection_set() { return &_collection_set; } + inline bool is_collection_set_candidate(const HeapRegion* r) const; + SoftRefPolicy* soft_ref_policy() override; void initialize_serviceability() override; diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp index cf5b05b18f49d..bde7d1fff8da9 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp @@ -55,7 +55,7 @@ inline JavaThread* const* G1JavaThreadsListClaimer::claim(uint& count) { if (Atomic::load(&_cur_claim) >= _list.length()) { return nullptr; } - uint claim = Atomic::fetch_and_add(&_cur_claim, _claim_step); + uint claim = Atomic::fetch_then_add(&_cur_claim, _claim_step); if (claim >= _list.length()) { return nullptr; } @@ -282,4 +282,9 @@ inline void G1CollectedHeap::set_humongous_is_live(oop obj) { } } +inline bool G1CollectedHeap::is_collection_set_candidate(const HeapRegion* r) const { + const G1CollectionSetCandidates* candidates = 
collection_set()->candidates(); + return candidates->contains(r); +} + #endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP diff --git a/src/hotspot/share/gc/g1/g1CollectionSet.cpp b/src/hotspot/share/gc/g1/g1CollectionSet.cpp index 180236d38b0f4..337cf8d68b9a7 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp +++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp @@ -50,22 +50,21 @@ G1GCPhaseTimes* G1CollectionSet::phase_times() { G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) : _g1h(g1h), _policy(policy), - _candidates(nullptr), - _eden_region_length(0), - _survivor_region_length(0), - _old_region_length(0), + _candidates(), _collection_set_regions(nullptr), _collection_set_cur_length(0), _collection_set_max_length(0), - _num_optional_regions(0), + _eden_region_length(0), + _survivor_region_length(0), + _initial_old_region_length(0), + _optional_old_regions(), _inc_build_state(Inactive), _inc_part_start(0) { } G1CollectionSet::~G1CollectionSet() { FREE_C_HEAP_ARRAY(uint, _collection_set_regions); - free_optional_regions(); - clear_candidates(); + abandon_all_candidates(); } void G1CollectionSet::init_region_lengths(uint eden_cset_region_length, @@ -75,29 +74,27 @@ void G1CollectionSet::init_region_lengths(uint eden_cset_region_length, _eden_region_length = eden_cset_region_length; _survivor_region_length = survivor_cset_region_length; - assert((size_t) young_region_length() == _collection_set_cur_length, + assert((size_t)young_region_length() == _collection_set_cur_length, "Young region length %u should match collection set length %u", young_region_length(), _collection_set_cur_length); - _old_region_length = 0; - free_optional_regions(); + _initial_old_region_length = 0; + _optional_old_regions.clear(); } void G1CollectionSet::initialize(uint max_region_length) { guarantee(_collection_set_regions == nullptr, "Must only initialize once."); _collection_set_max_length = max_region_length; _collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC); -} -void G1CollectionSet::free_optional_regions() { - _num_optional_regions = 0; + _candidates.initialize(max_region_length); } -void G1CollectionSet::clear_candidates() { - delete _candidates; - _candidates = nullptr; +void G1CollectionSet::abandon_all_candidates() { + _candidates.clear(); + _initial_old_region_length = 0; + _optional_old_regions.clear(); } -// Add the heap region at the head of the non-incremental collection set void G1CollectionSet::add_old_region(HeapRegion* hr) { assert_at_safepoint_on_vm_thread(); @@ -110,21 +107,11 @@ void G1CollectionSet::add_old_region(HeapRegion* hr) { assert(_collection_set_cur_length < _collection_set_max_length, "Collection set now larger than maximum size."); _collection_set_regions[_collection_set_cur_length++] = hr->hrm_index(); - - _old_region_length++; + _initial_old_region_length++; _g1h->old_set_remove(hr); } -void G1CollectionSet::add_optional_region(HeapRegion* hr) { - assert(hr->is_old(), "the region should be old"); - assert(!hr->in_collection_set(), "should not already be in the CSet"); - - _g1h->register_optional_region_with_region_attr(hr); - - hr->set_index_in_opt_cset(_num_optional_regions++); -} - void G1CollectionSet::start_incremental_building() { assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set."); assert(_inc_build_state == Inactive, "Precondition"); @@ -165,8 +152,7 @@ void G1CollectionSet::par_iterate(HeapRegionClosure* cl, void G1CollectionSet::iterate_optional(HeapRegionClosure* 
cl) const { assert_at_safepoint(); - for (uint i = 0; i < _num_optional_regions; i++) { - HeapRegion* r = _candidates->at(i); + for (HeapRegion* r : _optional_old_regions) { bool result = cl->do_heap_region(r); guarantee(!result, "Must not cancel iteration"); } @@ -337,24 +323,22 @@ void G1CollectionSet::finalize_old_part(double time_remaining_ms) { if (collector_state()->in_mixed_phase()) { candidates()->verify(); - uint num_initial_old_regions; - uint num_optional_old_regions; + G1CollectionCandidateRegionList initial_old_regions; + assert(_optional_old_regions.length() == 0, "must be"); - _policy->calculate_old_collection_set_regions(candidates(), - time_remaining_ms, - num_initial_old_regions, - num_optional_old_regions); + _policy->select_candidates_from_marking(&candidates()->marking_regions(), + time_remaining_ms, + &initial_old_regions, + &_optional_old_regions); - // Prepare initial old regions. - move_candidates_to_collection_set(num_initial_old_regions); - - // Prepare optional old regions for evacuation. - uint candidate_idx = candidates()->cur_idx(); - for (uint i = 0; i < num_optional_old_regions; i++) { - add_optional_region(candidates()->at(candidate_idx + i)); - } + // Move initially selected old regions to collection set directly. + move_candidates_to_collection_set(&initial_old_regions); + // Only prepare selected optional regions for now. + prepare_optional_regions(&_optional_old_regions); candidates()->verify(); + } else { + log_debug(gc, ergo, cset)("No candidates to reclaim."); } stop_incremental_building(); @@ -365,21 +349,24 @@ void G1CollectionSet::finalize_old_part(double time_remaining_ms) { QuickSort::sort(_collection_set_regions, _collection_set_cur_length, compare_region_idx, true); } -void G1CollectionSet::move_candidates_to_collection_set(uint num_old_candidate_regions) { - if (num_old_candidate_regions == 0) { - return; - } - uint candidate_idx = candidates()->cur_idx(); - for (uint i = 0; i < num_old_candidate_regions; i++) { - HeapRegion* r = candidates()->at(candidate_idx + i); - // This potentially optional candidate region is going to be an actual collection - // set region. Clear cset marker. 
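With candidates kept as explicit lists, finalize_old_part() now gets two lists back from the policy: regions to evacuate right away and regions that are only optional. The following is a rough, self-contained illustration of such a budget-based split; the fixed per-region cost and all values are assumptions, not the actual G1 prediction model:

#include <cstdio>
#include <vector>

struct Candidate { int id; double predicted_ms; };

// Split candidates (already sorted by decreasing efficiency) into "take now"
// and "optional" using a simple time budget.
static void split_by_budget(const std::vector<Candidate>& sorted_by_efficiency,
                            double budget_ms, size_t max_optional,
                            std::vector<Candidate>& initial,
                            std::vector<Candidate>& optional_regions) {
  double used = 0.0;
  for (const Candidate& c : sorted_by_efficiency) {
    if (used + c.predicted_ms <= budget_ms) {
      initial.push_back(c);               // fits into the pause goal
      used += c.predicted_ms;
    } else if (optional_regions.size() < max_optional) {
      optional_regions.push_back(c);      // evacuated only if time remains
    } else {
      break;                              // rest stays in the candidate list
    }
  }
}

int main() {
  std::vector<Candidate> cands = {{1, 2.0}, {2, 3.0}, {3, 4.0}, {4, 1.0}};
  std::vector<Candidate> initial, optional_regions;
  split_by_budget(cands, 5.0, 2, initial, optional_regions);
  std::printf("initial=%zu optional=%zu\n", initial.size(), optional_regions.size());
}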
+void G1CollectionSet::move_candidates_to_collection_set(G1CollectionCandidateRegionList* regions) { + for (HeapRegion* r : *regions) { _g1h->clear_region_attr(r); add_old_region(r); } - candidates()->remove(num_old_candidate_regions); + candidates()->remove(regions); +} + +void G1CollectionSet::prepare_optional_regions(G1CollectionCandidateRegionList* regions){ + uint cur_index = 0; + for (HeapRegion* r : *regions) { + assert(r->is_old(), "the region should be old"); + assert(!r->in_collection_set(), "should not already be in the CSet"); - candidates()->verify(); + _g1h->register_optional_region_with_region_attr(r); + + r->set_index_in_opt_cset(cur_index++); + } } void G1CollectionSet::finalize_initial_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) { @@ -390,26 +377,24 @@ void G1CollectionSet::finalize_initial_collection_set(double target_pause_time_m bool G1CollectionSet::finalize_optional_for_evacuation(double remaining_pause_time) { update_incremental_marker(); - uint num_selected_regions; - _policy->calculate_optional_collection_set_regions(candidates(), - _num_optional_regions, + G1CollectionCandidateRegionList selected_regions; + _policy->calculate_optional_collection_set_regions(&_optional_old_regions, remaining_pause_time, - num_selected_regions); + &selected_regions); - move_candidates_to_collection_set(num_selected_regions); + move_candidates_to_collection_set(&selected_regions); - _num_optional_regions -= num_selected_regions; + _optional_old_regions.remove_prefix(&selected_regions); stop_incremental_building(); _g1h->verify_region_attr_remset_is_tracked(); - return num_selected_regions > 0; + return selected_regions.length() > 0; } void G1CollectionSet::abandon_optional_collection_set(G1ParScanThreadStateSet* pss) { - for (uint i = 0; i < _num_optional_regions; i++) { - HeapRegion* r = candidates()->at(candidates()->cur_idx() + i); + for (HeapRegion* r : _optional_old_regions) { pss->record_unused_optional_region(r); // Clear collection set marker and make sure that the remembered set information // is correct as we still need it later. @@ -417,7 +402,7 @@ void G1CollectionSet::abandon_optional_collection_set(G1ParScanThreadStateSet* p _g1h->register_region_with_region_attr(r); r->clear_index_in_opt_cset(); } - free_optional_regions(); + _optional_old_regions.clear(); _g1h->verify_region_attr_remset_is_tracked(); } diff --git a/src/hotspot/share/gc/g1/g1CollectionSet.hpp b/src/hotspot/share/gc/g1/g1CollectionSet.hpp index 6ff6d3dcf46f1..133246a52081d 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSet.hpp +++ b/src/hotspot/share/gc/g1/g1CollectionSet.hpp @@ -25,11 +25,11 @@ #ifndef SHARE_GC_G1_G1COLLECTIONSET_HPP #define SHARE_GC_G1_G1COLLECTIONSET_HPP +#include "gc/g1/g1CollectionSetCandidates.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" class G1CollectedHeap; -class G1CollectionSetCandidates; class G1CollectorState; class G1GCPhaseTimes; class G1ParScanThreadStateSet; @@ -133,12 +133,8 @@ class G1CollectionSet { G1CollectedHeap* _g1h; G1Policy* _policy; - // All old gen collection set candidate regions for the current mixed phase. - G1CollectionSetCandidates* _candidates; - - uint _eden_region_length; - uint _survivor_region_length; - uint _old_region_length; + // All old gen collection set candidate regions. + G1CollectionSetCandidates _candidates; // The actual collection set as a set of region indices. 
// All entries in _collection_set_regions below _collection_set_cur_length are @@ -150,11 +146,13 @@ class G1CollectionSet { volatile uint _collection_set_cur_length; uint _collection_set_max_length; + uint _eden_region_length; + uint _survivor_region_length; + uint _initial_old_region_length; + // When doing mixed collections we can add old regions to the collection set, which - // will be collected only if there is enough time. We call these optional regions. - // This member records the current number of regions that are of that type that - // correspond to the first x entries in the collection set candidates. - uint _num_optional_regions; + // will be collected only if there is enough time. We call these optional (old) regions. + G1CollectionCandidateRegionList _optional_old_regions; enum CSetBuildType { Active, // We are actively building the collection set @@ -172,14 +170,13 @@ class G1CollectionSet { // Update the incremental collection set information when adding a region. void add_young_region_common(HeapRegion* hr); - // Add old region "hr" to the collection set. + // Add the given old region to the head of the current collection set. void add_old_region(HeapRegion* hr); - void free_optional_regions(); - // Add old region "hr" to optional collection set. - void add_optional_region(HeapRegion* hr); - - void move_candidates_to_collection_set(uint num_regions); + void move_candidates_to_collection_set(G1CollectionCandidateRegionList* regions); + // Prepares old regions in the given set for optional collection later. Does not + // add the region to collection set yet. + void prepare_optional_regions(G1CollectionCandidateRegionList* regions); // Finalize the young part of the initial collection set. Relabel survivor regions // as Eden and calculate a prediction on how long the evacuation of all young regions @@ -208,26 +205,25 @@ class G1CollectionSet { // Initializes the collection set giving the maximum possible length of the collection set. void initialize(uint max_region_length); - void clear_candidates(); + void abandon_all_candidates(); - void set_candidates(G1CollectionSetCandidates* candidates) { - assert(_candidates == nullptr, "Trying to replace collection set candidates."); - _candidates = candidates; - } - G1CollectionSetCandidates* candidates() { return _candidates; } + G1CollectionSetCandidates* candidates() { return &_candidates; } + const G1CollectionSetCandidates* candidates() const { return &_candidates; } void init_region_lengths(uint eden_cset_region_length, uint survivor_cset_region_length); uint region_length() const { return young_region_length() + - old_region_length(); } + initial_old_region_length(); } uint young_region_length() const { return eden_region_length() + survivor_region_length(); } - uint eden_region_length() const { return _eden_region_length; } + uint eden_region_length() const { return _eden_region_length; } uint survivor_region_length() const { return _survivor_region_length; } - uint old_region_length() const { return _old_region_length; } - uint optional_region_length() const { return _num_optional_regions; } + uint initial_old_region_length() const { return _initial_old_region_length; } + uint optional_region_length() const { return _optional_old_regions.length(); } + + bool only_contains_young_regions() const { return (initial_old_region_length() + optional_region_length()) == 0; } // Reset the contents of the collection set. 
void clear(); diff --git a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp index ac3cde49030bc..4842283f9ac1a 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp +++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp @@ -26,78 +26,245 @@ #include "gc/g1/g1CollectionSetCandidates.hpp" #include "gc/g1/g1CollectionSetChooser.hpp" #include "gc/g1/heapRegion.inline.hpp" +#include "utilities/bitMap.inline.hpp" +#include "utilities/growableArray.hpp" -void G1CollectionSetCandidates::remove(uint num_regions) { - assert(num_regions <= num_remaining(), "Trying to remove more regions (%u) than available (%u)", num_regions, num_remaining()); - for (uint i = 0; i < num_regions; i++) { - _remaining_reclaimable_bytes -= at(_front_idx)->reclaimable_bytes(); - _front_idx++; +G1CollectionCandidateList::G1CollectionCandidateList() : _candidates(2, mtGC) { } + +void G1CollectionCandidateList::set(G1CollectionCandidateList::CandidateInfo* candidate_infos, uint num_infos) { + assert(_candidates.is_empty(), "must be"); + + GrowableArrayFromArray a(candidate_infos, (int)num_infos); + _candidates.appendAll(&a); +} + +void G1CollectionCandidateList::remove(G1CollectionCandidateRegionList* other) { + guarantee((uint)_candidates.length() >= other->length(), "must be"); + + if (other->length() == 0) { + // Nothing to remove or nothing in the original set. + return; + } + + // Create a list from scratch, copying over the elements from the candidate + // list not in the other list. Finally deallocate and overwrite the old list. + int new_length = _candidates.length() - other->length(); + GrowableArray new_list(new_length, mtGC); + + uint other_idx = 0; + + for (uint candidate_idx = 0; candidate_idx < (uint)_candidates.length(); candidate_idx++) { + if ((other_idx == other->length()) || _candidates.at(candidate_idx)._r != other->at(other_idx)) { + new_list.append(_candidates.at(candidate_idx)); + } else { + other_idx++; + } } + _candidates.swap(&new_list); + + verify(); + assert(_candidates.length() == new_length, "must be %u %u", _candidates.length(), new_length); } -void G1CollectionSetCandidates::remove_from_end(uint num_remove, size_t wasted) { - assert(num_remove <= num_remaining(), "trying to remove more regions than remaining"); +void G1CollectionCandidateList::clear() { + _candidates.clear(); +} -#ifdef ASSERT - size_t reclaimable = 0; +#ifndef PRODUCT +void G1CollectionCandidateList::verify() { + CandidateInfo* prev = nullptr; - for (uint i = 0; i < num_remove; i++) { - uint cur_idx = _num_regions - i - 1; - reclaimable += at(cur_idx)->reclaimable_bytes(); - // Make sure we crash if we access it. - _regions[cur_idx] = nullptr; + for (uint i = 0; i < (uint)_candidates.length(); i++) { + CandidateInfo& ci = _candidates.at(i); + assert(prev == nullptr || prev->_gc_efficiency >= ci._gc_efficiency, + "Stored gc efficiency must be descending from region %u to %u", + prev->_r->hrm_index(), ci._r->hrm_index()); + prev = &ci; + assert(ci._r->rem_set()->is_tracked(), "remset for region %u must be tracked", ci._r->hrm_index()); + } +} +#endif + +int G1CollectionCandidateList::compare(CandidateInfo* ci1, CandidateInfo* ci2) { + // Make sure that null entries are moved to the end. 
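The new G1CollectionCandidateList::remove() above walks both lists once and rebuilds the candidate list without the removed regions, relying on the argument being ordered the same way as the candidate list. A standalone analogue of that ordered-subsequence removal:

#include <cassert>
#include <cstdio>
#include <vector>

// Keep every element of `all` that is not matched, in order, by `other`.
static std::vector<int> remove_ordered(const std::vector<int>& all,
                                       const std::vector<int>& other) {
  assert(other.size() <= all.size());
  std::vector<int> kept;
  kept.reserve(all.size() - other.size());
  size_t other_idx = 0;
  for (int v : all) {
    if (other_idx < other.size() && v == other[other_idx]) {
      other_idx++;            // matched, drop it
    } else {
      kept.push_back(v);      // keep everything else, order preserved
    }
  }
  assert(other_idx == other.size() && "other must be an ordered subsequence of all");
  return kept;
}

int main() {
  std::vector<int> kept = remove_ordered({1, 2, 3, 4, 5}, {2, 5});
  for (int v : kept) std::printf("%d ", v);  // prints: 1 3 4
  std::printf("\n");
}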
+ if (ci1->_r == nullptr) { + if (ci2->_r == nullptr) { + return 0; + } else { + return 1; + } + } else if (ci2->_r == nullptr) { + return -1; } - assert(reclaimable == wasted, "Recalculated reclaimable inconsistent"); + double gc_eff1 = ci1->_gc_efficiency; + double gc_eff2 = ci2->_gc_efficiency; + + if (gc_eff1 > gc_eff2) { + return -1; + } if (gc_eff1 < gc_eff2) { + return 1; + } else { + return 0; + } +} + +G1CollectionCandidateRegionList::G1CollectionCandidateRegionList() : _regions(2, mtGC) { } + +void G1CollectionCandidateRegionList::append(HeapRegion* r) { + assert(!_regions.contains(r), "must be"); + _regions.append(r); +} + +void G1CollectionCandidateRegionList::remove_prefix(G1CollectionCandidateRegionList* other) { +#ifdef ASSERT + // Check that the given list is a prefix of this list. + int i = 0; + for (HeapRegion* r : *other) { + assert(_regions.at(i) == r, "must be in order, but element %d is not", i); + i++; + } #endif - _num_regions -= num_remove; - _remaining_reclaimable_bytes -= wasted; + + if (other->length() == 0) { + return; + } + _regions.remove_till(other->length()); } -void G1CollectionSetCandidates::iterate(HeapRegionClosure* cl) { - for (uint i = _front_idx; i < _num_regions; i++) { - HeapRegion* r = _regions[i]; - if (cl->do_heap_region(r)) { - cl->set_incomplete(); - break; - } +HeapRegion* G1CollectionCandidateRegionList::at(uint index) { + return _regions.at(index); +} + +void G1CollectionCandidateRegionList::clear() { + _regions.clear(); +} + +G1CollectionSetCandidates::G1CollectionSetCandidates() : + _marking_regions(), + _contains_map(nullptr), + _max_regions(0), + _last_marking_candidates_length(0) +{ } + +G1CollectionSetCandidates::~G1CollectionSetCandidates() { + FREE_C_HEAP_ARRAY(CandidateOrigin, _contains_map); +} + +bool G1CollectionSetCandidates::is_from_marking(HeapRegion* r) const { + assert(contains(r), "must be"); + return _contains_map[r->hrm_index()] == CandidateOrigin::Marking; +} + +void G1CollectionSetCandidates::initialize(uint max_regions) { + assert(_contains_map == nullptr, "already initialized"); + _max_regions = max_regions; + _contains_map = NEW_C_HEAP_ARRAY(CandidateOrigin, max_regions, mtGC); + clear(); +} + +void G1CollectionSetCandidates::clear() { + _marking_regions.clear(); + for (uint i = 0; i < _max_regions; i++) { + _contains_map[i] = CandidateOrigin::Invalid; } + _last_marking_candidates_length = 0; } -void G1CollectionSetCandidates::iterate_backwards(HeapRegionClosure* cl) { - for (uint i = _num_regions; i > _front_idx; i--) { - HeapRegion* r = _regions[i - 1]; - if (cl->do_heap_region(r)) { - cl->set_incomplete(); - break; - } +void G1CollectionSetCandidates::set_candidates_from_marking(G1CollectionCandidateList::CandidateInfo* candidate_infos, + uint num_infos) { + assert(_marking_regions.length() == 0, "must be empty before adding new ones"); + + verify(); + + _marking_regions.set(candidate_infos, num_infos); + for (uint i = 0; i < num_infos; i++) { + HeapRegion* r = candidate_infos[i]._r; + assert(!contains(r), "must not contain region %u", r->hrm_index()); + _contains_map[r->hrm_index()] = CandidateOrigin::Marking; + } + _last_marking_candidates_length = num_infos; + + verify(); +} + +void G1CollectionSetCandidates::remove(G1CollectionCandidateRegionList* other) { + _marking_regions.remove(other); + + for (HeapRegion* r : *other) { + assert(contains(r), "must contain region %u", r->hrm_index()); + _contains_map[r->hrm_index()] = CandidateOrigin::Invalid; } + + verify(); +} + +bool 
G1CollectionSetCandidates::is_empty() const { + return length() == 0; +} + +bool G1CollectionSetCandidates::has_more_marking_candidates() const { + return _marking_regions.length() != 0; } #ifndef PRODUCT -void G1CollectionSetCandidates::verify() const { - guarantee(_front_idx <= _num_regions, "Index: %u Num_regions: %u", _front_idx, _num_regions); - uint idx = _front_idx; - size_t sum_of_reclaimable_bytes = 0; - HeapRegion *prev = nullptr; - for (; idx < _num_regions; idx++) { - HeapRegion *cur = _regions[idx]; - guarantee(cur != nullptr, "Regions after _front_idx %u cannot be NULL but %u is", _front_idx, idx); - // Currently the decision whether young gc moves region contents is determined - // at region allocation time. It is not possible that a region becomes non-movable - // at a later point, which means below condition always holds true. - guarantee(G1CollectionSetChooser::should_add(cur), - "Region %u should be eligible for addition.", cur->hrm_index()); - if (prev != nullptr) { - guarantee(prev->gc_efficiency() >= cur->gc_efficiency(), - "GC efficiency for region %u: %1.4f smaller than for region %u: %1.4f", - prev->hrm_index(), prev->gc_efficiency(), cur->hrm_index(), cur->gc_efficiency()); +void G1CollectionSetCandidates::verify_helper(G1CollectionCandidateList* list, uint& from_marking, CandidateOrigin* verify_map) { + list->verify(); + + for (uint i = 0; i < (uint)list->length(); i++) { + HeapRegion* r = list->at(i)._r; + + if (is_from_marking(r)) { + from_marking++; } - sum_of_reclaimable_bytes += cur->reclaimable_bytes(); - prev = cur; + const uint hrm_index = r->hrm_index(); + assert(_contains_map[hrm_index] == CandidateOrigin::Marking, + "must be %u is %u", hrm_index, (uint)_contains_map[hrm_index]); + assert(verify_map[hrm_index] == CandidateOrigin::Invalid, "already added"); + + verify_map[hrm_index] = CandidateOrigin::Verify; + } +} + +void G1CollectionSetCandidates::verify() { + uint from_marking = 0; + + CandidateOrigin* verify_map = NEW_C_HEAP_ARRAY(CandidateOrigin, _max_regions, mtGC); + for (uint i = 0; i < _max_regions; i++) { + verify_map[i] = CandidateOrigin::Invalid; + } + + verify_helper(&_marking_regions, from_marking, verify_map); + + assert(from_marking == marking_regions_length(), "must be"); + + // Check whether the _contains_map is consistent with the list. 
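The _contains_map gives O(1) candidate membership: one CandidateOrigin entry per region index, with Invalid meaning "not a candidate"; verify() rebuilds an independent map from the list and compares the two. A minimal standalone sketch of the same bookkeeping, with illustrative names:

#include <cstdint>
#include <cstdio>
#include <vector>

enum class Origin : uint8_t { Invalid, Marking };

struct CandidateMap {
  std::vector<Origin> by_region;

  explicit CandidateMap(size_t max_regions) : by_region(max_regions, Origin::Invalid) {}

  void add_from_marking(uint32_t region_index) { by_region[region_index] = Origin::Marking; }
  void remove(uint32_t region_index)           { by_region[region_index] = Origin::Invalid; }
  bool contains(uint32_t region_index) const   { return by_region[region_index] != Origin::Invalid; }
};

int main() {
  CandidateMap map(16);
  map.add_from_marking(3);
  std::printf("contains(3)=%d contains(4)=%d\n", map.contains(3), map.contains(4));
  map.remove(3);
  std::printf("contains(3)=%d\n", map.contains(3));
}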
+ for (uint i = 0; i < _max_regions; i++) { + assert(_contains_map[i] == verify_map[i] || + (_contains_map[i] != CandidateOrigin::Invalid && verify_map[i] == CandidateOrigin::Verify), + "Candidate origin does not match for region %u, is %u but should be %u", + i, + static_cast::type>(_contains_map[i]), + static_cast::type>(verify_map[i])); } - guarantee(sum_of_reclaimable_bytes == _remaining_reclaimable_bytes, - "Inconsistent remaining_reclaimable bytes, remaining " SIZE_FORMAT " calculated " SIZE_FORMAT, - _remaining_reclaimable_bytes, sum_of_reclaimable_bytes); + + FREE_C_HEAP_ARRAY(CandidateOrigin, verify_map); +} +#endif + +bool G1CollectionSetCandidates::contains(const HeapRegion* r) const { + const uint index = r->hrm_index(); + assert(index < _max_regions, "must be"); + return _contains_map[index] != CandidateOrigin::Invalid; +} + +const char* G1CollectionSetCandidates::get_short_type_str(const HeapRegion* r) const { + static const char* type_strings[] = { + "Ci", // Invalid + "Cm", // Marking + "Cv" // Verification + }; + + uint8_t kind = static_cast::type>(_contains_map[r->hrm_index()]); + return type_strings[kind]; } -#endif // !PRODUCT diff --git a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp index 472917de27140..80f4d0d670cdb 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp +++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp @@ -29,72 +29,200 @@ #include "gc/shared/workerThread.hpp" #include "memory/allocation.hpp" #include "runtime/globals.hpp" +#include "utilities/bitMap.hpp" +#include "utilities/growableArray.hpp" +class G1CollectionCandidateList; +class G1CollectionSetCandidates; class HeapRegion; class HeapRegionClosure; -// Set of collection set candidates, i.e. all old gen regions we consider worth -// collecting in the remainder of the current mixed phase. Regions are sorted by decreasing -// gc efficiency. -// Maintains a cursor into the list that specifies the next collection set candidate -// to put into the current collection set. -class G1CollectionSetCandidates : public CHeapObj { - HeapRegion** _regions; - uint _num_regions; // Total number of regions in the collection set candidate set. +using G1CollectionCandidateRegionListIterator = GrowableArrayIterator; - // The sum of bytes that can be reclaimed in the remaining set of collection - // set candidates. - size_t _remaining_reclaimable_bytes; - // The index of the next candidate old region to be considered for - // addition to the current collection set. - uint _front_idx; +// A set of HeapRegion*, a thin wrapper around GrowableArray. +class G1CollectionCandidateRegionList { + GrowableArray _regions; public: - G1CollectionSetCandidates(HeapRegion** regions, uint num_regions, size_t remaining_reclaimable_bytes) : - _regions(regions), - _num_regions(num_regions), - _remaining_reclaimable_bytes(remaining_reclaimable_bytes), - _front_idx(0) { } - - ~G1CollectionSetCandidates() { - FREE_C_HEAP_ARRAY(HeapRegion*, _regions); - } + G1CollectionCandidateRegionList(); + + // Append a HeapRegion to the end of this list. The region must not be in the list + // already. + void append(HeapRegion* r); + // Remove the given list of HeapRegion* from this list. The given list must be a prefix + // of this list. + void remove_prefix(G1CollectionCandidateRegionList* list); + + // Empty contents of the list. 
+ void clear(); + + HeapRegion* at(uint index); + + uint length() const { return (uint)_regions.length(); } + + G1CollectionCandidateRegionListIterator begin() const { return _regions.begin(); } + G1CollectionCandidateRegionListIterator end() const { return _regions.end(); } +}; + +class G1CollectionCandidateListIterator : public StackObj { + G1CollectionCandidateList* _which; + uint _position; + +public: + G1CollectionCandidateListIterator(G1CollectionCandidateList* which, uint position); + + G1CollectionCandidateListIterator& operator++(); + HeapRegion* operator*(); + + bool operator==(const G1CollectionCandidateListIterator& rhs); + bool operator!=(const G1CollectionCandidateListIterator& rhs); +}; - // Returns the total number of collection set candidate old regions added. - uint num_regions() { return _num_regions; } +// List of collection set candidates (regions with their efficiency) ordered by +// decreasing gc efficiency. +class G1CollectionCandidateList : public CHeapObj { + friend class G1CollectionCandidateListIterator; - uint cur_idx() const { return _front_idx; } +public: + struct CandidateInfo { + HeapRegion* _r; + double _gc_efficiency; + + CandidateInfo() : CandidateInfo(nullptr, 0.0) { } + CandidateInfo(HeapRegion* r, double gc_efficiency) : _r(r), _gc_efficiency(gc_efficiency) { } + }; + +private: + GrowableArray _candidates; + +public: + G1CollectionCandidateList(); + + // Put the given set of candidates into this list, preserving the efficiency ordering. + void set(CandidateInfo* candidate_infos, uint num_infos); + // Removes any HeapRegions stored in this list also in the other list. The other + // list may only contain regions in this list, sorted by gc efficiency. It need + // not be a prefix of this list. Returns the number of regions removed. + // E.g. if this list is "A B G H", the other list may be "A G H", but not "F" (not in + // this list) or "A H G" (wrong order). + void remove(G1CollectionCandidateRegionList* other); + + void clear(); + + CandidateInfo& at(uint position) { return _candidates.at(position); } + + uint length() const { return (uint)_candidates.length(); } + + void verify() PRODUCT_RETURN; + + // Comparison function to order regions in decreasing GC efficiency order. This + // will cause regions with a lot of live objects and large remembered sets to end + // up at the end of the list. + static int compare(CandidateInfo* ci1, CandidateInfo* ci2); + + G1CollectionCandidateListIterator begin() { + return G1CollectionCandidateListIterator(this, 0); + } - HeapRegion* at(uint idx) const { - HeapRegion* res = nullptr; - if (idx < _num_regions) { - res = _regions[idx]; - assert(res != nullptr, "Unexpected null HeapRegion at index %u", idx); - } - return res; + G1CollectionCandidateListIterator end() { + return G1CollectionCandidateListIterator(this, length()); } +}; + +// Iterator for G1CollectionSetCandidates. +class G1CollectionSetCandidatesIterator : public StackObj { + G1CollectionSetCandidates* _which; + uint _marking_position; + +public: + G1CollectionSetCandidatesIterator(G1CollectionSetCandidates* which, uint marking_position); + + G1CollectionSetCandidatesIterator& operator++(); + HeapRegion* operator*(); + + bool operator==(const G1CollectionSetCandidatesIterator& rhs); + bool operator!=(const G1CollectionSetCandidatesIterator& rhs); +}; - // Remove num_regions from the front of the collection set candidate list. - void remove(uint num_regions); - // Remove num_remove regions from the back of the collection set candidate list. 
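The cursor-based interface (cur_idx(), remove(num), remove_from_end()) is replaced by operations on explicit region lists: callers now remove exactly the regions they consumed, e.g. remove_prefix() for the optional regions that were actually evacuated. A std::vector analogue of the prefix removal, assumptions as marked:

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

// The regions to drop must be exactly the first entries of the list, in order.
static void remove_prefix(std::vector<int>& regions, const std::vector<int>& prefix) {
  assert(prefix.size() <= regions.size());
  for (size_t i = 0; i < prefix.size(); i++) {
    assert(regions[i] == prefix[i] && "argument must be a prefix of the list");
  }
  regions.erase(regions.begin(), regions.begin() + (std::ptrdiff_t)prefix.size());
}

int main() {
  std::vector<int> optional_regions = {7, 9, 11, 13};
  remove_prefix(optional_regions, {7, 9});    // the two regions actually evacuated
  for (int r : optional_regions) std::printf("%d ", r);  // prints: 11 13
  std::printf("\n");
}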
- void remove_from_end(uint num_remove, size_t wasted); +// Tracks all collection set candidates, i.e. regions that could/should be evacuated soon. +// +// These candidate regions are tracked in a list of regions, sorted by decreasing +// "gc efficiency". +// +// Currently there is only one type of such regions: +// +// * marking_regions: the set of regions selected by concurrent marking to be +// evacuated to keep overall heap occupancy stable. +// They are guaranteed to be evacuated and cleared out during +// the mixed phase. +// +class G1CollectionSetCandidates : public CHeapObj { + friend class G1CollectionSetCandidatesIterator; + + enum class CandidateOrigin : uint8_t { + Invalid, + Marking, // This region has been determined as candidate by concurrent marking. + Verify // Special value for verification. + }; + + G1CollectionCandidateList _marking_regions; + + CandidateOrigin* _contains_map; + uint _max_regions; - // Iterate over all remaining collection set candidate regions. - void iterate(HeapRegionClosure* cl); - // Iterate over all remaining collection set candidate regions from the end - // to the beginning of the set. - void iterate_backwards(HeapRegionClosure* cl); + // The number of regions from the last merge of candidates from the marking. + uint _last_marking_candidates_length; + + bool is_from_marking(HeapRegion* r) const; + +public: + G1CollectionSetCandidates(); + ~G1CollectionSetCandidates(); - // Return the number of candidate regions remaining. - uint num_remaining() { return _num_regions - _front_idx; } + G1CollectionCandidateList& marking_regions() { return _marking_regions; } - bool is_empty() { return num_remaining() == 0; } + void initialize(uint max_regions); - // Return the amount of reclaimable bytes that may be collected by the remaining - // candidate regions. - size_t remaining_reclaimable_bytes() { return _remaining_reclaimable_bytes; } + void clear(); - void verify() const PRODUCT_RETURN; + // Merge collection set candidates from marking into the current marking list + // (which needs to be empty). + void set_candidates_from_marking(G1CollectionCandidateList::CandidateInfo* candidate_infos, + uint num_infos); + // The most recent length of the list that had been merged last via + // set_candidates_from_marking(). Used for calculating minimum collection set + // regions. + uint last_marking_candidates_length() const { return _last_marking_candidates_length; } + + // Remove the given regions from the candidates. All given regions must be part + // of the candidates. 
+ void remove(G1CollectionCandidateRegionList* other); + + bool contains(const HeapRegion* r) const; + + const char* get_short_type_str(const HeapRegion* r) const; + + bool is_empty() const; + bool has_more_marking_candidates() const; + + uint marking_regions_length() const { return _marking_regions.length(); } + +private: + void verify_helper(G1CollectionCandidateList* list, uint& from_marking, CandidateOrigin* verify_map) PRODUCT_RETURN; + +public: + void verify() PRODUCT_RETURN; + + uint length() const { return marking_regions_length(); } + + // Iteration + G1CollectionSetCandidatesIterator begin() { + return G1CollectionSetCandidatesIterator(this, 0); + } + + G1CollectionSetCandidatesIterator end() { + return G1CollectionSetCandidatesIterator(this, marking_regions_length()); + } }; #endif /* SHARE_GC_G1_G1COLLECTIONSETCANDIDATES_HPP */ diff --git a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.inline.hpp b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.inline.hpp new file mode 100644 index 0000000000000..18daa3e59f8a3 --- /dev/null +++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.inline.hpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_GC_G1_G1COLLECTIONSETCANDIDATES_INLINE_HPP +#define SHARE_GC_G1_G1COLLECTIONSETCANDIDATES_INLINE_HPP + +#include "gc/g1/g1CollectionSetCandidates.hpp" + +#include "utilities/growableArray.hpp" + +inline G1CollectionCandidateListIterator::G1CollectionCandidateListIterator(G1CollectionCandidateList* which, uint position) : + _which(which), _position(position) { } + +inline G1CollectionCandidateListIterator& G1CollectionCandidateListIterator::operator++() { + assert(_position < _which->length(), "must be"); + _position++; + return *this; +} + +inline HeapRegion* G1CollectionCandidateListIterator::operator*() { + return _which->_candidates.at(_position)._r; +} + +inline bool G1CollectionCandidateListIterator::operator==(const G1CollectionCandidateListIterator& rhs) { + assert(_which == rhs._which, "iterator belongs to different array"); + return _position == rhs._position; +} + +inline bool G1CollectionCandidateListIterator::operator!=(const G1CollectionCandidateListIterator& rhs) { + return !(*this == rhs); +} + +inline G1CollectionSetCandidatesIterator::G1CollectionSetCandidatesIterator(G1CollectionSetCandidates* which, uint marking_position) : + _which(which), _marking_position(marking_position) { +} + +inline G1CollectionSetCandidatesIterator& G1CollectionSetCandidatesIterator::operator++() { + assert(_marking_position < _which->_marking_regions.length(), + "must not be at end already"); + + _marking_position++; + return *this; +} + +inline HeapRegion* G1CollectionSetCandidatesIterator::operator*() { + return _which->_marking_regions.at(_marking_position)._r; +} + +inline bool G1CollectionSetCandidatesIterator::operator==(const G1CollectionSetCandidatesIterator& rhs) { + assert(_which == rhs._which, "iterator belongs to different array"); + return _marking_position == rhs._marking_position; +} + +inline bool G1CollectionSetCandidatesIterator::operator!=(const G1CollectionSetCandidatesIterator& rhs) { + return !(*this == rhs); +} + +#endif /* SHARE_GC_G1_G1COLLECTIONSETCANDIDATES_INLINE_HPP */ diff --git a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp index efeab18cc1a57..434bd3e7c25a2 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp +++ b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp @@ -31,37 +31,6 @@ #include "runtime/atomic.hpp" #include "utilities/quickSort.hpp" -// Order regions according to GC efficiency. This will cause regions with a lot -// of live objects and large remembered sets to end up at the end of the array. -// Given that we might skip collecting the last few old regions, if after a few -// mixed GCs the remaining have reclaimable bytes under a certain threshold, the -// hope is that the ones we'll skip are ones with both large remembered sets and -// a lot of live objects, not the ones with just a lot of live objects if we -// ordered according to the amount of reclaimable bytes per region. -static int order_regions(HeapRegion* hr1, HeapRegion* hr2) { - // Make sure that null entries are moved to the end. 
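The iterators defined in the new g1CollectionSetCandidates.inline.hpp only need begin()/end(), operator++, operator* and operator!= to support the range-for loops used earlier (for (HeapRegion* r : *regions)). A minimal self-contained sketch of that iterator shape over a plain int list:

#include <cstdio>

class IntList {
  int _data[4] = {1, 2, 3, 4};
  int _length = 4;

public:
  class Iterator {
    const IntList* _which;
    int _position;
  public:
    Iterator(const IntList* which, int position) : _which(which), _position(position) {}
    Iterator& operator++() { _position++; return *this; }
    int operator*() const { return _which->_data[_position]; }
    bool operator!=(const Iterator& rhs) const { return _position != rhs._position; }
  };

  Iterator begin() const { return Iterator(this, 0); }
  Iterator end() const { return Iterator(this, _length); }
};

int main() {
  IntList list;
  for (int v : list) std::printf("%d ", v);  // prints: 1 2 3 4
  std::printf("\n");
}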
- if (hr1 == nullptr) { - if (hr2 == nullptr) { - return 0; - } else { - return 1; - } - } else if (hr2 == nullptr) { - return -1; - } - - double gc_eff1 = hr1->gc_efficiency(); - double gc_eff2 = hr2->gc_efficiency(); - - if (gc_eff1 > gc_eff2) { - return -1; - } if (gc_eff1 < gc_eff2) { - return 1; - } else { - return 0; - } -} - // Determine collection set candidates: For all regions determine whether they // should be a collection set candidates, calculate their efficiency, sort and // return them as G1CollectionSetCandidates instance. @@ -71,6 +40,8 @@ static int order_regions(HeapRegion* hr1, HeapRegion* hr2) { // owner of this object. class G1BuildCandidateRegionsTask : public WorkerTask { + using CandidateInfo = G1CollectionCandidateList::CandidateInfo; + // Work area for building the set of collection set candidates. Contains references // to heap regions with their GC efficiencies calculated. To reduce contention // on claiming array elements, worker threads claim parts of this array in chunks; @@ -82,7 +53,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask { uint const _max_size; uint const _chunk_size; - HeapRegion** _data; + CandidateInfo* _data; uint volatile _cur_claim_idx; @@ -99,15 +70,15 @@ class G1BuildCandidateRegionsTask : public WorkerTask { G1BuildCandidateArray(uint max_num_regions, uint chunk_size, uint num_workers) : _max_size(required_array_size(max_num_regions, chunk_size, num_workers)), _chunk_size(chunk_size), - _data(NEW_C_HEAP_ARRAY(HeapRegion*, _max_size, mtGC)), + _data(NEW_C_HEAP_ARRAY(CandidateInfo, _max_size, mtGC)), _cur_claim_idx(0) { for (uint i = 0; i < _max_size; i++) { - _data[i] = nullptr; + _data[i] = CandidateInfo(); } } ~G1BuildCandidateArray() { - FREE_C_HEAP_ARRAY(HeapRegion*, _data); + FREE_C_HEAP_ARRAY(CandidateInfo, _data); } // Claim a new chunk, returning its bounds [from, to[. @@ -123,25 +94,24 @@ class G1BuildCandidateRegionsTask : public WorkerTask { // Set element in array. void set(uint idx, HeapRegion* hr) { assert(idx < _max_size, "Index %u out of bounds %u", idx, _max_size); - assert(_data[idx] == nullptr, "Value must not have been set."); - _data[idx] = hr; + assert(_data[idx]._r == nullptr, "Value must not have been set."); + _data[idx] = CandidateInfo(hr, hr->calc_gc_efficiency()); } - void sort_and_copy_into(HeapRegion** dest, uint num_regions) { + void sort_by_efficiency() { if (_cur_claim_idx == 0) { return; } for (uint i = _cur_claim_idx; i < _max_size; i++) { - assert(_data[i] == nullptr, "must be"); - } - QuickSort::sort(_data, _cur_claim_idx, order_regions, true); - for (uint i = num_regions; i < _max_size; i++) { - assert(_data[i] == nullptr, "must be"); + assert(_data[i]._r == nullptr, "must be"); } - for (uint i = 0; i < num_regions; i++) { - dest[i] = _data[i]; + qsort(_data, _cur_claim_idx, sizeof(_data[0]), (_sort_Fn)G1CollectionCandidateList::compare); + for (uint i = _cur_claim_idx; i < _max_size; i++) { + assert(_data[i]._r == nullptr, "must be"); } } + + CandidateInfo* array() const { return _data; } }; // Per-region closure. 
In addition to determining whether a region should be @@ -154,7 +124,6 @@ class G1BuildCandidateRegionsTask : public WorkerTask { uint _cur_chunk_end; uint _regions_added; - size_t _reclaimable_bytes_added; void add_region(HeapRegion* hr) { if (_cur_chunk_idx == _cur_chunk_end) { @@ -162,13 +131,11 @@ class G1BuildCandidateRegionsTask : public WorkerTask { } assert(_cur_chunk_idx < _cur_chunk_end, "Must be"); - hr->calc_gc_efficiency(); _array->set(_cur_chunk_idx, hr); _cur_chunk_idx++; _regions_added++; - _reclaimable_bytes_added += hr->reclaimable_bytes(); } bool should_add(HeapRegion* hr) { return G1CollectionSetChooser::should_add(hr); } @@ -178,8 +145,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask { _array(array), _cur_chunk_idx(0), _cur_chunk_end(0), - _regions_added(0), - _reclaimable_bytes_added(0) { } + _regions_added(0) { } bool do_heap_region(HeapRegion* r) { // We will skip any region that's currently used as an old GC @@ -188,8 +154,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask { if (should_add(r) && !G1CollectedHeap::heap()->is_old_gc_alloc_region(r)) { add_region(r); } else if (r->is_old()) { - // Keep remembered sets for humongous regions, otherwise clean out remembered - // sets for old regions. + // Keep remembered sets for humongous regions, otherwise clean them out. r->rem_set()->clear(true /* only_cardset */); } else { assert(!r->is_old() || !r->rem_set()->is_tracked(), @@ -200,48 +165,84 @@ class G1BuildCandidateRegionsTask : public WorkerTask { } uint regions_added() const { return _regions_added; } - size_t reclaimable_bytes_added() const { return _reclaimable_bytes_added; } }; G1CollectedHeap* _g1h; HeapRegionClaimer _hrclaimer; uint volatile _num_regions_added; - size_t volatile _reclaimable_bytes_added; G1BuildCandidateArray _result; - void update_totals(uint num_regions, size_t reclaimable_bytes) { + void update_totals(uint num_regions) { if (num_regions > 0) { - assert(reclaimable_bytes > 0, "invariant"); Atomic::add(&_num_regions_added, num_regions); - Atomic::add(&_reclaimable_bytes_added, reclaimable_bytes); - } else { - assert(reclaimable_bytes == 0, "invariant"); } } + // Early prune (remove) regions meeting the G1HeapWastePercent criteria. That + // is, either until only the minimum amount of old collection set regions are + // available (for forward progress in evacuation) or the waste accumulated by the + // removed regions is above the maximum allowed waste. + // Updates number of candidates and reclaimable bytes given. + void prune(CandidateInfo* data) { + G1Policy* p = G1CollectedHeap::heap()->policy(); + + uint num_candidates = Atomic::load(&_num_regions_added); + + uint min_old_cset_length = p->calc_min_old_cset_length(num_candidates); + uint num_pruned = 0; + size_t wasted_bytes = 0; + + if (min_old_cset_length >= num_candidates) { + // We take all of the candidate regions to provide some forward progress. 
+ return; + } + + size_t allowed_waste = p->allowed_waste_in_collection_set(); + uint max_to_prune = num_candidates - min_old_cset_length; + + while (true) { + HeapRegion* r = data[num_candidates - num_pruned - 1]._r; + size_t const reclaimable = r->reclaimable_bytes(); + if (num_pruned >= max_to_prune || + wasted_bytes + reclaimable > allowed_waste) { + break; + } + r->rem_set()->clear(true /* cardset_only */); + + wasted_bytes += reclaimable; + num_pruned++; + } + + log_debug(gc, ergo, cset)("Pruned %u regions out of %u, leaving " SIZE_FORMAT " bytes waste (allowed " SIZE_FORMAT ")", + num_pruned, + num_candidates, + wasted_bytes, + allowed_waste); + + Atomic::sub(&_num_regions_added, num_pruned, memory_order_relaxed); + } + public: G1BuildCandidateRegionsTask(uint max_num_regions, uint chunk_size, uint num_workers) : WorkerTask("G1 Build Candidate Regions"), _g1h(G1CollectedHeap::heap()), _hrclaimer(num_workers), _num_regions_added(0), - _reclaimable_bytes_added(0), _result(max_num_regions, chunk_size, num_workers) { } void work(uint worker_id) { G1BuildCandidateRegionsClosure cl(&_result); _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id); - update_totals(cl.regions_added(), cl.reclaimable_bytes_added()); + update_totals(cl.regions_added()); } - G1CollectionSetCandidates* get_sorted_candidates() { - HeapRegion** regions = NEW_C_HEAP_ARRAY(HeapRegion*, _num_regions_added, mtGC); - _result.sort_and_copy_into(regions, _num_regions_added); - return new G1CollectionSetCandidates(regions, - _num_regions_added, - _reclaimable_bytes_added); + void sort_and_prune_into(G1CollectionSetCandidates* candidates) { + _result.sort_by_efficiency(); + prune(_result.array()); + candidates->set_candidates_from_marking(_result.array(), + _num_regions_added); } }; @@ -257,69 +258,13 @@ bool G1CollectionSetChooser::should_add(HeapRegion* hr) { hr->rem_set()->is_complete(); } -// Closure implementing early pruning (removal) of regions meeting the -// G1HeapWastePercent criteria. That is, either until _max_pruned regions were -// removed (for forward progress in evacuation) or the waste accumulated by the -// removed regions is above max_wasted. 
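Pruning by G1HeapWastePercent now happens while the candidates are built (prune() above) rather than via the removed G1PruneRegionClosure: walk from the low-efficiency end of the sorted array and stop once either the forward-progress minimum would be violated or the allowed waste budget would be exceeded. A self-contained sketch with invented numbers:

#include <cstdio>
#include <vector>

struct Region { unsigned id; size_t reclaimable_bytes; };

// Prune from the low-efficiency end of a sorted candidate array.
static unsigned prune(std::vector<Region>& sorted_by_efficiency,
                      unsigned min_regions_to_keep, size_t allowed_waste) {
  if (min_regions_to_keep >= sorted_by_efficiency.size()) {
    return 0;  // keep everything to guarantee forward progress
  }
  unsigned max_to_prune = (unsigned)sorted_by_efficiency.size() - min_regions_to_keep;
  unsigned num_pruned = 0;
  size_t wasted_bytes = 0;

  while (num_pruned < max_to_prune) {
    const Region& r = sorted_by_efficiency[sorted_by_efficiency.size() - num_pruned - 1];
    if (wasted_bytes + r.reclaimable_bytes > allowed_waste) {
      break;   // would exceed the waste budget
    }
    wasted_bytes += r.reclaimable_bytes;
    num_pruned++;
  }
  sorted_by_efficiency.resize(sorted_by_efficiency.size() - num_pruned);
  return num_pruned;
}

int main() {
  std::vector<Region> cands = {{1, 800}, {2, 600}, {3, 300}, {4, 100}};
  unsigned pruned = prune(cands, 2, 500);
  std::printf("pruned %u, %zu left\n", pruned, cands.size());  // pruned 2, 2 left
}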
-class G1PruneRegionClosure : public HeapRegionClosure { - uint _num_pruned; - size_t _cur_wasted; - - uint const _max_pruned; - size_t const _max_wasted; - -public: - G1PruneRegionClosure(uint max_pruned, size_t max_wasted) : - _num_pruned(0), _cur_wasted(0), _max_pruned(max_pruned), _max_wasted(max_wasted) { } - - virtual bool do_heap_region(HeapRegion* r) { - size_t const reclaimable = r->reclaimable_bytes(); - if (_num_pruned >= _max_pruned || - _cur_wasted + reclaimable > _max_wasted) { - return true; - } - r->rem_set()->clear(true /* cardset_only */); - _cur_wasted += reclaimable; - _num_pruned++; - return false; - } - - uint num_pruned() const { return _num_pruned; } - size_t wasted() const { return _cur_wasted; } -}; - -void G1CollectionSetChooser::prune(G1CollectionSetCandidates* candidates) { - G1Policy* p = G1CollectedHeap::heap()->policy(); - - uint min_old_cset_length = p->calc_min_old_cset_length(candidates->num_regions()); - uint num_candidates = candidates->num_regions(); - - if (min_old_cset_length < num_candidates) { - size_t allowed_waste = p->allowed_waste_in_collection_set(); - - G1PruneRegionClosure prune_cl(num_candidates - min_old_cset_length, - allowed_waste); - candidates->iterate_backwards(&prune_cl); - - log_debug(gc, ergo, cset)("Pruned %u regions out of %u, leaving " SIZE_FORMAT " bytes waste (allowed " SIZE_FORMAT ")", - prune_cl.num_pruned(), - candidates->num_regions(), - prune_cl.wasted(), - allowed_waste); - - candidates->remove_from_end(prune_cl.num_pruned(), prune_cl.wasted()); - } -} - -G1CollectionSetCandidates* G1CollectionSetChooser::build(WorkerThreads* workers, uint max_num_regions) { +void G1CollectionSetChooser::build(WorkerThreads* workers, uint max_num_regions, G1CollectionSetCandidates* candidates) { uint num_workers = workers->active_workers(); uint chunk_size = calculate_work_chunk_size(num_workers, max_num_regions); G1BuildCandidateRegionsTask cl(max_num_regions, chunk_size, num_workers); workers->run_task(&cl, num_workers); - G1CollectionSetCandidates* result = cl.get_sorted_candidates(); - prune(result); - result->verify(); - return result; + cl.sort_and_prune_into(candidates); + candidates->verify(); } diff --git a/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp b/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp index a253cb848082f..861fcd997900c 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp +++ b/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp @@ -38,12 +38,7 @@ class WorkerThreads; class G1CollectionSetChooser : public AllStatic { static uint calculate_work_chunk_size(uint num_workers, uint num_regions); - // Remove regions in the collection set candidates as long as the G1HeapWastePercent - // criteria is met. Keep at least the minimum amount of old regions to guarantee - // some progress. - static void prune(G1CollectionSetCandidates* candidates); public: - static size_t mixed_gc_live_threshold_bytes() { return HeapRegion::GrainBytes * (size_t) G1MixedGCLiveThresholdPercent / 100; } @@ -60,7 +55,7 @@ class G1CollectionSetChooser : public AllStatic { // Build and return set of collection set candidates sorted by decreasing gc // efficiency. 
- static G1CollectionSetCandidates* build(WorkerThreads* workers, uint max_num_regions); + static void build(WorkerThreads* workers, uint max_num_regions, G1CollectionSetCandidates* candidates); }; #endif // SHARE_GC_G1_G1COLLECTIONSETCHOOSER_HPP diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp index 4fe0959da99c5..8552063fe42e3 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp @@ -218,7 +218,7 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() { return nullptr; } - size_t cur_idx = Atomic::fetch_and_add(&_hwm, 1u); + size_t cur_idx = Atomic::fetch_then_add(&_hwm, 1u); if (cur_idx >= _chunk_capacity) { return nullptr; } @@ -286,7 +286,7 @@ void G1CMRootMemRegions::reset() { void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) { assert_at_safepoint(); - size_t idx = Atomic::fetch_and_add(&_num_root_regions, 1u); + size_t idx = Atomic::fetch_then_add(&_num_root_regions, 1u); assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions); assert(start != nullptr && end != nullptr && start <= end, "Start (" PTR_FORMAT ") should be less or equal to " "end (" PTR_FORMAT ")", p2i(start), p2i(end)); @@ -314,7 +314,7 @@ const MemRegion* G1CMRootMemRegions::claim_next() { return nullptr; } - size_t claimed_index = Atomic::fetch_and_add(&_claimed_root_regions, 1u); + size_t claimed_index = Atomic::fetch_then_add(&_claimed_root_regions, 1u); if (claimed_index < _num_root_regions) { return &_root_regions[claimed_index]; } @@ -1215,6 +1215,17 @@ class G1UpdateRegionsAfterRebuild : public HeapRegionClosure { } }; +class G1ObjectCountIsAliveClosure: public BoolObjectClosure { + G1CollectedHeap* _g1h; +public: + G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { } + + bool do_object_b(oop obj) { + return obj != nullptr && + (!_g1h->is_in_reserved(obj) || !_g1h->is_obj_dead(obj)); + } +}; + void G1ConcurrentMark::remark() { assert_at_safepoint_on_vm_thread(); @@ -1297,6 +1308,12 @@ void G1ConcurrentMark::remark() { reset_at_marking_complete(); G1CollectedHeap::finish_codecache_marking_cycle(); + + { + GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm); + G1ObjectCountIsAliveClosure is_alive(_g1h); + _gc_tracer_cm->report_object_count_after_gc(&is_alive, _g1h->workers()); + } } else { // We overflowed. Restart concurrent marking. _restart_for_overflow = true; @@ -1308,11 +1325,6 @@ void G1ConcurrentMark::remark() { reset_marking_for_restart(); } - { - GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm); - report_object_count(mark_finished); - } - // Statistics double now = os::elapsedTime(); _remark_mark_times.add((mark_work_end - start) * 1000.0); @@ -1715,29 +1727,6 @@ void G1ConcurrentMark::preclean() { _gc_timer_cm); } -class G1ObjectCountIsAliveClosure: public BoolObjectClosure { - G1CollectedHeap* _g1h; -public: - G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { } - - bool do_object_b(oop obj) { - return obj != nullptr && - (!_g1h->is_in_reserved(obj) || !_g1h->is_obj_dead(obj)); - } -}; - -void G1ConcurrentMark::report_object_count(bool mark_completed) { - // Depending on the completion of the marking liveness needs to be determined - // using either the bitmap or after the cycle using the scrubbing information. 
- if (mark_completed) { - G1ObjectCountIsAliveClosure is_alive(_g1h); - _gc_tracer_cm->report_object_count_after_gc(&is_alive); - } else { - G1CMIsAliveClosure is_alive(_g1h); - _gc_tracer_cm->report_object_count_after_gc(&is_alive); - } -} - // Closure for marking entries in SATB buffers. class G1CMSATBBufferClosure : public SATBBufferClosure { private: @@ -2989,7 +2978,7 @@ bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) { size_t capacity_bytes = r->capacity(); size_t used_bytes = r->used(); size_t live_bytes = r->live_bytes(); - double gc_eff = r->gc_efficiency(); + double gc_eff = r->calc_gc_efficiency(); size_t remset_bytes = r->rem_set()->mem_size(); size_t code_roots_bytes = r->rem_set()->code_roots_mem_size(); const char* remset_type = r->rem_set()->get_short_state_str(); diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp index a824d50ac5fbf..1e4aa9af41f55 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp @@ -375,8 +375,6 @@ class G1ConcurrentMark : public CHeapObj { void weak_refs_work(); - void report_object_count(bool mark_completed); - void reclaim_empty_regions(); // After reclaiming empty regions, update heap sizes. diff --git a/src/hotspot/share/gc/g1/g1EvacFailureRegions.inline.hpp b/src/hotspot/share/gc/g1/g1EvacFailureRegions.inline.hpp index bdbffea734a14..cfd453ed2e096 100644 --- a/src/hotspot/share/gc/g1/g1EvacFailureRegions.inline.hpp +++ b/src/hotspot/share/gc/g1/g1EvacFailureRegions.inline.hpp @@ -32,7 +32,7 @@ bool G1EvacFailureRegions::record(uint region_idx) { bool success = _regions_failed_evacuation.par_set_bit(region_idx, memory_order_relaxed); if (success) { - size_t offset = Atomic::fetch_and_add(&_evac_failure_regions_cur_length, 1u); + size_t offset = Atomic::fetch_then_add(&_evac_failure_regions_cur_length, 1u); _evac_failure_regions[offset] = region_idx; G1CollectedHeap* g1h = G1CollectedHeap::heap(); diff --git a/src/hotspot/share/gc/g1/g1FullCollector.cpp b/src/hotspot/share/gc/g1/g1FullCollector.cpp index 6a16a97e59f2b..d1360be6ab459 100644 --- a/src/hotspot/share/gc/g1/g1FullCollector.cpp +++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp @@ -332,7 +332,7 @@ void G1FullCollector::phase1_mark_live_objects() { { GCTraceTime(Debug, gc, phases) debug("Report Object Count", scope()->timer()); - scope()->tracer()->report_object_count_after_gc(&_is_alive); + scope()->tracer()->report_object_count_after_gc(&_is_alive, _heap->workers()); } #if TASKQUEUE_STATS oop_queue_set()->print_and_reset_taskqueue_stats("Oop Queue"); diff --git a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp index ecdf0810e73bd..905780bc7eeda 100644 --- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp +++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp @@ -433,6 +433,8 @@ void G1HeapVerifier::verify_region_sets() { VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm); _g1h->heap_region_iterate(&cl); cl.verify_counts(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm); + + _g1h->collection_set()->candidates()->verify(); } void G1HeapVerifier::prepare_for_verify() { diff --git a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp index fccb4aa7cd5be..c4f2c1284032b 100644 --- a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp +++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp @@ -40,48 +40,50 @@ class G1GenerationCounters : public GenerationCounters { 
G1GenerationCounters(G1MonitoringSupport* monitoring_support, const char* name, int ordinal, int spaces, size_t min_capacity, size_t max_capacity, - size_t curr_capacity) - : GenerationCounters(name, ordinal, spaces, min_capacity, + size_t curr_capacity) : + GenerationCounters(name, ordinal, spaces, min_capacity, max_capacity, curr_capacity), _monitoring_support(monitoring_support) { } }; class G1YoungGenerationCounters : public G1GenerationCounters { public: - // We pad the capacity three times given that the young generation - // contains three spaces (eden and two survivors). - G1YoungGenerationCounters(G1MonitoringSupport* monitoring_support, const char* name, size_t max_size) - : G1GenerationCounters(monitoring_support, name, 0 /* ordinal */, 3 /* spaces */, - G1MonitoringSupport::pad_capacity(0, 3) /* min_capacity */, - G1MonitoringSupport::pad_capacity(max_size, 3), - G1MonitoringSupport::pad_capacity(0, 3) /* curr_capacity */) { + G1YoungGenerationCounters(G1MonitoringSupport* monitoring_support, const char* name, size_t max_size) : + G1GenerationCounters(monitoring_support, + name, + 0 /* ordinal */, + 3 /* spaces */, + 0 /* min_capacity */, + max_size, + 0 /* curr_capacity */) { if (UsePerfData) { update_all(); } } virtual void update_all() { - size_t committed = - G1MonitoringSupport::pad_capacity(_monitoring_support->young_gen_committed(), 3); + size_t committed = _monitoring_support->young_gen_committed(); _current_size->set_value(committed); } }; class G1OldGenerationCounters : public G1GenerationCounters { public: - G1OldGenerationCounters(G1MonitoringSupport* monitoring_support, const char* name, size_t max_size) - : G1GenerationCounters(monitoring_support, name, 1 /* ordinal */, 1 /* spaces */, - G1MonitoringSupport::pad_capacity(0) /* min_capacity */, - G1MonitoringSupport::pad_capacity(max_size), - G1MonitoringSupport::pad_capacity(0) /* curr_capacity */) { + G1OldGenerationCounters(G1MonitoringSupport* monitoring_support, const char* name, size_t max_size) : + G1GenerationCounters(monitoring_support, + name, + 1 /* ordinal */, + 1 /* spaces */, + 0 /* min_capacity */, + max_size, + 0 /* curr_capacity */) { if (UsePerfData) { update_all(); } } virtual void update_all() { - size_t committed = - G1MonitoringSupport::pad_capacity(_monitoring_support->old_gen_committed()); + size_t committed = _monitoring_support->old_gen_committed(); _current_size->set_value(committed); } }; @@ -144,9 +146,9 @@ G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) : // Counters are created from maxCapacity, capacity, initCapacity, // and used. _old_space_counters = new HSpaceCounters(_old_gen_counters->name_space(), - "space", 0 /* ordinal */, - pad_capacity(g1h->max_capacity()) /* max_capacity */, - pad_capacity(_old_gen_committed) /* init_capacity */); + "space", 0 /* ordinal */, + g1h->max_capacity() /* max_capacity */, + _old_gen_committed /* init_capacity */); // Young collection set // name "generation.0". This is logically the young generation. 
@@ -159,17 +161,17 @@ G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) : // name "generation.0.space.0" // See _old_space_counters for additional counters _eden_space_counters = new HSpaceCounters(young_collection_name_space, - "eden", 0 /* ordinal */, - pad_capacity(g1h->max_capacity()) /* max_capacity */, - pad_capacity(_eden_space_committed) /* init_capacity */); + "eden", 0 /* ordinal */, + g1h->max_capacity() /* max_capacity */, + _eden_space_committed /* init_capacity */); // name "generation.0.space.1" // See _old_space_counters for additional counters // Set the arguments to indicate that this survivor space is not used. _from_space_counters = new HSpaceCounters(young_collection_name_space, - "s0", 1 /* ordinal */, - pad_capacity(0) /* max_capacity */, - pad_capacity(0) /* init_capacity */); + "s0", 1 /* ordinal */, + 0 /* max_capacity */, + 0 /* init_capacity */); // Given that this survivor space is not used, we update it here // once to reflect that its used space is 0 so that we don't have to // worry about updating it again later. @@ -180,9 +182,9 @@ G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) : // name "generation.0.space.2" // See _old_space_counters for additional counters _to_space_counters = new HSpaceCounters(young_collection_name_space, - "s1", 2 /* ordinal */, - pad_capacity(g1h->max_capacity()) /* max_capacity */, - pad_capacity(_survivor_space_committed) /* init_capacity */); + "s1", 2 /* ordinal */, + g1h->max_capacity() /* max_capacity */, + _survivor_space_committed /* init_capacity */); } G1MonitoringSupport::~G1MonitoringSupport() { @@ -296,13 +298,13 @@ void G1MonitoringSupport::recalculate_sizes() { void G1MonitoringSupport::update_sizes() { recalculate_sizes(); if (UsePerfData) { - _eden_space_counters->update_capacity(pad_capacity(_eden_space_committed)); + _eden_space_counters->update_capacity(_eden_space_committed); _eden_space_counters->update_used(_eden_space_used); // only the "to" survivor space is active, so we don't need to // update the counters for the "from" survivor space - _to_space_counters->update_capacity(pad_capacity(_survivor_space_committed)); + _to_space_counters->update_capacity(_survivor_space_committed); _to_space_counters->update_used(_survivor_space_used); - _old_space_counters->update_capacity(pad_capacity(_old_gen_committed)); + _old_space_counters->update_capacity(_old_gen_committed); _old_space_counters->update_used(_old_gen_used); _young_gen_counters->update_all(); diff --git a/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp b/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp index d812d811aee4a..769a6fe831c7f 100644 --- a/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp +++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp @@ -192,21 +192,6 @@ class G1MonitoringSupport : public CHeapObj { GrowableArray memory_managers(); GrowableArray memory_pools(); - // Unfortunately, the jstat tool assumes that no space has 0 - // capacity. In our case, given that each space is logical, it's - // possible that no regions will be allocated to it, hence to have 0 - // capacity (e.g., if there are no survivor regions, the survivor - // space has 0 capacity). The way we deal with this is to always pad - // each capacity value we report to jstat by a very small amount to - // make sure that it's never zero. 
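As a point of reference for the counter changes in this file, the removed pad_capacity() helper only added MinObjAlignmentInBytes per space so that jstat never saw a zero capacity. A minimal sketch of its effect, assuming MinObjAlignmentInBytes is 8 (the usual 64-bit value); illustrative only, not part of the patch.

#include <cstddef>

// Sketch of the removed helper, with MinObjAlignmentInBytes assumed to be 8.
static size_t pad_capacity(size_t size_bytes, size_t mult = 1) {
  return size_bytes + 8 * mult;
}
// An empty young gen (eden plus two survivors) used to be reported as
// pad_capacity(0, 3) == 24 bytes; after this change it is reported as 0.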
Given that we sometimes have to - // report a capacity of a generation that contains several spaces - // (e.g., young gen includes one eden, two survivor spaces), the - // mult parameter is provided in order to adding the appropriate - // padding multiple times so that the capacities add up correctly. - static size_t pad_capacity(size_t size_bytes, size_t mult = 1) { - return size_bytes + MinObjAlignmentInBytes * mult; - } - // Recalculate all the sizes from scratch and update all the jstat // counters accordingly. void update_sizes(); diff --git a/src/hotspot/share/gc/g1/g1MonotonicArena.inline.hpp b/src/hotspot/share/gc/g1/g1MonotonicArena.inline.hpp index ad9bf6db6df52..f01248424a026 100644 --- a/src/hotspot/share/gc/g1/g1MonotonicArena.inline.hpp +++ b/src/hotspot/share/gc/g1/g1MonotonicArena.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -34,7 +34,7 @@ inline void* G1MonotonicArena::Segment::allocate_slot() { if (_next_allocate >= _num_slots) { return nullptr; } - uint result = Atomic::fetch_and_add(&_next_allocate, 1u, memory_order_relaxed); + uint result = Atomic::fetch_then_add(&_next_allocate, 1u, memory_order_relaxed); if (result >= _num_slots) { return nullptr; } diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp index 5c558bb237e63..4985c21355f6b 100644 --- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp +++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp @@ -59,8 +59,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, PreservedMarks* preserved_marks, uint worker_id, uint num_workers, - size_t young_cset_length, - size_t optional_cset_length, + G1CollectionSet* collection_set, G1EvacFailureRegions* evac_failure_regions) : _g1h(g1h), _task_queue(g1h->task_queue(worker_id)), @@ -78,12 +77,12 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, _trim_ticks(), _surviving_young_words_base(nullptr), _surviving_young_words(nullptr), - _surviving_words_length(young_cset_length + 1), + _surviving_words_length(collection_set->young_region_length() + 1), _old_gen_is_full(false), _partial_objarray_chunk_size(ParGCArrayScanChunk), _partial_array_stepper(num_workers), _string_dedup_requests(), - _max_num_optional_regions(optional_cset_length), + _max_num_optional_regions(collection_set->optional_region_length()), _numa(g1h->numa()), _obj_alloc_stat(nullptr), EVAC_FAILURE_INJECTOR_ONLY(_evac_failure_inject_counter(0) COMMA) @@ -104,7 +103,9 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, _plab_allocator = new G1PLABAllocator(_g1h->allocator()); - _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h); + _closures = G1EvacuationRootClosures::create_root_closures(_g1h, + this, + collection_set->only_contains_young_regions()); _oops_into_optional_regions = new G1OopStarChunkedList[_max_num_optional_regions]; @@ -569,8 +570,7 @@ G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) _preserved_marks_set.get(worker_id), worker_id, _num_workers, - _young_cset_length, - _optional_cset_length, + _collection_set, _evac_failure_regions); } return _states[worker_id]; @@ -690,16 +690,14 @@ void G1ParScanThreadState::update_numa_stats(uint node_index) { 
G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint num_workers, - size_t young_cset_length, - size_t optional_cset_length, + G1CollectionSet* collection_set, G1EvacFailureRegions* evac_failure_regions) : _g1h(g1h), + _collection_set(collection_set), _rdcqs(G1BarrierSet::dirty_card_queue_set().allocator()), _preserved_marks_set(true /* in_c_heap */), _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, num_workers, mtGC)), - _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length + 1, mtGC)), - _young_cset_length(young_cset_length), - _optional_cset_length(optional_cset_length), + _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, collection_set->young_region_length() + 1, mtGC)), _num_workers(num_workers), _flushed(false), _evac_failure_regions(evac_failure_regions) { @@ -707,7 +705,7 @@ G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h, for (uint i = 0; i < num_workers; ++i) { _states[i] = nullptr; } - memset(_surviving_young_words_total, 0, (young_cset_length + 1) * sizeof(size_t)); + memset(_surviving_young_words_total, 0, (collection_set->young_region_length() + 1) * sizeof(size_t)); } G1ParScanThreadStateSet::~G1ParScanThreadStateSet() { diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp index ab31a6d88ca11..fd9430c024478 100644 --- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp +++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp @@ -41,6 +41,7 @@ #include "utilities/ticks.hpp" class G1CardTable; +class G1CollectionSet; class G1EvacFailureRegions; class G1EvacuationRootClosures; class G1OopStarChunkedList; @@ -117,8 +118,7 @@ class G1ParScanThreadState : public CHeapObj { PreservedMarks* preserved_marks, uint worker_id, uint num_workers, - size_t young_cset_length, - size_t optional_cset_length, + G1CollectionSet* collection_set, G1EvacFailureRegions* evac_failure_regions); virtual ~G1ParScanThreadState(); @@ -231,12 +231,11 @@ class G1ParScanThreadState : public CHeapObj { class G1ParScanThreadStateSet : public StackObj { G1CollectedHeap* _g1h; + G1CollectionSet* _collection_set; G1RedirtyCardsQueueSet _rdcqs; PreservedMarksSet _preserved_marks_set; G1ParScanThreadState** _states; size_t* _surviving_young_words_total; - size_t _young_cset_length; - size_t _optional_cset_length; uint _num_workers; bool _flushed; G1EvacFailureRegions* _evac_failure_regions; @@ -244,8 +243,7 @@ class G1ParScanThreadStateSet : public StackObj { public: G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint num_workers, - size_t young_cset_length, - size_t optional_cset_length, + G1CollectionSet* collection_set, G1EvacFailureRegions* evac_failure_regions); ~G1ParScanThreadStateSet(); diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp index 7847179a55d61..92c83301c2a40 100644 --- a/src/hotspot/share/gc/g1/g1Policy.cpp +++ b/src/hotspot/share/gc/g1/g1Policy.cpp @@ -28,7 +28,7 @@ #include "gc/g1/g1Arguments.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectionSet.hpp" -#include "gc/g1/g1CollectionSetCandidates.hpp" +#include "gc/g1/g1CollectionSetCandidates.inline.hpp" #include "gc/g1/g1ConcurrentMark.hpp" #include "gc/g1/g1ConcurrentMarkThread.inline.hpp" #include "gc/g1/g1ConcurrentRefine.hpp" @@ -485,21 +485,20 @@ uint G1Policy::calculate_desired_eden_length_before_young_only(double base_time_ uint G1Policy::calculate_desired_eden_length_before_mixed(double base_time_ms, uint min_eden_length, uint max_eden_length) const { 
- G1CollectionSetCandidates* candidates = _collection_set->candidates(); - - uint min_old_regions_end = MIN2(candidates->cur_idx() + calc_min_old_cset_length(candidates->num_regions()), - candidates->num_regions()); + uint min_marking_candidates = MIN2(calc_min_old_cset_length(candidates()->last_marking_candidates_length()), + candidates()->marking_regions_length()); double predicted_region_evac_time_ms = base_time_ms; - for (uint i = candidates->cur_idx(); i < min_old_regions_end; i++) { - HeapRegion* r = candidates->at(i); + for (HeapRegion* r : candidates()->marking_regions()) { + if (min_marking_candidates == 0) { + break; + } predicted_region_evac_time_ms += predict_region_total_time_ms(r, false /* for_young_only_phase */); + min_marking_candidates--; } - uint desired_eden_length_by_min_cset_length = - calculate_desired_eden_length_before_young_only(predicted_region_evac_time_ms, - min_eden_length, - max_eden_length); - return desired_eden_length_by_min_cset_length; + return calculate_desired_eden_length_before_young_only(predicted_region_evac_time_ms, + min_eden_length, + max_eden_length); } double G1Policy::predict_survivor_regions_evac_time() const { @@ -537,7 +536,7 @@ void G1Policy::record_full_collection_start() { // Release the future to-space so that it is available for compaction into. collector_state()->set_in_young_only_phase(false); collector_state()->set_in_full_gc(true); - _collection_set->clear_candidates(); + _collection_set->abandon_all_candidates(); _pending_cards_at_gc_start = 0; } @@ -665,6 +664,10 @@ void G1Policy::record_concurrent_mark_cleanup_start() { _mark_cleanup_start_sec = os::elapsedTime(); } +G1CollectionSetCandidates* G1Policy::candidates() const { + return _collection_set->candidates(); +} + double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const { return phase_times()->average_time_ms(phase); } @@ -797,7 +800,9 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar if (!next_gc_should_be_mixed("do not continue mixed GCs")) { collector_state()->set_in_young_only_phase(true); - clear_collection_set_candidates(); + assert(!candidates()->has_more_marking_candidates(), + "only end mixed if all candidates from marking were processed"); + maybe_start_marking(); } } else { @@ -858,9 +863,9 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar _collection_set->young_region_length()); } - if (_collection_set->old_region_length() > 0) { + if (_collection_set->initial_old_region_length() > 0) { _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() / - _collection_set->old_region_length()); + _collection_set->initial_old_region_length()); } _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms)); @@ -1212,7 +1217,7 @@ void G1Policy::decide_on_concurrent_start_pause() { // active. The following remark might change the change the "evacuation efficiency" of // the regions in this set, leading to failing asserts later. // Since the concurrent cycle will recreate the collection set anyway, simply drop it here. 
- clear_collection_set_candidates(); + abandon_collection_set_candidates(); abort_time_to_mixed_tracking(); initiate_conc_mark(); log_debug(gc, ergo)("Initiate concurrent cycle (%s requested concurrent cycle)", @@ -1244,8 +1249,7 @@ void G1Policy::decide_on_concurrent_start_pause() { void G1Policy::record_concurrent_mark_cleanup_end(bool has_rebuilt_remembered_sets) { bool mixed_gc_pending = false; if (has_rebuilt_remembered_sets) { - G1CollectionSetCandidates* candidates = G1CollectionSetChooser::build(_g1h->workers(), _g1h->num_regions()); - _collection_set->set_candidates(candidates); + G1CollectionSetChooser::build(_g1h->workers(), _g1h->num_regions(), candidates()); mixed_gc_pending = next_gc_should_be_mixed("request young-only gcs"); } @@ -1255,7 +1259,6 @@ void G1Policy::record_concurrent_mark_cleanup_end(bool has_rebuilt_remembered_se } if (!mixed_gc_pending) { - clear_collection_set_candidates(); abort_time_to_mixed_tracking(); } collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending); @@ -1269,26 +1272,13 @@ void G1Policy::record_concurrent_mark_cleanup_end(bool has_rebuilt_remembered_se record_pause(G1GCPauseType::Cleanup, _mark_cleanup_start_sec, end_sec); } -double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const { - return percent_of(reclaimable_bytes, _g1h->capacity()); -} - -class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure { - virtual bool do_heap_region(HeapRegion* r) { - r->rem_set()->clear_locked(true /* only_cardset */); - return false; - } -}; - -void G1Policy::clear_collection_set_candidates() { - if (_collection_set->candidates() == nullptr) { - return; - } +void G1Policy::abandon_collection_set_candidates() { // Clear remembered sets of remaining candidate regions and the actual candidate // set. - G1ClearCollectionSetCandidateRemSets cl; - _collection_set->candidates()->iterate(&cl); - _collection_set->clear_candidates(); + for (HeapRegion* r : *candidates()) { + r->rem_set()->clear_locked(true /* only_cardset */); + } + _collection_set->abandon_all_candidates(); } void G1Policy::maybe_start_marking() { @@ -1371,9 +1361,7 @@ void G1Policy::abort_time_to_mixed_tracking() { } bool G1Policy::next_gc_should_be_mixed(const char* no_candidates_str) const { - G1CollectionSetCandidates* candidates = _collection_set->candidates(); - - if (candidates == nullptr || candidates->is_empty()) { + if (!candidates()->has_more_marking_candidates()) { if (no_candidates_str != nullptr) { log_debug(gc, ergo)("%s (candidate old regions not available)", no_candidates_str); } @@ -1414,48 +1402,52 @@ uint G1Policy::calc_max_old_cset_length() const { return (uint)ceil(result); } -void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates, - double time_remaining_ms, - uint& num_initial_regions, - uint& num_optional_regions) { - assert(candidates != nullptr, "Must be"); +static void print_finish_message(const char* reason, bool from_marking) { + log_debug(gc, ergo, cset)("Finish adding %s candidates to collection set (%s).", + from_marking ? 
"marking" : "retained", reason); +} + +double G1Policy::select_candidates_from_marking(G1CollectionCandidateList* marking_list, + double time_remaining_ms, + G1CollectionCandidateRegionList* initial_old_regions, + G1CollectionCandidateRegionList* optional_old_regions) { + assert(marking_list != nullptr, "must be"); - num_initial_regions = 0; - num_optional_regions = 0; uint num_expensive_regions = 0; + uint num_initial_regions_selected = 0; + uint num_optional_regions_selected = 0; + double predicted_initial_time_ms = 0.0; double predicted_optional_time_ms = 0.0; double optional_threshold_ms = time_remaining_ms * optional_prediction_fraction(); - const uint min_old_cset_length = calc_min_old_cset_length(candidates->num_regions()); + const uint min_old_cset_length = calc_min_old_cset_length(candidates()->last_marking_candidates_length()); const uint max_old_cset_length = MAX2(min_old_cset_length, calc_max_old_cset_length()); const uint max_optional_regions = max_old_cset_length - min_old_cset_length; bool check_time_remaining = use_adaptive_young_list_length(); - uint candidate_idx = candidates->cur_idx(); - - log_debug(gc, ergo, cset)("Start adding old regions to collection set. Min %u regions, max %u regions, " + log_debug(gc, ergo, cset)("Start adding marking candidates to collection set. " + "Min %u regions, max %u regions, " "time remaining %1.2fms, optional threshold %1.2fms", min_old_cset_length, max_old_cset_length, time_remaining_ms, optional_threshold_ms); - HeapRegion* hr = candidates->at(candidate_idx); - while (hr != nullptr) { - if (num_initial_regions + num_optional_regions >= max_old_cset_length) { + G1CollectionCandidateListIterator iter = marking_list->begin(); + for (; iter != marking_list->end(); ++iter) { + if (num_initial_regions_selected + num_optional_regions_selected >= max_old_cset_length) { // Added maximum number of old regions to the CSet. - log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Maximum number of regions). " - "Initial %u regions, optional %u regions", - num_initial_regions, num_optional_regions); + print_finish_message("Maximum number of regions reached", true); break; } - + HeapRegion* hr = *iter; double predicted_time_ms = predict_region_total_time_ms(hr, false); time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0); // Add regions to old set until we reach the minimum amount - if (num_initial_regions < min_old_cset_length) { + if (initial_old_regions->length() < min_old_cset_length) { + initial_old_regions->append(hr); + num_initial_regions_selected++; predicted_initial_time_ms += predicted_time_ms; - num_initial_regions++; // Record the number of regions added with no time remaining if (time_remaining_ms == 0.0) { num_expensive_regions++; @@ -1463,53 +1455,54 @@ void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* c } else if (!check_time_remaining) { // In the non-auto-tuning case, we'll finish adding regions // to the CSet if we reach the minimum. - log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Region amount reached min)."); + print_finish_message("Region amount reached min", true); break; } else { // Keep adding regions to old set until we reach the optional threshold if (time_remaining_ms > optional_threshold_ms) { predicted_initial_time_ms += predicted_time_ms; - num_initial_regions++; + initial_old_regions->append(hr); + num_initial_regions_selected++; } else if (time_remaining_ms > 0) { // Keep adding optional regions until time is up. 
- assert(num_optional_regions < max_optional_regions, "Should not be possible."); + assert(optional_old_regions->length() < max_optional_regions, "Should not be possible."); predicted_optional_time_ms += predicted_time_ms; - num_optional_regions++; + optional_old_regions->append(hr); + num_optional_regions_selected++; } else { - log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Predicted time too high)."); + print_finish_message("Predicted time too high", true); break; } } - hr = candidates->at(++candidate_idx); } - if (hr == nullptr) { - log_debug(gc, ergo, cset)("Old candidate collection set empty."); + if (iter == marking_list->end()) { + log_debug(gc, ergo, cset)("Marking candidates exhausted."); } if (num_expensive_regions > 0) { - log_debug(gc, ergo, cset)("Added %u initial old regions to collection set although the predicted time was too high.", + log_debug(gc, ergo, cset)("Added %u marking candidates to collection set although the predicted time was too high.", num_expensive_regions); } - log_debug(gc, ergo, cset)("Finish choosing collection set old regions. Initial: %u, optional: %u, " - "predicted initial time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2f", - num_initial_regions, num_optional_regions, + log_debug(gc, ergo, cset)("Finish adding marking candidates to collection set. Initial: %u, optional: %u, " + "predicted initial time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2fms", + num_initial_regions_selected, num_optional_regions_selected, predicted_initial_time_ms, predicted_optional_time_ms, time_remaining_ms); + + assert(initial_old_regions->length() == num_initial_regions_selected, "must be"); + assert(optional_old_regions->length() == num_optional_regions_selected, "must be"); + return time_remaining_ms; } -void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates, - uint const max_optional_regions, +void G1Policy::calculate_optional_collection_set_regions(G1CollectionCandidateRegionList* optional_regions, double time_remaining_ms, - uint& num_optional_regions) { - assert(_g1h->collector_state()->in_mixed_phase(), "Should only be called in mixed phase"); + G1CollectionCandidateRegionList* selected_regions) { + assert(_collection_set->optional_region_length() > 0, + "Should only be called when there are optional regions"); - num_optional_regions = 0; double total_prediction_ms = 0.0; - uint candidate_idx = candidates->cur_idx(); - HeapRegion* r = candidates->at(candidate_idx); - while (num_optional_regions < max_optional_regions) { - assert(r != nullptr, "Region must exist"); + for (HeapRegion* r : *optional_regions) { double prediction_ms = predict_region_total_time_ms(r, false); if (prediction_ms > time_remaining_ms) { @@ -1521,12 +1514,12 @@ void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidat total_prediction_ms += prediction_ms; time_remaining_ms -= prediction_ms; - num_optional_regions++; - r = candidates->at(++candidate_idx); + + selected_regions->append(r); } log_debug(gc, ergo, cset)("Prepared %u regions out of %u for optional evacuation. 
Total predicted time: %.3fms", - num_optional_regions, max_optional_regions, total_prediction_ms); + selected_regions->length(), optional_regions->length(), total_prediction_ms); } void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) { diff --git a/src/hotspot/share/gc/g1/g1Policy.hpp b/src/hotspot/share/gc/g1/g1Policy.hpp index 41ee9bfb9a870..238076bb1e678 100644 --- a/src/hotspot/share/gc/g1/g1Policy.hpp +++ b/src/hotspot/share/gc/g1/g1Policy.hpp @@ -46,8 +46,10 @@ class HeapRegion; class G1CollectionSet; +class G1CollectionCandidateList; class G1CollectionSetCandidates; class G1CollectionSetChooser; +class G1CollectionCandidateRegionList; class G1IHOPControl; class G1Analytics; class G1SurvivorRegions; @@ -181,6 +183,7 @@ class G1Policy: public CHeapObj { private: G1CollectionSet* _collection_set; + G1CollectionSetCandidates* candidates() const; double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const; double other_time_ms(double pause_time_ms) const; @@ -265,13 +268,8 @@ class G1Policy: public CHeapObj { // during a mixed GC. uint calc_max_old_cset_length() const; - // Returns the given amount of reclaimable bytes (that represents - // the amount of reclaimable space still to be collected) as a - // percentage of the current heap capacity. - double reclaimable_bytes_percent(size_t reclaimable_bytes) const; - private: - void clear_collection_set_candidates(); + void abandon_collection_set_candidates(); // Sets up marking if proper conditions are met. void maybe_start_marking(); // Manage time-to-mixed tracking. @@ -340,20 +338,20 @@ class G1Policy: public CHeapObj { // Amount of allowed waste in bytes in the collection set. size_t allowed_waste_in_collection_set() const; - // Calculate and return the number of initial and optional old gen regions from - // the given collection set candidates and the remaining time. - void calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates, - double time_remaining_ms, - uint& num_initial_regions, - uint& num_optional_regions); + // Calculate and fill in the initial and optional old gen candidate regions from + // the given candidate list and the remaining time. + // Returns the remaining time. + double select_candidates_from_marking(G1CollectionCandidateList* marking_list, + double time_remaining_ms, + G1CollectionCandidateRegionList* initial_old_regions, + G1CollectionCandidateRegionList* optional_old_regions); // Calculate the number of optional regions from the given collection set candidates, // the remaining time and the maximum number of these regions and return the number // of actually selected regions in num_optional_regions. - void calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates, - uint const max_optional_regions, + void calculate_optional_collection_set_regions(G1CollectionCandidateRegionList* optional_old_regions, double time_remaining_ms, - uint& num_optional_regions); + G1CollectionCandidateRegionList* selected); private: @@ -422,12 +420,12 @@ class G1Policy: public CHeapObj { // Fraction used when predicting how many optional regions to include in // the CSet. This fraction of the available time is used for optional regions, // the rest is used to add old regions to the normal CSet. - double optional_prediction_fraction() { return 0.2; } + double optional_prediction_fraction() const { return 0.2; } public: // Fraction used when evacuating the optional regions. 
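The selection loop in select_candidates_from_marking() above classifies each marking candidate by the remaining time budget: regions always go into the initial collection set until the minimum old collection set length is reached; with adaptive sizing they keep going into the initial set while the remaining time stays above the optional threshold (20% of the budget, see optional_prediction_fraction()), become optional while some time is left, and selection stops when the budget or the maximum length is exhausted. The sketch below restates that decision for a single region; the names are hypothetical and it is not part of the patch.

// Minimal sketch of the per-region decision in select_candidates_from_marking().
enum class Placement { Initial, Optional, Stop };

static Placement classify(double& time_remaining_ms,        // running budget, updated here
                          double predicted_time_ms,         // cf. predict_region_total_time_ms()
                          unsigned initial_len, unsigned optional_len,
                          unsigned min_old_cset_length, unsigned max_old_cset_length,
                          double optional_threshold_ms,     // 20% of the original budget
                          bool check_time_remaining) {      // adaptive young list length?
  if (initial_len + optional_len >= max_old_cset_length) {
    return Placement::Stop;                 // hard cap on old regions reached
  }
  time_remaining_ms = (time_remaining_ms > predicted_time_ms) ? time_remaining_ms - predicted_time_ms : 0.0;
  if (initial_len < min_old_cset_length) {
    return Placement::Initial;              // always take the minimum, even if expensive
  }
  if (!check_time_remaining) {
    return Placement::Stop;                 // fixed-size young list: stop at the minimum
  }
  if (time_remaining_ms > optional_threshold_ms) {
    return Placement::Initial;              // still comfortably within budget
  }
  if (time_remaining_ms > 0.0) {
    return Placement::Optional;             // tight budget: defer to optional evacuation
  }
  return Placement::Stop;                   // predicted time too high
}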
This fraction of the // remaining time is used to choose what regions to include in the evacuation. - double optional_evacuation_fraction() { return 0.75; } + double optional_evacuation_fraction() const { return 0.75; } uint tenuring_threshold() const { return _tenuring_threshold; } diff --git a/src/hotspot/share/gc/g1/g1RemSet.cpp b/src/hotspot/share/gc/g1/g1RemSet.cpp index d300240b00d10..b29125037b7e0 100644 --- a/src/hotspot/share/gc/g1/g1RemSet.cpp +++ b/src/hotspot/share/gc/g1/g1RemSet.cpp @@ -178,7 +178,7 @@ class G1RemSetScanState : public CHeapObj { bool marked_as_dirty = Atomic::cmpxchg(&_contains[region], false, true) == false; if (marked_as_dirty) { - uint allocated = Atomic::fetch_and_add(&_cur_idx, 1u); + uint allocated = Atomic::fetch_then_add(&_cur_idx, 1u); _buffer[allocated] = region; } } @@ -243,7 +243,7 @@ class G1RemSetScanState : public CHeapObj { const uint num_regions_per_worker = num_cards_per_worker / (uint)HeapRegion::CardsPerRegion; while (_cur_dirty_regions < _regions->size()) { - uint next = Atomic::fetch_and_add(&_cur_dirty_regions, num_regions_per_worker); + uint next = Atomic::fetch_then_add(&_cur_dirty_regions, num_regions_per_worker); uint max = MIN2(next + num_regions_per_worker, _regions->size()); for (uint i = next; i < max; i++) { @@ -420,7 +420,7 @@ class G1RemSetScanState : public CHeapObj { uint claim_cards_to_scan(uint region, uint increment) { assert(region < _max_reserved_regions, "Tried to access invalid region %u", region); - return Atomic::fetch_and_add(&_card_table_scan_state[region], increment, memory_order_relaxed); + return Atomic::fetch_then_add(&_card_table_scan_state[region], increment, memory_order_relaxed); } void add_dirty_region(uint const region) { diff --git a/src/hotspot/share/gc/g1/g1RootClosures.cpp b/src/hotspot/share/gc/g1/g1RootClosures.cpp index 946b2ed2fdea6..0d3fac2c11efb 100644 --- a/src/hotspot/share/gc/g1/g1RootClosures.cpp +++ b/src/hotspot/share/gc/g1/g1RootClosures.cpp @@ -69,7 +69,9 @@ class G1ConcurrentStartMarkClosures : public G1EvacuationRootClosures { CodeBlobClosure* weak_codeblobs() { return &_weak._codeblobs; } }; -G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h) { +G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1CollectedHeap* g1h, + G1ParScanThreadState* pss, + bool process_only_dirty_klasses) { G1EvacuationRootClosures* res = nullptr; if (g1h->collector_state()->in_concurrent_start_gc()) { if (ClassUnloadingWithConcurrentMark) { @@ -78,7 +80,7 @@ G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1ParSc res = new G1ConcurrentStartMarkClosures(g1h, pss); } } else { - res = new G1EvacuationClosures(g1h, pss, g1h->collector_state()->in_young_only_phase()); + res = new G1EvacuationClosures(g1h, pss, process_only_dirty_klasses); } return res; } diff --git a/src/hotspot/share/gc/g1/g1RootClosures.hpp b/src/hotspot/share/gc/g1/g1RootClosures.hpp index 9159e3a312d96..cd3e30ad62254 100644 --- a/src/hotspot/share/gc/g1/g1RootClosures.hpp +++ b/src/hotspot/share/gc/g1/g1RootClosures.hpp @@ -49,7 +49,9 @@ class G1EvacuationRootClosures : public G1RootClosures { // Applied to code blobs treated as weak roots. 
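The Atomic::fetch_and_add calls renamed to Atomic::fetch_then_add throughout this change keep their semantics: the returned value is the counter's value before the addition, which is what makes the claim-an-index pattern race free. Below is a minimal sketch of the same pattern using std::atomic; illustrative only, not HotSpot code.

#include <atomic>
#include <cstdint>

// fetch_add returns the previous value, so each thread claims a distinct slot.
static std::atomic<uint32_t> next_index{0};

static bool claim_slot(uint32_t capacity, uint32_t& out_index) {
  uint32_t idx = next_index.fetch_add(1, std::memory_order_relaxed);
  if (idx >= capacity) {
    return false;   // exhausted; the counter may overshoot, which is harmless
  }
  out_index = idx;  // exclusive ownership of slot idx
  return true;
}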
virtual CodeBlobClosure* weak_codeblobs() = 0; - static G1EvacuationRootClosures* create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h); + static G1EvacuationRootClosures* create_root_closures(G1CollectedHeap* g1h, + G1ParScanThreadState* pss, + bool process_only_dirty_klasses); }; #endif // SHARE_GC_G1_G1ROOTCLOSURES_HPP diff --git a/src/hotspot/share/gc/g1/g1YoungCollector.cpp b/src/hotspot/share/gc/g1/g1YoungCollector.cpp index 607b5c5109256..aac7aced7b494 100644 --- a/src/hotspot/share/gc/g1/g1YoungCollector.cpp +++ b/src/hotspot/share/gc/g1/g1YoungCollector.cpp @@ -268,7 +268,7 @@ void G1YoungCollector::calculate_collection_set(G1EvacInfo* evacuation_info, dou collection_set()->finalize_initial_collection_set(target_pause_time_ms, survivor_regions()); evacuation_info->set_collection_set_regions(collection_set()->region_length() + - collection_set()->optional_region_length()); + collection_set()->optional_region_length()); concurrent_mark()->verify_no_collection_set_oops(); @@ -1014,7 +1014,7 @@ void G1YoungCollector::collect() { G1YoungGCJFRTracerMark jtm(gc_timer_stw(), gc_tracer_stw(), _gc_cause); // JStat/MXBeans G1YoungGCMonitoringScope ms(monitoring_support(), - collector_state()->in_mixed_phase() /* all_memory_pools_affected */); + !collection_set()->candidates()->is_empty() /* all_memory_pools_affected */); // Create the heap printer before internal pause timing to have // heap information printed as last part of detailed GC log. G1HeapPrinterMark hpm(_g1h); @@ -1043,8 +1043,7 @@ void G1YoungCollector::collect() { G1ParScanThreadStateSet per_thread_states(_g1h, workers()->active_workers(), - collection_set()->young_region_length(), - collection_set()->optional_region_length(), + collection_set(), &_evac_failure_regions); bool may_do_optional_evacuation = collection_set()->optional_region_length() != 0; diff --git a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp index 21fc522317fad..6edf87bb99197 100644 --- a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp +++ b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp @@ -28,7 +28,7 @@ #include "gc/g1/g1CardSetMemory.hpp" #include "gc/g1/g1CardTableEntryClosure.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" -#include "gc/g1/g1CollectionSetCandidates.hpp" +#include "gc/g1/g1CollectionSetCandidates.inline.hpp" #include "gc/g1/g1ConcurrentMark.inline.hpp" #include "gc/g1/g1EvacStats.inline.hpp" #include "gc/g1/g1EvacInfo.hpp" @@ -81,21 +81,14 @@ class G1PostEvacuateCollectionSetCleanupTask1::SampleCollectionSetCandidatesTask } void do_work(uint worker_id) override { - - class G1SampleCollectionSetCandidatesClosure : public HeapRegionClosure { - public: - G1MonotonicArenaMemoryStats _total; - - bool do_heap_region(HeapRegion* r) override { - _total.add(r->rem_set()->card_set_memory_stats()); - return false; - } - } cl; - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - g1h->collection_set()->candidates()->iterate(&cl); - g1h->set_collection_set_candidates_stats(cl._total); + G1MonotonicArenaMemoryStats _total; + G1CollectionSetCandidates* candidates = g1h->collection_set()->candidates(); + for (HeapRegion* r : *candidates) { + _total.add(r->rem_set()->card_set_memory_stats()); + } + g1h->set_collection_set_candidates_stats(_total); } }; @@ -357,7 +350,6 @@ class G1PostEvacuateCollectionSetCleanupTask2::ClearRetainedRegionBitmaps : publ }; public: - ClearRetainedRegionBitmaps(G1EvacFailureRegions* evac_failure_regions) : 
G1AbstractSubTask(G1GCPhaseTimes::ClearRetainedRegionBitmaps), _evac_failure_regions(evac_failure_regions), diff --git a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp index c03550d1676ac..fdedf73b64a10 100644 --- a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp +++ b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp @@ -69,8 +69,8 @@ class G1PostEvacuateCollectionSetCleanupTask2 : public G1BatchedTask { class ClearRetainedRegionBitmaps; class RedirtyLoggedCardsTask; class RestorePreservedMarksTask; - class ResizeTLABsTask; class FreeCollectionSetTask; + class ResizeTLABsTask; public: G1PostEvacuateCollectionSetCleanupTask2(G1ParScanThreadStateSet* per_thread_states, diff --git a/src/hotspot/share/gc/g1/heapRegion.cpp b/src/hotspot/share/gc/g1/heapRegion.cpp index 0c2f05f14d9eb..b3e64c064e007 100644 --- a/src/hotspot/share/gc/g1/heapRegion.cpp +++ b/src/hotspot/share/gc/g1/heapRegion.cpp @@ -28,6 +28,7 @@ #include "gc/g1/g1BlockOffsetTable.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectionSet.hpp" +#include "gc/g1/g1CollectionSetCandidates.inline.hpp" #include "gc/g1/g1HeapRegionTraceType.hpp" #include "gc/g1/g1NUMA.hpp" #include "gc/g1/g1OopClosures.inline.hpp" @@ -127,8 +128,6 @@ void HeapRegion::hr_clear(bool clear_space) { init_top_at_mark_start(); if (clear_space) clear(SpaceDecorator::Mangle); - - _gc_efficiency = -1.0; } void HeapRegion::clear_cardtable() { @@ -136,7 +135,7 @@ void HeapRegion::clear_cardtable() { ct->clear_MemRegion(MemRegion(bottom(), end())); } -void HeapRegion::calc_gc_efficiency() { +double HeapRegion::calc_gc_efficiency() { // GC efficiency is the ratio of how much space would be // reclaimed over how long we predict it would take to reclaim it. G1Policy* policy = G1CollectedHeap::heap()->policy(); @@ -145,7 +144,7 @@ void HeapRegion::calc_gc_efficiency() { // a mixed gc because the region will only be evacuated during a // mixed gc. double region_elapsed_time_ms = policy->predict_region_total_time_ms(this, false /* for_young_only_phase */); - _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms; + return (double)reclaimable_bytes() / region_elapsed_time_ms; } void HeapRegion::set_free() { @@ -233,7 +232,8 @@ HeapRegion::HeapRegion(uint hrm_index, _parsable_bottom(nullptr), _garbage_bytes(0), _young_index_in_cset(-1), - _surv_rate_group(nullptr), _age_index(G1SurvRateGroup::InvalidAgeIndex), _gc_efficiency(-1.0), + _surv_rate_group(nullptr), + _age_index(G1SurvRateGroup::InvalidAgeIndex), _node_index(G1NUMA::UnknownNodeIndex) { assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()), @@ -263,7 +263,7 @@ void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) { used()); } -void HeapRegion::note_evacuation_failure(bool during_concurrent_start) { + void HeapRegion::note_evacuation_failure(bool during_concurrent_start) { // PB must be bottom - we only evacuate old gen regions after scrubbing, and // young gen regions never have their PB set to anything other than bottom. 
assert(parsable_bottom_acquire() == bottom(), "must be"); @@ -429,6 +429,9 @@ void HeapRegion::print_on(outputStream* st) const { st->print("|%2s", get_short_type_str()); if (in_collection_set()) { st->print("|CS"); + } else if (is_collection_set_candidate()) { + G1CollectionSetCandidates* candidates = G1CollectedHeap::heap()->collection_set()->candidates(); + st->print("|%s", candidates->get_short_type_str(this)); } else { st->print("| "); } diff --git a/src/hotspot/share/gc/g1/heapRegion.hpp b/src/hotspot/share/gc/g1/heapRegion.hpp index 218fd4d1ac588..05a3299889b4b 100644 --- a/src/hotspot/share/gc/g1/heapRegion.hpp +++ b/src/hotspot/share/gc/g1/heapRegion.hpp @@ -250,11 +250,7 @@ class HeapRegion : public CHeapObj { G1SurvRateGroup* _surv_rate_group; int _age_index; - // Cached attributes used in the collection set policy information - - // The calculated GC efficiency of the region. - double _gc_efficiency; - + // NUMA node. uint _node_index; void report_region_type_change(G1HeapRegionTraceType::Type to); @@ -341,10 +337,12 @@ class HeapRegion : public CHeapObj { // since it will also be reclaimed if we collect the region. size_t reclaimable_bytes() { size_t known_live_bytes = live_bytes(); - assert(known_live_bytes <= capacity(), "sanity"); + assert(known_live_bytes <= capacity(), "sanity %u %zu %zu %zu", hrm_index(), known_live_bytes, used(), garbage_bytes()); return capacity() - known_live_bytes; } + inline bool is_collection_set_candidate() const; + // Get the start of the unmarked area in this region. HeapWord* top_at_mark_start() const; void set_top_at_mark_start(HeapWord* value); @@ -378,7 +376,7 @@ class HeapRegion : public CHeapObj { // This set only includes old regions - humongous regions only // contain a single object which is either dead or live, and young regions are never even // considered during concurrent scrub. - bool needs_scrubbing() const { return is_old(); } + bool needs_scrubbing() const; // Same question as above, during full gc. Full gc needs to scrub any region that // might be skipped for compaction. This includes young generation regions as the // region relabeling to old happens later than scrubbing. @@ -440,6 +438,8 @@ class HeapRegion : public CHeapObj { inline bool in_collection_set() const; + inline const char* collection_set_candidate_short_type_str() const; + void prepare_remset_for_scan(); // Methods used by the HeapRegionSetBase class and subclasses. 
@@ -501,8 +501,7 @@ class HeapRegion : public CHeapObj { void set_index_in_opt_cset(uint index) { _index_in_opt_cset = index; } void clear_index_in_opt_cset() { _index_in_opt_cset = InvalidCSetIndex; } - void calc_gc_efficiency(void); - double gc_efficiency() const { return _gc_efficiency;} + double calc_gc_efficiency(); uint young_index_in_cset() const { return _young_index_in_cset; } void clear_young_index_in_cset() { _young_index_in_cset = 0; } diff --git a/src/hotspot/share/gc/g1/heapRegion.inline.hpp b/src/hotspot/share/gc/g1/heapRegion.inline.hpp index a3ea47c83a826..5109c6a1e582c 100644 --- a/src/hotspot/share/gc/g1/heapRegion.inline.hpp +++ b/src/hotspot/share/gc/g1/heapRegion.inline.hpp @@ -162,6 +162,10 @@ inline HeapWord* HeapRegion::next_live_in_unparsable(const HeapWord* p, HeapWord return next_live_in_unparsable(bitmap, p, limit); } +inline bool HeapRegion::is_collection_set_candidate() const { + return G1CollectedHeap::heap()->is_collection_set_candidate(this); +} + inline size_t HeapRegion::block_size(const HeapWord* p) const { return block_size(p, parsable_bottom()); } @@ -290,14 +294,18 @@ inline void HeapRegion::reset_parsable_bottom() { } inline void HeapRegion::note_start_of_marking() { - set_top_at_mark_start(top()); - _gc_efficiency = -1.0; + assert(top_at_mark_start() == bottom(), "CA region's TAMS must always be at bottom"); + if (is_old_or_humongous()) { + set_top_at_mark_start(top()); + } } inline void HeapRegion::note_end_of_marking(size_t marked_bytes) { assert_at_safepoint(); - _garbage_bytes = byte_size(bottom(), top_at_mark_start()) - marked_bytes; + if (top_at_mark_start() != bottom()) { + _garbage_bytes = byte_size(bottom(), top_at_mark_start()) - marked_bytes; + } if (needs_scrubbing()) { _parsable_bottom = top_at_mark_start(); @@ -325,6 +333,10 @@ inline void HeapRegion::reset_top_at_mark_start() { set_top_at_mark_start(bottom()); } +inline bool HeapRegion::needs_scrubbing() const { + return is_old(); +} + inline bool HeapRegion::in_collection_set() const { return G1CollectedHeap::heap()->is_in_cset(this); } diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index 39bbccf77852b..45e734231a55d 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -590,7 +590,7 @@ class HeapBlockClaimer : public StackObj { // Claim the block and get the block index. 
size_t claim_and_get_block() { size_t block_index; - block_index = Atomic::fetch_and_add(&_claimed_index, 1u); + block_index = Atomic::fetch_then_add(&_claimed_index, 1u); PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen(); size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims; diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp index 3a882fb9b833b..64b2c0475f44f 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp @@ -2069,7 +2069,7 @@ void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) { { GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer); - _gc_tracer.report_object_count_after_gc(is_alive_closure()); + _gc_tracer.report_object_count_after_gc(is_alive_closure(), &ParallelScavengeHeap::heap()->workers()); } #if TASKQUEUE_STATS ParCompactionManager::oop_task_queues()->print_and_reset_taskqueue_stats("Oop Queue"); @@ -2238,7 +2238,7 @@ class TaskQueue : StackObj { } bool try_claim(PSParallelCompact::UpdateDensePrefixTask& reference) { - uint claimed = Atomic::fetch_and_add(&_counter, 1u); + uint claimed = Atomic::fetch_then_add(&_counter, 1u); if (claimed < _insert_index) { reference = _backing_array[claimed]; return true; diff --git a/src/hotspot/share/gc/serial/genMarkSweep.cpp b/src/hotspot/share/gc/serial/genMarkSweep.cpp index 4f0f9575c4c4b..44409ac69d8a8 100644 --- a/src/hotspot/share/gc/serial/genMarkSweep.cpp +++ b/src/hotspot/share/gc/serial/genMarkSweep.cpp @@ -217,7 +217,7 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) { { GCTraceTime(Debug, gc, phases) tm_m("Report Object Count", gc_timer()); - gc_tracer()->report_object_count_after_gc(&is_alive); + gc_tracer()->report_object_count_after_gc(&is_alive, nullptr); } } diff --git a/src/hotspot/share/gc/shared/barrierSetConfig.hpp b/src/hotspot/share/gc/shared/barrierSetConfig.hpp index e20e9ef6f7cbd..76681aa898687 100644 --- a/src/hotspot/share/gc/shared/barrierSetConfig.hpp +++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ EPSILONGC_ONLY(f(EpsilonBarrierSet)) \ G1GC_ONLY(f(G1BarrierSet)) \ SHENANDOAHGC_ONLY(f(ShenandoahBarrierSet)) \ + ZGC_ONLY(f(XBarrierSet)) \ ZGC_ONLY(f(ZBarrierSet)) #define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \ diff --git a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp index 27f36f1fee40d..9523428821b7e 100644 --- a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp +++ b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -40,6 +40,7 @@ #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp" #endif #if INCLUDE_ZGC +#include "gc/x/xBarrierSet.inline.hpp" #include "gc/z/zBarrierSet.inline.hpp" #endif diff --git a/src/hotspot/share/gc/shared/barrierSetNMethod.cpp b/src/hotspot/share/gc/shared/barrierSetNMethod.cpp index 793ae9fc6e946..274744d5de256 100644 --- a/src/hotspot/share/gc/shared/barrierSetNMethod.cpp +++ b/src/hotspot/share/gc/shared/barrierSetNMethod.cpp @@ -39,6 +39,9 @@ #include "runtime/threadWXSetters.inline.hpp" #include "runtime/threads.hpp" #include "utilities/debug.hpp" +#if INCLUDE_JVMCI +#include "jvmci/jvmciRuntime.hpp" +#endif int BarrierSetNMethod::disarmed_guard_value() const { return *disarmed_guard_value_address(); @@ -62,11 +65,17 @@ bool BarrierSetNMethod::supports_entry_barrier(nmethod* nm) { return false; } - if (!nm->is_native_method() && !nm->is_compiled_by_c2() && !nm->is_compiled_by_c1()) { - return false; + if (nm->is_native_method() || nm->is_compiled_by_c2() || nm->is_compiled_by_c1()) { + return true; } - return true; +#if INCLUDE_JVMCI + if (nm->is_compiled_by_jvmci() && nm->jvmci_nmethod_data()->has_entry_barrier()) { + return true; + } +#endif + + return false; } void BarrierSetNMethod::disarm(nmethod* nm) { diff --git a/src/hotspot/share/gc/shared/barrierSetNMethod.hpp b/src/hotspot/share/gc/shared/barrierSetNMethod.hpp index 7629c11949a7c..d003abe9bbe0d 100644 --- a/src/hotspot/share/gc/shared/barrierSetNMethod.hpp +++ b/src/hotspot/share/gc/shared/barrierSetNMethod.hpp @@ -26,6 +26,7 @@ #define SHARE_GC_SHARED_BARRIERSETNMETHOD_HPP #include "memory/allocation.hpp" +#include "utilities/formatBuffer.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/sizes.hpp" @@ -55,6 +56,10 @@ class BarrierSetNMethod: public CHeapObj { void set_guard_value(nmethod* nm, int value); void arm_all_nmethods(); + +#if INCLUDE_JVMCI + bool verify_barrier(nmethod* nm, FormatBuffer<>& msg); +#endif }; diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp index e427e6de6689b..073db543495d3 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -95,6 +95,7 @@ class CollectedHeap : public CHeapObj { friend class VMStructs; friend class JVMCIVMStructs; friend class IsGCActiveMark; // Block structured external access to _is_gc_active + friend class DisableIsGCActiveMark; // Disable current IsGCActiveMark friend class MemAllocator; friend class ParallelObjectIterator; diff --git a/src/hotspot/share/gc/shared/gcConfig.cpp b/src/hotspot/share/gc/shared/gcConfig.cpp index 8eb265b54d939..506b368d6cf05 100644 --- a/src/hotspot/share/gc/shared/gcConfig.cpp +++ b/src/hotspot/share/gc/shared/gcConfig.cpp @@ -44,7 +44,7 @@ #include "gc/shenandoah/shenandoahArguments.hpp" #endif #if INCLUDE_ZGC -#include "gc/z/zArguments.hpp" +#include "gc/z/shared/zSharedArguments.hpp" #endif struct IncludedGC { @@ -62,7 +62,7 @@ struct IncludedGC { PARALLELGC_ONLY(static ParallelArguments parallelArguments;) SERIALGC_ONLY(static SerialArguments serialArguments;) SHENANDOAHGC_ONLY(static ShenandoahArguments shenandoahArguments;) - ZGC_ONLY(static ZArguments zArguments;) + ZGC_ONLY(static ZSharedArguments zArguments;) // Table of included GCs, for translating between command // line flag, CollectedHeap::Name and GCArguments instance. 
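The Atomic::fetch_and_add call sites touched above and below (HeapBlockClaimer::claim_and_get_block, TaskQueue::try_claim, the worker-id and region/nmethod claiming loops) are only renamed to Atomic::fetch_then_add; the claiming idiom itself is unchanged. A minimal standalone sketch of that idiom follows, using std::atomic as a stand-in for HotSpot's Atomic class; the ClaimSketch and claim_next names are illustrative only and not part of this patch.

#include <atomic>
#include <cstddef>

// Sketch of the claim-by-atomic-add pattern: each caller receives a unique
// pre-increment value, so a plain fetch-then-add suffices and no CAS loop is
// needed (the partialArrayTaskStepper comment below makes the same point).
class ClaimSketch {
  std::atomic<size_t> _claimed_index{0};
  const size_t _limit;

public:
  explicit ClaimSketch(size_t limit) : _limit(limit) {}

  // Returns true and stores the claimed index; returns false once exhausted.
  bool claim_next(size_t& index) {
    const size_t claimed = _claimed_index.fetch_add(1, std::memory_order_relaxed);
    if (claimed >= _limit) {
      return false;
    }
    index = claimed;
    return true;
  }
};

Because every claimer sees a distinct previous value, out-of-range claims simply return false rather than being retried, which matches how the renamed call sites bound their claims against num_iterable_blocks, _insert_index, or the region count.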
diff --git a/src/hotspot/share/gc/shared/gcConfiguration.cpp b/src/hotspot/share/gc/shared/gcConfiguration.cpp index 3b6e87fdde06d..2e8d3eb2a515a 100644 --- a/src/hotspot/share/gc/shared/gcConfiguration.cpp +++ b/src/hotspot/share/gc/shared/gcConfiguration.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" #include "gc/shared/collectedHeap.hpp" +#include "gc/shared/gc_globals.hpp" #include "gc/shared/gcArguments.hpp" #include "gc/shared/gcConfiguration.hpp" #include "gc/shared/tlab_globals.hpp" @@ -41,7 +42,15 @@ GCName GCConfiguration::young_collector() const { return ParallelScavenge; } - if (UseZGC || UseShenandoahGC) { + if (UseZGC) { + if (ZGenerational) { + return ZMinor; + } else { + return NA; + } + } + + if (UseShenandoahGC) { return NA; } @@ -58,7 +67,11 @@ GCName GCConfiguration::old_collector() const { } if (UseZGC) { - return Z; + if (ZGenerational) { + return ZMajor; + } else { + return Z; + } } if (UseShenandoahGC) { diff --git a/src/hotspot/share/gc/shared/gcId.cpp b/src/hotspot/share/gc/shared/gcId.cpp index 97f13a1304db5..646de8339104c 100644 --- a/src/hotspot/share/gc/shared/gcId.cpp +++ b/src/hotspot/share/gc/shared/gcId.cpp @@ -30,6 +30,18 @@ #include "runtime/safepoint.hpp" uint GCId::_next_id = 0; +GCIdPrinter GCId::_default_printer; +GCIdPrinter* GCId::_printer = &_default_printer; + +size_t GCIdPrinter::print_gc_id(uint gc_id, char* buf, size_t len) { + int ret = jio_snprintf(buf, len, "GC(%u) ", gc_id); + assert(ret > 0, "Failed to print prefix. Log buffer too small?"); + return (size_t)ret; +} + +void GCId::set_printer(GCIdPrinter* printer) { + _printer = printer; +} NamedThread* currentNamedthread() { assert(Thread::current()->is_Named_thread(), "This thread must be NamedThread"); @@ -59,24 +71,20 @@ size_t GCId::print_prefix(char* buf, size_t len) { if (thread != nullptr) { uint gc_id = current_or_undefined(); if (gc_id != undefined()) { - int ret = jio_snprintf(buf, len, "GC(%u) ", gc_id); - assert(ret > 0, "Failed to print prefix. Log buffer too small?"); - return (size_t)ret; + return _printer->print_gc_id(gc_id, buf, len); } } return 0; } -GCIdMark::GCIdMark() { - assert(currentNamedthread()->gc_id() == GCId::undefined(), "nested"); +GCIdMark::GCIdMark() : _previous_gc_id(currentNamedthread()->gc_id()) { currentNamedthread()->set_gc_id(GCId::create()); } -GCIdMark::GCIdMark(uint gc_id) { - assert(currentNamedthread()->gc_id() == GCId::undefined(), "nested"); +GCIdMark::GCIdMark(uint gc_id) : _previous_gc_id(currentNamedthread()->gc_id()) { currentNamedthread()->set_gc_id(gc_id); } GCIdMark::~GCIdMark() { - currentNamedthread()->set_gc_id(GCId::undefined()); + currentNamedthread()->set_gc_id(_previous_gc_id); } diff --git a/src/hotspot/share/gc/shared/gcId.hpp b/src/hotspot/share/gc/shared/gcId.hpp index 1e25d9dc44629..d36929b03dc97 100644 --- a/src/hotspot/share/gc/shared/gcId.hpp +++ b/src/hotspot/share/gc/shared/gcId.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,12 @@ #include "memory/allocation.hpp" +class GCIdPrinter : public CHeapObj { +public: + virtual ~GCIdPrinter() {} + virtual size_t print_gc_id(uint gc_id, char* buf, size_t len); +}; + class GCId : public AllStatic { private: friend class GCIdMark; @@ -35,6 +41,10 @@ class GCId : public AllStatic { static const uint UNDEFINED = UINT_MAX; static uint create(); + // Default printer used unless a custom printer is set + static GCIdPrinter _default_printer; + static GCIdPrinter* _printer; + public: // Returns the currently active GC id. Asserts that there is an active GC id. static uint current(); @@ -44,9 +54,14 @@ class GCId : public AllStatic { static uint peek(); static uint undefined() { return UNDEFINED; } static size_t print_prefix(char* buf, size_t len); + // Set a custom GCId printer + static void set_printer(GCIdPrinter* printer); }; class GCIdMark : public StackObj { +private: + const uint _previous_gc_id; + public: GCIdMark(); GCIdMark(uint gc_id); diff --git a/src/hotspot/share/gc/shared/gcName.hpp b/src/hotspot/share/gc/shared/gcName.hpp index ca40c642f3efd..3d2dd350ac10e 100644 --- a/src/hotspot/share/gc/shared/gcName.hpp +++ b/src/hotspot/share/gc/shared/gcName.hpp @@ -35,7 +35,9 @@ enum GCName { G1New, G1Old, G1Full, - Z, + ZMinor, + ZMajor, + Z, // Support for the legacy, single-gen mode Shenandoah, NA, GCNameEndSentinel @@ -52,6 +54,8 @@ class GCNameHelper { case G1New: return "G1New"; case G1Old: return "G1Old"; case G1Full: return "G1Full"; + case ZMinor: return "ZGC Minor"; + case ZMajor: return "ZGC Major"; case Z: return "Z"; case Shenandoah: return "Shenandoah"; case NA: return "N/A"; diff --git a/src/hotspot/share/gc/shared/gcThreadLocalData.hpp b/src/hotspot/share/gc/shared/gcThreadLocalData.hpp index 404e1520b6ba9..ba0e6d8fb1a01 100644 --- a/src/hotspot/share/gc/shared/gcThreadLocalData.hpp +++ b/src/hotspot/share/gc/shared/gcThreadLocalData.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,6 +40,6 @@ // should consider placing frequently accessed fields first in // T, so that field offsets relative to Thread are small, which // often allows for a more compact instruction encoding. 
-typedef uint64_t GCThreadLocalData[19]; // 152 bytes +typedef uint64_t GCThreadLocalData[43]; // 344 bytes #endif // SHARE_GC_SHARED_GCTHREADLOCALDATA_HPP diff --git a/src/hotspot/share/gc/shared/gcTrace.cpp b/src/hotspot/share/gc/shared/gcTrace.cpp index 3c8a69b391504..1c84c3ca88bdc 100644 --- a/src/hotspot/share/gc/shared/gcTrace.cpp +++ b/src/hotspot/share/gc/shared/gcTrace.cpp @@ -100,7 +100,7 @@ class ObjectCountEventSenderClosure : public KlassInfoClosure { } }; -void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) { +void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl, WorkerThreads* workers) { assert(is_alive_cl != nullptr, "Must supply function to check liveness"); if (ObjectCountEventSender::should_send_event()) { @@ -109,7 +109,7 @@ void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) { KlassInfoTable cit(false); if (!cit.allocation_failed()) { HeapInspection hi; - hi.populate_table(&cit, is_alive_cl); + hi.populate_table(&cit, is_alive_cl, workers); ObjectCountEventSenderClosure event_sender(cit.size_of_instances_in_words(), Ticks::now()); cit.iterate(&event_sender); } diff --git a/src/hotspot/share/gc/shared/gcTrace.hpp b/src/hotspot/share/gc/shared/gcTrace.hpp index 4cdeacb88063b..9c747e139df1d 100644 --- a/src/hotspot/share/gc/shared/gcTrace.hpp +++ b/src/hotspot/share/gc/shared/gcTrace.hpp @@ -30,6 +30,7 @@ #include "gc/shared/gcId.hpp" #include "gc/shared/gcName.hpp" #include "gc/shared/gcWhen.hpp" +#include "gc/shared/workerThread.hpp" #include "memory/metaspace.hpp" #include "memory/referenceType.hpp" #include "utilities/macros.hpp" @@ -102,7 +103,7 @@ class GCTracer { void report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary) const; void report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& metaspace_summary) const; void report_gc_reference_stats(const ReferenceProcessorStats& rp) const; - void report_object_count_after_gc(BoolObjectClosure* object_filter) NOT_SERVICES_RETURN; + void report_object_count_after_gc(BoolObjectClosure* object_filter, WorkerThreads* workers) NOT_SERVICES_RETURN; void report_cpu_time_event(double user_time, double system_time, double real_time) const; protected: diff --git a/src/hotspot/share/gc/shared/gcTraceSend.cpp b/src/hotspot/share/gc/shared/gcTraceSend.cpp index d7117ffbb40c3..31ec2871cd05e 100644 --- a/src/hotspot/share/gc/shared/gcTraceSend.cpp +++ b/src/hotspot/share/gc/shared/gcTraceSend.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -317,11 +317,12 @@ class PhaseSender : public PhaseVisitor { } void visit_concurrent(GCPhase* phase) { - assert(phase->level() < 2, "There is only two levels for ConcurrentPhase"); + assert(phase->level() < 3, "There are only three levels for ConcurrentPhase"); switch (phase->level()) { case 0: send_phase(phase); break; case 1: send_phase(phase); break; + case 2: send_phase(phase); break; default: /* Ignore sending this phase */ break; } } diff --git a/src/hotspot/share/gc/shared/gcVMOperations.cpp b/src/hotspot/share/gc/shared/gcVMOperations.cpp index e39e590283a3b..ceb4d2cc32223 100644 --- a/src/hotspot/share/gc/shared/gcVMOperations.cpp +++ b/src/hotspot/share/gc/shared/gcVMOperations.cpp @@ -135,6 +135,17 @@ void VM_GC_Operation::doit_epilogue() { VM_GC_Sync_Operation::doit_epilogue(); } +bool VM_GC_HeapInspection::doit_prologue() { + if (_full_gc && UseZGC) { + // ZGC cannot perform a synchronous GC cycle from within the VM thread. + // So VM_GC_HeapInspection::collect() is a noop. To respect the _full_gc + // flag a synchronous GC cycle is performed from the caller thread in the + // prologue. + Universe::heap()->collect(GCCause::_heap_inspection); + } + return VM_GC_Operation::doit_prologue(); +} + bool VM_GC_HeapInspection::skip_operation() const { return false; } @@ -167,7 +178,16 @@ void VM_GC_HeapInspection::doit() { } } HeapInspection inspect; - inspect.heap_inspection(_out, _parallel_thread_num); + WorkerThreads* workers = Universe::heap()->safepoint_workers(); + if (workers != nullptr) { + // The GC provided a WorkerThreads to be used during a safepoint. + // Can't run with more threads than provided by the WorkerThreads. + const uint capped_parallel_thread_num = MIN2(_parallel_thread_num, workers->max_workers()); + WithActiveWorkers with_active_workers(workers, capped_parallel_thread_num); + inspect.heap_inspection(_out, workers); + } else { + inspect.heap_inspection(_out, nullptr); + } } diff --git a/src/hotspot/share/gc/shared/gcVMOperations.hpp b/src/hotspot/share/gc/shared/gcVMOperations.hpp index 58aa76cb4ba96..378ba2903679a 100644 --- a/src/hotspot/share/gc/shared/gcVMOperations.hpp +++ b/src/hotspot/share/gc/shared/gcVMOperations.hpp @@ -171,6 +171,7 @@ class VM_GC_HeapInspection: public VM_GC_Operation { ~VM_GC_HeapInspection() {} virtual VMOp_Type type() const { return VMOp_GC_HeapInspection; } virtual bool skip_operation() const; + virtual bool doit_prologue(); virtual void doit(); protected: bool collect(); diff --git a/src/hotspot/share/gc/shared/gc_globals.hpp b/src/hotspot/share/gc/shared/gc_globals.hpp index 2365b27b3dc79..90c009f73886d 100644 --- a/src/hotspot/share/gc/shared/gc_globals.hpp +++ b/src/hotspot/share/gc/shared/gc_globals.hpp @@ -43,7 +43,7 @@ #include "gc/shenandoah/shenandoah_globals.hpp" #endif #if INCLUDE_ZGC -#include "gc/z/z_globals.hpp" +#include "gc/z/shared/z_shared_globals.hpp" #endif #define GC_FLAGS(develop, \ @@ -99,7 +99,7 @@ range, \ constraint)) \ \ - ZGC_ONLY(GC_Z_FLAGS( \ + ZGC_ONLY(GC_Z_SHARED_FLAGS( \ develop, \ develop_pd, \ product, \ @@ -125,6 +125,9 @@ product(bool, UseZGC, false, \ "Use the Z garbage collector") \ \ + product(bool, ZGenerational, false, \ + "Use the generational version of ZGC") \ + \ product(bool, UseShenandoahGC, false, \ "Use the Shenandoah garbage collector") \ \ diff --git a/src/hotspot/share/gc/shared/isGCActiveMark.cpp b/src/hotspot/share/gc/shared/isGCActiveMark.cpp index 5d5627b164162..c797ff43e5aeb 100644 --- 
a/src/hotspot/share/gc/shared/isGCActiveMark.cpp +++ b/src/hotspot/share/gc/shared/isGCActiveMark.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,3 +42,15 @@ IsGCActiveMark::~IsGCActiveMark() { assert(heap->is_gc_active(), "Sanity"); heap->_is_gc_active = false; } + +DisableIsGCActiveMark::DisableIsGCActiveMark() { + CollectedHeap* heap = Universe::heap(); + assert(heap->is_gc_active(), "Not reentrant"); + heap->_is_gc_active = false; +} + +DisableIsGCActiveMark::~DisableIsGCActiveMark() { + CollectedHeap* heap = Universe::heap(); + assert(!heap->is_gc_active(), "Sanity"); + heap->_is_gc_active = true; +} diff --git a/src/hotspot/share/gc/shared/isGCActiveMark.hpp b/src/hotspot/share/gc/shared/isGCActiveMark.hpp index 8b8ce7b4aeea6..c6d95d8a6340d 100644 --- a/src/hotspot/share/gc/shared/isGCActiveMark.hpp +++ b/src/hotspot/share/gc/shared/isGCActiveMark.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,4 +36,10 @@ class IsGCActiveMark : public StackObj { ~IsGCActiveMark(); }; +class DisableIsGCActiveMark : public StackObj { + public: + DisableIsGCActiveMark(); + ~DisableIsGCActiveMark(); +}; + #endif // SHARE_GC_SHARED_ISGCACTIVEMARK_HPP diff --git a/src/hotspot/share/gc/shared/partialArrayTaskStepper.inline.hpp b/src/hotspot/share/gc/shared/partialArrayTaskStepper.inline.hpp index 3dd3456bd5f0f..aa9a02c49025a 100644 --- a/src/hotspot/share/gc/shared/partialArrayTaskStepper.inline.hpp +++ b/src/hotspot/share/gc/shared/partialArrayTaskStepper.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -71,9 +71,9 @@ PartialArrayTaskStepper::next_impl(int length, // Because we limit the number of enqueued tasks to being no more than the // number of remaining chunks to process, we can use an atomic add for the // claim, rather than a CAS loop. 
- int start = Atomic::fetch_and_add(to_length_addr, - chunk_size, - memory_order_relaxed); + int start = Atomic::fetch_then_add(to_length_addr, + chunk_size, + memory_order_relaxed); assert(start < length, "invariant: start %d, length %d", start, length); assert(((length - start) % chunk_size) == 0, diff --git a/src/hotspot/share/gc/shared/suspendibleThreadSet.cpp b/src/hotspot/share/gc/shared/suspendibleThreadSet.cpp index 163da887bb543..a0d83ac298da6 100644 --- a/src/hotspot/share/gc/shared/suspendibleThreadSet.cpp +++ b/src/hotspot/share/gc/shared/suspendibleThreadSet.cpp @@ -91,7 +91,6 @@ void SuspendibleThreadSet::yield_slow() { } void SuspendibleThreadSet::synchronize() { - assert(Thread::current()->is_VM_thread(), "Must be the VM thread"); if (ConcGCYieldTimeout > 0) { _suspend_all_start = os::elapsedTime(); } @@ -126,7 +125,6 @@ void SuspendibleThreadSet::synchronize() { } void SuspendibleThreadSet::desynchronize() { - assert(Thread::current()->is_VM_thread(), "Must be the VM thread"); MonitorLocker ml(STS_lock, Mutex::_no_safepoint_check_flag); assert(should_yield(), "STS not synchronizing"); assert(is_synchronized(), "STS not synchronized"); diff --git a/src/hotspot/share/gc/shared/vmStructs_gc.hpp b/src/hotspot/share/gc/shared/vmStructs_gc.hpp index 2db0f466d34b6..acd153cd45d2f 100644 --- a/src/hotspot/share/gc/shared/vmStructs_gc.hpp +++ b/src/hotspot/share/gc/shared/vmStructs_gc.hpp @@ -50,7 +50,7 @@ #include "gc/shenandoah/vmStructs_shenandoah.hpp" #endif #if INCLUDE_ZGC -#include "gc/z/vmStructs_z.hpp" +#include "gc/z/shared/vmStructs_z_shared.hpp" #endif #define VM_STRUCTS_GC(nonstatic_field, \ @@ -72,9 +72,9 @@ SHENANDOAHGC_ONLY(VM_STRUCTS_SHENANDOAH(nonstatic_field, \ volatile_nonstatic_field, \ static_field)) \ - ZGC_ONLY(VM_STRUCTS_ZGC(nonstatic_field, \ - volatile_nonstatic_field, \ - static_field)) \ + ZGC_ONLY(VM_STRUCTS_Z_SHARED(nonstatic_field, \ + volatile_nonstatic_field, \ + static_field)) \ \ /**********************************************************************************/ \ /* Generation and Space hierarchies */ \ @@ -146,9 +146,9 @@ SHENANDOAHGC_ONLY(VM_TYPES_SHENANDOAH(declare_type, \ declare_toplevel_type, \ declare_integer_type)) \ - ZGC_ONLY(VM_TYPES_ZGC(declare_type, \ - declare_toplevel_type, \ - declare_integer_type)) \ + ZGC_ONLY(VM_TYPES_Z_SHARED(declare_type, \ + declare_toplevel_type, \ + declare_integer_type)) \ \ /******************************************/ \ /* Generation and space hierarchies */ \ @@ -210,8 +210,8 @@ declare_constant_with_value)) \ SHENANDOAHGC_ONLY(VM_INT_CONSTANTS_SHENANDOAH(declare_constant, \ declare_constant_with_value)) \ - ZGC_ONLY(VM_INT_CONSTANTS_ZGC(declare_constant, \ - declare_constant_with_value)) \ + ZGC_ONLY(VM_INT_CONSTANTS_Z_SHARED(declare_constant, \ + declare_constant_with_value)) \ \ /********************************************/ \ /* Generation and Space Hierarchy Constants */ \ @@ -243,6 +243,6 @@ declare_constant(Generation::GenGrain) \ #define VM_LONG_CONSTANTS_GC(declare_constant) \ - ZGC_ONLY(VM_LONG_CONSTANTS_ZGC(declare_constant)) + ZGC_ONLY(VM_LONG_CONSTANTS_Z_SHARED(declare_constant)) #endif // SHARE_GC_SHARED_VMSTRUCTS_GC_HPP diff --git a/src/hotspot/share/gc/shared/workerThread.cpp b/src/hotspot/share/gc/shared/workerThread.cpp index 14f9c349d63cc..b64c5050a2230 100644 --- a/src/hotspot/share/gc/shared/workerThread.cpp +++ b/src/hotspot/share/gc/shared/workerThread.cpp @@ -61,7 +61,7 @@ void WorkerTaskDispatcher::worker_run_task() { _start_semaphore.wait(); // Get and set worker id. 
- const uint worker_id = Atomic::fetch_and_add(&_started, 1u); + const uint worker_id = Atomic::fetch_then_add(&_started, 1u); WorkerThread::set_worker_id(worker_id); // Run task. @@ -141,8 +141,40 @@ void WorkerThreads::threads_do(ThreadClosure* tc) const { } } +void WorkerThreads::set_indirectly_suspendible_threads() { +#ifdef ASSERT + class SetIndirectlySuspendibleThreadClosure : public ThreadClosure { + virtual void do_thread(Thread* thread) { + thread->set_indirectly_suspendible_thread(); + } + }; + + if (Thread::current()->is_suspendible_thread()) { + SetIndirectlySuspendibleThreadClosure cl; + threads_do(&cl); + } +#endif +} + +void WorkerThreads::clear_indirectly_suspendible_threads() { +#ifdef ASSERT + class ClearIndirectlySuspendibleThreadClosure : public ThreadClosure { + virtual void do_thread(Thread* thread) { + thread->clear_indirectly_suspendible_thread(); + } + }; + + if (Thread::current()->is_suspendible_thread()) { + ClearIndirectlySuspendibleThreadClosure cl; + threads_do(&cl); + } +#endif +} + void WorkerThreads::run_task(WorkerTask* task) { + set_indirectly_suspendible_threads(); _dispatcher.coordinator_distribute_task(task, _active_workers); + clear_indirectly_suspendible_threads(); } void WorkerThreads::run_task(WorkerTask* task, uint num_workers) { diff --git a/src/hotspot/share/gc/shared/workerThread.hpp b/src/hotspot/share/gc/shared/workerThread.hpp index bdb61f34ed9d9..d3b246c0930b4 100644 --- a/src/hotspot/share/gc/shared/workerThread.hpp +++ b/src/hotspot/share/gc/shared/workerThread.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -93,6 +93,9 @@ class WorkerThreads : public CHeapObj { WorkerThread* create_worker(uint name_suffix); + void set_indirectly_suspendible_threads(); + void clear_indirectly_suspendible_threads(); + protected: virtual void on_create_worker(WorkerThread* worker) {} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index cb592370674ca..6f5981e672545 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1506,7 +1506,7 @@ class ShenandoahParallelHeapRegionTask : public WorkerTask { size_t max = _heap->num_regions(); while (Atomic::load(&_index) < max) { - size_t cur = Atomic::fetch_and_add(&_index, stride, memory_order_relaxed); + size_t cur = Atomic::fetch_then_add(&_index, stride, memory_order_relaxed); size_t start = cur; size_t end = MIN2(cur + stride, max); if (start >= max) break; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp index e9d853b571484..74aafeb3831b9 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp @@ -471,7 +471,7 @@ void ShenandoahNMethodTableSnapshot::parallel_blobs_do(CodeBlobClosure *f) { size_t max = (size_t)_limit; while (_claimed < max) { - size_t cur = Atomic::fetch_and_add(&_claimed, stride, memory_order_relaxed); + size_t cur = Atomic::fetch_then_add(&_claimed, stride, memory_order_relaxed); size_t start = cur; size_t end = MIN2(cur + stride, max); if (start >= max) break; @@ -495,7 +495,7 @@ void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl) ShenandoahNMethod** list = _list->list(); size_t max = (size_t)_limit; while (_claimed < max) { - size_t cur = Atomic::fetch_and_add(&_claimed, stride, memory_order_relaxed); + size_t cur = Atomic::fetch_then_add(&_claimed, stride, memory_order_relaxed); size_t start = cur; size_t end = MIN2(cur + stride, max); if (start >= max) break; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp index 834db23147fd1..586835d35ad02 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp @@ -46,7 +46,7 @@ ShenandoahJavaThreadsIterator::ShenandoahJavaThreadsIterator(ShenandoahPhaseTimi } uint ShenandoahJavaThreadsIterator::claim() { - return Atomic::fetch_and_add(&_claimed, _stride, memory_order_relaxed); + return Atomic::fetch_then_add(&_claimed, _stride, memory_order_relaxed); } void ShenandoahJavaThreadsIterator::threads_do(ThreadClosure* cl, uint worker_id) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index 0e15181dd177a..1d5d962a4ec4a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -520,7 +520,7 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask { _options); while (true) { - size_t v = Atomic::fetch_and_add(&_claimed, 1u, memory_order_relaxed); + size_t v = Atomic::fetch_then_add(&_claimed, 1u, memory_order_relaxed); if (v < _heap->num_regions()) { ShenandoahHeapRegion* r = _heap->get_region(v); if (!r->is_humongous() && !r->is_trash()) { diff --git a/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp 
b/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp new file mode 100644 index 0000000000000..ddc997900df01 --- /dev/null +++ b/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "c1/c1_LIR.hpp" +#include "c1/c1_LIRGenerator.hpp" +#include "c1/c1_CodeStubs.hpp" +#include "gc/x/c1/xBarrierSetC1.hpp" +#include "gc/x/xBarrierSet.hpp" +#include "gc/x/xBarrierSetAssembler.hpp" +#include "gc/x/xThreadLocalData.hpp" +#include "utilities/macros.hpp" + +XLoadBarrierStubC1::XLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub) : + _decorators(access.decorators()), + _ref_addr(access.resolved_addr()), + _ref(ref), + _tmp(LIR_OprFact::illegalOpr), + _runtime_stub(runtime_stub) { + + assert(_ref_addr->is_address(), "Must be an address"); + assert(_ref->is_register(), "Must be a register"); + + // Allocate tmp register if needed + if (_ref_addr->as_address_ptr()->index()->is_valid() || + _ref_addr->as_address_ptr()->disp() != 0) { + // Has index or displacement, need tmp register to load address into + _tmp = access.gen()->new_pointer_register(); + } + + FrameMap* f = Compilation::current()->frame_map(); + f->update_reserved_argument_area_size(2 * BytesPerWord); +} + +DecoratorSet XLoadBarrierStubC1::decorators() const { + return _decorators; +} + +LIR_Opr XLoadBarrierStubC1::ref() const { + return _ref; +} + +LIR_Opr XLoadBarrierStubC1::ref_addr() const { + return _ref_addr; +} + +LIR_Opr XLoadBarrierStubC1::tmp() const { + return _tmp; +} + +address XLoadBarrierStubC1::runtime_stub() const { + return _runtime_stub; +} + +void XLoadBarrierStubC1::visit(LIR_OpVisitState* visitor) { + visitor->do_slow_case(); + visitor->do_input(_ref_addr); + visitor->do_output(_ref); + if (_tmp->is_valid()) { + visitor->do_temp(_tmp); + } +} + +void XLoadBarrierStubC1::emit_code(LIR_Assembler* ce) { + XBarrierSet::assembler()->generate_c1_load_barrier_stub(ce, this); +} + +#ifndef PRODUCT +void XLoadBarrierStubC1::print_name(outputStream* out) const { + out->print("XLoadBarrierStubC1"); +} +#endif // PRODUCT + +class LIR_OpXLoadBarrierTest : public LIR_Op { +private: + LIR_Opr _opr; + +public: + LIR_OpXLoadBarrierTest(LIR_Opr opr) : + LIR_Op(lir_xloadbarrier_test, LIR_OprFact::illegalOpr, NULL), + _opr(opr) {} + + virtual void visit(LIR_OpVisitState* state) { + state->do_input(_opr); + } + + virtual void emit_code(LIR_Assembler* ce) { + XBarrierSet::assembler()->generate_c1_load_barrier_test(ce, 
_opr); + } + + virtual void print_instr(outputStream* out) const { + _opr->print(out); + out->print(" "); + } + +#ifndef PRODUCT + virtual const char* name() const { + return "lir_z_load_barrier_test"; + } +#endif // PRODUCT +}; + +static bool barrier_needed(LIRAccess& access) { + return XBarrierSet::barrier_needed(access.decorators(), access.type()); +} + +XBarrierSetC1::XBarrierSetC1() : + _load_barrier_on_oop_field_preloaded_runtime_stub(NULL), + _load_barrier_on_weak_oop_field_preloaded_runtime_stub(NULL) {} + +address XBarrierSetC1::load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const { + assert((decorators & ON_PHANTOM_OOP_REF) == 0, "Unsupported decorator"); + //assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unsupported decorator"); + + if ((decorators & ON_WEAK_OOP_REF) != 0) { + return _load_barrier_on_weak_oop_field_preloaded_runtime_stub; + } else { + return _load_barrier_on_oop_field_preloaded_runtime_stub; + } +} + +#ifdef ASSERT +#define __ access.gen()->lir(__FILE__, __LINE__)-> +#else +#define __ access.gen()->lir()-> +#endif + +void XBarrierSetC1::load_barrier(LIRAccess& access, LIR_Opr result) const { + // Fast path + __ append(new LIR_OpXLoadBarrierTest(result)); + + // Slow path + const address runtime_stub = load_barrier_on_oop_field_preloaded_runtime_stub(access.decorators()); + CodeStub* const stub = new XLoadBarrierStubC1(access, result, runtime_stub); + __ branch(lir_cond_notEqual, stub); + __ branch_destination(stub->continuation()); +} + +LIR_Opr XBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) { + // We must resolve in register when patching. This is to avoid + // having a patch area in the load barrier stub, since the call + // into the runtime to patch will not have the proper oop map. 
+ const bool patch_before_barrier = barrier_needed(access) && (access.decorators() & C1_NEEDS_PATCHING) != 0; + return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier); +} + +#undef __ + +void XBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) { + BarrierSetC1::load_at_resolved(access, result); + + if (barrier_needed(access)) { + load_barrier(access, result); + } +} + +static void pre_load_barrier(LIRAccess& access) { + DecoratorSet decorators = access.decorators(); + + // Downgrade access to MO_UNORDERED + decorators = (decorators & ~MO_DECORATOR_MASK) | MO_UNORDERED; + + // Remove ACCESS_WRITE + decorators = (decorators & ~ACCESS_WRITE); + + // Generate synthetic load at + access.gen()->access_load_at(decorators, + access.type(), + access.base().item(), + access.offset().opr(), + access.gen()->new_register(access.type()), + NULL /* patch_emit_info */, + NULL /* load_emit_info */); +} + +LIR_Opr XBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) { + if (barrier_needed(access)) { + pre_load_barrier(access); + } + + return BarrierSetC1::atomic_xchg_at_resolved(access, value); +} + +LIR_Opr XBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) { + if (barrier_needed(access)) { + pre_load_barrier(access); + } + + return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value); +} + +class XLoadBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosure { +private: + const DecoratorSet _decorators; + +public: + XLoadBarrierRuntimeStubCodeGenClosure(DecoratorSet decorators) : + _decorators(decorators) {} + + virtual OopMapSet* generate_code(StubAssembler* sasm) { + XBarrierSet::assembler()->generate_c1_load_barrier_runtime_stub(sasm, _decorators); + return NULL; + } +}; + +static address generate_c1_runtime_stub(BufferBlob* blob, DecoratorSet decorators, const char* name) { + XLoadBarrierRuntimeStubCodeGenClosure cl(decorators); + CodeBlob* const code_blob = Runtime1::generate_blob(blob, -1 /* stub_id */, name, false /* expect_oop_map*/, &cl); + return code_blob->code_begin(); +} + +void XBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* blob) { + _load_barrier_on_oop_field_preloaded_runtime_stub = + generate_c1_runtime_stub(blob, ON_STRONG_OOP_REF, "load_barrier_on_oop_field_preloaded_runtime_stub"); + _load_barrier_on_weak_oop_field_preloaded_runtime_stub = + generate_c1_runtime_stub(blob, ON_WEAK_OOP_REF, "load_barrier_on_weak_oop_field_preloaded_runtime_stub"); +} diff --git a/src/hotspot/share/gc/x/c1/xBarrierSetC1.hpp b/src/hotspot/share/gc/x/c1/xBarrierSetC1.hpp new file mode 100644 index 0000000000000..26c2e142cdf80 --- /dev/null +++ b/src/hotspot/share/gc/x/c1/xBarrierSetC1.hpp @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_C1_XBARRIERSETC1_HPP +#define SHARE_GC_X_C1_XBARRIERSETC1_HPP + +#include "c1/c1_CodeStubs.hpp" +#include "c1/c1_IR.hpp" +#include "c1/c1_LIR.hpp" +#include "gc/shared/c1/barrierSetC1.hpp" +#include "oops/accessDecorators.hpp" + +class XLoadBarrierStubC1 : public CodeStub { +private: + DecoratorSet _decorators; + LIR_Opr _ref_addr; + LIR_Opr _ref; + LIR_Opr _tmp; + address _runtime_stub; + +public: + XLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub); + + DecoratorSet decorators() const; + LIR_Opr ref() const; + LIR_Opr ref_addr() const; + LIR_Opr tmp() const; + address runtime_stub() const; + + virtual void emit_code(LIR_Assembler* ce); + virtual void visit(LIR_OpVisitState* visitor); + +#ifndef PRODUCT + virtual void print_name(outputStream* out) const; +#endif // PRODUCT +}; + +class XBarrierSetC1 : public BarrierSetC1 { +private: + address _load_barrier_on_oop_field_preloaded_runtime_stub; + address _load_barrier_on_weak_oop_field_preloaded_runtime_stub; + + address load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const; + void load_barrier(LIRAccess& access, LIR_Opr result) const; + +protected: + virtual LIR_Opr resolve_address(LIRAccess& access, bool resolve_in_register); + virtual void load_at_resolved(LIRAccess& access, LIR_Opr result); + virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value); + virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value); + +public: + XBarrierSetC1(); + + virtual void generate_c1_runtime_stubs(BufferBlob* blob); +}; + +#endif // SHARE_GC_X_C1_XBARRIERSETC1_HPP diff --git a/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp b/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp new file mode 100644 index 0000000000000..5ec0558cc7806 --- /dev/null +++ b/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp @@ -0,0 +1,583 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "classfile/javaClasses.hpp" +#include "gc/x/c2/xBarrierSetC2.hpp" +#include "gc/x/xBarrierSet.hpp" +#include "gc/x/xBarrierSetAssembler.hpp" +#include "gc/x/xBarrierSetRuntime.hpp" +#include "opto/arraycopynode.hpp" +#include "opto/addnode.hpp" +#include "opto/block.hpp" +#include "opto/compile.hpp" +#include "opto/graphKit.hpp" +#include "opto/machnode.hpp" +#include "opto/macro.hpp" +#include "opto/memnode.hpp" +#include "opto/node.hpp" +#include "opto/output.hpp" +#include "opto/regalloc.hpp" +#include "opto/rootnode.hpp" +#include "opto/runtime.hpp" +#include "opto/type.hpp" +#include "utilities/growableArray.hpp" +#include "utilities/macros.hpp" + +class XBarrierSetC2State : public ArenaObj { +private: + GrowableArray* _stubs; + Node_Array _live; + +public: + XBarrierSetC2State(Arena* arena) : + _stubs(new (arena) GrowableArray(arena, 8, 0, NULL)), + _live(arena) {} + + GrowableArray* stubs() { + return _stubs; + } + + RegMask* live(const Node* node) { + if (!node->is_Mach()) { + // Don't need liveness for non-MachNodes + return NULL; + } + + const MachNode* const mach = node->as_Mach(); + if (mach->barrier_data() == XLoadBarrierElided) { + // Don't need liveness data for nodes without barriers + return NULL; + } + + RegMask* live = (RegMask*)_live[node->_idx]; + if (live == NULL) { + live = new (Compile::current()->comp_arena()->AmallocWords(sizeof(RegMask))) RegMask(); + _live.map(node->_idx, (Node*)live); + } + + return live; + } +}; + +static XBarrierSetC2State* barrier_set_state() { + return reinterpret_cast(Compile::current()->barrier_set_state()); +} + +XLoadBarrierStubC2* XLoadBarrierStubC2::create(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) { + XLoadBarrierStubC2* const stub = new (Compile::current()->comp_arena()) XLoadBarrierStubC2(node, ref_addr, ref, tmp, barrier_data); + if (!Compile::current()->output()->in_scratch_emit_size()) { + barrier_set_state()->stubs()->append(stub); + } + + return stub; +} + +XLoadBarrierStubC2::XLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) : + _node(node), + _ref_addr(ref_addr), + _ref(ref), + _tmp(tmp), + _barrier_data(barrier_data), + _entry(), + _continuation() { + assert_different_registers(ref, ref_addr.base()); + assert_different_registers(ref, ref_addr.index()); +} + +Address XLoadBarrierStubC2::ref_addr() const { + return _ref_addr; +} + +Register XLoadBarrierStubC2::ref() const { + return _ref; +} + +Register XLoadBarrierStubC2::tmp() const { + return _tmp; +} + +address XLoadBarrierStubC2::slow_path() const { + DecoratorSet decorators = DECORATORS_NONE; + if (_barrier_data & XLoadBarrierStrong) { + decorators |= ON_STRONG_OOP_REF; + } + if (_barrier_data & XLoadBarrierWeak) { + decorators |= ON_WEAK_OOP_REF; + } + if (_barrier_data & XLoadBarrierPhantom) { + decorators |= ON_PHANTOM_OOP_REF; + } + if (_barrier_data & XLoadBarrierNoKeepalive) { + decorators |= AS_NO_KEEPALIVE; + } + return XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators); +} + +RegMask& XLoadBarrierStubC2::live() const { + RegMask* mask = barrier_set_state()->live(_node); + assert(mask != NULL, "must be mach-node with barrier"); + return *mask; +} + +Label* XLoadBarrierStubC2::entry() { + // The _entry will never be bound when in_scratch_emit_size() is true. + // However, we still need to return a label that is not bound now, but + // will eventually be bound. 
Any label will do, as it will only act as + // a placeholder, so we return the _continuation label. + return Compile::current()->output()->in_scratch_emit_size() ? &_continuation : &_entry; +} + +Label* XLoadBarrierStubC2::continuation() { + return &_continuation; +} + +void* XBarrierSetC2::create_barrier_state(Arena* comp_arena) const { + return new (comp_arena) XBarrierSetC2State(comp_arena); +} + +void XBarrierSetC2::late_barrier_analysis() const { + analyze_dominating_barriers(); + compute_liveness_at_stubs(); +} + +void XBarrierSetC2::emit_stubs(CodeBuffer& cb) const { + MacroAssembler masm(&cb); + GrowableArray* const stubs = barrier_set_state()->stubs(); + + for (int i = 0; i < stubs->length(); i++) { + // Make sure there is enough space in the code buffer + if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == NULL) { + ciEnv::current()->record_failure("CodeCache is full"); + return; + } + + XBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i)); + } + + masm.flush(); +} + +int XBarrierSetC2::estimate_stub_size() const { + Compile* const C = Compile::current(); + BufferBlob* const blob = C->output()->scratch_buffer_blob(); + GrowableArray* const stubs = barrier_set_state()->stubs(); + int size = 0; + + for (int i = 0; i < stubs->length(); i++) { + CodeBuffer cb(blob->content_begin(), (address)C->output()->scratch_locs_memory() - blob->content_begin()); + MacroAssembler masm(&cb); + XBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i)); + size += cb.insts_size(); + } + + return size; +} + +static void set_barrier_data(C2Access& access) { + if (XBarrierSet::barrier_needed(access.decorators(), access.type())) { + uint8_t barrier_data = 0; + + if (access.decorators() & ON_PHANTOM_OOP_REF) { + barrier_data |= XLoadBarrierPhantom; + } else if (access.decorators() & ON_WEAK_OOP_REF) { + barrier_data |= XLoadBarrierWeak; + } else { + barrier_data |= XLoadBarrierStrong; + } + + if (access.decorators() & AS_NO_KEEPALIVE) { + barrier_data |= XLoadBarrierNoKeepalive; + } + + access.set_barrier_data(barrier_data); + } +} + +Node* XBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const { + set_barrier_data(access); + return BarrierSetC2::load_at_resolved(access, val_type); +} + +Node* XBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val, + Node* new_val, const Type* val_type) const { + set_barrier_data(access); + return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type); +} + +Node* XBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val, + Node* new_val, const Type* value_type) const { + set_barrier_data(access); + return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type); +} + +Node* XBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const { + set_barrier_data(access); + return BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type); +} + +bool XBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, + bool is_clone, bool is_clone_instance, + ArrayCopyPhase phase) const { + if (phase == ArrayCopyPhase::Parsing) { + return false; + } + if (phase == ArrayCopyPhase::Optimization) { + return is_clone_instance; + } + // else ArrayCopyPhase::Expansion + return type == T_OBJECT || type == T_ARRAY; +} + +// This TypeFunc assumes a 64bit system +static 
const TypeFunc* clone_type() { + // Create input type (domain) + const Type** domain_fields = TypeTuple::fields(4); + domain_fields[TypeFunc::Parms + 0] = TypeInstPtr::NOTNULL; // src + domain_fields[TypeFunc::Parms + 1] = TypeInstPtr::NOTNULL; // dst + domain_fields[TypeFunc::Parms + 2] = TypeLong::LONG; // size lower + domain_fields[TypeFunc::Parms + 3] = Type::HALF; // size upper + const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + 4, domain_fields); + + // Create result type (range) + const Type** range_fields = TypeTuple::fields(0); + const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 0, range_fields); + + return TypeFunc::make(domain, range); +} + +#define XTOP LP64_ONLY(COMMA phase->top()) + +void XBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const { + Node* const src = ac->in(ArrayCopyNode::Src); + const TypeAryPtr* ary_ptr = src->get_ptr_type()->isa_aryptr(); + + if (ac->is_clone_array() && ary_ptr != NULL) { + BasicType bt = ary_ptr->elem()->array_element_basic_type(); + if (is_reference_type(bt)) { + // Clone object array + bt = T_OBJECT; + } else { + // Clone primitive array + bt = T_LONG; + } + + Node* ctrl = ac->in(TypeFunc::Control); + Node* mem = ac->in(TypeFunc::Memory); + Node* src = ac->in(ArrayCopyNode::Src); + Node* src_offset = ac->in(ArrayCopyNode::SrcPos); + Node* dest = ac->in(ArrayCopyNode::Dest); + Node* dest_offset = ac->in(ArrayCopyNode::DestPos); + Node* length = ac->in(ArrayCopyNode::Length); + + if (bt == T_OBJECT) { + // BarrierSetC2::clone sets the offsets via BarrierSetC2::arraycopy_payload_base_offset + // which 8-byte aligns them to allow for word size copies. Make sure the offsets point + // to the first element in the array when cloning object arrays. Otherwise, load + // barriers are applied to parts of the header. Also adjust the length accordingly. + assert(src_offset == dest_offset, "should be equal"); + jlong offset = src_offset->get_long(); + if (offset != arrayOopDesc::base_offset_in_bytes(T_OBJECT)) { + assert(!UseCompressedClassPointers, "should only happen without compressed class pointers"); + assert((arrayOopDesc::base_offset_in_bytes(T_OBJECT) - offset) == BytesPerLong, "unexpected offset"); + length = phase->transform_later(new SubLNode(length, phase->longcon(1))); // Size is in longs + src_offset = phase->longcon(arrayOopDesc::base_offset_in_bytes(T_OBJECT)); + dest_offset = src_offset; + } + } + Node* payload_src = phase->basic_plus_adr(src, src_offset); + Node* payload_dst = phase->basic_plus_adr(dest, dest_offset); + + const char* copyfunc_name = "arraycopy"; + address copyfunc_addr = phase->basictype2arraycopy(bt, NULL, NULL, true, copyfunc_name, true); + + const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; + const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type(); + + Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP); + phase->transform_later(call); + + phase->igvn().replace_node(ac, call); + return; + } + + // Clone instance + Node* const ctrl = ac->in(TypeFunc::Control); + Node* const mem = ac->in(TypeFunc::Memory); + Node* const dst = ac->in(ArrayCopyNode::Dest); + Node* const size = ac->in(ArrayCopyNode::Length); + + assert(size->bottom_type()->is_long(), "Should be long"); + + // The native clone we are calling here expects the instance size in words + // Add header/offset size to payload size to get instance size. 
+ Node* const base_offset = phase->longcon(arraycopy_payload_base_offset(ac->is_clone_array()) >> LogBytesPerLong); + Node* const full_size = phase->transform_later(new AddLNode(size, base_offset)); + + Node* const call = phase->make_leaf_call(ctrl, + mem, + clone_type(), + XBarrierSetRuntime::clone_addr(), + "XBarrierSetRuntime::clone", + TypeRawPtr::BOTTOM, + src, + dst, + full_size, + phase->top()); + phase->transform_later(call); + phase->igvn().replace_node(ac, call); +} + +#undef XTOP + +// == Dominating barrier elision == + +static bool block_has_safepoint(const Block* block, uint from, uint to) { + for (uint i = from; i < to; i++) { + if (block->get_node(i)->is_MachSafePoint()) { + // Safepoint found + return true; + } + } + + // Safepoint not found + return false; +} + +static bool block_has_safepoint(const Block* block) { + return block_has_safepoint(block, 0, block->number_of_nodes()); +} + +static uint block_index(const Block* block, const Node* node) { + for (uint j = 0; j < block->number_of_nodes(); ++j) { + if (block->get_node(j) == node) { + return j; + } + } + ShouldNotReachHere(); + return 0; +} + +void XBarrierSetC2::analyze_dominating_barriers() const { + ResourceMark rm; + Compile* const C = Compile::current(); + PhaseCFG* const cfg = C->cfg(); + Block_List worklist; + Node_List mem_ops; + Node_List barrier_loads; + + // Step 1 - Find accesses, and track them in lists + for (uint i = 0; i < cfg->number_of_blocks(); ++i) { + const Block* const block = cfg->get_block(i); + for (uint j = 0; j < block->number_of_nodes(); ++j) { + const Node* const node = block->get_node(j); + if (!node->is_Mach()) { + continue; + } + + MachNode* const mach = node->as_Mach(); + switch (mach->ideal_Opcode()) { + case Op_LoadP: + if ((mach->barrier_data() & XLoadBarrierStrong) != 0) { + barrier_loads.push(mach); + } + if ((mach->barrier_data() & (XLoadBarrierStrong | XLoadBarrierNoKeepalive)) == + XLoadBarrierStrong) { + mem_ops.push(mach); + } + break; + case Op_CompareAndExchangeP: + case Op_CompareAndSwapP: + case Op_GetAndSetP: + if ((mach->barrier_data() & XLoadBarrierStrong) != 0) { + barrier_loads.push(mach); + } + case Op_StoreP: + mem_ops.push(mach); + break; + + default: + break; + } + } + } + + // Step 2 - Find dominating accesses for each load + for (uint i = 0; i < barrier_loads.size(); i++) { + MachNode* const load = barrier_loads.at(i)->as_Mach(); + const TypePtr* load_adr_type = NULL; + intptr_t load_offset = 0; + const Node* const load_obj = load->get_base_and_disp(load_offset, load_adr_type); + Block* const load_block = cfg->get_block_for_node(load); + const uint load_index = block_index(load_block, load); + + for (uint j = 0; j < mem_ops.size(); j++) { + MachNode* mem = mem_ops.at(j)->as_Mach(); + const TypePtr* mem_adr_type = NULL; + intptr_t mem_offset = 0; + const Node* mem_obj = mem->get_base_and_disp(mem_offset, mem_adr_type); + Block* mem_block = cfg->get_block_for_node(mem); + uint mem_index = block_index(mem_block, mem); + + if (load_obj == NodeSentinel || mem_obj == NodeSentinel || + load_obj == NULL || mem_obj == NULL || + load_offset < 0 || mem_offset < 0) { + continue; + } + + if (mem_obj != load_obj || mem_offset != load_offset) { + // Not the same addresses, not a candidate + continue; + } + + if (load_block == mem_block) { + // Earlier accesses in the same block + if (mem_index < load_index && !block_has_safepoint(mem_block, mem_index + 1, load_index)) { + load->set_barrier_data(XLoadBarrierElided); + } + } else if (mem_block->dominates(load_block)) { + // 
Dominating block? Look around for safepoints + ResourceMark rm; + Block_List stack; + VectorSet visited; + stack.push(load_block); + bool safepoint_found = block_has_safepoint(load_block); + while (!safepoint_found && stack.size() > 0) { + Block* block = stack.pop(); + if (visited.test_set(block->_pre_order)) { + continue; + } + if (block_has_safepoint(block)) { + safepoint_found = true; + break; + } + if (block == mem_block) { + continue; + } + + // Push predecessor blocks + for (uint p = 1; p < block->num_preds(); ++p) { + Block* pred = cfg->get_block_for_node(block->pred(p)); + stack.push(pred); + } + } + + if (!safepoint_found) { + load->set_barrier_data(XLoadBarrierElided); + } + } + } + } +} + +// == Reduced spilling optimization == + +void XBarrierSetC2::compute_liveness_at_stubs() const { + ResourceMark rm; + Compile* const C = Compile::current(); + Arena* const A = Thread::current()->resource_area(); + PhaseCFG* const cfg = C->cfg(); + PhaseRegAlloc* const regalloc = C->regalloc(); + RegMask* const live = NEW_ARENA_ARRAY(A, RegMask, cfg->number_of_blocks() * sizeof(RegMask)); + XBarrierSetAssembler* const bs = XBarrierSet::assembler(); + Block_List worklist; + + for (uint i = 0; i < cfg->number_of_blocks(); ++i) { + new ((void*)(live + i)) RegMask(); + worklist.push(cfg->get_block(i)); + } + + while (worklist.size() > 0) { + const Block* const block = worklist.pop(); + RegMask& old_live = live[block->_pre_order]; + RegMask new_live; + + // Initialize to union of successors + for (uint i = 0; i < block->_num_succs; i++) { + const uint succ_id = block->_succs[i]->_pre_order; + new_live.OR(live[succ_id]); + } + + // Walk block backwards, computing liveness + for (int i = block->number_of_nodes() - 1; i >= 0; --i) { + const Node* const node = block->get_node(i); + + // Remove def bits + const OptoReg::Name first = bs->refine_register(node, regalloc->get_reg_first(node)); + const OptoReg::Name second = bs->refine_register(node, regalloc->get_reg_second(node)); + if (first != OptoReg::Bad) { + new_live.Remove(first); + } + if (second != OptoReg::Bad) { + new_live.Remove(second); + } + + // Add use bits + for (uint j = 1; j < node->req(); ++j) { + const Node* const use = node->in(j); + const OptoReg::Name first = bs->refine_register(use, regalloc->get_reg_first(use)); + const OptoReg::Name second = bs->refine_register(use, regalloc->get_reg_second(use)); + if (first != OptoReg::Bad) { + new_live.Insert(first); + } + if (second != OptoReg::Bad) { + new_live.Insert(second); + } + } + + // If this node tracks liveness, update it + RegMask* const regs = barrier_set_state()->live(node); + if (regs != NULL) { + regs->OR(new_live); + } + } + + // Now at block top, see if we have any changes + new_live.SUBTRACT(old_live); + if (new_live.is_NotEmpty()) { + // Liveness has refined, update and propagate to prior blocks + old_live.OR(new_live); + for (uint i = 1; i < block->num_preds(); ++i) { + Block* const pred = cfg->get_block_for_node(block->pred(i)); + worklist.push(pred); + } + } + } +} + +#ifndef PRODUCT +void XBarrierSetC2::dump_barrier_data(const MachNode* mach, outputStream* st) const { + if ((mach->barrier_data() & XLoadBarrierStrong) != 0) { + st->print("strong "); + } + if ((mach->barrier_data() & XLoadBarrierWeak) != 0) { + st->print("weak "); + } + if ((mach->barrier_data() & XLoadBarrierPhantom) != 0) { + st->print("phantom "); + } + if ((mach->barrier_data() & XLoadBarrierNoKeepalive) != 0) { + st->print("nokeepalive "); + } +} +#endif // !PRODUCT diff --git 
a/src/hotspot/share/gc/x/c2/xBarrierSetC2.hpp b/src/hotspot/share/gc/x/c2/xBarrierSetC2.hpp new file mode 100644 index 0000000000000..91835338fd73c --- /dev/null +++ b/src/hotspot/share/gc/x/c2/xBarrierSetC2.hpp @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_C2_XBARRIERSETC2_HPP +#define SHARE_GC_X_C2_XBARRIERSETC2_HPP + +#include "gc/shared/c2/barrierSetC2.hpp" +#include "memory/allocation.hpp" +#include "opto/node.hpp" +#include "utilities/growableArray.hpp" + +const uint8_t XLoadBarrierElided = 0; +const uint8_t XLoadBarrierStrong = 1; +const uint8_t XLoadBarrierWeak = 2; +const uint8_t XLoadBarrierPhantom = 4; +const uint8_t XLoadBarrierNoKeepalive = 8; + +class XLoadBarrierStubC2 : public ArenaObj { +private: + const MachNode* _node; + const Address _ref_addr; + const Register _ref; + const Register _tmp; + const uint8_t _barrier_data; + Label _entry; + Label _continuation; + + XLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data); + +public: + static XLoadBarrierStubC2* create(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data); + + Address ref_addr() const; + Register ref() const; + Register tmp() const; + address slow_path() const; + RegMask& live() const; + Label* entry(); + Label* continuation(); +}; + +class XBarrierSetC2 : public BarrierSetC2 { +private: + void compute_liveness_at_stubs() const; + void analyze_dominating_barriers() const; + +protected: + virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const; + virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, + Node* expected_val, + Node* new_val, + const Type* val_type) const; + virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, + Node* expected_val, + Node* new_val, + const Type* value_type) const; + virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access, + Node* new_val, + const Type* val_type) const; + +public: + virtual void* create_barrier_state(Arena* comp_arena) const; + virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, + BasicType type, + bool is_clone, + bool is_clone_instance, + ArrayCopyPhase phase) const; + virtual void clone_at_expansion(PhaseMacroExpand* phase, + ArrayCopyNode* ac) const; + + virtual void late_barrier_analysis() const; + virtual int estimate_stub_size() const; + virtual void emit_stubs(CodeBuffer& cb) const; + +#ifndef PRODUCT + virtual void 
dump_barrier_data(const MachNode* mach, outputStream* st) const; +#endif +}; + +#endif // SHARE_GC_X_C2_XBARRIERSETC2_HPP diff --git a/src/hotspot/share/gc/x/vmStructs_x.cpp b/src/hotspot/share/gc/x/vmStructs_x.cpp new file mode 100644 index 0000000000000..4c7d63f41b403 --- /dev/null +++ b/src/hotspot/share/gc/x/vmStructs_x.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/vmStructs_x.hpp" + +XGlobalsForVMStructs::XGlobalsForVMStructs() : + _XGlobalPhase(&XGlobalPhase), + _XGlobalSeqNum(&XGlobalSeqNum), + _XAddressOffsetMask(&XAddressOffsetMask), + _XAddressMetadataMask(&XAddressMetadataMask), + _XAddressMetadataFinalizable(&XAddressMetadataFinalizable), + _XAddressGoodMask(&XAddressGoodMask), + _XAddressBadMask(&XAddressBadMask), + _XAddressWeakBadMask(&XAddressWeakBadMask), + _XObjectAlignmentSmallShift(&XObjectAlignmentSmallShift), + _XObjectAlignmentSmall(&XObjectAlignmentSmall) { +} + +XGlobalsForVMStructs XGlobalsForVMStructs::_instance; +XGlobalsForVMStructs* XGlobalsForVMStructs::_instance_p = &XGlobalsForVMStructs::_instance; diff --git a/src/hotspot/share/gc/x/vmStructs_x.hpp b/src/hotspot/share/gc/x/vmStructs_x.hpp new file mode 100644 index 0000000000000..b911c21be2343 --- /dev/null +++ b/src/hotspot/share/gc/x/vmStructs_x.hpp @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_VMSTRUCTS_X_HPP +#define SHARE_GC_X_VMSTRUCTS_X_HPP + +#include "gc/x/xAttachedArray.hpp" +#include "gc/x/xCollectedHeap.hpp" +#include "gc/x/xForwarding.hpp" +#include "gc/x/xGranuleMap.hpp" +#include "gc/x/xHeap.hpp" +#include "gc/x/xPageAllocator.hpp" +#include "utilities/macros.hpp" + +// Expose some ZGC globals to the SA agent. +class XGlobalsForVMStructs { + static XGlobalsForVMStructs _instance; + +public: + static XGlobalsForVMStructs* _instance_p; + + XGlobalsForVMStructs(); + + uint32_t* _XGlobalPhase; + + uint32_t* _XGlobalSeqNum; + + uintptr_t* _XAddressOffsetMask; + uintptr_t* _XAddressMetadataMask; + uintptr_t* _XAddressMetadataFinalizable; + uintptr_t* _XAddressGoodMask; + uintptr_t* _XAddressBadMask; + uintptr_t* _XAddressWeakBadMask; + + const int* _XObjectAlignmentSmallShift; + const int* _XObjectAlignmentSmall; +}; + +typedef XGranuleMap XGranuleMapForPageTable; +typedef XGranuleMap XGranuleMapForForwarding; +typedef XAttachedArray XAttachedArrayForForwarding; + +#define VM_STRUCTS_X(nonstatic_field, volatile_nonstatic_field, static_field) \ + static_field(XGlobalsForVMStructs, _instance_p, XGlobalsForVMStructs*) \ + nonstatic_field(XGlobalsForVMStructs, _XGlobalPhase, uint32_t*) \ + nonstatic_field(XGlobalsForVMStructs, _XGlobalSeqNum, uint32_t*) \ + nonstatic_field(XGlobalsForVMStructs, _XAddressOffsetMask, uintptr_t*) \ + nonstatic_field(XGlobalsForVMStructs, _XAddressMetadataMask, uintptr_t*) \ + nonstatic_field(XGlobalsForVMStructs, _XAddressMetadataFinalizable, uintptr_t*) \ + nonstatic_field(XGlobalsForVMStructs, _XAddressGoodMask, uintptr_t*) \ + nonstatic_field(XGlobalsForVMStructs, _XAddressBadMask, uintptr_t*) \ + nonstatic_field(XGlobalsForVMStructs, _XAddressWeakBadMask, uintptr_t*) \ + nonstatic_field(XGlobalsForVMStructs, _XObjectAlignmentSmallShift, const int*) \ + nonstatic_field(XGlobalsForVMStructs, _XObjectAlignmentSmall, const int*) \ + \ + nonstatic_field(XCollectedHeap, _heap, XHeap) \ + \ + nonstatic_field(XHeap, _page_allocator, XPageAllocator) \ + nonstatic_field(XHeap, _page_table, XPageTable) \ + nonstatic_field(XHeap, _forwarding_table, XForwardingTable) \ + nonstatic_field(XHeap, _relocate, XRelocate) \ + \ + nonstatic_field(XPage, _type, const uint8_t) \ + nonstatic_field(XPage, _seqnum, uint32_t) \ + nonstatic_field(XPage, _virtual, const XVirtualMemory) \ + volatile_nonstatic_field(XPage, _top, uintptr_t) \ + \ + nonstatic_field(XPageAllocator, _max_capacity, const size_t) \ + volatile_nonstatic_field(XPageAllocator, _capacity, size_t) \ + volatile_nonstatic_field(XPageAllocator, _used, size_t) \ + \ + nonstatic_field(XPageTable, _map, XGranuleMapForPageTable) \ + \ + nonstatic_field(XGranuleMapForPageTable, _map, XPage** const) \ + nonstatic_field(XGranuleMapForForwarding, _map, XForwarding** const) \ + \ + nonstatic_field(XForwardingTable, _map, XGranuleMapForForwarding) \ + \ + nonstatic_field(XVirtualMemory, _start, const uintptr_t) \ + nonstatic_field(XVirtualMemory, _end, const uintptr_t) \ + \ + nonstatic_field(XForwarding, _virtual, const XVirtualMemory) \ + nonstatic_field(XForwarding, _object_alignment_shift, const size_t) \ + volatile_nonstatic_field(XForwarding, _ref_count, int) \ + nonstatic_field(XForwarding, _entries, const XAttachedArrayForForwarding) \ + nonstatic_field(XForwardingEntry, _entry, uint64_t) \ + nonstatic_field(XAttachedArrayForForwarding, _length, const size_t) + +#define VM_INT_CONSTANTS_X(declare_constant, declare_constant_with_value) \ + declare_constant(XPhaseRelocate) \ + 
declare_constant(XPageTypeSmall) \ + declare_constant(XPageTypeMedium) \ + declare_constant(XPageTypeLarge) \ + declare_constant(XObjectAlignmentMediumShift) \ + declare_constant(XObjectAlignmentLargeShift) + +#define VM_LONG_CONSTANTS_X(declare_constant) \ + declare_constant(XGranuleSizeShift) \ + declare_constant(XPageSizeSmallShift) \ + declare_constant(XPageSizeMediumShift) \ + declare_constant(XAddressOffsetShift) \ + declare_constant(XAddressOffsetBits) \ + declare_constant(XAddressOffsetMask) \ + declare_constant(XAddressOffsetMax) + +#define VM_TYPES_X(declare_type, declare_toplevel_type, declare_integer_type) \ + declare_toplevel_type(XGlobalsForVMStructs) \ + declare_type(XCollectedHeap, CollectedHeap) \ + declare_toplevel_type(XHeap) \ + declare_toplevel_type(XRelocate) \ + declare_toplevel_type(XPage) \ + declare_toplevel_type(XPageAllocator) \ + declare_toplevel_type(XPageTable) \ + declare_toplevel_type(XAttachedArrayForForwarding) \ + declare_toplevel_type(XGranuleMapForPageTable) \ + declare_toplevel_type(XGranuleMapForForwarding) \ + declare_toplevel_type(XVirtualMemory) \ + declare_toplevel_type(XForwardingTable) \ + declare_toplevel_type(XForwarding) \ + declare_toplevel_type(XForwardingEntry) \ + declare_toplevel_type(XPhysicalMemoryManager) + +#endif // SHARE_GC_X_VMSTRUCTS_X_HPP diff --git a/src/hotspot/share/gc/x/xAbort.cpp b/src/hotspot/share/gc/x/xAbort.cpp new file mode 100644 index 0000000000000..11b8d840d22f5 --- /dev/null +++ b/src/hotspot/share/gc/x/xAbort.cpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xAbort.hpp" +#include "runtime/atomic.hpp" + +volatile bool XAbort::_should_abort = false; + +void XAbort::abort() { + Atomic::release_store_fence(&_should_abort, true); +} diff --git a/src/hotspot/share/gc/x/xAbort.hpp b/src/hotspot/share/gc/x/xAbort.hpp new file mode 100644 index 0000000000000..808a350584bc5 --- /dev/null +++ b/src/hotspot/share/gc/x/xAbort.hpp @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XABORT_HPP +#define SHARE_GC_X_XABORT_HPP + +#include "memory/allStatic.hpp" + +class XAbort : public AllStatic { +private: + static volatile bool _should_abort; + +public: + static bool should_abort(); + static void abort(); +}; + +#endif // SHARE_GC_X_XABORT_HPP diff --git a/src/hotspot/share/gc/x/xAbort.inline.hpp b/src/hotspot/share/gc/x/xAbort.inline.hpp new file mode 100644 index 0000000000000..8ef1219330a93 --- /dev/null +++ b/src/hotspot/share/gc/x/xAbort.inline.hpp @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XABORT_INLINE_HPP +#define SHARE_GC_X_XABORT_INLINE_HPP + +#include "gc/x/xAbort.hpp" + +#include "runtime/atomic.hpp" + +inline bool XAbort::should_abort() { + return Atomic::load_acquire(&_should_abort); +} + +#endif // SHARE_GC_X_XABORT_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xAddress.cpp b/src/hotspot/share/gc/x/xAddress.cpp new file mode 100644 index 0000000000000..33dffc662f161 --- /dev/null +++ b/src/hotspot/share/gc/x/xAddress.cpp @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
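The XAbort flag above is a one-way latch: abort() publishes true with release (plus fence) semantics and should_abort() reads it with acquire semantics, so any state written before the abort is visible to a thread that observes the flag. A rough standalone equivalent with std::atomic (illustrative names, not HotSpot's Atomic API; the real release_store_fence is somewhat stronger than a plain release store):

#include <atomic>

class AbortFlag {
 private:
  static std::atomic<bool> _should_abort;

 public:
  // Reader side: acquire pairs with the release store below.
  static bool should_abort() {
    return _should_abort.load(std::memory_order_acquire);
  }

  // Writer side: everything written before this store is visible to a
  // thread that subsequently observes should_abort() == true.
  static void abort() {
    _should_abort.store(true, std::memory_order_release);
  }
};

std::atomic<bool> AbortFlag::_should_abort{false};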
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xAddress.hpp" +#include "gc/x/xGlobals.hpp" + +void XAddress::set_good_mask(uintptr_t mask) { + XAddressGoodMask = mask; + XAddressBadMask = XAddressGoodMask ^ XAddressMetadataMask; + XAddressWeakBadMask = (XAddressGoodMask | XAddressMetadataRemapped | XAddressMetadataFinalizable) ^ XAddressMetadataMask; +} + +void XAddress::initialize() { + XAddressOffsetBits = XPlatformAddressOffsetBits(); + XAddressOffsetMask = (((uintptr_t)1 << XAddressOffsetBits) - 1) << XAddressOffsetShift; + XAddressOffsetMax = (uintptr_t)1 << XAddressOffsetBits; + + XAddressMetadataShift = XPlatformAddressMetadataShift(); + XAddressMetadataMask = (((uintptr_t)1 << XAddressMetadataBits) - 1) << XAddressMetadataShift; + + XAddressMetadataMarked0 = (uintptr_t)1 << (XAddressMetadataShift + 0); + XAddressMetadataMarked1 = (uintptr_t)1 << (XAddressMetadataShift + 1); + XAddressMetadataRemapped = (uintptr_t)1 << (XAddressMetadataShift + 2); + XAddressMetadataFinalizable = (uintptr_t)1 << (XAddressMetadataShift + 3); + + XAddressMetadataMarked = XAddressMetadataMarked0; + set_good_mask(XAddressMetadataRemapped); +} + +void XAddress::flip_to_marked() { + XAddressMetadataMarked ^= (XAddressMetadataMarked0 | XAddressMetadataMarked1); + set_good_mask(XAddressMetadataMarked); +} + +void XAddress::flip_to_remapped() { + set_good_mask(XAddressMetadataRemapped); +} diff --git a/src/hotspot/share/gc/x/xAddress.hpp b/src/hotspot/share/gc/x/xAddress.hpp new file mode 100644 index 0000000000000..ff9d548f1af0c --- /dev/null +++ b/src/hotspot/share/gc/x/xAddress.hpp @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
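XAddress::set_good_mask() above is the heart of the single-generation colored-pointer scheme: exactly one of Marked0/Marked1/Remapped is the current good color, the bad mask is its complement within the metadata bits, and the weak-bad mask additionally tolerates the Remapped and Finalizable colors. A toy calculation with made-up bit positions (metadata at bit 42; the real shifts come from XPlatformAddressMetadataShift()) shows the arithmetic:

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical layout: 4 metadata bits starting at bit 42.
  const int       shift         = 42;
  const uintptr_t marked0       = uintptr_t(1) << (shift + 0);
  const uintptr_t marked1       = uintptr_t(1) << (shift + 1);
  const uintptr_t remapped      = uintptr_t(1) << (shift + 2);
  const uintptr_t finalizable   = uintptr_t(1) << (shift + 3);
  const uintptr_t metadata_mask = uintptr_t(0xf) << shift;

  // Same derivation as XAddress::set_good_mask(), with Remapped as the good color.
  const uintptr_t good_mask     = remapped;
  const uintptr_t bad_mask      = good_mask ^ metadata_mask;
  const uintptr_t weak_bad_mask = (good_mask | remapped | finalizable) ^ metadata_mask;

  const uintptr_t offset = 0x12345678;          // plain heap offset
  const uintptr_t good   = offset | good_mask;  // currently "good" colored oop
  const uintptr_t stale  = offset | marked0;    // stale color from a previous cycle

  printf("good  is bad?      %d\n", (int)((good  & bad_mask) != 0));       // 0
  printf("stale is bad?      %d\n", (int)((stale & bad_mask) != 0));       // 1
  printf("stale is weak-bad? %d\n", (int)((stale & weak_bad_mask) != 0));  // 1
  return 0;
}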
+ */ + +#ifndef SHARE_GC_X_XADDRESS_HPP +#define SHARE_GC_X_XADDRESS_HPP + +#include "memory/allStatic.hpp" +#include "utilities/globalDefinitions.hpp" + +class XAddress : public AllStatic { + friend class XAddressTest; + +private: + static void set_good_mask(uintptr_t mask); + +public: + static void initialize(); + + static void flip_to_marked(); + static void flip_to_remapped(); + + static bool is_null(uintptr_t value); + static bool is_bad(uintptr_t value); + static bool is_good(uintptr_t value); + static bool is_good_or_null(uintptr_t value); + static bool is_weak_bad(uintptr_t value); + static bool is_weak_good(uintptr_t value); + static bool is_weak_good_or_null(uintptr_t value); + static bool is_marked(uintptr_t value); + static bool is_marked_or_null(uintptr_t value); + static bool is_finalizable(uintptr_t value); + static bool is_finalizable_good(uintptr_t value); + static bool is_remapped(uintptr_t value); + static bool is_in(uintptr_t value); + + static uintptr_t offset(uintptr_t value); + static uintptr_t good(uintptr_t value); + static uintptr_t good_or_null(uintptr_t value); + static uintptr_t finalizable_good(uintptr_t value); + static uintptr_t marked(uintptr_t value); + static uintptr_t marked0(uintptr_t value); + static uintptr_t marked1(uintptr_t value); + static uintptr_t remapped(uintptr_t value); + static uintptr_t remapped_or_null(uintptr_t value); +}; + +#endif // SHARE_GC_X_XADDRESS_HPP diff --git a/src/hotspot/share/gc/x/xAddress.inline.hpp b/src/hotspot/share/gc/x/xAddress.inline.hpp new file mode 100644 index 0000000000000..046ee10af00af --- /dev/null +++ b/src/hotspot/share/gc/x/xAddress.inline.hpp @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XADDRESS_INLINE_HPP +#define SHARE_GC_X_XADDRESS_INLINE_HPP + +#include "gc/x/xAddress.hpp" + +#include "gc/x/xGlobals.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/macros.hpp" +#include "utilities/powerOfTwo.hpp" + +inline bool XAddress::is_null(uintptr_t value) { + return value == 0; +} + +inline bool XAddress::is_bad(uintptr_t value) { + return value & XAddressBadMask; +} + +inline bool XAddress::is_good(uintptr_t value) { + return !is_bad(value) && !is_null(value); +} + +inline bool XAddress::is_good_or_null(uintptr_t value) { + // Checking if an address is "not bad" is an optimized version of + // checking if it's "good or null", which eliminates an explicit + // null check. 
However, the implicit null check only checks that + // the mask bits are zero, not that the entire address is zero. + // This means that an address without mask bits would pass through + // the barrier as if it was null. This should be harmless as such + // addresses should ever be passed through the barrier. + const bool result = !is_bad(value); + assert((is_good(value) || is_null(value)) == result, "Bad address"); + return result; +} + +inline bool XAddress::is_weak_bad(uintptr_t value) { + return value & XAddressWeakBadMask; +} + +inline bool XAddress::is_weak_good(uintptr_t value) { + return !is_weak_bad(value) && !is_null(value); +} + +inline bool XAddress::is_weak_good_or_null(uintptr_t value) { + return !is_weak_bad(value); +} + +inline bool XAddress::is_marked(uintptr_t value) { + return value & XAddressMetadataMarked; +} + +inline bool XAddress::is_marked_or_null(uintptr_t value) { + return is_marked(value) || is_null(value); +} + +inline bool XAddress::is_finalizable(uintptr_t value) { + return value & XAddressMetadataFinalizable; +} + +inline bool XAddress::is_finalizable_good(uintptr_t value) { + return is_finalizable(value) && is_good(value ^ XAddressMetadataFinalizable); +} + +inline bool XAddress::is_remapped(uintptr_t value) { + return value & XAddressMetadataRemapped; +} + +inline bool XAddress::is_in(uintptr_t value) { + // Check that exactly one non-offset bit is set + if (!is_power_of_2(value & ~XAddressOffsetMask)) { + return false; + } + + // Check that one of the non-finalizable metadata is set + return value & (XAddressMetadataMask & ~XAddressMetadataFinalizable); +} + +inline uintptr_t XAddress::offset(uintptr_t value) { + return value & XAddressOffsetMask; +} + +inline uintptr_t XAddress::good(uintptr_t value) { + return offset(value) | XAddressGoodMask; +} + +inline uintptr_t XAddress::good_or_null(uintptr_t value) { + return is_null(value) ? 0 : good(value); +} + +inline uintptr_t XAddress::finalizable_good(uintptr_t value) { + return offset(value) | XAddressMetadataFinalizable | XAddressGoodMask; +} + +inline uintptr_t XAddress::marked(uintptr_t value) { + return offset(value) | XAddressMetadataMarked; +} + +inline uintptr_t XAddress::marked0(uintptr_t value) { + return offset(value) | XAddressMetadataMarked0; +} + +inline uintptr_t XAddress::marked1(uintptr_t value) { + return offset(value) | XAddressMetadataMarked1; +} + +inline uintptr_t XAddress::remapped(uintptr_t value) { + return offset(value) | XAddressMetadataRemapped; +} + +inline uintptr_t XAddress::remapped_or_null(uintptr_t value) { + return is_null(value) ? 0 : remapped(value); +} + +#endif // SHARE_GC_X_XADDRESS_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xAddressSpaceLimit.cpp b/src/hotspot/share/gc/x/xAddressSpaceLimit.cpp new file mode 100644 index 0000000000000..6d3c7a295dfe0 --- /dev/null +++ b/src/hotspot/share/gc/x/xAddressSpaceLimit.cpp @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/x/xAddressSpaceLimit.hpp" +#include "gc/x/xGlobals.hpp" +#include "runtime/globals.hpp" +#include "runtime/os.hpp" +#include "utilities/align.hpp" + +static size_t address_space_limit() { + size_t limit = 0; + + if (os::has_allocatable_memory_limit(&limit)) { + return limit; + } + + // No limit + return SIZE_MAX; +} + +size_t XAddressSpaceLimit::mark_stack() { + // Allow mark stacks to occupy 10% of the address space + const size_t limit = address_space_limit() / 10; + return align_up(limit, XMarkStackSpaceExpandSize); +} + +size_t XAddressSpaceLimit::heap_view() { + // Allow all heap views to occupy 50% of the address space + const size_t limit = address_space_limit() / MaxVirtMemFraction / XHeapViews; + return align_up(limit, XGranuleSize); +} diff --git a/src/hotspot/share/gc/x/xAddressSpaceLimit.hpp b/src/hotspot/share/gc/x/xAddressSpaceLimit.hpp new file mode 100644 index 0000000000000..9a3fcc27a293d --- /dev/null +++ b/src/hotspot/share/gc/x/xAddressSpaceLimit.hpp @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XADDRESSSPACELIMIT_HPP +#define SHARE_GC_X_XADDRESSSPACELIMIT_HPP + +#include "memory/allStatic.hpp" +#include "utilities/globalDefinitions.hpp" + +class XAddressSpaceLimit : public AllStatic { +public: + static size_t mark_stack(); + static size_t heap_view(); +}; + +#endif // SHARE_GC_X_XADDRESSSPACELIMIT_HPP diff --git a/src/hotspot/share/gc/x/xAllocationFlags.hpp b/src/hotspot/share/gc/x/xAllocationFlags.hpp new file mode 100644 index 0000000000000..307d68c65ac78 --- /dev/null +++ b/src/hotspot/share/gc/x/xAllocationFlags.hpp @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
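XAddressSpaceLimit above just carves fixed fractions out of the allocatable address space and rounds them up to the relevant granularity. A small sketch of the same arithmetic with an illustrative 47-bit address space and 2M granule (the rounding helper mirrors what utilities/align.hpp provides for power-of-two alignments):

#include <cstddef>
#include <cstdio>

// Round up to a power-of-two alignment.
static size_t align_up(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t address_space = size_t(1) << 47;  // assume a 47-bit VA limit
  const size_t granule       = 2 * 1024 * 1024;  // illustrative 2M granule

  // Mark stacks may use 10% of the address space; the real code aligns
  // this to XMarkStackSpaceExpandSize rather than the granule size.
  const size_t mark_stack_limit = align_up(address_space / 10, granule);

  // Heap views may use 50% of the address space, split across e.g. 3 views.
  const size_t heap_view_limit  = align_up(address_space / 2 / 3, granule);

  printf("mark stack limit:    %zu M\n", mark_stack_limit >> 20);
  printf("per-view heap limit: %zu M\n", heap_view_limit >> 20);
  return 0;
}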
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XALLOCATIONFLAGS_HPP +#define SHARE_GC_X_XALLOCATIONFLAGS_HPP + +#include "gc/x/xBitField.hpp" +#include "memory/allocation.hpp" + +// +// Allocation flags layout +// ----------------------- +// +// 7 2 1 0 +// +-----+-+-+-+ +// |00000|1|1|1| +// +-----+-+-+-+ +// | | | | +// | | | * 0-0 Non-Blocking Flag (1-bit) +// | | | +// | | * 1-1 Worker Relocation Flag (1-bit) +// | | +// | * 2-2 Low Address Flag (1-bit) +// | +// * 7-3 Unused (5-bits) +// + +class XAllocationFlags { +private: + typedef XBitField field_non_blocking; + typedef XBitField field_worker_relocation; + typedef XBitField field_low_address; + + uint8_t _flags; + +public: + XAllocationFlags() : + _flags(0) {} + + void set_non_blocking() { + _flags |= field_non_blocking::encode(true); + } + + void set_worker_relocation() { + _flags |= field_worker_relocation::encode(true); + } + + void set_low_address() { + _flags |= field_low_address::encode(true); + } + + bool non_blocking() const { + return field_non_blocking::decode(_flags); + } + + bool worker_relocation() const { + return field_worker_relocation::decode(_flags); + } + + bool low_address() const { + return field_low_address::decode(_flags); + } +}; + +#endif // SHARE_GC_X_XALLOCATIONFLAGS_HPP diff --git a/src/hotspot/share/gc/x/xArguments.cpp b/src/hotspot/share/gc/x/xArguments.cpp new file mode 100644 index 0000000000000..8c02c80024773 --- /dev/null +++ b/src/hotspot/share/gc/x/xArguments.cpp @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
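XAllocationFlags above packs three booleans into a single byte through XBitField; encoding and decoding are plain shift-and-mask operations. A simplified stand-in without the XBitField template, with bit positions taken from the layout comment:

#include <cstdint>

// Simplified stand-in for the XBitField-based flags; bit positions mirror
// the layout comment: 0 non-blocking, 1 worker relocation, 2 low address.
class AllocationFlags {
 private:
  uint8_t _flags = 0;

  static const uint8_t kNonBlocking      = 1u << 0;
  static const uint8_t kWorkerRelocation = 1u << 1;
  static const uint8_t kLowAddress       = 1u << 2;

 public:
  void set_non_blocking()      { _flags |= kNonBlocking; }
  void set_worker_relocation() { _flags |= kWorkerRelocation; }
  void set_low_address()       { _flags |= kLowAddress; }

  bool non_blocking() const      { return (_flags & kNonBlocking) != 0; }
  bool worker_relocation() const { return (_flags & kWorkerRelocation) != 0; }
  bool low_address() const       { return (_flags & kLowAddress) != 0; }
};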
+ */ + +#include "precompiled.hpp" +#include "gc/x/xAddressSpaceLimit.hpp" +#include "gc/x/xArguments.hpp" +#include "gc/x/xCollectedHeap.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xHeuristics.hpp" +#include "gc/shared/gcArguments.hpp" +#include "runtime/globals.hpp" +#include "runtime/globals_extension.hpp" +#include "runtime/java.hpp" + +void XArguments::initialize_alignments() { + SpaceAlignment = XGranuleSize; + HeapAlignment = SpaceAlignment; +} + +void XArguments::initialize() { + // Check mark stack size + const size_t mark_stack_space_limit = XAddressSpaceLimit::mark_stack(); + if (ZMarkStackSpaceLimit > mark_stack_space_limit) { + if (!FLAG_IS_DEFAULT(ZMarkStackSpaceLimit)) { + vm_exit_during_initialization("ZMarkStackSpaceLimit too large for limited address space"); + } + FLAG_SET_DEFAULT(ZMarkStackSpaceLimit, mark_stack_space_limit); + } + + // Enable NUMA by default + if (FLAG_IS_DEFAULT(UseNUMA)) { + FLAG_SET_DEFAULT(UseNUMA, true); + } + + if (FLAG_IS_DEFAULT(ZFragmentationLimit)) { + FLAG_SET_DEFAULT(ZFragmentationLimit, 25.0); + } + + // Select number of parallel threads + if (FLAG_IS_DEFAULT(ParallelGCThreads)) { + FLAG_SET_DEFAULT(ParallelGCThreads, XHeuristics::nparallel_workers()); + } + + if (ParallelGCThreads == 0) { + vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ParallelGCThreads=0"); + } + + // Select number of concurrent threads + if (FLAG_IS_DEFAULT(ConcGCThreads)) { + FLAG_SET_DEFAULT(ConcGCThreads, XHeuristics::nconcurrent_workers()); + } + + if (ConcGCThreads == 0) { + vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ConcGCThreads=0"); + } + + // Large page size must match granule size + if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != XGranuleSize) { + vm_exit_during_initialization(err_msg("Incompatible -XX:LargePageSizeInBytes, only " + SIZE_FORMAT "M large pages are supported by ZGC", + XGranuleSize / M)); + } + + // The heuristics used when UseDynamicNumberOfGCThreads is + // enabled defaults to using a ZAllocationSpikeTolerance of 1. + if (UseDynamicNumberOfGCThreads && FLAG_IS_DEFAULT(ZAllocationSpikeTolerance)) { + FLAG_SET_DEFAULT(ZAllocationSpikeTolerance, 1); + } + +#ifdef COMPILER2 + // Enable loop strip mining by default + if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) { + FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true); + if (FLAG_IS_DEFAULT(LoopStripMiningIter)) { + FLAG_SET_DEFAULT(LoopStripMiningIter, 1000); + } + } +#endif + + // CompressedOops not supported + FLAG_SET_DEFAULT(UseCompressedOops, false); + + // Verification before startup and after exit not (yet) supported + FLAG_SET_DEFAULT(VerifyDuringStartup, false); + FLAG_SET_DEFAULT(VerifyBeforeExit, false); + + if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) { + FLAG_SET_DEFAULT(ZVerifyRoots, true); + FLAG_SET_DEFAULT(ZVerifyObjects, true); + } +} + +size_t XArguments::heap_virtual_to_physical_ratio() { + return XHeapViews * XVirtualToPhysicalRatio; +} + +CollectedHeap* XArguments::create_heap() { + return new XCollectedHeap(); +} + +bool XArguments::is_supported() { + return is_os_supported(); +} diff --git a/src/hotspot/share/gc/x/xArguments.hpp b/src/hotspot/share/gc/x/xArguments.hpp new file mode 100644 index 0000000000000..aaa586a2df2f1 --- /dev/null +++ b/src/hotspot/share/gc/x/xArguments.hpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
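Most of XArguments::initialize() above follows one ergonomics pattern: if the user left a flag at its default, pick a computed value; if the user set it explicitly and it violates a platform limit, fail startup rather than silently changing it. A generic sketch of that pattern with plain variables instead of HotSpot's FLAG_IS_DEFAULT/FLAG_SET_DEFAULT machinery:

#include <cstdio>

struct SizeFlag {
  size_t value;
  bool   is_default;  // true if the user never set it on the command line
};

// Respect explicit user values; clamp only silent defaults.
static bool apply_limit(SizeFlag* flag, size_t limit) {
  if (flag->value <= limit) {
    return true;                  // within the limit, nothing to do
  }
  if (!flag->is_default) {
    fprintf(stderr, "flag too large for limited address space\n");
    return false;                 // explicit user value: report an error
  }
  flag->value = limit;            // silent default: clamp to the limit
  return true;
}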
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XARGUMENTS_HPP +#define SHARE_GC_X_XARGUMENTS_HPP + +#include "gc/shared/gcArguments.hpp" + +class CollectedHeap; + +class XArguments : AllStatic { +public: + static void initialize_alignments(); + static void initialize(); + static size_t heap_virtual_to_physical_ratio(); + static CollectedHeap* create_heap(); + + static bool is_supported(); + + static bool is_os_supported(); +}; + +#endif // SHARE_GC_X_XARGUMENTS_HPP diff --git a/src/hotspot/share/gc/x/xArray.hpp b/src/hotspot/share/gc/x/xArray.hpp new file mode 100644 index 0000000000000..b0b4b5bd81ea6 --- /dev/null +++ b/src/hotspot/share/gc/x/xArray.hpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XARRAY_HPP +#define SHARE_GC_X_XARRAY_HPP + +#include "memory/allocation.hpp" +#include "utilities/growableArray.hpp" + +template using XArray = GrowableArrayCHeap; + +template +class XArrayIteratorImpl : public StackObj { +private: + const T* _next; + const T* const _end; + + bool next_serial(T* elem); + bool next_parallel(T* elem); + +public: + XArrayIteratorImpl(const T* array, size_t length); + XArrayIteratorImpl(const XArray* array); + + bool next(T* elem); +}; + +template using XArrayIterator = XArrayIteratorImpl; +template using XArrayParallelIterator = XArrayIteratorImpl; + +#endif // SHARE_GC_X_XARRAY_HPP diff --git a/src/hotspot/share/gc/x/xArray.inline.hpp b/src/hotspot/share/gc/x/xArray.inline.hpp new file mode 100644 index 0000000000000..9d3cfcfbc659a --- /dev/null +++ b/src/hotspot/share/gc/x/xArray.inline.hpp @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XARRAY_INLINE_HPP +#define SHARE_GC_X_XARRAY_INLINE_HPP + +#include "gc/x/xArray.hpp" + +#include "runtime/atomic.hpp" + +template +inline bool XArrayIteratorImpl::next_serial(T* elem) { + if (_next == _end) { + return false; + } + + *elem = *_next; + _next++; + + return true; +} + +template +inline bool XArrayIteratorImpl::next_parallel(T* elem) { + const T* old_next = Atomic::load(&_next); + + for (;;) { + if (old_next == _end) { + return false; + } + + const T* const new_next = old_next + 1; + const T* const prev_next = Atomic::cmpxchg(&_next, old_next, new_next); + if (prev_next == old_next) { + *elem = *old_next; + return true; + } + + old_next = prev_next; + } +} + +template +inline XArrayIteratorImpl::XArrayIteratorImpl(const T* array, size_t length) : + _next(array), + _end(array + length) {} + +template +inline XArrayIteratorImpl::XArrayIteratorImpl(const XArray* array) : + XArrayIteratorImpl(array->is_empty() ? NULL : array->adr_at(0), array->length()) {} + +template +inline bool XArrayIteratorImpl::next(T* elem) { + if (Parallel) { + return next_parallel(elem); + } else { + return next_serial(elem); + } +} + +#endif // SHARE_GC_X_XARRAY_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xAttachedArray.hpp b/src/hotspot/share/gc/x/xAttachedArray.hpp new file mode 100644 index 0000000000000..f039f602aab38 --- /dev/null +++ b/src/hotspot/share/gc/x/xAttachedArray.hpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
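The parallel flavour of XArrayIteratorImpl::next() above hands out elements by CAS-advancing a shared cursor, so several workers can drain one array without any further coordination. A standalone sketch of the same claiming loop using std::atomic (simplified types, not the HotSpot iterator itself):

#include <atomic>
#include <cstddef>

template <typename T>
class ParallelArrayIterator {
 private:
  std::atomic<const T*> _next;
  const T* const        _end;

 public:
  ParallelArrayIterator(const T* array, size_t length)
    : _next(array), _end(array + length) {}

  // Claim the next element, or return false when the array is drained.
  bool next(T* elem) {
    const T* old_next = _next.load(std::memory_order_relaxed);
    for (;;) {
      if (old_next == _end) {
        return false;
      }
      // On success we own *old_next; on failure old_next is refreshed with
      // the current cursor and the loop simply retries, so no element is
      // handed out twice.
      if (_next.compare_exchange_weak(old_next, old_next + 1,
                                      std::memory_order_relaxed)) {
        *elem = *old_next;
        return true;
      }
    }
  }
};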
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XATTACHEDARRAY_HPP +#define SHARE_GC_X_XATTACHEDARRAY_HPP + +#include "utilities/globalDefinitions.hpp" + +class VMStructs; + +template +class XAttachedArray { + friend class ::VMStructs; + +private: + const size_t _length; + + static size_t object_size(); + static size_t array_size(size_t length); + +public: + template + static void* alloc(Allocator* allocator, size_t length); + + static void* alloc(size_t length); + static void free(ObjectT* obj); + + XAttachedArray(size_t length); + + size_t length() const; + ArrayT* operator()(const ObjectT* obj) const; +}; + +#endif // SHARE_GC_X_XATTACHEDARRAY_HPP diff --git a/src/hotspot/share/gc/x/xAttachedArray.inline.hpp b/src/hotspot/share/gc/x/xAttachedArray.inline.hpp new file mode 100644 index 0000000000000..ba10de9967384 --- /dev/null +++ b/src/hotspot/share/gc/x/xAttachedArray.inline.hpp @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
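XAttachedArray, declared above and defined inline further down, co-allocates an object and a trailing array in one block: the array starts at the object's size rounded up to the element alignment, and its elements are constructed with placement new. A hedged, simplified sketch of that layout trick using plain operator new instead of HotSpot's AllocateHeap:

#include <cstddef>
#include <cstdint>
#include <new>

// Object with a trailing array of Elem, allocated as one block.
// Assumes sizeof(Elem) is a power of two, as the alignment formula requires.
template <typename ObjectT, typename Elem>
struct AttachedArray {
  static size_t object_size() {
    // Offset of the trailing array: object size aligned up to the element size.
    return (sizeof(ObjectT) + sizeof(Elem) - 1) & ~(sizeof(Elem) - 1);
  }

  static void* alloc(size_t length) {
    void* const addr = ::operator new(object_size() + sizeof(Elem) * length);

    // Construct the trailing elements in place, one by one (avoids the
    // unspecified cookie overhead of array placement new).
    Elem* const array_addr =
        reinterpret_cast<Elem*>(static_cast<char*>(addr) + object_size());
    for (size_t i = 0; i < length; i++) {
      new (array_addr + i) Elem();
    }

    return addr;  // the caller placement-news ObjectT at addr
  }

  static Elem* array_of(const ObjectT* obj) {
    return reinterpret_cast<Elem*>(
        reinterpret_cast<uintptr_t>(obj) + object_size());
  }
};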
+ */ + +#ifndef SHARE_GC_X_XATTACHEDARRAY_INLINE_HPP +#define SHARE_GC_X_XATTACHEDARRAY_INLINE_HPP + +#include "gc/x/xAttachedArray.hpp" + +#include "memory/allocation.hpp" +#include "utilities/align.hpp" + +template +inline size_t XAttachedArray::object_size() { + return align_up(sizeof(ObjectT), sizeof(ArrayT)); +} + +template +inline size_t XAttachedArray::array_size(size_t length) { + return sizeof(ArrayT) * length; +} + +template +template +inline void* XAttachedArray::alloc(Allocator* allocator, size_t length) { + // Allocate memory for object and array + const size_t size = object_size() + array_size(length); + void* const addr = allocator->alloc(size); + + // Placement new array + void* const array_addr = reinterpret_cast(addr) + object_size(); + ::new (array_addr) ArrayT[length]; + + // Return pointer to object + return addr; +} + +template +inline void* XAttachedArray::alloc(size_t length) { + struct Allocator { + void* alloc(size_t size) const { + return AllocateHeap(size, mtGC); + } + } allocator; + return alloc(&allocator, length); +} + +template +inline void XAttachedArray::free(ObjectT* obj) { + FreeHeap(obj); +} + +template +inline XAttachedArray::XAttachedArray(size_t length) : + _length(length) {} + +template +inline size_t XAttachedArray::length() const { + return _length; +} + +template +inline ArrayT* XAttachedArray::operator()(const ObjectT* obj) const { + return reinterpret_cast(reinterpret_cast(obj) + object_size()); +} + +#endif // SHARE_GC_X_XATTACHEDARRAY_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xBarrier.cpp b/src/hotspot/share/gc/x/xBarrier.cpp new file mode 100644 index 0000000000000..a2528a9aaf268 --- /dev/null +++ b/src/hotspot/share/gc/x/xBarrier.cpp @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "classfile/javaClasses.hpp" +#include "gc/x/xBarrier.inline.hpp" +#include "gc/x/xHeap.inline.hpp" +#include "gc/x/xOop.inline.hpp" +#include "gc/x/xThread.inline.hpp" +#include "memory/iterator.inline.hpp" +#include "oops/oop.inline.hpp" +#include "runtime/safepoint.hpp" +#include "utilities/debug.hpp" + +template +bool XBarrier::should_mark_through(uintptr_t addr) { + // Finalizable marked oops can still exists on the heap after marking + // has completed, in which case we just want to convert this into a + // good oop and not push it on the mark stack. 
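  // To summarize the cases handled below:
  //  - marking has completed:       never mark through, just return a good oop
  //  - during marking, strong:      always mark through
  //  - during marking, finalizable: mark through only if the oop is not
  //                                 already marked (strongly or finalizably)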
+ if (!during_mark()) { + assert(XAddress::is_marked(addr), "Should be marked"); + assert(XAddress::is_finalizable(addr), "Should be finalizable"); + return false; + } + + // During marking, we mark through already marked oops to avoid having + // some large part of the object graph hidden behind a pushed, but not + // yet flushed, entry on a mutator mark stack. Always marking through + // allows the GC workers to proceed through the object graph even if a + // mutator touched an oop first, which in turn will reduce the risk of + // having to flush mark stacks multiple times to terminate marking. + // + // However, when doing finalizable marking we don't always want to mark + // through. First, marking through an already strongly marked oop would + // be wasteful, since we will then proceed to do finalizable marking on + // an object which is, or will be, marked strongly. Second, marking + // through an already finalizable marked oop would also be wasteful, + // since such oops can never end up on a mutator mark stack and can + // therefore not hide some part of the object graph from GC workers. + if (finalizable) { + return !XAddress::is_marked(addr); + } + + // Mark through + return true; +} + +template +uintptr_t XBarrier::mark(uintptr_t addr) { + uintptr_t good_addr; + + if (XAddress::is_marked(addr)) { + // Already marked, but try to mark though anyway + good_addr = XAddress::good(addr); + } else if (XAddress::is_remapped(addr)) { + // Already remapped, but also needs to be marked + good_addr = XAddress::good(addr); + } else { + // Needs to be both remapped and marked + good_addr = remap(addr); + } + + // Mark + if (should_mark_through(addr)) { + XHeap::heap()->mark_object(good_addr); + } + + if (finalizable) { + // Make the oop finalizable marked/good, instead of normal marked/good. + // This is needed because an object might first becomes finalizable + // marked by the GC, and then loaded by a mutator thread. In this case, + // the mutator thread must be able to tell that the object needs to be + // strongly marked. The finalizable bit in the oop exists to make sure + // that a load of a finalizable marked oop will fall into the barrier + // slow path so that we can mark the object as strongly reachable. + return XAddress::finalizable_good(good_addr); + } + + return good_addr; +} + +uintptr_t XBarrier::remap(uintptr_t addr) { + assert(!XAddress::is_good(addr), "Should not be good"); + assert(!XAddress::is_weak_good(addr), "Should not be weak good"); + return XHeap::heap()->remap_object(addr); +} + +uintptr_t XBarrier::relocate(uintptr_t addr) { + assert(!XAddress::is_good(addr), "Should not be good"); + assert(!XAddress::is_weak_good(addr), "Should not be weak good"); + return XHeap::heap()->relocate_object(addr); +} + +uintptr_t XBarrier::relocate_or_mark(uintptr_t addr) { + return during_relocate() ? relocate(addr) : mark(addr); +} + +uintptr_t XBarrier::relocate_or_mark_no_follow(uintptr_t addr) { + return during_relocate() ? relocate(addr) : mark(addr); +} + +uintptr_t XBarrier::relocate_or_remap(uintptr_t addr) { + return during_relocate() ? 
relocate(addr) : remap(addr); +} + +// +// Load barrier +// +uintptr_t XBarrier::load_barrier_on_oop_slow_path(uintptr_t addr) { + return relocate_or_mark(addr); +} + +uintptr_t XBarrier::load_barrier_on_invisible_root_oop_slow_path(uintptr_t addr) { + return relocate_or_mark_no_follow(addr); +} + +void XBarrier::load_barrier_on_oop_fields(oop o) { + assert(XAddress::is_good(XOop::to_address(o)), "Should be good"); + XLoadBarrierOopClosure cl; + o->oop_iterate(&cl); +} + +// +// Weak load barrier +// +uintptr_t XBarrier::weak_load_barrier_on_oop_slow_path(uintptr_t addr) { + return XAddress::is_weak_good(addr) ? XAddress::good(addr) : relocate_or_remap(addr); +} + +uintptr_t XBarrier::weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr) { + const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr); + if (XHeap::heap()->is_object_strongly_live(good_addr)) { + return good_addr; + } + + // Not strongly live + return 0; +} + +uintptr_t XBarrier::weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr) { + const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr); + if (XHeap::heap()->is_object_live(good_addr)) { + return good_addr; + } + + // Not live + return 0; +} + +// +// Keep alive barrier +// +uintptr_t XBarrier::keep_alive_barrier_on_oop_slow_path(uintptr_t addr) { + assert(during_mark(), "Invalid phase"); + + // Mark + return mark(addr); +} + +uintptr_t XBarrier::keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr) { + assert(XResurrection::is_blocked(), "This operation is only valid when resurrection is blocked"); + const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr); + assert(XHeap::heap()->is_object_strongly_live(good_addr), "Should be live"); + return good_addr; +} + +uintptr_t XBarrier::keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr) { + assert(XResurrection::is_blocked(), "This operation is only valid when resurrection is blocked"); + const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr); + assert(XHeap::heap()->is_object_live(good_addr), "Should be live"); + return good_addr; +} + +// +// Mark barrier +// +uintptr_t XBarrier::mark_barrier_on_oop_slow_path(uintptr_t addr) { + assert(during_mark(), "Invalid phase"); + assert(XThread::is_worker(), "Invalid thread"); + + // Mark + return mark(addr); +} + +uintptr_t XBarrier::mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr) { + assert(during_mark(), "Invalid phase"); + assert(XThread::is_worker(), "Invalid thread"); + + // Mark + return mark(addr); +} + +// +// Narrow oop variants, never used. 
+// +oop XBarrier::load_barrier_on_oop_field(volatile narrowOop* p) { + ShouldNotReachHere(); + return NULL; +} + +oop XBarrier::load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) { + ShouldNotReachHere(); + return NULL; +} + +void XBarrier::load_barrier_on_oop_array(volatile narrowOop* p, size_t length) { + ShouldNotReachHere(); +} + +oop XBarrier::load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) { + ShouldNotReachHere(); + return NULL; +} + +oop XBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) { + ShouldNotReachHere(); + return NULL; +} + +oop XBarrier::weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) { + ShouldNotReachHere(); + return NULL; +} + +oop XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) { + ShouldNotReachHere(); + return NULL; +} + +oop XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) { + ShouldNotReachHere(); + return NULL; +} + +#ifdef ASSERT + +// ON_WEAK barriers should only ever be applied to j.l.r.Reference.referents. +void XBarrier::verify_on_weak(volatile oop* referent_addr) { + if (referent_addr != NULL) { + uintptr_t base = (uintptr_t)referent_addr - java_lang_ref_Reference::referent_offset(); + oop obj = cast_to_oop(base); + assert(oopDesc::is_oop(obj), "Verification failed for: ref " PTR_FORMAT " obj: " PTR_FORMAT, (uintptr_t)referent_addr, base); + assert(java_lang_ref_Reference::is_referent_field(obj, java_lang_ref_Reference::referent_offset()), "Sanity"); + } +} + +#endif + +void XLoadBarrierOopClosure::do_oop(oop* p) { + XBarrier::load_barrier_on_oop_field(p); +} + +void XLoadBarrierOopClosure::do_oop(narrowOop* p) { + ShouldNotReachHere(); +} diff --git a/src/hotspot/share/gc/x/xBarrier.hpp b/src/hotspot/share/gc/x/xBarrier.hpp new file mode 100644 index 0000000000000..e2ef210d7d25b --- /dev/null +++ b/src/hotspot/share/gc/x/xBarrier.hpp @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
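All of the slow paths above are reached from one load-barrier shape: test the loaded oop against the current bad mask, and if it trips, compute a good oop (relocate, remap or mark depending on phase) and self-heal the field with a CAS so later loads stay on the fast path. A much reduced standalone sketch of that shape, with placeholder color bits and a placeholder slow path instead of the real XBarrier entry points:

#include <atomic>
#include <cstdint>

// Placeholder color bookkeeping; in HotSpot this role is played by
// XAddressBadMask and the phase-dependent slow paths (relocate, remap, mark).
static uintptr_t current_bad_mask = uintptr_t(1) << 42;  // made-up bad color bit
static uintptr_t current_good_bit = uintptr_t(1) << 43;  // made-up good color bit

static uintptr_t slow_path_make_good(uintptr_t addr) {
  // Stand-in for relocate_or_mark(): strip colors, apply the good color.
  const uintptr_t offset = addr & ~(current_bad_mask | current_good_bit);
  return offset | current_good_bit;
}

uintptr_t load_barrier(std::atomic<uintptr_t>* field) {
  uintptr_t addr = field->load(std::memory_order_relaxed);

  // Fast path: the oop already carries the current good color (or is null).
  if ((addr & current_bad_mask) == 0) {
    return addr;
  }

  // Slow path: obtain a good (remapped/marked) address for the same object.
  const uintptr_t good = slow_path_make_good(addr);

  // Self-heal: install the good oop so later loads of this field take the
  // fast path. If another thread healed the field first, keep its value.
  if (!field->compare_exchange_strong(addr, good, std::memory_order_relaxed)) {
    return addr;  // addr now holds whatever the winning thread installed
  }
  return good;
}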
+ */ + +#ifndef SHARE_GC_X_XBARRIER_HPP +#define SHARE_GC_X_XBARRIER_HPP + +#include "memory/allStatic.hpp" +#include "memory/iterator.hpp" +#include "oops/oop.hpp" + +typedef bool (*XBarrierFastPath)(uintptr_t); +typedef uintptr_t (*XBarrierSlowPath)(uintptr_t); + +class XBarrier : public AllStatic { +private: + static const bool GCThread = true; + static const bool AnyThread = false; + + static const bool Follow = true; + static const bool DontFollow = false; + + static const bool Strong = false; + static const bool Finalizable = true; + + static const bool Publish = true; + static const bool Overflow = false; + + template static void self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr); + + template static oop barrier(volatile oop* p, oop o); + template static oop weak_barrier(volatile oop* p, oop o); + template static void root_barrier(oop* p, oop o); + + static bool is_good_or_null_fast_path(uintptr_t addr); + static bool is_weak_good_or_null_fast_path(uintptr_t addr); + static bool is_marked_or_null_fast_path(uintptr_t addr); + + static bool during_mark(); + static bool during_relocate(); + template static bool should_mark_through(uintptr_t addr); + template static uintptr_t mark(uintptr_t addr); + static uintptr_t remap(uintptr_t addr); + static uintptr_t relocate(uintptr_t addr); + static uintptr_t relocate_or_mark(uintptr_t addr); + static uintptr_t relocate_or_mark_no_follow(uintptr_t addr); + static uintptr_t relocate_or_remap(uintptr_t addr); + + static uintptr_t load_barrier_on_oop_slow_path(uintptr_t addr); + static uintptr_t load_barrier_on_invisible_root_oop_slow_path(uintptr_t addr); + + static uintptr_t weak_load_barrier_on_oop_slow_path(uintptr_t addr); + static uintptr_t weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr); + static uintptr_t weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr); + + static uintptr_t keep_alive_barrier_on_oop_slow_path(uintptr_t addr); + static uintptr_t keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr); + static uintptr_t keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr); + + static uintptr_t mark_barrier_on_oop_slow_path(uintptr_t addr); + static uintptr_t mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr); + + static void verify_on_weak(volatile oop* referent_addr) NOT_DEBUG_RETURN; + +public: + // Load barrier + static oop load_barrier_on_oop(oop o); + static oop load_barrier_on_oop_field(volatile oop* p); + static oop load_barrier_on_oop_field_preloaded(volatile oop* p, oop o); + static void load_barrier_on_oop_array(volatile oop* p, size_t length); + static void load_barrier_on_oop_fields(oop o); + static oop load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o); + static oop load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o); + static void load_barrier_on_root_oop_field(oop* p); + static void load_barrier_on_invisible_root_oop_field(oop* p); + + // Weak load barrier + static oop weak_load_barrier_on_oop_field(volatile oop* p); + static oop weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o); + static oop weak_load_barrier_on_weak_oop(oop o); + static oop weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o); + static oop weak_load_barrier_on_phantom_oop(oop o); + static oop weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o); + + // Is alive barrier + static bool is_alive_barrier_on_weak_oop(oop o); + static bool is_alive_barrier_on_phantom_oop(oop o); + + // Keep alive barrier + static void 
keep_alive_barrier_on_oop(oop o); + static void keep_alive_barrier_on_weak_oop_field(volatile oop* p); + static void keep_alive_barrier_on_phantom_oop_field(volatile oop* p); + static void keep_alive_barrier_on_phantom_root_oop_field(oop* p); + + // Mark barrier + static void mark_barrier_on_oop_field(volatile oop* p, bool finalizable); + static void mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable); + + // Narrow oop variants, never used. + static oop load_barrier_on_oop_field(volatile narrowOop* p); + static oop load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o); + static void load_barrier_on_oop_array(volatile narrowOop* p, size_t length); + static oop load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o); + static oop load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o); + static oop weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o); + static oop weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o); + static oop weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o); +}; + +class XLoadBarrierOopClosure : public BasicOopIterateClosure { +public: + virtual void do_oop(oop* p); + virtual void do_oop(narrowOop* p); +}; + +#endif // SHARE_GC_X_XBARRIER_HPP diff --git a/src/hotspot/share/gc/x/xBarrier.inline.hpp b/src/hotspot/share/gc/x/xBarrier.inline.hpp new file mode 100644 index 0000000000000..70288b5daac71 --- /dev/null +++ b/src/hotspot/share/gc/x/xBarrier.inline.hpp @@ -0,0 +1,394 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XBARRIER_INLINE_HPP +#define SHARE_GC_X_XBARRIER_INLINE_HPP + +#include "gc/x/xBarrier.hpp" + +#include "code/codeCache.hpp" +#include "gc/x/xAddress.inline.hpp" +#include "gc/x/xOop.inline.hpp" +#include "gc/x/xResurrection.inline.hpp" +#include "oops/oop.hpp" +#include "runtime/atomic.hpp" +#include "runtime/continuation.hpp" + +// A self heal must always "upgrade" the address metadata bits in +// accordance with the metadata bits state machine, which has the +// valid state transitions as described below (where N is the GC +// cycle). +// +// Note the subtleness of overlapping GC cycles. Specifically that +// oops are colored Remapped(N) starting at relocation N and ending +// at marking N + 1. 
+// +// +--- Mark Start +// | +--- Mark End +// | | +--- Relocate Start +// | | | +--- Relocate End +// | | | | +// Marked |---N---|--N+1--|--N+2--|---- +// Finalizable |---N---|--N+1--|--N+2--|---- +// Remapped ----|---N---|--N+1--|--N+2--| +// +// VALID STATE TRANSITIONS +// +// Marked(N) -> Remapped(N) +// -> Marked(N + 1) +// -> Finalizable(N + 1) +// +// Finalizable(N) -> Marked(N) +// -> Remapped(N) +// -> Marked(N + 1) +// -> Finalizable(N + 1) +// +// Remapped(N) -> Marked(N + 1) +// -> Finalizable(N + 1) +// +// PHASE VIEW +// +// XPhaseMark +// Load & Mark +// Marked(N) <- Marked(N - 1) +// <- Finalizable(N - 1) +// <- Remapped(N - 1) +// <- Finalizable(N) +// +// Mark(Finalizable) +// Finalizable(N) <- Marked(N - 1) +// <- Finalizable(N - 1) +// <- Remapped(N - 1) +// +// Load(AS_NO_KEEPALIVE) +// Remapped(N - 1) <- Marked(N - 1) +// <- Finalizable(N - 1) +// +// XPhaseMarkCompleted (Resurrection blocked) +// Load & Load(ON_WEAK/PHANTOM_OOP_REF | AS_NO_KEEPALIVE) & KeepAlive +// Marked(N) <- Marked(N - 1) +// <- Finalizable(N - 1) +// <- Remapped(N - 1) +// <- Finalizable(N) +// +// Load(ON_STRONG_OOP_REF | AS_NO_KEEPALIVE) +// Remapped(N - 1) <- Marked(N - 1) +// <- Finalizable(N - 1) +// +// XPhaseMarkCompleted (Resurrection unblocked) +// Load +// Marked(N) <- Finalizable(N) +// +// XPhaseRelocate +// Load & Load(AS_NO_KEEPALIVE) +// Remapped(N) <- Marked(N) +// <- Finalizable(N) + +template +inline void XBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr) { + if (heal_addr == 0) { + // Never heal with null since it interacts badly with reference processing. + // A mutator clearing an oop would be similar to calling Reference.clear(), + // which would make the reference non-discoverable or silently dropped + // by the reference processor. + return; + } + + assert(!fast_path(addr), "Invalid self heal"); + assert(fast_path(heal_addr), "Invalid self heal"); + + for (;;) { + // Heal + const uintptr_t prev_addr = Atomic::cmpxchg((volatile uintptr_t*)p, addr, heal_addr, memory_order_relaxed); + if (prev_addr == addr) { + // Success + return; + } + + if (fast_path(prev_addr)) { + // Must not self heal + return; + } + + // The oop location was healed by another barrier, but still needs upgrading. + // Re-apply healing to make sure the oop is not left with weaker (remapped or + // finalizable) metadata bits than what this barrier tried to apply. + assert(XAddress::offset(prev_addr) == XAddress::offset(heal_addr), "Invalid offset"); + addr = prev_addr; + } +} + +template +inline oop XBarrier::barrier(volatile oop* p, oop o) { + const uintptr_t addr = XOop::to_address(o); + + // Fast path + if (fast_path(addr)) { + return XOop::from_address(addr); + } + + // Slow path + const uintptr_t good_addr = slow_path(addr); + + if (p != NULL) { + self_heal(p, addr, good_addr); + } + + return XOop::from_address(good_addr); +} + +template +inline oop XBarrier::weak_barrier(volatile oop* p, oop o) { + const uintptr_t addr = XOop::to_address(o); + + // Fast path + if (fast_path(addr)) { + // Return the good address instead of the weak good address + // to ensure that the currently active heap view is used. + return XOop::from_address(XAddress::good_or_null(addr)); + } + + // Slow path + const uintptr_t good_addr = slow_path(addr); + + if (p != NULL) { + // The slow path returns a good/marked address or null, but we never mark + // oops in a weak load barrier so we always heal with the remapped address. 
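Note that template argument lists appear to have been stripped by the rendering of this patch (bare 'template' and calls like 'barrier(p, o)' with no explicit parameters). These helpers are parameterized on a fast-path predicate and a slow-path function, so the strong load barrier presumably instantiates them roughly as follows (reconstruction under that assumption, not verbatim patch content):

  inline oop XBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
    return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
  }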
+ self_heal(p, addr, XAddress::remapped_or_null(good_addr)); + } + + return XOop::from_address(good_addr); +} + +template +inline void XBarrier::root_barrier(oop* p, oop o) { + const uintptr_t addr = XOop::to_address(o); + + // Fast path + if (fast_path(addr)) { + return; + } + + // Slow path + const uintptr_t good_addr = slow_path(addr); + + // Non-atomic healing helps speed up root scanning. This is safe to do + // since we are always healing roots in a safepoint, or under a lock, + // which ensures we are never racing with mutators modifying roots while + // we are healing them. It's also safe in case multiple GC threads try + // to heal the same root if it is aligned, since they would always heal + // the root in the same way and it does not matter in which order it + // happens. For misaligned oops, there needs to be mutual exclusion. + *p = XOop::from_address(good_addr); +} + +inline bool XBarrier::is_good_or_null_fast_path(uintptr_t addr) { + return XAddress::is_good_or_null(addr); +} + +inline bool XBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) { + return XAddress::is_weak_good_or_null(addr); +} + +inline bool XBarrier::is_marked_or_null_fast_path(uintptr_t addr) { + return XAddress::is_marked_or_null(addr); +} + +inline bool XBarrier::during_mark() { + return XGlobalPhase == XPhaseMark; +} + +inline bool XBarrier::during_relocate() { + return XGlobalPhase == XPhaseRelocate; +} + +// +// Load barrier +// +inline oop XBarrier::load_barrier_on_oop(oop o) { + return load_barrier_on_oop_field_preloaded((oop*)NULL, o); +} + +inline oop XBarrier::load_barrier_on_oop_field(volatile oop* p) { + const oop o = Atomic::load(p); + return load_barrier_on_oop_field_preloaded(p, o); +} + +inline oop XBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) { + return barrier(p, o); +} + +inline void XBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) { + for (volatile const oop* const end = p + length; p < end; p++) { + load_barrier_on_oop_field(p); + } +} + +inline oop XBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) { + verify_on_weak(p); + + if (XResurrection::is_blocked()) { + return barrier(p, o); + } + + return load_barrier_on_oop_field_preloaded(p, o); +} + +inline oop XBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) { + if (XResurrection::is_blocked()) { + return barrier(p, o); + } + + return load_barrier_on_oop_field_preloaded(p, o); +} + +inline void XBarrier::load_barrier_on_root_oop_field(oop* p) { + const oop o = *p; + root_barrier(p, o); +} + +inline void XBarrier::load_barrier_on_invisible_root_oop_field(oop* p) { + const oop o = *p; + root_barrier(p, o); +} + +// +// Weak load barrier +// +inline oop XBarrier::weak_load_barrier_on_oop_field(volatile oop* p) { + assert(!XResurrection::is_blocked(), "Should not be called during resurrection blocked phase"); + const oop o = Atomic::load(p); + return weak_load_barrier_on_oop_field_preloaded(p, o); +} + +inline oop XBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) { + return weak_barrier(p, o); +} + +inline oop XBarrier::weak_load_barrier_on_weak_oop(oop o) { + return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o); +} + +inline oop XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) { + verify_on_weak(p); + + if (XResurrection::is_blocked()) { + return barrier(p, o); + } + + return weak_load_barrier_on_oop_field_preloaded(p, o); +} + +inline oop 
XBarrier::weak_load_barrier_on_phantom_oop(oop o) { + return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o); +} + +inline oop XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) { + if (XResurrection::is_blocked()) { + return barrier(p, o); + } + + return weak_load_barrier_on_oop_field_preloaded(p, o); +} + +// +// Is alive barrier +// +inline bool XBarrier::is_alive_barrier_on_weak_oop(oop o) { + // Check if oop is logically non-null. This operation + // is only valid when resurrection is blocked. + assert(XResurrection::is_blocked(), "Invalid phase"); + return weak_load_barrier_on_weak_oop(o) != NULL; +} + +inline bool XBarrier::is_alive_barrier_on_phantom_oop(oop o) { + // Check if oop is logically non-null. This operation + // is only valid when resurrection is blocked. + assert(XResurrection::is_blocked(), "Invalid phase"); + return weak_load_barrier_on_phantom_oop(o) != NULL; +} + +// +// Keep alive barrier +// +inline void XBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) { + assert(XResurrection::is_blocked(), "This operation is only valid when resurrection is blocked"); + const oop o = Atomic::load(p); + barrier(p, o); +} + +inline void XBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) { + assert(XResurrection::is_blocked(), "This operation is only valid when resurrection is blocked"); + const oop o = Atomic::load(p); + barrier(p, o); +} + +inline void XBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) { + // The keep alive operation is only valid when resurrection is blocked. + // + // Except with Loom, where we intentionally trigger arms nmethods after + // unlinking, to get a sense of what nmethods are alive. This will trigger + // the keep alive barriers, but the oops are healed and the slow-paths + // will not trigger. We have stronger checks in the slow-paths. + assert(XResurrection::is_blocked() || (CodeCache::contains((void*)p)), + "This operation is only valid when resurrection is blocked"); + const oop o = *p; + root_barrier(p, o); +} + +inline void XBarrier::keep_alive_barrier_on_oop(oop o) { + const uintptr_t addr = XOop::to_address(o); + assert(XAddress::is_good(addr), "Invalid address"); + + if (during_mark()) { + keep_alive_barrier_on_oop_slow_path(addr); + } +} + +// +// Mark barrier +// +inline void XBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) { + const oop o = Atomic::load(p); + + if (finalizable) { + barrier(p, o); + } else { + const uintptr_t addr = XOop::to_address(o); + if (XAddress::is_good(addr)) { + // Mark through good oop + mark_barrier_on_oop_slow_path(addr); + } else { + // Mark through bad oop + barrier(p, o); + } + } +} + +inline void XBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) { + for (volatile const oop* const end = p + length; p < end; p++) { + mark_barrier_on_oop_field(p, finalizable); + } +} + +#endif // SHARE_GC_X_XBARRIER_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xBarrierSet.cpp b/src/hotspot/share/gc/x/xBarrierSet.cpp new file mode 100644 index 0000000000000..cee53e8c3fa00 --- /dev/null +++ b/src/hotspot/share/gc/x/xBarrierSet.cpp @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
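mark_barrier_on_oop_field() above distinguishes finalizable marking from strong marking, and for the strong case it marks through even load-good oops, since a good color only proves the pointer view is current, not that the object has been marked in this cycle. As a usage sketch, a marking closure would apply it to every traced field (names below are illustrative, not from this patch):

  // Illustrative marking closure; 'finalizable' is carried as closure state.
  class XMarkSketchClosure : public BasicOopIterateClosure {
    const bool _finalizable;
  public:
    XMarkSketchClosure(bool finalizable) : _finalizable(finalizable) {}
    virtual void do_oop(oop* p)       { XBarrier::mark_barrier_on_oop_field((volatile oop*)p, _finalizable); }
    virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  };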
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xBarrierSet.hpp" +#include "gc/x/xBarrierSetAssembler.hpp" +#include "gc/x/xBarrierSetNMethod.hpp" +#include "gc/x/xBarrierSetStackChunk.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xHeap.inline.hpp" +#include "gc/x/xStackWatermark.hpp" +#include "gc/x/xThreadLocalData.hpp" +#include "runtime/javaThread.hpp" +#include "utilities/macros.hpp" +#ifdef COMPILER1 +#include "gc/x/c1/xBarrierSetC1.hpp" +#endif +#ifdef COMPILER2 +#include "gc/x/c2/xBarrierSetC2.hpp" +#endif + +class XBarrierSetC1; +class XBarrierSetC2; + +XBarrierSet::XBarrierSet() : + BarrierSet(make_barrier_set_assembler(), + make_barrier_set_c1(), + make_barrier_set_c2(), + new XBarrierSetNMethod(), + new XBarrierSetStackChunk(), + BarrierSet::FakeRtti(BarrierSet::XBarrierSet)) {} + +XBarrierSetAssembler* XBarrierSet::assembler() { + BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler(); + return reinterpret_cast(bsa); +} + +bool XBarrierSet::barrier_needed(DecoratorSet decorators, BasicType type) { + assert((decorators & AS_RAW) == 0, "Unexpected decorator"); + //assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unexpected decorator"); + + if (is_reference_type(type)) { + assert((decorators & (IN_HEAP | IN_NATIVE)) != 0, "Where is reference?"); + // Barrier needed even when IN_NATIVE, to allow concurrent scanning. + return true; + } + + // Barrier not needed + return false; +} + +void XBarrierSet::on_thread_create(Thread* thread) { + // Create thread local data + XThreadLocalData::create(thread); +} + +void XBarrierSet::on_thread_destroy(Thread* thread) { + // Destroy thread local data + XThreadLocalData::destroy(thread); +} + +void XBarrierSet::on_thread_attach(Thread* thread) { + // Set thread local address bad mask + XThreadLocalData::set_address_bad_mask(thread, XAddressBadMask); + if (thread->is_Java_thread()) { + JavaThread* const jt = JavaThread::cast(thread); + StackWatermark* const watermark = new XStackWatermark(jt); + StackWatermarkSet::add_watermark(jt, watermark); + } +} + +void XBarrierSet::on_thread_detach(Thread* thread) { + // Flush and free any remaining mark stacks + XHeap::heap()->mark_flush_and_free(thread); +} + +void XBarrierSet::print_on(outputStream* st) const { + st->print_cr("XBarrierSet"); +} diff --git a/src/hotspot/share/gc/x/xBarrierSet.hpp b/src/hotspot/share/gc/x/xBarrierSet.hpp new file mode 100644 index 0000000000000..3f1eb760033d0 --- /dev/null +++ b/src/hotspot/share/gc/x/xBarrierSet.hpp @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. 
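barrier_needed() above decides from the static type and decorators alone: every reference-type access gets a barrier, including IN_NATIVE ones (so oop handles can be scanned concurrently), while primitive accesses never do. A few illustrative calls whose results follow directly from that logic:

  XBarrierSet::barrier_needed(IN_HEAP   | ON_STRONG_OOP_REF, T_OBJECT); // true
  XBarrierSet::barrier_needed(IN_NATIVE | ON_STRONG_OOP_REF, T_ARRAY);  // true
  XBarrierSet::barrier_needed(IN_HEAP   | ON_STRONG_OOP_REF, T_INT);    // false, not a reference type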
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XBARRIERSET_HPP +#define SHARE_GC_X_XBARRIERSET_HPP + +#include "gc/shared/barrierSet.hpp" + +class XBarrierSetAssembler; + +class XBarrierSet : public BarrierSet { +public: + XBarrierSet(); + + static XBarrierSetAssembler* assembler(); + static bool barrier_needed(DecoratorSet decorators, BasicType type); + + virtual void on_thread_create(Thread* thread); + virtual void on_thread_destroy(Thread* thread); + virtual void on_thread_attach(Thread* thread); + virtual void on_thread_detach(Thread* thread); + + virtual void print_on(outputStream* st) const; + + template + class AccessBarrier : public BarrierSet::AccessBarrier { + private: + typedef BarrierSet::AccessBarrier Raw; + + template + static void verify_decorators_present(); + + template + static void verify_decorators_absent(); + + static oop* field_addr(oop base, ptrdiff_t offset); + + template + static oop load_barrier_on_oop_field_preloaded(T* addr, oop o); + + template + static oop load_barrier_on_unknown_oop_field_preloaded(oop base, ptrdiff_t offset, T* addr, oop o); + + public: + // + // In heap + // + template + static oop oop_load_in_heap(T* addr); + static oop oop_load_in_heap_at(oop base, ptrdiff_t offset); + + template + static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value); + static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value); + + template + static oop oop_atomic_xchg_in_heap(T* addr, oop new_value); + static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value); + + template + static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw, + arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, + size_t length); + + static void clone_in_heap(oop src, oop dst, size_t size); + + // + // Not in heap + // + template + static oop oop_load_not_in_heap(T* addr); + + template + static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value); + + template + static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value); + }; +}; + +template<> struct BarrierSet::GetName { + static const BarrierSet::Name value = BarrierSet::XBarrierSet; +}; + +template<> struct BarrierSet::GetType { + typedef ::XBarrierSet type; +}; + +#endif // SHARE_GC_X_XBARRIERSET_HPP diff --git a/src/hotspot/share/gc/x/xBarrierSet.inline.hpp b/src/hotspot/share/gc/x/xBarrierSet.inline.hpp new file mode 100644 index 0000000000000..a8ec7304e28a8 --- /dev/null +++ b/src/hotspot/share/gc/x/xBarrierSet.inline.hpp @@ -0,0 
+1,242 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XBARRIERSET_INLINE_HPP +#define SHARE_GC_X_XBARRIERSET_INLINE_HPP + +#include "gc/x/xBarrierSet.hpp" + +#include "gc/shared/accessBarrierSupport.inline.hpp" +#include "gc/x/xBarrier.inline.hpp" +#include "utilities/debug.hpp" + +template +template +inline void XBarrierSet::AccessBarrier::verify_decorators_present() { + if ((decorators & expected) == 0) { + fatal("Using unsupported access decorators"); + } +} + +template +template +inline void XBarrierSet::AccessBarrier::verify_decorators_absent() { + if ((decorators & expected) != 0) { + fatal("Using unsupported access decorators"); + } +} + +template +inline oop* XBarrierSet::AccessBarrier::field_addr(oop base, ptrdiff_t offset) { + assert(base != NULL, "Invalid base"); + return reinterpret_cast(reinterpret_cast((void*)base) + offset); +} + +template +template +inline oop XBarrierSet::AccessBarrier::load_barrier_on_oop_field_preloaded(T* addr, oop o) { + verify_decorators_absent(); + + if (HasDecorator::value) { + if (HasDecorator::value) { + return XBarrier::weak_load_barrier_on_oop_field_preloaded(addr, o); + } else if (HasDecorator::value) { + return XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o); + } else { + assert((HasDecorator::value), "Must be"); + return XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, o); + } + } else { + if (HasDecorator::value) { + return XBarrier::load_barrier_on_oop_field_preloaded(addr, o); + } else if (HasDecorator::value) { + return XBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o); + } else { + assert((HasDecorator::value), "Must be"); + return XBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o); + } + } +} + +template +template +inline oop XBarrierSet::AccessBarrier::load_barrier_on_unknown_oop_field_preloaded(oop base, ptrdiff_t offset, T* addr, oop o) { + verify_decorators_present(); + + const DecoratorSet decorators_known_strength = + AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength(base, offset); + + if (HasDecorator::value) { + if (decorators_known_strength & ON_STRONG_OOP_REF) { + return XBarrier::weak_load_barrier_on_oop_field_preloaded(addr, o); + } else if (decorators_known_strength & ON_WEAK_OOP_REF) { + return XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o); + } else { + assert(decorators_known_strength & ON_PHANTOM_OOP_REF, "Must be"); + return XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, o); + } + 
} else { + if (decorators_known_strength & ON_STRONG_OOP_REF) { + return XBarrier::load_barrier_on_oop_field_preloaded(addr, o); + } else if (decorators_known_strength & ON_WEAK_OOP_REF) { + return XBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o); + } else { + assert(decorators_known_strength & ON_PHANTOM_OOP_REF, "Must be"); + return XBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o); + } + } +} + +// +// In heap +// +template +template +inline oop XBarrierSet::AccessBarrier::oop_load_in_heap(T* addr) { + verify_decorators_absent(); + + const oop o = Raw::oop_load_in_heap(addr); + return load_barrier_on_oop_field_preloaded(addr, o); +} + +template +inline oop XBarrierSet::AccessBarrier::oop_load_in_heap_at(oop base, ptrdiff_t offset) { + oop* const addr = field_addr(base, offset); + const oop o = Raw::oop_load_in_heap(addr); + + if (HasDecorator::value) { + return load_barrier_on_unknown_oop_field_preloaded(base, offset, addr, o); + } + + return load_barrier_on_oop_field_preloaded(addr, o); +} + +template +template +inline oop XBarrierSet::AccessBarrier::oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) { + verify_decorators_present(); + verify_decorators_absent(); + + XBarrier::load_barrier_on_oop_field(addr); + return Raw::oop_atomic_cmpxchg_in_heap(addr, compare_value, new_value); +} + +template +inline oop XBarrierSet::AccessBarrier::oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) { + verify_decorators_present(); + verify_decorators_absent(); + + // Through Unsafe.CompareAndExchangeObject()/CompareAndSetObject() we can receive + // calls with ON_UNKNOWN_OOP_REF set. However, we treat these as ON_STRONG_OOP_REF, + // with the motivation that if you're doing Unsafe operations on a Reference.referent + // field, then you're on your own anyway. 
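The load_barrier_on_unknown_oop_field_preloaded() path above serves accesses whose reference strength is only known at runtime, in practice Unsafe-style accesses that might hit a Reference.referent field. A hedged usage sketch ('obj' and 'offset' would come from the Unsafe intrinsics, not from this patch):

  // Unsafe.getReference(obj, offset): the actual strength is resolved per field at runtime.
  oop value = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_load_at(obj, offset);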
+ XBarrier::load_barrier_on_oop_field(field_addr(base, offset)); + return Raw::oop_atomic_cmpxchg_in_heap_at(base, offset, compare_value, new_value); +} + +template +template +inline oop XBarrierSet::AccessBarrier::oop_atomic_xchg_in_heap(T* addr, oop new_value) { + verify_decorators_present(); + verify_decorators_absent(); + + const oop o = Raw::oop_atomic_xchg_in_heap(addr, new_value); + return XBarrier::load_barrier_on_oop(o); +} + +template +inline oop XBarrierSet::AccessBarrier::oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) { + verify_decorators_present(); + verify_decorators_absent(); + + const oop o = Raw::oop_atomic_xchg_in_heap_at(base, offset, new_value); + return XBarrier::load_barrier_on_oop(o); +} + +template +template +inline bool XBarrierSet::AccessBarrier::oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw, + arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, + size_t length) { + T* src = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw); + T* dst = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw); + + if (!HasDecorator::value) { + // No check cast, bulk barrier and bulk copy + XBarrier::load_barrier_on_oop_array(src, length); + return Raw::oop_arraycopy_in_heap(NULL, 0, src, NULL, 0, dst, length); + } + + // Check cast and copy each elements + Klass* const dst_klass = objArrayOop(dst_obj)->element_klass(); + for (const T* const end = src + length; src < end; src++, dst++) { + const oop elem = XBarrier::load_barrier_on_oop_field(src); + if (!oopDesc::is_instanceof_or_null(elem, dst_klass)) { + // Check cast failed + return false; + } + + // Cast is safe, since we know it's never a narrowOop + *(oop*)dst = elem; + } + + return true; +} + +template +inline void XBarrierSet::AccessBarrier::clone_in_heap(oop src, oop dst, size_t size) { + XBarrier::load_barrier_on_oop_fields(src); + Raw::clone_in_heap(src, dst, size); +} + +// +// Not in heap +// +template +template +inline oop XBarrierSet::AccessBarrier::oop_load_not_in_heap(T* addr) { + verify_decorators_absent(); + + const oop o = Raw::oop_load_not_in_heap(addr); + return load_barrier_on_oop_field_preloaded(addr, o); +} + +template +template +inline oop XBarrierSet::AccessBarrier::oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) { + verify_decorators_present(); + verify_decorators_absent(); + + return Raw::oop_atomic_cmpxchg_not_in_heap(addr, compare_value, new_value); +} + +template +template +inline oop XBarrierSet::AccessBarrier::oop_atomic_xchg_not_in_heap(T* addr, oop new_value) { + verify_decorators_present(); + verify_decorators_absent(); + + return Raw::oop_atomic_xchg_not_in_heap(addr, new_value); +} + +#endif // SHARE_GC_X_XBARRIERSET_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xBarrierSetAssembler.cpp b/src/hotspot/share/gc/x/xBarrierSetAssembler.cpp new file mode 100644 index 0000000000000..d00c12ed291e4 --- /dev/null +++ b/src/hotspot/share/gc/x/xBarrierSetAssembler.cpp @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
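Note the pattern in the atomic in-heap operations above: the field is first run through a load barrier purely for its self-healing side effect, so the subsequent raw compare-and-exchange compares a good-colored pointer in memory against a good-colored compare_value; without the pre-step, a stale colored pointer could spuriously fail the compare even though it denotes the same object. Condensed from the code above:

  // cmpxchg pre-step: heal *addr to the good color, then a plain CAS suffices.
  XBarrier::load_barrier_on_oop_field(addr);
  return Raw::oop_atomic_cmpxchg_in_heap(addr, compare_value, new_value);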
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xBarrierSetAssembler.hpp" +#include "gc/x/xThreadLocalData.hpp" +#include "runtime/javaThread.hpp" + +Address XBarrierSetAssemblerBase::address_bad_mask_from_thread(Register thread) { + return Address(thread, XThreadLocalData::address_bad_mask_offset()); +} + +Address XBarrierSetAssemblerBase::address_bad_mask_from_jni_env(Register env) { + return Address(env, XThreadLocalData::address_bad_mask_offset() - JavaThread::jni_environment_offset()); +} diff --git a/src/hotspot/share/gc/x/xBarrierSetAssembler.hpp b/src/hotspot/share/gc/x/xBarrierSetAssembler.hpp new file mode 100644 index 0000000000000..2f733465bfb97 --- /dev/null +++ b/src/hotspot/share/gc/x/xBarrierSetAssembler.hpp @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XBARRIERSETASSEMBLER_HPP +#define SHARE_GC_X_XBARRIERSETASSEMBLER_HPP + +#include "gc/shared/barrierSetAssembler.hpp" +#include "utilities/macros.hpp" + +class XBarrierSetAssemblerBase : public BarrierSetAssembler { +public: + static Address address_bad_mask_from_thread(Register thread); + static Address address_bad_mask_from_jni_env(Register env); +}; + +// Needs to be included after definition of XBarrierSetAssemblerBase +#include CPU_HEADER(gc/x/xBarrierSetAssembler) + +#endif // SHARE_GC_X_XBARRIERSETASSEMBLER_HPP diff --git a/src/hotspot/share/gc/x/xBarrierSetNMethod.cpp b/src/hotspot/share/gc/x/xBarrierSetNMethod.cpp new file mode 100644 index 0000000000000..002d6bc00c5d7 --- /dev/null +++ b/src/hotspot/share/gc/x/xBarrierSetNMethod.cpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
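address_bad_mask_from_thread()/from_jni_env() above give the platform assemblers the location of the per-thread bad mask, so the generated fast path can be a single mask test followed by a conditional runtime call. A C-level sketch of what the platform stubs emit (conceptual only, the stub name is a placeholder, not code from this patch):

  // Conceptual load-barrier fast path in generated code.
  if ((uintptr_t)o & XAddressBadMask) {
    o = call_selected_XBarrierSetRuntime_stub(o, p);  // slow path heals *p and returns the good oop
  }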
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "code/nmethod.hpp" +#include "gc/x/xBarrierSetNMethod.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xLock.inline.hpp" +#include "gc/x/xNMethod.hpp" +#include "gc/x/xThreadLocalData.hpp" +#include "logging/log.hpp" +#include "runtime/threadWXSetters.inline.hpp" + +bool XBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) { + XLocker locker(XNMethod::lock_for_nmethod(nm)); + log_trace(nmethod, barrier)("Entered critical zone for %p", nm); + + if (!is_armed(nm)) { + // Some other thread got here first and healed the oops + // and disarmed the nmethod. + return true; + } + + MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current())); + + if (nm->is_unloading()) { + // We don't need to take the lock when unlinking nmethods from + // the Method, because it is only concurrently unlinked by + // the entry barrier, which acquires the per nmethod lock. + nm->unlink_from_method(); + + // We can end up calling nmethods that are unloading + // since we clear compiled ICs lazily. Returning false + // will re-resovle the call and update the compiled IC. + return false; + } + + // Heal oops + XNMethod::nmethod_oops_barrier(nm); + + + // CodeCache unloading support + nm->mark_as_maybe_on_stack(); + + // Disarm + disarm(nm); + + return true; +} + +int* XBarrierSetNMethod::disarmed_guard_value_address() const { + return (int*)XAddressBadMaskHighOrderBitsAddr; +} + +ByteSize XBarrierSetNMethod::thread_disarmed_guard_value_offset() const { + return XThreadLocalData::nmethod_disarmed_offset(); +} diff --git a/src/hotspot/share/gc/x/xBarrierSetNMethod.hpp b/src/hotspot/share/gc/x/xBarrierSetNMethod.hpp new file mode 100644 index 0000000000000..db1ee8c4e8f11 --- /dev/null +++ b/src/hotspot/share/gc/x/xBarrierSetNMethod.hpp @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
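The guard plumbing at the end of this file ties nmethod arming to the GC phase: the disarmed value is taken from the high-order bits of the current address bad mask, so flipping the mask at a phase change implicitly re-arms every compiled method, and each one pays the entry barrier above once before being disarmed again. Roughly (sketch in comment form; the actual comparison lives in the shared BarrierSetNMethod code):

  // armed  <=>  guard value stored in the nmethod  !=  *disarmed_guard_value_address()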
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XBARRIERSETNMETHOD_HPP +#define SHARE_GC_X_XBARRIERSETNMETHOD_HPP + +#include "gc/shared/barrierSetNMethod.hpp" +#include "memory/allocation.hpp" + +class nmethod; + +class XBarrierSetNMethod : public BarrierSetNMethod { +protected: + virtual bool nmethod_entry_barrier(nmethod* nm); + +public: + virtual ByteSize thread_disarmed_guard_value_offset() const; + virtual int* disarmed_guard_value_address() const; +}; + +#endif // SHARE_GC_X_XBARRIERSETNMETHOD_HPP diff --git a/src/hotspot/share/gc/x/xBarrierSetRuntime.cpp b/src/hotspot/share/gc/x/xBarrierSetRuntime.cpp new file mode 100644 index 0000000000000..d87df24b9d8e6 --- /dev/null +++ b/src/hotspot/share/gc/x/xBarrierSetRuntime.cpp @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/x/xBarrier.inline.hpp" +#include "gc/x/xBarrierSetRuntime.hpp" +#include "oops/access.hpp" +#include "runtime/interfaceSupport.inline.hpp" + +JRT_LEAF(oopDesc*, XBarrierSetRuntime::load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p)) + return XBarrier::load_barrier_on_oop_field_preloaded(p, o); +JRT_END + +JRT_LEAF(oopDesc*, XBarrierSetRuntime::weak_load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p)) + return XBarrier::weak_load_barrier_on_oop_field_preloaded(p, o); +JRT_END + +JRT_LEAF(oopDesc*, XBarrierSetRuntime::weak_load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p)) + return XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(p, o); +JRT_END + +JRT_LEAF(oopDesc*, XBarrierSetRuntime::weak_load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p)) + return XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(p, o); +JRT_END + +JRT_LEAF(oopDesc*, XBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p)) + return XBarrier::load_barrier_on_weak_oop_field_preloaded(p, o); +JRT_END + +JRT_LEAF(oopDesc*, XBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p)) + return XBarrier::load_barrier_on_phantom_oop_field_preloaded(p, o); +JRT_END + +JRT_LEAF(void, XBarrierSetRuntime::load_barrier_on_oop_array(oop* p, size_t length)) + XBarrier::load_barrier_on_oop_array(p, length); +JRT_END + +JRT_LEAF(void, XBarrierSetRuntime::clone(oopDesc* src, oopDesc* dst, size_t size)) + HeapAccess<>::clone(src, dst, size); +JRT_END + +address XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(DecoratorSet decorators) { + if (decorators & ON_PHANTOM_OOP_REF) { + if (decorators & AS_NO_KEEPALIVE) { + return weak_load_barrier_on_phantom_oop_field_preloaded_addr(); + } else { + return load_barrier_on_phantom_oop_field_preloaded_addr(); + } + } else if (decorators & ON_WEAK_OOP_REF) { + if (decorators & AS_NO_KEEPALIVE) { + return weak_load_barrier_on_weak_oop_field_preloaded_addr(); + } else { + return load_barrier_on_weak_oop_field_preloaded_addr(); + } + } else { + if (decorators & AS_NO_KEEPALIVE) { + return weak_load_barrier_on_oop_field_preloaded_addr(); + } else { + return load_barrier_on_oop_field_preloaded_addr(); + } + } +} + +address XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr() { + return reinterpret_cast
<address>(load_barrier_on_oop_field_preloaded); +} + +address XBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr() { + return reinterpret_cast
<address>(load_barrier_on_weak_oop_field_preloaded); +} + +address XBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr() { + return reinterpret_cast
<address>(load_barrier_on_phantom_oop_field_preloaded); +} + +address XBarrierSetRuntime::weak_load_barrier_on_oop_field_preloaded_addr() { + return reinterpret_cast
<address>(weak_load_barrier_on_oop_field_preloaded); +} + +address XBarrierSetRuntime::weak_load_barrier_on_weak_oop_field_preloaded_addr() { + return reinterpret_cast
<address>(weak_load_barrier_on_weak_oop_field_preloaded); +} + +address XBarrierSetRuntime::weak_load_barrier_on_phantom_oop_field_preloaded_addr() { + return reinterpret_cast
<address>(weak_load_barrier_on_phantom_oop_field_preloaded); +} + +address XBarrierSetRuntime::load_barrier_on_oop_array_addr() { + return reinterpret_cast
<address>(load_barrier_on_oop_array); +} + +address XBarrierSetRuntime::clone_addr() { + return reinterpret_cast<address>
(clone); +} diff --git a/src/hotspot/share/gc/x/xBarrierSetRuntime.hpp b/src/hotspot/share/gc/x/xBarrierSetRuntime.hpp new file mode 100644 index 0000000000000..6302f1ce36dc0 --- /dev/null +++ b/src/hotspot/share/gc/x/xBarrierSetRuntime.hpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XBARRIERSETRUNTIME_HPP +#define SHARE_GC_X_XBARRIERSETRUNTIME_HPP + +#include "memory/allStatic.hpp" +#include "oops/accessDecorators.hpp" +#include "utilities/globalDefinitions.hpp" + +class oopDesc; + +class XBarrierSetRuntime : public AllStatic { +private: + static oopDesc* load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p); + static oopDesc* load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p); + static oopDesc* load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p); + static oopDesc* weak_load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p); + static oopDesc* weak_load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p); + static oopDesc* weak_load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p); + static void load_barrier_on_oop_array(oop* p, size_t length); + static void clone(oopDesc* src, oopDesc* dst, size_t size); + +public: + static address load_barrier_on_oop_field_preloaded_addr(DecoratorSet decorators); + static address load_barrier_on_oop_field_preloaded_addr(); + static address load_barrier_on_weak_oop_field_preloaded_addr(); + static address load_barrier_on_phantom_oop_field_preloaded_addr(); + static address weak_load_barrier_on_oop_field_preloaded_addr(); + static address weak_load_barrier_on_weak_oop_field_preloaded_addr(); + static address weak_load_barrier_on_phantom_oop_field_preloaded_addr(); + static address load_barrier_on_oop_array_addr(); + static address clone_addr(); +}; + +#endif // SHARE_GC_X_XBARRIERSETRUNTIME_HPP diff --git a/src/hotspot/share/gc/x/xBarrierSetStackChunk.cpp b/src/hotspot/share/gc/x/xBarrierSetStackChunk.cpp new file mode 100644 index 0000000000000..37e36597c1da9 --- /dev/null +++ b/src/hotspot/share/gc/x/xBarrierSetStackChunk.cpp @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
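load_barrier_on_oop_field_preloaded_addr(DecoratorSet) above is what the compiler back ends use to pick a runtime stub: the ON_*_OOP_REF decorator selects the reference strength and AS_NO_KEEPALIVE selects the weak (non-marking) flavour. Illustrative calls, following the dispatch code above:

  // Keep-alive load of a weak referent (e.g. Reference.get()).
  address a1 = XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(ON_WEAK_OOP_REF);
  // Peek at a weak referent without keeping it alive.
  address a2 = XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(ON_WEAK_OOP_REF | AS_NO_KEEPALIVE);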
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/x/xBarrier.inline.hpp" +#include "gc/x/xBarrierSetStackChunk.hpp" +#include "runtime/atomic.hpp" +#include "utilities/debug.hpp" + +void XBarrierSetStackChunk::encode_gc_mode(stackChunkOop chunk, OopIterator* iterator) { + // Do nothing +} + +void XBarrierSetStackChunk::decode_gc_mode(stackChunkOop chunk, OopIterator* iterator) { + // Do nothing +} + +oop XBarrierSetStackChunk::load_oop(stackChunkOop chunk, oop* addr) { + oop obj = Atomic::load(addr); + return XBarrier::load_barrier_on_oop_field_preloaded((volatile oop*)NULL, obj); +} + +oop XBarrierSetStackChunk::load_oop(stackChunkOop chunk, narrowOop* addr) { + ShouldNotReachHere(); + return NULL; +} diff --git a/src/hotspot/share/gc/x/xBarrierSetStackChunk.hpp b/src/hotspot/share/gc/x/xBarrierSetStackChunk.hpp new file mode 100644 index 0000000000000..36180db7b8c4a --- /dev/null +++ b/src/hotspot/share/gc/x/xBarrierSetStackChunk.hpp @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_GC_X_XBARRIERSETSTACKCHUNK_HPP +#define SHARE_GC_X_XBARRIERSETSTACKCHUNK_HPP + +#include "gc/shared/barrierSetStackChunk.hpp" +#include "memory/iterator.hpp" +#include "oops/oopsHierarchy.hpp" +#include "utilities/globalDefinitions.hpp" + +class OopClosure; + +class XBarrierSetStackChunk : public BarrierSetStackChunk { +public: + virtual void encode_gc_mode(stackChunkOop chunk, OopIterator* iterator) override; + virtual void decode_gc_mode(stackChunkOop chunk, OopIterator* iterator) override; + + virtual oop load_oop(stackChunkOop chunk, oop* addr) override; + virtual oop load_oop(stackChunkOop chunk, narrowOop* addr) override; +}; + +#endif // SHARE_GC_X_XBARRIERSETSTACKCHUNK_HPP diff --git a/src/hotspot/share/gc/x/xBitField.hpp b/src/hotspot/share/gc/x/xBitField.hpp new file mode 100644 index 0000000000000..f11d7cf7ef7a4 --- /dev/null +++ b/src/hotspot/share/gc/x/xBitField.hpp @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XBITFIELD_HPP +#define SHARE_GC_X_XBITFIELD_HPP + +#include "memory/allStatic.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" + +// +// Example +// ------- +// +// typedef XBitField field_word_aligned_size; +// typedef XBitField field_length; +// +// +// 6 3 3 +// 3 2 1 2 10 +// +-----------------------------------+---------------------------------+--+ +// |11111111 11111111 11111111 11111111|11111111 11111111 11111111 111111|11| +// +-----------------------------------+---------------------------------+--+ +// | | | +// | 31-2 field_length (30-bits) * | +// | | +// | 1-0 field_word_aligned_size (2-bits) * +// | +// * 63-32 Unused (32-bits) +// +// +// field_word_aligned_size::encode(16) = 2 +// field_length::encode(2342) = 9368 +// +// field_word_aligned_size::decode(9368 | 2) = 16 +// field_length::decode(9368 | 2) = 2342 +// + +template +class XBitField : public AllStatic { +private: + static const int ContainerBits = sizeof(ContainerType) * BitsPerByte; + + static_assert(FieldBits < ContainerBits, "Field too large"); + static_assert(FieldShift + FieldBits <= ContainerBits, "Field too large"); + static_assert(ValueShift + FieldBits <= ContainerBits, "Field too large"); + + static const ContainerType FieldMask = (((ContainerType)1 << FieldBits) - 1); + +public: + static ValueType decode(ContainerType container) { + return (ValueType)(((container >> FieldShift) & FieldMask) << ValueShift); + } + + static ContainerType encode(ValueType value) { + assert(((ContainerType)value & (FieldMask << ValueShift)) == (ContainerType)value, "Invalid value"); + return ((ContainerType)value >> ValueShift) << FieldShift; + } +}; + +#endif // SHARE_GC_X_XBITFIELD_HPP diff --git a/src/hotspot/share/gc/x/xBitMap.hpp b/src/hotspot/share/gc/x/xBitMap.hpp new file mode 100644 index 0000000000000..c96f63b4c8985 --- /dev/null +++ b/src/hotspot/share/gc/x/xBitMap.hpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
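The example block in the XBitField comment above has lost its template argument lists in this rendering. Working backwards from the layout it describes (a 2-bit word-aligned-size field at bits 1-0 with a value shift of 3, and a 30-bit length field at bits 31-2 of a 64-bit container), the typedefs were presumably along these lines (reconstruction, not verbatim):

  typedef XBitField<uint64_t, size_t, 0, 2, 3> field_word_aligned_size;
  typedef XBitField<uint64_t, size_t, 2, 30>   field_length;

  // Consistent with the worked example in the comment:
  //   field_word_aligned_size::encode(16)       == (16 >> 3) << 0 == 2
  //   field_length::encode(2342)                == 2342 << 2      == 9368
  //   field_word_aligned_size::decode(9368 | 2) == 16
  //   field_length::decode(9368 | 2)            == 2342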
+ */ + +#ifndef SHARE_GC_X_XBITMAP_HPP +#define SHARE_GC_X_XBITMAP_HPP + +#include "utilities/bitMap.hpp" + +class XBitMap : public CHeapBitMap { +private: + static bm_word_t bit_mask_pair(idx_t bit); + + bool par_set_bit_pair_finalizable(idx_t bit, bool& inc_live); + bool par_set_bit_pair_strong(idx_t bit, bool& inc_live); + +public: + XBitMap(idx_t size_in_bits); + + bool par_set_bit_pair(idx_t bit, bool finalizable, bool& inc_live); +}; + +#endif // SHARE_GC_X_XBITMAP_HPP diff --git a/src/hotspot/share/gc/x/xBitMap.inline.hpp b/src/hotspot/share/gc/x/xBitMap.inline.hpp new file mode 100644 index 0000000000000..e35f59eeb880e --- /dev/null +++ b/src/hotspot/share/gc/x/xBitMap.inline.hpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XBITMAP_INLINE_HPP +#define SHARE_GC_X_XBITMAP_INLINE_HPP + +#include "gc/x/xBitMap.hpp" + +#include "runtime/atomic.hpp" +#include "utilities/bitMap.inline.hpp" +#include "utilities/debug.hpp" + +inline XBitMap::XBitMap(idx_t size_in_bits) : + CHeapBitMap(size_in_bits, mtGC, false /* clear */) {} + +inline BitMap::bm_word_t XBitMap::bit_mask_pair(idx_t bit) { + assert(bit_in_word(bit) < BitsPerWord - 1, "Invalid bit index"); + return (bm_word_t)3 << bit_in_word(bit); +} + +inline bool XBitMap::par_set_bit_pair_finalizable(idx_t bit, bool& inc_live) { + inc_live = par_set_bit(bit); + return inc_live; +} + +inline bool XBitMap::par_set_bit_pair_strong(idx_t bit, bool& inc_live) { + verify_index(bit); + volatile bm_word_t* const addr = word_addr(bit); + const bm_word_t pair_mask = bit_mask_pair(bit); + bm_word_t old_val = *addr; + + do { + const bm_word_t new_val = old_val | pair_mask; + if (new_val == old_val) { + // Someone else beat us to it + inc_live = false; + return false; + } + const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val); + if (cur_val == old_val) { + // Success + const bm_word_t marked_mask = bit_mask(bit); + inc_live = !(old_val & marked_mask); + return true; + } + + // The value changed, retry + old_val = cur_val; + } while (true); +} + +inline bool XBitMap::par_set_bit_pair(idx_t bit, bool finalizable, bool& inc_live) { + if (finalizable) { + return par_set_bit_pair_finalizable(bit, inc_live); + } else { + return par_set_bit_pair_strong(bit, inc_live); + } +} + +#endif // SHARE_GC_X_XBITMAP_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xBreakpoint.cpp b/src/hotspot/share/gc/x/xBreakpoint.cpp new file mode 100644 index 0000000000000..e053ceaedb956 --- /dev/null +++ b/src/hotspot/share/gc/x/xBreakpoint.cpp @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/shared/concurrentGCBreakpoints.hpp" +#include "gc/x/xBreakpoint.hpp" +#include "runtime/mutexLocker.hpp" +#include "utilities/debug.hpp" + +bool XBreakpoint::_start_gc = false; + +void XBreakpoint::start_gc() { + MonitorLocker ml(ConcurrentGCBreakpoints::monitor()); + assert(ConcurrentGCBreakpoints::is_controlled(), "Invalid state"); + assert(!_start_gc, "Invalid state"); + _start_gc = true; + ml.notify_all(); +} + +void XBreakpoint::at_before_gc() { + MonitorLocker ml(ConcurrentGCBreakpoints::monitor(), Mutex::_no_safepoint_check_flag); + while (ConcurrentGCBreakpoints::is_controlled() && !_start_gc) { + ml.wait(); + } + _start_gc = false; + ConcurrentGCBreakpoints::notify_idle_to_active(); +} + +void XBreakpoint::at_after_gc() { + ConcurrentGCBreakpoints::notify_active_to_idle(); +} + +void XBreakpoint::at_after_marking_started() { + ConcurrentGCBreakpoints::at("AFTER MARKING STARTED"); +} + +void XBreakpoint::at_before_marking_completed() { + ConcurrentGCBreakpoints::at("BEFORE MARKING COMPLETED"); +} + +void XBreakpoint::at_after_reference_processing_started() { + ConcurrentGCBreakpoints::at("AFTER CONCURRENT REFERENCE PROCESSING STARTED"); +} diff --git a/src/hotspot/share/gc/x/xBreakpoint.hpp b/src/hotspot/share/gc/x/xBreakpoint.hpp new file mode 100644 index 0000000000000..0c0b9d3c90f69 --- /dev/null +++ b/src/hotspot/share/gc/x/xBreakpoint.hpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XBREAKPOINT_HPP +#define SHARE_GC_X_XBREAKPOINT_HPP + +#include "memory/allStatic.hpp" + +class XBreakpoint : public AllStatic { +private: + static bool _start_gc; + +public: + static void start_gc(); + + static void at_before_gc(); + static void at_after_gc(); + static void at_after_marking_started(); + static void at_before_marking_completed(); + static void at_after_reference_processing_started(); +}; + +#endif // SHARE_GC_X_XBREAKPOINT_HPP diff --git a/src/hotspot/share/gc/x/xCPU.cpp b/src/hotspot/share/gc/x/xCPU.cpp new file mode 100644 index 0000000000000..d212739a305b7 --- /dev/null +++ b/src/hotspot/share/gc/x/xCPU.cpp @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/x/xCPU.inline.hpp" +#include "memory/padded.inline.hpp" +#include "runtime/javaThread.hpp" +#include "runtime/os.hpp" +#include "utilities/debug.hpp" + +#define XCPU_UNKNOWN_AFFINITY ((Thread*)-1) +#define XCPU_UNKNOWN_SELF ((Thread*)-2) + +PaddedEnd* XCPU::_affinity = NULL; +THREAD_LOCAL Thread* XCPU::_self = XCPU_UNKNOWN_SELF; +THREAD_LOCAL uint32_t XCPU::_cpu = 0; + +void XCPU::initialize() { + assert(_affinity == NULL, "Already initialized"); + const uint32_t ncpus = count(); + + _affinity = PaddedArray::create_unfreeable(ncpus); + + for (uint32_t i = 0; i < ncpus; i++) { + _affinity[i]._thread = XCPU_UNKNOWN_AFFINITY; + } + + log_info_p(gc, init)("CPUs: %u total, %u available", + os::processor_count(), + os::initial_active_processor_count()); +} + +uint32_t XCPU::id_slow() { + // Set current thread + if (_self == XCPU_UNKNOWN_SELF) { + _self = Thread::current(); + } + + // Set current CPU + _cpu = os::processor_id(); + + // Update affinity table + _affinity[_cpu]._thread = _self; + + return _cpu; +} diff --git a/src/hotspot/share/gc/x/xCPU.hpp b/src/hotspot/share/gc/x/xCPU.hpp new file mode 100644 index 0000000000000..fd931956c4bdc --- /dev/null +++ b/src/hotspot/share/gc/x/xCPU.hpp @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XCPU_HPP +#define SHARE_GC_X_XCPU_HPP + +#include "memory/allStatic.hpp" +#include "memory/padded.hpp" +#include "utilities/globalDefinitions.hpp" + +class Thread; + +class XCPU : public AllStatic { +private: + struct XCPUAffinity { + Thread* _thread; + }; + + static PaddedEnd* _affinity; + static THREAD_LOCAL Thread* _self; + static THREAD_LOCAL uint32_t _cpu; + + static uint32_t id_slow(); + +public: + static void initialize(); + + static uint32_t count(); + static uint32_t id(); +}; + +#endif // SHARE_GC_X_XCPU_HPP diff --git a/src/hotspot/share/gc/x/xCPU.inline.hpp b/src/hotspot/share/gc/x/xCPU.inline.hpp new file mode 100644 index 0000000000000..ce1f4ec65c95a --- /dev/null +++ b/src/hotspot/share/gc/x/xCPU.inline.hpp @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XCPU_INLINE_HPP +#define SHARE_GC_X_XCPU_INLINE_HPP + +#include "gc/x/xCPU.hpp" + +#include "runtime/os.hpp" +#include "utilities/debug.hpp" + +inline uint32_t XCPU::count() { + return os::processor_count(); +} + +inline uint32_t XCPU::id() { + assert(_affinity != NULL, "Not initialized"); + + // Fast path + if (_affinity[_cpu]._thread == _self) { + return _cpu; + } + + // Slow path + return id_slow(); +} + +#endif // SHARE_GC_X_XCPU_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xCollectedHeap.cpp b/src/hotspot/share/gc/x/xCollectedHeap.cpp new file mode 100644 index 0000000000000..935441d627ad2 --- /dev/null +++ b/src/hotspot/share/gc/x/xCollectedHeap.cpp @@ -0,0 +1,351 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "classfile/classLoaderData.hpp" +#include "gc/shared/gcHeapSummary.hpp" +#include "gc/shared/gcLocker.inline.hpp" +#include "gc/shared/suspendibleThreadSet.hpp" +#include "gc/x/xCollectedHeap.hpp" +#include "gc/x/xDirector.hpp" +#include "gc/x/xDriver.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xHeap.inline.hpp" +#include "gc/x/xNMethod.hpp" +#include "gc/x/xObjArrayAllocator.hpp" +#include "gc/x/xOop.inline.hpp" +#include "gc/x/xServiceability.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xUtils.inline.hpp" +#include "memory/classLoaderMetaspace.hpp" +#include "memory/iterator.hpp" +#include "memory/metaspaceCriticalAllocation.hpp" +#include "memory/universe.hpp" +#include "oops/stackChunkOop.hpp" +#include "runtime/continuationJavaClasses.hpp" +#include "utilities/align.hpp" + +XCollectedHeap* XCollectedHeap::heap() { + return named_heap(CollectedHeap::Z); +} + +XCollectedHeap::XCollectedHeap() : + _soft_ref_policy(), + _barrier_set(), + _initialize(&_barrier_set), + _heap(), + _driver(new XDriver()), + _director(new XDirector(_driver)), + _stat(new XStat()), + _runtime_workers() {} + +CollectedHeap::Name XCollectedHeap::kind() const { + return CollectedHeap::Z; +} + +const char* XCollectedHeap::name() const { + return XName; +} + +jint XCollectedHeap::initialize() { + if (!_heap.is_initialized()) { + return JNI_ENOMEM; + } + + Universe::calculate_verify_data((HeapWord*)0, (HeapWord*)UINTPTR_MAX); + + return JNI_OK; +} + +void XCollectedHeap::initialize_serviceability() { + _heap.serviceability_initialize(); +} + +class XStopConcurrentGCThreadClosure : public ThreadClosure { +public: + virtual void do_thread(Thread* thread) { + if (thread->is_ConcurrentGC_thread()) { + ConcurrentGCThread::cast(thread)->stop(); + } + } +}; + +void XCollectedHeap::stop() { + XStopConcurrentGCThreadClosure cl; + gc_threads_do(&cl); +} + +SoftRefPolicy* XCollectedHeap::soft_ref_policy() { + return &_soft_ref_policy; +} + +size_t XCollectedHeap::max_capacity() const { + return _heap.max_capacity(); +} + +size_t XCollectedHeap::capacity() const { + return _heap.capacity(); +} + +size_t XCollectedHeap::used() const { + return _heap.used(); +} + +size_t XCollectedHeap::unused() const { + return _heap.unused(); +} + +bool XCollectedHeap::is_maximal_no_gc() const { + // Not supported + ShouldNotReachHere(); + return false; +} + +bool XCollectedHeap::is_in(const void* p) const { + return _heap.is_in((uintptr_t)p); +} + +bool XCollectedHeap::requires_barriers(stackChunkOop obj) const { + uintptr_t* cont_addr = obj->field_addr(jdk_internal_vm_StackChunk::cont_offset()); + + if (!_heap.is_allocating(cast_from_oop(obj))) { + // An object that isn't allocating, is visible from GC tracing. Such + // stack chunks require barriers. + return true; + } + + if (!XAddress::is_good_or_null(*cont_addr)) { + // If a chunk is allocated after a GC started, but before relocate start + // we can have an allocating chunk that isn't deeply good. That means that + // the contained oops might be bad and require GC barriers. + return true; + } + + // The chunk is allocating and its pointers are good. 
This chunk needs no + // GC barriers + return false; +} + +HeapWord* XCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) { + const size_t size_in_bytes = XUtils::words_to_bytes(align_object_size(requested_size)); + const uintptr_t addr = _heap.alloc_tlab(size_in_bytes); + + if (addr != 0) { + *actual_size = requested_size; + } + + return (HeapWord*)addr; +} + +oop XCollectedHeap::array_allocate(Klass* klass, size_t size, int length, bool do_zero, TRAPS) { + XObjArrayAllocator allocator(klass, size, length, do_zero, THREAD); + return allocator.allocate(); +} + +HeapWord* XCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) { + const size_t size_in_bytes = XUtils::words_to_bytes(align_object_size(size)); + return (HeapWord*)_heap.alloc_object(size_in_bytes); +} + +MetaWord* XCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, + size_t size, + Metaspace::MetadataType mdtype) { + // Start asynchronous GC + collect(GCCause::_metadata_GC_threshold); + + // Expand and retry allocation + MetaWord* const result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype); + if (result != NULL) { + return result; + } + + // As a last resort, try a critical allocation, riding on a synchronous full GC + return MetaspaceCriticalAllocation::allocate(loader_data, size, mdtype); +} + +void XCollectedHeap::collect(GCCause::Cause cause) { + _driver->collect(cause); +} + +void XCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { + // These collection requests are ignored since ZGC can't run a synchronous + // GC cycle from within the VM thread. This is considered benign, since the + // only GC causes coming in here should be heap dumper and heap inspector. + // If the heap dumper or heap inspector explicitly requests a gc and the + // caller is not the VM thread a synchronous GC cycle is performed from the + // caller thread in the prologue. 
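  // A minimal sketch of that split, assuming a hypothetical VM operation type
  // VM_MyHeapInspection (illustrative only, not part of these files): the
  // synchronous cycle is requested from the caller thread in doit_prologue(),
  // so by the time doit() runs in the VM thread this method has nothing left
  // to do.
  //
  //   class VM_MyHeapInspection : public VM_Operation {
  //   public:
  //     virtual VMOp_Type type() const { return VMOp_HeapDumper; }
  //     virtual bool doit_prologue() {
  //       // Caller thread (not the VM thread): run the synchronous GC here
  //       Universe::heap()->collect(GCCause::_heap_inspection);
  //       return true;
  //     }
  //     virtual void doit() {
  //       // VM thread: only the benign no-op below remains
  //       Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
  //     }
  //   };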
+ assert(Thread::current()->is_VM_thread(), "Should be the VM thread"); + guarantee(cause == GCCause::_heap_dump || + cause == GCCause::_heap_inspection, "Invalid cause"); +} + +void XCollectedHeap::do_full_collection(bool clear_all_soft_refs) { + // Not supported + ShouldNotReachHere(); +} + +size_t XCollectedHeap::tlab_capacity(Thread* ignored) const { + return _heap.tlab_capacity(); +} + +size_t XCollectedHeap::tlab_used(Thread* ignored) const { + return _heap.tlab_used(); +} + +size_t XCollectedHeap::max_tlab_size() const { + return _heap.max_tlab_size(); +} + +size_t XCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { + return _heap.unsafe_max_tlab_alloc(); +} + +bool XCollectedHeap::uses_stack_watermark_barrier() const { + return true; +} + +MemoryUsage XCollectedHeap::memory_usage() { + return _heap.serviceability_memory_pool()->get_memory_usage(); +} + +GrowableArray XCollectedHeap::memory_managers() { + GrowableArray memory_managers(2); + memory_managers.append(_heap.serviceability_cycle_memory_manager()); + memory_managers.append(_heap.serviceability_pause_memory_manager()); + return memory_managers; +} + +GrowableArray XCollectedHeap::memory_pools() { + GrowableArray memory_pools(1); + memory_pools.append(_heap.serviceability_memory_pool()); + return memory_pools; +} + +void XCollectedHeap::object_iterate(ObjectClosure* cl) { + _heap.object_iterate(cl, true /* visit_weaks */); +} + +ParallelObjectIteratorImpl* XCollectedHeap::parallel_object_iterator(uint nworkers) { + return _heap.parallel_object_iterator(nworkers, true /* visit_weaks */); +} + +void XCollectedHeap::keep_alive(oop obj) { + _heap.keep_alive(obj); +} + +void XCollectedHeap::register_nmethod(nmethod* nm) { + XNMethod::register_nmethod(nm); +} + +void XCollectedHeap::unregister_nmethod(nmethod* nm) { + XNMethod::unregister_nmethod(nm); +} + +void XCollectedHeap::verify_nmethod(nmethod* nm) { + // Does nothing +} + +WorkerThreads* XCollectedHeap::safepoint_workers() { + return _runtime_workers.workers(); +} + +void XCollectedHeap::gc_threads_do(ThreadClosure* tc) const { + tc->do_thread(_director); + tc->do_thread(_driver); + tc->do_thread(_stat); + _heap.threads_do(tc); + _runtime_workers.threads_do(tc); +} + +VirtualSpaceSummary XCollectedHeap::create_heap_space_summary() { + return VirtualSpaceSummary((HeapWord*)0, (HeapWord*)capacity(), (HeapWord*)max_capacity()); +} + +void XCollectedHeap::safepoint_synchronize_begin() { + SuspendibleThreadSet::synchronize(); +} + +void XCollectedHeap::safepoint_synchronize_end() { + SuspendibleThreadSet::desynchronize(); +} + +void XCollectedHeap::pin_object(JavaThread* thread, oop obj) { + GCLocker::lock_critical(thread); +} + +void XCollectedHeap::unpin_object(JavaThread* thread, oop obj) { + GCLocker::unlock_critical(thread); +} + +void XCollectedHeap::prepare_for_verify() { + // Does nothing +} + +void XCollectedHeap::print_on(outputStream* st) const { + _heap.print_on(st); +} + +void XCollectedHeap::print_on_error(outputStream* st) const { + st->print_cr("ZGC Globals:"); + st->print_cr(" GlobalPhase: %u (%s)", XGlobalPhase, XGlobalPhaseToString()); + st->print_cr(" GlobalSeqNum: %u", XGlobalSeqNum); + st->print_cr(" Offset Max: " SIZE_FORMAT "%s (" PTR_FORMAT ")", + byte_size_in_exact_unit(XAddressOffsetMax), + exact_unit_for_byte_size(XAddressOffsetMax), + XAddressOffsetMax); + st->print_cr(" Page Size Small: " SIZE_FORMAT "M", XPageSizeSmall / M); + st->print_cr(" Page Size Medium: " SIZE_FORMAT "M", XPageSizeMedium / M); + st->cr(); + st->print_cr("ZGC 
Metadata Bits:"); + st->print_cr(" Good: " PTR_FORMAT, XAddressGoodMask); + st->print_cr(" Bad: " PTR_FORMAT, XAddressBadMask); + st->print_cr(" WeakBad: " PTR_FORMAT, XAddressWeakBadMask); + st->print_cr(" Marked: " PTR_FORMAT, XAddressMetadataMarked); + st->print_cr(" Remapped: " PTR_FORMAT, XAddressMetadataRemapped); + st->cr(); + CollectedHeap::print_on_error(st); +} + +void XCollectedHeap::print_extended_on(outputStream* st) const { + _heap.print_extended_on(st); +} + +void XCollectedHeap::print_tracing_info() const { + // Does nothing +} + +bool XCollectedHeap::print_location(outputStream* st, void* addr) const { + return _heap.print_location(st, (uintptr_t)addr); +} + +void XCollectedHeap::verify(VerifyOption option /* ignored */) { + _heap.verify(); +} + +bool XCollectedHeap::is_oop(oop object) const { + return _heap.is_oop(XOop::to_address(object)); +} + +bool XCollectedHeap::supports_concurrent_gc_breakpoints() const { + return true; +} diff --git a/src/hotspot/share/gc/x/xCollectedHeap.hpp b/src/hotspot/share/gc/x/xCollectedHeap.hpp new file mode 100644 index 0000000000000..302963ca2c46b --- /dev/null +++ b/src/hotspot/share/gc/x/xCollectedHeap.hpp @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XCOLLECTEDHEAP_HPP +#define SHARE_GC_X_XCOLLECTEDHEAP_HPP + +#include "gc/shared/collectedHeap.hpp" +#include "gc/shared/softRefPolicy.hpp" +#include "gc/x/xBarrierSet.hpp" +#include "gc/x/xHeap.hpp" +#include "gc/x/xInitialize.hpp" +#include "gc/x/xRuntimeWorkers.hpp" +#include "memory/metaspace.hpp" +#include "services/memoryUsage.hpp" + +class VMStructs; +class XDirector; +class XDriver; +class XStat; + +class XCollectedHeap : public CollectedHeap { + friend class ::VMStructs; + +private: + SoftRefPolicy _soft_ref_policy; + XBarrierSet _barrier_set; + XInitialize _initialize; + XHeap _heap; + XDriver* _driver; + XDirector* _director; + XStat* _stat; + XRuntimeWorkers _runtime_workers; + + HeapWord* allocate_new_tlab(size_t min_size, + size_t requested_size, + size_t* actual_size) override; + +public: + static XCollectedHeap* heap(); + + XCollectedHeap(); + Name kind() const override; + const char* name() const override; + jint initialize() override; + void initialize_serviceability() override; + void stop() override; + + SoftRefPolicy* soft_ref_policy() override; + + size_t max_capacity() const override; + size_t capacity() const override; + size_t used() const override; + size_t unused() const override; + + bool is_maximal_no_gc() const override; + bool is_in(const void* p) const override; + bool requires_barriers(stackChunkOop obj) const override; + + oop array_allocate(Klass* klass, size_t size, int length, bool do_zero, TRAPS) override; + HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) override; + MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, + size_t size, + Metaspace::MetadataType mdtype) override; + void collect(GCCause::Cause cause) override; + void collect_as_vm_thread(GCCause::Cause cause) override; + void do_full_collection(bool clear_all_soft_refs) override; + + size_t tlab_capacity(Thread* thr) const override; + size_t tlab_used(Thread* thr) const override; + size_t max_tlab_size() const override; + size_t unsafe_max_tlab_alloc(Thread* thr) const override; + + bool uses_stack_watermark_barrier() const override; + + MemoryUsage memory_usage() override; + GrowableArray memory_managers() override; + GrowableArray memory_pools() override; + + void object_iterate(ObjectClosure* cl) override; + ParallelObjectIteratorImpl* parallel_object_iterator(uint nworkers) override; + + void keep_alive(oop obj) override; + + void register_nmethod(nmethod* nm) override; + void unregister_nmethod(nmethod* nm) override; + void verify_nmethod(nmethod* nmethod) override; + + WorkerThreads* safepoint_workers() override; + + void gc_threads_do(ThreadClosure* tc) const override; + + VirtualSpaceSummary create_heap_space_summary() override; + + void safepoint_synchronize_begin() override; + void safepoint_synchronize_end() override; + + void pin_object(JavaThread* thread, oop obj) override; + void unpin_object(JavaThread* thread, oop obj) override; + + void print_on(outputStream* st) const override; + void print_on_error(outputStream* st) const override; + void print_extended_on(outputStream* st) const override; + void print_tracing_info() const override; + bool print_location(outputStream* st, void* addr) const override; + + void prepare_for_verify() override; + void verify(VerifyOption option /* ignored */) override; + bool is_oop(oop object) const override; + bool supports_concurrent_gc_breakpoints() const override; +}; + +#endif // SHARE_GC_X_XCOLLECTEDHEAP_HPP diff --git a/src/hotspot/share/gc/x/xDebug.gdb 
b/src/hotspot/share/gc/x/xDebug.gdb new file mode 100644 index 0000000000000..d8f79d589c841 --- /dev/null +++ b/src/hotspot/share/gc/x/xDebug.gdb @@ -0,0 +1,148 @@ +# +# GDB functions for debugging the Z Garbage Collector +# + +printf "Loading zDebug.gdb\n" + +# Print Klass* +define zpk + printf "Klass: %s\n", (char*)((Klass*)($arg0))->_name->_body +end + +# Print oop +define zpo + set $obj = (oopDesc*)($arg0) + + printf "Oop: 0x%016llx\tState: ", (uintptr_t)$obj + if ((uintptr_t)$obj & (uintptr_t)XAddressGoodMask) + printf "Good " + if ((uintptr_t)$obj & (uintptr_t)XAddressMetadataRemapped) + printf "(Remapped)" + else + if ((uintptr_t)$obj & (uintptr_t)XAddressMetadataMarked) + printf "(Marked)" + else + printf "(Unknown)" + end + end + else + printf "Bad " + if ((uintptr_t)XAddressGoodMask & (uintptr_t)XAddressMetadataMarked) + # Should be marked + if ((uintptr_t)$obj & (uintptr_t)XAddressMetadataRemapped) + printf "(Not Marked, Remapped)" + else + printf "(Not Marked, Not Remapped)" + end + else + if ((uintptr_t)XAddressGoodMask & (uintptr_t)XAddressMetadataRemapped) + # Should be remapped + if ((uintptr_t)$obj & (uintptr_t)XAddressMetadataMarked) + printf "(Marked, Not Remapped)" + else + printf "(Not Marked, Not Remapped)" + end + else + # Unknown + printf "(Unknown)" + end + end + end + printf "\t Page: %llu\n", ((uintptr_t)$obj & XAddressOffsetMask) >> XGranuleSizeShift + x/16gx $obj + if (UseCompressedClassPointers) + set $klass = (Klass*)(void*)((uintptr_t)CompressedKlassPointers::_narrow_klass._base +((uintptr_t)$obj->_metadata->_compressed_klass << CompressedKlassPointers::_narrow_klass._shift)) + else + set $klass = $obj->_metadata->_klass + end + printf "Mark: 0x%016llx\tKlass: %s\n", (uintptr_t)$obj->_mark, (char*)$klass->_name->_body +end + +# Print heap page by page table index +define zpp + set $page = (XPage*)((uintptr_t)XHeap::_heap._page_table._map._map[($arg0)] & ~1) + printf "Page %p\n", $page + print *$page +end + +# Print page_table +define zpt + printf "Pagetable (first 128 slots)\n" + x/128gx XHeap::_heap._page_table._map._map +end + +# Print live map +define __zmarked + set $livemap = $arg0 + set $bit = $arg1 + set $size = $livemap._bitmap._size + set $segment = $size / XLiveMap::nsegments + set $segment_bit = 1 << $segment + + printf "Segment is " + if !($livemap._segment_live_bits & $segment_bit) + printf "NOT " + end + printf "live (segment %d)\n", $segment + + if $bit >= $size + print "Error: Bit %z out of bounds (bitmap size %z)\n", $bit, $size + else + set $word_index = $bit / 64 + set $bit_index = $bit % 64 + set $word = $livemap._bitmap._map[$word_index] + set $live_bit = $word & (1 << $bit_index) + + printf "Object is " + if $live_bit == 0 + printf "NOT " + end + printf "live (word index %d, bit index %d)\n", $word_index, $bit_index + end +end + +define zmarked + set $addr = $arg0 + set $obj = ((uintptr_t)$addr & XAddressOffsetMask) + set $page_index = $obj >> XGranuleSizeShift + set $page_entry = (uintptr_t)XHeap::_heap._page_table._map._map[$page_index] + set $page = (XPage*)($page_entry & ~1) + set $page_start = (uintptr_t)$page._virtual._start + set $page_end = (uintptr_t)$page._virtual._end + set $page_seqnum = $page._livemap._seqnum + set $global_seqnum = XGlobalSeqNum + + if $obj < $page_start || $obj >= $page_end + printf "Error: %p not in page %p (start %p, end %p)\n", $obj, $page, $page_start, $page_end + else + printf "Page is " + if $page_seqnum != $global_seqnum + printf "NOT " + end + printf "live (page %p, page seqnum %d, global seqnum 
%d)\n", $page, $page_seqnum, $global_seqnum + + #if $page_seqnum == $global_seqnum + set $offset = $obj - $page_start + set $bit = $offset / 8 + __zmarked $page._livemap $bit + #end + end +end + +# Print heap information +define zph + printf "Heap\n" + printf " GlobalPhase: %u\n", XGlobalPhase + printf " GlobalSeqNum: %u\n", XGlobalSeqNum + printf " Offset Max: %-15llu (0x%llx)\n", XAddressOffsetMax, XAddressOffsetMax + printf " Page Size Small: %-15llu (0x%llx)\n", XPageSizeSmall, XPageSizeSmall + printf " Page Size Medium: %-15llu (0x%llx)\n", XPageSizeMedium, XPageSizeMedium + printf "Metadata Bits\n" + printf " Good: 0x%016llx\n", XAddressGoodMask + printf " Bad: 0x%016llx\n", XAddressBadMask + printf " WeakBad: 0x%016llx\n", XAddressWeakBadMask + printf " Marked: 0x%016llx\n", XAddressMetadataMarked + printf " Remapped: 0x%016llx\n", XAddressMetadataRemapped +end + +# End of file diff --git a/src/hotspot/share/gc/x/xDirector.cpp b/src/hotspot/share/gc/x/xDirector.cpp new file mode 100644 index 0000000000000..05ef362374192 --- /dev/null +++ b/src/hotspot/share/gc/x/xDirector.cpp @@ -0,0 +1,406 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/x/xDirector.hpp" +#include "gc/x/xDriver.hpp" +#include "gc/x/xHeap.inline.hpp" +#include "gc/x/xHeuristics.hpp" +#include "gc/x/xStat.hpp" +#include "logging/log.hpp" + +constexpr double one_in_1000 = 3.290527; +constexpr double sample_interval = 1.0 / XStatAllocRate::sample_hz; + +XDirector::XDirector(XDriver* driver) : + _driver(driver), + _metronome(XStatAllocRate::sample_hz) { + set_name("XDirector"); + create_and_start(); +} + +static void sample_allocation_rate() { + // Sample allocation rate. This is needed by rule_allocation_rate() + // below to estimate the time we have until we run out of memory. + const double bytes_per_second = XStatAllocRate::sample_and_reset(); + + log_debug(gc, alloc)("Allocation Rate: %.1fMB/s, Predicted: %.1fMB/s, Avg: %.1f(+/-%.1f)MB/s", + bytes_per_second / M, + XStatAllocRate::predict() / M, + XStatAllocRate::avg() / M, + XStatAllocRate::sd() / M); +} + +static XDriverRequest rule_allocation_stall() { + // Perform GC if we've observed at least one allocation stall since + // the last GC started. 
+ if (!XHeap::heap()->has_alloc_stalled()) { + return GCCause::_no_gc; + } + + log_debug(gc, director)("Rule: Allocation Stall Observed"); + + return GCCause::_z_allocation_stall; +} + +static XDriverRequest rule_warmup() { + if (XStatCycle::is_warm()) { + // Rule disabled + return GCCause::_no_gc; + } + + // Perform GC if heap usage passes 10/20/30% and no other GC has been + // performed yet. This allows us to get some early samples of the GC + // duration, which is needed by the other rules. + const size_t soft_max_capacity = XHeap::heap()->soft_max_capacity(); + const size_t used = XHeap::heap()->used(); + const double used_threshold_percent = (XStatCycle::nwarmup_cycles() + 1) * 0.1; + const size_t used_threshold = soft_max_capacity * used_threshold_percent; + + log_debug(gc, director)("Rule: Warmup %.0f%%, Used: " SIZE_FORMAT "MB, UsedThreshold: " SIZE_FORMAT "MB", + used_threshold_percent * 100, used / M, used_threshold / M); + + if (used < used_threshold) { + return GCCause::_no_gc; + } + + return GCCause::_z_warmup; +} + +static XDriverRequest rule_timer() { + if (ZCollectionInterval <= 0) { + // Rule disabled + return GCCause::_no_gc; + } + + // Perform GC if timer has expired. + const double time_since_last_gc = XStatCycle::time_since_last(); + const double time_until_gc = ZCollectionInterval - time_since_last_gc; + + log_debug(gc, director)("Rule: Timer, Interval: %.3fs, TimeUntilGC: %.3fs", + ZCollectionInterval, time_until_gc); + + if (time_until_gc > 0) { + return GCCause::_no_gc; + } + + return GCCause::_z_timer; +} + +static double estimated_gc_workers(double serial_gc_time, double parallelizable_gc_time, double time_until_deadline) { + const double parallelizable_time_until_deadline = MAX2(time_until_deadline - serial_gc_time, 0.001); + return parallelizable_gc_time / parallelizable_time_until_deadline; +} + +static uint discrete_gc_workers(double gc_workers) { + return clamp(ceil(gc_workers), 1, ConcGCThreads); +} + +static double select_gc_workers(double serial_gc_time, double parallelizable_gc_time, double alloc_rate_sd_percent, double time_until_oom) { + // Use all workers until we're warm + if (!XStatCycle::is_warm()) { + const double not_warm_gc_workers = ConcGCThreads; + log_debug(gc, director)("Select GC Workers (Not Warm), GCWorkers: %.3f", not_warm_gc_workers); + return not_warm_gc_workers; + } + + // Calculate number of GC workers needed to avoid a long GC cycle and to avoid OOM. 
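  // A worked example of the two estimates computed just below (illustrative
  // numbers only): with serial_gc_time = 0.1s, parallelizable_gc_time = 2.0s
  // and time_until_oom = 4.0s,
  //
  //   avoid_long_gc_workers = 2.0 / (10.0 - 0.1) ~= 0.20
  //   avoid_oom_gc_workers  = 2.0 / ( 4.0 - 0.1) ~= 0.51
  //
  // so gc_workers = 0.51, which discrete_gc_workers() rounds up to 1 worker,
  // clamped to the range [1, ConcGCThreads].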
+ const double avoid_long_gc_workers = estimated_gc_workers(serial_gc_time, parallelizable_gc_time, 10 /* seconds */); + const double avoid_oom_gc_workers = estimated_gc_workers(serial_gc_time, parallelizable_gc_time, time_until_oom); + + const double gc_workers = MAX2(avoid_long_gc_workers, avoid_oom_gc_workers); + const uint actual_gc_workers = discrete_gc_workers(gc_workers); + const uint last_gc_workers = XStatCycle::last_active_workers(); + + // More than 15% division from the average is considered unsteady + if (alloc_rate_sd_percent >= 0.15) { + const double half_gc_workers = ConcGCThreads / 2.0; + const double unsteady_gc_workers = MAX3(gc_workers, last_gc_workers, half_gc_workers); + log_debug(gc, director)("Select GC Workers (Unsteady), " + "AvoidLongGCWorkers: %.3f, AvoidOOMGCWorkers: %.3f, LastGCWorkers: %.3f, HalfGCWorkers: %.3f, GCWorkers: %.3f", + avoid_long_gc_workers, avoid_oom_gc_workers, (double)last_gc_workers, half_gc_workers, unsteady_gc_workers); + return unsteady_gc_workers; + } + + if (actual_gc_workers < last_gc_workers) { + // Before decreasing number of GC workers compared to the previous GC cycle, check if the + // next GC cycle will need to increase it again. If so, use the same number of GC workers + // that will be needed in the next cycle. + const double gc_duration_delta = (parallelizable_gc_time / actual_gc_workers) - (parallelizable_gc_time / last_gc_workers); + const double additional_time_for_allocations = XStatCycle::time_since_last() - gc_duration_delta - sample_interval; + const double next_time_until_oom = time_until_oom + additional_time_for_allocations; + const double next_avoid_oom_gc_workers = estimated_gc_workers(serial_gc_time, parallelizable_gc_time, next_time_until_oom); + + // Add 0.5 to increase friction and avoid lowering too eagerly + const double next_gc_workers = next_avoid_oom_gc_workers + 0.5; + const double try_lowering_gc_workers = clamp(next_gc_workers, actual_gc_workers, last_gc_workers); + + log_debug(gc, director)("Select GC Workers (Try Lowering), " + "AvoidLongGCWorkers: %.3f, AvoidOOMGCWorkers: %.3f, NextAvoidOOMGCWorkers: %.3f, LastGCWorkers: %.3f, GCWorkers: %.3f", + avoid_long_gc_workers, avoid_oom_gc_workers, next_avoid_oom_gc_workers, (double)last_gc_workers, try_lowering_gc_workers); + return try_lowering_gc_workers; + } + + log_debug(gc, director)("Select GC Workers (Normal), " + "AvoidLongGCWorkers: %.3f, AvoidOOMGCWorkers: %.3f, LastGCWorkers: %.3f, GCWorkers: %.3f", + avoid_long_gc_workers, avoid_oom_gc_workers, (double)last_gc_workers, gc_workers); + return gc_workers; +} + +XDriverRequest rule_allocation_rate_dynamic() { + if (!XStatCycle::is_time_trustable()) { + // Rule disabled + return GCCause::_no_gc; + } + + // Calculate amount of free memory available. Note that we take the + // relocation headroom into account to avoid in-place relocation. + const size_t soft_max_capacity = XHeap::heap()->soft_max_capacity(); + const size_t used = XHeap::heap()->used(); + const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used); + const size_t free = free_including_headroom - MIN2(free_including_headroom, XHeuristics::relocation_headroom()); + + // Calculate time until OOM given the max allocation rate and the amount + // of free memory. The allocation rate is a moving average and we multiply + // that with an allocation spike tolerance factor to guard against unforeseen + // phase changes in the allocate rate. 
We then add ~3.3 sigma to account for + // the allocation rate variance, which means the probability is 1 in 1000 + // that a sample is outside of the confidence interval. + const double alloc_rate_predict = XStatAllocRate::predict(); + const double alloc_rate_avg = XStatAllocRate::avg(); + const double alloc_rate_sd = XStatAllocRate::sd(); + const double alloc_rate_sd_percent = alloc_rate_sd / (alloc_rate_avg + 1.0); + const double alloc_rate = (MAX2(alloc_rate_predict, alloc_rate_avg) * ZAllocationSpikeTolerance) + (alloc_rate_sd * one_in_1000) + 1.0; + const double time_until_oom = (free / alloc_rate) / (1.0 + alloc_rate_sd_percent); + + // Calculate max serial/parallel times of a GC cycle. The times are + // moving averages, we add ~3.3 sigma to account for the variance. + const double serial_gc_time = XStatCycle::serial_time().davg() + (XStatCycle::serial_time().dsd() * one_in_1000); + const double parallelizable_gc_time = XStatCycle::parallelizable_time().davg() + (XStatCycle::parallelizable_time().dsd() * one_in_1000); + + // Calculate number of GC workers needed to avoid OOM. + const double gc_workers = select_gc_workers(serial_gc_time, parallelizable_gc_time, alloc_rate_sd_percent, time_until_oom); + + // Convert to a discrete number of GC workers within limits. + const uint actual_gc_workers = discrete_gc_workers(gc_workers); + + // Calculate GC duration given number of GC workers needed. + const double actual_gc_duration = serial_gc_time + (parallelizable_gc_time / actual_gc_workers); + const uint last_gc_workers = XStatCycle::last_active_workers(); + + // Calculate time until GC given the time until OOM and GC duration. + // We also subtract the sample interval, so that we don't overshoot the + // target time and end up starting the GC too late in the next interval. + const double time_until_gc = time_until_oom - actual_gc_duration - sample_interval; + + log_debug(gc, director)("Rule: Allocation Rate (Dynamic GC Workers), " + "MaxAllocRate: %.1fMB/s (+/-%.1f%%), Free: " SIZE_FORMAT "MB, GCCPUTime: %.3f, " + "GCDuration: %.3fs, TimeUntilOOM: %.3fs, TimeUntilGC: %.3fs, GCWorkers: %u -> %u", + alloc_rate / M, + alloc_rate_sd_percent * 100, + free / M, + serial_gc_time + parallelizable_gc_time, + serial_gc_time + (parallelizable_gc_time / actual_gc_workers), + time_until_oom, + time_until_gc, + last_gc_workers, + actual_gc_workers); + + if (actual_gc_workers <= last_gc_workers && time_until_gc > 0) { + return XDriverRequest(GCCause::_no_gc, actual_gc_workers); + } + + return XDriverRequest(GCCause::_z_allocation_rate, actual_gc_workers); +} + +static XDriverRequest rule_allocation_rate_static() { + if (!XStatCycle::is_time_trustable()) { + // Rule disabled + return GCCause::_no_gc; + } + + // Perform GC if the estimated max allocation rate indicates that we + // will run out of memory. The estimated max allocation rate is based + // on the moving average of the sampled allocation rate plus a safety + // margin based on variations in the allocation rate and unforeseen + // allocation spikes. + + // Calculate amount of free memory available. Note that we take the + // relocation headroom into account to avoid in-place relocation. 
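  // A worked example of the figures computed below (illustrative numbers
  // only): with soft_max_capacity = 1000MB, used = 700MB and a relocation
  // headroom of 50MB, free = 1000 - 700 - 50 = 250MB. With an average
  // allocation rate of 100MB/s, a standard deviation of 10MB/s and
  // ZAllocationSpikeTolerance = 2,
  //
  //   max_alloc_rate = (100 * 2) + (10 * 3.29) ~= 233MB/s
  //   time_until_oom = 250 / 233               ~= 1.07s
  //
  // and a GC is started once that margin no longer covers the expected GC
  // duration plus one sample interval.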
+ const size_t soft_max_capacity = XHeap::heap()->soft_max_capacity(); + const size_t used = XHeap::heap()->used(); + const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used); + const size_t free = free_including_headroom - MIN2(free_including_headroom, XHeuristics::relocation_headroom()); + + // Calculate time until OOM given the max allocation rate and the amount + // of free memory. The allocation rate is a moving average and we multiply + // that with an allocation spike tolerance factor to guard against unforeseen + // phase changes in the allocate rate. We then add ~3.3 sigma to account for + // the allocation rate variance, which means the probability is 1 in 1000 + // that a sample is outside of the confidence interval. + const double max_alloc_rate = (XStatAllocRate::avg() * ZAllocationSpikeTolerance) + (XStatAllocRate::sd() * one_in_1000); + const double time_until_oom = free / (max_alloc_rate + 1.0); // Plus 1.0B/s to avoid division by zero + + // Calculate max serial/parallel times of a GC cycle. The times are + // moving averages, we add ~3.3 sigma to account for the variance. + const double serial_gc_time = XStatCycle::serial_time().davg() + (XStatCycle::serial_time().dsd() * one_in_1000); + const double parallelizable_gc_time = XStatCycle::parallelizable_time().davg() + (XStatCycle::parallelizable_time().dsd() * one_in_1000); + + // Calculate GC duration given number of GC workers needed. + const double gc_duration = serial_gc_time + (parallelizable_gc_time / ConcGCThreads); + + // Calculate time until GC given the time until OOM and max duration of GC. + // We also deduct the sample interval, so that we don't overshoot the target + // time and end up starting the GC too late in the next interval. + const double time_until_gc = time_until_oom - gc_duration - sample_interval; + + log_debug(gc, director)("Rule: Allocation Rate (Static GC Workers), MaxAllocRate: %.1fMB/s, Free: " SIZE_FORMAT "MB, GCDuration: %.3fs, TimeUntilGC: %.3fs", + max_alloc_rate / M, free / M, gc_duration, time_until_gc); + + if (time_until_gc > 0) { + return GCCause::_no_gc; + } + + return GCCause::_z_allocation_rate; +} + +static XDriverRequest rule_allocation_rate() { + if (UseDynamicNumberOfGCThreads) { + return rule_allocation_rate_dynamic(); + } else { + return rule_allocation_rate_static(); + } +} + +static XDriverRequest rule_high_usage() { + // Perform GC if the amount of free memory is 5% or less. This is a preventive + // meassure in the case where the application has a very low allocation rate, + // such that the allocation rate rule doesn't trigger, but the amount of free + // memory is still slowly but surely heading towards zero. In this situation, + // we start a GC cycle to avoid a potential allocation stall later. + + // Calculate amount of free memory available. Note that we take the + // relocation headroom into account to avoid in-place relocation. 
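  // A worked example of the threshold below (illustrative numbers only): with
  // soft_max_capacity = 1000MB and 40MB of free memory left after subtracting
  // the relocation headroom, free_percent = percent_of(40, 1000) = 4.0%, which
  // is not above the 5% limit, so the rule returns GCCause::_z_high_usage.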
+ const size_t soft_max_capacity = XHeap::heap()->soft_max_capacity(); + const size_t used = XHeap::heap()->used(); + const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used); + const size_t free = free_including_headroom - MIN2(free_including_headroom, XHeuristics::relocation_headroom()); + const double free_percent = percent_of(free, soft_max_capacity); + + log_debug(gc, director)("Rule: High Usage, Free: " SIZE_FORMAT "MB(%.1f%%)", + free / M, free_percent); + + if (free_percent > 5.0) { + return GCCause::_no_gc; + } + + return GCCause::_z_high_usage; +} + +static XDriverRequest rule_proactive() { + if (!ZProactive || !XStatCycle::is_warm()) { + // Rule disabled + return GCCause::_no_gc; + } + + // Perform GC if the impact of doing so, in terms of application throughput + // reduction, is considered acceptable. This rule allows us to keep the heap + // size down and allow reference processing to happen even when we have a lot + // of free space on the heap. + + // Only consider doing a proactive GC if the heap usage has grown by at least + // 10% of the max capacity since the previous GC, or more than 5 minutes has + // passed since the previous GC. This helps avoid superfluous GCs when running + // applications with very low allocation rate. + const size_t used_after_last_gc = XStatHeap::used_at_relocate_end(); + const size_t used_increase_threshold = XHeap::heap()->soft_max_capacity() * 0.10; // 10% + const size_t used_threshold = used_after_last_gc + used_increase_threshold; + const size_t used = XHeap::heap()->used(); + const double time_since_last_gc = XStatCycle::time_since_last(); + const double time_since_last_gc_threshold = 5 * 60; // 5 minutes + if (used < used_threshold && time_since_last_gc < time_since_last_gc_threshold) { + // Don't even consider doing a proactive GC + log_debug(gc, director)("Rule: Proactive, UsedUntilEnabled: " SIZE_FORMAT "MB, TimeUntilEnabled: %.3fs", + (used_threshold - used) / M, + time_since_last_gc_threshold - time_since_last_gc); + return GCCause::_no_gc; + } + + const double assumed_throughput_drop_during_gc = 0.50; // 50% + const double acceptable_throughput_drop = 0.01; // 1% + const double serial_gc_time = XStatCycle::serial_time().davg() + (XStatCycle::serial_time().dsd() * one_in_1000); + const double parallelizable_gc_time = XStatCycle::parallelizable_time().davg() + (XStatCycle::parallelizable_time().dsd() * one_in_1000); + const double gc_duration = serial_gc_time + (parallelizable_gc_time / ConcGCThreads); + const double acceptable_gc_interval = gc_duration * ((assumed_throughput_drop_during_gc / acceptable_throughput_drop) - 1.0); + const double time_until_gc = acceptable_gc_interval - time_since_last_gc; + + log_debug(gc, director)("Rule: Proactive, AcceptableGCInterval: %.3fs, TimeSinceLastGC: %.3fs, TimeUntilGC: %.3fs", + acceptable_gc_interval, time_since_last_gc, time_until_gc); + + if (time_until_gc > 0) { + return GCCause::_no_gc; + } + + return GCCause::_z_proactive; +} + +static XDriverRequest make_gc_decision() { + // List of rules + using XDirectorRule = XDriverRequest (*)(); + const XDirectorRule rules[] = { + rule_allocation_stall, + rule_warmup, + rule_timer, + rule_allocation_rate, + rule_high_usage, + rule_proactive, + }; + + // Execute rules + for (size_t i = 0; i < ARRAY_SIZE(rules); i++) { + const XDriverRequest request = rules[i](); + if (request.cause() != GCCause::_no_gc) { + return request; + } + } + + return GCCause::_no_gc; +} + +void XDirector::run_service() { + // Main loop + 
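  // The metronome below paces this loop at XStatAllocRate::sample_hz ticks per
  // second; wait_for_tick() blocks until the next tick and returns false once
  // stop_service() has stopped the metronome. A minimal sketch of the same
  // pattern, assuming a hypothetical 10 Hz periodic task (illustrative only):
  //
  //   XMetronome metronome(10 /* hz */);
  //   while (metronome.wait_for_tick()) {
  //     // one unit of periodic work, roughly 100ms apart
  //   }
  //   // stop() was called; fall through and let the thread terminate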
while (_metronome.wait_for_tick()) { + sample_allocation_rate(); + if (!_driver->is_busy()) { + const XDriverRequest request = make_gc_decision(); + if (request.cause() != GCCause::_no_gc) { + _driver->collect(request); + } + } + } +} + +void XDirector::stop_service() { + _metronome.stop(); +} diff --git a/src/hotspot/share/gc/x/xDirector.hpp b/src/hotspot/share/gc/x/xDirector.hpp new file mode 100644 index 0000000000000..eacce20e8c9c6 --- /dev/null +++ b/src/hotspot/share/gc/x/xDirector.hpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XDIRECTOR_HPP +#define SHARE_GC_X_XDIRECTOR_HPP + +#include "gc/shared/concurrentGCThread.hpp" +#include "gc/x/xMetronome.hpp" + +class XDriver; + +class XDirector : public ConcurrentGCThread { +private: + XDriver* const _driver; + XMetronome _metronome; + +protected: + virtual void run_service(); + virtual void stop_service(); + +public: + XDirector(XDriver* driver); +}; + +#endif // SHARE_GC_X_XDIRECTOR_HPP diff --git a/src/hotspot/share/gc/x/xDriver.cpp b/src/hotspot/share/gc/x/xDriver.cpp new file mode 100644 index 0000000000000..f76d9f4e58688 --- /dev/null +++ b/src/hotspot/share/gc/x/xDriver.cpp @@ -0,0 +1,513 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/shared/gcId.hpp" +#include "gc/shared/gcLocker.hpp" +#include "gc/shared/gcVMOperations.hpp" +#include "gc/shared/isGCActiveMark.hpp" +#include "gc/x/xAbort.inline.hpp" +#include "gc/x/xBreakpoint.hpp" +#include "gc/x/xCollectedHeap.hpp" +#include "gc/x/xDriver.hpp" +#include "gc/x/xHeap.inline.hpp" +#include "gc/x/xMessagePort.inline.hpp" +#include "gc/x/xServiceability.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xVerify.hpp" +#include "logging/log.hpp" +#include "memory/universe.hpp" +#include "runtime/threads.hpp" +#include "runtime/vmOperations.hpp" +#include "runtime/vmThread.hpp" + +static const XStatPhaseCycle XPhaseCycle("Garbage Collection Cycle"); +static const XStatPhasePause XPhasePauseMarkStart("Pause Mark Start"); +static const XStatPhaseConcurrent XPhaseConcurrentMark("Concurrent Mark"); +static const XStatPhaseConcurrent XPhaseConcurrentMarkContinue("Concurrent Mark Continue"); +static const XStatPhaseConcurrent XPhaseConcurrentMarkFree("Concurrent Mark Free"); +static const XStatPhasePause XPhasePauseMarkEnd("Pause Mark End"); +static const XStatPhaseConcurrent XPhaseConcurrentProcessNonStrongReferences("Concurrent Process Non-Strong References"); +static const XStatPhaseConcurrent XPhaseConcurrentResetRelocationSet("Concurrent Reset Relocation Set"); +static const XStatPhaseConcurrent XPhaseConcurrentSelectRelocationSet("Concurrent Select Relocation Set"); +static const XStatPhasePause XPhasePauseRelocateStart("Pause Relocate Start"); +static const XStatPhaseConcurrent XPhaseConcurrentRelocated("Concurrent Relocate"); +static const XStatCriticalPhase XCriticalPhaseGCLockerStall("GC Locker Stall", false /* verbose */); +static const XStatSampler XSamplerJavaThreads("System", "Java Threads", XStatUnitThreads); + +XDriverRequest::XDriverRequest() : + XDriverRequest(GCCause::_no_gc) {} + +XDriverRequest::XDriverRequest(GCCause::Cause cause) : + XDriverRequest(cause, ConcGCThreads) {} + +XDriverRequest::XDriverRequest(GCCause::Cause cause, uint nworkers) : + _cause(cause), + _nworkers(nworkers) {} + +bool XDriverRequest::operator==(const XDriverRequest& other) const { + return _cause == other._cause; +} + +GCCause::Cause XDriverRequest::cause() const { + return _cause; +} + +uint XDriverRequest::nworkers() const { + return _nworkers; +} + +class VM_XOperation : public VM_Operation { +private: + const uint _gc_id; + bool _gc_locked; + bool _success; + +public: + VM_XOperation() : + _gc_id(GCId::current()), + _gc_locked(false), + _success(false) {} + + virtual bool needs_inactive_gc_locker() const { + // An inactive GC locker is needed in operations where we change the bad + // mask or move objects. Changing the bad mask will invalidate all oops, + // which makes it conceptually the same thing as moving all objects. 
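    // Subclasses that change the bad mask or move objects (VM_XMarkStart and
    // VM_XRelocateStart below) override this to return true. A minimal sketch
    // of how a blocked operation is expected to be handled (illustrative
    // only):
    //
    //   VM_XMarkStart op;            // needs_inactive_gc_locker() == true
    //   VMThread::execute(&op);
    //   if (op.gc_locked()) {
    //     // A JNI critical region was active, so doit() backed out. Wait for
    //     // GCLocker to report GCCause::_gc_locker, which signals
    //     // _gc_locker_port (see XDriver::collect()), and then retry.
    //   }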
+ return false; + } + + virtual bool skip_thread_oop_barriers() const { + return true; + } + + virtual bool do_operation() = 0; + + virtual bool doit_prologue() { + Heap_lock->lock(); + return true; + } + + virtual void doit() { + // Abort if GC locker state is incompatible + if (needs_inactive_gc_locker() && GCLocker::check_active_before_gc()) { + _gc_locked = true; + return; + } + + // Setup GC id and active marker + GCIdMark gc_id_mark(_gc_id); + IsGCActiveMark gc_active_mark; + + // Verify before operation + XVerify::before_zoperation(); + + // Execute operation + _success = do_operation(); + + // Update statistics + XStatSample(XSamplerJavaThreads, Threads::number_of_threads()); + } + + virtual void doit_epilogue() { + Heap_lock->unlock(); + } + + bool gc_locked() const { + return _gc_locked; + } + + bool success() const { + return _success; + } +}; + +class VM_XMarkStart : public VM_XOperation { +public: + virtual VMOp_Type type() const { + return VMOp_XMarkStart; + } + + virtual bool needs_inactive_gc_locker() const { + return true; + } + + virtual bool do_operation() { + XStatTimer timer(XPhasePauseMarkStart); + XServiceabilityPauseTracer tracer; + + XCollectedHeap::heap()->increment_total_collections(true /* full */); + + XHeap::heap()->mark_start(); + return true; + } +}; + +class VM_XMarkEnd : public VM_XOperation { +public: + virtual VMOp_Type type() const { + return VMOp_XMarkEnd; + } + + virtual bool do_operation() { + XStatTimer timer(XPhasePauseMarkEnd); + XServiceabilityPauseTracer tracer; + return XHeap::heap()->mark_end(); + } +}; + +class VM_XRelocateStart : public VM_XOperation { +public: + virtual VMOp_Type type() const { + return VMOp_XRelocateStart; + } + + virtual bool needs_inactive_gc_locker() const { + return true; + } + + virtual bool do_operation() { + XStatTimer timer(XPhasePauseRelocateStart); + XServiceabilityPauseTracer tracer; + XHeap::heap()->relocate_start(); + return true; + } +}; + +class VM_XVerify : public VM_Operation { +public: + virtual VMOp_Type type() const { + return VMOp_XVerify; + } + + virtual bool skip_thread_oop_barriers() const { + return true; + } + + virtual void doit() { + XVerify::after_weak_processing(); + } +}; + +XDriver::XDriver() : + _gc_cycle_port(), + _gc_locker_port() { + set_name("XDriver"); + create_and_start(); +} + +bool XDriver::is_busy() const { + return _gc_cycle_port.is_busy(); +} + +void XDriver::collect(const XDriverRequest& request) { + switch (request.cause()) { + case GCCause::_heap_dump: + case GCCause::_heap_inspection: + case GCCause::_wb_young_gc: + case GCCause::_wb_full_gc: + case GCCause::_dcmd_gc_run: + case GCCause::_java_lang_system_gc: + case GCCause::_full_gc_alot: + case GCCause::_scavenge_alot: + case GCCause::_jvmti_force_gc: + case GCCause::_metadata_GC_clear_soft_refs: + case GCCause::_codecache_GC_aggressive: + // Start synchronous GC + _gc_cycle_port.send_sync(request); + break; + + case GCCause::_z_timer: + case GCCause::_z_warmup: + case GCCause::_z_allocation_rate: + case GCCause::_z_allocation_stall: + case GCCause::_z_proactive: + case GCCause::_z_high_usage: + case GCCause::_codecache_GC_threshold: + case GCCause::_metadata_GC_threshold: + // Start asynchronous GC + _gc_cycle_port.send_async(request); + break; + + case GCCause::_gc_locker: + // Restart VM operation previously blocked by the GC locker + _gc_locker_port.signal(); + break; + + case GCCause::_wb_breakpoint: + XBreakpoint::start_gc(); + _gc_cycle_port.send_async(request); + break; + + default: + // Other causes not supported + 
fatal("Unsupported GC cause (%s)", GCCause::to_string(request.cause())); + break; + } +} + +template +bool XDriver::pause() { + for (;;) { + T op; + VMThread::execute(&op); + if (op.gc_locked()) { + // Wait for GC to become unlocked and restart the VM operation + XStatTimer timer(XCriticalPhaseGCLockerStall); + _gc_locker_port.wait(); + continue; + } + + // Notify VM operation completed + _gc_locker_port.ack(); + + return op.success(); + } +} + +void XDriver::pause_mark_start() { + pause(); +} + +void XDriver::concurrent_mark() { + XStatTimer timer(XPhaseConcurrentMark); + XBreakpoint::at_after_marking_started(); + XHeap::heap()->mark(true /* initial */); + XBreakpoint::at_before_marking_completed(); +} + +bool XDriver::pause_mark_end() { + return pause(); +} + +void XDriver::concurrent_mark_continue() { + XStatTimer timer(XPhaseConcurrentMarkContinue); + XHeap::heap()->mark(false /* initial */); +} + +void XDriver::concurrent_mark_free() { + XStatTimer timer(XPhaseConcurrentMarkFree); + XHeap::heap()->mark_free(); +} + +void XDriver::concurrent_process_non_strong_references() { + XStatTimer timer(XPhaseConcurrentProcessNonStrongReferences); + XBreakpoint::at_after_reference_processing_started(); + XHeap::heap()->process_non_strong_references(); +} + +void XDriver::concurrent_reset_relocation_set() { + XStatTimer timer(XPhaseConcurrentResetRelocationSet); + XHeap::heap()->reset_relocation_set(); +} + +void XDriver::pause_verify() { + if (ZVerifyRoots || ZVerifyObjects) { + VM_XVerify op; + VMThread::execute(&op); + } +} + +void XDriver::concurrent_select_relocation_set() { + XStatTimer timer(XPhaseConcurrentSelectRelocationSet); + XHeap::heap()->select_relocation_set(); +} + +void XDriver::pause_relocate_start() { + pause(); +} + +void XDriver::concurrent_relocate() { + XStatTimer timer(XPhaseConcurrentRelocated); + XHeap::heap()->relocate(); +} + +void XDriver::check_out_of_memory() { + XHeap::heap()->check_out_of_memory(); +} + +static bool should_clear_soft_references(const XDriverRequest& request) { + // Clear soft references if implied by the GC cause + if (request.cause() == GCCause::_wb_full_gc || + request.cause() == GCCause::_metadata_GC_clear_soft_refs || + request.cause() == GCCause::_z_allocation_stall) { + // Clear + return true; + } + + // Don't clear + return false; +} + +static uint select_active_worker_threads_dynamic(const XDriverRequest& request) { + // Use requested number of worker threads + return request.nworkers(); +} + +static uint select_active_worker_threads_static(const XDriverRequest& request) { + const GCCause::Cause cause = request.cause(); + const uint nworkers = request.nworkers(); + + // Boost number of worker threads if implied by the GC cause + if (cause == GCCause::_wb_full_gc || + cause == GCCause::_java_lang_system_gc || + cause == GCCause::_metadata_GC_clear_soft_refs || + cause == GCCause::_z_allocation_stall) { + // Boost + const uint boosted_nworkers = MAX2(nworkers, ParallelGCThreads); + return boosted_nworkers; + } + + // Use requested number of worker threads + return nworkers; +} + +static uint select_active_worker_threads(const XDriverRequest& request) { + if (UseDynamicNumberOfGCThreads) { + return select_active_worker_threads_dynamic(request); + } else { + return select_active_worker_threads_static(request); + } +} + +class XDriverGCScope : public StackObj { +private: + GCIdMark _gc_id; + GCCause::Cause _gc_cause; + GCCauseSetter _gc_cause_setter; + XStatTimer _timer; + XServiceabilityCycleTracer _tracer; + +public: + XDriverGCScope(const 
XDriverRequest& request) : + _gc_id(), + _gc_cause(request.cause()), + _gc_cause_setter(XCollectedHeap::heap(), _gc_cause), + _timer(XPhaseCycle), + _tracer() { + // Update statistics + XStatCycle::at_start(); + + // Set up soft reference policy + const bool clear = should_clear_soft_references(request); + XHeap::heap()->set_soft_reference_policy(clear); + + // Select number of worker threads to use + const uint nworkers = select_active_worker_threads(request); + XHeap::heap()->set_active_workers(nworkers); + } + + ~XDriverGCScope() { + // Update statistics + XStatCycle::at_end(_gc_cause, XHeap::heap()->active_workers()); + + // Update data used by soft reference policy + Universe::heap()->update_capacity_and_used_at_gc(); + + // Signal that we have completed a visit to all live objects + Universe::heap()->record_whole_heap_examined_timestamp(); + } +}; + +// Macro to execute a termination check after a concurrent phase. Note +// that it's important that the termination check comes after the call +// to the function f, since we can't abort between pause_relocate_start() +// and concurrent_relocate(). We need to let concurrent_relocate() call +// abort_page() on the remaining entries in the relocation set. +#define concurrent(f) \ + do { \ + concurrent_##f(); \ + if (should_terminate()) { \ + return; \ + } \ + } while (false) + +void XDriver::gc(const XDriverRequest& request) { + XDriverGCScope scope(request); + + // Phase 1: Pause Mark Start + pause_mark_start(); + + // Phase 2: Concurrent Mark + concurrent(mark); + + // Phase 3: Pause Mark End + while (!pause_mark_end()) { + // Phase 3.5: Concurrent Mark Continue + concurrent(mark_continue); + } + + // Phase 4: Concurrent Mark Free + concurrent(mark_free); + + // Phase 5: Concurrent Process Non-Strong References + concurrent(process_non_strong_references); + + // Phase 6: Concurrent Reset Relocation Set + concurrent(reset_relocation_set); + + // Phase 7: Pause Verify + pause_verify(); + + // Phase 8: Concurrent Select Relocation Set + concurrent(select_relocation_set); + + // Phase 9: Pause Relocate Start + pause_relocate_start(); + + // Phase 10: Concurrent Relocate + concurrent(relocate); +} + +void XDriver::run_service() { + // Main loop + while (!should_terminate()) { + // Wait for GC request + const XDriverRequest request = _gc_cycle_port.receive(); + if (request.cause() == GCCause::_no_gc) { + continue; + } + + XBreakpoint::at_before_gc(); + + // Run GC + gc(request); + + if (should_terminate()) { + // Abort + break; + } + + // Notify GC completed + _gc_cycle_port.ack(); + + // Check for out of memory condition + check_out_of_memory(); + + XBreakpoint::at_after_gc(); + } +} + +void XDriver::stop_service() { + XAbort::abort(); + _gc_cycle_port.send_async(GCCause::_no_gc); +} diff --git a/src/hotspot/share/gc/x/xDriver.hpp b/src/hotspot/share/gc/x/xDriver.hpp new file mode 100644 index 0000000000000..3803b699b85df --- /dev/null +++ b/src/hotspot/share/gc/x/xDriver.hpp @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
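The concurrent() macro above both runs a phase and bails out of gc() when the driver has been asked to terminate; for example, concurrent(mark); expands to:

    do {
      concurrent_mark();
      if (should_terminate()) {
        return;
      }
    } while (false);

As the comment notes, the one place an early return must not happen is between pause_relocate_start() and concurrent_relocate(), which is why the termination check sits after the phase call rather than before it.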
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XDRIVER_HPP +#define SHARE_GC_X_XDRIVER_HPP + +#include "gc/shared/concurrentGCThread.hpp" +#include "gc/shared/gcCause.hpp" +#include "gc/x/xMessagePort.hpp" + +class VM_XOperation; + +class XDriverRequest { +private: + GCCause::Cause _cause; + uint _nworkers; + +public: + XDriverRequest(); + XDriverRequest(GCCause::Cause cause); + XDriverRequest(GCCause::Cause cause, uint nworkers); + + bool operator==(const XDriverRequest& other) const; + + GCCause::Cause cause() const; + uint nworkers() const; +}; + +class XDriver : public ConcurrentGCThread { +private: + XMessagePort _gc_cycle_port; + XRendezvousPort _gc_locker_port; + + template bool pause(); + + void pause_mark_start(); + void concurrent_mark(); + bool pause_mark_end(); + void concurrent_mark_continue(); + void concurrent_mark_free(); + void concurrent_process_non_strong_references(); + void concurrent_reset_relocation_set(); + void pause_verify(); + void concurrent_select_relocation_set(); + void pause_relocate_start(); + void concurrent_relocate(); + + void check_out_of_memory(); + + void gc(const XDriverRequest& request); + +protected: + virtual void run_service(); + virtual void stop_service(); + +public: + XDriver(); + + bool is_busy() const; + + void collect(const XDriverRequest& request); +}; + +#endif // SHARE_GC_X_XDRIVER_HPP diff --git a/src/hotspot/share/gc/x/xErrno.cpp b/src/hotspot/share/gc/x/xErrno.cpp new file mode 100644 index 0000000000000..64951bc47ab15 --- /dev/null +++ b/src/hotspot/share/gc/x/xErrno.cpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
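Callers interact with the driver only through collect(); whether the call blocks depends on the cause, as the switch in XDriver::collect() above shows. A hedged usage sketch (the calling context is illustrative):

    // Explicit collections are synchronous: send_sync() returns only after
    // the requested cycle has completed.
    driver->collect(XDriverRequest(GCCause::_java_lang_system_gc));

    // Heuristic-driven collections are asynchronous: send_async() returns
    // immediately. XDriverRequest::operator== compares only the cause, which
    // presumably lets the message port treat requests that differ only in
    // worker count as the same pending message.
    driver->collect(XDriverRequest(GCCause::_z_timer));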
+ */ + +#include "precompiled.hpp" +#include "gc/x/xErrno.hpp" +#include "runtime/os.hpp" + +#include +#include + +XErrno::XErrno() : + _error(errno) {} + +XErrno::XErrno(int error) : + _error(error) {} + +XErrno::operator bool() const { + return _error != 0; +} + +bool XErrno::operator==(int error) const { + return _error == error; +} + +bool XErrno::operator!=(int error) const { + return _error != error; +} + +const char* XErrno::to_string() const { + return os::strerror(_error); +} diff --git a/src/hotspot/share/gc/x/xErrno.hpp b/src/hotspot/share/gc/x/xErrno.hpp new file mode 100644 index 0000000000000..eb72d43da3f8f --- /dev/null +++ b/src/hotspot/share/gc/x/xErrno.hpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XERRNO_HPP +#define SHARE_GC_X_XERRNO_HPP + +#include "memory/allocation.hpp" + +class XErrno : public StackObj { +private: + const int _error; + +public: + XErrno(); + XErrno(int error); + + operator bool() const; + bool operator==(int error) const; + bool operator!=(int error) const; + const char* to_string() const; +}; + +#endif // SHARE_GC_X_XERRNO_HPP diff --git a/src/hotspot/share/gc/x/xForwarding.cpp b/src/hotspot/share/gc/x/xForwarding.cpp new file mode 100644 index 0000000000000..3e8b50d0d6494 --- /dev/null +++ b/src/hotspot/share/gc/x/xForwarding.cpp @@ -0,0 +1,210 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
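XErrno simply snapshots errno at construction and converts it to bool or text. A typical use right after a failing system call might look like this; the wrapper function and the log statement are illustrative, not taken from this patch:

    #include "gc/x/xErrno.hpp"
    #include "logging/log.hpp"

    #include <unistd.h>

    static bool close_fd(int fd) {
      if (::close(fd) == -1) {
        XErrno err;  // captures errno before anything else can clobber it
        log_error(gc)("Failed to close file descriptor (%s)", err.to_string());
        return false;
      }
      return true;
    }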
+ */ + +#include "precompiled.hpp" +#include "gc/x/xAddress.inline.hpp" +#include "gc/x/xForwarding.inline.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xUtils.inline.hpp" +#include "utilities/align.hpp" + +// +// Reference count states: +// +// * If the reference count is zero, it will never change again. +// +// * If the reference count is positive, it can be both retained +// (increased) and released (decreased). +// +// * If the reference count is negative, is can only be released +// (increased). A negative reference count means that one or more +// threads are waiting for one or more other threads to release +// their references. +// +// The reference lock is used for waiting until the reference +// count has become zero (released) or negative one (claimed). +// + +static const XStatCriticalPhase XCriticalPhaseRelocationStall("Relocation Stall"); + +bool XForwarding::retain_page() { + for (;;) { + const int32_t ref_count = Atomic::load_acquire(&_ref_count); + + if (ref_count == 0) { + // Released + return false; + } + + if (ref_count < 0) { + // Claimed + const bool success = wait_page_released(); + assert(success, "Should always succeed"); + return false; + } + + if (Atomic::cmpxchg(&_ref_count, ref_count, ref_count + 1) == ref_count) { + // Retained + return true; + } + } +} + +XPage* XForwarding::claim_page() { + for (;;) { + const int32_t ref_count = Atomic::load(&_ref_count); + assert(ref_count > 0, "Invalid state"); + + // Invert reference count + if (Atomic::cmpxchg(&_ref_count, ref_count, -ref_count) != ref_count) { + continue; + } + + // If the previous reference count was 1, then we just changed it to -1, + // and we have now claimed the page. Otherwise we wait until it is claimed. + if (ref_count != 1) { + XLocker locker(&_ref_lock); + while (Atomic::load_acquire(&_ref_count) != -1) { + _ref_lock.wait(); + } + } + + return _page; + } +} + +void XForwarding::release_page() { + for (;;) { + const int32_t ref_count = Atomic::load(&_ref_count); + assert(ref_count != 0, "Invalid state"); + + if (ref_count > 0) { + // Decrement reference count + if (Atomic::cmpxchg(&_ref_count, ref_count, ref_count - 1) != ref_count) { + continue; + } + + // If the previous reference count was 1, then we just decremented + // it to 0 and we should signal that the page is now released. + if (ref_count == 1) { + // Notify released + XLocker locker(&_ref_lock); + _ref_lock.notify_all(); + } + } else { + // Increment reference count + if (Atomic::cmpxchg(&_ref_count, ref_count, ref_count + 1) != ref_count) { + continue; + } + + // If the previous reference count was -2 or -1, then we just incremented it + // to -1 or 0, and we should signal the that page is now claimed or released. 
+ if (ref_count == -2 || ref_count == -1) { + // Notify claimed or released + XLocker locker(&_ref_lock); + _ref_lock.notify_all(); + } + } + + return; + } +} + +bool XForwarding::wait_page_released() const { + if (Atomic::load_acquire(&_ref_count) != 0) { + XStatTimer timer(XCriticalPhaseRelocationStall); + XLocker locker(&_ref_lock); + while (Atomic::load_acquire(&_ref_count) != 0) { + if (_ref_abort) { + return false; + } + + _ref_lock.wait(); + } + } + + return true; +} + +XPage* XForwarding::detach_page() { + // Wait until released + if (Atomic::load_acquire(&_ref_count) != 0) { + XLocker locker(&_ref_lock); + while (Atomic::load_acquire(&_ref_count) != 0) { + _ref_lock.wait(); + } + } + + // Detach and return page + XPage* const page = _page; + _page = NULL; + return page; +} + +void XForwarding::abort_page() { + XLocker locker(&_ref_lock); + assert(Atomic::load(&_ref_count) > 0, "Invalid state"); + assert(!_ref_abort, "Invalid state"); + _ref_abort = true; + _ref_lock.notify_all(); +} + +void XForwarding::verify() const { + guarantee(_ref_count != 0, "Invalid reference count"); + guarantee(_page != NULL, "Invalid page"); + + uint32_t live_objects = 0; + size_t live_bytes = 0; + + for (XForwardingCursor i = 0; i < _entries.length(); i++) { + const XForwardingEntry entry = at(&i); + if (!entry.populated()) { + // Skip empty entries + continue; + } + + // Check from index + guarantee(entry.from_index() < _page->object_max_count(), "Invalid from index"); + + // Check for duplicates + for (XForwardingCursor j = i + 1; j < _entries.length(); j++) { + const XForwardingEntry other = at(&j); + if (!other.populated()) { + // Skip empty entries + continue; + } + + guarantee(entry.from_index() != other.from_index(), "Duplicate from"); + guarantee(entry.to_offset() != other.to_offset(), "Duplicate to"); + } + + const uintptr_t to_addr = XAddress::good(entry.to_offset()); + const size_t size = XUtils::object_size(to_addr); + const size_t aligned_size = align_up(size, _page->object_alignment()); + live_bytes += aligned_size; + live_objects++; + } + + // Verify number of live objects and bytes + _page->verify_live(live_objects, live_bytes); +} diff --git a/src/hotspot/share/gc/x/xForwarding.hpp b/src/hotspot/share/gc/x/xForwarding.hpp new file mode 100644 index 0000000000000..a6185e23ced27 --- /dev/null +++ b/src/hotspot/share/gc/x/xForwarding.hpp @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
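The reference count protocol above is what lets relocating threads pin a page while another thread may concurrently claim, detach, or abort it. A sketch of the caller-side pattern, with the copy step elided (the function and its role are illustrative assumptions):

    static void relocate_object_sketch(XForwarding* forwarding) {
      if (!forwarding->retain_page()) {
        // Page already released or claimed; the object has been, or will be,
        // forwarded by whoever holds the page.
        return;
      }

      // ... copy the object and publish a forwarding entry here ...

      forwarding->release_page();  // may notify a waiter when the count hits 0 or -1
    }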
+ */ + +#ifndef SHARE_GC_X_XFORWARDING_HPP +#define SHARE_GC_X_XFORWARDING_HPP + +#include "gc/x/xAttachedArray.hpp" +#include "gc/x/xForwardingEntry.hpp" +#include "gc/x/xLock.hpp" +#include "gc/x/xVirtualMemory.hpp" + +class ObjectClosure; +class VMStructs; +class XForwardingAllocator; +class XPage; + +typedef size_t XForwardingCursor; + +class XForwarding { + friend class ::VMStructs; + friend class XForwardingTest; + +private: + typedef XAttachedArray AttachedArray; + + const XVirtualMemory _virtual; + const size_t _object_alignment_shift; + const AttachedArray _entries; + XPage* _page; + mutable XConditionLock _ref_lock; + volatile int32_t _ref_count; + bool _ref_abort; + bool _in_place; + + XForwardingEntry* entries() const; + XForwardingEntry at(XForwardingCursor* cursor) const; + XForwardingEntry first(uintptr_t from_index, XForwardingCursor* cursor) const; + XForwardingEntry next(XForwardingCursor* cursor) const; + + XForwarding(XPage* page, size_t nentries); + +public: + static uint32_t nentries(const XPage* page); + static XForwarding* alloc(XForwardingAllocator* allocator, XPage* page); + + uint8_t type() const; + uintptr_t start() const; + size_t size() const; + size_t object_alignment_shift() const; + void object_iterate(ObjectClosure *cl); + + bool retain_page(); + XPage* claim_page(); + void release_page(); + bool wait_page_released() const; + XPage* detach_page(); + void abort_page(); + + void set_in_place(); + bool in_place() const; + + XForwardingEntry find(uintptr_t from_index, XForwardingCursor* cursor) const; + uintptr_t insert(uintptr_t from_index, uintptr_t to_offset, XForwardingCursor* cursor); + + void verify() const; +}; + +#endif // SHARE_GC_X_XFORWARDING_HPP diff --git a/src/hotspot/share/gc/x/xForwarding.inline.hpp b/src/hotspot/share/gc/x/xForwarding.inline.hpp new file mode 100644 index 0000000000000..257109f3de926 --- /dev/null +++ b/src/hotspot/share/gc/x/xForwarding.inline.hpp @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XFORWARDING_INLINE_HPP +#define SHARE_GC_X_XFORWARDING_INLINE_HPP + +#include "gc/x/xForwarding.hpp" + +#include "gc/x/xAttachedArray.inline.hpp" +#include "gc/x/xForwardingAllocator.inline.hpp" +#include "gc/x/xHash.inline.hpp" +#include "gc/x/xHeap.hpp" +#include "gc/x/xLock.inline.hpp" +#include "gc/x/xPage.inline.hpp" +#include "gc/x/xVirtualMemory.inline.hpp" +#include "runtime/atomic.hpp" +#include "utilities/debug.hpp" +#include "utilities/powerOfTwo.hpp" + +inline uint32_t XForwarding::nentries(const XPage* page) { + // The number returned by the function is used to size the hash table of + // forwarding entries for this page. This hash table uses linear probing. + // The size of the table must be a power of two to allow for quick and + // inexpensive indexing/masking. The table is also sized to have a load + // factor of 50%, i.e. sized to have double the number of entries actually + // inserted, to allow for good lookup/insert performance. + return round_up_power_of_2(page->live_objects() * 2); +} + +inline XForwarding* XForwarding::alloc(XForwardingAllocator* allocator, XPage* page) { + const size_t nentries = XForwarding::nentries(page); + void* const addr = AttachedArray::alloc(allocator, nentries); + return ::new (addr) XForwarding(page, nentries); +} + +inline XForwarding::XForwarding(XPage* page, size_t nentries) : + _virtual(page->virtual_memory()), + _object_alignment_shift(page->object_alignment_shift()), + _entries(nentries), + _page(page), + _ref_lock(), + _ref_count(1), + _ref_abort(false), + _in_place(false) {} + +inline uint8_t XForwarding::type() const { + return _page->type(); +} + +inline uintptr_t XForwarding::start() const { + return _virtual.start(); +} + +inline size_t XForwarding::size() const { + return _virtual.size(); +} + +inline size_t XForwarding::object_alignment_shift() const { + return _object_alignment_shift; +} + +inline void XForwarding::object_iterate(ObjectClosure *cl) { + return _page->object_iterate(cl); +} + +inline void XForwarding::set_in_place() { + _in_place = true; +} + +inline bool XForwarding::in_place() const { + return _in_place; +} + +inline XForwardingEntry* XForwarding::entries() const { + return _entries(this); +} + +inline XForwardingEntry XForwarding::at(XForwardingCursor* cursor) const { + // Load acquire for correctness with regards to + // accesses to the contents of the forwarded object. + return Atomic::load_acquire(entries() + *cursor); +} + +inline XForwardingEntry XForwarding::first(uintptr_t from_index, XForwardingCursor* cursor) const { + const size_t mask = _entries.length() - 1; + const size_t hash = XHash::uint32_to_uint32((uint32_t)from_index); + *cursor = hash & mask; + return at(cursor); +} + +inline XForwardingEntry XForwarding::next(XForwardingCursor* cursor) const { + const size_t mask = _entries.length() - 1; + *cursor = (*cursor + 1) & mask; + return at(cursor); +} + +inline XForwardingEntry XForwarding::find(uintptr_t from_index, XForwardingCursor* cursor) const { + // Reading entries in the table races with the atomic CAS done for + // insertion into the table. This is safe because each entry is at + // most updated once (from zero to something else). 
+ XForwardingEntry entry = first(from_index, cursor); + while (entry.populated()) { + if (entry.from_index() == from_index) { + // Match found, return matching entry + return entry; + } + + entry = next(cursor); + } + + // Match not found, return empty entry + return entry; +} + +inline uintptr_t XForwarding::insert(uintptr_t from_index, uintptr_t to_offset, XForwardingCursor* cursor) { + const XForwardingEntry new_entry(from_index, to_offset); + const XForwardingEntry old_entry; // Empty + + // Make sure that object copy is finished + // before forwarding table installation + OrderAccess::release(); + + for (;;) { + const XForwardingEntry prev_entry = Atomic::cmpxchg(entries() + *cursor, old_entry, new_entry, memory_order_relaxed); + if (!prev_entry.populated()) { + // Success + return to_offset; + } + + // Find next empty or matching entry + XForwardingEntry entry = at(cursor); + while (entry.populated()) { + if (entry.from_index() == from_index) { + // Match found, return already inserted address + return entry.to_offset(); + } + + entry = next(cursor); + } + } +} + +#endif // SHARE_GC_X_XFORWARDING_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xForwardingAllocator.cpp b/src/hotspot/share/gc/x/xForwardingAllocator.cpp new file mode 100644 index 0000000000000..fddff50e88f6f --- /dev/null +++ b/src/hotspot/share/gc/x/xForwardingAllocator.cpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xForwardingAllocator.hpp" +#include "memory/allocation.inline.hpp" + +XForwardingAllocator::XForwardingAllocator() : + _start(NULL), + _end(NULL), + _top(NULL) {} + +XForwardingAllocator::~XForwardingAllocator() { + FREE_C_HEAP_ARRAY(char, _start); +} + +void XForwardingAllocator::reset(size_t size) { + _start = _top = REALLOC_C_HEAP_ARRAY(char, _start, size, mtGC); + _end = _start + size; +} diff --git a/src/hotspot/share/gc/x/xForwardingAllocator.hpp b/src/hotspot/share/gc/x/xForwardingAllocator.hpp new file mode 100644 index 0000000000000..75495944e8ae3 --- /dev/null +++ b/src/hotspot/share/gc/x/xForwardingAllocator.hpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
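nentries() above sizes the table at twice the live-object count, rounded up to a power of two, so linear probing stays cheap: a page with 1000 live objects gets round_up_power_of_2(2000) = 2048 slots, a load factor just under 50%. On top of find() and insert(), a lookup-or-copy helper could then look roughly like this (the helper itself is an illustrative sketch):

    static uintptr_t forward_object_sketch(XForwarding* forwarding,
                                           uintptr_t from_index,
                                           uintptr_t new_offset) {
      XForwardingCursor cursor;

      const XForwardingEntry entry = forwarding->find(from_index, &cursor);
      if (entry.populated()) {
        return entry.to_offset();  // another thread already forwarded this object
      }

      // insert() CASes the entry in place; if the race is lost, it returns
      // the winning thread's offset instead of new_offset.
      return forwarding->insert(from_index, new_offset, &cursor);
    }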
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XFORWARDINGALLOCATOR_HPP +#define SHARE_GC_X_XFORWARDINGALLOCATOR_HPP + +#include "utilities/globalDefinitions.hpp" + +class XForwardingAllocator { +private: + char* _start; + char* _end; + char* _top; + +public: + XForwardingAllocator(); + ~XForwardingAllocator(); + + void reset(size_t size); + size_t size() const; + bool is_full() const; + + void* alloc(size_t size); +}; + +#endif // SHARE_GC_X_XFORWARDINGALLOCATOR_HPP diff --git a/src/hotspot/share/gc/x/xForwardingAllocator.inline.hpp b/src/hotspot/share/gc/x/xForwardingAllocator.inline.hpp new file mode 100644 index 0000000000000..e70986f52062e --- /dev/null +++ b/src/hotspot/share/gc/x/xForwardingAllocator.inline.hpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XFORWARDINGALLOCATOR_INLINE_HPP +#define SHARE_GC_X_XFORWARDINGALLOCATOR_INLINE_HPP + +#include "gc/x/xForwardingAllocator.hpp" + +#include "runtime/atomic.hpp" +#include "utilities/debug.hpp" + +inline size_t XForwardingAllocator::size() const { + return _end - _start; +} + +inline bool XForwardingAllocator::is_full() const { + return _top == _end; +} + +inline void* XForwardingAllocator::alloc(size_t size) { + char* const addr = Atomic::fetch_then_add(&_top, size); + assert(addr + size <= _end, "Allocation should never fail"); + return addr; +} + +#endif // SHARE_GC_X_XFORWARDINGALLOCATOR_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xForwardingEntry.hpp b/src/hotspot/share/gc/x/xForwardingEntry.hpp new file mode 100644 index 0000000000000..3f8846abbaa2b --- /dev/null +++ b/src/hotspot/share/gc/x/xForwardingEntry.hpp @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
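XForwardingAllocator is a bump-pointer arena: reset() (re)allocates one C-heap block of the requested size, and alloc() hands out chunks with an atomic fetch-and-add, so concurrent allocations need no lock but must fit within the precomputed size. A sketch of its intended use together with XForwarding::alloc(); the total size here is assumed to be an upper bound computed elsewhere:

    XForwardingAllocator allocator;

    // One block large enough for every XForwarding plus its attached entry array.
    allocator.reset(total_forwarding_bytes);  // hypothetical precomputed bound

    // Later, possibly from several threads:
    XForwarding* const forwarding = XForwarding::alloc(&allocator, page);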
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XFORWARDINGENTRY_HPP +#define SHARE_GC_X_XFORWARDINGENTRY_HPP + +#include "gc/x/xBitField.hpp" +#include "memory/allocation.hpp" +#include "metaprogramming/primitiveConversions.hpp" + +#include + +class VMStructs; + +// +// Forwarding entry layout +// ----------------------- +// +// 6 4 4 +// 3 6 5 1 0 +// +--------------------+--------------------------------------------------+-+ +// |11111111 11111111 11|111111 11111111 11111111 11111111 11111111 1111111|1| +// +--------------------+--------------------------------------------------+-+ +// | | | +// | | 0-0 Populated Flag (1-bits) * +// | | +// | * 45-1 To Object Offset (45-bits) +// | +// * 63-46 From Object Index (18-bits) +// + +class XForwardingEntry { + friend struct PrimitiveConversions::Translate; + friend class ::VMStructs; + +private: + typedef XBitField field_populated; + typedef XBitField field_to_offset; + typedef XBitField field_from_index; + + uint64_t _entry; + +public: + XForwardingEntry() : + _entry(0) {} + + XForwardingEntry(size_t from_index, size_t to_offset) : + _entry(field_populated::encode(true) | + field_to_offset::encode(to_offset) | + field_from_index::encode(from_index)) {} + + bool populated() const { + return field_populated::decode(_entry); + } + + size_t to_offset() const { + return field_to_offset::decode(_entry); + } + + size_t from_index() const { + return field_from_index::decode(_entry); + } +}; + +// Needed to allow atomic operations on XForwardingEntry +template <> +struct PrimitiveConversions::Translate : public std::true_type { + typedef XForwardingEntry Value; + typedef uint64_t Decayed; + + static Decayed decay(Value v) { + return v._entry; + } + + static Value recover(Decayed d) { + XForwardingEntry entry; + entry._entry = d; + return entry; + } +}; + +#endif // SHARE_GC_X_XFORWARDINGENTRY_HPP diff --git a/src/hotspot/share/gc/x/xForwardingTable.hpp b/src/hotspot/share/gc/x/xForwardingTable.hpp new file mode 100644 index 0000000000000..1f110292be516 --- /dev/null +++ b/src/hotspot/share/gc/x/xForwardingTable.hpp @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
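The layout diagram above packs three fields into one uint64_t: bit 0 is the populated flag, bits 45..1 hold the to-object offset, and bits 63..46 hold the from-object index. Written out with plain shifts (equivalent to what the XBitField encode/decode helpers do), the packing is:

    const uint64_t from_index = 123;     // at most 18 bits
    const uint64_t to_offset  = 0x2400;  // at most 45 bits

    const uint64_t raw = 1ull                   // populated flag, bit 0
                       | (to_offset  << 1)      // bits 45..1
                       | (from_index << 46);    // bits 63..46

    const bool     populated = (raw & 1ull) != 0;
    const uint64_t offset    = (raw >> 1)  & ((1ull << 45) - 1);
    const uint64_t index     = (raw >> 46) & ((1ull << 18) - 1);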
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XFORWARDINGTABLE_HPP +#define SHARE_GC_X_XFORWARDINGTABLE_HPP + +#include "gc/x/xGranuleMap.hpp" + +class VMStructs; +class XForwarding; + +class XForwardingTable { + friend class ::VMStructs; + +private: + XGranuleMap _map; + +public: + XForwardingTable(); + + XForwarding* get(uintptr_t addr) const; + + void insert(XForwarding* forwarding); + void remove(XForwarding* forwarding); +}; + +#endif // SHARE_GC_X_XFORWARDINGTABLE_HPP diff --git a/src/hotspot/share/gc/x/xForwardingTable.inline.hpp b/src/hotspot/share/gc/x/xForwardingTable.inline.hpp new file mode 100644 index 0000000000000..3ea30d383ece0 --- /dev/null +++ b/src/hotspot/share/gc/x/xForwardingTable.inline.hpp @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XFORWARDINGTABLE_INLINE_HPP +#define SHARE_GC_X_XFORWARDINGTABLE_INLINE_HPP + +#include "gc/x/xForwardingTable.hpp" + +#include "gc/x/xAddress.inline.hpp" +#include "gc/x/xForwarding.inline.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xGranuleMap.inline.hpp" +#include "utilities/debug.hpp" + +inline XForwardingTable::XForwardingTable() : + _map(XAddressOffsetMax) {} + +inline XForwarding* XForwardingTable::get(uintptr_t addr) const { + assert(!XAddress::is_null(addr), "Invalid address"); + return _map.get(XAddress::offset(addr)); +} + +inline void XForwardingTable::insert(XForwarding* forwarding) { + const uintptr_t offset = forwarding->start(); + const size_t size = forwarding->size(); + + assert(_map.get(offset) == NULL, "Invalid entry"); + _map.put(offset, size, forwarding); +} + +inline void XForwardingTable::remove(XForwarding* forwarding) { + const uintptr_t offset = forwarding->start(); + const size_t size = forwarding->size(); + + assert(_map.get(offset) == forwarding, "Invalid entry"); + _map.put(offset, size, NULL); +} + +#endif // SHARE_GC_X_XFORWARDINGTABLE_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xFuture.hpp b/src/hotspot/share/gc/x/xFuture.hpp new file mode 100644 index 0000000000000..931f4b58f123c --- /dev/null +++ b/src/hotspot/share/gc/x/xFuture.hpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XFUTURE_HPP +#define SHARE_GC_X_XFUTURE_HPP + +#include "memory/allocation.hpp" +#include "runtime/semaphore.hpp" + +template +class XFuture { +private: + Semaphore _sema; + T _value; + +public: + XFuture(); + + void set(T value); + T get(); +}; + +#endif // SHARE_GC_X_XFUTURE_HPP diff --git a/src/hotspot/share/gc/x/xFuture.inline.hpp b/src/hotspot/share/gc/x/xFuture.inline.hpp new file mode 100644 index 0000000000000..d3dba3b7151d7 --- /dev/null +++ b/src/hotspot/share/gc/x/xFuture.inline.hpp @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
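XForwardingTable is a granule-indexed map from heap offsets to XForwarding objects: insert() and remove() stamp every granule a page spans, and get() is a constant-time lookup by address. A sketch of the round trip; forwarding and addr are assumed to exist, and addr must be a non-null address inside that page:

    XForwardingTable table;

    table.insert(forwarding);                // stamps [start(), start() + size())

    XForwarding* const f = table.get(addr);  // any address within the page maps back
    assert(f == forwarding, "lookup after insert");

    table.remove(forwarding);                // resets the same granules to NULL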
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XFUTURE_INLINE_HPP +#define SHARE_GC_X_XFUTURE_INLINE_HPP + +#include "gc/x/xFuture.hpp" + +#include "runtime/javaThread.hpp" +#include "runtime/semaphore.inline.hpp" + +template +inline XFuture::XFuture() : + _value() {} + +template +inline void XFuture::set(T value) { + // Set value + _value = value; + + // Notify waiter + _sema.signal(); +} + +template +inline T XFuture::get() { + // Wait for notification + Thread* const thread = Thread::current(); + if (thread->is_Java_thread()) { + _sema.wait_with_safepoint_check(JavaThread::cast(thread)); + } else { + _sema.wait(); + } + + // Return value + return _value; +} + +#endif // SHARE_GC_X_XFUTURE_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xGlobals.cpp b/src/hotspot/share/gc/x/xGlobals.cpp new file mode 100644 index 0000000000000..b247565bc011d --- /dev/null +++ b/src/hotspot/share/gc/x/xGlobals.cpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
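XFuture<T> is a one-shot rendezvous: set() stores the value and signals the semaphore, get() blocks (with a safepoint check when called from a Java thread) and then returns the stored value. A sketch of the typical request/reply handshake; the hand-off and worker functions are illustrative:

    // Requesting thread:
    XFuture<bool> result;
    submit_to_worker(&result);     // hypothetical hand-off to another thread
    const bool ok = result.get();  // blocks until the worker calls set()

    // Worker thread:
    void process(XFuture<bool>* result) {
      const bool ok = do_the_work();  // hypothetical
      result->set(ok);                // wakes the requester
    }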
+ */ + +#include "precompiled.hpp" +#include "gc/x/xGlobals.hpp" + +uint32_t XGlobalPhase = XPhaseRelocate; +uint32_t XGlobalSeqNum = 1; + +size_t XPageSizeMediumShift; +size_t XPageSizeMedium; + +size_t XObjectSizeLimitMedium; + +const int& XObjectAlignmentSmallShift = LogMinObjAlignmentInBytes; +int XObjectAlignmentMediumShift; + +const int& XObjectAlignmentSmall = MinObjAlignmentInBytes; +int XObjectAlignmentMedium; + +uintptr_t XAddressGoodMask; +uintptr_t XAddressBadMask; +uintptr_t XAddressWeakBadMask; + +static uint32_t* XAddressCalculateBadMaskHighOrderBitsAddr() { + const uintptr_t addr = reinterpret_cast(&XAddressBadMask); + return reinterpret_cast(addr + XAddressBadMaskHighOrderBitsOffset); +} + +uint32_t* XAddressBadMaskHighOrderBitsAddr = XAddressCalculateBadMaskHighOrderBitsAddr(); + +size_t XAddressOffsetBits; +uintptr_t XAddressOffsetMask; +size_t XAddressOffsetMax; + +size_t XAddressMetadataShift; +uintptr_t XAddressMetadataMask; + +uintptr_t XAddressMetadataMarked; +uintptr_t XAddressMetadataMarked0; +uintptr_t XAddressMetadataMarked1; +uintptr_t XAddressMetadataRemapped; +uintptr_t XAddressMetadataFinalizable; + +const char* XGlobalPhaseToString() { + switch (XGlobalPhase) { + case XPhaseMark: + return "Mark"; + + case XPhaseMarkCompleted: + return "MarkCompleted"; + + case XPhaseRelocate: + return "Relocate"; + + default: + return "Unknown"; + } +} diff --git a/src/hotspot/share/gc/x/xGlobals.hpp b/src/hotspot/share/gc/x/xGlobals.hpp new file mode 100644 index 0000000000000..662a502a79f86 --- /dev/null +++ b/src/hotspot/share/gc/x/xGlobals.hpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XGLOBALS_HPP +#define SHARE_GC_X_XGLOBALS_HPP + +#include "utilities/globalDefinitions.hpp" +#include "utilities/macros.hpp" +#include CPU_HEADER(gc/x/xGlobals) + +// Collector name +const char* const XName = "The Z Garbage Collector"; + +// Global phase state +extern uint32_t XGlobalPhase; +const uint32_t XPhaseMark = 0; +const uint32_t XPhaseMarkCompleted = 1; +const uint32_t XPhaseRelocate = 2; +const char* XGlobalPhaseToString(); + +// Global sequence number +extern uint32_t XGlobalSeqNum; + +// Granule shift/size +const size_t XGranuleSizeShift = 21; // 2MB +const size_t XGranuleSize = (size_t)1 << XGranuleSizeShift; + +// Number of heap views +const size_t XHeapViews = XPlatformHeapViews; + +// Virtual memory to physical memory ratio +const size_t XVirtualToPhysicalRatio = 16; // 16:1 + +// Page types +const uint8_t XPageTypeSmall = 0; +const uint8_t XPageTypeMedium = 1; +const uint8_t XPageTypeLarge = 2; + +// Page size shifts +const size_t XPageSizeSmallShift = XGranuleSizeShift; +extern size_t XPageSizeMediumShift; + +// Page sizes +const size_t XPageSizeSmall = (size_t)1 << XPageSizeSmallShift; +extern size_t XPageSizeMedium; + +// Object size limits +const size_t XObjectSizeLimitSmall = XPageSizeSmall / 8; // 12.5% max waste +extern size_t XObjectSizeLimitMedium; + +// Object alignment shifts +extern const int& XObjectAlignmentSmallShift; +extern int XObjectAlignmentMediumShift; +const int XObjectAlignmentLargeShift = XGranuleSizeShift; + +// Object alignments +extern const int& XObjectAlignmentSmall; +extern int XObjectAlignmentMedium; +const int XObjectAlignmentLarge = 1 << XObjectAlignmentLargeShift; + +// +// Good/Bad mask states +// -------------------- +// +// GoodMask BadMask WeakGoodMask WeakBadMask +// -------------------------------------------------------------- +// Marked0 001 110 101 010 +// Marked1 010 101 110 001 +// Remapped 100 011 100 011 +// + +// Good/bad masks +extern uintptr_t XAddressGoodMask; +extern uintptr_t XAddressBadMask; +extern uintptr_t XAddressWeakBadMask; + +// The bad mask is 64 bit. Its high order 32 bits contain all possible value combinations +// that this mask will have. Therefore, the memory where the 32 high order bits are stored, +// can be used as a 32 bit GC epoch counter, that has a different bit pattern every time +// the bad mask is flipped. This provides a pointer to said 32 bits. 
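The state table above is easiest to read as a 3-bit metadata field in which exactly one "good" bit is set per phase: a pointer whose metadata matches the current good mask passes the fast-path test, anything else is caught by the bad mask. A worked example for the Marked0 row, with the bit values taken directly from the table:

    constexpr uintptr_t good_mask   = 0b001;  // Marked0 row: GoodMask
    constexpr uintptr_t bad_mask    = 0b110;  // Marked0 row: BadMask
    constexpr uintptr_t marked0_bit = 0b001;  // metadata carried by a good pointer

    static_assert((marked0_bit & good_mask) != 0, "pointer is good in Marked0");
    static_assert((marked0_bit & bad_mask)  == 0, "fast path: no barrier work needed");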
+extern uint32_t* XAddressBadMaskHighOrderBitsAddr; +const int XAddressBadMaskHighOrderBitsOffset = LITTLE_ENDIAN_ONLY(4) BIG_ENDIAN_ONLY(0); + +// Pointer part of address +extern size_t XAddressOffsetBits; +const size_t XAddressOffsetShift = 0; +extern uintptr_t XAddressOffsetMask; +extern size_t XAddressOffsetMax; + +// Metadata part of address +const size_t XAddressMetadataBits = 4; +extern size_t XAddressMetadataShift; +extern uintptr_t XAddressMetadataMask; + +// Metadata types +extern uintptr_t XAddressMetadataMarked; +extern uintptr_t XAddressMetadataMarked0; +extern uintptr_t XAddressMetadataMarked1; +extern uintptr_t XAddressMetadataRemapped; +extern uintptr_t XAddressMetadataFinalizable; + +// Cache line size +const size_t XCacheLineSize = XPlatformCacheLineSize; +#define XCACHE_ALIGNED ATTRIBUTE_ALIGNED(XCacheLineSize) + +// Mark stack space +extern uintptr_t XMarkStackSpaceStart; +const size_t XMarkStackSpaceExpandSize = (size_t)1 << 25; // 32M + +// Mark stack and magazine sizes +const size_t XMarkStackSizeShift = 11; // 2K +const size_t XMarkStackSize = (size_t)1 << XMarkStackSizeShift; +const size_t XMarkStackHeaderSize = (size_t)1 << 4; // 16B +const size_t XMarkStackSlots = (XMarkStackSize - XMarkStackHeaderSize) / sizeof(uintptr_t); +const size_t XMarkStackMagazineSize = (size_t)1 << 15; // 32K +const size_t XMarkStackMagazineSlots = (XMarkStackMagazineSize / XMarkStackSize) - 1; + +// Mark stripe size +const size_t XMarkStripeShift = XGranuleSizeShift; + +// Max number of mark stripes +const size_t XMarkStripesMax = 16; // Must be a power of two + +// Mark cache size +const size_t XMarkCacheSize = 1024; // Must be a power of two + +// Partial array minimum size +const size_t XMarkPartialArrayMinSizeShift = 12; // 4K +const size_t XMarkPartialArrayMinSize = (size_t)1 << XMarkPartialArrayMinSizeShift; + +// Max number of proactive/terminate flush attempts +const size_t XMarkProactiveFlushMax = 10; +const size_t XMarkTerminateFlushMax = 3; + +// Try complete mark timeout +const uint64_t XMarkCompleteTimeout = 200; // us + +#endif // SHARE_GC_X_XGLOBALS_HPP diff --git a/src/hotspot/share/gc/x/xGranuleMap.hpp b/src/hotspot/share/gc/x/xGranuleMap.hpp new file mode 100644 index 0000000000000..a9447e1469c6c --- /dev/null +++ b/src/hotspot/share/gc/x/xGranuleMap.hpp @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
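The mark stack constants above are all derived from one another; spelling out the arithmetic as compile-time checks (assuming a 64-bit target, where sizeof(uintptr_t) is 8):

    #include "gc/x/xGlobals.hpp"

    static_assert(XMarkStackSize == 2048, "2K stacks");
    static_assert(XMarkStackSlots == (2048 - 16) / 8, "254 usable slots per stack");
    static_assert(XMarkStackMagazineSlots == (32768 / 2048) - 1, "15 stacks per magazine");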
+ */ + +#ifndef SHARE_GC_X_XGRANULEMAP_HPP +#define SHARE_GC_X_XGRANULEMAP_HPP + +#include "gc/x/xArray.hpp" +#include "memory/allocation.hpp" + +class VMStructs; + +template +class XGranuleMap { + friend class ::VMStructs; + template friend class XGranuleMapIterator; + +private: + const size_t _size; + T* const _map; + + size_t index_for_offset(uintptr_t offset) const; + +public: + XGranuleMap(size_t max_offset); + ~XGranuleMap(); + + T get(uintptr_t offset) const; + void put(uintptr_t offset, T value); + void put(uintptr_t offset, size_t size, T value); + + T get_acquire(uintptr_t offset) const; + void release_put(uintptr_t offset, T value); +}; + +template +class XGranuleMapIterator : public XArrayIteratorImpl { +public: + XGranuleMapIterator(const XGranuleMap* granule_map); +}; + +#endif // SHARE_GC_X_XGRANULEMAP_HPP diff --git a/src/hotspot/share/gc/x/xGranuleMap.inline.hpp b/src/hotspot/share/gc/x/xGranuleMap.inline.hpp new file mode 100644 index 0000000000000..95ef5ee2b2d5b --- /dev/null +++ b/src/hotspot/share/gc/x/xGranuleMap.inline.hpp @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XGRANULEMAP_INLINE_HPP +#define SHARE_GC_X_XGRANULEMAP_INLINE_HPP + +#include "gc/x/xGranuleMap.hpp" + +#include "gc/x/xArray.inline.hpp" +#include "gc/x/xGlobals.hpp" +#include "memory/allocation.inline.hpp" +#include "runtime/atomic.hpp" +#include "utilities/align.hpp" +#include "utilities/debug.hpp" + +template +inline XGranuleMap::XGranuleMap(size_t max_offset) : + _size(max_offset >> XGranuleSizeShift), + _map(MmapArrayAllocator::allocate(_size, mtGC)) { + assert(is_aligned(max_offset, XGranuleSize), "Misaligned"); +} + +template +inline XGranuleMap::~XGranuleMap() { + MmapArrayAllocator::free(_map, _size); +} + +template +inline size_t XGranuleMap::index_for_offset(uintptr_t offset) const { + const size_t index = offset >> XGranuleSizeShift; + assert(index < _size, "Invalid index"); + return index; +} + +template +inline T XGranuleMap::get(uintptr_t offset) const { + const size_t index = index_for_offset(offset); + return _map[index]; +} + +template +inline void XGranuleMap::put(uintptr_t offset, T value) { + const size_t index = index_for_offset(offset); + _map[index] = value; +} + +template +inline void XGranuleMap::put(uintptr_t offset, size_t size, T value) { + assert(is_aligned(size, XGranuleSize), "Misaligned"); + + const size_t start_index = index_for_offset(offset); + const size_t end_index = start_index + (size >> XGranuleSizeShift); + for (size_t index = start_index; index < end_index; index++) { + _map[index] = value; + } +} + +template +inline T XGranuleMap::get_acquire(uintptr_t offset) const { + const size_t index = index_for_offset(offset); + return Atomic::load_acquire(_map + index); +} + +template +inline void XGranuleMap::release_put(uintptr_t offset, T value) { + const size_t index = index_for_offset(offset); + Atomic::release_store(_map + index, value); +} + +template +inline XGranuleMapIterator::XGranuleMapIterator(const XGranuleMap* granule_map) : + XArrayIteratorImpl(granule_map->_map, granule_map->_size) {} + +#endif // SHARE_GC_X_XGRANULEMAP_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xHash.hpp b/src/hotspot/share/gc/x/xHash.hpp new file mode 100644 index 0000000000000..253f4d231c1c3 --- /dev/null +++ b/src/hotspot/share/gc/x/xHash.hpp @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XHASH_HPP +#define SHARE_GC_X_XHASH_HPP + +#include "memory/allStatic.hpp" +#include "utilities/globalDefinitions.hpp" + +class XHash : public AllStatic { +public: + static uint32_t uint32_to_uint32(uint32_t key); + static uint32_t address_to_uint32(uintptr_t key); +}; + +#endif // SHARE_GC_X_XHASH_HPP diff --git a/src/hotspot/share/gc/x/xHash.inline.hpp b/src/hotspot/share/gc/x/xHash.inline.hpp new file mode 100644 index 0000000000000..5ff5f540821e0 --- /dev/null +++ b/src/hotspot/share/gc/x/xHash.inline.hpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * This file is available under and governed by the GNU General Public + * License version 2 only, as published by the Free Software Foundation. + * However, the following notice accompanied the original version of this + * file: + * + * (C) 2009 by Remo Dentato (rdentato@gmail.com) + * + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * http://opensource.org/licenses/bsd-license.php + */ + +#ifndef SHARE_GC_X_XHASH_INLINE_HPP +#define SHARE_GC_X_XHASH_INLINE_HPP + +#include "gc/x/xHash.hpp" + +#include "gc/x/xAddress.inline.hpp" + +inline uint32_t XHash::uint32_to_uint32(uint32_t key) { + key = ~key + (key << 15); + key = key ^ (key >> 12); + key = key + (key << 2); + key = key ^ (key >> 4); + key = key * 2057; + key = key ^ (key >> 16); + return key; +} + +inline uint32_t XHash::address_to_uint32(uintptr_t key) { + return uint32_to_uint32((uint32_t)(key >> 3)); +} + +#endif // SHARE_GC_X_XHASH_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xHeap.cpp b/src/hotspot/share/gc/x/xHeap.cpp new file mode 100644 index 0000000000000..21c4d447c9a85 --- /dev/null +++ b/src/hotspot/share/gc/x/xHeap.cpp @@ -0,0 +1,536 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "classfile/classLoaderDataGraph.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/shared/locationPrinter.hpp" +#include "gc/shared/tlab_globals.hpp" +#include "gc/x/xAddress.inline.hpp" +#include "gc/x/xArray.inline.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xHeap.inline.hpp" +#include "gc/x/xHeapIterator.hpp" +#include "gc/x/xHeuristics.hpp" +#include "gc/x/xMark.inline.hpp" +#include "gc/x/xPage.inline.hpp" +#include "gc/x/xPageTable.inline.hpp" +#include "gc/x/xRelocationSet.inline.hpp" +#include "gc/x/xRelocationSetSelector.inline.hpp" +#include "gc/x/xResurrection.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xThread.inline.hpp" +#include "gc/x/xVerify.hpp" +#include "gc/x/xWorkers.hpp" +#include "logging/log.hpp" +#include "memory/iterator.hpp" +#include "memory/metaspaceUtils.hpp" +#include "memory/resourceArea.hpp" +#include "prims/jvmtiTagMap.hpp" +#include "runtime/handshake.hpp" +#include "runtime/javaThread.hpp" +#include "runtime/safepoint.hpp" +#include "utilities/debug.hpp" + +static const XStatCounter XCounterUndoPageAllocation("Memory", "Undo Page Allocation", XStatUnitOpsPerSecond); +static const XStatCounter XCounterOutOfMemory("Memory", "Out Of Memory", XStatUnitOpsPerSecond); + +XHeap* XHeap::_heap = NULL; + +XHeap::XHeap() : + _workers(), + _object_allocator(), + _page_allocator(&_workers, MinHeapSize, InitialHeapSize, MaxHeapSize), + _page_table(), + _forwarding_table(), + _mark(&_workers, &_page_table), + _reference_processor(&_workers), + _weak_roots_processor(&_workers), + _relocate(&_workers), + _relocation_set(&_workers), + _unload(&_workers), + _serviceability(min_capacity(), max_capacity()) { + // Install global heap instance + assert(_heap == NULL, "Already initialized"); + _heap = this; + + // Update statistics + XStatHeap::set_at_initialize(_page_allocator.stats()); +} + +bool XHeap::is_initialized() const { + return _page_allocator.is_initialized() && _mark.is_initialized(); +} + +size_t XHeap::min_capacity() const { + return _page_allocator.min_capacity(); +} + +size_t XHeap::max_capacity() const { + return _page_allocator.max_capacity(); +} + +size_t XHeap::soft_max_capacity() const { + return _page_allocator.soft_max_capacity(); +} + +size_t XHeap::capacity() const { + return _page_allocator.capacity(); +} + +size_t XHeap::used() const { + return _page_allocator.used(); +} + +size_t XHeap::unused() const { + return _page_allocator.unused(); +} + +size_t XHeap::tlab_capacity() const { + return capacity(); +} + +size_t XHeap::tlab_used() const { + return _object_allocator.used(); +} + +size_t XHeap::max_tlab_size() const { + return XObjectSizeLimitSmall; +} + +size_t XHeap::unsafe_max_tlab_alloc() const { + size_t size = _object_allocator.remaining(); + + if (size < MinTLABSize) { + // The remaining space in the allocator is not enough to + // fit the smallest possible TLAB. This means that the next + // TLAB allocation will force the allocator to get a new + // backing page anyway, which in turn means that we can then + // fit the largest possible TLAB. + size = max_tlab_size(); + } + + return MIN2(size, max_tlab_size()); +} + +bool XHeap::is_in(uintptr_t addr) const { + // An address is considered to be "in the heap" if it points into + // the allocated part of a page, regardless of which heap view is + // used. Note that an address with the finalizable metadata bit set + // is not pointing into a heap view, and therefore not considered + // to be "in the heap". 
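Annotation: a small usage sketch, not part of the patch, of the predicate just described. XHeap::is_in() is the function whose body follows; XAddress::is_finalizable() is assumed to exist in xAddress.inline.hpp elsewhere in this patch, and classify_address() itself is invented for illustration.

// Illustrative only: classify an arbitrary pointer-sized value.
static const char* classify_address(uintptr_t addr) {
  if (XAddress::is_finalizable(addr)) {
    // Finalizable-marked oops do not point into any heap view.
    return "finalizable-marked, not a heap view address";
  }
  return XHeap::heap()->is_in(addr) ? "points into the allocated part of a page"
                                    : "not in the heap";
}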
+ + if (XAddress::is_in(addr)) { + const XPage* const page = _page_table.get(addr); + if (page != NULL) { + return page->is_in(addr); + } + } + + return false; +} + +uint XHeap::active_workers() const { + return _workers.active_workers(); +} + +void XHeap::set_active_workers(uint nworkers) { + _workers.set_active_workers(nworkers); +} + +void XHeap::threads_do(ThreadClosure* tc) const { + _page_allocator.threads_do(tc); + _workers.threads_do(tc); +} + +void XHeap::out_of_memory() { + ResourceMark rm; + + XStatInc(XCounterOutOfMemory); + log_info(gc)("Out Of Memory (%s)", Thread::current()->name()); +} + +XPage* XHeap::alloc_page(uint8_t type, size_t size, XAllocationFlags flags) { + XPage* const page = _page_allocator.alloc_page(type, size, flags); + if (page != NULL) { + // Insert page table entry + _page_table.insert(page); + } + + return page; +} + +void XHeap::undo_alloc_page(XPage* page) { + assert(page->is_allocating(), "Invalid page state"); + + XStatInc(XCounterUndoPageAllocation); + log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT, + XThread::id(), XThread::name(), p2i(page), page->size()); + + free_page(page, false /* reclaimed */); +} + +void XHeap::free_page(XPage* page, bool reclaimed) { + // Remove page table entry + _page_table.remove(page); + + // Free page + _page_allocator.free_page(page, reclaimed); +} + +void XHeap::free_pages(const XArray* pages, bool reclaimed) { + // Remove page table entries + XArrayIterator iter(pages); + for (XPage* page; iter.next(&page);) { + _page_table.remove(page); + } + + // Free pages + _page_allocator.free_pages(pages, reclaimed); +} + +void XHeap::flip_to_marked() { + XVerifyViewsFlip flip(&_page_allocator); + XAddress::flip_to_marked(); +} + +void XHeap::flip_to_remapped() { + XVerifyViewsFlip flip(&_page_allocator); + XAddress::flip_to_remapped(); +} + +void XHeap::mark_start() { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + + // Verification + ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_strong); + + if (XHeap::heap()->has_alloc_stalled()) { + // If there are stalled allocations, ensure that regardless of the + // cause of the GC, we have to clear soft references, as we are just + // about to increment the sequence number, and all previous allocations + // will throw if not presented with enough memory. 
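Annotation: a schematic, not part of the patch, of the reasoning above, with invented names. A stalled allocation remembers the global sequence number it was issued under; once a full cycle passes without it being satisfied, it is answered with OOME, which is why this cycle clears soft references aggressively on the next line.

// Illustrative only; this is not the XPageAllocator implementation.
struct StalledAllocation {
  uint32_t issued_seqnum;   // XGlobalSeqNum at the time the request stalled
};

static bool fails_after_this_cycle(const StalledAllocation& req, uint32_t current_seqnum) {
  // The request has already survived one complete GC cycle without memory.
  return req.issued_seqnum < current_seqnum;
}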
+ XHeap::heap()->set_soft_reference_policy(true); + } + + // Flip address view + flip_to_marked(); + + // Retire allocating pages + _object_allocator.retire_pages(); + + // Reset allocated/reclaimed/used statistics + _page_allocator.reset_statistics(); + + // Reset encountered/dropped/enqueued statistics + _reference_processor.reset_statistics(); + + // Enter mark phase + XGlobalPhase = XPhaseMark; + + // Reset marking information and mark roots + _mark.start(); + + // Update statistics + XStatHeap::set_at_mark_start(_page_allocator.stats()); +} + +void XHeap::mark(bool initial) { + _mark.mark(initial); +} + +void XHeap::mark_flush_and_free(Thread* thread) { + _mark.flush_and_free(thread); +} + +bool XHeap::mark_end() { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + + // Try end marking + if (!_mark.end()) { + // Marking not completed, continue concurrent mark + return false; + } + + // Enter mark completed phase + XGlobalPhase = XPhaseMarkCompleted; + + // Verify after mark + XVerify::after_mark(); + + // Update statistics + XStatHeap::set_at_mark_end(_page_allocator.stats()); + + // Block resurrection of weak/phantom references + XResurrection::block(); + + // Prepare to unload stale metadata and nmethods + _unload.prepare(); + + // Notify JVMTI that some tagmap entry objects may have died. + JvmtiTagMap::set_needs_cleaning(); + + return true; +} + +void XHeap::mark_free() { + _mark.free(); +} + +void XHeap::keep_alive(oop obj) { + XBarrier::keep_alive_barrier_on_oop(obj); +} + +void XHeap::set_soft_reference_policy(bool clear) { + _reference_processor.set_soft_reference_policy(clear); +} + +class XRendezvousClosure : public HandshakeClosure { +public: + XRendezvousClosure() : + HandshakeClosure("XRendezvous") {} + + void do_thread(Thread* thread) {} +}; + +void XHeap::process_non_strong_references() { + // Process Soft/Weak/Final/PhantomReferences + _reference_processor.process_references(); + + // Process weak roots + _weak_roots_processor.process_weak_roots(); + + // Unlink stale metadata and nmethods + _unload.unlink(); + + // Perform a handshake. This is needed 1) to make sure that stale + // metadata and nmethods are no longer observable. And 2), to + // prevent the race where a mutator first loads an oop, which is + // logically null but not yet cleared. Then this oop gets cleared + // by the reference processor and resurrection is unblocked. At + // this point the mutator could see the unblocked state and pass + // this invalid oop through the normal barrier path, which would + // incorrectly try to mark the oop. + XRendezvousClosure cl; + Handshake::execute(&cl); + + // Unblock resurrection of weak/phantom references + XResurrection::unblock(); + + // Purge stale metadata and nmethods that were unlinked + _unload.purge(); + + // Enqueue Soft/Weak/Final/PhantomReferences. Note that this + // must be done after unblocking resurrection. Otherwise the + // Finalizer thread could call Reference.get() on the Finalizers + // that were just enqueued, which would incorrectly return null + // during the resurrection block window, since such referents + // are only Finalizable marked. + _reference_processor.enqueue_references(); + + // Clear old markings claim bits. + // Note: Clearing _claim_strong also clears _claim_finalizable. 
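Annotation: the claim bits cleared on the next line are per-ClassLoaderData flags that let concurrent visitors process each class loader at most once per phase. Below is a minimal sketch, not part of the patch, of that mechanism, with invented names and using the Atomic API from runtime/atomic.hpp.

// Illustrative only; ClassLoaderData implements this differently.
struct ClaimableNode {
  volatile int _claimed;   // bitmask of claim categories already taken

  // Returns true for exactly one claimant per (node, bit) pair per phase.
  bool try_claim(int bit) {
    int old_value = Atomic::load(&_claimed);
    while ((old_value & bit) == 0) {
      const int prev = Atomic::cmpxchg(&_claimed, old_value, old_value | bit);
      if (prev == old_value) {
        return true;
      }
      old_value = prev;
    }
    return false;
  }

  // What clear_claimed_marks() does conceptually: re-arm for the next cycle.
  void clear_claims() { Atomic::store(&_claimed, 0); }
};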
+ ClassLoaderDataGraph::clear_claimed_marks(ClassLoaderData::_claim_strong); +} + +void XHeap::free_empty_pages(XRelocationSetSelector* selector, int bulk) { + // Freeing empty pages in bulk is an optimization to avoid grabbing + // the page allocator lock, and trying to satisfy stalled allocations + // too frequently. + if (selector->should_free_empty_pages(bulk)) { + free_pages(selector->empty_pages(), true /* reclaimed */); + selector->clear_empty_pages(); + } +} + +void XHeap::select_relocation_set() { + // Do not allow pages to be deleted + _page_allocator.enable_deferred_delete(); + + // Register relocatable pages with selector + XRelocationSetSelector selector; + XPageTableIterator pt_iter(&_page_table); + for (XPage* page; pt_iter.next(&page);) { + if (!page->is_relocatable()) { + // Not relocatable, don't register + continue; + } + + if (page->is_marked()) { + // Register live page + selector.register_live_page(page); + } else { + // Register empty page + selector.register_empty_page(page); + + // Reclaim empty pages in bulk + free_empty_pages(&selector, 64 /* bulk */); + } + } + + // Reclaim remaining empty pages + free_empty_pages(&selector, 0 /* bulk */); + + // Allow pages to be deleted + _page_allocator.disable_deferred_delete(); + + // Select relocation set + selector.select(); + + // Install relocation set + _relocation_set.install(&selector); + + // Setup forwarding table + XRelocationSetIterator rs_iter(&_relocation_set); + for (XForwarding* forwarding; rs_iter.next(&forwarding);) { + _forwarding_table.insert(forwarding); + } + + // Update statistics + XStatRelocation::set_at_select_relocation_set(selector.stats()); + XStatHeap::set_at_select_relocation_set(selector.stats()); +} + +void XHeap::reset_relocation_set() { + // Reset forwarding table + XRelocationSetIterator iter(&_relocation_set); + for (XForwarding* forwarding; iter.next(&forwarding);) { + _forwarding_table.remove(forwarding); + } + + // Reset relocation set + _relocation_set.reset(); +} + +void XHeap::relocate_start() { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + + // Finish unloading stale metadata and nmethods + _unload.finish(); + + // Flip address view + flip_to_remapped(); + + // Enter relocate phase + XGlobalPhase = XPhaseRelocate; + + // Update statistics + XStatHeap::set_at_relocate_start(_page_allocator.stats()); +} + +void XHeap::relocate() { + // Relocate relocation set + _relocate.relocate(&_relocation_set); + + // Update statistics + XStatHeap::set_at_relocate_end(_page_allocator.stats(), _object_allocator.relocated()); +} + +bool XHeap::is_allocating(uintptr_t addr) const { + const XPage* const page = _page_table.get(addr); + return page->is_allocating(); +} + +void XHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + XHeapIterator iter(1 /* nworkers */, visit_weaks); + iter.object_iterate(cl, 0 /* worker_id */); +} + +ParallelObjectIteratorImpl* XHeap::parallel_object_iterator(uint nworkers, bool visit_weaks) { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + return new XHeapIterator(nworkers, visit_weaks); +} + +void XHeap::pages_do(XPageClosure* cl) { + XPageTableIterator iter(&_page_table); + for (XPage* page; iter.next(&page);) { + cl->do_page(page); + } + _page_allocator.pages_do(cl); +} + +void XHeap::serviceability_initialize() { + _serviceability.initialize(); +} + +GCMemoryManager* XHeap::serviceability_cycle_memory_manager() { + 
return _serviceability.cycle_memory_manager(); +} + +GCMemoryManager* XHeap::serviceability_pause_memory_manager() { + return _serviceability.pause_memory_manager(); +} + +MemoryPool* XHeap::serviceability_memory_pool() { + return _serviceability.memory_pool(); +} + +XServiceabilityCounters* XHeap::serviceability_counters() { + return _serviceability.counters(); +} + +void XHeap::print_on(outputStream* st) const { + st->print_cr(" ZHeap used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M", + used() / M, + capacity() / M, + max_capacity() / M); + MetaspaceUtils::print_on(st); +} + +void XHeap::print_extended_on(outputStream* st) const { + print_on(st); + st->cr(); + + // Do not allow pages to be deleted + _page_allocator.enable_deferred_delete(); + + // Print all pages + st->print_cr("ZGC Page Table:"); + XPageTableIterator iter(&_page_table); + for (XPage* page; iter.next(&page);) { + page->print_on(st); + } + + // Allow pages to be deleted + _page_allocator.disable_deferred_delete(); +} + +bool XHeap::print_location(outputStream* st, uintptr_t addr) const { + if (LocationPrinter::is_valid_obj((void*)addr)) { + st->print(PTR_FORMAT " is a %s oop: ", addr, XAddress::is_good(addr) ? "good" : "bad"); + XOop::from_address(addr)->print_on(st); + return true; + } + + return false; +} + +void XHeap::verify() { + // Heap verification can only be done between mark end and + // relocate start. This is the only window where all oop are + // good and the whole heap is in a consistent state. + guarantee(XGlobalPhase == XPhaseMarkCompleted, "Invalid phase"); + + XVerify::after_weak_processing(); +} diff --git a/src/hotspot/share/gc/x/xHeap.hpp b/src/hotspot/share/gc/x/xHeap.hpp new file mode 100644 index 0000000000000..af2c73180d91a --- /dev/null +++ b/src/hotspot/share/gc/x/xHeap.hpp @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XHEAP_HPP +#define SHARE_GC_X_XHEAP_HPP + +#include "gc/x/xAllocationFlags.hpp" +#include "gc/x/xArray.hpp" +#include "gc/x/xForwardingTable.hpp" +#include "gc/x/xMark.hpp" +#include "gc/x/xObjectAllocator.hpp" +#include "gc/x/xPageAllocator.hpp" +#include "gc/x/xPageTable.hpp" +#include "gc/x/xReferenceProcessor.hpp" +#include "gc/x/xRelocate.hpp" +#include "gc/x/xRelocationSet.hpp" +#include "gc/x/xWeakRootsProcessor.hpp" +#include "gc/x/xServiceability.hpp" +#include "gc/x/xUnload.hpp" +#include "gc/x/xWorkers.hpp" + +class ThreadClosure; +class VMStructs; +class XPage; +class XRelocationSetSelector; + +class XHeap { + friend class ::VMStructs; + +private: + static XHeap* _heap; + + XWorkers _workers; + XObjectAllocator _object_allocator; + XPageAllocator _page_allocator; + XPageTable _page_table; + XForwardingTable _forwarding_table; + XMark _mark; + XReferenceProcessor _reference_processor; + XWeakRootsProcessor _weak_roots_processor; + XRelocate _relocate; + XRelocationSet _relocation_set; + XUnload _unload; + XServiceability _serviceability; + + void flip_to_marked(); + void flip_to_remapped(); + + void free_empty_pages(XRelocationSetSelector* selector, int bulk); + + void out_of_memory(); + +public: + static XHeap* heap(); + + XHeap(); + + bool is_initialized() const; + + // Heap metrics + size_t min_capacity() const; + size_t max_capacity() const; + size_t soft_max_capacity() const; + size_t capacity() const; + size_t used() const; + size_t unused() const; + + size_t tlab_capacity() const; + size_t tlab_used() const; + size_t max_tlab_size() const; + size_t unsafe_max_tlab_alloc() const; + + bool is_in(uintptr_t addr) const; + + // Threads + uint active_workers() const; + void set_active_workers(uint nworkers); + void threads_do(ThreadClosure* tc) const; + + // Reference processing + ReferenceDiscoverer* reference_discoverer(); + void set_soft_reference_policy(bool clear); + + // Non-strong reference processing + void process_non_strong_references(); + + // Page allocation + XPage* alloc_page(uint8_t type, size_t size, XAllocationFlags flags); + void undo_alloc_page(XPage* page); + void free_page(XPage* page, bool reclaimed); + void free_pages(const XArray* pages, bool reclaimed); + + // Object allocation + uintptr_t alloc_tlab(size_t size); + uintptr_t alloc_object(size_t size); + uintptr_t alloc_object_for_relocation(size_t size); + void undo_alloc_object_for_relocation(uintptr_t addr, size_t size); + bool has_alloc_stalled() const; + void check_out_of_memory(); + + // Marking + bool is_object_live(uintptr_t addr) const; + bool is_object_strongly_live(uintptr_t addr) const; + template void mark_object(uintptr_t addr); + void mark_start(); + void mark(bool initial); + void mark_flush_and_free(Thread* thread); + bool mark_end(); + void mark_free(); + void keep_alive(oop obj); + + // Relocation set + void select_relocation_set(); + void reset_relocation_set(); + + // Relocation + void relocate_start(); + uintptr_t relocate_object(uintptr_t addr); + uintptr_t remap_object(uintptr_t addr); + void relocate(); + + // Continuations + bool is_allocating(uintptr_t addr) const; + + // Iteration + void object_iterate(ObjectClosure* cl, bool visit_weaks); + ParallelObjectIteratorImpl* parallel_object_iterator(uint nworkers, bool visit_weaks); + void pages_do(XPageClosure* cl); + + // Serviceability + void serviceability_initialize(); + GCMemoryManager* serviceability_cycle_memory_manager(); + GCMemoryManager* serviceability_pause_memory_manager(); + MemoryPool* 
serviceability_memory_pool(); + XServiceabilityCounters* serviceability_counters(); + + // Printing + void print_on(outputStream* st) const; + void print_extended_on(outputStream* st) const; + bool print_location(outputStream* st, uintptr_t addr) const; + + // Verification + bool is_oop(uintptr_t addr) const; + void verify(); +}; + +#endif // SHARE_GC_X_XHEAP_HPP diff --git a/src/hotspot/share/gc/x/xHeap.inline.hpp b/src/hotspot/share/gc/x/xHeap.inline.hpp new file mode 100644 index 0000000000000..5b3e06b2f4b82 --- /dev/null +++ b/src/hotspot/share/gc/x/xHeap.inline.hpp @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XHEAP_INLINE_HPP +#define SHARE_GC_X_XHEAP_INLINE_HPP + +#include "gc/x/xHeap.hpp" + +#include "gc/x/xAddress.inline.hpp" +#include "gc/x/xForwardingTable.inline.hpp" +#include "gc/x/xMark.inline.hpp" +#include "gc/x/xPage.inline.hpp" +#include "gc/x/xPageTable.inline.hpp" +#include "utilities/debug.hpp" + +inline XHeap* XHeap::heap() { + assert(_heap != NULL, "Not initialized"); + return _heap; +} + +inline ReferenceDiscoverer* XHeap::reference_discoverer() { + return &_reference_processor; +} + +inline bool XHeap::is_object_live(uintptr_t addr) const { + XPage* page = _page_table.get(addr); + return page->is_object_live(addr); +} + +inline bool XHeap::is_object_strongly_live(uintptr_t addr) const { + XPage* page = _page_table.get(addr); + return page->is_object_strongly_live(addr); +} + +template +inline void XHeap::mark_object(uintptr_t addr) { + assert(XGlobalPhase == XPhaseMark, "Mark not allowed"); + _mark.mark_object(addr); +} + +inline uintptr_t XHeap::alloc_tlab(size_t size) { + guarantee(size <= max_tlab_size(), "TLAB too large"); + return _object_allocator.alloc_object(size); +} + +inline uintptr_t XHeap::alloc_object(size_t size) { + uintptr_t addr = _object_allocator.alloc_object(size); + assert(XAddress::is_good_or_null(addr), "Bad address"); + + if (addr == 0) { + out_of_memory(); + } + + return addr; +} + +inline uintptr_t XHeap::alloc_object_for_relocation(size_t size) { + const uintptr_t addr = _object_allocator.alloc_object_for_relocation(&_page_table, size); + assert(XAddress::is_good_or_null(addr), "Bad address"); + return addr; +} + +inline void XHeap::undo_alloc_object_for_relocation(uintptr_t addr, size_t size) { + XPage* const page = _page_table.get(addr); + _object_allocator.undo_alloc_object_for_relocation(page, addr, size); +} + +inline uintptr_t XHeap::relocate_object(uintptr_t addr) { + 
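  // Annotation, not part of the patch: relocate_object() is reached from the
  // relocation-phase load-barrier slow path. If the forwarding table has an
  // entry for this address, the page is in the relocation set and the object
  // is returned at (or copied to) its new location; otherwise the address is
  // simply re-colored with the current good mask.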
assert(XGlobalPhase == XPhaseRelocate, "Relocate not allowed"); + + XForwarding* const forwarding = _forwarding_table.get(addr); + if (forwarding == NULL) { + // Not forwarding + return XAddress::good(addr); + } + + // Relocate object + return _relocate.relocate_object(forwarding, XAddress::good(addr)); +} + +inline uintptr_t XHeap::remap_object(uintptr_t addr) { + assert(XGlobalPhase == XPhaseMark || + XGlobalPhase == XPhaseMarkCompleted, "Forward not allowed"); + + XForwarding* const forwarding = _forwarding_table.get(addr); + if (forwarding == NULL) { + // Not forwarding + return XAddress::good(addr); + } + + // Forward object + return _relocate.forward_object(forwarding, XAddress::good(addr)); +} + +inline bool XHeap::has_alloc_stalled() const { + return _page_allocator.has_alloc_stalled(); +} + +inline void XHeap::check_out_of_memory() { + _page_allocator.check_out_of_memory(); +} + +inline bool XHeap::is_oop(uintptr_t addr) const { + return XAddress::is_good(addr) && is_object_aligned(addr) && is_in(addr); +} + +#endif // SHARE_GC_X_XHEAP_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xHeapIterator.cpp b/src/hotspot/share/gc/x/xHeapIterator.cpp new file mode 100644 index 0000000000000..614f008935647 --- /dev/null +++ b/src/hotspot/share/gc/x/xHeapIterator.cpp @@ -0,0 +1,439 @@ +/* + * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "classfile/classLoaderDataGraph.hpp" +#include "gc/shared/barrierSetNMethod.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/shared/taskqueue.inline.hpp" +#include "gc/x/xAddress.inline.hpp" +#include "gc/x/xCollectedHeap.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xGranuleMap.inline.hpp" +#include "gc/x/xHeapIterator.hpp" +#include "gc/x/xLock.inline.hpp" +#include "gc/x/xNMethod.hpp" +#include "gc/x/xOop.inline.hpp" +#include "memory/iterator.inline.hpp" +#include "utilities/bitMap.inline.hpp" + +class XHeapIteratorBitMap : public CHeapObj { +private: + CHeapBitMap _bitmap; + +public: + XHeapIteratorBitMap(size_t size_in_bits) : + _bitmap(size_in_bits, mtGC) {} + + bool try_set_bit(size_t index) { + return _bitmap.par_set_bit(index); + } +}; + +class XHeapIteratorContext { +private: + XHeapIterator* const _iter; + XHeapIteratorQueue* const _queue; + XHeapIteratorArrayQueue* const _array_queue; + const uint _worker_id; + XStatTimerDisable _timer_disable; + +public: + XHeapIteratorContext(XHeapIterator* iter, uint worker_id) : + _iter(iter), + _queue(_iter->_queues.queue(worker_id)), + _array_queue(_iter->_array_queues.queue(worker_id)), + _worker_id(worker_id) {} + + void mark_and_push(oop obj) const { + if (_iter->mark_object(obj)) { + _queue->push(obj); + } + } + + void push_array(const ObjArrayTask& array) const { + _array_queue->push(array); + } + + bool pop(oop& obj) const { + return _queue->pop_overflow(obj) || _queue->pop_local(obj); + } + + bool pop_array(ObjArrayTask& array) const { + return _array_queue->pop_overflow(array) || _array_queue->pop_local(array); + } + + bool steal(oop& obj) const { + return _iter->_queues.steal(_worker_id, obj); + } + + bool steal_array(ObjArrayTask& array) const { + return _iter->_array_queues.steal(_worker_id, array); + } + + bool is_drained() const { + return _queue->is_empty() && _array_queue->is_empty(); + } +}; + +template +class XHeapIteratorRootOopClosure : public OopClosure { +private: + const XHeapIteratorContext& _context; + + oop load_oop(oop* p) { + if (Weak) { + return NativeAccess::oop_load(p); + } + + return NativeAccess::oop_load(p); + } + +public: + XHeapIteratorRootOopClosure(const XHeapIteratorContext& context) : + _context(context) {} + + virtual void do_oop(oop* p) { + const oop obj = load_oop(p); + _context.mark_and_push(obj); + } + + virtual void do_oop(narrowOop* p) { + ShouldNotReachHere(); + } +}; + +template +class XHeapIteratorOopClosure : public OopIterateClosure { +private: + const XHeapIteratorContext& _context; + const oop _base; + + oop load_oop(oop* p) { + assert(XCollectedHeap::heap()->is_in(p), "Should be in heap"); + + if (VisitReferents) { + return HeapAccess::oop_load_at(_base, _base->field_offset(p)); + } + + return HeapAccess::oop_load(p); + } + +public: + XHeapIteratorOopClosure(const XHeapIteratorContext& context, oop base) : + OopIterateClosure(), + _context(context), + _base(base) {} + + virtual ReferenceIterationMode reference_iteration_mode() { + return VisitReferents ? 
DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT; + } + + virtual void do_oop(oop* p) { + const oop obj = load_oop(p); + _context.mark_and_push(obj); + } + + virtual void do_oop(narrowOop* p) { + ShouldNotReachHere(); + } + + virtual bool do_metadata() { + return true; + } + + virtual void do_klass(Klass* k) { + ClassLoaderData* const cld = k->class_loader_data(); + XHeapIteratorOopClosure::do_cld(cld); + } + + virtual void do_cld(ClassLoaderData* cld) { + class NativeAccessClosure : public OopClosure { + private: + const XHeapIteratorContext& _context; + + public: + explicit NativeAccessClosure(const XHeapIteratorContext& context) : + _context(context) {} + + virtual void do_oop(oop* p) { + assert(!XCollectedHeap::heap()->is_in(p), "Should not be in heap"); + const oop obj = NativeAccess::oop_load(p); + _context.mark_and_push(obj); + } + + virtual void do_oop(narrowOop* p) { + ShouldNotReachHere(); + } + }; + + NativeAccessClosure cl(_context); + cld->oops_do(&cl, ClassLoaderData::_claim_other); + } + + // Don't follow loom stack metadata; it's already followed in other ways through CLDs + virtual void do_nmethod(nmethod* nm) {} + virtual void do_method(Method* m) {} +}; + +XHeapIterator::XHeapIterator(uint nworkers, bool visit_weaks) : + _visit_weaks(visit_weaks), + _timer_disable(), + _bitmaps(XAddressOffsetMax), + _bitmaps_lock(), + _queues(nworkers), + _array_queues(nworkers), + _roots(ClassLoaderData::_claim_other), + _weak_roots(), + _terminator(nworkers, &_queues) { + + // Create queues + for (uint i = 0; i < _queues.size(); i++) { + XHeapIteratorQueue* const queue = new XHeapIteratorQueue(); + _queues.register_queue(i, queue); + } + + // Create array queues + for (uint i = 0; i < _array_queues.size(); i++) { + XHeapIteratorArrayQueue* const array_queue = new XHeapIteratorArrayQueue(); + _array_queues.register_queue(i, array_queue); + } +} + +XHeapIterator::~XHeapIterator() { + // Destroy bitmaps + XHeapIteratorBitMapsIterator iter(&_bitmaps); + for (XHeapIteratorBitMap* bitmap; iter.next(&bitmap);) { + delete bitmap; + } + + // Destroy array queues + for (uint i = 0; i < _array_queues.size(); i++) { + delete _array_queues.queue(i); + } + + // Destroy queues + for (uint i = 0; i < _queues.size(); i++) { + delete _queues.queue(i); + } + + // Clear claimed CLD bits + ClassLoaderDataGraph::clear_claimed_marks(ClassLoaderData::_claim_other); +} + +static size_t object_index_max() { + return XGranuleSize >> XObjectAlignmentSmallShift; +} + +static size_t object_index(oop obj) { + const uintptr_t addr = XOop::to_address(obj); + const uintptr_t offset = XAddress::offset(addr); + const uintptr_t mask = XGranuleSize - 1; + return (offset & mask) >> XObjectAlignmentSmallShift; +} + +XHeapIteratorBitMap* XHeapIterator::object_bitmap(oop obj) { + const uintptr_t offset = XAddress::offset(XOop::to_address(obj)); + XHeapIteratorBitMap* bitmap = _bitmaps.get_acquire(offset); + if (bitmap == NULL) { + XLocker locker(&_bitmaps_lock); + bitmap = _bitmaps.get(offset); + if (bitmap == NULL) { + // Install new bitmap + bitmap = new XHeapIteratorBitMap(object_index_max()); + _bitmaps.release_put(offset, bitmap); + } + } + + return bitmap; +} + +bool XHeapIterator::mark_object(oop obj) { + if (obj == NULL) { + return false; + } + + XHeapIteratorBitMap* const bitmap = object_bitmap(obj); + const size_t index = object_index(obj); + return bitmap->try_set_bit(index); +} + +typedef ClaimingCLDToOopClosure XHeapIteratorCLDCLosure; + +class XHeapIteratorNMethodClosure : public NMethodClosure { +private: + OopClosure* 
const _cl; + BarrierSetNMethod* const _bs_nm; + +public: + XHeapIteratorNMethodClosure(OopClosure* cl) : + _cl(cl), + _bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {} + + virtual void do_nmethod(nmethod* nm) { + // If ClassUnloading is turned off, all nmethods are considered strong, + // not only those on the call stacks. The heap iteration might happen + // before the concurrent processign of the code cache, make sure that + // all nmethods have been processed before visiting the oops. + _bs_nm->nmethod_entry_barrier(nm); + + XNMethod::nmethod_oops_do(nm, _cl); + } +}; + +class XHeapIteratorThreadClosure : public ThreadClosure { +private: + OopClosure* const _cl; + CodeBlobToNMethodClosure _cb_cl; + +public: + XHeapIteratorThreadClosure(OopClosure* cl, NMethodClosure* nm_cl) : + _cl(cl), + _cb_cl(nm_cl) {} + + void do_thread(Thread* thread) { + thread->oops_do(_cl, &_cb_cl); + } +}; + +void XHeapIterator::push_strong_roots(const XHeapIteratorContext& context) { + XHeapIteratorRootOopClosure cl(context); + XHeapIteratorCLDCLosure cld_cl(&cl); + XHeapIteratorNMethodClosure nm_cl(&cl); + XHeapIteratorThreadClosure thread_cl(&cl, &nm_cl); + + _roots.apply(&cl, + &cld_cl, + &thread_cl, + &nm_cl); +} + +void XHeapIterator::push_weak_roots(const XHeapIteratorContext& context) { + XHeapIteratorRootOopClosure cl(context); + _weak_roots.apply(&cl); +} + +template +void XHeapIterator::push_roots(const XHeapIteratorContext& context) { + push_strong_roots(context); + if (VisitWeaks) { + push_weak_roots(context); + } +} + +template +void XHeapIterator::follow_object(const XHeapIteratorContext& context, oop obj) { + XHeapIteratorOopClosure cl(context, obj); + obj->oop_iterate(&cl); +} + +void XHeapIterator::follow_array(const XHeapIteratorContext& context, oop obj) { + // Follow klass + XHeapIteratorOopClosure cl(context, obj); + cl.do_klass(obj->klass()); + + // Push array chunk + context.push_array(ObjArrayTask(obj, 0 /* index */)); +} + +void XHeapIterator::follow_array_chunk(const XHeapIteratorContext& context, const ObjArrayTask& array) { + const objArrayOop obj = objArrayOop(array.obj()); + const int length = obj->length(); + const int start = array.index(); + const int stride = MIN2(length - start, ObjArrayMarkingStride); + const int end = start + stride; + + // Push remaining array chunk first + if (end < length) { + context.push_array(ObjArrayTask(obj, end)); + } + + // Follow array chunk + XHeapIteratorOopClosure cl(context, obj); + obj->oop_iterate_range(&cl, start, end); +} + +template +void XHeapIterator::visit_and_follow(const XHeapIteratorContext& context, ObjectClosure* cl, oop obj) { + // Visit + cl->do_object(obj); + + // Follow + if (obj->is_objArray()) { + follow_array(context, obj); + } else { + follow_object(context, obj); + } +} + +template +void XHeapIterator::drain(const XHeapIteratorContext& context, ObjectClosure* cl) { + ObjArrayTask array; + oop obj; + + do { + while (context.pop(obj)) { + visit_and_follow(context, cl, obj); + } + + if (context.pop_array(array)) { + follow_array_chunk(context, array); + } + } while (!context.is_drained()); +} + +template +void XHeapIterator::steal(const XHeapIteratorContext& context, ObjectClosure* cl) { + ObjArrayTask array; + oop obj; + + if (context.steal_array(array)) { + follow_array_chunk(context, array); + } else if (context.steal(obj)) { + visit_and_follow(context, cl, obj); + } +} + +template +void XHeapIterator::drain_and_steal(const XHeapIteratorContext& context, ObjectClosure* cl) { + do { + drain(context, cl); + 
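      // Annotation, not part of the patch: standard work-stealing termination.
      // Each worker drains its own queues, then tries to steal a single unit of
      // work from a peer; the enclosing loop exits only once this context is
      // drained and the TaskTerminator agrees that all workers are idle.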
steal(context, cl); + } while (!context.is_drained() || !_terminator.offer_termination()); +} + +template +void XHeapIterator::object_iterate_inner(const XHeapIteratorContext& context, ObjectClosure* object_cl) { + push_roots(context); + drain_and_steal(context, object_cl); +} + +void XHeapIterator::object_iterate(ObjectClosure* cl, uint worker_id) { + XHeapIteratorContext context(this, worker_id); + + if (_visit_weaks) { + object_iterate_inner(context, cl); + } else { + object_iterate_inner(context, cl); + } +} diff --git a/src/hotspot/share/gc/x/xHeapIterator.hpp b/src/hotspot/share/gc/x/xHeapIterator.hpp new file mode 100644 index 0000000000000..0d990a616f886 --- /dev/null +++ b/src/hotspot/share/gc/x/xHeapIterator.hpp @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XHEAPITERATOR_HPP +#define SHARE_GC_X_XHEAPITERATOR_HPP + +#include "gc/shared/collectedHeap.hpp" +#include "gc/shared/taskTerminator.hpp" +#include "gc/shared/taskqueue.hpp" +#include "gc/x/xGranuleMap.hpp" +#include "gc/x/xLock.hpp" +#include "gc/x/xRootsIterator.hpp" +#include "gc/x/xStat.hpp" + +class XHeapIteratorBitMap; +class XHeapIteratorContext; + +using XHeapIteratorBitMaps = XGranuleMap; +using XHeapIteratorBitMapsIterator = XGranuleMapIterator; +using XHeapIteratorQueue = OverflowTaskQueue; +using XHeapIteratorQueues = GenericTaskQueueSet; +using XHeapIteratorArrayQueue = OverflowTaskQueue; +using XHeapIteratorArrayQueues = GenericTaskQueueSet; + +class XHeapIterator : public ParallelObjectIteratorImpl { + friend class XHeapIteratorContext; + +private: + const bool _visit_weaks; + XStatTimerDisable _timer_disable; + XHeapIteratorBitMaps _bitmaps; + XLock _bitmaps_lock; + XHeapIteratorQueues _queues; + XHeapIteratorArrayQueues _array_queues; + XRootsIterator _roots; + XWeakRootsIterator _weak_roots; + TaskTerminator _terminator; + + XHeapIteratorBitMap* object_bitmap(oop obj); + + bool mark_object(oop obj); + + void push_strong_roots(const XHeapIteratorContext& context); + void push_weak_roots(const XHeapIteratorContext& context); + + template + void push_roots(const XHeapIteratorContext& context); + + template + void follow_object(const XHeapIteratorContext& context, oop obj); + + void follow_array(const XHeapIteratorContext& context, oop obj); + void follow_array_chunk(const XHeapIteratorContext& context, const ObjArrayTask& array); + + template + void visit_and_follow(const XHeapIteratorContext& context, ObjectClosure* cl, oop obj); + + template + void drain(const XHeapIteratorContext& context, ObjectClosure* cl); + + template + void steal(const XHeapIteratorContext& context, ObjectClosure* cl); + + template + void drain_and_steal(const XHeapIteratorContext& context, ObjectClosure* cl); + + template + void object_iterate_inner(const XHeapIteratorContext& context, ObjectClosure* cl); + +public: + XHeapIterator(uint nworkers, bool visit_weaks); + virtual ~XHeapIterator(); + + virtual void object_iterate(ObjectClosure* cl, uint worker_id); +}; + +#endif // SHARE_GC_X_XHEAPITERATOR_HPP diff --git a/src/hotspot/share/gc/x/xHeuristics.cpp b/src/hotspot/share/gc/x/xHeuristics.cpp new file mode 100644 index 0000000000000..ec89fa41919da --- /dev/null +++ b/src/hotspot/share/gc/x/xHeuristics.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/x/xCPU.inline.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xHeuristics.hpp" +#include "runtime/globals.hpp" +#include "runtime/os.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/powerOfTwo.hpp" + +void XHeuristics::set_medium_page_size() { + // Set XPageSizeMedium so that a medium page occupies at most 3.125% of the + // max heap size. XPageSizeMedium is initially set to 0, which means medium + // pages are effectively disabled. It is adjusted only if XPageSizeMedium + // becomes larger than XPageSizeSmall. + const size_t min = XGranuleSize; + const size_t max = XGranuleSize * 16; + const size_t unclamped = MaxHeapSize * 0.03125; + const size_t clamped = clamp(unclamped, min, max); + const size_t size = round_down_power_of_2(clamped); + + if (size > XPageSizeSmall) { + // Enable medium pages + XPageSizeMedium = size; + XPageSizeMediumShift = log2i_exact(XPageSizeMedium); + XObjectSizeLimitMedium = XPageSizeMedium / 8; + XObjectAlignmentMediumShift = (int)XPageSizeMediumShift - 13; + XObjectAlignmentMedium = 1 << XObjectAlignmentMediumShift; + } +} + +size_t XHeuristics::relocation_headroom() { + // Calculate headroom needed to avoid in-place relocation. Each worker will try + // to allocate a small page, and all workers will share a single medium page. + const uint nworkers = UseDynamicNumberOfGCThreads ? ConcGCThreads : MAX2(ConcGCThreads, ParallelGCThreads); + return (nworkers * XPageSizeSmall) + XPageSizeMedium; +} + +bool XHeuristics::use_per_cpu_shared_small_pages() { + // Use per-CPU shared small pages only if these pages occupy at most 3.125% + // of the max heap size. Otherwise fall back to using a single shared small + // page. This is useful when using small heaps on large machines. + const size_t per_cpu_share = (MaxHeapSize * 0.03125) / XCPU::count(); + return per_cpu_share >= XPageSizeSmall; +} + +static uint nworkers_based_on_ncpus(double cpu_share_in_percent) { + return ceil(os::initial_active_processor_count() * cpu_share_in_percent / 100.0); +} + +static uint nworkers_based_on_heap_size(double heap_share_in_percent) { + const int nworkers = (MaxHeapSize * (heap_share_in_percent / 100.0)) / XPageSizeSmall; + return MAX2(nworkers, 1); +} + +static uint nworkers(double cpu_share_in_percent) { + // Cap number of workers so that they don't use more than 2% of the max heap + // during relocation. This is useful when using small heaps on large machines. + return MIN2(nworkers_based_on_ncpus(cpu_share_in_percent), + nworkers_based_on_heap_size(2.0)); +} + +uint XHeuristics::nparallel_workers() { + // Use 60% of the CPUs, rounded up. We would like to use as many threads as + // possible to increase parallelism. However, using a thread count that is + // close to the number of processors tends to lead to over-provisioning and + // scheduling latency issues. Using 60% of the active processors appears to + // be a fairly good balance. + return nworkers(60.0); +} + +uint XHeuristics::nconcurrent_workers() { + // The number of concurrent threads we would like to use heavily depends + // on the type of workload we are running. 
Using too many threads will have + // a negative impact on the application throughput, while using too few + // threads will prolong the GC-cycle and we then risk being out-run by the + // application. When in dynamic mode, use up to 25% of the active processors. + // When in non-dynamic mode, use 12.5% of the active processors. + return nworkers(UseDynamicNumberOfGCThreads ? 25.0 : 12.5); +} diff --git a/src/hotspot/share/gc/x/xHeuristics.hpp b/src/hotspot/share/gc/x/xHeuristics.hpp new file mode 100644 index 0000000000000..2ca798257b233 --- /dev/null +++ b/src/hotspot/share/gc/x/xHeuristics.hpp @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XHEURISTICS_HPP +#define SHARE_GC_X_XHEURISTICS_HPP + +#include "memory/allStatic.hpp" + +class XHeuristics : public AllStatic { +public: + static void set_medium_page_size(); + + static size_t relocation_headroom(); + + static bool use_per_cpu_shared_small_pages(); + + static uint nparallel_workers(); + static uint nconcurrent_workers(); +}; + +#endif // SHARE_GC_X_XHEURISTICS_HPP diff --git a/src/hotspot/share/gc/x/xInitialize.cpp b/src/hotspot/share/gc/x/xInitialize.cpp new file mode 100644 index 0000000000000..01b79f3ffd780 --- /dev/null +++ b/src/hotspot/share/gc/x/xInitialize.cpp @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/x/xAddress.hpp" +#include "gc/x/xBarrierSet.hpp" +#include "gc/x/xCPU.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xHeuristics.hpp" +#include "gc/x/xInitialize.hpp" +#include "gc/x/xLargePages.hpp" +#include "gc/x/xNUMA.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xThreadLocalAllocBuffer.hpp" +#include "gc/x/xTracer.hpp" +#include "logging/log.hpp" +#include "runtime/vm_version.hpp" + +XInitialize::XInitialize(XBarrierSet* barrier_set) { + log_info(gc, init)("Initializing %s", XName); + log_info(gc, init)("Version: %s (%s)", + VM_Version::vm_release(), + VM_Version::jdk_debug_level()); + log_info(gc, init)("Using legacy single-generation mode"); + + // Early initialization + XAddress::initialize(); + XNUMA::initialize(); + XCPU::initialize(); + XStatValue::initialize(); + XThreadLocalAllocBuffer::initialize(); + XTracer::initialize(); + XLargePages::initialize(); + XHeuristics::set_medium_page_size(); + XBarrierSet::set_barrier_set(barrier_set); + + pd_initialize(); +} diff --git a/src/hotspot/share/gc/x/xInitialize.hpp b/src/hotspot/share/gc/x/xInitialize.hpp new file mode 100644 index 0000000000000..30e7b65293ed6 --- /dev/null +++ b/src/hotspot/share/gc/x/xInitialize.hpp @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XINITIALIZE_HPP +#define SHARE_GC_X_XINITIALIZE_HPP + +#include "memory/allocation.hpp" + +class XBarrierSet; + +class XInitialize { +private: + void pd_initialize(); + +public: + XInitialize(XBarrierSet* barrier_set); +}; + +#endif // SHARE_GC_X_XINITIALIZE_HPP diff --git a/src/hotspot/share/gc/x/xLargePages.cpp b/src/hotspot/share/gc/x/xLargePages.cpp new file mode 100644 index 0000000000000..13da763c6a39c --- /dev/null +++ b/src/hotspot/share/gc/x/xLargePages.cpp @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/x/xLargePages.hpp" +#include "runtime/os.hpp" + +XLargePages::State XLargePages::_state; + +void XLargePages::initialize() { + pd_initialize(); + + log_info_p(gc, init)("Memory: " JULONG_FORMAT "M", os::physical_memory() / M); + log_info_p(gc, init)("Large Page Support: %s", to_string()); +} + +const char* XLargePages::to_string() { + switch (_state) { + case Explicit: + return "Enabled (Explicit)"; + + case Transparent: + return "Enabled (Transparent)"; + + default: + return "Disabled"; + } +} diff --git a/src/hotspot/share/gc/x/xLargePages.hpp b/src/hotspot/share/gc/x/xLargePages.hpp new file mode 100644 index 0000000000000..562e83ffbd088 --- /dev/null +++ b/src/hotspot/share/gc/x/xLargePages.hpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XLARGEPAGES_HPP +#define SHARE_GC_X_XLARGEPAGES_HPP + +#include "memory/allStatic.hpp" + +class XLargePages : public AllStatic { +private: + enum State { + Disabled, + Explicit, + Transparent + }; + + static State _state; + + static void pd_initialize(); + +public: + static void initialize(); + + static bool is_enabled(); + static bool is_explicit(); + static bool is_transparent(); + + static const char* to_string(); +}; + +#endif // SHARE_GC_X_XLARGEPAGES_HPP diff --git a/src/hotspot/share/gc/x/xLargePages.inline.hpp b/src/hotspot/share/gc/x/xLargePages.inline.hpp new file mode 100644 index 0000000000000..2f027c3b17605 --- /dev/null +++ b/src/hotspot/share/gc/x/xLargePages.inline.hpp @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XLARGEPAGES_INLINE_HPP +#define SHARE_GC_X_XLARGEPAGES_INLINE_HPP + +#include "gc/x/xLargePages.hpp" + +inline bool XLargePages::is_enabled() { + return _state != Disabled; +} + +inline bool XLargePages::is_explicit() { + return _state == Explicit; +} + +inline bool XLargePages::is_transparent() { + return _state == Transparent; +} + +#endif // SHARE_GC_X_XLARGEPAGES_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xList.hpp b/src/hotspot/share/gc/x/xList.hpp new file mode 100644 index 0000000000000..d689704d65388 --- /dev/null +++ b/src/hotspot/share/gc/x/xList.hpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XLIST_HPP +#define SHARE_GC_X_XLIST_HPP + +#include "memory/allocation.hpp" +#include "utilities/globalDefinitions.hpp" + +template class XList; + +// Element in a doubly linked list +template +class XListNode { + friend class XList; + +private: + XListNode* _next; + XListNode* _prev; + + NONCOPYABLE(XListNode); + + void verify_links() const; + void verify_links_linked() const; + void verify_links_unlinked() const; + +public: + XListNode(); + ~XListNode(); +}; + +// Doubly linked list +template +class XList { +private: + XListNode _head; + size_t _size; + + NONCOPYABLE(XList); + + void verify_head() const; + + void insert(XListNode* before, XListNode* node); + + XListNode* cast_to_inner(T* elem) const; + T* cast_to_outer(XListNode* node) const; + +public: + XList(); + + size_t size() const; + bool is_empty() const; + + T* first() const; + T* last() const; + T* next(T* elem) const; + T* prev(T* elem) const; + + void insert_first(T* elem); + void insert_last(T* elem); + void insert_before(T* before, T* elem); + void insert_after(T* after, T* elem); + + void remove(T* elem); + T* remove_first(); + T* remove_last(); +}; + +template +class XListIteratorImpl : public StackObj { +private: + const XList* const _list; + T* _next; + +public: + XListIteratorImpl(const XList* list); + + bool next(T** elem); +}; + +template +class XListRemoveIteratorImpl : public StackObj { +private: + XList* const _list; + +public: + XListRemoveIteratorImpl(XList* list); + + bool next(T** elem); +}; + +template using XListIterator = XListIteratorImpl; +template using XListReverseIterator = XListIteratorImpl; +template using XListRemoveIterator = XListRemoveIteratorImpl; + +#endif // SHARE_GC_X_XLIST_HPP diff --git a/src/hotspot/share/gc/x/xList.inline.hpp b/src/hotspot/share/gc/x/xList.inline.hpp new file mode 100644 index 0000000000000..25c28fbda434e --- /dev/null +++ b/src/hotspot/share/gc/x/xList.inline.hpp @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XLIST_INLINE_HPP +#define SHARE_GC_X_XLIST_INLINE_HPP + +#include "gc/x/xList.hpp" + +#include "utilities/debug.hpp" + +template +inline XListNode::XListNode() : + _next(this), + _prev(this) {} + +template +inline XListNode::~XListNode() { + verify_links_unlinked(); +} + +template +inline void XListNode::verify_links() const { + assert(_next->_prev == this, "Corrupt list node"); + assert(_prev->_next == this, "Corrupt list node"); +} + +template +inline void XListNode::verify_links_linked() const { + assert(_next != this, "Should be in a list"); + assert(_prev != this, "Should be in a list"); + verify_links(); +} + +template +inline void XListNode::verify_links_unlinked() const { + assert(_next == this, "Should not be in a list"); + assert(_prev == this, "Should not be in a list"); +} + +template +inline void XList::verify_head() const { + _head.verify_links(); +} + +template +inline void XList::insert(XListNode* before, XListNode* node) { + verify_head(); + + before->verify_links(); + node->verify_links_unlinked(); + + node->_prev = before; + node->_next = before->_next; + before->_next = node; + node->_next->_prev = node; + + before->verify_links_linked(); + node->verify_links_linked(); + + _size++; +} + +template +inline XListNode* XList::cast_to_inner(T* elem) const { + return &elem->_node; +} + +template +inline T* XList::cast_to_outer(XListNode* node) const { + return (T*)((uintptr_t)node - offset_of(T, _node)); +} + +template +inline XList::XList() : + _head(), + _size(0) { + verify_head(); +} + +template +inline size_t XList::size() const { + verify_head(); + return _size; +} + +template +inline bool XList::is_empty() const { + return size() == 0; +} + +template +inline T* XList::first() const { + return is_empty() ? NULL : cast_to_outer(_head._next); +} + +template +inline T* XList::last() const { + return is_empty() ? NULL : cast_to_outer(_head._prev); +} + +template +inline T* XList::next(T* elem) const { + verify_head(); + + XListNode* const node = cast_to_inner(elem); + node->verify_links_linked(); + + XListNode* const next = node->_next; + next->verify_links_linked(); + + return (next == &_head) ? NULL : cast_to_outer(next); +} + +template +inline T* XList::prev(T* elem) const { + verify_head(); + + XListNode* const node = cast_to_inner(elem); + node->verify_links_linked(); + + XListNode* const prev = node->_prev; + prev->verify_links_linked(); + + return (prev == &_head) ? 
NULL : cast_to_outer(prev); +} + +template +inline void XList::insert_first(T* elem) { + insert(&_head, cast_to_inner(elem)); +} + +template +inline void XList::insert_last(T* elem) { + insert(_head._prev, cast_to_inner(elem)); +} + +template +inline void XList::insert_before(T* before, T* elem) { + insert(cast_to_inner(before)->_prev, cast_to_inner(elem)); +} + +template +inline void XList::insert_after(T* after, T* elem) { + insert(cast_to_inner(after), cast_to_inner(elem)); +} + +template +inline void XList::remove(T* elem) { + verify_head(); + + XListNode* const node = cast_to_inner(elem); + node->verify_links_linked(); + + XListNode* const next = node->_next; + XListNode* const prev = node->_prev; + next->verify_links_linked(); + prev->verify_links_linked(); + + node->_next = prev->_next; + node->_prev = next->_prev; + node->verify_links_unlinked(); + + next->_prev = prev; + prev->_next = next; + next->verify_links(); + prev->verify_links(); + + _size--; +} + +template +inline T* XList::remove_first() { + T* elem = first(); + if (elem != NULL) { + remove(elem); + } + + return elem; +} + +template +inline T* XList::remove_last() { + T* elem = last(); + if (elem != NULL) { + remove(elem); + } + + return elem; +} + +template +inline XListIteratorImpl::XListIteratorImpl(const XList* list) : + _list(list), + _next(Forward ? list->first() : list->last()) {} + +template +inline bool XListIteratorImpl::next(T** elem) { + if (_next != NULL) { + *elem = _next; + _next = Forward ? _list->next(_next) : _list->prev(_next); + return true; + } + + // No more elements + return false; +} + +template +inline XListRemoveIteratorImpl::XListRemoveIteratorImpl(XList* list) : + _list(list) {} + +template +inline bool XListRemoveIteratorImpl::next(T** elem) { + *elem = Forward ? _list->remove_first() : _list->remove_last(); + return *elem != NULL; +} + +#endif // SHARE_GC_X_XLIST_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xLiveMap.cpp b/src/hotspot/share/gc/x/xLiveMap.cpp new file mode 100644 index 0000000000000..91ef99754f791 --- /dev/null +++ b/src/hotspot/share/gc/x/xLiveMap.cpp @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/x/xHeap.inline.hpp" +#include "gc/x/xLiveMap.inline.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xThread.inline.hpp" +#include "logging/log.hpp" +#include "runtime/atomic.hpp" +#include "utilities/debug.hpp" +#include "utilities/powerOfTwo.hpp" + +static const XStatCounter XCounterMarkSeqNumResetContention("Contention", "Mark SeqNum Reset Contention", XStatUnitOpsPerSecond); +static const XStatCounter XCounterMarkSegmentResetContention("Contention", "Mark Segment Reset Contention", XStatUnitOpsPerSecond); + +static size_t bitmap_size(uint32_t size, size_t nsegments) { + // We need at least one bit per segment + return MAX2(size, nsegments) * 2; +} + +XLiveMap::XLiveMap(uint32_t size) : + _seqnum(0), + _live_objects(0), + _live_bytes(0), + _segment_live_bits(0), + _segment_claim_bits(0), + _bitmap(bitmap_size(size, nsegments)), + _segment_shift(exact_log2(segment_size())) {} + +void XLiveMap::reset(size_t index) { + const uint32_t seqnum_initializing = (uint32_t)-1; + bool contention = false; + + // Multiple threads can enter here, make sure only one of them + // resets the marking information while the others busy wait. + for (uint32_t seqnum = Atomic::load_acquire(&_seqnum); + seqnum != XGlobalSeqNum; + seqnum = Atomic::load_acquire(&_seqnum)) { + if ((seqnum != seqnum_initializing) && + (Atomic::cmpxchg(&_seqnum, seqnum, seqnum_initializing) == seqnum)) { + // Reset marking information + _live_bytes = 0; + _live_objects = 0; + + // Clear segment claimed/live bits + segment_live_bits().clear(); + segment_claim_bits().clear(); + + assert(_seqnum == seqnum_initializing, "Invalid"); + + // Make sure the newly reset marking information is ordered + // before the update of the page seqnum, such that when the + // up-to-date seqnum is load acquired, the bit maps will not + // contain stale information. 
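+      // The release_store below pairs with the load_acquire of _seqnum in
+      // is_marked() and in the loop condition above: a thread that observes
+      // _seqnum == XGlobalSeqNum is guaranteed to also observe the cleared
+      // segment live/claim bits and the zeroed live counters. Threads that
+      // lost the cmpxchg keep spinning in the loop until this store publishes
+      // the new seqnum.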
+ Atomic::release_store(&_seqnum, XGlobalSeqNum); + break; + } + + // Mark reset contention + if (!contention) { + // Count contention once + XStatInc(XCounterMarkSeqNumResetContention); + contention = true; + + log_trace(gc)("Mark seqnum reset contention, thread: " PTR_FORMAT " (%s), map: " PTR_FORMAT ", bit: " SIZE_FORMAT, + XThread::id(), XThread::name(), p2i(this), index); + } + } +} + +void XLiveMap::reset_segment(BitMap::idx_t segment) { + bool contention = false; + + if (!claim_segment(segment)) { + // Already claimed, wait for live bit to be set + while (!is_segment_live(segment)) { + // Mark reset contention + if (!contention) { + // Count contention once + XStatInc(XCounterMarkSegmentResetContention); + contention = true; + + log_trace(gc)("Mark segment reset contention, thread: " PTR_FORMAT " (%s), map: " PTR_FORMAT ", segment: " SIZE_FORMAT, + XThread::id(), XThread::name(), p2i(this), segment); + } + } + + // Segment is live + return; + } + + // Segment claimed, clear it + const BitMap::idx_t start_index = segment_start(segment); + const BitMap::idx_t end_index = segment_end(segment); + if (segment_size() / BitsPerWord >= 32) { + _bitmap.clear_large_range(start_index, end_index); + } else { + _bitmap.clear_range(start_index, end_index); + } + + // Set live bit + const bool success = set_segment_live(segment); + assert(success, "Should never fail"); +} + +void XLiveMap::resize(uint32_t size) { + const size_t new_bitmap_size = bitmap_size(size, nsegments); + if (_bitmap.size() != new_bitmap_size) { + _bitmap.reinitialize(new_bitmap_size, false /* clear */); + _segment_shift = exact_log2(segment_size()); + } +} diff --git a/src/hotspot/share/gc/x/xLiveMap.hpp b/src/hotspot/share/gc/x/xLiveMap.hpp new file mode 100644 index 0000000000000..7bad774c6c6c9 --- /dev/null +++ b/src/hotspot/share/gc/x/xLiveMap.hpp @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XLIVEMAP_HPP +#define SHARE_GC_X_XLIVEMAP_HPP + +#include "gc/x/xBitMap.hpp" +#include "memory/allocation.hpp" + +class ObjectClosure; + +class XLiveMap { + friend class XLiveMapTest; + +private: + static const size_t nsegments = 64; + + volatile uint32_t _seqnum; + volatile uint32_t _live_objects; + volatile size_t _live_bytes; + BitMap::bm_word_t _segment_live_bits; + BitMap::bm_word_t _segment_claim_bits; + XBitMap _bitmap; + size_t _segment_shift; + + const BitMapView segment_live_bits() const; + const BitMapView segment_claim_bits() const; + + BitMapView segment_live_bits(); + BitMapView segment_claim_bits(); + + BitMap::idx_t segment_size() const; + + BitMap::idx_t segment_start(BitMap::idx_t segment) const; + BitMap::idx_t segment_end(BitMap::idx_t segment) const; + + bool is_segment_live(BitMap::idx_t segment) const; + bool set_segment_live(BitMap::idx_t segment); + + BitMap::idx_t first_live_segment() const; + BitMap::idx_t next_live_segment(BitMap::idx_t segment) const; + BitMap::idx_t index_to_segment(BitMap::idx_t index) const; + + bool claim_segment(BitMap::idx_t segment); + + void reset(size_t index); + void reset_segment(BitMap::idx_t segment); + + void iterate_segment(ObjectClosure* cl, BitMap::idx_t segment, uintptr_t page_start, size_t page_object_alignment_shift); + +public: + XLiveMap(uint32_t size); + + void reset(); + void resize(uint32_t size); + + bool is_marked() const; + + uint32_t live_objects() const; + size_t live_bytes() const; + + bool get(size_t index) const; + bool set(size_t index, bool finalizable, bool& inc_live); + + void inc_live(uint32_t objects, size_t bytes); + + void iterate(ObjectClosure* cl, uintptr_t page_start, size_t page_object_alignment_shift); +}; + +#endif // SHARE_GC_X_XLIVEMAP_HPP diff --git a/src/hotspot/share/gc/x/xLiveMap.inline.hpp b/src/hotspot/share/gc/x/xLiveMap.inline.hpp new file mode 100644 index 0000000000000..f836f9ab4c21f --- /dev/null +++ b/src/hotspot/share/gc/x/xLiveMap.inline.hpp @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XLIVEMAP_INLINE_HPP +#define SHARE_GC_X_XLIVEMAP_INLINE_HPP + +#include "gc/x/xLiveMap.hpp" + +#include "gc/x/xBitMap.inline.hpp" +#include "gc/x/xMark.hpp" +#include "gc/x/xOop.inline.hpp" +#include "gc/x/xUtils.inline.hpp" +#include "runtime/atomic.hpp" +#include "utilities/bitMap.inline.hpp" +#include "utilities/debug.hpp" + +inline void XLiveMap::reset() { + _seqnum = 0; +} + +inline bool XLiveMap::is_marked() const { + return Atomic::load_acquire(&_seqnum) == XGlobalSeqNum; +} + +inline uint32_t XLiveMap::live_objects() const { + assert(XGlobalPhase != XPhaseMark, "Invalid phase"); + return _live_objects; +} + +inline size_t XLiveMap::live_bytes() const { + assert(XGlobalPhase != XPhaseMark, "Invalid phase"); + return _live_bytes; +} + +inline const BitMapView XLiveMap::segment_live_bits() const { + return BitMapView(const_cast(&_segment_live_bits), nsegments); +} + +inline const BitMapView XLiveMap::segment_claim_bits() const { + return BitMapView(const_cast(&_segment_claim_bits), nsegments); +} + +inline BitMapView XLiveMap::segment_live_bits() { + return BitMapView(&_segment_live_bits, nsegments); +} + +inline BitMapView XLiveMap::segment_claim_bits() { + return BitMapView(&_segment_claim_bits, nsegments); +} + +inline bool XLiveMap::is_segment_live(BitMap::idx_t segment) const { + return segment_live_bits().par_at(segment); +} + +inline bool XLiveMap::set_segment_live(BitMap::idx_t segment) { + return segment_live_bits().par_set_bit(segment, memory_order_release); +} + +inline bool XLiveMap::claim_segment(BitMap::idx_t segment) { + return segment_claim_bits().par_set_bit(segment, memory_order_acq_rel); +} + +inline BitMap::idx_t XLiveMap::first_live_segment() const { + return segment_live_bits().find_first_set_bit(0, nsegments); +} + +inline BitMap::idx_t XLiveMap::next_live_segment(BitMap::idx_t segment) const { + return segment_live_bits().find_first_set_bit(segment + 1, nsegments); +} + +inline BitMap::idx_t XLiveMap::segment_size() const { + return _bitmap.size() / nsegments; +} + +inline BitMap::idx_t XLiveMap::index_to_segment(BitMap::idx_t index) const { + return index >> _segment_shift; +} + +inline bool XLiveMap::get(size_t index) const { + BitMap::idx_t segment = index_to_segment(index); + return is_marked() && // Page is marked + is_segment_live(segment) && // Segment is marked + _bitmap.par_at(index, memory_order_relaxed); // Object is marked +} + +inline bool XLiveMap::set(size_t index, bool finalizable, bool& inc_live) { + if (!is_marked()) { + // First object to be marked during this + // cycle, reset marking information. + reset(index); + } + + const BitMap::idx_t segment = index_to_segment(index); + if (!is_segment_live(segment)) { + // First object to be marked in this segment during + // this cycle, reset segment bitmap. 
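+    // Segments are cleared lazily on first use rather than eagerly at mark
+    // start: reset_segment() either claims the segment, clears its bit range
+    // and then sets its live bit, or busy-waits until the claiming thread has
+    // done so. Each segment is therefore cleared by at most one thread per
+    // marking cycle.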
+ reset_segment(segment); + } + + return _bitmap.par_set_bit_pair(index, finalizable, inc_live); +} + +inline void XLiveMap::inc_live(uint32_t objects, size_t bytes) { + Atomic::add(&_live_objects, objects); + Atomic::add(&_live_bytes, bytes); +} + +inline BitMap::idx_t XLiveMap::segment_start(BitMap::idx_t segment) const { + return segment_size() * segment; +} + +inline BitMap::idx_t XLiveMap::segment_end(BitMap::idx_t segment) const { + return segment_start(segment) + segment_size(); +} + +inline void XLiveMap::iterate_segment(ObjectClosure* cl, BitMap::idx_t segment, uintptr_t page_start, size_t page_object_alignment_shift) { + assert(is_segment_live(segment), "Must be"); + + const BitMap::idx_t start_index = segment_start(segment); + const BitMap::idx_t end_index = segment_end(segment); + BitMap::idx_t index = _bitmap.find_first_set_bit(start_index, end_index); + + while (index < end_index) { + // Calculate object address + const uintptr_t addr = page_start + ((index / 2) << page_object_alignment_shift); + + // Get the size of the object before calling the closure, which + // might overwrite the object in case we are relocating in-place. + const size_t size = XUtils::object_size(addr); + + // Apply closure + cl->do_object(XOop::from_address(addr)); + + // Find next bit after this object + const uintptr_t next_addr = align_up(addr + size, 1 << page_object_alignment_shift); + const BitMap::idx_t next_index = ((next_addr - page_start) >> page_object_alignment_shift) * 2; + if (next_index >= end_index) { + // End of live map + break; + } + + index = _bitmap.find_first_set_bit(next_index, end_index); + } +} + +inline void XLiveMap::iterate(ObjectClosure* cl, uintptr_t page_start, size_t page_object_alignment_shift) { + if (is_marked()) { + for (BitMap::idx_t segment = first_live_segment(); segment < nsegments; segment = next_live_segment(segment)) { + // For each live segment + iterate_segment(cl, segment, page_start, page_object_alignment_shift); + } + } +} + +#endif // SHARE_GC_X_XLIVEMAP_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xLock.hpp b/src/hotspot/share/gc/x/xLock.hpp new file mode 100644 index 0000000000000..2ba612d033cc0 --- /dev/null +++ b/src/hotspot/share/gc/x/xLock.hpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XLOCK_HPP +#define SHARE_GC_X_XLOCK_HPP + +#include "memory/allocation.hpp" +#include "runtime/mutex.hpp" + +class XLock { +private: + PlatformMutex _lock; + +public: + void lock(); + bool try_lock(); + void unlock(); +}; + +class XReentrantLock { +private: + XLock _lock; + Thread* volatile _owner; + uint64_t _count; + +public: + XReentrantLock(); + + void lock(); + void unlock(); + + bool is_owned() const; +}; + +class XConditionLock { +private: + PlatformMonitor _lock; + +public: + void lock(); + bool try_lock(); + void unlock(); + + bool wait(uint64_t millis = 0); + void notify(); + void notify_all(); +}; + +template +class XLocker : public StackObj { +private: + T* const _lock; + +public: + XLocker(T* lock); + ~XLocker(); +}; + +#endif // SHARE_GC_X_XLOCK_HPP diff --git a/src/hotspot/share/gc/x/xLock.inline.hpp b/src/hotspot/share/gc/x/xLock.inline.hpp new file mode 100644 index 0000000000000..07a673376a64a --- /dev/null +++ b/src/hotspot/share/gc/x/xLock.inline.hpp @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XLOCK_INLINE_HPP +#define SHARE_GC_X_XLOCK_INLINE_HPP + +#include "gc/x/xLock.hpp" + +#include "runtime/atomic.hpp" +#include "runtime/javaThread.hpp" +#include "runtime/os.inline.hpp" +#include "utilities/debug.hpp" + +inline void XLock::lock() { + _lock.lock(); +} + +inline bool XLock::try_lock() { + return _lock.try_lock(); +} + +inline void XLock::unlock() { + _lock.unlock(); +} + +inline XReentrantLock::XReentrantLock() : + _lock(), + _owner(NULL), + _count(0) {} + +inline void XReentrantLock::lock() { + Thread* const thread = Thread::current(); + Thread* const owner = Atomic::load(&_owner); + + if (owner != thread) { + _lock.lock(); + Atomic::store(&_owner, thread); + } + + _count++; +} + +inline void XReentrantLock::unlock() { + assert(is_owned(), "Invalid owner"); + assert(_count > 0, "Invalid count"); + + _count--; + + if (_count == 0) { + Atomic::store(&_owner, (Thread*)NULL); + _lock.unlock(); + } +} + +inline bool XReentrantLock::is_owned() const { + Thread* const thread = Thread::current(); + Thread* const owner = Atomic::load(&_owner); + return owner == thread; +} + +inline void XConditionLock::lock() { + _lock.lock(); +} + +inline bool XConditionLock::try_lock() { + return _lock.try_lock(); +} + +inline void XConditionLock::unlock() { + _lock.unlock(); +} + +inline bool XConditionLock::wait(uint64_t millis) { + return _lock.wait(millis) == OS_OK; +} + +inline void XConditionLock::notify() { + _lock.notify(); +} + +inline void XConditionLock::notify_all() { + _lock.notify_all(); +} + +template +inline XLocker::XLocker(T* lock) : + _lock(lock) { + if (_lock != NULL) { + _lock->lock(); + } +} + +template +inline XLocker::~XLocker() { + if (_lock != NULL) { + _lock->unlock(); + } +} + +#endif // SHARE_GC_X_XLOCK_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xMark.cpp b/src/hotspot/share/gc/x/xMark.cpp new file mode 100644 index 0000000000000..16574364ef9ee --- /dev/null +++ b/src/hotspot/share/gc/x/xMark.cpp @@ -0,0 +1,875 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "classfile/classLoaderData.hpp" +#include "classfile/classLoaderDataGraph.hpp" +#include "classfile/javaClasses.inline.hpp" +#include "code/nmethod.hpp" +#include "gc/shared/continuationGCSupport.inline.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/shared/stringdedup/stringDedup.hpp" +#include "gc/shared/suspendibleThreadSet.hpp" +#include "gc/x/xAbort.inline.hpp" +#include "gc/x/xBarrier.inline.hpp" +#include "gc/x/xHeap.inline.hpp" +#include "gc/x/xLock.inline.hpp" +#include "gc/x/xMark.inline.hpp" +#include "gc/x/xMarkCache.inline.hpp" +#include "gc/x/xMarkContext.inline.hpp" +#include "gc/x/xMarkStack.inline.hpp" +#include "gc/x/xMarkTerminate.inline.hpp" +#include "gc/x/xNMethod.hpp" +#include "gc/x/xOop.inline.hpp" +#include "gc/x/xPage.hpp" +#include "gc/x/xPageTable.inline.hpp" +#include "gc/x/xRootsIterator.hpp" +#include "gc/x/xStackWatermark.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xTask.hpp" +#include "gc/x/xThread.inline.hpp" +#include "gc/x/xThreadLocalAllocBuffer.hpp" +#include "gc/x/xUtils.inline.hpp" +#include "gc/x/xWorkers.hpp" +#include "logging/log.hpp" +#include "memory/iterator.inline.hpp" +#include "oops/objArrayOop.inline.hpp" +#include "oops/oop.inline.hpp" +#include "runtime/atomic.hpp" +#include "runtime/continuation.hpp" +#include "runtime/handshake.hpp" +#include "runtime/javaThread.hpp" +#include "runtime/prefetch.inline.hpp" +#include "runtime/safepointMechanism.hpp" +#include "runtime/stackWatermark.hpp" +#include "runtime/stackWatermarkSet.inline.hpp" +#include "runtime/threads.hpp" +#include "utilities/align.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/powerOfTwo.hpp" +#include "utilities/ticks.hpp" + +static const XStatSubPhase XSubPhaseConcurrentMark("Concurrent Mark"); +static const XStatSubPhase XSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush"); +static const XStatSubPhase XSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate"); +static const XStatSubPhase XSubPhaseMarkTryComplete("Pause Mark Try Complete"); + +XMark::XMark(XWorkers* workers, XPageTable* page_table) : + _workers(workers), + _page_table(page_table), + _allocator(), + _stripes(), + _terminate(), + _work_terminateflush(true), + _work_nproactiveflush(0), + _work_nterminateflush(0), + _nproactiveflush(0), + _nterminateflush(0), + _ntrycomplete(0), + _ncontinue(0), + _nworkers(0) {} + +bool XMark::is_initialized() const { + return _allocator.is_initialized(); +} + +size_t XMark::calculate_nstripes(uint nworkers) const { + // Calculate the number of stripes from the number of workers we use, + // where the number of stripes must be a power of two and we want to + // have at least one worker per stripe. + const size_t nstripes = round_down_power_of_2(nworkers); + return MIN2(nstripes, XMarkStripesMax); +} + +void XMark::start() { + // Verification + if (ZVerifyMarking) { + verify_all_stacks_empty(); + } + + // Increment global sequence number to invalidate + // marking information for all pages. + XGlobalSeqNum++; + + // Note that we start a marking cycle. + // Unlike other GCs, the color switch implicitly changes the nmethods + // to be armed, and the thread-local disarm values are lazily updated + // when JavaThreads wake up from safepoints. 
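+  // Paired with CodeCache::on_gc_marking_cycle_finish() in XMark::end().
+  // While the cycle is open, XMarkNMethodClosure::do_nmethod() calls
+  // nm->mark_as_maybe_on_stack() for the nmethods it disarms, as part of the
+  // code cache's unloading bookkeeping for this cycle.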
+ CodeCache::on_gc_marking_cycle_start(); + + // Reset flush/continue counters + _nproactiveflush = 0; + _nterminateflush = 0; + _ntrycomplete = 0; + _ncontinue = 0; + + // Set number of workers to use + _nworkers = _workers->active_workers(); + + // Set number of mark stripes to use, based on number + // of workers we will use in the concurrent mark phase. + const size_t nstripes = calculate_nstripes(_nworkers); + _stripes.set_nstripes(nstripes); + + // Update statistics + XStatMark::set_at_mark_start(nstripes); + + // Print worker/stripe distribution + LogTarget(Debug, gc, marking) log; + if (log.is_enabled()) { + log.print("Mark Worker/Stripe Distribution"); + for (uint worker_id = 0; worker_id < _nworkers; worker_id++) { + const XMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id); + const size_t stripe_id = _stripes.stripe_id(stripe); + log.print(" Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")", + worker_id, _nworkers, stripe_id, nstripes); + } + } +} + +void XMark::prepare_work() { + assert(_nworkers == _workers->active_workers(), "Invalid number of workers"); + + // Set number of active workers + _terminate.reset(_nworkers); + + // Reset flush counters + _work_nproactiveflush = _work_nterminateflush = 0; + _work_terminateflush = true; +} + +void XMark::finish_work() { + // Accumulate proactive/terminate flush counters + _nproactiveflush += _work_nproactiveflush; + _nterminateflush += _work_nterminateflush; +} + +bool XMark::is_array(uintptr_t addr) const { + return XOop::from_address(addr)->is_objArray(); +} + +void XMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) { + assert(is_aligned(addr, XMarkPartialArrayMinSize), "Address misaligned"); + XMarkThreadLocalStacks* const stacks = XThreadLocalData::stacks(Thread::current()); + XMarkStripe* const stripe = _stripes.stripe_for_addr(addr); + const uintptr_t offset = XAddress::offset(addr) >> XMarkPartialArrayMinSizeShift; + const uintptr_t length = size / oopSize; + const XMarkStackEntry entry(offset, length, finalizable); + + log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT, + addr, size, _stripes.stripe_id(stripe)); + + stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */); +} + +void XMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) { + assert(size <= XMarkPartialArrayMinSize, "Too large, should be split"); + const size_t length = size / oopSize; + + log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size); + + XBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable); +} + +void XMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) { + assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large"); + assert(size > XMarkPartialArrayMinSize, "Too small, should not be split"); + const uintptr_t start = addr; + const uintptr_t end = start + size; + + // Calculate the aligned middle start/end/size, where the middle start + // should always be greater than the start (hence the +1 below) to make + // sure we always do some follow work, not just split the array into pieces. 
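+  // For illustration, assuming XMarkPartialArrayMinSize is 4K (its value in
+  // upstream ZGC): an array payload spanning [0x10100, 0x13300) gives
+  // middle_start = 0x11000 and middle_end = 0x13000. The trailing part
+  // [0x13000, 0x13300) and the two aligned 4K middle parts are pushed as
+  // partial-array entries, while the leading part [0x10100, 0x11000) is
+  // followed immediately via follow_small_array() below.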
+ const uintptr_t middle_start = align_up(start + 1, XMarkPartialArrayMinSize); + const size_t middle_size = align_down(end - middle_start, XMarkPartialArrayMinSize); + const uintptr_t middle_end = middle_start + middle_size; + + log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT" (" SIZE_FORMAT "), " + "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")", + start, end, size, middle_start, middle_end, middle_size); + + // Push unaligned trailing part + if (end > middle_end) { + const uintptr_t trailing_addr = middle_end; + const size_t trailing_size = end - middle_end; + push_partial_array(trailing_addr, trailing_size, finalizable); + } + + // Push aligned middle part(s) + uintptr_t partial_addr = middle_end; + while (partial_addr > middle_start) { + const size_t parts = 2; + const size_t partial_size = align_up((partial_addr - middle_start) / parts, XMarkPartialArrayMinSize); + partial_addr -= partial_size; + push_partial_array(partial_addr, partial_size, finalizable); + } + + // Follow leading part + assert(start < middle_start, "Miscalculated middle start"); + const uintptr_t leading_addr = start; + const size_t leading_size = middle_start - start; + follow_small_array(leading_addr, leading_size, finalizable); +} + +void XMark::follow_array(uintptr_t addr, size_t size, bool finalizable) { + if (size <= XMarkPartialArrayMinSize) { + follow_small_array(addr, size, finalizable); + } else { + follow_large_array(addr, size, finalizable); + } +} + +void XMark::follow_partial_array(XMarkStackEntry entry, bool finalizable) { + const uintptr_t addr = XAddress::good(entry.partial_array_offset() << XMarkPartialArrayMinSizeShift); + const size_t size = entry.partial_array_length() * oopSize; + + follow_array(addr, size, finalizable); +} + +template +class XMarkBarrierOopClosure : public ClaimMetadataVisitingOopIterateClosure { +public: + XMarkBarrierOopClosure() : + ClaimMetadataVisitingOopIterateClosure(finalizable + ? ClassLoaderData::_claim_finalizable + : ClassLoaderData::_claim_strong, + finalizable + ? NULL + : XHeap::heap()->reference_discoverer()) {} + + virtual void do_oop(oop* p) { + XBarrier::mark_barrier_on_oop_field(p, finalizable); + } + + virtual void do_oop(narrowOop* p) { + ShouldNotReachHere(); + } + + virtual void do_nmethod(nmethod* nm) { + assert(!finalizable, "Can't handle finalizable marking of nmethods"); + nm->run_nmethod_entry_barrier(); + } +}; + +void XMark::follow_array_object(objArrayOop obj, bool finalizable) { + if (finalizable) { + XMarkBarrierOopClosure cl; + cl.do_klass(obj->klass()); + } else { + XMarkBarrierOopClosure cl; + cl.do_klass(obj->klass()); + } + + const uintptr_t addr = (uintptr_t)obj->base(); + const size_t size = (size_t)obj->length() * oopSize; + + follow_array(addr, size, finalizable); +} + +void XMark::follow_object(oop obj, bool finalizable) { + if (ContinuationGCSupport::relativize_stack_chunk(obj)) { + // Loom doesn't support mixing of finalizable marking and strong marking of + // stack chunks. See: RelativizeDerivedOopClosure. 
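+    // The closure used here is the strong (non-finalizable) variant, so stack
+    // chunks are always marked strongly, even when this object was reached on
+    // a finalizable path.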
+ XMarkBarrierOopClosure cl; + obj->oop_iterate(&cl); + return; + } + + if (finalizable) { + XMarkBarrierOopClosure cl; + obj->oop_iterate(&cl); + } else { + XMarkBarrierOopClosure cl; + obj->oop_iterate(&cl); + } +} + +static void try_deduplicate(XMarkContext* context, oop obj) { + if (!StringDedup::is_enabled()) { + // Not enabled + return; + } + + if (!java_lang_String::is_instance(obj)) { + // Not a String object + return; + } + + if (java_lang_String::test_and_set_deduplication_requested(obj)) { + // Already requested deduplication + return; + } + + // Request deduplication + context->string_dedup_requests()->add(obj); +} + +void XMark::mark_and_follow(XMarkContext* context, XMarkStackEntry entry) { + // Decode flags + const bool finalizable = entry.finalizable(); + const bool partial_array = entry.partial_array(); + + if (partial_array) { + follow_partial_array(entry, finalizable); + return; + } + + // Decode object address and additional flags + const uintptr_t addr = entry.object_address(); + const bool mark = entry.mark(); + bool inc_live = entry.inc_live(); + const bool follow = entry.follow(); + + XPage* const page = _page_table->get(addr); + assert(page->is_relocatable(), "Invalid page state"); + + // Mark + if (mark && !page->mark_object(addr, finalizable, inc_live)) { + // Already marked + return; + } + + // Increment live + if (inc_live) { + // Update live objects/bytes for page. We use the aligned object + // size since that is the actual number of bytes used on the page + // and alignment paddings can never be reclaimed. + const size_t size = XUtils::object_size(addr); + const size_t aligned_size = align_up(size, page->object_alignment()); + context->cache()->inc_live(page, aligned_size); + } + + // Follow + if (follow) { + if (is_array(addr)) { + follow_array_object(objArrayOop(XOop::from_address(addr)), finalizable); + } else { + const oop obj = XOop::from_address(addr); + follow_object(obj, finalizable); + + // Try deduplicate + try_deduplicate(context, obj); + } + } +} + +template +bool XMark::drain(XMarkContext* context, T* timeout) { + XMarkStripe* const stripe = context->stripe(); + XMarkThreadLocalStacks* const stacks = context->stacks(); + XMarkStackEntry entry; + + // Drain stripe stacks + while (stacks->pop(&_allocator, &_stripes, stripe, entry)) { + mark_and_follow(context, entry); + + // Check timeout + if (timeout->has_expired()) { + // Timeout + return false; + } + } + + // Success + return !timeout->has_expired(); +} + +bool XMark::try_steal_local(XMarkContext* context) { + XMarkStripe* const stripe = context->stripe(); + XMarkThreadLocalStacks* const stacks = context->stacks(); + + // Try to steal a local stack from another stripe + for (XMarkStripe* victim_stripe = _stripes.stripe_next(stripe); + victim_stripe != stripe; + victim_stripe = _stripes.stripe_next(victim_stripe)) { + XMarkStack* const stack = stacks->steal(&_stripes, victim_stripe); + if (stack != NULL) { + // Success, install the stolen stack + stacks->install(&_stripes, stripe, stack); + return true; + } + } + + // Nothing to steal + return false; +} + +bool XMark::try_steal_global(XMarkContext* context) { + XMarkStripe* const stripe = context->stripe(); + XMarkThreadLocalStacks* const stacks = context->stacks(); + + // Try to steal a stack from another stripe + for (XMarkStripe* victim_stripe = _stripes.stripe_next(stripe); + victim_stripe != stripe; + victim_stripe = _stripes.stripe_next(victim_stripe)) { + XMarkStack* const stack = victim_stripe->steal_stack(); + if (stack != NULL) { + // 
Success, install the stolen stack + stacks->install(&_stripes, stripe, stack); + return true; + } + } + + // Nothing to steal + return false; +} + +bool XMark::try_steal(XMarkContext* context) { + return try_steal_local(context) || try_steal_global(context); +} + +void XMark::idle() const { + os::naked_short_sleep(1); +} + +class XMarkFlushAndFreeStacksClosure : public HandshakeClosure { +private: + XMark* const _mark; + bool _flushed; + +public: + XMarkFlushAndFreeStacksClosure(XMark* mark) : + HandshakeClosure("XMarkFlushAndFreeStacks"), + _mark(mark), + _flushed(false) {} + + void do_thread(Thread* thread) { + if (_mark->flush_and_free(thread)) { + _flushed = true; + } + } + + bool flushed() const { + return _flushed; + } +}; + +bool XMark::flush(bool at_safepoint) { + XMarkFlushAndFreeStacksClosure cl(this); + if (at_safepoint) { + Threads::threads_do(&cl); + } else { + Handshake::execute(&cl); + } + + // Returns true if more work is available + return cl.flushed() || !_stripes.is_empty(); +} + +bool XMark::try_flush(volatile size_t* nflush) { + Atomic::inc(nflush); + + XStatTimer timer(XSubPhaseConcurrentMarkTryFlush); + return flush(false /* at_safepoint */); +} + +bool XMark::try_proactive_flush() { + // Only do proactive flushes from worker 0 + if (XThread::worker_id() != 0) { + return false; + } + + if (Atomic::load(&_work_nproactiveflush) == XMarkProactiveFlushMax || + Atomic::load(&_work_nterminateflush) != 0) { + // Limit reached or we're trying to terminate + return false; + } + + return try_flush(&_work_nproactiveflush); +} + +bool XMark::try_terminate() { + XStatTimer timer(XSubPhaseConcurrentMarkTryTerminate); + + if (_terminate.enter_stage0()) { + // Last thread entered stage 0, flush + if (Atomic::load(&_work_terminateflush) && + Atomic::load(&_work_nterminateflush) != XMarkTerminateFlushMax) { + // Exit stage 0 to allow other threads to continue marking + _terminate.exit_stage0(); + + // Flush before termination + if (!try_flush(&_work_nterminateflush)) { + // No more work available, skip further flush attempts + Atomic::store(&_work_terminateflush, false); + } + + // Don't terminate, regardless of whether we successfully + // flushed out more work or not. We've already exited + // termination stage 0, to allow other threads to continue + // marking, so this thread has to return false and also + // make another round of attempted marking. + return false; + } + } + + for (;;) { + if (_terminate.enter_stage1()) { + // Last thread entered stage 1, terminate + return true; + } + + // Idle to give the other threads + // a chance to enter termination. 
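+    // Rough protocol: a worker enters stage 0 when it runs out of local and
+    // stealable work; the last worker to enter stage 0 performs the flush
+    // above. Workers then try to enter stage 1, and marking terminates only
+    // once all of them are in stage 1. If new work shows up in the meantime,
+    // try_exit_stage0() lets the workers back out and resume marking.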
+ idle(); + + if (!_terminate.try_exit_stage1()) { + // All workers in stage 1, terminate + return true; + } + + if (_terminate.try_exit_stage0()) { + // More work available, don't terminate + return false; + } + } +} + +class XMarkNoTimeout : public StackObj { +public: + bool has_expired() { + // No timeout, but check for signal to abort + return XAbort::should_abort(); + } +}; + +void XMark::work_without_timeout(XMarkContext* context) { + XStatTimer timer(XSubPhaseConcurrentMark); + XMarkNoTimeout no_timeout; + + for (;;) { + if (!drain(context, &no_timeout)) { + // Abort + break; + } + + if (try_steal(context)) { + // Stole work + continue; + } + + if (try_proactive_flush()) { + // Work available + continue; + } + + if (try_terminate()) { + // Terminate + break; + } + } +} + +class XMarkTimeout : public StackObj { +private: + const Ticks _start; + const uint64_t _timeout; + const uint64_t _check_interval; + uint64_t _check_at; + uint64_t _check_count; + bool _expired; + +public: + XMarkTimeout(uint64_t timeout_in_micros) : + _start(Ticks::now()), + _timeout(_start.value() + TimeHelper::micros_to_counter(timeout_in_micros)), + _check_interval(200), + _check_at(_check_interval), + _check_count(0), + _expired(false) {} + + ~XMarkTimeout() { + const Tickspan duration = Ticks::now() - _start; + log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms", + XThread::name(), _expired ? "Expired" : "Completed", + _check_count, TimeHelper::counter_to_millis(duration.value())); + } + + bool has_expired() { + if (++_check_count == _check_at) { + _check_at += _check_interval; + if ((uint64_t)Ticks::now().value() >= _timeout) { + // Timeout + _expired = true; + } + } + + return _expired; + } +}; + +void XMark::work_with_timeout(XMarkContext* context, uint64_t timeout_in_micros) { + XStatTimer timer(XSubPhaseMarkTryComplete); + XMarkTimeout timeout(timeout_in_micros); + + for (;;) { + if (!drain(context, &timeout)) { + // Timed out + break; + } + + if (try_steal(context)) { + // Stole work + continue; + } + + // Terminate + break; + } +} + +void XMark::work(uint64_t timeout_in_micros) { + XMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, XThread::worker_id()); + XMarkThreadLocalStacks* const stacks = XThreadLocalData::stacks(Thread::current()); + XMarkContext context(_stripes.nstripes(), stripe, stacks); + + if (timeout_in_micros == 0) { + work_without_timeout(&context); + } else { + work_with_timeout(&context, timeout_in_micros); + } + + // Flush and publish stacks + stacks->flush(&_allocator, &_stripes); + + // Free remaining stacks + stacks->free(&_allocator); +} + +class XMarkOopClosure : public OopClosure { + virtual void do_oop(oop* p) { + XBarrier::mark_barrier_on_oop_field(p, false /* finalizable */); + } + + virtual void do_oop(narrowOop* p) { + ShouldNotReachHere(); + } +}; + +class XMarkThreadClosure : public ThreadClosure { +private: + OopClosure* const _cl; + +public: + XMarkThreadClosure(OopClosure* cl) : + _cl(cl) { + XThreadLocalAllocBuffer::reset_statistics(); + } + ~XMarkThreadClosure() { + XThreadLocalAllocBuffer::publish_statistics(); + } + virtual void do_thread(Thread* thread) { + JavaThread* const jt = JavaThread::cast(thread); + StackWatermarkSet::finish_processing(jt, _cl, StackWatermarkKind::gc); + XThreadLocalAllocBuffer::update_stats(jt); + } +}; + +class XMarkNMethodClosure : public NMethodClosure { +private: + OopClosure* const _cl; + +public: + XMarkNMethodClosure(OopClosure* cl) : + _cl(cl) {} + + virtual void do_nmethod(nmethod* 
nm) { + XLocker locker(XNMethod::lock_for_nmethod(nm)); + if (XNMethod::is_armed(nm)) { + XNMethod::nmethod_oops_do_inner(nm, _cl); + + // CodeCache unloading support + nm->mark_as_maybe_on_stack(); + + XNMethod::disarm(nm); + } + } +}; + +typedef ClaimingCLDToOopClosure XMarkCLDClosure; + +class XMarkRootsTask : public XTask { +private: + XMark* const _mark; + SuspendibleThreadSetJoiner _sts_joiner; + XRootsIterator _roots; + + XMarkOopClosure _cl; + XMarkCLDClosure _cld_cl; + XMarkThreadClosure _thread_cl; + XMarkNMethodClosure _nm_cl; + +public: + XMarkRootsTask(XMark* mark) : + XTask("XMarkRootsTask"), + _mark(mark), + _sts_joiner(), + _roots(ClassLoaderData::_claim_strong), + _cl(), + _cld_cl(&_cl), + _thread_cl(&_cl), + _nm_cl(&_cl) { + ClassLoaderDataGraph_lock->lock(); + } + + ~XMarkRootsTask() { + ClassLoaderDataGraph_lock->unlock(); + } + + virtual void work() { + _roots.apply(&_cl, + &_cld_cl, + &_thread_cl, + &_nm_cl); + + // Flush and free worker stacks. Needed here since + // the set of workers executing during root scanning + // can be different from the set of workers executing + // during mark. + _mark->flush_and_free(); + } +}; + +class XMarkTask : public XTask { +private: + XMark* const _mark; + const uint64_t _timeout_in_micros; + +public: + XMarkTask(XMark* mark, uint64_t timeout_in_micros = 0) : + XTask("XMarkTask"), + _mark(mark), + _timeout_in_micros(timeout_in_micros) { + _mark->prepare_work(); + } + + ~XMarkTask() { + _mark->finish_work(); + } + + virtual void work() { + _mark->work(_timeout_in_micros); + } +}; + +void XMark::mark(bool initial) { + if (initial) { + XMarkRootsTask task(this); + _workers->run(&task); + } + + XMarkTask task(this); + _workers->run(&task); +} + +bool XMark::try_complete() { + _ntrycomplete++; + + // Use nconcurrent number of worker threads to maintain the + // worker/stripe distribution used during concurrent mark. + XMarkTask task(this, XMarkCompleteTimeout); + _workers->run(&task); + + // Successful if all stripes are empty + return _stripes.is_empty(); +} + +bool XMark::try_end() { + // Flush all mark stacks + if (!flush(true /* at_safepoint */)) { + // Mark completed + return true; + } + + // Try complete marking by doing a limited + // amount of mark work in this phase. + return try_complete(); +} + +bool XMark::end() { + // Try end marking + if (!try_end()) { + // Mark not completed + _ncontinue++; + return false; + } + + // Verification + if (ZVerifyMarking) { + verify_all_stacks_empty(); + } + + // Update statistics + XStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue); + + // Note that we finished a marking cycle. + // Unlike other GCs, we do not arm the nmethods + // when marking terminates. 
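+  // Paired with CodeCache::on_gc_marking_cycle_start() in XMark::start(). We
+  // only reach this point once try_end() has observed all stripes empty after
+  // the final safepoint flush, or after the bounded try_complete() pass
+  // drained the remaining work.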
+ CodeCache::on_gc_marking_cycle_finish(); + + // Mark completed + return true; +} + +void XMark::free() { + // Free any unused mark stack space + _allocator.free(); + + // Update statistics + XStatMark::set_at_mark_free(_allocator.size()); +} + +void XMark::flush_and_free() { + Thread* const thread = Thread::current(); + flush_and_free(thread); +} + +bool XMark::flush_and_free(Thread* thread) { + XMarkThreadLocalStacks* const stacks = XThreadLocalData::stacks(thread); + const bool flushed = stacks->flush(&_allocator, &_stripes); + stacks->free(&_allocator); + return flushed; +} + +class XVerifyMarkStacksEmptyClosure : public ThreadClosure { +private: + const XMarkStripeSet* const _stripes; + +public: + XVerifyMarkStacksEmptyClosure(const XMarkStripeSet* stripes) : + _stripes(stripes) {} + + void do_thread(Thread* thread) { + XMarkThreadLocalStacks* const stacks = XThreadLocalData::stacks(thread); + guarantee(stacks->is_empty(_stripes), "Should be empty"); + } +}; + +void XMark::verify_all_stacks_empty() const { + // Verify thread stacks + XVerifyMarkStacksEmptyClosure cl(&_stripes); + Threads::threads_do(&cl); + + // Verify stripe stacks + guarantee(_stripes.is_empty(), "Should be empty"); +} diff --git a/src/hotspot/share/gc/x/xMark.hpp b/src/hotspot/share/gc/x/xMark.hpp new file mode 100644 index 0000000000000..5e40b79f02e3e --- /dev/null +++ b/src/hotspot/share/gc/x/xMark.hpp @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+#ifndef SHARE_GC_X_XMARK_HPP
+#define SHARE_GC_X_XMARK_HPP
+
+#include "gc/x/xMarkStack.hpp"
+#include "gc/x/xMarkStackAllocator.hpp"
+#include "gc/x/xMarkStackEntry.hpp"
+#include "gc/x/xMarkTerminate.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class Thread;
+class XMarkContext;
+class XPageTable;
+class XWorkers;
+
+class XMark {
+  friend class XMarkTask;
+
+private:
+  XWorkers* const     _workers;
+  XPageTable* const   _page_table;
+  XMarkStackAllocator _allocator;
+  XMarkStripeSet      _stripes;
+  XMarkTerminate      _terminate;
+  volatile bool       _work_terminateflush;
+  volatile size_t     _work_nproactiveflush;
+  volatile size_t     _work_nterminateflush;
+  size_t              _nproactiveflush;
+  size_t              _nterminateflush;
+  size_t              _ntrycomplete;
+  size_t              _ncontinue;
+  uint                _nworkers;
+
+  size_t calculate_nstripes(uint nworkers) const;
+
+  bool is_array(uintptr_t addr) const;
+  void push_partial_array(uintptr_t addr, size_t size, bool finalizable);
+  void follow_small_array(uintptr_t addr, size_t size, bool finalizable);
+  void follow_large_array(uintptr_t addr, size_t size, bool finalizable);
+  void follow_array(uintptr_t addr, size_t size, bool finalizable);
+  void follow_partial_array(XMarkStackEntry entry, bool finalizable);
+  void follow_array_object(objArrayOop obj, bool finalizable);
+  void follow_object(oop obj, bool finalizable);
+  void mark_and_follow(XMarkContext* context, XMarkStackEntry entry);
+
+  template <typename T> bool drain(XMarkContext* context, T* timeout);
+  bool try_steal_local(XMarkContext* context);
+  bool try_steal_global(XMarkContext* context);
+  bool try_steal(XMarkContext* context);
+  void idle() const;
+  bool flush(bool at_safepoint);
+  bool try_proactive_flush();
+  bool try_flush(volatile size_t* nflush);
+  bool try_terminate();
+  bool try_complete();
+  bool try_end();
+
+  void prepare_work();
+  void finish_work();
+
+  void work_without_timeout(XMarkContext* context);
+  void work_with_timeout(XMarkContext* context, uint64_t timeout_in_micros);
+  void work(uint64_t timeout_in_micros);
+
+  void verify_all_stacks_empty() const;
+
+public:
+  XMark(XWorkers* workers, XPageTable* page_table);
+
+  bool is_initialized() const;
+
+  template <bool gc_thread, bool follow, bool finalizable, bool publish>
+  void mark_object(uintptr_t addr);
+
+  void start();
+  void mark(bool initial);
+  bool end();
+  void free();
+
+  void flush_and_free();
+  bool flush_and_free(Thread* thread);
+};
+
+#endif // SHARE_GC_X_XMARK_HPP
diff --git a/src/hotspot/share/gc/x/xMark.inline.hpp b/src/hotspot/share/gc/x/xMark.inline.hpp
new file mode 100644
index 0000000000000..1f8fc81f525c3
--- /dev/null
+++ b/src/hotspot/share/gc/x/xMark.inline.hpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XMARK_INLINE_HPP +#define SHARE_GC_X_XMARK_INLINE_HPP + +#include "gc/x/xMark.hpp" + +#include "gc/x/xAddress.inline.hpp" +#include "gc/x/xMarkStack.inline.hpp" +#include "gc/x/xPage.inline.hpp" +#include "gc/x/xPageTable.inline.hpp" +#include "gc/x/xThreadLocalData.hpp" +#include "runtime/javaThread.hpp" +#include "utilities/debug.hpp" + +// Marking before pushing helps reduce mark stack memory usage. However, +// we only mark before pushing in GC threads to avoid burdening Java threads +// with writing to, and potentially first having to clear, mark bitmaps. +// +// It's also worth noting that while marking an object can be done at any +// time in the marking phase, following an object can only be done after +// root processing has called ClassLoaderDataGraph::clear_claimed_marks(), +// since it otherwise would interact badly with claiming of CLDs. + +template +inline void XMark::mark_object(uintptr_t addr) { + assert(XAddress::is_marked(addr), "Should be marked"); + + XPage* const page = _page_table->get(addr); + if (page->is_allocating()) { + // Already implicitly marked + return; + } + + const bool mark_before_push = gc_thread; + bool inc_live = false; + + if (mark_before_push) { + // Try mark object + if (!page->mark_object(addr, finalizable, inc_live)) { + // Already marked + return; + } + } else { + // Don't push if already marked + if (page->is_object_marked(addr)) { + // Already marked + return; + } + } + + // Push + XMarkThreadLocalStacks* const stacks = XThreadLocalData::stacks(Thread::current()); + XMarkStripe* const stripe = _stripes.stripe_for_addr(addr); + XMarkStackEntry entry(addr, !mark_before_push, inc_live, follow, finalizable); + stacks->push(&_allocator, &_stripes, stripe, entry, publish); +} + +#endif // SHARE_GC_X_XMARK_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xMarkCache.cpp b/src/hotspot/share/gc/x/xMarkCache.cpp new file mode 100644 index 0000000000000..bb70683221ced --- /dev/null +++ b/src/hotspot/share/gc/x/xMarkCache.cpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/x/xMarkCache.inline.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/powerOfTwo.hpp" + +XMarkCacheEntry::XMarkCacheEntry() : + _page(NULL), + _objects(0), + _bytes(0) {} + +XMarkCache::XMarkCache(size_t nstripes) : + _shift(XMarkStripeShift + exact_log2(nstripes)) {} + +XMarkCache::~XMarkCache() { + // Evict all entries + for (size_t i = 0; i < XMarkCacheSize; i++) { + _cache[i].evict(); + } +} + diff --git a/src/hotspot/share/gc/x/xMarkCache.hpp b/src/hotspot/share/gc/x/xMarkCache.hpp new file mode 100644 index 0000000000000..8fbdc87352237 --- /dev/null +++ b/src/hotspot/share/gc/x/xMarkCache.hpp @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XMARKCACHE_HPP +#define SHARE_GC_X_XMARKCACHE_HPP + +#include "gc/x/xGlobals.hpp" +#include "memory/allocation.hpp" + +class XPage; + +class XMarkCacheEntry { +private: + XPage* _page; + uint32_t _objects; + size_t _bytes; + +public: + XMarkCacheEntry(); + + void inc_live(XPage* page, size_t bytes); + void evict(); +}; + +class XMarkCache : public StackObj { +private: + const size_t _shift; + XMarkCacheEntry _cache[XMarkCacheSize]; + +public: + XMarkCache(size_t nstripes); + ~XMarkCache(); + + void inc_live(XPage* page, size_t bytes); +}; + +#endif // SHARE_GC_X_XMARKCACHE_HPP diff --git a/src/hotspot/share/gc/x/xMarkCache.inline.hpp b/src/hotspot/share/gc/x/xMarkCache.inline.hpp new file mode 100644 index 0000000000000..8eaf04a68fed4 --- /dev/null +++ b/src/hotspot/share/gc/x/xMarkCache.inline.hpp @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XMARKCACHE_INLINE_HPP +#define SHARE_GC_X_XMARKCACHE_INLINE_HPP + +#include "gc/x/xMarkCache.hpp" + +#include "gc/x/xPage.inline.hpp" + +inline void XMarkCacheEntry::inc_live(XPage* page, size_t bytes) { + if (_page == page) { + // Cache hit + _objects++; + _bytes += bytes; + } else { + // Cache miss + evict(); + _page = page; + _objects = 1; + _bytes = bytes; + } +} + +inline void XMarkCacheEntry::evict() { + if (_page != NULL) { + // Write cached data out to page + _page->inc_live(_objects, _bytes); + _page = NULL; + } +} + +inline void XMarkCache::inc_live(XPage* page, size_t bytes) { + const size_t mask = XMarkCacheSize - 1; + const size_t index = (page->start() >> _shift) & mask; + _cache[index].inc_live(page, bytes); +} + +#endif // SHARE_GC_X_XMARKCACHE_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xMarkContext.hpp b/src/hotspot/share/gc/x/xMarkContext.hpp new file mode 100644 index 0000000000000..246822931b7b1 --- /dev/null +++ b/src/hotspot/share/gc/x/xMarkContext.hpp @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XMARKCONTEXT_HPP +#define SHARE_GC_X_XMARKCONTEXT_HPP + +#include "gc/x/xMarkCache.hpp" +#include "gc/shared/stringdedup/stringDedup.hpp" +#include "memory/allocation.hpp" + +class XMarkStripe; +class XMarkThreadLocalStacks; + +class XMarkContext : public StackObj { +private: + XMarkCache _cache; + XMarkStripe* const _stripe; + XMarkThreadLocalStacks* const _stacks; + StringDedup::Requests _string_dedup_requests; + +public: + XMarkContext(size_t nstripes, + XMarkStripe* stripe, + XMarkThreadLocalStacks* stacks); + + XMarkCache* cache(); + XMarkStripe* stripe(); + XMarkThreadLocalStacks* stacks(); + StringDedup::Requests* string_dedup_requests(); +}; + +#endif // SHARE_GC_X_XMARKCONTEXT_HPP diff --git a/src/hotspot/share/gc/x/xMarkContext.inline.hpp b/src/hotspot/share/gc/x/xMarkContext.inline.hpp new file mode 100644 index 0000000000000..74a182b67c33a --- /dev/null +++ b/src/hotspot/share/gc/x/xMarkContext.inline.hpp @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XMARKCONTEXT_INLINE_HPP +#define SHARE_GC_X_XMARKCONTEXT_INLINE_HPP + +#include "gc/x/xMarkContext.hpp" + +inline XMarkContext::XMarkContext(size_t nstripes, + XMarkStripe* stripe, + XMarkThreadLocalStacks* stacks) : + _cache(nstripes), + _stripe(stripe), + _stacks(stacks), + _string_dedup_requests() {} + +inline XMarkCache* XMarkContext::cache() { + return &_cache; +} + +inline XMarkStripe* XMarkContext::stripe() { + return _stripe; +} + +inline XMarkThreadLocalStacks* XMarkContext::stacks() { + return _stacks; +} + +inline StringDedup::Requests* XMarkContext::string_dedup_requests() { + return &_string_dedup_requests; +} + +#endif // SHARE_GC_X_XMARKCACHE_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xMarkStack.cpp b/src/hotspot/share/gc/x/xMarkStack.cpp new file mode 100644 index 0000000000000..384dc200a9569 --- /dev/null +++ b/src/hotspot/share/gc/x/xMarkStack.cpp @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/x/xMarkStack.inline.hpp" +#include "gc/x/xMarkStackAllocator.hpp" +#include "logging/log.hpp" +#include "utilities/debug.hpp" +#include "utilities/powerOfTwo.hpp" + +XMarkStripe::XMarkStripe() : + _published(), + _overflowed() {} + +XMarkStripeSet::XMarkStripeSet() : + _nstripes(0), + _nstripes_mask(0), + _stripes() {} + +void XMarkStripeSet::set_nstripes(size_t nstripes) { + assert(is_power_of_2(nstripes), "Must be a power of two"); + assert(is_power_of_2(XMarkStripesMax), "Must be a power of two"); + assert(nstripes >= 1, "Invalid number of stripes"); + assert(nstripes <= XMarkStripesMax, "Invalid number of stripes"); + + _nstripes = nstripes; + _nstripes_mask = nstripes - 1; + + log_debug(gc, marking)("Using " SIZE_FORMAT " mark stripes", _nstripes); +} + +bool XMarkStripeSet::is_empty() const { + for (size_t i = 0; i < _nstripes; i++) { + if (!_stripes[i].is_empty()) { + return false; + } + } + + return true; +} + +XMarkStripe* XMarkStripeSet::stripe_for_worker(uint nworkers, uint worker_id) { + const size_t spillover_limit = (nworkers / _nstripes) * _nstripes; + size_t index; + + if (worker_id < spillover_limit) { + // Not a spillover worker, use natural stripe + index = worker_id & _nstripes_mask; + } else { + // Distribute spillover workers evenly across stripes + const size_t spillover_nworkers = nworkers - spillover_limit; + const size_t spillover_worker_id = worker_id - spillover_limit; + const double spillover_chunk = (double)_nstripes / (double)spillover_nworkers; + index = spillover_worker_id * spillover_chunk; + } + + assert(index < _nstripes, "Invalid index"); + return &_stripes[index]; +} + +XMarkThreadLocalStacks::XMarkThreadLocalStacks() : + _magazine(NULL) { + for (size_t i = 0; i < XMarkStripesMax; i++) { + _stacks[i] = NULL; + } +} + +bool XMarkThreadLocalStacks::is_empty(const XMarkStripeSet* stripes) const { + for (size_t i = 0; i < stripes->nstripes(); i++) { + XMarkStack* const stack = _stacks[i]; + if (stack != NULL) { + return false; + } + } + + return true; +} + +XMarkStack* XMarkThreadLocalStacks::allocate_stack(XMarkStackAllocator* allocator) { + if (_magazine == NULL) { + // Allocate new magazine + _magazine = allocator->alloc_magazine(); + if (_magazine == NULL) { + return NULL; + } + } + + XMarkStack* stack = NULL; + + if (!_magazine->pop(stack)) { + // Magazine is empty, convert magazine into a new stack + _magazine->~XMarkStackMagazine(); + stack = new ((void*)_magazine) XMarkStack(); + _magazine = NULL; + } + + return stack; +} + +void XMarkThreadLocalStacks::free_stack(XMarkStackAllocator* allocator, XMarkStack* stack) { + for (;;) { + if (_magazine == NULL) { + // Convert stack into a new magazine + stack->~XMarkStack(); + _magazine = new ((void*)stack) XMarkStackMagazine(); + return; + } + + if (_magazine->push(stack)) { + // Success + return; + } + + // Free and uninstall full magazine + allocator->free_magazine(_magazine); + _magazine = NULL; + } +} + +bool XMarkThreadLocalStacks::push_slow(XMarkStackAllocator* allocator, + XMarkStripe* stripe, + XMarkStack** stackp, + XMarkStackEntry entry, + bool publish) { + XMarkStack* stack = *stackp; + + for (;;) { + if (stack == NULL) { + // Allocate and install new stack + *stackp = stack = allocate_stack(allocator); + if (stack == NULL) { + // Out of mark stack memory + return false; + } + } + + if (stack->push(entry)) { + // Success + return true; + } + + // Publish/Overflow and uninstall stack + stripe->publish_stack(stack, publish); + *stackp = stack = 
NULL; + } +} + +bool XMarkThreadLocalStacks::pop_slow(XMarkStackAllocator* allocator, + XMarkStripe* stripe, + XMarkStack** stackp, + XMarkStackEntry& entry) { + XMarkStack* stack = *stackp; + + for (;;) { + if (stack == NULL) { + // Try steal and install stack + *stackp = stack = stripe->steal_stack(); + if (stack == NULL) { + // Nothing to steal + return false; + } + } + + if (stack->pop(entry)) { + // Success + return true; + } + + // Free and uninstall stack + free_stack(allocator, stack); + *stackp = stack = NULL; + } +} + +bool XMarkThreadLocalStacks::flush(XMarkStackAllocator* allocator, XMarkStripeSet* stripes) { + bool flushed = false; + + // Flush all stacks + for (size_t i = 0; i < stripes->nstripes(); i++) { + XMarkStripe* const stripe = stripes->stripe_at(i); + XMarkStack** const stackp = &_stacks[i]; + XMarkStack* const stack = *stackp; + if (stack == NULL) { + continue; + } + + // Free/Publish and uninstall stack + if (stack->is_empty()) { + free_stack(allocator, stack); + } else { + stripe->publish_stack(stack); + flushed = true; + } + *stackp = NULL; + } + + return flushed; +} + +void XMarkThreadLocalStacks::free(XMarkStackAllocator* allocator) { + // Free and uninstall magazine + if (_magazine != NULL) { + allocator->free_magazine(_magazine); + _magazine = NULL; + } +} diff --git a/src/hotspot/share/gc/x/xMarkStack.hpp b/src/hotspot/share/gc/x/xMarkStack.hpp new file mode 100644 index 0000000000000..e012b89749d6a --- /dev/null +++ b/src/hotspot/share/gc/x/xMarkStack.hpp @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XMARKSTACK_HPP +#define SHARE_GC_X_XMARKSTACK_HPP + +#include "gc/x/xGlobals.hpp" +#include "gc/x/xMarkStackEntry.hpp" +#include "utilities/globalDefinitions.hpp" + +template +class XStack { +private: + size_t _top; + XStack* _next; + T _slots[S]; + + bool is_full() const; + +public: + XStack(); + + bool is_empty() const; + + bool push(T value); + bool pop(T& value); + + XStack* next() const; + XStack** next_addr(); +}; + +template +class XStackList { +private: + T* volatile _head; + + T* encode_versioned_pointer(const T* stack, uint32_t version) const; + void decode_versioned_pointer(const T* vstack, T** stack, uint32_t* version) const; + +public: + XStackList(); + + bool is_empty() const; + + void push(T* stack); + T* pop(); + + void clear(); +}; + +using XMarkStack = XStack; +using XMarkStackList = XStackList; +using XMarkStackMagazine = XStack; +using XMarkStackMagazineList = XStackList; + +static_assert(sizeof(XMarkStack) == XMarkStackSize, "XMarkStack size mismatch"); +static_assert(sizeof(XMarkStackMagazine) <= XMarkStackSize, "XMarkStackMagazine size too large"); + +class XMarkStripe { +private: + XCACHE_ALIGNED XMarkStackList _published; + XCACHE_ALIGNED XMarkStackList _overflowed; + +public: + XMarkStripe(); + + bool is_empty() const; + + void publish_stack(XMarkStack* stack, bool publish = true); + XMarkStack* steal_stack(); +}; + +class XMarkStripeSet { +private: + size_t _nstripes; + size_t _nstripes_mask; + XMarkStripe _stripes[XMarkStripesMax]; + +public: + XMarkStripeSet(); + + size_t nstripes() const; + void set_nstripes(size_t nstripes); + + bool is_empty() const; + + size_t stripe_id(const XMarkStripe* stripe) const; + XMarkStripe* stripe_at(size_t index); + XMarkStripe* stripe_next(XMarkStripe* stripe); + XMarkStripe* stripe_for_worker(uint nworkers, uint worker_id); + XMarkStripe* stripe_for_addr(uintptr_t addr); +}; + +class XMarkStackAllocator; + +class XMarkThreadLocalStacks { +private: + XMarkStackMagazine* _magazine; + XMarkStack* _stacks[XMarkStripesMax]; + + XMarkStack* allocate_stack(XMarkStackAllocator* allocator); + void free_stack(XMarkStackAllocator* allocator, XMarkStack* stack); + + bool push_slow(XMarkStackAllocator* allocator, + XMarkStripe* stripe, + XMarkStack** stackp, + XMarkStackEntry entry, + bool publish); + + bool pop_slow(XMarkStackAllocator* allocator, + XMarkStripe* stripe, + XMarkStack** stackp, + XMarkStackEntry& entry); + +public: + XMarkThreadLocalStacks(); + + bool is_empty(const XMarkStripeSet* stripes) const; + + void install(XMarkStripeSet* stripes, + XMarkStripe* stripe, + XMarkStack* stack); + + XMarkStack* steal(XMarkStripeSet* stripes, + XMarkStripe* stripe); + + bool push(XMarkStackAllocator* allocator, + XMarkStripeSet* stripes, + XMarkStripe* stripe, + XMarkStackEntry entry, + bool publish); + + bool pop(XMarkStackAllocator* allocator, + XMarkStripeSet* stripes, + XMarkStripe* stripe, + XMarkStackEntry& entry); + + bool flush(XMarkStackAllocator* allocator, + XMarkStripeSet* stripes); + + void free(XMarkStackAllocator* allocator); +}; + +#endif // SHARE_GC_X_XMARKSTACK_HPP diff --git a/src/hotspot/share/gc/x/xMarkStack.inline.hpp b/src/hotspot/share/gc/x/xMarkStack.inline.hpp new file mode 100644 index 0000000000000..95047c0954ae6 --- /dev/null +++ b/src/hotspot/share/gc/x/xMarkStack.inline.hpp @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XMARKSTACK_INLINE_HPP +#define SHARE_GC_X_XMARKSTACK_INLINE_HPP + +#include "gc/x/xMarkStack.hpp" + +#include "utilities/debug.hpp" +#include "runtime/atomic.hpp" + +template +inline XStack::XStack() : + _top(0), + _next(NULL) {} + +template +inline bool XStack::is_empty() const { + return _top == 0; +} + +template +inline bool XStack::is_full() const { + return _top == S; +} + +template +inline bool XStack::push(T value) { + if (is_full()) { + return false; + } + + _slots[_top++] = value; + return true; +} + +template +inline bool XStack::pop(T& value) { + if (is_empty()) { + return false; + } + + value = _slots[--_top]; + return true; +} + +template +inline XStack* XStack::next() const { + return _next; +} + +template +inline XStack** XStack::next_addr() { + return &_next; +} + +template +inline XStackList::XStackList() : + _head(encode_versioned_pointer(NULL, 0)) {} + +template +inline T* XStackList::encode_versioned_pointer(const T* stack, uint32_t version) const { + uint64_t addr; + + if (stack == NULL) { + addr = (uint32_t)-1; + } else { + addr = ((uint64_t)stack - XMarkStackSpaceStart) >> XMarkStackSizeShift; + } + + return (T*)((addr << 32) | (uint64_t)version); +} + +template +inline void XStackList::decode_versioned_pointer(const T* vstack, T** stack, uint32_t* version) const { + const uint64_t addr = (uint64_t)vstack >> 32; + + if (addr == (uint32_t)-1) { + *stack = NULL; + } else { + *stack = (T*)((addr << XMarkStackSizeShift) + XMarkStackSpaceStart); + } + + *version = (uint32_t)(uint64_t)vstack; +} + +template +inline bool XStackList::is_empty() const { + const T* vstack = _head; + T* stack = NULL; + uint32_t version = 0; + + decode_versioned_pointer(vstack, &stack, &version); + return stack == NULL; +} + +template +inline void XStackList::push(T* stack) { + T* vstack = _head; + uint32_t version = 0; + + for (;;) { + decode_versioned_pointer(vstack, stack->next_addr(), &version); + T* const new_vstack = encode_versioned_pointer(stack, version + 1); + T* const prev_vstack = Atomic::cmpxchg(&_head, vstack, new_vstack); + if (prev_vstack == vstack) { + // Success + break; + } + + // Retry + vstack = prev_vstack; + } +} + +template +inline T* XStackList::pop() { + T* vstack = _head; + T* stack = NULL; + uint32_t version = 0; + + for (;;) { + decode_versioned_pointer(vstack, &stack, &version); + if (stack == NULL) { + return NULL; + } + + T* const new_vstack = encode_versioned_pointer(stack->next(), version + 1); + T* const prev_vstack = Atomic::cmpxchg(&_head, vstack, new_vstack); + if (prev_vstack == vstack) { + // Success + return stack; + } + + 
// Retry + vstack = prev_vstack; + } +} + +template +inline void XStackList::clear() { + _head = encode_versioned_pointer(NULL, 0); +} + +inline bool XMarkStripe::is_empty() const { + return _published.is_empty() && _overflowed.is_empty(); +} + +inline void XMarkStripe::publish_stack(XMarkStack* stack, bool publish) { + // A stack is published either on the published list or the overflowed + // list. The published list is used by mutators publishing stacks for GC + // workers to work on, while the overflowed list is used by GC workers + // to publish stacks that overflowed. The intention here is to avoid + // contention between mutators and GC workers as much as possible, while + // still allowing GC workers to help out and steal work from each other. + if (publish) { + _published.push(stack); + } else { + _overflowed.push(stack); + } +} + +inline XMarkStack* XMarkStripe::steal_stack() { + // Steal overflowed stacks first, then published stacks + XMarkStack* const stack = _overflowed.pop(); + if (stack != NULL) { + return stack; + } + + return _published.pop(); +} + +inline size_t XMarkStripeSet::nstripes() const { + return _nstripes; +} + +inline size_t XMarkStripeSet::stripe_id(const XMarkStripe* stripe) const { + const size_t index = ((uintptr_t)stripe - (uintptr_t)_stripes) / sizeof(XMarkStripe); + assert(index < _nstripes, "Invalid index"); + return index; +} + +inline XMarkStripe* XMarkStripeSet::stripe_at(size_t index) { + assert(index < _nstripes, "Invalid index"); + return &_stripes[index]; +} + +inline XMarkStripe* XMarkStripeSet::stripe_next(XMarkStripe* stripe) { + const size_t index = (stripe_id(stripe) + 1) & _nstripes_mask; + assert(index < _nstripes, "Invalid index"); + return &_stripes[index]; +} + +inline XMarkStripe* XMarkStripeSet::stripe_for_addr(uintptr_t addr) { + const size_t index = (addr >> XMarkStripeShift) & _nstripes_mask; + assert(index < _nstripes, "Invalid index"); + return &_stripes[index]; +} + +inline void XMarkThreadLocalStacks::install(XMarkStripeSet* stripes, + XMarkStripe* stripe, + XMarkStack* stack) { + XMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)]; + assert(*stackp == NULL, "Should be empty"); + *stackp = stack; +} + +inline XMarkStack* XMarkThreadLocalStacks::steal(XMarkStripeSet* stripes, + XMarkStripe* stripe) { + XMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)]; + XMarkStack* const stack = *stackp; + if (stack != NULL) { + *stackp = NULL; + } + + return stack; +} + +inline bool XMarkThreadLocalStacks::push(XMarkStackAllocator* allocator, + XMarkStripeSet* stripes, + XMarkStripe* stripe, + XMarkStackEntry entry, + bool publish) { + XMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)]; + XMarkStack* const stack = *stackp; + if (stack != NULL && stack->push(entry)) { + return true; + } + + return push_slow(allocator, stripe, stackp, entry, publish); +} + +inline bool XMarkThreadLocalStacks::pop(XMarkStackAllocator* allocator, + XMarkStripeSet* stripes, + XMarkStripe* stripe, + XMarkStackEntry& entry) { + XMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)]; + XMarkStack* const stack = *stackp; + if (stack != NULL && stack->pop(entry)) { + return true; + } + + return pop_slow(allocator, stripe, stackp, entry); +} + +#endif // SHARE_GC_X_XMARKSTACK_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xMarkStackAllocator.cpp b/src/hotspot/share/gc/x/xMarkStackAllocator.cpp new file mode 100644 index 0000000000000..95924b35392b0 --- /dev/null +++ b/src/hotspot/share/gc/x/xMarkStackAllocator.cpp @@ 
-0,0 +1,221 @@ +/* + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/x/xLock.inline.hpp" +#include "gc/x/xMarkStack.inline.hpp" +#include "gc/x/xMarkStackAllocator.hpp" +#include "logging/log.hpp" +#include "runtime/atomic.hpp" +#include "runtime/os.hpp" +#include "utilities/debug.hpp" + +uintptr_t XMarkStackSpaceStart; + +XMarkStackSpace::XMarkStackSpace() : + _expand_lock(), + _start(0), + _top(0), + _end(0) { + assert(ZMarkStackSpaceLimit >= XMarkStackSpaceExpandSize, "ZMarkStackSpaceLimit too small"); + + // Reserve address space + const size_t size = ZMarkStackSpaceLimit; + const uintptr_t addr = (uintptr_t)os::reserve_memory(size, !ExecMem, mtGC); + if (addr == 0) { + log_error_pd(gc, marking)("Failed to reserve address space for mark stacks"); + return; + } + + // Successfully initialized + _start = _top = _end = addr; + + // Register mark stack space start + XMarkStackSpaceStart = _start; + + // Prime space + _end += expand_space(); +} + +bool XMarkStackSpace::is_initialized() const { + return _start != 0; +} + +size_t XMarkStackSpace::size() const { + return _end - _start; +} + +size_t XMarkStackSpace::used() const { + return _top - _start; +} + +size_t XMarkStackSpace::expand_space() { + const size_t expand_size = XMarkStackSpaceExpandSize; + const size_t old_size = size(); + const size_t new_size = old_size + expand_size; + + if (new_size > ZMarkStackSpaceLimit) { + // Expansion limit reached. This is a fatal error since we + // currently can't recover from running out of mark stack space. + fatal("Mark stack space exhausted. Use -XX:ZMarkStackSpaceLimit= to increase the " + "maximum number of bytes allocated for mark stacks. 
Current limit is " SIZE_FORMAT "M.", + ZMarkStackSpaceLimit / M); + } + + log_debug(gc, marking)("Expanding mark stack space: " SIZE_FORMAT "M->" SIZE_FORMAT "M", + old_size / M, new_size / M); + + // Expand + os::commit_memory_or_exit((char*)_end, expand_size, false /* executable */, "Mark stack space"); + + return expand_size; +} + +size_t XMarkStackSpace::shrink_space() { + // Shrink to what is currently used + const size_t old_size = size(); + const size_t new_size = align_up(used(), XMarkStackSpaceExpandSize); + const size_t shrink_size = old_size - new_size; + + if (shrink_size > 0) { + // Shrink + log_debug(gc, marking)("Shrinking mark stack space: " SIZE_FORMAT "M->" SIZE_FORMAT "M", + old_size / M, new_size / M); + + const uintptr_t shrink_start = _end - shrink_size; + os::uncommit_memory((char*)shrink_start, shrink_size, false /* executable */); + } + + return shrink_size; +} + +uintptr_t XMarkStackSpace::alloc_space(size_t size) { + uintptr_t top = Atomic::load(&_top); + + for (;;) { + const uintptr_t end = Atomic::load(&_end); + const uintptr_t new_top = top + size; + if (new_top > end) { + // Not enough space left + return 0; + } + + const uintptr_t prev_top = Atomic::cmpxchg(&_top, top, new_top); + if (prev_top == top) { + // Success + return top; + } + + // Retry + top = prev_top; + } +} + +uintptr_t XMarkStackSpace::expand_and_alloc_space(size_t size) { + XLocker locker(&_expand_lock); + + // Retry allocation before expanding + uintptr_t addr = alloc_space(size); + if (addr != 0) { + return addr; + } + + // Expand + const size_t expand_size = expand_space(); + + // Increment top before end to make sure another + // thread can't steal out newly expanded space. + addr = Atomic::fetch_then_add(&_top, size); + Atomic::add(&_end, expand_size); + + return addr; +} + +uintptr_t XMarkStackSpace::alloc(size_t size) { + assert(size <= XMarkStackSpaceExpandSize, "Invalid size"); + + const uintptr_t addr = alloc_space(size); + if (addr != 0) { + return addr; + } + + return expand_and_alloc_space(size); +} + +void XMarkStackSpace::free() { + _end -= shrink_space(); + _top = _start; +} + +XMarkStackAllocator::XMarkStackAllocator() : + _freelist(), + _space() {} + +bool XMarkStackAllocator::is_initialized() const { + return _space.is_initialized(); +} + +size_t XMarkStackAllocator::size() const { + return _space.size(); +} + +XMarkStackMagazine* XMarkStackAllocator::create_magazine_from_space(uintptr_t addr, size_t size) { + assert(is_aligned(size, XMarkStackSize), "Invalid size"); + + // Use first stack as magazine + XMarkStackMagazine* const magazine = new ((void*)addr) XMarkStackMagazine(); + for (size_t i = XMarkStackSize; i < size; i += XMarkStackSize) { + XMarkStack* const stack = new ((void*)(addr + i)) XMarkStack(); + const bool success = magazine->push(stack); + assert(success, "Magazine should never get full"); + } + + return magazine; +} + +XMarkStackMagazine* XMarkStackAllocator::alloc_magazine() { + // Try allocating from the free list first + XMarkStackMagazine* const magazine = _freelist.pop(); + if (magazine != NULL) { + return magazine; + } + + // Allocate new magazine + const uintptr_t addr = _space.alloc(XMarkStackMagazineSize); + if (addr == 0) { + return NULL; + } + + return create_magazine_from_space(addr, XMarkStackMagazineSize); +} + +void XMarkStackAllocator::free_magazine(XMarkStackMagazine* magazine) { + _freelist.push(magazine); +} + +void XMarkStackAllocator::free() { + _freelist.clear(); + _space.free(); +} diff --git 
a/src/hotspot/share/gc/x/xMarkStackAllocator.hpp b/src/hotspot/share/gc/x/xMarkStackAllocator.hpp new file mode 100644 index 0000000000000..5e81ae284cf50 --- /dev/null +++ b/src/hotspot/share/gc/x/xMarkStackAllocator.hpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XMARKSTACKALLOCATOR_HPP +#define SHARE_GC_X_XMARKSTACKALLOCATOR_HPP + +#include "gc/x/xGlobals.hpp" +#include "gc/x/xLock.hpp" +#include "utilities/globalDefinitions.hpp" + +class XMarkStackSpace { +private: + XLock _expand_lock; + uintptr_t _start; + volatile uintptr_t _top; + volatile uintptr_t _end; + + size_t used() const; + + size_t expand_space(); + size_t shrink_space(); + + uintptr_t alloc_space(size_t size); + uintptr_t expand_and_alloc_space(size_t size); + +public: + XMarkStackSpace(); + + bool is_initialized() const; + + size_t size() const; + + uintptr_t alloc(size_t size); + void free(); +}; + +class XMarkStackAllocator { +private: + XCACHE_ALIGNED XMarkStackMagazineList _freelist; + XCACHE_ALIGNED XMarkStackSpace _space; + + XMarkStackMagazine* create_magazine_from_space(uintptr_t addr, size_t size); + +public: + XMarkStackAllocator(); + + bool is_initialized() const; + + size_t size() const; + + XMarkStackMagazine* alloc_magazine(); + void free_magazine(XMarkStackMagazine* magazine); + + void free(); +}; + +#endif // SHARE_GC_X_XMARKSTACKALLOCATOR_HPP diff --git a/src/hotspot/share/gc/x/xMarkStackEntry.hpp b/src/hotspot/share/gc/x/xMarkStackEntry.hpp new file mode 100644 index 0000000000000..61df1798df2a0 --- /dev/null +++ b/src/hotspot/share/gc/x/xMarkStackEntry.hpp @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XMARKSTACKENTRY_HPP +#define SHARE_GC_X_XMARKSTACKENTRY_HPP + +#include "gc/x/xBitField.hpp" +#include "memory/allocation.hpp" + +// +// Mark stack entry layout +// ----------------------- +// +// Object entry +// ------------ +// +// 6 +// 3 5 4 3 2 1 0 +// +------------------------------------------------------------------+-+-+-+-+-+ +// |11111111 11111111 11111111 11111111 11111111 11111111 11111111 111|1|1|1|1|1| +// +------------------------------------------------------------------+-+-+-+-+-+ +// | | | | | | +// | 4-4 Mark Flag (1-bit) * | | | | +// | | | | | +// | 3-3 Increment Live Flag (1-bit) * | | | +// | | | | +// | 2-2 Follow Flag (1-bit) * | | +// | | | +// | 1-1 Partial Array Flag (1-bit) * | +// | | +// | 0-0 Final Flag (1-bit) * +// | +// * 63-5 Object Address (59-bits) +// +// +// Partial array entry +// ------------------- +// +// 6 3 3 +// 3 2 1 2 1 0 +// +------------------------------------+---------------------------------+-+-+ +// |11111111 11111111 11111111 11111111 |11111111 11111111 11111111 111111|1|1| +// +------------------------------------+---------------------------------+-+-+ +// | | | | +// | | 1-1 Partial Array Flag (1-bit) * | +// | | | +// | | 0-0 Final Flag (1-bit) * +// | | +// | * 31-2 Partial Array Length (30-bits) +// | +// * 63-32 Partial Array Address Offset (32-bits) +// + +class XMarkStackEntry { +private: + typedef XBitField field_finalizable; + typedef XBitField field_partial_array; + typedef XBitField field_follow; + typedef XBitField field_inc_live; + typedef XBitField field_mark; + typedef XBitField field_object_address; + typedef XBitField field_partial_array_length; + typedef XBitField field_partial_array_offset; + + uint64_t _entry; + +public: + XMarkStackEntry() { + // This constructor is intentionally left empty and does not initialize + // _entry to allow it to be optimized out when instantiating XMarkStack, + // which has a long array of XMarkStackEntry elements, but doesn't care + // what _entry is initialized to. 
+ } + + XMarkStackEntry(uintptr_t object_address, bool mark, bool inc_live, bool follow, bool finalizable) : + _entry(field_object_address::encode(object_address) | + field_mark::encode(mark) | + field_inc_live::encode(inc_live) | + field_follow::encode(follow) | + field_partial_array::encode(false) | + field_finalizable::encode(finalizable)) {} + + XMarkStackEntry(size_t partial_array_offset, size_t partial_array_length, bool finalizable) : + _entry(field_partial_array_offset::encode(partial_array_offset) | + field_partial_array_length::encode(partial_array_length) | + field_partial_array::encode(true) | + field_finalizable::encode(finalizable)) {} + + bool finalizable() const { + return field_finalizable::decode(_entry); + } + + bool partial_array() const { + return field_partial_array::decode(_entry); + } + + size_t partial_array_offset() const { + return field_partial_array_offset::decode(_entry); + } + + size_t partial_array_length() const { + return field_partial_array_length::decode(_entry); + } + + bool follow() const { + return field_follow::decode(_entry); + } + + bool inc_live() const { + return field_inc_live::decode(_entry); + } + + bool mark() const { + return field_mark::decode(_entry); + } + + uintptr_t object_address() const { + return field_object_address::decode(_entry); + } +}; + +#endif // SHARE_GC_X_XMARKSTACKENTRY_HPP diff --git a/src/hotspot/share/gc/x/xMarkTerminate.hpp b/src/hotspot/share/gc/x/xMarkTerminate.hpp new file mode 100644 index 0000000000000..28f18f6e1cbc7 --- /dev/null +++ b/src/hotspot/share/gc/x/xMarkTerminate.hpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XMARKTERMINATE_HPP +#define SHARE_GC_X_XMARKTERMINATE_HPP + +#include "gc/x/xGlobals.hpp" +#include "memory/allocation.hpp" +#include "utilities/globalDefinitions.hpp" + +class XMarkTerminate { +private: + uint _nworkers; + XCACHE_ALIGNED volatile uint _nworking_stage0; + volatile uint _nworking_stage1; + + bool enter_stage(volatile uint* nworking_stage); + void exit_stage(volatile uint* nworking_stage); + bool try_exit_stage(volatile uint* nworking_stage); + +public: + XMarkTerminate(); + + void reset(uint nworkers); + + bool enter_stage0(); + void exit_stage0(); + bool try_exit_stage0(); + + bool enter_stage1(); + bool try_exit_stage1(); +}; + +#endif // SHARE_GC_X_XMARKTERMINATE_HPP diff --git a/src/hotspot/share/gc/x/xMarkTerminate.inline.hpp b/src/hotspot/share/gc/x/xMarkTerminate.inline.hpp new file mode 100644 index 0000000000000..e4b9256ba6b7e --- /dev/null +++ b/src/hotspot/share/gc/x/xMarkTerminate.inline.hpp @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XMARKTERMINATE_INLINE_HPP +#define SHARE_GC_X_XMARKTERMINATE_INLINE_HPP + +#include "gc/x/xMarkTerminate.hpp" + +#include "runtime/atomic.hpp" + +inline XMarkTerminate::XMarkTerminate() : + _nworkers(0), + _nworking_stage0(0), + _nworking_stage1(0) {} + +inline bool XMarkTerminate::enter_stage(volatile uint* nworking_stage) { + return Atomic::sub(nworking_stage, 1u) == 0; +} + +inline void XMarkTerminate::exit_stage(volatile uint* nworking_stage) { + Atomic::add(nworking_stage, 1u); +} + +inline bool XMarkTerminate::try_exit_stage(volatile uint* nworking_stage) { + uint nworking = Atomic::load(nworking_stage); + + for (;;) { + if (nworking == 0) { + return false; + } + + const uint new_nworking = nworking + 1; + const uint prev_nworking = Atomic::cmpxchg(nworking_stage, nworking, new_nworking); + if (prev_nworking == nworking) { + // Success + return true; + } + + // Retry + nworking = prev_nworking; + } +} + +inline void XMarkTerminate::reset(uint nworkers) { + _nworkers = _nworking_stage0 = _nworking_stage1 = nworkers; +} + +inline bool XMarkTerminate::enter_stage0() { + return enter_stage(&_nworking_stage0); +} + +inline void XMarkTerminate::exit_stage0() { + exit_stage(&_nworking_stage0); +} + +inline bool XMarkTerminate::try_exit_stage0() { + return try_exit_stage(&_nworking_stage0); +} + +inline bool XMarkTerminate::enter_stage1() { + return enter_stage(&_nworking_stage1); +} + +inline bool XMarkTerminate::try_exit_stage1() { + return try_exit_stage(&_nworking_stage1); +} + +#endif // SHARE_GC_X_XMARKTERMINATE_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xMemory.cpp b/src/hotspot/share/gc/x/xMemory.cpp new file mode 100644 index 0000000000000..bdfa45339c2a7 --- /dev/null +++ b/src/hotspot/share/gc/x/xMemory.cpp @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/x/xList.inline.hpp" +#include "gc/x/xLock.inline.hpp" +#include "gc/x/xMemory.inline.hpp" + +XMemory* XMemoryManager::create(uintptr_t start, size_t size) { + XMemory* const area = new XMemory(start, size); + if (_callbacks._create != NULL) { + _callbacks._create(area); + } + return area; +} + +void XMemoryManager::destroy(XMemory* area) { + if (_callbacks._destroy != NULL) { + _callbacks._destroy(area); + } + delete area; +} + +void XMemoryManager::shrink_from_front(XMemory* area, size_t size) { + if (_callbacks._shrink_from_front != NULL) { + _callbacks._shrink_from_front(area, size); + } + area->shrink_from_front(size); +} + +void XMemoryManager::shrink_from_back(XMemory* area, size_t size) { + if (_callbacks._shrink_from_back != NULL) { + _callbacks._shrink_from_back(area, size); + } + area->shrink_from_back(size); +} + +void XMemoryManager::grow_from_front(XMemory* area, size_t size) { + if (_callbacks._grow_from_front != NULL) { + _callbacks._grow_from_front(area, size); + } + area->grow_from_front(size); +} + +void XMemoryManager::grow_from_back(XMemory* area, size_t size) { + if (_callbacks._grow_from_back != NULL) { + _callbacks._grow_from_back(area, size); + } + area->grow_from_back(size); +} + +XMemoryManager::Callbacks::Callbacks() : + _create(NULL), + _destroy(NULL), + _shrink_from_front(NULL), + _shrink_from_back(NULL), + _grow_from_front(NULL), + _grow_from_back(NULL) {} + +XMemoryManager::XMemoryManager() : + _freelist(), + _callbacks() {} + +void XMemoryManager::register_callbacks(const Callbacks& callbacks) { + _callbacks = callbacks; +} + +uintptr_t XMemoryManager::peek_low_address() const { + XLocker locker(&_lock); + + const XMemory* const area = _freelist.first(); + if (area != NULL) { + return area->start(); + } + + // Out of memory + return UINTPTR_MAX; +} + +uintptr_t XMemoryManager::alloc_low_address(size_t size) { + XLocker locker(&_lock); + + XListIterator iter(&_freelist); + for (XMemory* area; iter.next(&area);) { + if (area->size() >= size) { + if (area->size() == size) { + // Exact match, remove area + const uintptr_t start = area->start(); + _freelist.remove(area); + destroy(area); + return start; + } else { + // Larger than requested, shrink area + const uintptr_t start = area->start(); + shrink_from_front(area, size); + return start; + } + } + } + + // Out of memory + return UINTPTR_MAX; +} + +uintptr_t XMemoryManager::alloc_low_address_at_most(size_t size, size_t* allocated) { + XLocker locker(&_lock); + + XMemory* area = _freelist.first(); + if (area != NULL) { + if (area->size() <= size) { + // Smaller than or equal to requested, remove area + const uintptr_t start = area->start(); + *allocated = area->size(); + _freelist.remove(area); + destroy(area); + return start; + } else { + // Larger than requested, shrink area + const uintptr_t start = area->start(); + shrink_from_front(area, size); + *allocated = size; + return start; + } + } + + // Out of memory + *allocated = 0; + return UINTPTR_MAX; +} + +uintptr_t XMemoryManager::alloc_high_address(size_t size) { + XLocker locker(&_lock); + + XListReverseIterator iter(&_freelist); + for (XMemory* area; iter.next(&area);) { + if (area->size() >= size) { + if (area->size() == size) { + // Exact match, remove area + const uintptr_t start = area->start(); + _freelist.remove(area); + destroy(area); + return start; + } else { + // Larger than requested, shrink area + shrink_from_back(area, size); + return area->end(); + } + } + } + + // Out of memory + return 
UINTPTR_MAX; +} + +void XMemoryManager::free(uintptr_t start, size_t size) { + assert(start != UINTPTR_MAX, "Invalid address"); + const uintptr_t end = start + size; + + XLocker locker(&_lock); + + XListIterator iter(&_freelist); + for (XMemory* area; iter.next(&area);) { + if (start < area->start()) { + XMemory* const prev = _freelist.prev(area); + if (prev != NULL && start == prev->end()) { + if (end == area->start()) { + // Merge with prev and current area + grow_from_back(prev, size + area->size()); + _freelist.remove(area); + delete area; + } else { + // Merge with prev area + grow_from_back(prev, size); + } + } else if (end == area->start()) { + // Merge with current area + grow_from_front(area, size); + } else { + // Insert new area before current area + assert(end < area->start(), "Areas must not overlap"); + XMemory* const new_area = create(start, size); + _freelist.insert_before(area, new_area); + } + + // Done + return; + } + } + + // Insert last + XMemory* const last = _freelist.last(); + if (last != NULL && start == last->end()) { + // Merge with last area + grow_from_back(last, size); + } else { + // Insert new area last + XMemory* const new_area = create(start, size); + _freelist.insert_last(new_area); + } +} diff --git a/src/hotspot/share/gc/x/xMemory.hpp b/src/hotspot/share/gc/x/xMemory.hpp new file mode 100644 index 0000000000000..2c3739cb44a58 --- /dev/null +++ b/src/hotspot/share/gc/x/xMemory.hpp @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
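XMemoryManager::free() above keeps the free list sorted by address and merges the returned range with its neighbours whenever they are adjacent. A minimal sketch of the same bookkeeping, assuming a std::map keyed by start address in place of the intrusive XList (illustration only, not part of this patch):

#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>

// Return a [start, start + size) range to the free list, merging it with the
// adjacent ranges on either side when they touch.
static void free_range(std::map<uintptr_t, size_t>& freelist, uintptr_t start, size_t size) {
    const uintptr_t end = start + size;

    auto next = freelist.lower_bound(start);
    if (next != freelist.begin()) {
        auto prev = std::prev(next);
        if (prev->first + prev->second == start) {
            // Merge with the area immediately before us
            start = prev->first;
            freelist.erase(prev);
        }
    }
    if (next != freelist.end() && next->first == end) {
        // Merge with the area immediately after us
        size = end + next->second - start;
        freelist.erase(next);
    } else {
        size = end - start;
    }
    freelist[start] = size;
}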
+ */ + +#ifndef SHARE_GC_X_XMEMORY_HPP +#define SHARE_GC_X_XMEMORY_HPP + +#include "gc/x/xList.hpp" +#include "gc/x/xLock.hpp" +#include "memory/allocation.hpp" + +class XMemory : public CHeapObj { + friend class XList; + +private: + uintptr_t _start; + uintptr_t _end; + XListNode _node; + +public: + XMemory(uintptr_t start, size_t size); + + uintptr_t start() const; + uintptr_t end() const; + size_t size() const; + + void shrink_from_front(size_t size); + void shrink_from_back(size_t size); + void grow_from_front(size_t size); + void grow_from_back(size_t size); +}; + +class XMemoryManager { +public: + typedef void (*CreateDestroyCallback)(const XMemory* area); + typedef void (*ResizeCallback)(const XMemory* area, size_t size); + + struct Callbacks { + CreateDestroyCallback _create; + CreateDestroyCallback _destroy; + ResizeCallback _shrink_from_front; + ResizeCallback _shrink_from_back; + ResizeCallback _grow_from_front; + ResizeCallback _grow_from_back; + + Callbacks(); + }; + +private: + mutable XLock _lock; + XList _freelist; + Callbacks _callbacks; + + XMemory* create(uintptr_t start, size_t size); + void destroy(XMemory* area); + void shrink_from_front(XMemory* area, size_t size); + void shrink_from_back(XMemory* area, size_t size); + void grow_from_front(XMemory* area, size_t size); + void grow_from_back(XMemory* area, size_t size); + +public: + XMemoryManager(); + + void register_callbacks(const Callbacks& callbacks); + + uintptr_t peek_low_address() const; + uintptr_t alloc_low_address(size_t size); + uintptr_t alloc_low_address_at_most(size_t size, size_t* allocated); + uintptr_t alloc_high_address(size_t size); + + void free(uintptr_t start, size_t size); +}; + +#endif // SHARE_GC_X_XMEMORY_HPP diff --git a/src/hotspot/share/gc/x/xMemory.inline.hpp b/src/hotspot/share/gc/x/xMemory.inline.hpp new file mode 100644 index 0000000000000..332cdae9160ed --- /dev/null +++ b/src/hotspot/share/gc/x/xMemory.inline.hpp @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XMEMORY_INLINE_HPP +#define SHARE_GC_X_XMEMORY_INLINE_HPP + +#include "gc/x/xMemory.hpp" + +#include "gc/x/xList.inline.hpp" +#include "utilities/debug.hpp" + +inline XMemory::XMemory(uintptr_t start, size_t size) : + _start(start), + _end(start + size) {} + +inline uintptr_t XMemory::start() const { + return _start; +} + +inline uintptr_t XMemory::end() const { + return _end; +} + +inline size_t XMemory::size() const { + return end() - start(); +} + +inline void XMemory::shrink_from_front(size_t size) { + assert(this->size() > size, "Too small"); + _start += size; +} + +inline void XMemory::shrink_from_back(size_t size) { + assert(this->size() > size, "Too small"); + _end -= size; +} + +inline void XMemory::grow_from_front(size_t size) { + assert(start() >= size, "Too big"); + _start -= size; +} + +inline void XMemory::grow_from_back(size_t size) { + _end += size; +} + +#endif // SHARE_GC_X_XMEMORY_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zMessagePort.hpp b/src/hotspot/share/gc/x/xMessagePort.hpp similarity index 79% rename from src/hotspot/share/gc/z/zMessagePort.hpp rename to src/hotspot/share/gc/x/xMessagePort.hpp index 364eddad9f258..205652537968c 100644 --- a/src/hotspot/share/gc/z/zMessagePort.hpp +++ b/src/hotspot/share/gc/x/xMessagePort.hpp @@ -21,28 +21,28 @@ * questions. */ -#ifndef SHARE_GC_Z_ZMESSAGEPORT_HPP -#define SHARE_GC_Z_ZMESSAGEPORT_HPP +#ifndef SHARE_GC_X_XMESSAGEPORT_HPP +#define SHARE_GC_X_XMESSAGEPORT_HPP -#include "gc/z/zFuture.hpp" -#include "gc/z/zList.hpp" +#include "gc/x/xFuture.hpp" +#include "gc/x/xList.hpp" #include "runtime/mutex.hpp" -template class ZMessageRequest; +template class XMessageRequest; template -class ZMessagePort { +class XMessagePort { private: - typedef ZMessageRequest Request; + typedef XMessageRequest Request; mutable Monitor _monitor; bool _has_message; T _message; uint64_t _seqnum; - ZList _queue; + XList _queue; public: - ZMessagePort(); + XMessagePort(); bool is_busy() const; @@ -53,9 +53,9 @@ class ZMessagePort { void ack(); }; -class ZRendezvousPort { +class XRendezvousPort { private: - ZMessagePort _port; + XMessagePort _port; public: void signal(); @@ -63,4 +63,4 @@ class ZRendezvousPort { void ack(); }; -#endif // SHARE_GC_Z_ZMESSAGEPORT_HPP +#endif // SHARE_GC_X_XMESSAGEPORT_HPP diff --git a/src/hotspot/share/gc/z/zMessagePort.inline.hpp b/src/hotspot/share/gc/x/xMessagePort.inline.hpp similarity index 82% rename from src/hotspot/share/gc/z/zMessagePort.inline.hpp rename to src/hotspot/share/gc/x/xMessagePort.inline.hpp index e9e9ba539a204..8007a80eacdf0 100644 --- a/src/hotspot/share/gc/z/zMessagePort.inline.hpp +++ b/src/hotspot/share/gc/x/xMessagePort.inline.hpp @@ -21,24 +21,24 @@ * questions. 
*/ -#ifndef SHARE_GC_Z_ZMESSAGEPORT_INLINE_HPP -#define SHARE_GC_Z_ZMESSAGEPORT_INLINE_HPP +#ifndef SHARE_GC_X_XMESSAGEPORT_INLINE_HPP +#define SHARE_GC_X_XMESSAGEPORT_INLINE_HPP -#include "gc/z/zMessagePort.hpp" +#include "gc/x/xMessagePort.hpp" -#include "gc/z/zFuture.inline.hpp" -#include "gc/z/zList.inline.hpp" +#include "gc/x/xFuture.inline.hpp" +#include "gc/x/xList.inline.hpp" #include "runtime/mutexLocker.hpp" template -class ZMessageRequest : public StackObj { - friend class ZList; +class XMessageRequest : public StackObj { + friend class XList; private: T _message; uint64_t _seqnum; - ZFuture _result; - ZListNode _node; + XFuture _result; + XListNode _node; public: void initialize(T message, uint64_t seqnum) { @@ -65,20 +65,20 @@ class ZMessageRequest : public StackObj { }; template -inline ZMessagePort::ZMessagePort() : - _monitor(Monitor::nosafepoint, "ZMessagePort_lock"), +inline XMessagePort::XMessagePort() : + _monitor(Monitor::nosafepoint, "XMessagePort_lock"), _has_message(false), _seqnum(0), _queue() {} template -inline bool ZMessagePort::is_busy() const { +inline bool XMessagePort::is_busy() const { MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag); return _has_message; } template -inline void ZMessagePort::send_sync(const T& message) { +inline void XMessagePort::send_sync(const T& message) { Request request; { @@ -105,7 +105,7 @@ inline void ZMessagePort::send_sync(const T& message) { } template -inline void ZMessagePort::send_async(const T& message) { +inline void XMessagePort::send_async(const T& message) { MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag); if (!_has_message) { // Post message @@ -116,7 +116,7 @@ inline void ZMessagePort::send_async(const T& message) { } template -inline T ZMessagePort::receive() { +inline T XMessagePort::receive() { MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag); // Wait for message @@ -137,7 +137,7 @@ inline T ZMessagePort::receive() { } template -inline void ZMessagePort::ack() { +inline void XMessagePort::ack() { MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag); if (!_has_message) { @@ -146,7 +146,7 @@ inline void ZMessagePort::ack() { } // Satisfy requests (and duplicates) in queue - ZListIterator iter(&_queue); + XListIterator iter(&_queue); for (Request* request; iter.next(&request);) { if (request->message() == _message && request->seqnum() < _seqnum) { // Dequeue and satisfy request. Note that the dequeue operation must @@ -166,16 +166,16 @@ inline void ZMessagePort::ack() { } } -inline void ZRendezvousPort::signal() { +inline void XRendezvousPort::signal() { _port.send_sync(true /* ignored */); } -inline void ZRendezvousPort::wait() { +inline void XRendezvousPort::wait() { _port.receive(); } -inline void ZRendezvousPort::ack() { +inline void XRendezvousPort::ack() { _port.ack(); } -#endif // SHARE_GC_Z_ZMESSAGEPORT_INLINE_HPP +#endif // SHARE_GC_X_XMESSAGEPORT_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xMetronome.cpp b/src/hotspot/share/gc/x/xMetronome.cpp new file mode 100644 index 0000000000000..7f0b649deb467 --- /dev/null +++ b/src/hotspot/share/gc/x/xMetronome.cpp @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xMetronome.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/timer.hpp" +#include "utilities/ticks.hpp" + +XMetronome::XMetronome(uint64_t hz) : + _monitor(Monitor::nosafepoint, "XMetronome_lock"), + _interval_ms(MILLIUNITS / hz), + _start_ms(0), + _nticks(0), + _stopped(false) {} + +bool XMetronome::wait_for_tick() { + if (_nticks++ == 0) { + // First tick, set start time + const Ticks now = Ticks::now(); + _start_ms = TimeHelper::counter_to_millis(now.value()); + } + + MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag); + + while (!_stopped) { + // We might wake up spuriously from wait, so always recalculate + // the timeout after a wakeup to see if we need to wait again. + const Ticks now = Ticks::now(); + const uint64_t now_ms = TimeHelper::counter_to_millis(now.value()); + const uint64_t next_ms = _start_ms + (_interval_ms * _nticks); + const int64_t timeout_ms = next_ms - now_ms; + + if (timeout_ms > 0) { + // Wait + ml.wait(timeout_ms); + } else { + // Tick + if (timeout_ms < 0) { + const uint64_t overslept = -timeout_ms; + if (overslept > _interval_ms) { + // Missed one or more ticks. Bump _nticks accordingly to + // avoid firing a string of immediate ticks to make up + // for the ones we missed. + _nticks += overslept / _interval_ms; + } + } + + return true; + } + } + + // Stopped + return false; +} + +void XMetronome::stop() { + MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag); + _stopped = true; + ml.notify(); +} diff --git a/src/hotspot/share/gc/x/xMetronome.hpp b/src/hotspot/share/gc/x/xMetronome.hpp new file mode 100644 index 0000000000000..8a0f27061c33c --- /dev/null +++ b/src/hotspot/share/gc/x/xMetronome.hpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
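XMetronome::wait_for_tick() above schedules tick N at start + N * interval and, when the thread oversleeps by more than one interval, bumps the tick counter so the missed ticks are skipped rather than fired back to back. The same arithmetic, pulled out into a small standalone helper with made-up names (illustration only, not part of this patch):

#include <cstdint>

// Returns the remaining wait (in ms) before the next tick; a value <= 0 means
// "tick now". On a long oversleep, nticks is bumped so missed ticks are skipped.
static int64_t time_to_next_tick(uint64_t start_ms, uint64_t interval_ms,
                                 uint64_t& nticks, uint64_t now_ms) {
    const uint64_t next_ms = start_ms + interval_ms * nticks;
    const int64_t timeout_ms = (int64_t)(next_ms - now_ms);
    if (timeout_ms < 0) {
        const uint64_t overslept = (uint64_t)-timeout_ms;
        if (overslept > interval_ms) {
            nticks += overslept / interval_ms;
        }
    }
    return timeout_ms;
}

For example, at 100 Hz (a 10 ms interval), waking up 35 ms late skips three ticks and waits for the next tick on the original 10 ms grid.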
+ */ + +#ifndef SHARE_GC_X_XMETRONOME_HPP +#define SHARE_GC_X_XMETRONOME_HPP + +#include "memory/allocation.hpp" +#include "runtime/mutex.hpp" + +class XMetronome : public StackObj { +private: + Monitor _monitor; + const uint64_t _interval_ms; + uint64_t _start_ms; + uint64_t _nticks; + bool _stopped; + +public: + XMetronome(uint64_t hz); + + bool wait_for_tick(); + void stop(); +}; + +#endif // SHARE_GC_X_XMETRONOME_HPP diff --git a/src/hotspot/share/gc/x/xNMethod.cpp b/src/hotspot/share/gc/x/xNMethod.cpp new file mode 100644 index 0000000000000..20d8982bc0779 --- /dev/null +++ b/src/hotspot/share/gc/x/xNMethod.cpp @@ -0,0 +1,366 @@ +/* + * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "code/relocInfo.hpp" +#include "code/nmethod.hpp" +#include "code/icBuffer.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/barrierSetNMethod.hpp" +#include "gc/shared/suspendibleThreadSet.hpp" +#include "gc/x/xBarrier.inline.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xLock.inline.hpp" +#include "gc/x/xNMethod.hpp" +#include "gc/x/xNMethodData.hpp" +#include "gc/x/xNMethodTable.hpp" +#include "gc/x/xTask.hpp" +#include "gc/x/xWorkers.hpp" +#include "logging/log.hpp" +#include "memory/allocation.inline.hpp" +#include "memory/iterator.hpp" +#include "memory/resourceArea.hpp" +#include "memory/universe.hpp" +#include "oops/oop.inline.hpp" +#include "runtime/atomic.hpp" +#include "runtime/continuation.hpp" +#include "utilities/debug.hpp" + +static XNMethodData* gc_data(const nmethod* nm) { + return nm->gc_data(); +} + +static void set_gc_data(nmethod* nm, XNMethodData* data) { + return nm->set_gc_data(data); +} + +void XNMethod::attach_gc_data(nmethod* nm) { + GrowableArray immediate_oops; + bool non_immediate_oops = false; + + // Find all oop relocations + RelocIterator iter(nm); + while (iter.next()) { + if (iter.type() != relocInfo::oop_type) { + // Not an oop + continue; + } + + oop_Relocation* r = iter.oop_reloc(); + + if (!r->oop_is_immediate()) { + // Non-immediate oop found + non_immediate_oops = true; + continue; + } + + if (r->oop_value() != NULL) { + // Non-NULL immediate oop found. NULL oops can safely be + // ignored since the method will be re-registered if they + // are later patched to be non-NULL. 
+ immediate_oops.push(r->oop_addr()); + } + } + + // Attach GC data to nmethod + XNMethodData* data = gc_data(nm); + if (data == NULL) { + data = new XNMethodData(); + set_gc_data(nm, data); + } + + // Attach oops in GC data + XNMethodDataOops* const new_oops = XNMethodDataOops::create(immediate_oops, non_immediate_oops); + XNMethodDataOops* const old_oops = data->swap_oops(new_oops); + XNMethodDataOops::destroy(old_oops); +} + +XReentrantLock* XNMethod::lock_for_nmethod(nmethod* nm) { + return gc_data(nm)->lock(); +} + +void XNMethod::log_register(const nmethod* nm) { + LogTarget(Trace, gc, nmethod) log; + if (!log.is_enabled()) { + return; + } + + const XNMethodDataOops* const oops = gc_data(nm)->oops(); + + log.print("Register NMethod: %s.%s (" PTR_FORMAT "), " + "Compiler: %s, Oops: %d, ImmediateOops: " SIZE_FORMAT ", NonImmediateOops: %s", + nm->method()->method_holder()->external_name(), + nm->method()->name()->as_C_string(), + p2i(nm), + nm->compiler_name(), + nm->oops_count() - 1, + oops->immediates_count(), + oops->has_non_immediates() ? "Yes" : "No"); + + LogTarget(Trace, gc, nmethod, oops) log_oops; + if (!log_oops.is_enabled()) { + return; + } + + // Print nmethod oops table + { + oop* const begin = nm->oops_begin(); + oop* const end = nm->oops_end(); + for (oop* p = begin; p < end; p++) { + const oop o = Atomic::load(p); // C1 PatchingStub may replace it concurrently. + const char* external_name = (o == nullptr) ? "N/A" : o->klass()->external_name(); + log_oops.print(" Oop[" SIZE_FORMAT "] " PTR_FORMAT " (%s)", + (p - begin), p2i(o), external_name); + } + } + + // Print nmethod immediate oops + { + oop** const begin = oops->immediates_begin(); + oop** const end = oops->immediates_end(); + for (oop** p = begin; p < end; p++) { + log_oops.print(" ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)", + (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name()); + } + } +} + +void XNMethod::log_unregister(const nmethod* nm) { + LogTarget(Debug, gc, nmethod) log; + if (!log.is_enabled()) { + return; + } + + log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ")", + nm->method()->method_holder()->external_name(), + nm->method()->name()->as_C_string(), + p2i(nm)); +} + +void XNMethod::register_nmethod(nmethod* nm) { + ResourceMark rm; + + // Create and attach gc data + attach_gc_data(nm); + + log_register(nm); + + XNMethodTable::register_nmethod(nm); + + // Disarm nmethod entry barrier + disarm(nm); +} + +void XNMethod::unregister_nmethod(nmethod* nm) { + ResourceMark rm; + + log_unregister(nm); + + XNMethodTable::unregister_nmethod(nm); + + // Destroy GC data + delete gc_data(nm); +} + +bool XNMethod::supports_entry_barrier(nmethod* nm) { + BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod(); + return bs->supports_entry_barrier(nm); +} + +bool XNMethod::is_armed(nmethod* nm) { + BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod(); + return bs->is_armed(nm); +} + +void XNMethod::disarm(nmethod* nm) { + BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod(); + bs->disarm(nm); +} + +void XNMethod::set_guard_value(nmethod* nm, int value) { + BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod(); + bs->set_guard_value(nm, value); +} + +void XNMethod::nmethod_oops_do(nmethod* nm, OopClosure* cl) { + XLocker locker(XNMethod::lock_for_nmethod(nm)); + XNMethod::nmethod_oops_do_inner(nm, cl); +} + +void XNMethod::nmethod_oops_do_inner(nmethod* nm, OopClosure* cl) { + // 
Process oops table + { + oop* const begin = nm->oops_begin(); + oop* const end = nm->oops_end(); + for (oop* p = begin; p < end; p++) { + if (!Universe::contains_non_oop_word(p)) { + cl->do_oop(p); + } + } + } + + XNMethodDataOops* const oops = gc_data(nm)->oops(); + + // Process immediate oops + { + oop** const begin = oops->immediates_begin(); + oop** const end = oops->immediates_end(); + for (oop** p = begin; p < end; p++) { + if (*p != Universe::non_oop_word()) { + cl->do_oop(*p); + } + } + } + + // Process non-immediate oops + if (oops->has_non_immediates()) { + nm->fix_oop_relocations(); + } +} + +class XNMethodOopClosure : public OopClosure { +public: + virtual void do_oop(oop* p) { + if (XResurrection::is_blocked()) { + XBarrier::keep_alive_barrier_on_phantom_root_oop_field(p); + } else { + XBarrier::load_barrier_on_root_oop_field(p); + } + } + + virtual void do_oop(narrowOop* p) { + ShouldNotReachHere(); + } +}; + +void XNMethod::nmethod_oops_barrier(nmethod* nm) { + XNMethodOopClosure cl; + nmethod_oops_do_inner(nm, &cl); +} + +void XNMethod::nmethods_do_begin() { + XNMethodTable::nmethods_do_begin(); +} + +void XNMethod::nmethods_do_end() { + XNMethodTable::nmethods_do_end(); +} + +void XNMethod::nmethods_do(NMethodClosure* cl) { + XNMethodTable::nmethods_do(cl); +} + +class XNMethodUnlinkClosure : public NMethodClosure { +private: + bool _unloading_occurred; + volatile bool _failed; + + void set_failed() { + Atomic::store(&_failed, true); + } + +public: + XNMethodUnlinkClosure(bool unloading_occurred) : + _unloading_occurred(unloading_occurred), + _failed(false) {} + + virtual void do_nmethod(nmethod* nm) { + if (failed()) { + return; + } + + if (nm->is_unloading()) { + XLocker locker(XNMethod::lock_for_nmethod(nm)); + nm->unlink(); + return; + } + + XLocker locker(XNMethod::lock_for_nmethod(nm)); + + if (XNMethod::is_armed(nm)) { + // Heal oops and arm phase invariantly + XNMethod::nmethod_oops_barrier(nm); + XNMethod::set_guard_value(nm, 0); + } + + // Clear compiled ICs and exception caches + if (!nm->unload_nmethod_caches(_unloading_occurred)) { + set_failed(); + } + } + + bool failed() const { + return Atomic::load(&_failed); + } +}; + +class XNMethodUnlinkTask : public XTask { +private: + XNMethodUnlinkClosure _cl; + ICRefillVerifier* _verifier; + +public: + XNMethodUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) : + XTask("XNMethodUnlinkTask"), + _cl(unloading_occurred), + _verifier(verifier) { + XNMethodTable::nmethods_do_begin(); + } + + ~XNMethodUnlinkTask() { + XNMethodTable::nmethods_do_end(); + } + + virtual void work() { + ICRefillVerifierMark mark(_verifier); + XNMethodTable::nmethods_do(&_cl); + } + + bool success() const { + return !_cl.failed(); + } +}; + +void XNMethod::unlink(XWorkers* workers, bool unloading_occurred) { + for (;;) { + ICRefillVerifier verifier; + + { + XNMethodUnlinkTask task(unloading_occurred, &verifier); + workers->run(&task); + if (task.success()) { + return; + } + } + + // Cleaning failed because we ran out of transitional IC stubs, + // so we have to refill and try again. Refilling requires taking + // a safepoint, so we temporarily leave the suspendible thread set. 
+ SuspendibleThreadSetLeaver sts; + InlineCacheBuffer::refill_ic_stubs(); + } +} + +void XNMethod::purge() { + CodeCache::flush_unlinked_nmethods(); +} diff --git a/src/hotspot/share/gc/x/xNMethod.hpp b/src/hotspot/share/gc/x/xNMethod.hpp new file mode 100644 index 0000000000000..cadb160191014 --- /dev/null +++ b/src/hotspot/share/gc/x/xNMethod.hpp @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XNMETHOD_HPP +#define SHARE_GC_X_XNMETHOD_HPP + +#include "memory/allStatic.hpp" + +class nmethod; +class NMethodClosure; +class XReentrantLock; +class XWorkers; + +class XNMethod : public AllStatic { +private: + static void attach_gc_data(nmethod* nm); + + static void log_register(const nmethod* nm); + static void log_unregister(const nmethod* nm); + +public: + static void register_nmethod(nmethod* nm); + static void unregister_nmethod(nmethod* nm); + + static bool supports_entry_barrier(nmethod* nm); + + static bool is_armed(nmethod* nm); + static void disarm(nmethod* nm); + static void set_guard_value(nmethod* nm, int value); + + static void nmethod_oops_do(nmethod* nm, OopClosure* cl); + static void nmethod_oops_do_inner(nmethod* nm, OopClosure* cl); + + static void nmethod_oops_barrier(nmethod* nm); + + static void nmethods_do_begin(); + static void nmethods_do_end(); + static void nmethods_do(NMethodClosure* cl); + + static XReentrantLock* lock_for_nmethod(nmethod* nm); + + static void unlink(XWorkers* workers, bool unloading_occurred); + static void purge(); +}; + +#endif // SHARE_GC_X_XNMETHOD_HPP diff --git a/src/hotspot/share/gc/x/xNMethodData.cpp b/src/hotspot/share/gc/x/xNMethodData.cpp new file mode 100644 index 0000000000000..f024e0d3cee11 --- /dev/null +++ b/src/hotspot/share/gc/x/xNMethodData.cpp @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xAttachedArray.inline.hpp" +#include "gc/x/xLock.inline.hpp" +#include "gc/x/xNMethodData.hpp" +#include "memory/allocation.hpp" +#include "runtime/atomic.hpp" +#include "utilities/align.hpp" +#include "utilities/debug.hpp" +#include "utilities/growableArray.hpp" + +XNMethodDataOops* XNMethodDataOops::create(const GrowableArray& immediates, bool has_non_immediates) { + return ::new (AttachedArray::alloc(immediates.length())) XNMethodDataOops(immediates, has_non_immediates); +} + +void XNMethodDataOops::destroy(XNMethodDataOops* oops) { + AttachedArray::free(oops); +} + +XNMethodDataOops::XNMethodDataOops(const GrowableArray& immediates, bool has_non_immediates) : + _immediates(immediates.length()), + _has_non_immediates(has_non_immediates) { + // Save all immediate oops + for (size_t i = 0; i < immediates_count(); i++) { + immediates_begin()[i] = immediates.at(int(i)); + } +} + +size_t XNMethodDataOops::immediates_count() const { + return _immediates.length(); +} + +oop** XNMethodDataOops::immediates_begin() const { + return _immediates(this); +} + +oop** XNMethodDataOops::immediates_end() const { + return immediates_begin() + immediates_count(); +} + +bool XNMethodDataOops::has_non_immediates() const { + return _has_non_immediates; +} + +XNMethodData::XNMethodData() : + _lock(), + _oops(NULL) {} + +XNMethodData::~XNMethodData() { + XNMethodDataOops::destroy(_oops); +} + +XReentrantLock* XNMethodData::lock() { + return &_lock; +} + +XNMethodDataOops* XNMethodData::oops() const { + return Atomic::load_acquire(&_oops); +} + +XNMethodDataOops* XNMethodData::swap_oops(XNMethodDataOops* new_oops) { + XLocker locker(&_lock); + XNMethodDataOops* const old_oops = _oops; + _oops = new_oops; + return old_oops; +} diff --git a/src/hotspot/share/gc/x/xNMethodData.hpp b/src/hotspot/share/gc/x/xNMethodData.hpp new file mode 100644 index 0000000000000..f0e45ef5d14ce --- /dev/null +++ b/src/hotspot/share/gc/x/xNMethodData.hpp @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
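XNMethodDataOops::create() above places the object header and its trailing array of immediate-oop addresses in a single allocation via the XAttachedArray helper. A generic sketch of that "attached array" idiom, with plain malloc and made-up names standing in for the GC allocator (illustration only, not the actual XAttachedArray implementation):

#include <cstddef>
#include <cstdlib>
#include <new>

// A header followed directly by 'count' pointer slots, allocated as one block.
struct PtrBlob {
    size_t count;

    void** slots() { return reinterpret_cast<void**>(this + 1); }

    static PtrBlob* create(size_t count) {
        void* const mem = std::malloc(sizeof(PtrBlob) + count * sizeof(void*));
        return ::new (mem) PtrBlob{count};
    }

    static void destroy(PtrBlob* blob) {
        blob->~PtrBlob();
        std::free(blob);
    }
};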
+ */ + +#ifndef SHARE_GC_X_XNMETHODDATA_HPP +#define SHARE_GC_X_XNMETHODDATA_HPP + +#include "gc/x/xAttachedArray.hpp" +#include "gc/x/xLock.hpp" +#include "memory/allocation.hpp" +#include "oops/oopsHierarchy.hpp" +#include "utilities/globalDefinitions.hpp" + +class nmethod; +template class GrowableArray; + +class XNMethodDataOops { +private: + typedef XAttachedArray AttachedArray; + + const AttachedArray _immediates; + const bool _has_non_immediates; + + XNMethodDataOops(const GrowableArray& immediates, bool has_non_immediates); + +public: + static XNMethodDataOops* create(const GrowableArray& immediates, bool has_non_immediates); + static void destroy(XNMethodDataOops* oops); + + size_t immediates_count() const; + oop** immediates_begin() const; + oop** immediates_end() const; + + bool has_non_immediates() const; +}; + +class XNMethodData : public CHeapObj { +private: + XReentrantLock _lock; + XNMethodDataOops* volatile _oops; + +public: + XNMethodData(); + ~XNMethodData(); + + XReentrantLock* lock(); + + XNMethodDataOops* oops() const; + XNMethodDataOops* swap_oops(XNMethodDataOops* oops); +}; + +#endif // SHARE_GC_X_XNMETHODDATA_HPP diff --git a/src/hotspot/share/gc/x/xNMethodTable.cpp b/src/hotspot/share/gc/x/xNMethodTable.cpp new file mode 100644 index 0000000000000..70ceb7a9219b2 --- /dev/null +++ b/src/hotspot/share/gc/x/xNMethodTable.cpp @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "code/relocInfo.hpp" +#include "code/nmethod.hpp" +#include "code/icBuffer.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/barrierSetNMethod.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xHash.inline.hpp" +#include "gc/x/xLock.inline.hpp" +#include "gc/x/xNMethodData.hpp" +#include "gc/x/xNMethodTable.hpp" +#include "gc/x/xNMethodTableEntry.hpp" +#include "gc/x/xNMethodTableIteration.hpp" +#include "gc/x/xSafeDelete.inline.hpp" +#include "gc/x/xTask.hpp" +#include "gc/x/xWorkers.hpp" +#include "logging/log.hpp" +#include "memory/allocation.hpp" +#include "memory/iterator.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/mutexLocker.hpp" +#include "utilities/debug.hpp" +#include "utilities/powerOfTwo.hpp" + +XNMethodTableEntry* XNMethodTable::_table = NULL; +size_t XNMethodTable::_size = 0; +size_t XNMethodTable::_nregistered = 0; +size_t XNMethodTable::_nunregistered = 0; +XNMethodTableIteration XNMethodTable::_iteration; +XSafeDeleteNoLock XNMethodTable::_safe_delete; + +size_t XNMethodTable::first_index(const nmethod* nm, size_t size) { + assert(is_power_of_2(size), "Invalid size"); + const size_t mask = size - 1; + const size_t hash = XHash::address_to_uint32((uintptr_t)nm); + return hash & mask; +} + +size_t XNMethodTable::next_index(size_t prev_index, size_t size) { + assert(is_power_of_2(size), "Invalid size"); + const size_t mask = size - 1; + return (prev_index + 1) & mask; +} + +bool XNMethodTable::register_entry(XNMethodTableEntry* table, size_t size, nmethod* nm) { + const XNMethodTableEntry entry(nm); + size_t index = first_index(nm, size); + + for (;;) { + const XNMethodTableEntry table_entry = table[index]; + + if (!table_entry.registered() && !table_entry.unregistered()) { + // Insert new entry + table[index] = entry; + return true; + } + + if (table_entry.registered() && table_entry.method() == nm) { + // Replace existing entry + table[index] = entry; + return false; + } + + index = next_index(index, size); + } +} + +void XNMethodTable::unregister_entry(XNMethodTableEntry* table, size_t size, nmethod* nm) { + size_t index = first_index(nm, size); + + for (;;) { + const XNMethodTableEntry table_entry = table[index]; + assert(table_entry.registered() || table_entry.unregistered(), "Entry not found"); + + if (table_entry.registered() && table_entry.method() == nm) { + // Remove entry + table[index] = XNMethodTableEntry(true /* unregistered */); + return; + } + + index = next_index(index, size); + } +} + +void XNMethodTable::rebuild(size_t new_size) { + assert(CodeCache_lock->owned_by_self(), "Lock must be held"); + + assert(is_power_of_2(new_size), "Invalid size"); + + log_debug(gc, nmethod)("Rebuilding NMethod Table: " + SIZE_FORMAT "->" SIZE_FORMAT " entries, " + SIZE_FORMAT "(%.0f%%->%.0f%%) registered, " + SIZE_FORMAT "(%.0f%%->%.0f%%) unregistered", + _size, new_size, + _nregistered, percent_of(_nregistered, _size), percent_of(_nregistered, new_size), + _nunregistered, percent_of(_nunregistered, _size), 0.0); + + // Allocate new table + XNMethodTableEntry* const new_table = new XNMethodTableEntry[new_size]; + + // Transfer all registered entries + for (size_t i = 0; i < _size; i++) { + const XNMethodTableEntry entry = _table[i]; + if (entry.registered()) { + register_entry(new_table, new_size, entry.method()); + } + } + + // Free old table + _safe_delete(_table); + + // Install new table + _table = new_table; + _size = new_size; + _nunregistered = 0; +} + +void XNMethodTable::rebuild_if_needed() { 
+ // The hash table uses linear probing. To avoid wasting memory while + // at the same time maintaining good hash collision behavior we want + // to keep the table occupancy between 30% and 70%. The table always + // grows/shrinks by doubling/halving its size. Pruning of unregistered + // entries is done by rebuilding the table with or without resizing it. + const size_t min_size = 1024; + const size_t shrink_threshold = _size * 0.30; + const size_t prune_threshold = _size * 0.65; + const size_t grow_threshold = _size * 0.70; + + if (_size == 0) { + // Initialize table + rebuild(min_size); + } else if (_nregistered < shrink_threshold && _size > min_size) { + // Shrink table + rebuild(_size / 2); + } else if (_nregistered + _nunregistered > grow_threshold) { + // Prune or grow table + if (_nregistered < prune_threshold) { + // Prune table + rebuild(_size); + } else { + // Grow table + rebuild(_size * 2); + } + } +} + +size_t XNMethodTable::registered_nmethods() { + return _nregistered; +} + +size_t XNMethodTable::unregistered_nmethods() { + return _nunregistered; +} + +void XNMethodTable::register_nmethod(nmethod* nm) { + assert(CodeCache_lock->owned_by_self(), "Lock must be held"); + + // Grow/Shrink/Prune table if needed + rebuild_if_needed(); + + // Insert new entry + if (register_entry(_table, _size, nm)) { + // New entry registered. When register_entry() instead returns + // false the nmethod was already in the table so we do not want + // to increase number of registered entries in that case. + _nregistered++; + } +} + +void XNMethodTable::wait_until_iteration_done() { + assert(CodeCache_lock->owned_by_self(), "Lock must be held"); + + while (_iteration.in_progress()) { + CodeCache_lock->wait_without_safepoint_check(); + } +} + +void XNMethodTable::unregister_nmethod(nmethod* nm) { + assert(CodeCache_lock->owned_by_self(), "Lock must be held"); + + // Remove entry + unregister_entry(_table, _size, nm); + _nunregistered++; + _nregistered--; +} + +void XNMethodTable::nmethods_do_begin() { + MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + + // Do not allow the table to be deleted while iterating + _safe_delete.enable_deferred_delete(); + + // Prepare iteration + _iteration.nmethods_do_begin(_table, _size); +} + +void XNMethodTable::nmethods_do_end() { + MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + + // Finish iteration + _iteration.nmethods_do_end(); + + // Allow the table to be deleted + _safe_delete.disable_deferred_delete(); + + // Notify iteration done + CodeCache_lock->notify_all(); +} + +void XNMethodTable::nmethods_do(NMethodClosure* cl) { + _iteration.nmethods_do(cl); +} diff --git a/src/hotspot/share/gc/x/xNMethodTable.hpp b/src/hotspot/share/gc/x/xNMethodTable.hpp new file mode 100644 index 0000000000000..ebb7803a08376 --- /dev/null +++ b/src/hotspot/share/gc/x/xNMethodTable.hpp @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XNMETHODTABLE_HPP +#define SHARE_GC_X_XNMETHODTABLE_HPP + +#include "gc/x/xNMethodTableIteration.hpp" +#include "gc/x/xSafeDelete.hpp" +#include "memory/allStatic.hpp" + +class nmethod; +class NMethodClosure; +class XNMethodTableEntry; +class XWorkers; + +class XNMethodTable : public AllStatic { +private: + static XNMethodTableEntry* _table; + static size_t _size; + static size_t _nregistered; + static size_t _nunregistered; + static XNMethodTableIteration _iteration; + static XSafeDeleteNoLock _safe_delete; + + static XNMethodTableEntry* create(size_t size); + static void destroy(XNMethodTableEntry* table); + + static size_t first_index(const nmethod* nm, size_t size); + static size_t next_index(size_t prev_index, size_t size); + + static bool register_entry(XNMethodTableEntry* table, size_t size, nmethod* nm); + static void unregister_entry(XNMethodTableEntry* table, size_t size, nmethod* nm); + + static void rebuild(size_t new_size); + static void rebuild_if_needed(); + +public: + static size_t registered_nmethods(); + static size_t unregistered_nmethods(); + + static void register_nmethod(nmethod* nm); + static void unregister_nmethod(nmethod* nm); + + static void wait_until_iteration_done(); + + static void nmethods_do_begin(); + static void nmethods_do_end(); + static void nmethods_do(NMethodClosure* cl); + + static void unlink(XWorkers* workers, bool unloading_occurred); + static void purge(XWorkers* workers); +}; + +#endif // SHARE_GC_X_XNMETHODTABLE_HPP diff --git a/src/hotspot/share/gc/x/xNMethodTableEntry.hpp b/src/hotspot/share/gc/x/xNMethodTableEntry.hpp new file mode 100644 index 0000000000000..78138492ef914 --- /dev/null +++ b/src/hotspot/share/gc/x/xNMethodTableEntry.hpp @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XNMETHODTABLEENTRY_HPP +#define SHARE_GC_X_XNMETHODTABLEENTRY_HPP + +#include "gc/x/xBitField.hpp" +#include "memory/allocation.hpp" + +class nmethod; + +// +// NMethod table entry layout +// -------------------------- +// +// 6 +// 3 2 1 0 +// +---------------------------------------------------------------------+-+-+ +// |11111111 11111111 11111111 11111111 11111111 11111111 11111111 111111|1|1| +// +---------------------------------------------------------------------+-+-+ +// | | | +// | 1-1 Unregistered Flag (1-bits) * | +// | | +// | 0-0 Registered Flag (1-bits) * +// | +// * 63-2 NMethod Address (62-bits) +// + +class XNMethodTableEntry : public CHeapObj { +private: + typedef XBitField field_registered; + typedef XBitField field_unregistered; + typedef XBitField field_method; + + uint64_t _entry; + +public: + explicit XNMethodTableEntry(bool unregistered = false) : + _entry(field_registered::encode(false) | + field_unregistered::encode(unregistered) | + field_method::encode(NULL)) {} + + explicit XNMethodTableEntry(nmethod* method) : + _entry(field_registered::encode(true) | + field_unregistered::encode(false) | + field_method::encode(method)) {} + + bool registered() const { + return field_registered::decode(_entry); + } + + bool unregistered() const { + return field_unregistered::decode(_entry); + } + + nmethod* method() const { + return field_method::decode(_entry); + } +}; + +#endif // SHARE_GC_X_XNMETHODTABLEENTRY_HPP diff --git a/src/hotspot/share/gc/x/xNMethodTableIteration.cpp b/src/hotspot/share/gc/x/xNMethodTableIteration.cpp new file mode 100644 index 0000000000000..fd8bd8561b4de --- /dev/null +++ b/src/hotspot/share/gc/x/xNMethodTableIteration.cpp @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
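The entry layout above packs the registered and unregistered flags into the two low (alignment) bits of the nmethod address, which is what the XBitField typedefs express. A standalone sketch of that encoding with plain shifts and masks, assuming nmethod pointers are at least 4-byte aligned (illustration only, not the actual XBitField implementation):

#include <cstdint>

// Bit 0: registered flag, bit 1: unregistered flag, bits 63-2: nmethod address.
static uint64_t encode_entry(uint64_t method_addr, bool registered, bool unregistered) {
    return (method_addr & ~UINT64_C(3)) |
           (uint64_t(unregistered) << 1) |
            uint64_t(registered);
}

static uint64_t decode_method_addr(uint64_t entry) {
    return entry & ~UINT64_C(3);
}

static bool decode_registered(uint64_t entry) {
    return (entry & UINT64_C(1)) != 0;
}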
+ */ + +#include "precompiled.hpp" +#include "gc/x/xNMethodTableEntry.hpp" +#include "gc/x/xNMethodTableIteration.hpp" +#include "memory/iterator.hpp" +#include "runtime/atomic.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" + +XNMethodTableIteration::XNMethodTableIteration() : + _table(NULL), + _size(0), + _claimed(0) {} + +bool XNMethodTableIteration::in_progress() const { + return _table != NULL; +} + +void XNMethodTableIteration::nmethods_do_begin(XNMethodTableEntry* table, size_t size) { + assert(!in_progress(), "precondition"); + + _table = table; + _size = size; + _claimed = 0; +} + +void XNMethodTableIteration::nmethods_do_end() { + assert(_claimed >= _size, "Failed to claim all table entries"); + + // Finish iteration + _table = NULL; +} + +void XNMethodTableIteration::nmethods_do(NMethodClosure* cl) { + for (;;) { + // Claim table partition. Each partition is currently sized to span + // two cache lines. This number is just a guess, but seems to work well. + const size_t partition_size = (XCacheLineSize * 2) / sizeof(XNMethodTableEntry); + const size_t partition_start = MIN2(Atomic::fetch_then_add(&_claimed, partition_size), _size); + const size_t partition_end = MIN2(partition_start + partition_size, _size); + if (partition_start == partition_end) { + // End of table + break; + } + + // Process table partition + for (size_t i = partition_start; i < partition_end; i++) { + const XNMethodTableEntry entry = _table[i]; + if (entry.registered()) { + cl->do_nmethod(entry.method()); + } + } + } +} diff --git a/src/hotspot/share/gc/x/xNMethodTableIteration.hpp b/src/hotspot/share/gc/x/xNMethodTableIteration.hpp new file mode 100644 index 0000000000000..1677b334490fc --- /dev/null +++ b/src/hotspot/share/gc/x/xNMethodTableIteration.hpp @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
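XNMethodTableIteration::nmethods_do() above lets any number of worker threads pull fixed-size partitions off a shared cursor with an atomic fetch-and-add, so every table entry is visited exactly once without further coordination. The same claiming pattern in standalone form (illustration only, not part of this patch; the chunk size and visitor are placeholders):

#include <algorithm>
#include <atomic>
#include <cstddef>

// Each worker thread calls this with the same 'claimed' counter; together they
// visit every element exactly once, one chunk at a time.
template <typename T, typename Visitor>
void visit_in_chunks(const T* table, size_t size, std::atomic<size_t>& claimed,
                     size_t chunk, Visitor&& visit) {
    for (;;) {
        const size_t start = std::min(claimed.fetch_add(chunk), size);
        const size_t end = std::min(start + chunk, size);
        if (start == end) {
            return;  // Everything has been claimed
        }
        for (size_t i = start; i < end; i++) {
            visit(table[i]);
        }
    }
}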
+ */ + +#ifndef SHARE_GC_X_XNMETHODTABLEITERATION_HPP +#define SHARE_GC_X_XNMETHODTABLEITERATION_HPP + +#include "gc/x/xGlobals.hpp" + +class NMethodClosure; +class XNMethodTableEntry; + +class XNMethodTableIteration { +private: + XNMethodTableEntry* _table; + size_t _size; + XCACHE_ALIGNED volatile size_t _claimed; + +public: + XNMethodTableIteration(); + + bool in_progress() const; + + void nmethods_do_begin(XNMethodTableEntry* table, size_t size); + void nmethods_do_end(); + void nmethods_do(NMethodClosure* cl); +}; + +#endif // SHARE_GC_X_XNMETHODTABLEITERATION_HPP diff --git a/src/hotspot/share/gc/x/xNUMA.cpp b/src/hotspot/share/gc/x/xNUMA.cpp new file mode 100644 index 0000000000000..fb99878b200d2 --- /dev/null +++ b/src/hotspot/share/gc/x/xNUMA.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/x/xNUMA.hpp" + +bool XNUMA::_enabled; + +void XNUMA::initialize() { + pd_initialize(); + + log_info_p(gc, init)("NUMA Support: %s", to_string()); + if (_enabled) { + log_info_p(gc, init)("NUMA Nodes: %u", count()); + } +} + +const char* XNUMA::to_string() { + return _enabled ? "Enabled" : "Disabled"; +} diff --git a/src/hotspot/share/gc/x/xNUMA.hpp b/src/hotspot/share/gc/x/xNUMA.hpp new file mode 100644 index 0000000000000..6331a62c042dc --- /dev/null +++ b/src/hotspot/share/gc/x/xNUMA.hpp @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XNUMA_HPP +#define SHARE_GC_X_XNUMA_HPP + +#include "memory/allStatic.hpp" +#include "utilities/globalDefinitions.hpp" + +class XNUMA : public AllStatic { +private: + static bool _enabled; + + static void pd_initialize(); + +public: + static void initialize(); + static bool is_enabled(); + + static uint32_t count(); + static uint32_t id(); + + static uint32_t memory_id(uintptr_t addr); + + static const char* to_string(); +}; + +#endif // SHARE_GC_X_XNUMA_HPP diff --git a/src/hotspot/share/gc/x/xNUMA.inline.hpp b/src/hotspot/share/gc/x/xNUMA.inline.hpp new file mode 100644 index 0000000000000..17f5b831a31e8 --- /dev/null +++ b/src/hotspot/share/gc/x/xNUMA.inline.hpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XNUMA_INLINE_HPP +#define SHARE_GC_X_XNUMA_INLINE_HPP + +#include "gc/x/xNUMA.hpp" + +inline bool XNUMA::is_enabled() { + return _enabled; +} + +#endif // SHARE_GC_X_XNUMA_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xObjArrayAllocator.cpp b/src/hotspot/share/gc/x/xObjArrayAllocator.cpp new file mode 100644 index 0000000000000..9408e027cbd8c --- /dev/null +++ b/src/hotspot/share/gc/x/xObjArrayAllocator.cpp @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+#include "precompiled.hpp"
+#include "gc/x/xThreadLocalData.hpp"
+#include "gc/x/xObjArrayAllocator.hpp"
+#include "gc/x/xUtils.inline.hpp"
+#include "oops/arrayKlass.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "utilities/debug.hpp"
+
+XObjArrayAllocator::XObjArrayAllocator(Klass* klass, size_t word_size, int length, bool do_zero, Thread* thread) :
+    ObjArrayAllocator(klass, word_size, length, do_zero, thread) {}
+
+void XObjArrayAllocator::yield_for_safepoint() const {
+  ThreadBlockInVM tbivm(JavaThread::cast(_thread));
+}
+
+oop XObjArrayAllocator::initialize(HeapWord* mem) const {
+  // ZGC specializes the initialization by performing segmented clearing
+  // to allow shorter time-to-safepoints.
+
+  if (!_do_zero) {
+    // No need for ZGC specialization
+    return ObjArrayAllocator::initialize(mem);
+  }
+
+  // A max segment size of 64K was chosen because microbenchmarking
+  // suggested that it offered a good trade-off between allocation
+  // time and time-to-safepoint
+  const size_t segment_max = XUtils::bytes_to_words(64 * K);
+  const BasicType element_type = ArrayKlass::cast(_klass)->element_type();
+  const size_t header = arrayOopDesc::header_size(element_type);
+  const size_t payload_size = _word_size - header;
+
+  if (payload_size <= segment_max) {
+    // Too small to use segmented clearing
+    return ObjArrayAllocator::initialize(mem);
+  }
+
+  // Segmented clearing
+
+  // The array is going to be exposed before it has been completely
+  // cleared, therefore we can't expose the header at the end of this
+  // function. Instead, explicitly initialize it according to our needs.
+  arrayOopDesc::set_mark(mem, markWord::prototype());
+  arrayOopDesc::release_set_klass(mem, _klass);
+  assert(_length >= 0, "length should be non-negative");
+  arrayOopDesc::set_length(mem, _length);
+
+  // Keep the array alive across safepoints through an invisible
+  // root. Invisible roots are not visited by the heap iterator
+  // and the marking logic will not attempt to follow its elements.
+  // Relocation knows how to dodge iterating over such objects.
+  XThreadLocalData::set_invisible_root(_thread, (oop*)&mem);
+
+  for (size_t processed = 0; processed < payload_size; processed += segment_max) {
+    // Calculate segment
+    HeapWord* const start = (HeapWord*)(mem + header + processed);
+    const size_t remaining = payload_size - processed;
+    const size_t segment_size = MIN2(remaining, segment_max);
+
+    // Clear segment
+    Copy::zero_to_words(start, segment_size);
+
+    // Safepoint
+    yield_for_safepoint();
+  }
+
+  XThreadLocalData::clear_invisible_root(_thread);
+
+  return cast_to_oop(mem);
+}
diff --git a/src/hotspot/share/gc/x/xObjArrayAllocator.hpp b/src/hotspot/share/gc/x/xObjArrayAllocator.hpp
new file mode 100644
index 0000000000000..4a084da3279b3
--- /dev/null
+++ b/src/hotspot/share/gc/x/xObjArrayAllocator.hpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XOBJARRAYALLOCATOR_HPP +#define SHARE_GC_X_XOBJARRAYALLOCATOR_HPP + +#include "gc/shared/memAllocator.hpp" + +class XObjArrayAllocator : public ObjArrayAllocator { +private: + virtual oop initialize(HeapWord* mem) const override; + + void yield_for_safepoint() const; + +public: + XObjArrayAllocator(Klass* klass, size_t word_size, int length, bool do_zero, Thread* thread); +}; + +#endif // SHARE_GC_X_XOBJARRAYALLOCATOR_HPP diff --git a/src/hotspot/share/gc/x/xObjectAllocator.cpp b/src/hotspot/share/gc/x/xObjectAllocator.cpp new file mode 100644 index 0000000000000..589e2f2feba30 --- /dev/null +++ b/src/hotspot/share/gc/x/xObjectAllocator.cpp @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xHeap.inline.hpp" +#include "gc/x/xHeuristics.hpp" +#include "gc/x/xObjectAllocator.hpp" +#include "gc/x/xPage.inline.hpp" +#include "gc/x/xPageTable.inline.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xThread.inline.hpp" +#include "gc/x/xValue.inline.hpp" +#include "logging/log.hpp" +#include "runtime/atomic.hpp" +#include "runtime/safepoint.hpp" +#include "utilities/align.hpp" +#include "utilities/debug.hpp" + +static const XStatCounter XCounterUndoObjectAllocationSucceeded("Memory", "Undo Object Allocation Succeeded", XStatUnitOpsPerSecond); +static const XStatCounter XCounterUndoObjectAllocationFailed("Memory", "Undo Object Allocation Failed", XStatUnitOpsPerSecond); + +XObjectAllocator::XObjectAllocator() : + _use_per_cpu_shared_small_pages(XHeuristics::use_per_cpu_shared_small_pages()), + _used(0), + _undone(0), + _alloc_for_relocation(0), + _undo_alloc_for_relocation(0), + _shared_medium_page(NULL), + _shared_small_page(NULL) {} + +XPage** XObjectAllocator::shared_small_page_addr() { + return _use_per_cpu_shared_small_pages ? _shared_small_page.addr() : _shared_small_page.addr(0); +} + +XPage* const* XObjectAllocator::shared_small_page_addr() const { + return _use_per_cpu_shared_small_pages ? 
_shared_small_page.addr() : _shared_small_page.addr(0); +} + +void XObjectAllocator::register_alloc_for_relocation(const XPageTable* page_table, uintptr_t addr, size_t size) { + const XPage* const page = page_table->get(addr); + const size_t aligned_size = align_up(size, page->object_alignment()); + Atomic::add(_alloc_for_relocation.addr(), aligned_size); +} + +void XObjectAllocator::register_undo_alloc_for_relocation(const XPage* page, size_t size) { + const size_t aligned_size = align_up(size, page->object_alignment()); + Atomic::add(_undo_alloc_for_relocation.addr(), aligned_size); +} + +XPage* XObjectAllocator::alloc_page(uint8_t type, size_t size, XAllocationFlags flags) { + XPage* const page = XHeap::heap()->alloc_page(type, size, flags); + if (page != NULL) { + // Increment used bytes + Atomic::add(_used.addr(), size); + } + + return page; +} + +void XObjectAllocator::undo_alloc_page(XPage* page) { + // Increment undone bytes + Atomic::add(_undone.addr(), page->size()); + + XHeap::heap()->undo_alloc_page(page); +} + +uintptr_t XObjectAllocator::alloc_object_in_shared_page(XPage** shared_page, + uint8_t page_type, + size_t page_size, + size_t size, + XAllocationFlags flags) { + uintptr_t addr = 0; + XPage* page = Atomic::load_acquire(shared_page); + + if (page != NULL) { + addr = page->alloc_object_atomic(size); + } + + if (addr == 0) { + // Allocate new page + XPage* const new_page = alloc_page(page_type, page_size, flags); + if (new_page != NULL) { + // Allocate object before installing the new page + addr = new_page->alloc_object(size); + + retry: + // Install new page + XPage* const prev_page = Atomic::cmpxchg(shared_page, page, new_page); + if (prev_page != page) { + if (prev_page == NULL) { + // Previous page was retired, retry installing the new page + page = prev_page; + goto retry; + } + + // Another page already installed, try allocation there first + const uintptr_t prev_addr = prev_page->alloc_object_atomic(size); + if (prev_addr == 0) { + // Allocation failed, retry installing the new page + page = prev_page; + goto retry; + } + + // Allocation succeeded in already installed page + addr = prev_addr; + + // Undo new page allocation + undo_alloc_page(new_page); + } + } + } + + return addr; +} + +uintptr_t XObjectAllocator::alloc_large_object(size_t size, XAllocationFlags flags) { + uintptr_t addr = 0; + + // Allocate new large page + const size_t page_size = align_up(size, XGranuleSize); + XPage* const page = alloc_page(XPageTypeLarge, page_size, flags); + if (page != NULL) { + // Allocate the object + addr = page->alloc_object(size); + } + + return addr; +} + +uintptr_t XObjectAllocator::alloc_medium_object(size_t size, XAllocationFlags flags) { + return alloc_object_in_shared_page(_shared_medium_page.addr(), XPageTypeMedium, XPageSizeMedium, size, flags); +} + +uintptr_t XObjectAllocator::alloc_small_object(size_t size, XAllocationFlags flags) { + return alloc_object_in_shared_page(shared_small_page_addr(), XPageTypeSmall, XPageSizeSmall, size, flags); +} + +uintptr_t XObjectAllocator::alloc_object(size_t size, XAllocationFlags flags) { + if (size <= XObjectSizeLimitSmall) { + // Small + return alloc_small_object(size, flags); + } else if (size <= XObjectSizeLimitMedium) { + // Medium + return alloc_medium_object(size, flags); + } else { + // Large + return alloc_large_object(size, flags); + } +} + +uintptr_t XObjectAllocator::alloc_object(size_t size) { + XAllocationFlags flags; + return alloc_object(size, flags); +} + +uintptr_t 
XObjectAllocator::alloc_object_for_relocation(const XPageTable* page_table, size_t size) { + XAllocationFlags flags; + flags.set_non_blocking(); + + const uintptr_t addr = alloc_object(size, flags); + if (addr != 0) { + register_alloc_for_relocation(page_table, addr, size); + } + + return addr; +} + +void XObjectAllocator::undo_alloc_object_for_relocation(XPage* page, uintptr_t addr, size_t size) { + const uint8_t type = page->type(); + + if (type == XPageTypeLarge) { + register_undo_alloc_for_relocation(page, size); + undo_alloc_page(page); + XStatInc(XCounterUndoObjectAllocationSucceeded); + } else { + if (page->undo_alloc_object_atomic(addr, size)) { + register_undo_alloc_for_relocation(page, size); + XStatInc(XCounterUndoObjectAllocationSucceeded); + } else { + XStatInc(XCounterUndoObjectAllocationFailed); + } + } +} + +size_t XObjectAllocator::used() const { + size_t total_used = 0; + size_t total_undone = 0; + + XPerCPUConstIterator iter_used(&_used); + for (const size_t* cpu_used; iter_used.next(&cpu_used);) { + total_used += *cpu_used; + } + + XPerCPUConstIterator iter_undone(&_undone); + for (const size_t* cpu_undone; iter_undone.next(&cpu_undone);) { + total_undone += *cpu_undone; + } + + return total_used - total_undone; +} + +size_t XObjectAllocator::remaining() const { + assert(XThread::is_java(), "Should be a Java thread"); + + const XPage* const page = Atomic::load_acquire(shared_small_page_addr()); + if (page != NULL) { + return page->remaining(); + } + + return 0; +} + +size_t XObjectAllocator::relocated() const { + size_t total_alloc = 0; + size_t total_undo_alloc = 0; + + XPerCPUConstIterator iter_alloc(&_alloc_for_relocation); + for (const size_t* alloc; iter_alloc.next(&alloc);) { + total_alloc += Atomic::load(alloc); + } + + XPerCPUConstIterator iter_undo_alloc(&_undo_alloc_for_relocation); + for (const size_t* undo_alloc; iter_undo_alloc.next(&undo_alloc);) { + total_undo_alloc += Atomic::load(undo_alloc); + } + + assert(total_alloc >= total_undo_alloc, "Mismatch"); + + return total_alloc - total_undo_alloc; +} + +void XObjectAllocator::retire_pages() { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + + // Reset used and undone bytes + _used.set_all(0); + _undone.set_all(0); + + // Reset relocated bytes + _alloc_for_relocation.set_all(0); + _undo_alloc_for_relocation.set_all(0); + + // Reset allocation pages + _shared_medium_page.set(NULL); + _shared_small_page.set_all(NULL); +} diff --git a/src/hotspot/share/gc/x/xObjectAllocator.hpp b/src/hotspot/share/gc/x/xObjectAllocator.hpp new file mode 100644 index 0000000000000..8880c41f3d598 --- /dev/null +++ b/src/hotspot/share/gc/x/xObjectAllocator.hpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XOBJECTALLOCATOR_HPP +#define SHARE_GC_X_XOBJECTALLOCATOR_HPP + +#include "gc/x/xAllocationFlags.hpp" +#include "gc/x/xValue.hpp" + +class XPage; +class XPageTable; + +class XObjectAllocator { +private: + const bool _use_per_cpu_shared_small_pages; + XPerCPU _used; + XPerCPU _undone; + XPerCPU _alloc_for_relocation; + XPerCPU _undo_alloc_for_relocation; + XContended _shared_medium_page; + XPerCPU _shared_small_page; + + XPage** shared_small_page_addr(); + XPage* const* shared_small_page_addr() const; + + void register_alloc_for_relocation(const XPageTable* page_table, uintptr_t addr, size_t size); + void register_undo_alloc_for_relocation(const XPage* page, size_t size); + + XPage* alloc_page(uint8_t type, size_t size, XAllocationFlags flags); + void undo_alloc_page(XPage* page); + + // Allocate an object in a shared page. Allocate and + // atomically install a new page if necessary. + uintptr_t alloc_object_in_shared_page(XPage** shared_page, + uint8_t page_type, + size_t page_size, + size_t size, + XAllocationFlags flags); + + uintptr_t alloc_large_object(size_t size, XAllocationFlags flags); + uintptr_t alloc_medium_object(size_t size, XAllocationFlags flags); + uintptr_t alloc_small_object(size_t size, XAllocationFlags flags); + uintptr_t alloc_object(size_t size, XAllocationFlags flags); + +public: + XObjectAllocator(); + + uintptr_t alloc_object(size_t size); + uintptr_t alloc_object_for_relocation(const XPageTable* page_table, size_t size); + void undo_alloc_object_for_relocation(XPage* page, uintptr_t addr, size_t size); + + size_t used() const; + size_t remaining() const; + size_t relocated() const; + + void retire_pages(); +}; + +#endif // SHARE_GC_X_XOBJECTALLOCATOR_HPP diff --git a/src/hotspot/share/gc/z/zOop.hpp b/src/hotspot/share/gc/x/xOop.hpp similarity index 91% rename from src/hotspot/share/gc/z/zOop.hpp rename to src/hotspot/share/gc/x/xOop.hpp index 4fb0e6499e14a..92cc7a225fe6b 100644 --- a/src/hotspot/share/gc/z/zOop.hpp +++ b/src/hotspot/share/gc/x/xOop.hpp @@ -21,16 +21,16 @@ * questions. */ -#ifndef SHARE_GC_Z_ZOOP_HPP -#define SHARE_GC_Z_ZOOP_HPP +#ifndef SHARE_GC_X_XOOP_HPP +#define SHARE_GC_X_XOOP_HPP #include "memory/allStatic.hpp" #include "oops/oopsHierarchy.hpp" -class ZOop : public AllStatic { +class XOop : public AllStatic { public: static oop from_address(uintptr_t addr); static uintptr_t to_address(oop o); }; -#endif // SHARE_GC_Z_ZOOP_HPP +#endif // SHARE_GC_X_XOOP_HPP diff --git a/src/hotspot/share/gc/z/zOop.inline.hpp b/src/hotspot/share/gc/x/xOop.inline.hpp similarity index 83% rename from src/hotspot/share/gc/z/zOop.inline.hpp rename to src/hotspot/share/gc/x/xOop.inline.hpp index e71e233ffc9bc..933987577d113 100644 --- a/src/hotspot/share/gc/z/zOop.inline.hpp +++ b/src/hotspot/share/gc/x/xOop.inline.hpp @@ -21,17 +21,17 @@ * questions. 
*/ -#ifndef SHARE_GC_Z_ZOOP_INLINE_HPP -#define SHARE_GC_Z_ZOOP_INLINE_HPP +#ifndef SHARE_GC_X_XOOP_INLINE_HPP +#define SHARE_GC_X_XOOP_INLINE_HPP -#include "gc/z/zOop.hpp" +#include "gc/x/xOop.hpp" -inline oop ZOop::from_address(uintptr_t addr) { +inline oop XOop::from_address(uintptr_t addr) { return cast_to_oop(addr); } -inline uintptr_t ZOop::to_address(oop o) { +inline uintptr_t XOop::to_address(oop o) { return cast_from_oop(o); } -#endif // SHARE_GC_Z_ZOOP_INLINE_HPP +#endif // SHARE_GC_X_XOOP_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xPage.cpp b/src/hotspot/share/gc/x/xPage.cpp new file mode 100644 index 0000000000000..896adb82768e0 --- /dev/null +++ b/src/hotspot/share/gc/x/xPage.cpp @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/x/xList.inline.hpp" +#include "gc/x/xPage.inline.hpp" +#include "gc/x/xPhysicalMemory.inline.hpp" +#include "gc/x/xVirtualMemory.inline.hpp" +#include "utilities/align.hpp" +#include "utilities/debug.hpp" + +XPage::XPage(const XVirtualMemory& vmem, const XPhysicalMemory& pmem) : + XPage(type_from_size(vmem.size()), vmem, pmem) {} + +XPage::XPage(uint8_t type, const XVirtualMemory& vmem, const XPhysicalMemory& pmem) : + _type(type), + _numa_id((uint8_t)-1), + _seqnum(0), + _virtual(vmem), + _top(start()), + _livemap(object_max_count()), + _last_used(0), + _physical(pmem), + _node() { + assert_initialized(); +} + +XPage::~XPage() {} + +void XPage::assert_initialized() const { + assert(!_virtual.is_null(), "Should not be null"); + assert(!_physical.is_null(), "Should not be null"); + assert(_virtual.size() == _physical.size(), "Virtual/Physical size mismatch"); + assert((_type == XPageTypeSmall && size() == XPageSizeSmall) || + (_type == XPageTypeMedium && size() == XPageSizeMedium) || + (_type == XPageTypeLarge && is_aligned(size(), XGranuleSize)), + "Page type/size mismatch"); +} + +void XPage::reset() { + _seqnum = XGlobalSeqNum; + _top = start(); + _livemap.reset(); + _last_used = 0; +} + +void XPage::reset_for_in_place_relocation() { + _seqnum = XGlobalSeqNum; + _top = start(); +} + +XPage* XPage::retype(uint8_t type) { + assert(_type != type, "Invalid retype"); + _type = type; + _livemap.resize(object_max_count()); + return this; +} + +XPage* XPage::split(size_t size) { + return split(type_from_size(size), size); +} + +XPage* XPage::split(uint8_t type, size_t size) { + assert(_virtual.size() > size, "Invalid split"); + + // Resize this page, keep _numa_id, _seqnum, and _last_used + const XVirtualMemory vmem = _virtual.split(size); + const XPhysicalMemory pmem = _physical.split(size); + _type = type_from_size(_virtual.size()); + _top = start(); + _livemap.resize(object_max_count()); + + // Create new page, inherit _seqnum and _last_used + XPage* const page = new XPage(type, vmem, pmem); + page->_seqnum = _seqnum; + page->_last_used = _last_used; + return page; +} + +XPage* XPage::split_committed() { + // Split any committed part of this page into a separate page, + // leaving this page with only uncommitted physical memory. + const XPhysicalMemory pmem = _physical.split_committed(); + if (pmem.is_null()) { + // Nothing committed + return NULL; + } + + assert(!_physical.is_null(), "Should not be null"); + + // Resize this page + const XVirtualMemory vmem = _virtual.split(pmem.size()); + _type = type_from_size(_virtual.size()); + _top = start(); + _livemap.resize(object_max_count()); + + // Create new page + return new XPage(vmem, pmem); +} + +void XPage::print_on(outputStream* out) const { + out->print_cr(" %-6s " PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " %s%s", + type_to_string(), start(), top(), end(), + is_allocating() ? " Allocating" : "", + is_relocatable() ? 
" Relocatable" : ""); +} + +void XPage::print() const { + print_on(tty); +} + +void XPage::verify_live(uint32_t live_objects, size_t live_bytes) const { + guarantee(live_objects == _livemap.live_objects(), "Invalid number of live objects"); + guarantee(live_bytes == _livemap.live_bytes(), "Invalid number of live bytes"); +} diff --git a/src/hotspot/share/gc/x/xPage.hpp b/src/hotspot/share/gc/x/xPage.hpp new file mode 100644 index 0000000000000..c1040e034bd1c --- /dev/null +++ b/src/hotspot/share/gc/x/xPage.hpp @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XPAGE_HPP +#define SHARE_GC_X_XPAGE_HPP + +#include "gc/x/xList.hpp" +#include "gc/x/xLiveMap.hpp" +#include "gc/x/xPhysicalMemory.hpp" +#include "gc/x/xVirtualMemory.hpp" +#include "memory/allocation.hpp" + +class VMStructs; + +class XPage : public CHeapObj { + friend class ::VMStructs; + friend class XList; + +private: + uint8_t _type; + uint8_t _numa_id; + uint32_t _seqnum; + XVirtualMemory _virtual; + volatile uintptr_t _top; + XLiveMap _livemap; + uint64_t _last_used; + XPhysicalMemory _physical; + XListNode _node; + + void assert_initialized() const; + + uint8_t type_from_size(size_t size) const; + const char* type_to_string() const; + + bool is_object_marked(uintptr_t addr) const; + bool is_object_strongly_marked(uintptr_t addr) const; + +public: + XPage(const XVirtualMemory& vmem, const XPhysicalMemory& pmem); + XPage(uint8_t type, const XVirtualMemory& vmem, const XPhysicalMemory& pmem); + ~XPage(); + + uint32_t object_max_count() const; + size_t object_alignment_shift() const; + size_t object_alignment() const; + + uint8_t type() const; + uintptr_t start() const; + uintptr_t end() const; + size_t size() const; + uintptr_t top() const; + size_t remaining() const; + + const XVirtualMemory& virtual_memory() const; + const XPhysicalMemory& physical_memory() const; + XPhysicalMemory& physical_memory(); + + uint8_t numa_id(); + + bool is_allocating() const; + bool is_relocatable() const; + + uint64_t last_used() const; + void set_last_used(); + + void reset(); + void reset_for_in_place_relocation(); + + XPage* retype(uint8_t type); + XPage* split(size_t size); + XPage* split(uint8_t type, size_t size); + XPage* split_committed(); + + bool is_in(uintptr_t addr) const; + + bool is_marked() const; + template bool is_object_marked(uintptr_t addr) const; + bool is_object_live(uintptr_t addr) const; + bool is_object_strongly_live(uintptr_t addr) const; + bool mark_object(uintptr_t addr, 
bool finalizable, bool& inc_live); + + void inc_live(uint32_t objects, size_t bytes); + uint32_t live_objects() const; + size_t live_bytes() const; + + void object_iterate(ObjectClosure* cl); + + uintptr_t alloc_object(size_t size); + uintptr_t alloc_object_atomic(size_t size); + + bool undo_alloc_object(uintptr_t addr, size_t size); + bool undo_alloc_object_atomic(uintptr_t addr, size_t size); + + void print_on(outputStream* out) const; + void print() const; + + void verify_live(uint32_t live_objects, size_t live_bytes) const; +}; + +class XPageClosure { +public: + virtual void do_page(const XPage* page) = 0; +}; + +#endif // SHARE_GC_X_XPAGE_HPP diff --git a/src/hotspot/share/gc/x/xPage.inline.hpp b/src/hotspot/share/gc/x/xPage.inline.hpp new file mode 100644 index 0000000000000..8017b2dadf6fe --- /dev/null +++ b/src/hotspot/share/gc/x/xPage.inline.hpp @@ -0,0 +1,312 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XPAGE_INLINE_HPP +#define SHARE_GC_X_XPAGE_INLINE_HPP + +#include "gc/x/xPage.hpp" + +#include "gc/x/xAddress.inline.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xLiveMap.inline.hpp" +#include "gc/x/xNUMA.hpp" +#include "gc/x/xPhysicalMemory.inline.hpp" +#include "gc/x/xVirtualMemory.inline.hpp" +#include "runtime/atomic.hpp" +#include "runtime/os.hpp" +#include "utilities/align.hpp" +#include "utilities/debug.hpp" + +inline uint8_t XPage::type_from_size(size_t size) const { + if (size == XPageSizeSmall) { + return XPageTypeSmall; + } else if (size == XPageSizeMedium) { + return XPageTypeMedium; + } else { + return XPageTypeLarge; + } +} + +inline const char* XPage::type_to_string() const { + switch (type()) { + case XPageTypeSmall: + return "Small"; + + case XPageTypeMedium: + return "Medium"; + + default: + assert(type() == XPageTypeLarge, "Invalid page type"); + return "Large"; + } +} + +inline uint32_t XPage::object_max_count() const { + switch (type()) { + case XPageTypeLarge: + // A large page can only contain a single + // object aligned to the start of the page. 
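+    // (So regardless of how many granules the page spans, object_max_count()
+    // is 1, and the live map only needs to track a single object, i.e. one
+    // pair of mark bits.)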
+ return 1; + + default: + return (uint32_t)(size() >> object_alignment_shift()); + } +} + +inline size_t XPage::object_alignment_shift() const { + switch (type()) { + case XPageTypeSmall: + return XObjectAlignmentSmallShift; + + case XPageTypeMedium: + return XObjectAlignmentMediumShift; + + default: + assert(type() == XPageTypeLarge, "Invalid page type"); + return XObjectAlignmentLargeShift; + } +} + +inline size_t XPage::object_alignment() const { + switch (type()) { + case XPageTypeSmall: + return XObjectAlignmentSmall; + + case XPageTypeMedium: + return XObjectAlignmentMedium; + + default: + assert(type() == XPageTypeLarge, "Invalid page type"); + return XObjectAlignmentLarge; + } +} + +inline uint8_t XPage::type() const { + return _type; +} + +inline uintptr_t XPage::start() const { + return _virtual.start(); +} + +inline uintptr_t XPage::end() const { + return _virtual.end(); +} + +inline size_t XPage::size() const { + return _virtual.size(); +} + +inline uintptr_t XPage::top() const { + return _top; +} + +inline size_t XPage::remaining() const { + return end() - top(); +} + +inline const XVirtualMemory& XPage::virtual_memory() const { + return _virtual; +} + +inline const XPhysicalMemory& XPage::physical_memory() const { + return _physical; +} + +inline XPhysicalMemory& XPage::physical_memory() { + return _physical; +} + +inline uint8_t XPage::numa_id() { + if (_numa_id == (uint8_t)-1) { + _numa_id = XNUMA::memory_id(XAddress::good(start())); + } + + return _numa_id; +} + +inline bool XPage::is_allocating() const { + return _seqnum == XGlobalSeqNum; +} + +inline bool XPage::is_relocatable() const { + return _seqnum < XGlobalSeqNum; +} + +inline uint64_t XPage::last_used() const { + return _last_used; +} + +inline void XPage::set_last_used() { + _last_used = ceil(os::elapsedTime()); +} + +inline bool XPage::is_in(uintptr_t addr) const { + const uintptr_t offset = XAddress::offset(addr); + return offset >= start() && offset < top(); +} + +inline bool XPage::is_marked() const { + assert(is_relocatable(), "Invalid page state"); + return _livemap.is_marked(); +} + +inline bool XPage::is_object_marked(uintptr_t addr) const { + assert(is_relocatable(), "Invalid page state"); + const size_t index = ((XAddress::offset(addr) - start()) >> object_alignment_shift()) * 2; + return _livemap.get(index); +} + +inline bool XPage::is_object_strongly_marked(uintptr_t addr) const { + assert(is_relocatable(), "Invalid page state"); + const size_t index = ((XAddress::offset(addr) - start()) >> object_alignment_shift()) * 2; + return _livemap.get(index + 1); +} + +template +inline bool XPage::is_object_marked(uintptr_t addr) const { + return finalizable ? 
is_object_marked(addr) : is_object_strongly_marked(addr); +} + +inline bool XPage::is_object_live(uintptr_t addr) const { + return is_allocating() || is_object_marked(addr); +} + +inline bool XPage::is_object_strongly_live(uintptr_t addr) const { + return is_allocating() || is_object_strongly_marked(addr); +} + +inline bool XPage::mark_object(uintptr_t addr, bool finalizable, bool& inc_live) { + assert(XAddress::is_marked(addr), "Invalid address"); + assert(is_relocatable(), "Invalid page state"); + assert(is_in(addr), "Invalid address"); + + // Set mark bit + const size_t index = ((XAddress::offset(addr) - start()) >> object_alignment_shift()) * 2; + return _livemap.set(index, finalizable, inc_live); +} + +inline void XPage::inc_live(uint32_t objects, size_t bytes) { + _livemap.inc_live(objects, bytes); +} + +inline uint32_t XPage::live_objects() const { + assert(is_marked(), "Should be marked"); + return _livemap.live_objects(); +} + +inline size_t XPage::live_bytes() const { + assert(is_marked(), "Should be marked"); + return _livemap.live_bytes(); +} + +inline void XPage::object_iterate(ObjectClosure* cl) { + _livemap.iterate(cl, XAddress::good(start()), object_alignment_shift()); +} + +inline uintptr_t XPage::alloc_object(size_t size) { + assert(is_allocating(), "Invalid state"); + + const size_t aligned_size = align_up(size, object_alignment()); + const uintptr_t addr = top(); + const uintptr_t new_top = addr + aligned_size; + + if (new_top > end()) { + // Not enough space left + return 0; + } + + _top = new_top; + + return XAddress::good(addr); +} + +inline uintptr_t XPage::alloc_object_atomic(size_t size) { + assert(is_allocating(), "Invalid state"); + + const size_t aligned_size = align_up(size, object_alignment()); + uintptr_t addr = top(); + + for (;;) { + const uintptr_t new_top = addr + aligned_size; + if (new_top > end()) { + // Not enough space left + return 0; + } + + const uintptr_t prev_top = Atomic::cmpxchg(&_top, addr, new_top); + if (prev_top == addr) { + // Success + return XAddress::good(addr); + } + + // Retry + addr = prev_top; + } +} + +inline bool XPage::undo_alloc_object(uintptr_t addr, size_t size) { + assert(is_allocating(), "Invalid state"); + + const uintptr_t offset = XAddress::offset(addr); + const size_t aligned_size = align_up(size, object_alignment()); + const uintptr_t old_top = top(); + const uintptr_t new_top = old_top - aligned_size; + + if (new_top != offset) { + // Failed to undo allocation, not the last allocated object + return false; + } + + _top = new_top; + + // Success + return true; +} + +inline bool XPage::undo_alloc_object_atomic(uintptr_t addr, size_t size) { + assert(is_allocating(), "Invalid state"); + + const uintptr_t offset = XAddress::offset(addr); + const size_t aligned_size = align_up(size, object_alignment()); + uintptr_t old_top = top(); + + for (;;) { + const uintptr_t new_top = old_top - aligned_size; + if (new_top != offset) { + // Failed to undo allocation, not the last allocated object + return false; + } + + const uintptr_t prev_top = Atomic::cmpxchg(&_top, old_top, new_top); + if (prev_top == old_top) { + // Success + return true; + } + + // Retry + old_top = prev_top; + } +} + +#endif // SHARE_GC_X_XPAGE_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xPageAllocator.cpp b/src/hotspot/share/gc/x/xPageAllocator.cpp new file mode 100644 index 0000000000000..10885c864882f --- /dev/null +++ b/src/hotspot/share/gc/x/xPageAllocator.cpp @@ -0,0 +1,870 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. 
All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/shared/suspendibleThreadSet.hpp" +#include "gc/x/xArray.inline.hpp" +#include "gc/x/xCollectedHeap.hpp" +#include "gc/x/xFuture.inline.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xLock.inline.hpp" +#include "gc/x/xPage.inline.hpp" +#include "gc/x/xPageAllocator.inline.hpp" +#include "gc/x/xPageCache.hpp" +#include "gc/x/xSafeDelete.inline.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xTask.hpp" +#include "gc/x/xUncommitter.hpp" +#include "gc/x/xUnmapper.hpp" +#include "gc/x/xWorkers.hpp" +#include "jfr/jfrEvents.hpp" +#include "logging/log.hpp" +#include "runtime/globals.hpp" +#include "runtime/init.hpp" +#include "runtime/java.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" + +static const XStatCounter XCounterAllocationRate("Memory", "Allocation Rate", XStatUnitBytesPerSecond); +static const XStatCounter XCounterPageCacheFlush("Memory", "Page Cache Flush", XStatUnitBytesPerSecond); +static const XStatCounter XCounterDefragment("Memory", "Defragment", XStatUnitOpsPerSecond); +static const XStatCriticalPhase XCriticalPhaseAllocationStall("Allocation Stall"); + +enum XPageAllocationStall { + XPageAllocationStallSuccess, + XPageAllocationStallFailed, + XPageAllocationStallStartGC +}; + +class XPageAllocation : public StackObj { + friend class XList; + +private: + const uint8_t _type; + const size_t _size; + const XAllocationFlags _flags; + const uint32_t _seqnum; + size_t _flushed; + size_t _committed; + XList _pages; + XListNode _node; + XFuture _stall_result; + +public: + XPageAllocation(uint8_t type, size_t size, XAllocationFlags flags) : + _type(type), + _size(size), + _flags(flags), + _seqnum(XGlobalSeqNum), + _flushed(0), + _committed(0), + _pages(), + _node(), + _stall_result() {} + + uint8_t type() const { + return _type; + } + + size_t size() const { + return _size; + } + + XAllocationFlags flags() const { + return _flags; + } + + uint32_t seqnum() const { + return _seqnum; + } + + size_t flushed() const { + return _flushed; + } + + void set_flushed(size_t flushed) { + _flushed = flushed; + } + + size_t committed() const { + return _committed; + } + + void set_committed(size_t committed) { + _committed = committed; + } + + XPageAllocationStall wait() { + return _stall_result.get(); + } + + XList* pages() { + return &_pages; + } + + void satisfy(XPageAllocationStall result) { + _stall_result.set(result); + } +}; + +XPageAllocator::XPageAllocator(XWorkers* workers, + 
size_t min_capacity, + size_t initial_capacity, + size_t max_capacity) : + _lock(), + _cache(), + _virtual(max_capacity), + _physical(max_capacity), + _min_capacity(min_capacity), + _max_capacity(max_capacity), + _current_max_capacity(max_capacity), + _capacity(0), + _claimed(0), + _used(0), + _used_high(0), + _used_low(0), + _reclaimed(0), + _stalled(), + _nstalled(0), + _satisfied(), + _unmapper(new XUnmapper(this)), + _uncommitter(new XUncommitter(this)), + _safe_delete(), + _initialized(false) { + + if (!_virtual.is_initialized() || !_physical.is_initialized()) { + return; + } + + log_info_p(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M); + log_info_p(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M); + log_info_p(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M); + if (XPageSizeMedium > 0) { + log_info_p(gc, init)("Medium Page Size: " SIZE_FORMAT "M", XPageSizeMedium / M); + } else { + log_info_p(gc, init)("Medium Page Size: N/A"); + } + log_info_p(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled"); + + // Warn if system limits could stop us from reaching max capacity + _physical.warn_commit_limits(max_capacity); + + // Check if uncommit should and can be enabled + _physical.try_enable_uncommit(min_capacity, max_capacity); + + // Pre-map initial capacity + if (!prime_cache(workers, initial_capacity)) { + log_error_p(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M); + return; + } + + // Successfully initialized + _initialized = true; +} + +class XPreTouchTask : public XTask { +private: + const XPhysicalMemoryManager* const _physical; + volatile uintptr_t _start; + const uintptr_t _end; + +public: + XPreTouchTask(const XPhysicalMemoryManager* physical, uintptr_t start, uintptr_t end) : + XTask("XPreTouchTask"), + _physical(physical), + _start(start), + _end(end) {} + + virtual void work() { + for (;;) { + // Get granule offset + const size_t size = XGranuleSize; + const uintptr_t offset = Atomic::fetch_then_add(&_start, size); + if (offset >= _end) { + // Done + break; + } + + // Pre-touch granule + _physical->pretouch(offset, size); + } + } +}; + +bool XPageAllocator::prime_cache(XWorkers* workers, size_t size) { + XAllocationFlags flags; + + flags.set_non_blocking(); + flags.set_low_address(); + + XPage* const page = alloc_page(XPageTypeLarge, size, flags); + if (page == NULL) { + return false; + } + + if (AlwaysPreTouch) { + // Pre-touch page + XPreTouchTask task(&_physical, page->start(), page->end()); + workers->run_all(&task); + } + + free_page(page, false /* reclaimed */); + + return true; +} + +bool XPageAllocator::is_initialized() const { + return _initialized; +} + +size_t XPageAllocator::min_capacity() const { + return _min_capacity; +} + +size_t XPageAllocator::max_capacity() const { + return _max_capacity; +} + +size_t XPageAllocator::soft_max_capacity() const { + // Note that SoftMaxHeapSize is a manageable flag + const size_t soft_max_capacity = Atomic::load(&SoftMaxHeapSize); + const size_t current_max_capacity = Atomic::load(&_current_max_capacity); + return MIN2(soft_max_capacity, current_max_capacity); +} + +size_t XPageAllocator::capacity() const { + return Atomic::load(&_capacity); +} + +size_t XPageAllocator::used() const { + return Atomic::load(&_used); +} + +size_t XPageAllocator::unused() const { + const ssize_t capacity = (ssize_t)Atomic::load(&_capacity); + const ssize_t used = (ssize_t)Atomic::load(&_used); + const ssize_t claimed = 
(ssize_t)Atomic::load(&_claimed); + const ssize_t unused = capacity - used - claimed; + return unused > 0 ? (size_t)unused : 0; +} + +XPageAllocatorStats XPageAllocator::stats() const { + XLocker locker(&_lock); + return XPageAllocatorStats(_min_capacity, + _max_capacity, + soft_max_capacity(), + _capacity, + _used, + _used_high, + _used_low, + _reclaimed); +} + +void XPageAllocator::reset_statistics() { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + _reclaimed = 0; + _used_high = _used_low = _used; + _nstalled = 0; +} + +size_t XPageAllocator::increase_capacity(size_t size) { + const size_t increased = MIN2(size, _current_max_capacity - _capacity); + + if (increased > 0) { + // Update atomically since we have concurrent readers + Atomic::add(&_capacity, increased); + + // Record time of last commit. When allocation, we prefer increasing + // the capacity over flushing the cache. That means there could be + // expired pages in the cache at this time. However, since we are + // increasing the capacity we are obviously in need of committed + // memory and should therefore not be uncommitting memory. + _cache.set_last_commit(); + } + + return increased; +} + +void XPageAllocator::decrease_capacity(size_t size, bool set_max_capacity) { + // Update atomically since we have concurrent readers + Atomic::sub(&_capacity, size); + + if (set_max_capacity) { + // Adjust current max capacity to avoid further attempts to increase capacity + log_error_p(gc)("Forced to lower max Java heap size from " + SIZE_FORMAT "M(%.0f%%) to " SIZE_FORMAT "M(%.0f%%)", + _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity), + _capacity / M, percent_of(_capacity, _max_capacity)); + + // Update atomically since we have concurrent readers + Atomic::store(&_current_max_capacity, _capacity); + } +} + +void XPageAllocator::increase_used(size_t size, bool worker_relocation) { + if (worker_relocation) { + // Allocating a page for the purpose of worker relocation has + // a negative contribution to the number of reclaimed bytes. + _reclaimed -= size; + } + + // Update atomically since we have concurrent readers + const size_t used = Atomic::add(&_used, size); + if (used > _used_high) { + _used_high = used; + } +} + +void XPageAllocator::decrease_used(size_t size, bool reclaimed) { + // Only pages explicitly released with the reclaimed flag set + // counts as reclaimed bytes. This flag is true when we release + // a page after relocation, and is false when we release a page + // to undo an allocation. 
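+  // Worked example (page sizes here are illustrative only): when GC workers
+  // relocate survivors, the target pages they allocate carry the
+  // worker_relocation flag, so increase_used() above subtracts their size
+  // from _reclaimed. When an emptied source page, say a 2M small page, is
+  // later freed with reclaimed set to true, its 2M is added back here. The
+  // net effect is that _reclaimed tracks the memory actually recovered by
+  // relocation, not the gross amount of pages freed.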
+ if (reclaimed) { + _reclaimed += size; + } + + // Update atomically since we have concurrent readers + const size_t used = Atomic::sub(&_used, size); + if (used < _used_low) { + _used_low = used; + } +} + +bool XPageAllocator::commit_page(XPage* page) { + // Commit physical memory + return _physical.commit(page->physical_memory()); +} + +void XPageAllocator::uncommit_page(XPage* page) { + if (!ZUncommit) { + return; + } + + // Uncommit physical memory + _physical.uncommit(page->physical_memory()); +} + +void XPageAllocator::map_page(const XPage* page) const { + // Map physical memory + _physical.map(page->start(), page->physical_memory()); +} + +void XPageAllocator::unmap_page(const XPage* page) const { + // Unmap physical memory + _physical.unmap(page->start(), page->size()); +} + +void XPageAllocator::destroy_page(XPage* page) { + // Free virtual memory + _virtual.free(page->virtual_memory()); + + // Free physical memory + _physical.free(page->physical_memory()); + + // Delete page safely + _safe_delete(page); +} + +bool XPageAllocator::is_alloc_allowed(size_t size) const { + const size_t available = _current_max_capacity - _used - _claimed; + return available >= size; +} + +bool XPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, XList* pages) { + if (!is_alloc_allowed(size)) { + // Out of memory + return false; + } + + // Try allocate from the page cache + XPage* const page = _cache.alloc_page(type, size); + if (page != NULL) { + // Success + pages->insert_last(page); + return true; + } + + // Try increase capacity + const size_t increased = increase_capacity(size); + if (increased < size) { + // Could not increase capacity enough to satisfy the allocation + // completely. Flush the page cache to satisfy the remainder. + const size_t remaining = size - increased; + _cache.flush_for_allocation(remaining, pages); + } + + // Success + return true; +} + +bool XPageAllocator::alloc_page_common(XPageAllocation* allocation) { + const uint8_t type = allocation->type(); + const size_t size = allocation->size(); + const XAllocationFlags flags = allocation->flags(); + XList* const pages = allocation->pages(); + + if (!alloc_page_common_inner(type, size, pages)) { + // Out of memory + return false; + } + + // Updated used statistics + increase_used(size, flags.worker_relocation()); + + // Success + return true; +} + +static void check_out_of_memory_during_initialization() { + if (!is_init_completed()) { + vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small"); + } +} + +bool XPageAllocator::alloc_page_stall(XPageAllocation* allocation) { + XStatTimer timer(XCriticalPhaseAllocationStall); + EventZAllocationStall event; + XPageAllocationStall result; + + // We can only block if the VM is fully initialized + check_out_of_memory_during_initialization(); + + // Increment stalled counter + Atomic::inc(&_nstalled); + + do { + // Start asynchronous GC + XCollectedHeap::heap()->collect(GCCause::_z_allocation_stall); + + // Wait for allocation to complete, fail or request a GC + result = allocation->wait(); + } while (result == XPageAllocationStallStartGC); + + { + // + // We grab the lock here for two different reasons: + // + // 1) Guard deletion of underlying semaphore. This is a workaround for + // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy + // the semaphore immediately after returning from sem_wait(). The + // reason is that sem_post() can touch the semaphore after a waiting + // thread have returned from sem_wait(). 
To avoid this race we are + // forcing the waiting thread to acquire/release the lock held by the + // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674 + // + // 2) Guard the list of satisfied pages. + // + XLocker locker(&_lock); + _satisfied.remove(allocation); + } + + // Send event + event.commit(allocation->type(), allocation->size()); + + return (result == XPageAllocationStallSuccess); +} + +bool XPageAllocator::alloc_page_or_stall(XPageAllocation* allocation) { + { + XLocker locker(&_lock); + + if (alloc_page_common(allocation)) { + // Success + return true; + } + + // Failed + if (allocation->flags().non_blocking()) { + // Don't stall + return false; + } + + // Enqueue allocation request + _stalled.insert_last(allocation); + } + + // Stall + return alloc_page_stall(allocation); +} + +XPage* XPageAllocator::alloc_page_create(XPageAllocation* allocation) { + const size_t size = allocation->size(); + + // Allocate virtual memory. To make error handling a lot more straight + // forward, we allocate virtual memory before destroying flushed pages. + // Flushed pages are also unmapped and destroyed asynchronously, so we + // can't immediately reuse that part of the address space anyway. + const XVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address()); + if (vmem.is_null()) { + log_error(gc)("Out of address space"); + return NULL; + } + + XPhysicalMemory pmem; + size_t flushed = 0; + + // Harvest physical memory from flushed pages + XListRemoveIterator iter(allocation->pages()); + for (XPage* page; iter.next(&page);) { + flushed += page->size(); + + // Harvest flushed physical memory + XPhysicalMemory& fmem = page->physical_memory(); + pmem.add_segments(fmem); + fmem.remove_segments(); + + // Unmap and destroy page + _unmapper->unmap_and_destroy_page(page); + } + + if (flushed > 0) { + allocation->set_flushed(flushed); + + // Update statistics + XStatInc(XCounterPageCacheFlush, flushed); + log_debug(gc, heap)("Page Cache Flushed: " SIZE_FORMAT "M", flushed / M); + } + + // Allocate any remaining physical memory. Capacity and used has + // already been adjusted, we just need to fetch the memory, which + // is guaranteed to succeed. + if (flushed < size) { + const size_t remaining = size - flushed; + allocation->set_committed(remaining); + _physical.alloc(pmem, remaining); + } + + // Create new page + return new XPage(allocation->type(), vmem, pmem); +} + +bool XPageAllocator::should_defragment(const XPage* page) const { + // A small page can end up at a high address (second half of the address space) + // if we've split a larger page or we have a constrained address space. To help + // fight address space fragmentation we remap such pages to a lower address, if + // a lower address is available. + return page->type() == XPageTypeSmall && + page->start() >= _virtual.reserved() / 2 && + page->start() > _virtual.lowest_available_address(); +} + +bool XPageAllocator::is_alloc_satisfied(XPageAllocation* allocation) const { + // The allocation is immediately satisfied if the list of pages contains + // exactly one page, with the type and size that was requested. However, + // even if the allocation is immediately satisfied we might still want to + // return false here to force the page to be remapped to fight address + // space fragmentation. 
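+  // For example, a small page that ended up in the upper half of the reserved
+  // address range (typically because it was split off a larger page) is
+  // deliberately not treated as satisfying the allocation when a lower
+  // address is available (see should_defragment() above), so that the slow
+  // path remaps it to a lower address.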
+ + if (allocation->pages()->size() != 1) { + // Not a single page + return false; + } + + const XPage* const page = allocation->pages()->first(); + if (page->type() != allocation->type() || + page->size() != allocation->size()) { + // Wrong type or size + return false; + } + + if (should_defragment(page)) { + // Defragment address space + XStatInc(XCounterDefragment); + return false; + } + + // Allocation immediately satisfied + return true; +} + +XPage* XPageAllocator::alloc_page_finalize(XPageAllocation* allocation) { + // Fast path + if (is_alloc_satisfied(allocation)) { + return allocation->pages()->remove_first(); + } + + // Slow path + XPage* const page = alloc_page_create(allocation); + if (page == NULL) { + // Out of address space + return NULL; + } + + // Commit page + if (commit_page(page)) { + // Success + map_page(page); + return page; + } + + // Failed or partially failed. Split of any successfully committed + // part of the page into a new page and insert it into list of pages, + // so that it will be re-inserted into the page cache. + XPage* const committed_page = page->split_committed(); + destroy_page(page); + + if (committed_page != NULL) { + map_page(committed_page); + allocation->pages()->insert_last(committed_page); + } + + return NULL; +} + +void XPageAllocator::alloc_page_failed(XPageAllocation* allocation) { + XLocker locker(&_lock); + + size_t freed = 0; + + // Free any allocated/flushed pages + XListRemoveIterator iter(allocation->pages()); + for (XPage* page; iter.next(&page);) { + freed += page->size(); + free_page_inner(page, false /* reclaimed */); + } + + // Adjust capacity and used to reflect the failed capacity increase + const size_t remaining = allocation->size() - freed; + decrease_used(remaining, false /* reclaimed */); + decrease_capacity(remaining, true /* set_max_capacity */); + + // Try satisfy stalled allocations + satisfy_stalled(); +} + +XPage* XPageAllocator::alloc_page(uint8_t type, size_t size, XAllocationFlags flags) { + EventZPageAllocation event; + +retry: + XPageAllocation allocation(type, size, flags); + + // Allocate one or more pages from the page cache. If the allocation + // succeeds but the returned pages don't cover the complete allocation, + // then finalize phase is allowed to allocate the remaining memory + // directly from the physical memory manager. Note that this call might + // block in a safepoint if the non-blocking flag is not set. + if (!alloc_page_or_stall(&allocation)) { + // Out of memory + return NULL; + } + + XPage* const page = alloc_page_finalize(&allocation); + if (page == NULL) { + // Failed to commit or map. Clean up and retry, in the hope that + // we can still allocate by flushing the page cache (more aggressively). + alloc_page_failed(&allocation); + goto retry; + } + + // Reset page. This updates the page's sequence number and must + // be done after we potentially blocked in a safepoint (stalled) + // where the global sequence number was updated. + page->reset(); + + // Update allocation statistics. Exclude worker relocations to avoid + // artificial inflation of the allocation rate during relocation. + if (!flags.worker_relocation() && is_init_completed()) { + // Note that there are two allocation rate counters, which have + // different purposes and are sampled at different frequencies. 
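+    // (Presumably XCounterAllocationRate, declared at the top of this file,
+    // feeds the per-second statistics reported in logs/JFR, while
+    // XStatAllocRate::counter() is the signal sampled by the GC heuristics
+    // when predicting the allocation rate.)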
+ const size_t bytes = page->size(); + XStatInc(XCounterAllocationRate, bytes); + XStatInc(XStatAllocRate::counter(), bytes); + } + + // Send event + event.commit(type, size, allocation.flushed(), allocation.committed(), + page->physical_memory().nsegments(), flags.non_blocking()); + + return page; +} + +void XPageAllocator::satisfy_stalled() { + for (;;) { + XPageAllocation* const allocation = _stalled.first(); + if (allocation == NULL) { + // Allocation queue is empty + return; + } + + if (!alloc_page_common(allocation)) { + // Allocation could not be satisfied, give up + return; + } + + // Allocation succeeded, dequeue and satisfy allocation request. + // Note that we must dequeue the allocation request first, since + // it will immediately be deallocated once it has been satisfied. + _stalled.remove(allocation); + _satisfied.insert_last(allocation); + allocation->satisfy(XPageAllocationStallSuccess); + } +} + +void XPageAllocator::free_page_inner(XPage* page, bool reclaimed) { + // Update used statistics + decrease_used(page->size(), reclaimed); + + // Set time when last used + page->set_last_used(); + + // Cache page + _cache.free_page(page); +} + +void XPageAllocator::free_page(XPage* page, bool reclaimed) { + XLocker locker(&_lock); + + // Free page + free_page_inner(page, reclaimed); + + // Try satisfy stalled allocations + satisfy_stalled(); +} + +void XPageAllocator::free_pages(const XArray* pages, bool reclaimed) { + XLocker locker(&_lock); + + // Free pages + XArrayIterator iter(pages); + for (XPage* page; iter.next(&page);) { + free_page_inner(page, reclaimed); + } + + // Try satisfy stalled allocations + satisfy_stalled(); +} + +size_t XPageAllocator::uncommit(uint64_t* timeout) { + // We need to join the suspendible thread set while manipulating capacity and + // used, to make sure GC safepoints will have a consistent view. However, when + // ZVerifyViews is enabled we need to join at a broader scope to also make sure + // we don't change the address good mask after pages have been flushed, and + // thereby made invisible to pages_do(), but before they have been unmapped. + SuspendibleThreadSetJoiner joiner(ZVerifyViews); + XList pages; + size_t flushed; + + { + SuspendibleThreadSetJoiner joiner(!ZVerifyViews); + XLocker locker(&_lock); + + // Never uncommit below min capacity. We flush out and uncommit chunks at + // a time (~0.8% of the max capacity, but at least one granule and at most + // 256M), in case demand for memory increases while we are uncommitting. 
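+    // Worked example (capacities are illustrative): _current_max_capacity >> 7
+    // is 1/128th, i.e. ~0.8%. With a 32G max capacity that is 256M per
+    // iteration; with 4G it is 32M (rounded up to whole granules). Only heaps
+    // larger than 32G are limited by the 256M cap.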
+ const size_t retain = MAX2(_used, _min_capacity); + const size_t release = _capacity - retain; + const size_t limit = MIN2(align_up(_current_max_capacity >> 7, XGranuleSize), 256 * M); + const size_t flush = MIN2(release, limit); + + // Flush pages to uncommit + flushed = _cache.flush_for_uncommit(flush, &pages, timeout); + if (flushed == 0) { + // Nothing flushed + return 0; + } + + // Record flushed pages as claimed + Atomic::add(&_claimed, flushed); + } + + // Unmap, uncommit, and destroy flushed pages + XListRemoveIterator iter(&pages); + for (XPage* page; iter.next(&page);) { + unmap_page(page); + uncommit_page(page); + destroy_page(page); + } + + { + SuspendibleThreadSetJoiner joiner(!ZVerifyViews); + XLocker locker(&_lock); + + // Adjust claimed and capacity to reflect the uncommit + Atomic::sub(&_claimed, flushed); + decrease_capacity(flushed, false /* set_max_capacity */); + } + + return flushed; +} + +void XPageAllocator::enable_deferred_delete() const { + _safe_delete.enable_deferred_delete(); +} + +void XPageAllocator::disable_deferred_delete() const { + _safe_delete.disable_deferred_delete(); +} + +void XPageAllocator::debug_map_page(const XPage* page) const { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + _physical.debug_map(page->start(), page->physical_memory()); +} + +void XPageAllocator::debug_unmap_page(const XPage* page) const { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + _physical.debug_unmap(page->start(), page->size()); +} + +void XPageAllocator::pages_do(XPageClosure* cl) const { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + + XListIterator iter_satisfied(&_satisfied); + for (XPageAllocation* allocation; iter_satisfied.next(&allocation);) { + XListIterator iter_pages(allocation->pages()); + for (XPage* page; iter_pages.next(&page);) { + cl->do_page(page); + } + } + + _cache.pages_do(cl); +} + +bool XPageAllocator::has_alloc_stalled() const { + return Atomic::load(&_nstalled) != 0; +} + +void XPageAllocator::check_out_of_memory() { + XLocker locker(&_lock); + + // Fail allocation requests that were enqueued before the + // last GC cycle started, otherwise start a new GC cycle. + for (XPageAllocation* allocation = _stalled.first(); allocation != NULL; allocation = _stalled.first()) { + if (allocation->seqnum() == XGlobalSeqNum) { + // Start a new GC cycle, keep allocation requests enqueued + allocation->satisfy(XPageAllocationStallStartGC); + return; + } + + // Out of memory, fail allocation request + _stalled.remove(allocation); + _satisfied.insert_last(allocation); + allocation->satisfy(XPageAllocationStallFailed); + } +} + +void XPageAllocator::threads_do(ThreadClosure* tc) const { + tc->do_thread(_unmapper); + tc->do_thread(_uncommitter); +} diff --git a/src/hotspot/share/gc/x/xPageAllocator.hpp b/src/hotspot/share/gc/x/xPageAllocator.hpp new file mode 100644 index 0000000000000..b907e50043d42 --- /dev/null +++ b/src/hotspot/share/gc/x/xPageAllocator.hpp @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XPAGEALLOCATOR_HPP +#define SHARE_GC_X_XPAGEALLOCATOR_HPP + +#include "gc/x/xAllocationFlags.hpp" +#include "gc/x/xArray.hpp" +#include "gc/x/xList.hpp" +#include "gc/x/xLock.hpp" +#include "gc/x/xPageCache.hpp" +#include "gc/x/xPhysicalMemory.hpp" +#include "gc/x/xSafeDelete.hpp" +#include "gc/x/xVirtualMemory.hpp" + +class ThreadClosure; +class VMStructs; +class XPageAllocation; +class XPageAllocatorStats; +class XWorkers; +class XUncommitter; +class XUnmapper; + +class XPageAllocator { + friend class ::VMStructs; + friend class XUnmapper; + friend class XUncommitter; + +private: + mutable XLock _lock; + XPageCache _cache; + XVirtualMemoryManager _virtual; + XPhysicalMemoryManager _physical; + const size_t _min_capacity; + const size_t _max_capacity; + volatile size_t _current_max_capacity; + volatile size_t _capacity; + volatile size_t _claimed; + volatile size_t _used; + size_t _used_high; + size_t _used_low; + ssize_t _reclaimed; + XList _stalled; + volatile uint64_t _nstalled; + XList _satisfied; + XUnmapper* _unmapper; + XUncommitter* _uncommitter; + mutable XSafeDelete _safe_delete; + bool _initialized; + + bool prime_cache(XWorkers* workers, size_t size); + + size_t increase_capacity(size_t size); + void decrease_capacity(size_t size, bool set_max_capacity); + + void increase_used(size_t size, bool relocation); + void decrease_used(size_t size, bool reclaimed); + + bool commit_page(XPage* page); + void uncommit_page(XPage* page); + + void map_page(const XPage* page) const; + void unmap_page(const XPage* page) const; + + void destroy_page(XPage* page); + + bool is_alloc_allowed(size_t size) const; + + bool alloc_page_common_inner(uint8_t type, size_t size, XList* pages); + bool alloc_page_common(XPageAllocation* allocation); + bool alloc_page_stall(XPageAllocation* allocation); + bool alloc_page_or_stall(XPageAllocation* allocation); + bool should_defragment(const XPage* page) const; + bool is_alloc_satisfied(XPageAllocation* allocation) const; + XPage* alloc_page_create(XPageAllocation* allocation); + XPage* alloc_page_finalize(XPageAllocation* allocation); + void alloc_page_failed(XPageAllocation* allocation); + + void satisfy_stalled(); + + void free_page_inner(XPage* page, bool reclaimed); + + size_t uncommit(uint64_t* timeout); + +public: + XPageAllocator(XWorkers* workers, + size_t min_capacity, + size_t initial_capacity, + size_t max_capacity); + + bool is_initialized() const; + + size_t min_capacity() const; + size_t max_capacity() const; + size_t soft_max_capacity() const; + size_t capacity() const; + size_t used() const; + size_t unused() const; + + XPageAllocatorStats stats() const; + + void reset_statistics(); + + XPage* alloc_page(uint8_t type, size_t size, XAllocationFlags flags); + void free_page(XPage* page, bool reclaimed); + 
void free_pages(const XArray* pages, bool reclaimed); + + void enable_deferred_delete() const; + void disable_deferred_delete() const; + + void debug_map_page(const XPage* page) const; + void debug_unmap_page(const XPage* page) const; + + bool has_alloc_stalled() const; + void check_out_of_memory(); + + void pages_do(XPageClosure* cl) const; + + void threads_do(ThreadClosure* tc) const; +}; + +class XPageAllocatorStats { +private: + size_t _min_capacity; + size_t _max_capacity; + size_t _soft_max_capacity; + size_t _current_max_capacity; + size_t _capacity; + size_t _used; + size_t _used_high; + size_t _used_low; + size_t _reclaimed; + +public: + XPageAllocatorStats(size_t min_capacity, + size_t max_capacity, + size_t soft_max_capacity, + size_t capacity, + size_t used, + size_t used_high, + size_t used_low, + size_t reclaimed); + + size_t min_capacity() const; + size_t max_capacity() const; + size_t soft_max_capacity() const; + size_t capacity() const; + size_t used() const; + size_t used_high() const; + size_t used_low() const; + size_t reclaimed() const; +}; + +#endif // SHARE_GC_X_XPAGEALLOCATOR_HPP diff --git a/src/hotspot/share/gc/x/xPageAllocator.inline.hpp b/src/hotspot/share/gc/x/xPageAllocator.inline.hpp new file mode 100644 index 0000000000000..dbaf77f56a051 --- /dev/null +++ b/src/hotspot/share/gc/x/xPageAllocator.inline.hpp @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XPAGEALLOCATOR_INLINE_HPP +#define SHARE_GC_X_XPAGEALLOCATOR_INLINE_HPP + +#include "gc/x/xPageAllocator.hpp" + +inline XPageAllocatorStats::XPageAllocatorStats(size_t min_capacity, + size_t max_capacity, + size_t soft_max_capacity, + size_t capacity, + size_t used, + size_t used_high, + size_t used_low, + size_t reclaimed) : + _min_capacity(min_capacity), + _max_capacity(max_capacity), + _soft_max_capacity(soft_max_capacity), + _capacity(capacity), + _used(used), + _used_high(used_high), + _used_low(used_low), + _reclaimed(reclaimed) {} + +inline size_t XPageAllocatorStats::min_capacity() const { + return _min_capacity; +} + +inline size_t XPageAllocatorStats::max_capacity() const { + return _max_capacity; +} + +inline size_t XPageAllocatorStats::soft_max_capacity() const { + return _soft_max_capacity; +} + +inline size_t XPageAllocatorStats::capacity() const { + return _capacity; +} + +inline size_t XPageAllocatorStats::used() const { + return _used; +} + +inline size_t XPageAllocatorStats::used_high() const { + return _used_high; +} + +inline size_t XPageAllocatorStats::used_low() const { + return _used_low; +} + +inline size_t XPageAllocatorStats::reclaimed() const { + return _reclaimed; +} + +#endif // SHARE_GC_X_XPAGEALLOCATOR_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xPageCache.cpp b/src/hotspot/share/gc/x/xPageCache.cpp new file mode 100644 index 0000000000000..8f8a6636369b1 --- /dev/null +++ b/src/hotspot/share/gc/x/xPageCache.cpp @@ -0,0 +1,356 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xList.inline.hpp" +#include "gc/x/xNUMA.hpp" +#include "gc/x/xPage.inline.hpp" +#include "gc/x/xPageCache.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xValue.inline.hpp" +#include "memory/allocation.hpp" +#include "runtime/globals.hpp" +#include "runtime/os.hpp" + +static const XStatCounter XCounterPageCacheHitL1("Memory", "Page Cache Hit L1", XStatUnitOpsPerSecond); +static const XStatCounter XCounterPageCacheHitL2("Memory", "Page Cache Hit L2", XStatUnitOpsPerSecond); +static const XStatCounter XCounterPageCacheHitL3("Memory", "Page Cache Hit L3", XStatUnitOpsPerSecond); +static const XStatCounter XCounterPageCacheMiss("Memory", "Page Cache Miss", XStatUnitOpsPerSecond); + +class XPageCacheFlushClosure : public StackObj { + friend class XPageCache; + +protected: + const size_t _requested; + size_t _flushed; + +public: + XPageCacheFlushClosure(size_t requested); + virtual bool do_page(const XPage* page) = 0; +}; + +XPageCacheFlushClosure::XPageCacheFlushClosure(size_t requested) : + _requested(requested), + _flushed(0) {} + +XPageCache::XPageCache() : + _small(), + _medium(), + _large(), + _last_commit(0) {} + +XPage* XPageCache::alloc_small_page() { + const uint32_t numa_id = XNUMA::id(); + const uint32_t numa_count = XNUMA::count(); + + // Try NUMA local page cache + XPage* const l1_page = _small.get(numa_id).remove_first(); + if (l1_page != NULL) { + XStatInc(XCounterPageCacheHitL1); + return l1_page; + } + + // Try NUMA remote page cache(s) + uint32_t remote_numa_id = numa_id + 1; + const uint32_t remote_numa_count = numa_count - 1; + for (uint32_t i = 0; i < remote_numa_count; i++) { + if (remote_numa_id == numa_count) { + remote_numa_id = 0; + } + + XPage* const l2_page = _small.get(remote_numa_id).remove_first(); + if (l2_page != NULL) { + XStatInc(XCounterPageCacheHitL2); + return l2_page; + } + + remote_numa_id++; + } + + return NULL; +} + +XPage* XPageCache::alloc_medium_page() { + XPage* const page = _medium.remove_first(); + if (page != NULL) { + XStatInc(XCounterPageCacheHitL1); + return page; + } + + return NULL; +} + +XPage* XPageCache::alloc_large_page(size_t size) { + // Find a page with the right size + XListIterator iter(&_large); + for (XPage* page; iter.next(&page);) { + if (size == page->size()) { + // Page found + _large.remove(page); + XStatInc(XCounterPageCacheHitL1); + return page; + } + } + + return NULL; +} + +XPage* XPageCache::alloc_oversized_medium_page(size_t size) { + if (size <= XPageSizeMedium) { + return _medium.remove_first(); + } + + return NULL; +} + +XPage* XPageCache::alloc_oversized_large_page(size_t size) { + // Find a page that is large enough + XListIterator iter(&_large); + for (XPage* page; iter.next(&page);) { + if (size <= page->size()) { + // Page found + _large.remove(page); + return page; + } + } + + return NULL; +} + +XPage* XPageCache::alloc_oversized_page(size_t size) { + XPage* page = alloc_oversized_large_page(size); + if (page == NULL) { + page = alloc_oversized_medium_page(size); + } + + if (page != NULL) { + XStatInc(XCounterPageCacheHitL3); + } + + return page; +} + +XPage* XPageCache::alloc_page(uint8_t type, size_t size) { + XPage* page; + + // Try allocate exact page + if (type == XPageTypeSmall) { + page = alloc_small_page(); + } else if (type == XPageTypeMedium) { + page = alloc_medium_page(); + } else { + page = alloc_large_page(size); + } + + if (page == NULL) { + // Try allocate potentially oversized page + XPage* const oversized = 
alloc_oversized_page(size); + if (oversized != NULL) { + if (size < oversized->size()) { + // Split oversized page + page = oversized->split(type, size); + + // Cache remainder + free_page(oversized); + } else { + // Re-type correctly sized page + page = oversized->retype(type); + } + } + } + + if (page == NULL) { + XStatInc(XCounterPageCacheMiss); + } + + return page; +} + +void XPageCache::free_page(XPage* page) { + const uint8_t type = page->type(); + if (type == XPageTypeSmall) { + _small.get(page->numa_id()).insert_first(page); + } else if (type == XPageTypeMedium) { + _medium.insert_first(page); + } else { + _large.insert_first(page); + } +} + +bool XPageCache::flush_list_inner(XPageCacheFlushClosure* cl, XList* from, XList* to) { + XPage* const page = from->last(); + if (page == NULL || !cl->do_page(page)) { + // Don't flush page + return false; + } + + // Flush page + from->remove(page); + to->insert_last(page); + return true; +} + +void XPageCache::flush_list(XPageCacheFlushClosure* cl, XList* from, XList* to) { + while (flush_list_inner(cl, from, to)); +} + +void XPageCache::flush_per_numa_lists(XPageCacheFlushClosure* cl, XPerNUMA >* from, XList* to) { + const uint32_t numa_count = XNUMA::count(); + uint32_t numa_done = 0; + uint32_t numa_next = 0; + + // Flush lists round-robin + while (numa_done < numa_count) { + XList* numa_list = from->addr(numa_next); + if (++numa_next == numa_count) { + numa_next = 0; + } + + if (flush_list_inner(cl, numa_list, to)) { + // Not done + numa_done = 0; + } else { + // Done + numa_done++; + } + } +} + +void XPageCache::flush(XPageCacheFlushClosure* cl, XList* to) { + // Prefer flushing large, then medium and last small pages + flush_list(cl, &_large, to); + flush_list(cl, &_medium, to); + flush_per_numa_lists(cl, &_small, to); + + if (cl->_flushed > cl->_requested) { + // Overflushed, re-insert part of last page into the cache + const size_t overflushed = cl->_flushed - cl->_requested; + XPage* const reinsert = to->last()->split(overflushed); + free_page(reinsert); + cl->_flushed -= overflushed; + } +} + +class XPageCacheFlushForAllocationClosure : public XPageCacheFlushClosure { +public: + XPageCacheFlushForAllocationClosure(size_t requested) : + XPageCacheFlushClosure(requested) {} + + virtual bool do_page(const XPage* page) { + if (_flushed < _requested) { + // Flush page + _flushed += page->size(); + return true; + } + + // Don't flush page + return false; + } +}; + +void XPageCache::flush_for_allocation(size_t requested, XList* to) { + XPageCacheFlushForAllocationClosure cl(requested); + flush(&cl, to); +} + +class XPageCacheFlushForUncommitClosure : public XPageCacheFlushClosure { +private: + const uint64_t _now; + uint64_t* _timeout; + +public: + XPageCacheFlushForUncommitClosure(size_t requested, uint64_t now, uint64_t* timeout) : + XPageCacheFlushClosure(requested), + _now(now), + _timeout(timeout) { + // Set initial timeout + *_timeout = ZUncommitDelay; + } + + virtual bool do_page(const XPage* page) { + const uint64_t expires = page->last_used() + ZUncommitDelay; + if (expires > _now) { + // Don't flush page, record shortest non-expired timeout + *_timeout = MIN2(*_timeout, expires - _now); + return false; + } + + if (_flushed >= _requested) { + // Don't flush page, requested amount flushed + return false; + } + + // Flush page + _flushed += page->size(); + return true; + } +}; + +size_t XPageCache::flush_for_uncommit(size_t requested, XList* to, uint64_t* timeout) { + const uint64_t now = os::elapsedTime(); + const uint64_t expires 
= _last_commit + ZUncommitDelay; + if (expires > now) { + // Delay uncommit, set next timeout + *timeout = expires - now; + return 0; + } + + if (requested == 0) { + // Nothing to flush, set next timeout + *timeout = ZUncommitDelay; + return 0; + } + + XPageCacheFlushForUncommitClosure cl(requested, now, timeout); + flush(&cl, to); + + return cl._flushed; +} + +void XPageCache::set_last_commit() { + _last_commit = ceil(os::elapsedTime()); +} + +void XPageCache::pages_do(XPageClosure* cl) const { + // Small + XPerNUMAConstIterator > iter_numa(&_small); + for (const XList* list; iter_numa.next(&list);) { + XListIterator iter_small(list); + for (XPage* page; iter_small.next(&page);) { + cl->do_page(page); + } + } + + // Medium + XListIterator iter_medium(&_medium); + for (XPage* page; iter_medium.next(&page);) { + cl->do_page(page); + } + + // Large + XListIterator iter_large(&_large); + for (XPage* page; iter_large.next(&page);) { + cl->do_page(page); + } +} diff --git a/src/hotspot/share/gc/x/xPageCache.hpp b/src/hotspot/share/gc/x/xPageCache.hpp new file mode 100644 index 0000000000000..9ed80a933f43b --- /dev/null +++ b/src/hotspot/share/gc/x/xPageCache.hpp @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+#ifndef SHARE_GC_X_XPAGECACHE_HPP
+#define SHARE_GC_X_XPAGECACHE_HPP
+
+#include "gc/x/xList.hpp"
+#include "gc/x/xPage.hpp"
+#include "gc/x/xValue.hpp"
+
+class XPageCacheFlushClosure;
+
+class XPageCache {
+private:
+  XPerNUMA<XList<XPage> > _small;
+  XList<XPage>            _medium;
+  XList<XPage>            _large;
+  uint64_t                _last_commit;
+
+  XPage* alloc_small_page();
+  XPage* alloc_medium_page();
+  XPage* alloc_large_page(size_t size);
+
+  XPage* alloc_oversized_medium_page(size_t size);
+  XPage* alloc_oversized_large_page(size_t size);
+  XPage* alloc_oversized_page(size_t size);
+
+  bool flush_list_inner(XPageCacheFlushClosure* cl, XList<XPage>* from, XList<XPage>* to);
+  void flush_list(XPageCacheFlushClosure* cl, XList<XPage>* from, XList<XPage>* to);
+  void flush_per_numa_lists(XPageCacheFlushClosure* cl, XPerNUMA<XList<XPage> >* from, XList<XPage>* to);
+  void flush(XPageCacheFlushClosure* cl, XList<XPage>* to);
+
+public:
+  XPageCache();
+
+  XPage* alloc_page(uint8_t type, size_t size);
+  void free_page(XPage* page);
+
+  void flush_for_allocation(size_t requested, XList<XPage>* to);
+  size_t flush_for_uncommit(size_t requested, XList<XPage>* to, uint64_t* timeout);
+
+  void set_last_commit();
+
+  void pages_do(XPageClosure* cl) const;
+};
+
+#endif // SHARE_GC_X_XPAGECACHE_HPP
diff --git a/src/hotspot/share/gc/x/xPageTable.cpp b/src/hotspot/share/gc/x/xPageTable.cpp
new file mode 100644
index 0000000000000..6cdb7c929e1a0
--- /dev/null
+++ b/src/hotspot/share/gc/x/xPageTable.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/x/xGlobals.hpp"
+#include "gc/x/xGranuleMap.inline.hpp"
+#include "gc/x/xPage.inline.hpp"
+#include "gc/x/xPageTable.inline.hpp"
+#include "runtime/orderAccess.hpp"
+#include "utilities/debug.hpp"
+
+XPageTable::XPageTable() :
+    _map(XAddressOffsetMax) {}
+
+void XPageTable::insert(XPage* page) {
+  const uintptr_t offset = page->start();
+  const size_t size = page->size();
+
+  // Make sure a newly created page is
+  // visible before updating the page table.
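+  // The store-store barrier below orders the stores that initialized the
+  // XPage object before the store that publishes it in the page table, so
+  // that a concurrent reader that finds the new entry also sees a fully
+  // constructed page.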
+ OrderAccess::storestore(); + + assert(_map.get(offset) == NULL, "Invalid entry"); + _map.put(offset, size, page); +} + +void XPageTable::remove(XPage* page) { + const uintptr_t offset = page->start(); + const size_t size = page->size(); + + assert(_map.get(offset) == page, "Invalid entry"); + _map.put(offset, size, NULL); +} diff --git a/src/hotspot/share/gc/x/xPageTable.hpp b/src/hotspot/share/gc/x/xPageTable.hpp new file mode 100644 index 0000000000000..958dd73555770 --- /dev/null +++ b/src/hotspot/share/gc/x/xPageTable.hpp @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XPAGETABLE_HPP +#define SHARE_GC_X_XPAGETABLE_HPP + +#include "gc/x/xGranuleMap.hpp" +#include "memory/allocation.hpp" + +class VMStructs; +class XPage; +class XPageTableIterator; + +class XPageTable { + friend class ::VMStructs; + friend class XPageTableIterator; + +private: + XGranuleMap _map; + +public: + XPageTable(); + + XPage* get(uintptr_t addr) const; + + void insert(XPage* page); + void remove(XPage* page); +}; + +class XPageTableIterator : public StackObj { +private: + XGranuleMapIterator _iter; + XPage* _prev; + +public: + XPageTableIterator(const XPageTable* page_table); + + bool next(XPage** page); +}; + +#endif // SHARE_GC_X_XPAGETABLE_HPP diff --git a/src/hotspot/share/gc/x/xPageTable.inline.hpp b/src/hotspot/share/gc/x/xPageTable.inline.hpp new file mode 100644 index 0000000000000..c4f30d3e9c3e9 --- /dev/null +++ b/src/hotspot/share/gc/x/xPageTable.inline.hpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XPAGETABLE_INLINE_HPP +#define SHARE_GC_X_XPAGETABLE_INLINE_HPP + +#include "gc/x/xPageTable.hpp" + +#include "gc/x/xAddress.inline.hpp" +#include "gc/x/xGranuleMap.inline.hpp" + +inline XPage* XPageTable::get(uintptr_t addr) const { + assert(!XAddress::is_null(addr), "Invalid address"); + return _map.get(XAddress::offset(addr)); +} + +inline XPageTableIterator::XPageTableIterator(const XPageTable* page_table) : + _iter(&page_table->_map), + _prev(NULL) {} + +inline bool XPageTableIterator::next(XPage** page) { + for (XPage* entry; _iter.next(&entry);) { + if (entry != NULL && entry != _prev) { + // Next page found + *page = _prev = entry; + return true; + } + } + + // No more pages + return false; +} + +#endif // SHARE_GC_X_XPAGETABLE_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xPhysicalMemory.cpp b/src/hotspot/share/gc/x/xPhysicalMemory.cpp new file mode 100644 index 0000000000000..20902cc05bc72 --- /dev/null +++ b/src/hotspot/share/gc/x/xPhysicalMemory.cpp @@ -0,0 +1,435 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/x/xAddress.inline.hpp" +#include "gc/x/xArray.inline.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xLargePages.inline.hpp" +#include "gc/x/xNUMA.inline.hpp" +#include "gc/x/xPhysicalMemory.inline.hpp" +#include "logging/log.hpp" +#include "runtime/globals.hpp" +#include "runtime/globals_extension.hpp" +#include "runtime/init.hpp" +#include "runtime/os.hpp" +#include "services/memTracker.hpp" +#include "utilities/align.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/powerOfTwo.hpp" + +XPhysicalMemory::XPhysicalMemory() : + _segments() {} + +XPhysicalMemory::XPhysicalMemory(const XPhysicalMemorySegment& segment) : + _segments() { + add_segment(segment); +} + +XPhysicalMemory::XPhysicalMemory(const XPhysicalMemory& pmem) : + _segments() { + add_segments(pmem); +} + +const XPhysicalMemory& XPhysicalMemory::operator=(const XPhysicalMemory& pmem) { + // Free segments + _segments.clear_and_deallocate(); + + // Copy segments + add_segments(pmem); + + return *this; +} + +size_t XPhysicalMemory::size() const { + size_t size = 0; + + for (int i = 0; i < _segments.length(); i++) { + size += _segments.at(i).size(); + } + + return size; +} + +void XPhysicalMemory::insert_segment(int index, uintptr_t start, size_t size, bool committed) { + _segments.insert_before(index, XPhysicalMemorySegment(start, size, committed)); +} + +void XPhysicalMemory::replace_segment(int index, uintptr_t start, size_t size, bool committed) { + _segments.at_put(index, XPhysicalMemorySegment(start, size, committed)); +} + +void XPhysicalMemory::remove_segment(int index) { + _segments.remove_at(index); +} + +void XPhysicalMemory::add_segments(const XPhysicalMemory& pmem) { + for (int i = 0; i < pmem.nsegments(); i++) { + add_segment(pmem.segment(i)); + } +} + +void XPhysicalMemory::remove_segments() { + _segments.clear_and_deallocate(); +} + +static bool is_mergable(const XPhysicalMemorySegment& before, const XPhysicalMemorySegment& after) { + return before.end() == after.start() && before.is_committed() == after.is_committed(); +} + +void XPhysicalMemory::add_segment(const XPhysicalMemorySegment& segment) { + // Insert segments in address order, merge segments when possible + for (int i = _segments.length(); i > 0; i--) { + const int current = i - 1; + + if (_segments.at(current).end() <= segment.start()) { + if (is_mergable(_segments.at(current), segment)) { + if (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) { + // Merge with end of current segment and start of next segment + const size_t start = _segments.at(current).start(); + const size_t size = _segments.at(current).size() + segment.size() + _segments.at(current + 1).size(); + replace_segment(current, start, size, segment.is_committed()); + remove_segment(current + 1); + return; + } + + // Merge with end of current segment + const size_t start = _segments.at(current).start(); + const size_t size = _segments.at(current).size() + segment.size(); + replace_segment(current, start, size, segment.is_committed()); + return; + } else if (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) { + // Merge with start of next segment + const size_t start = segment.start(); + const size_t size = segment.size() + _segments.at(current + 1).size(); + replace_segment(current + 1, start, size, segment.is_committed()); + return; + } + + // Insert after current segment + 
insert_segment(current + 1, segment.start(), segment.size(), segment.is_committed()); + return; + } + } + + if (_segments.length() > 0 && is_mergable(segment, _segments.at(0))) { + // Merge with start of first segment + const size_t start = segment.start(); + const size_t size = segment.size() + _segments.at(0).size(); + replace_segment(0, start, size, segment.is_committed()); + return; + } + + // Insert before first segment + insert_segment(0, segment.start(), segment.size(), segment.is_committed()); +} + +bool XPhysicalMemory::commit_segment(int index, size_t size) { + assert(size <= _segments.at(index).size(), "Invalid size"); + assert(!_segments.at(index).is_committed(), "Invalid state"); + + if (size == _segments.at(index).size()) { + // Completely committed + _segments.at(index).set_committed(true); + return true; + } + + if (size > 0) { + // Partially committed, split segment + insert_segment(index + 1, _segments.at(index).start() + size, _segments.at(index).size() - size, false /* committed */); + replace_segment(index, _segments.at(index).start(), size, true /* committed */); + } + + return false; +} + +bool XPhysicalMemory::uncommit_segment(int index, size_t size) { + assert(size <= _segments.at(index).size(), "Invalid size"); + assert(_segments.at(index).is_committed(), "Invalid state"); + + if (size == _segments.at(index).size()) { + // Completely uncommitted + _segments.at(index).set_committed(false); + return true; + } + + if (size > 0) { + // Partially uncommitted, split segment + insert_segment(index + 1, _segments.at(index).start() + size, _segments.at(index).size() - size, true /* committed */); + replace_segment(index, _segments.at(index).start(), size, false /* committed */); + } + + return false; +} + +XPhysicalMemory XPhysicalMemory::split(size_t size) { + XPhysicalMemory pmem; + int nsegments = 0; + + for (int i = 0; i < _segments.length(); i++) { + const XPhysicalMemorySegment& segment = _segments.at(i); + if (pmem.size() < size) { + if (pmem.size() + segment.size() <= size) { + // Transfer segment + pmem.add_segment(segment); + } else { + // Split segment + const size_t split_size = size - pmem.size(); + pmem.add_segment(XPhysicalMemorySegment(segment.start(), split_size, segment.is_committed())); + _segments.at_put(nsegments++, XPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size, segment.is_committed())); + } + } else { + // Keep segment + _segments.at_put(nsegments++, segment); + } + } + + _segments.trunc_to(nsegments); + + return pmem; +} + +XPhysicalMemory XPhysicalMemory::split_committed() { + XPhysicalMemory pmem; + int nsegments = 0; + + for (int i = 0; i < _segments.length(); i++) { + const XPhysicalMemorySegment& segment = _segments.at(i); + if (segment.is_committed()) { + // Transfer segment + pmem.add_segment(segment); + } else { + // Keep segment + _segments.at_put(nsegments++, segment); + } + } + + _segments.trunc_to(nsegments); + + return pmem; +} + +XPhysicalMemoryManager::XPhysicalMemoryManager(size_t max_capacity) : + _backing(max_capacity) { + // Make the whole range free + _manager.free(0, max_capacity); +} + +bool XPhysicalMemoryManager::is_initialized() const { + return _backing.is_initialized(); +} + +void XPhysicalMemoryManager::warn_commit_limits(size_t max_capacity) const { + _backing.warn_commit_limits(max_capacity); +} + +void XPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max_capacity) { + assert(!is_init_completed(), "Invalid state"); + + // If uncommit is not explicitly disabled, 
max capacity is greater than + // min capacity, and uncommit is supported by the platform, then uncommit + // will be enabled. + if (!ZUncommit) { + log_info_p(gc, init)("Uncommit: Disabled"); + return; + } + + if (max_capacity == min_capacity) { + log_info_p(gc, init)("Uncommit: Implicitly Disabled (-Xms equals -Xmx)"); + FLAG_SET_ERGO(ZUncommit, false); + return; + } + + // Test if uncommit is supported by the operating system by committing + // and then uncommitting a granule. + XPhysicalMemory pmem(XPhysicalMemorySegment(0, XGranuleSize, false /* committed */)); + if (!commit(pmem) || !uncommit(pmem)) { + log_info_p(gc, init)("Uncommit: Implicitly Disabled (Not supported by operating system)"); + FLAG_SET_ERGO(ZUncommit, false); + return; + } + + log_info_p(gc, init)("Uncommit: Enabled"); + log_info_p(gc, init)("Uncommit Delay: " UINTX_FORMAT "s", ZUncommitDelay); +} + +void XPhysicalMemoryManager::nmt_commit(uintptr_t offset, size_t size) const { + // From an NMT point of view we treat the first heap view (marked0) as committed + const uintptr_t addr = XAddress::marked0(offset); + MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC); +} + +void XPhysicalMemoryManager::nmt_uncommit(uintptr_t offset, size_t size) const { + if (MemTracker::enabled()) { + const uintptr_t addr = XAddress::marked0(offset); + Tracker tracker(Tracker::uncommit); + tracker.record((address)addr, size); + } +} + +void XPhysicalMemoryManager::alloc(XPhysicalMemory& pmem, size_t size) { + assert(is_aligned(size, XGranuleSize), "Invalid size"); + + // Allocate segments + while (size > 0) { + size_t allocated = 0; + const uintptr_t start = _manager.alloc_low_address_at_most(size, &allocated); + assert(start != UINTPTR_MAX, "Allocation should never fail"); + pmem.add_segment(XPhysicalMemorySegment(start, allocated, false /* committed */)); + size -= allocated; + } +} + +void XPhysicalMemoryManager::free(const XPhysicalMemory& pmem) { + // Free segments + for (int i = 0; i < pmem.nsegments(); i++) { + const XPhysicalMemorySegment& segment = pmem.segment(i); + _manager.free(segment.start(), segment.size()); + } +} + +bool XPhysicalMemoryManager::commit(XPhysicalMemory& pmem) { + // Commit segments + for (int i = 0; i < pmem.nsegments(); i++) { + const XPhysicalMemorySegment& segment = pmem.segment(i); + if (segment.is_committed()) { + // Segment already committed + continue; + } + + // Commit segment + const size_t committed = _backing.commit(segment.start(), segment.size()); + if (!pmem.commit_segment(i, committed)) { + // Failed or partially failed + return false; + } + } + + // Success + return true; +} + +bool XPhysicalMemoryManager::uncommit(XPhysicalMemory& pmem) { + // Commit segments + for (int i = 0; i < pmem.nsegments(); i++) { + const XPhysicalMemorySegment& segment = pmem.segment(i); + if (!segment.is_committed()) { + // Segment already uncommitted + continue; + } + + // Uncommit segment + const size_t uncommitted = _backing.uncommit(segment.start(), segment.size()); + if (!pmem.uncommit_segment(i, uncommitted)) { + // Failed or partially failed + return false; + } + } + + // Success + return true; +} + +void XPhysicalMemoryManager::pretouch_view(uintptr_t addr, size_t size) const { + const size_t page_size = XLargePages::is_explicit() ? 
XGranuleSize : os::vm_page_size(); + os::pretouch_memory((void*)addr, (void*)(addr + size), page_size); +} + +void XPhysicalMemoryManager::map_view(uintptr_t addr, const XPhysicalMemory& pmem) const { + size_t size = 0; + + // Map segments + for (int i = 0; i < pmem.nsegments(); i++) { + const XPhysicalMemorySegment& segment = pmem.segment(i); + _backing.map(addr + size, segment.size(), segment.start()); + size += segment.size(); + } + + // Setup NUMA interleaving for large pages + if (XNUMA::is_enabled() && XLargePages::is_explicit()) { + // To get granule-level NUMA interleaving when using large pages, + // we simply let the kernel interleave the memory for us at page + // fault time. + os::numa_make_global((char*)addr, size); + } +} + +void XPhysicalMemoryManager::unmap_view(uintptr_t addr, size_t size) const { + _backing.unmap(addr, size); +} + +void XPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const { + if (ZVerifyViews) { + // Pre-touch good view + pretouch_view(XAddress::good(offset), size); + } else { + // Pre-touch all views + pretouch_view(XAddress::marked0(offset), size); + pretouch_view(XAddress::marked1(offset), size); + pretouch_view(XAddress::remapped(offset), size); + } +} + +void XPhysicalMemoryManager::map(uintptr_t offset, const XPhysicalMemory& pmem) const { + const size_t size = pmem.size(); + + if (ZVerifyViews) { + // Map good view + map_view(XAddress::good(offset), pmem); + } else { + // Map all views + map_view(XAddress::marked0(offset), pmem); + map_view(XAddress::marked1(offset), pmem); + map_view(XAddress::remapped(offset), pmem); + } + + nmt_commit(offset, size); +} + +void XPhysicalMemoryManager::unmap(uintptr_t offset, size_t size) const { + nmt_uncommit(offset, size); + + if (ZVerifyViews) { + // Unmap good view + unmap_view(XAddress::good(offset), size); + } else { + // Unmap all views + unmap_view(XAddress::marked0(offset), size); + unmap_view(XAddress::marked1(offset), size); + unmap_view(XAddress::remapped(offset), size); + } +} + +void XPhysicalMemoryManager::debug_map(uintptr_t offset, const XPhysicalMemory& pmem) const { + // Map good view + assert(ZVerifyViews, "Should be enabled"); + map_view(XAddress::good(offset), pmem); +} + +void XPhysicalMemoryManager::debug_unmap(uintptr_t offset, size_t size) const { + // Unmap good view + assert(ZVerifyViews, "Should be enabled"); + unmap_view(XAddress::good(offset), size); +} diff --git a/src/hotspot/share/gc/x/xPhysicalMemory.hpp b/src/hotspot/share/gc/x/xPhysicalMemory.hpp new file mode 100644 index 0000000000000..26d8ed9bb9641 --- /dev/null +++ b/src/hotspot/share/gc/x/xPhysicalMemory.hpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XPHYSICALMEMORY_HPP +#define SHARE_GC_X_XPHYSICALMEMORY_HPP + +#include "gc/x/xArray.hpp" +#include "gc/x/xMemory.hpp" +#include "memory/allocation.hpp" +#include OS_HEADER(gc/x/xPhysicalMemoryBacking) + +class XPhysicalMemorySegment : public CHeapObj { +private: + uintptr_t _start; + uintptr_t _end; + bool _committed; + +public: + XPhysicalMemorySegment(); + XPhysicalMemorySegment(uintptr_t start, size_t size, bool committed); + + uintptr_t start() const; + uintptr_t end() const; + size_t size() const; + + bool is_committed() const; + void set_committed(bool committed); +}; + +class XPhysicalMemory { +private: + XArray _segments; + + void insert_segment(int index, uintptr_t start, size_t size, bool committed); + void replace_segment(int index, uintptr_t start, size_t size, bool committed); + void remove_segment(int index); + +public: + XPhysicalMemory(); + XPhysicalMemory(const XPhysicalMemorySegment& segment); + XPhysicalMemory(const XPhysicalMemory& pmem); + const XPhysicalMemory& operator=(const XPhysicalMemory& pmem); + + bool is_null() const; + size_t size() const; + + int nsegments() const; + const XPhysicalMemorySegment& segment(int index) const; + + void add_segments(const XPhysicalMemory& pmem); + void remove_segments(); + + void add_segment(const XPhysicalMemorySegment& segment); + bool commit_segment(int index, size_t size); + bool uncommit_segment(int index, size_t size); + + XPhysicalMemory split(size_t size); + XPhysicalMemory split_committed(); +}; + +class XPhysicalMemoryManager { +private: + XPhysicalMemoryBacking _backing; + XMemoryManager _manager; + + void nmt_commit(uintptr_t offset, size_t size) const; + void nmt_uncommit(uintptr_t offset, size_t size) const; + + void pretouch_view(uintptr_t addr, size_t size) const; + void map_view(uintptr_t addr, const XPhysicalMemory& pmem) const; + void unmap_view(uintptr_t addr, size_t size) const; + +public: + XPhysicalMemoryManager(size_t max_capacity); + + bool is_initialized() const; + + void warn_commit_limits(size_t max_capacity) const; + void try_enable_uncommit(size_t min_capacity, size_t max_capacity); + + void alloc(XPhysicalMemory& pmem, size_t size); + void free(const XPhysicalMemory& pmem); + + bool commit(XPhysicalMemory& pmem); + bool uncommit(XPhysicalMemory& pmem); + + void pretouch(uintptr_t offset, size_t size) const; + + void map(uintptr_t offset, const XPhysicalMemory& pmem) const; + void unmap(uintptr_t offset, size_t size) const; + + void debug_map(uintptr_t offset, const XPhysicalMemory& pmem) const; + void debug_unmap(uintptr_t offset, size_t size) const; +}; + +#endif // SHARE_GC_X_XPHYSICALMEMORY_HPP diff --git a/src/hotspot/share/gc/x/xPhysicalMemory.inline.hpp b/src/hotspot/share/gc/x/xPhysicalMemory.inline.hpp new file mode 100644 index 0000000000000..70f38e2abdbbb --- /dev/null +++ b/src/hotspot/share/gc/x/xPhysicalMemory.inline.hpp @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XPHYSICALMEMORY_INLINE_HPP +#define SHARE_GC_X_XPHYSICALMEMORY_INLINE_HPP + +#include "gc/x/xPhysicalMemory.hpp" + +#include "gc/x/xAddress.inline.hpp" +#include "utilities/debug.hpp" + +inline XPhysicalMemorySegment::XPhysicalMemorySegment() : + _start(UINTPTR_MAX), + _end(UINTPTR_MAX), + _committed(false) {} + +inline XPhysicalMemorySegment::XPhysicalMemorySegment(uintptr_t start, size_t size, bool committed) : + _start(start), + _end(start + size), + _committed(committed) {} + +inline uintptr_t XPhysicalMemorySegment::start() const { + return _start; +} + +inline uintptr_t XPhysicalMemorySegment::end() const { + return _end; +} + +inline size_t XPhysicalMemorySegment::size() const { + return _end - _start; +} + +inline bool XPhysicalMemorySegment::is_committed() const { + return _committed; +} + +inline void XPhysicalMemorySegment::set_committed(bool committed) { + _committed = committed; +} + +inline bool XPhysicalMemory::is_null() const { + return _segments.length() == 0; +} + +inline int XPhysicalMemory::nsegments() const { + return _segments.length(); +} + +inline const XPhysicalMemorySegment& XPhysicalMemory::segment(int index) const { + return _segments.at(index); +} + +#endif // SHARE_GC_X_XPHYSICALMEMORY_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xReferenceProcessor.cpp b/src/hotspot/share/gc/x/xReferenceProcessor.cpp new file mode 100644 index 0000000000000..4d6f05e59225a --- /dev/null +++ b/src/hotspot/share/gc/x/xReferenceProcessor.cpp @@ -0,0 +1,459 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "classfile/javaClasses.inline.hpp" +#include "gc/shared/referencePolicy.hpp" +#include "gc/shared/referenceProcessorStats.hpp" +#include "gc/x/xHeap.inline.hpp" +#include "gc/x/xReferenceProcessor.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xTask.hpp" +#include "gc/x/xTracer.inline.hpp" +#include "gc/x/xValue.inline.hpp" +#include "memory/universe.hpp" +#include "runtime/atomic.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/os.hpp" + +static const XStatSubPhase XSubPhaseConcurrentReferencesProcess("Concurrent References Process"); +static const XStatSubPhase XSubPhaseConcurrentReferencesEnqueue("Concurrent References Enqueue"); + +static ReferenceType reference_type(oop reference) { + return InstanceKlass::cast(reference->klass())->reference_type(); +} + +static const char* reference_type_name(ReferenceType type) { + switch (type) { + case REF_SOFT: + return "Soft"; + + case REF_WEAK: + return "Weak"; + + case REF_FINAL: + return "Final"; + + case REF_PHANTOM: + return "Phantom"; + + default: + ShouldNotReachHere(); + return "Unknown"; + } +} + +static volatile oop* reference_referent_addr(oop reference) { + return (volatile oop*)java_lang_ref_Reference::referent_addr_raw(reference); +} + +static oop reference_referent(oop reference) { + return Atomic::load(reference_referent_addr(reference)); +} + +static void reference_clear_referent(oop reference) { + java_lang_ref_Reference::clear_referent_raw(reference); +} + +static oop* reference_discovered_addr(oop reference) { + return (oop*)java_lang_ref_Reference::discovered_addr_raw(reference); +} + +static oop reference_discovered(oop reference) { + return *reference_discovered_addr(reference); +} + +static void reference_set_discovered(oop reference, oop discovered) { + java_lang_ref_Reference::set_discovered_raw(reference, discovered); +} + +static oop* reference_next_addr(oop reference) { + return (oop*)java_lang_ref_Reference::next_addr_raw(reference); +} + +static oop reference_next(oop reference) { + return *reference_next_addr(reference); +} + +static void reference_set_next(oop reference, oop next) { + java_lang_ref_Reference::set_next_raw(reference, next); +} + +static void soft_reference_update_clock() { + const jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; + java_lang_ref_SoftReference::set_clock(now); +} + +XReferenceProcessor::XReferenceProcessor(XWorkers* workers) : + _workers(workers), + _soft_reference_policy(NULL), + _encountered_count(), + _discovered_count(), + _enqueued_count(), + _discovered_list(NULL), + _pending_list(NULL), + _pending_list_tail(_pending_list.addr()) {} + +void XReferenceProcessor::set_soft_reference_policy(bool clear) { + static AlwaysClearPolicy always_clear_policy; + static LRUMaxHeapPolicy lru_max_heap_policy; + + if (clear) { + log_info(gc, ref)("Clearing All SoftReferences"); + _soft_reference_policy = &always_clear_policy; + } else { + _soft_reference_policy = &lru_max_heap_policy; + } + + _soft_reference_policy->setup(); +} + +bool XReferenceProcessor::is_inactive(oop reference, oop referent, ReferenceType type) const { + if (type == REF_FINAL) { + // A FinalReference is inactive if its next field is non-null. An application can't + // call enqueue() or clear() on a FinalReference. + return reference_next(reference) != NULL; + } else { + // A non-FinalReference is inactive if the referent is null. The referent can only + // be null if the application called Reference.enqueue() or Reference.clear(). 
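+ // (The referent passed in was loaded by the caller through a weak load
+ // barrier, see should_discover().)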
+ return referent == NULL; + } +} + +bool XReferenceProcessor::is_strongly_live(oop referent) const { + return XHeap::heap()->is_object_strongly_live(XOop::to_address(referent)); +} + +bool XReferenceProcessor::is_softly_live(oop reference, ReferenceType type) const { + if (type != REF_SOFT) { + // Not a SoftReference + return false; + } + + // Ask SoftReference policy + const jlong clock = java_lang_ref_SoftReference::clock(); + assert(clock != 0, "Clock not initialized"); + assert(_soft_reference_policy != NULL, "Policy not initialized"); + return !_soft_reference_policy->should_clear_reference(reference, clock); +} + +bool XReferenceProcessor::should_discover(oop reference, ReferenceType type) const { + volatile oop* const referent_addr = reference_referent_addr(reference); + const oop referent = XBarrier::weak_load_barrier_on_oop_field(referent_addr); + + if (is_inactive(reference, referent, type)) { + return false; + } + + if (is_strongly_live(referent)) { + return false; + } + + if (is_softly_live(reference, type)) { + return false; + } + + // PhantomReferences with finalizable marked referents should technically not have + // to be discovered. However, InstanceRefKlass::oop_oop_iterate_ref_processing() + // does not know about the finalizable mark concept, and will therefore mark + // referents in non-discovered PhantomReferences as strongly live. To prevent + // this, we always discover PhantomReferences with finalizable marked referents. + // They will automatically be dropped during the reference processing phase. + return true; +} + +bool XReferenceProcessor::should_drop(oop reference, ReferenceType type) const { + const oop referent = reference_referent(reference); + if (referent == NULL) { + // Reference has been cleared, by a call to Reference.enqueue() + // or Reference.clear() from the application, which means we + // should drop the reference. + return true; + } + + // Check if the referent is still alive, in which case we should + // drop the reference. + if (type == REF_PHANTOM) { + return XBarrier::is_alive_barrier_on_phantom_oop(referent); + } else { + return XBarrier::is_alive_barrier_on_weak_oop(referent); + } +} + +void XReferenceProcessor::keep_alive(oop reference, ReferenceType type) const { + volatile oop* const p = reference_referent_addr(reference); + if (type == REF_PHANTOM) { + XBarrier::keep_alive_barrier_on_phantom_oop_field(p); + } else { + XBarrier::keep_alive_barrier_on_weak_oop_field(p); + } +} + +void XReferenceProcessor::make_inactive(oop reference, ReferenceType type) const { + if (type == REF_FINAL) { + // Don't clear referent. It is needed by the Finalizer thread to make the call + // to finalize(). A FinalReference is instead made inactive by self-looping the + // next field. An application can't call FinalReference.enqueue(), so there is + // no race to worry about when setting the next field. + assert(reference_next(reference) == NULL, "Already inactive"); + reference_set_next(reference, reference); + } else { + // Clear referent + reference_clear_referent(reference); + } +} + +void XReferenceProcessor::discover(oop reference, ReferenceType type) { + log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type)); + + // Update statistics + _discovered_count.get()[type]++; + + if (type == REF_FINAL) { + // Mark referent (and its reachable subgraph) finalizable. This avoids + // the problem of later having to mark those objects if the referent is + // still final reachable during processing. 
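+ // Marking through the referent with the finalizable bit (rather than
+ // strongly) keeps objects that are only reachable from this FinalReference
+ // alive for finalization without treating them as strongly live.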
+ volatile oop* const referent_addr = reference_referent_addr(reference); + XBarrier::mark_barrier_on_oop_field(referent_addr, true /* finalizable */); + } + + // Add reference to discovered list + assert(reference_discovered(reference) == NULL, "Already discovered"); + oop* const list = _discovered_list.addr(); + reference_set_discovered(reference, *list); + *list = reference; +} + +bool XReferenceProcessor::discover_reference(oop reference, ReferenceType type) { + if (!RegisterReferences) { + // Reference processing disabled + return false; + } + + log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type)); + + // Update statistics + _encountered_count.get()[type]++; + + if (!should_discover(reference, type)) { + // Not discovered + return false; + } + + discover(reference, type); + + // Discovered + return true; +} + +oop XReferenceProcessor::drop(oop reference, ReferenceType type) { + log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type)); + + // Keep referent alive + keep_alive(reference, type); + + // Unlink and return next in list + const oop next = reference_discovered(reference); + reference_set_discovered(reference, NULL); + return next; +} + +oop* XReferenceProcessor::keep(oop reference, ReferenceType type) { + log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type)); + + // Update statistics + _enqueued_count.get()[type]++; + + // Make reference inactive + make_inactive(reference, type); + + // Return next in list + return reference_discovered_addr(reference); +} + +void XReferenceProcessor::work() { + // Process discovered references + oop* const list = _discovered_list.addr(); + oop* p = list; + + while (*p != NULL) { + const oop reference = *p; + const ReferenceType type = reference_type(reference); + + if (should_drop(reference, type)) { + *p = drop(reference, type); + } else { + p = keep(reference, type); + } + } + + // Prepend discovered references to internal pending list + if (*list != NULL) { + *p = Atomic::xchg(_pending_list.addr(), *list); + if (*p == NULL) { + // First to prepend to list, record tail + _pending_list_tail = p; + } + + // Clear discovered list + *list = NULL; + } +} + +bool XReferenceProcessor::is_empty() const { + XPerWorkerConstIterator iter(&_discovered_list); + for (const oop* list; iter.next(&list);) { + if (*list != NULL) { + return false; + } + } + + if (_pending_list.get() != NULL) { + return false; + } + + return true; +} + +void XReferenceProcessor::reset_statistics() { + assert(is_empty(), "Should be empty"); + + // Reset encountered + XPerWorkerIterator iter_encountered(&_encountered_count); + for (Counters* counters; iter_encountered.next(&counters);) { + for (int i = REF_SOFT; i <= REF_PHANTOM; i++) { + (*counters)[i] = 0; + } + } + + // Reset discovered + XPerWorkerIterator iter_discovered(&_discovered_count); + for (Counters* counters; iter_discovered.next(&counters);) { + for (int i = REF_SOFT; i <= REF_PHANTOM; i++) { + (*counters)[i] = 0; + } + } + + // Reset enqueued + XPerWorkerIterator iter_enqueued(&_enqueued_count); + for (Counters* counters; iter_enqueued.next(&counters);) { + for (int i = REF_SOFT; i <= REF_PHANTOM; i++) { + (*counters)[i] = 0; + } + } +} + +void XReferenceProcessor::collect_statistics() { + Counters encountered = {}; + Counters discovered = {}; + Counters enqueued = {}; + + // Sum encountered + XPerWorkerConstIterator iter_encountered(&_encountered_count); + for 
(const Counters* counters; iter_encountered.next(&counters);) { + for (int i = REF_SOFT; i <= REF_PHANTOM; i++) { + encountered[i] += (*counters)[i]; + } + } + + // Sum discovered + XPerWorkerConstIterator iter_discovered(&_discovered_count); + for (const Counters* counters; iter_discovered.next(&counters);) { + for (int i = REF_SOFT; i <= REF_PHANTOM; i++) { + discovered[i] += (*counters)[i]; + } + } + + // Sum enqueued + XPerWorkerConstIterator iter_enqueued(&_enqueued_count); + for (const Counters* counters; iter_enqueued.next(&counters);) { + for (int i = REF_SOFT; i <= REF_PHANTOM; i++) { + enqueued[i] += (*counters)[i]; + } + } + + // Update statistics + XStatReferences::set_soft(encountered[REF_SOFT], discovered[REF_SOFT], enqueued[REF_SOFT]); + XStatReferences::set_weak(encountered[REF_WEAK], discovered[REF_WEAK], enqueued[REF_WEAK]); + XStatReferences::set_final(encountered[REF_FINAL], discovered[REF_FINAL], enqueued[REF_FINAL]); + XStatReferences::set_phantom(encountered[REF_PHANTOM], discovered[REF_PHANTOM], enqueued[REF_PHANTOM]); + + // Trace statistics + const ReferenceProcessorStats stats(discovered[REF_SOFT], + discovered[REF_WEAK], + discovered[REF_FINAL], + discovered[REF_PHANTOM]); + XTracer::tracer()->report_gc_reference_stats(stats); +} + +class XReferenceProcessorTask : public XTask { +private: + XReferenceProcessor* const _reference_processor; + +public: + XReferenceProcessorTask(XReferenceProcessor* reference_processor) : + XTask("XReferenceProcessorTask"), + _reference_processor(reference_processor) {} + + virtual void work() { + _reference_processor->work(); + } +}; + +void XReferenceProcessor::process_references() { + XStatTimer timer(XSubPhaseConcurrentReferencesProcess); + + // Process discovered lists + XReferenceProcessorTask task(this); + _workers->run(&task); + + // Update SoftReference clock + soft_reference_update_clock(); + + // Collect, log and trace statistics + collect_statistics(); +} + +void XReferenceProcessor::enqueue_references() { + XStatTimer timer(XSubPhaseConcurrentReferencesEnqueue); + + if (_pending_list.get() == NULL) { + // Nothing to enqueue + return; + } + + { + // Heap_lock protects external pending list + MonitorLocker ml(Heap_lock); + + // Prepend internal pending list to external pending list + *_pending_list_tail = Universe::swap_reference_pending_list(_pending_list.get()); + + // Notify ReferenceHandler thread + ml.notify_all(); + } + + // Reset internal pending list + _pending_list.set(NULL); + _pending_list_tail = _pending_list.addr(); +} diff --git a/src/hotspot/share/gc/x/xReferenceProcessor.hpp b/src/hotspot/share/gc/x/xReferenceProcessor.hpp new file mode 100644 index 0000000000000..1ff7b14e868d6 --- /dev/null +++ b/src/hotspot/share/gc/x/xReferenceProcessor.hpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
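work() above splices each worker's private discovered list onto the shared pending list with a single atomic exchange, remembering the tail of the first batch so that enqueue_references() can later hand the whole internal list to the Java ReferenceHandler under Heap_lock. The standalone sketch below shows only the exchange-based prepend, on an invented node type; it is not the HotSpot code and assumes nothing traverses the list while batches are still being linked.

```cpp
#include <atomic>
#include <cstdio>

// Illustrative only: prepend a privately built singly linked batch onto a
// shared list head with one atomic exchange (toy node type).
struct Node {
  int   value;
  Node* next = nullptr;
};

std::atomic<Node*> g_pending{nullptr};

// Install [first..last] in front of whatever is currently pending. The old
// head is attached after 'last', so concurrent prepends never lose elements.
// Assumes no reader walks the list until all producers are done, since the
// tail link is set after the exchange.
void prepend_batch(Node* first, Node* last) {
  Node* const old_head = g_pending.exchange(first, std::memory_order_acq_rel);
  last->next = old_head;
}

int main() {
  Node a{1}, b{2}, c{3};
  a.next = &b;             // private batch: a -> b
  prepend_batch(&a, &b);   // pending: a -> b
  prepend_batch(&c, &c);   // pending: c -> a -> b
  for (const Node* n = g_pending.load(); n != nullptr; n = n->next) {
    std::printf("%d ", n->value);
  }
  std::printf("\n");       // prints: 3 1 2
  return 0;
}
```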
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XREFERENCEPROCESSOR_HPP +#define SHARE_GC_X_XREFERENCEPROCESSOR_HPP + +#include "gc/shared/referenceDiscoverer.hpp" +#include "gc/x/xValue.hpp" + +class ReferencePolicy; +class XWorkers; + +class XReferenceProcessor : public ReferenceDiscoverer { + friend class XReferenceProcessorTask; + +private: + static const size_t reference_type_count = REF_PHANTOM + 1; + typedef size_t Counters[reference_type_count]; + + XWorkers* const _workers; + ReferencePolicy* _soft_reference_policy; + XPerWorker _encountered_count; + XPerWorker _discovered_count; + XPerWorker _enqueued_count; + XPerWorker _discovered_list; + XContended _pending_list; + oop* _pending_list_tail; + + bool is_inactive(oop reference, oop referent, ReferenceType type) const; + bool is_strongly_live(oop referent) const; + bool is_softly_live(oop reference, ReferenceType type) const; + + bool should_discover(oop reference, ReferenceType type) const; + bool should_drop(oop reference, ReferenceType type) const; + void keep_alive(oop reference, ReferenceType type) const; + void make_inactive(oop reference, ReferenceType type) const; + + void discover(oop reference, ReferenceType type); + + oop drop(oop reference, ReferenceType type); + oop* keep(oop reference, ReferenceType type); + + bool is_empty() const; + + void work(); + void collect_statistics(); + +public: + XReferenceProcessor(XWorkers* workers); + + void set_soft_reference_policy(bool clear); + void reset_statistics(); + + virtual bool discover_reference(oop reference, ReferenceType type); + void process_references(); + void enqueue_references(); +}; + +#endif // SHARE_GC_X_XREFERENCEPROCESSOR_HPP diff --git a/src/hotspot/share/gc/x/xRelocate.cpp b/src/hotspot/share/gc/x/xRelocate.cpp new file mode 100644 index 0000000000000..e13773242b3ef --- /dev/null +++ b/src/hotspot/share/gc/x/xRelocate.cpp @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/x/xAbort.inline.hpp" +#include "gc/x/xAddress.inline.hpp" +#include "gc/x/xBarrier.inline.hpp" +#include "gc/x/xForwarding.inline.hpp" +#include "gc/x/xHeap.inline.hpp" +#include "gc/x/xPage.inline.hpp" +#include "gc/x/xRelocate.hpp" +#include "gc/x/xRelocationSet.inline.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xTask.hpp" +#include "gc/x/xThread.inline.hpp" +#include "gc/x/xWorkers.hpp" +#include "prims/jvmtiTagMap.hpp" +#include "runtime/atomic.hpp" +#include "utilities/debug.hpp" + +XRelocate::XRelocate(XWorkers* workers) : + _workers(workers) {} + +static uintptr_t forwarding_index(XForwarding* forwarding, uintptr_t from_addr) { + const uintptr_t from_offset = XAddress::offset(from_addr); + return (from_offset - forwarding->start()) >> forwarding->object_alignment_shift(); +} + +static uintptr_t forwarding_find(XForwarding* forwarding, uintptr_t from_addr, XForwardingCursor* cursor) { + const uintptr_t from_index = forwarding_index(forwarding, from_addr); + const XForwardingEntry entry = forwarding->find(from_index, cursor); + return entry.populated() ? XAddress::good(entry.to_offset()) : 0; +} + +static uintptr_t forwarding_insert(XForwarding* forwarding, uintptr_t from_addr, uintptr_t to_addr, XForwardingCursor* cursor) { + const uintptr_t from_index = forwarding_index(forwarding, from_addr); + const uintptr_t to_offset = XAddress::offset(to_addr); + const uintptr_t to_offset_final = forwarding->insert(from_index, to_offset, cursor); + return XAddress::good(to_offset_final); +} + +static uintptr_t relocate_object_inner(XForwarding* forwarding, uintptr_t from_addr, XForwardingCursor* cursor) { + assert(XHeap::heap()->is_object_live(from_addr), "Should be live"); + + // Allocate object + const size_t size = XUtils::object_size(from_addr); + const uintptr_t to_addr = XHeap::heap()->alloc_object_for_relocation(size); + if (to_addr == 0) { + // Allocation failed + return 0; + } + + // Copy object + XUtils::object_copy_disjoint(from_addr, to_addr, size); + + // Insert forwarding + const uintptr_t to_addr_final = forwarding_insert(forwarding, from_addr, to_addr, cursor); + if (to_addr_final != to_addr) { + // Already relocated, try undo allocation + XHeap::heap()->undo_alloc_object_for_relocation(to_addr, size); + } + + return to_addr_final; +} + +uintptr_t XRelocate::relocate_object(XForwarding* forwarding, uintptr_t from_addr) const { + XForwardingCursor cursor; + + // Lookup forwarding + uintptr_t to_addr = forwarding_find(forwarding, from_addr, &cursor); + if (to_addr != 0) { + // Already relocated + return to_addr; + } + + // Relocate object + if (forwarding->retain_page()) { + to_addr = relocate_object_inner(forwarding, from_addr, &cursor); + forwarding->release_page(); + + if (to_addr != 0) { + // Success + return to_addr; + } + + // Failed to relocate object. Wait for a worker thread to complete + // relocation of this page, and then forward the object. If the GC + // aborts the relocation phase before the page has been relocated, + // then wait return false and we just forward the object in-place. 
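relocate_object_inner() above copies the object speculatively and only then publishes the forwarding entry; whoever loses that race undoes its allocation and adopts the winner's copy. Below is a minimal standalone sketch of the same publish-or-back-out pattern using a single compare-and-swap slot; the slot type and the undo callback are invented stand-ins, not the real lock-free forwarding table.

```cpp
#include <atomic>
#include <cstdint>
#include <cstdio>

// Illustrative only: publish a relocated copy with compare-and-swap and back
// out if another thread already installed a forwarding.
struct ForwardingSlot {
  std::atomic<uintptr_t> to_addr{0};   // 0 means "not yet relocated"
};

// Returns the address every thread must agree on for this object.
uintptr_t publish_relocation(ForwardingSlot* slot,
                             uintptr_t my_copy,
                             void (*undo_copy)(uintptr_t)) {
  uintptr_t expected = 0;
  if (slot->to_addr.compare_exchange_strong(expected, my_copy,
                                            std::memory_order_acq_rel,
                                            std::memory_order_acquire)) {
    return my_copy;          // we won the race; our copy is the canonical one
  }
  undo_copy(my_copy);        // we lost; undo our speculative copy
  return expected;           // use the winner's address instead
}

int main() {
  ForwardingSlot slot;
  auto undo = [](uintptr_t addr) { std::printf("undo copy at %#lx\n", (unsigned long)addr); };
  std::printf("%#lx\n", (unsigned long)publish_relocation(&slot, 0x1000, undo)); // wins
  std::printf("%#lx\n", (unsigned long)publish_relocation(&slot, 0x2000, undo)); // loses, undoes
  return 0;
}
```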
+ if (!forwarding->wait_page_released()) { + // Forward object in-place + return forwarding_insert(forwarding, from_addr, from_addr, &cursor); + } + } + + // Forward object + return forward_object(forwarding, from_addr); +} + +uintptr_t XRelocate::forward_object(XForwarding* forwarding, uintptr_t from_addr) const { + XForwardingCursor cursor; + const uintptr_t to_addr = forwarding_find(forwarding, from_addr, &cursor); + assert(to_addr != 0, "Should be forwarded"); + return to_addr; +} + +static XPage* alloc_page(const XForwarding* forwarding) { + if (ZStressRelocateInPlace) { + // Simulate failure to allocate a new page. This will + // cause the page being relocated to be relocated in-place. + return NULL; + } + + XAllocationFlags flags; + flags.set_non_blocking(); + flags.set_worker_relocation(); + return XHeap::heap()->alloc_page(forwarding->type(), forwarding->size(), flags); +} + +static void free_page(XPage* page) { + XHeap::heap()->free_page(page, true /* reclaimed */); +} + +static bool should_free_target_page(XPage* page) { + // Free target page if it is empty. We can end up with an empty target + // page if we allocated a new target page, and then lost the race to + // relocate the remaining objects, leaving the target page empty when + // relocation completed. + return page != NULL && page->top() == page->start(); +} + +class XRelocateSmallAllocator { +private: + volatile size_t _in_place_count; + +public: + XRelocateSmallAllocator() : + _in_place_count(0) {} + + XPage* alloc_target_page(XForwarding* forwarding, XPage* target) { + XPage* const page = alloc_page(forwarding); + if (page == NULL) { + Atomic::inc(&_in_place_count); + } + + return page; + } + + void share_target_page(XPage* page) { + // Does nothing + } + + void free_target_page(XPage* page) { + if (should_free_target_page(page)) { + free_page(page); + } + } + + void free_relocated_page(XPage* page) { + free_page(page); + } + + uintptr_t alloc_object(XPage* page, size_t size) const { + return (page != NULL) ? page->alloc_object(size) : 0; + } + + void undo_alloc_object(XPage* page, uintptr_t addr, size_t size) const { + page->undo_alloc_object(addr, size); + } + + const size_t in_place_count() const { + return _in_place_count; + } +}; + +class XRelocateMediumAllocator { +private: + XConditionLock _lock; + XPage* _shared; + bool _in_place; + volatile size_t _in_place_count; + +public: + XRelocateMediumAllocator() : + _lock(), + _shared(NULL), + _in_place(false), + _in_place_count(0) {} + + ~XRelocateMediumAllocator() { + if (should_free_target_page(_shared)) { + free_page(_shared); + } + } + + XPage* alloc_target_page(XForwarding* forwarding, XPage* target) { + XLocker locker(&_lock); + + // Wait for any ongoing in-place relocation to complete + while (_in_place) { + _lock.wait(); + } + + // Allocate a new page only if the shared page is the same as the + // current target page. The shared page will be different from the + // current target page if another thread shared a page, or allocated + // a new page. 
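XRelocateMediumAllocator above funnels all medium-page relocation through one shared target page: a worker that exhausts the shared page allocates a replacement, and when allocation fails the page is relocated in place while the other workers wait to be handed the in-place page as the new shared target. The sketch below shows that wait/notify shape with std::mutex and std::condition_variable on invented types; it is a simplification, not the HotSpot allocator.

```cpp
#include <condition_variable>
#include <mutex>

// Illustrative only: a shared target slot where allocation is skipped while an
// in-place relocation is ongoing (invented types).
struct Page { int id; };

class SharedTargetSlot {
  std::mutex              _lock;
  std::condition_variable _cv;
  Page*                   _shared   = nullptr;
  bool                    _in_place = false;

public:
  // Called by a worker whose current target 'target' is exhausted.
  Page* alloc_target(Page* target, Page* (*alloc_new)()) {
    std::unique_lock<std::mutex> lock(_lock);
    _cv.wait(lock, [this] { return !_in_place; });   // wait out in-place relocation
    if (_shared == target) {                         // nobody replaced it yet
      _shared = alloc_new();                         // may return nullptr on failure
      if (_shared == nullptr) {
        _in_place = true;                            // caller relocates in place
      }
    }
    return _shared;
  }

  // Called when the in-place page becomes the new shared target.
  void share_target(Page* page) {
    std::lock_guard<std::mutex> guard(_lock);
    _shared   = page;
    _in_place = false;
    _cv.notify_all();
  }
};

int main() {
  static Page p1{1};
  SharedTargetSlot slot;
  Page* t = slot.alloc_target(nullptr, [] { return &p1; });  // allocates p1
  (void)t;
  return 0;
}
```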
+ if (_shared == target) { + _shared = alloc_page(forwarding); + if (_shared == NULL) { + Atomic::inc(&_in_place_count); + _in_place = true; + } + } + + return _shared; + } + + void share_target_page(XPage* page) { + XLocker locker(&_lock); + + assert(_in_place, "Invalid state"); + assert(_shared == NULL, "Invalid state"); + assert(page != NULL, "Invalid page"); + + _shared = page; + _in_place = false; + + _lock.notify_all(); + } + + void free_target_page(XPage* page) { + // Does nothing + } + + void free_relocated_page(XPage* page) { + free_page(page); + } + + uintptr_t alloc_object(XPage* page, size_t size) const { + return (page != NULL) ? page->alloc_object_atomic(size) : 0; + } + + void undo_alloc_object(XPage* page, uintptr_t addr, size_t size) const { + page->undo_alloc_object_atomic(addr, size); + } + + const size_t in_place_count() const { + return _in_place_count; + } +}; + +template +class XRelocateClosure : public ObjectClosure { +private: + Allocator* const _allocator; + XForwarding* _forwarding; + XPage* _target; + + bool relocate_object(uintptr_t from_addr) const { + XForwardingCursor cursor; + + // Lookup forwarding + if (forwarding_find(_forwarding, from_addr, &cursor) != 0) { + // Already relocated + return true; + } + + // Allocate object + const size_t size = XUtils::object_size(from_addr); + const uintptr_t to_addr = _allocator->alloc_object(_target, size); + if (to_addr == 0) { + // Allocation failed + return false; + } + + // Copy object. Use conjoint copying if we are relocating + // in-place and the new object overlapps with the old object. + if (_forwarding->in_place() && to_addr + size > from_addr) { + XUtils::object_copy_conjoint(from_addr, to_addr, size); + } else { + XUtils::object_copy_disjoint(from_addr, to_addr, size); + } + + // Insert forwarding + if (forwarding_insert(_forwarding, from_addr, to_addr, &cursor) != to_addr) { + // Already relocated, undo allocation + _allocator->undo_alloc_object(_target, to_addr, size); + } + + return true; + } + + virtual void do_object(oop obj) { + const uintptr_t addr = XOop::to_address(obj); + assert(XHeap::heap()->is_object_live(addr), "Should be live"); + + while (!relocate_object(addr)) { + // Allocate a new target page, or if that fails, use the page being + // relocated as the new target, which will cause it to be relocated + // in-place. + _target = _allocator->alloc_target_page(_forwarding, _target); + if (_target != NULL) { + continue; + } + + // Claim the page being relocated to block other threads from accessing + // it, or its forwarding table, until it has been released (relocation + // completed). + _target = _forwarding->claim_page(); + _target->reset_for_in_place_relocation(); + _forwarding->set_in_place(); + } + } + +public: + XRelocateClosure(Allocator* allocator) : + _allocator(allocator), + _forwarding(NULL), + _target(NULL) {} + + ~XRelocateClosure() { + _allocator->free_target_page(_target); + } + + void do_forwarding(XForwarding* forwarding) { + _forwarding = forwarding; + + // Check if we should abort + if (XAbort::should_abort()) { + _forwarding->abort_page(); + return; + } + + // Relocate objects + _forwarding->object_iterate(this); + + // Verify + if (ZVerifyForwarding) { + _forwarding->verify(); + } + + // Release relocated page + _forwarding->release_page(); + + if (_forwarding->in_place()) { + // The relocated page has been relocated in-place and should not + // be freed. Keep it as target page until it is full, and offer to + // share it with other worker threads. 
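The closure above switches to conjoint (overlap-safe) copying when a page is being relocated in place and the destination can overlap the source; the one-sided test in the code suffices there since in-place compaction fills the page from its start. The standalone sketch below uses the general two-sided overlap test and plain memmove/memcpy to illustrate the choice.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustrative only: pick an overlap-safe copy when destination and source
// ranges can overlap (in-place relocation), otherwise use the cheaper one.
static void copy_object(void* to, const void* from, size_t size, bool in_place) {
  const uintptr_t to_addr   = (uintptr_t)to;
  const uintptr_t from_addr = (uintptr_t)from;
  const bool overlaps = in_place &&
                        (to_addr + size > from_addr) &&
                        (to_addr < from_addr + size);
  if (overlaps) {
    std::memmove(to, from, size);   // conjoint: handles overlapping ranges
  } else {
    std::memcpy(to, from, size);    // disjoint: ranges guaranteed not to overlap
  }
}

int main() {
  char buf[16] = "abcdefghijklmno";
  copy_object(buf + 2, buf, 8, /*in_place=*/true);   // overlapping, uses memmove
  std::printf("%s\n", buf);                          // prints: ababcdefghklmno
  return 0;
}
```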
+ _allocator->share_target_page(_target); + } else { + // Detach and free relocated page + XPage* const page = _forwarding->detach_page(); + _allocator->free_relocated_page(page); + } + } +}; + +class XRelocateTask : public XTask { +private: + XRelocationSetParallelIterator _iter; + XRelocateSmallAllocator _small_allocator; + XRelocateMediumAllocator _medium_allocator; + + static bool is_small(XForwarding* forwarding) { + return forwarding->type() == XPageTypeSmall; + } + +public: + XRelocateTask(XRelocationSet* relocation_set) : + XTask("XRelocateTask"), + _iter(relocation_set), + _small_allocator(), + _medium_allocator() {} + + ~XRelocateTask() { + XStatRelocation::set_at_relocate_end(_small_allocator.in_place_count(), + _medium_allocator.in_place_count()); + } + + virtual void work() { + XRelocateClosure small(&_small_allocator); + XRelocateClosure medium(&_medium_allocator); + + for (XForwarding* forwarding; _iter.next(&forwarding);) { + if (is_small(forwarding)) { + small.do_forwarding(forwarding); + } else { + medium.do_forwarding(forwarding); + } + } + } +}; + +void XRelocate::relocate(XRelocationSet* relocation_set) { + XRelocateTask task(relocation_set); + _workers->run(&task); +} diff --git a/src/hotspot/share/gc/x/xRelocate.hpp b/src/hotspot/share/gc/x/xRelocate.hpp new file mode 100644 index 0000000000000..46ab39240f643 --- /dev/null +++ b/src/hotspot/share/gc/x/xRelocate.hpp @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XRELOCATE_HPP +#define SHARE_GC_X_XRELOCATE_HPP + +#include "gc/x/xRelocationSet.hpp" + +class XForwarding; +class XWorkers; + +class XRelocate { + friend class XRelocateTask; + +private: + XWorkers* const _workers; + + void work(XRelocationSetParallelIterator* iter); + +public: + XRelocate(XWorkers* workers); + + uintptr_t relocate_object(XForwarding* forwarding, uintptr_t from_addr) const; + uintptr_t forward_object(XForwarding* forwarding, uintptr_t from_addr) const; + + void relocate(XRelocationSet* relocation_set); +}; + +#endif // SHARE_GC_X_XRELOCATE_HPP diff --git a/src/hotspot/share/gc/x/xRelocationSet.cpp b/src/hotspot/share/gc/x/xRelocationSet.cpp new file mode 100644 index 0000000000000..aca1bb4f030dd --- /dev/null +++ b/src/hotspot/share/gc/x/xRelocationSet.cpp @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xArray.inline.hpp" +#include "gc/x/xForwarding.inline.hpp" +#include "gc/x/xForwardingAllocator.inline.hpp" +#include "gc/x/xRelocationSet.inline.hpp" +#include "gc/x/xRelocationSetSelector.inline.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xTask.hpp" +#include "gc/x/xWorkers.hpp" +#include "runtime/atomic.hpp" +#include "utilities/debug.hpp" + +class XRelocationSetInstallTask : public XTask { +private: + XForwardingAllocator* const _allocator; + XForwarding** _forwardings; + const size_t _nforwardings; + XArrayParallelIterator _small_iter; + XArrayParallelIterator _medium_iter; + volatile size_t _small_next; + volatile size_t _medium_next; + + void install(XForwarding* forwarding, volatile size_t* next) { + const size_t index = Atomic::fetch_then_add(next, 1u); + assert(index < _nforwardings, "Invalid index"); + _forwardings[index] = forwarding; + } + + void install_small(XForwarding* forwarding) { + install(forwarding, &_small_next); + } + + void install_medium(XForwarding* forwarding) { + install(forwarding, &_medium_next); + } + +public: + XRelocationSetInstallTask(XForwardingAllocator* allocator, const XRelocationSetSelector* selector) : + XTask("XRelocationSetInstallTask"), + _allocator(allocator), + _forwardings(NULL), + _nforwardings(selector->small()->length() + selector->medium()->length()), + _small_iter(selector->small()), + _medium_iter(selector->medium()), + _small_next(selector->medium()->length()), + _medium_next(0) { + + // Reset the allocator to have room for the relocation + // set, all forwardings, and all forwarding entries. 
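XRelocationSetInstallTask above installs every forwarding into one flat array: small entries are directed to the slots after the medium block (note that _small_next starts at the mediums' length) and each worker claims its index with an atomic fetch-and-add. The sketch below reproduces just that slot-claiming idea on invented types.

```cpp
#include <atomic>
#include <cstdio>
#include <vector>

// Illustrative only: parallel workers claim slots in a preallocated array with
// fetch_add; one counter starts at 0 (medium), the other after the medium
// block (small), so medium entries always come first.
struct Forwarding { int page_id; };

struct InstallSlots {
  std::vector<Forwarding*> slots;
  std::atomic<size_t>      medium_next{0};
  std::atomic<size_t>      small_next;   // initialized to the number of medium pages

  InstallSlots(size_t nmedium, size_t nsmall)
    : slots(nmedium + nsmall, nullptr), small_next(nmedium) {}

  void install(Forwarding* f, std::atomic<size_t>* next) {
    const size_t index = next->fetch_add(1, std::memory_order_relaxed);
    slots[index] = f;                    // each index is claimed exactly once
  }
  void install_medium(Forwarding* f) { install(f, &medium_next); }
  void install_small(Forwarding* f)  { install(f, &small_next); }
};

int main() {
  InstallSlots s(/*nmedium=*/1, /*nsmall=*/2);
  Forwarding m{100}, a{1}, b{2};
  s.install_small(&a);     // lands at index 1
  s.install_medium(&m);    // lands at index 0
  s.install_small(&b);     // lands at index 2
  for (Forwarding* f : s.slots) std::printf("%d ", f->page_id);  // 100 1 2
  std::printf("\n");
  return 0;
}
```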
+ const size_t relocation_set_size = _nforwardings * sizeof(XForwarding*); + const size_t forwardings_size = _nforwardings * sizeof(XForwarding); + const size_t forwarding_entries_size = selector->forwarding_entries() * sizeof(XForwardingEntry); + _allocator->reset(relocation_set_size + forwardings_size + forwarding_entries_size); + + // Allocate relocation set + _forwardings = new (_allocator->alloc(relocation_set_size)) XForwarding*[_nforwardings]; + } + + ~XRelocationSetInstallTask() { + assert(_allocator->is_full(), "Should be full"); + } + + virtual void work() { + // Allocate and install forwardings for small pages + for (XPage* page; _small_iter.next(&page);) { + XForwarding* const forwarding = XForwarding::alloc(_allocator, page); + install_small(forwarding); + } + + // Allocate and install forwardings for medium pages + for (XPage* page; _medium_iter.next(&page);) { + XForwarding* const forwarding = XForwarding::alloc(_allocator, page); + install_medium(forwarding); + } + } + + XForwarding** forwardings() const { + return _forwardings; + } + + size_t nforwardings() const { + return _nforwardings; + } +}; + +XRelocationSet::XRelocationSet(XWorkers* workers) : + _workers(workers), + _allocator(), + _forwardings(NULL), + _nforwardings(0) {} + +void XRelocationSet::install(const XRelocationSetSelector* selector) { + // Install relocation set + XRelocationSetInstallTask task(&_allocator, selector); + _workers->run(&task); + + _forwardings = task.forwardings(); + _nforwardings = task.nforwardings(); + + // Update statistics + XStatRelocation::set_at_install_relocation_set(_allocator.size()); +} + +void XRelocationSet::reset() { + // Destroy forwardings + XRelocationSetIterator iter(this); + for (XForwarding* forwarding; iter.next(&forwarding);) { + forwarding->~XForwarding(); + } + + _nforwardings = 0; +} diff --git a/src/hotspot/share/gc/x/xRelocationSet.hpp b/src/hotspot/share/gc/x/xRelocationSet.hpp new file mode 100644 index 0000000000000..bbbb3770516b5 --- /dev/null +++ b/src/hotspot/share/gc/x/xRelocationSet.hpp @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XRELOCATIONSET_HPP +#define SHARE_GC_X_XRELOCATIONSET_HPP + +#include "gc/x/xArray.hpp" +#include "gc/x/xForwardingAllocator.hpp" + +class XForwarding; +class XRelocationSetSelector; +class XWorkers; + +class XRelocationSet { + template friend class XRelocationSetIteratorImpl; + +private: + XWorkers* _workers; + XForwardingAllocator _allocator; + XForwarding** _forwardings; + size_t _nforwardings; + +public: + XRelocationSet(XWorkers* workers); + + void install(const XRelocationSetSelector* selector); + void reset(); +}; + +template +class XRelocationSetIteratorImpl : public XArrayIteratorImpl { +public: + XRelocationSetIteratorImpl(XRelocationSet* relocation_set); +}; + +using XRelocationSetIterator = XRelocationSetIteratorImpl; +using XRelocationSetParallelIterator = XRelocationSetIteratorImpl; + +#endif // SHARE_GC_X_XRELOCATIONSET_HPP diff --git a/src/hotspot/share/gc/x/xRelocationSet.inline.hpp b/src/hotspot/share/gc/x/xRelocationSet.inline.hpp new file mode 100644 index 0000000000000..3b76fbce46a2b --- /dev/null +++ b/src/hotspot/share/gc/x/xRelocationSet.inline.hpp @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XRELOCATIONSET_INLINE_HPP +#define SHARE_GC_X_XRELOCATIONSET_INLINE_HPP + +#include "gc/x/xRelocationSet.hpp" + +#include "gc/x/xArray.inline.hpp" + +template +inline XRelocationSetIteratorImpl::XRelocationSetIteratorImpl(XRelocationSet* relocation_set) : + XArrayIteratorImpl(relocation_set->_forwardings, relocation_set->_nforwardings) {} + +#endif // SHARE_GC_X_XRELOCATIONSET_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xRelocationSetSelector.cpp b/src/hotspot/share/gc/x/xRelocationSetSelector.cpp new file mode 100644 index 0000000000000..b009443d39597 --- /dev/null +++ b/src/hotspot/share/gc/x/xRelocationSetSelector.cpp @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/x/xArray.inline.hpp" +#include "gc/x/xForwarding.inline.hpp" +#include "gc/x/xPage.inline.hpp" +#include "gc/x/xRelocationSetSelector.inline.hpp" +#include "jfr/jfrEvents.hpp" +#include "logging/log.hpp" +#include "runtime/globals.hpp" +#include "utilities/debug.hpp" +#include "utilities/powerOfTwo.hpp" + +XRelocationSetSelectorGroupStats::XRelocationSetSelectorGroupStats() : + _npages_candidates(0), + _total(0), + _live(0), + _empty(0), + _npages_selected(0), + _relocate(0) {} + +XRelocationSetSelectorGroup::XRelocationSetSelectorGroup(const char* name, + uint8_t page_type, + size_t page_size, + size_t object_size_limit) : + _name(name), + _page_type(page_type), + _page_size(page_size), + _object_size_limit(object_size_limit), + _fragmentation_limit(page_size * (ZFragmentationLimit / 100)), + _live_pages(), + _forwarding_entries(0), + _stats() {} + +bool XRelocationSetSelectorGroup::is_disabled() { + // Medium pages are disabled when their page size is zero + return _page_type == XPageTypeMedium && _page_size == 0; +} + +bool XRelocationSetSelectorGroup::is_selectable() { + // Large pages are not selectable + return _page_type != XPageTypeLarge; +} + +void XRelocationSetSelectorGroup::semi_sort() { + // Semi-sort live pages by number of live bytes in ascending order + const size_t npartitions_shift = 11; + const size_t npartitions = (size_t)1 << npartitions_shift; + const size_t partition_size = _page_size >> npartitions_shift; + const size_t partition_size_shift = exact_log2(partition_size); + + // Partition slots/fingers + int partitions[npartitions] = { /* zero initialize */ }; + + // Calculate partition slots + XArrayIterator iter1(&_live_pages); + for (XPage* page; iter1.next(&page);) { + const size_t index = page->live_bytes() >> partition_size_shift; + partitions[index]++; + } + + // Calculate partition fingers + int finger = 0; + for (size_t i = 0; i < npartitions; i++) { + const int slots = partitions[i]; + partitions[i] = finger; + finger += slots; + } + + // Allocate destination array + const int npages = _live_pages.length(); + XArray sorted_live_pages(npages, npages, NULL); + + // Sort pages into partitions + XArrayIterator iter2(&_live_pages); + for (XPage* page; iter2.next(&page);) { + const size_t index = page->live_bytes() >> partition_size_shift; + const int finger = partitions[index]++; + assert(sorted_live_pages.at(finger) == NULL, "Invalid finger"); + sorted_live_pages.at_put(finger, page); + } + + _live_pages.swap(&sorted_live_pages); +} + +void XRelocationSetSelectorGroup::select_inner() { + // Calculate the number of pages to relocate by successively including pages in + // a candidate relocation set and calculate the maximum space requirement for + // their live objects. 
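select_inner() grows a candidate relocation set one page at a time over the semi-sorted list, estimates the worst-case number of target pages needed for the candidate's live bytes, and extends the final selection only while the relative amount of reclaimed space stays above the fragmentation limit. A standalone sketch of that greedy loop, with invented page sizes and live-byte figures, follows; it mirrors the shape of the code but is illustrative only.

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

// Illustrative only: greedy relocation-set selection over pages that are
// already sorted by live bytes (ascending).
struct Selection { int from = 0; int to = 0; size_t live = 0; };

Selection select_pages(const std::vector<size_t>& live_bytes,   // per candidate page
                       size_t page_size,
                       size_t object_size_limit,
                       double fragmentation_limit_percent) {
  Selection selected;
  size_t from_live = 0;

  for (int from = 1; from <= (int)live_bytes.size(); from++) {
    from_live += live_bytes[from - 1];

    // Worst-case number of target pages guaranteed to fit the live objects,
    // regardless of relocation order.
    const int to = (int)std::ceil((double)from_live / (double)(page_size - object_size_limit));

    // Relative reclaimable space of this candidate compared to the current
    // selection: 100% means the extra pages are reclaimed "for free".
    const int diff_from = from - selected.from;
    const int diff_to   = to - selected.to;
    const double diff_reclaimable = 100.0 - 100.0 * (double)diff_to / (double)diff_from;

    if (diff_reclaimable > fragmentation_limit_percent) {
      selected = {from, to, from_live};
    }
  }
  return selected;
}

int main() {
  // Four 2 MB pages, mostly garbage except the last one: the three sparse
  // pages get selected and fit in a single target page.
  const std::vector<size_t> live = {64 * 1024, 128 * 1024, 256 * 1024, 1900 * 1024};
  const Selection s = select_pages(live, 2 * 1024 * 1024, 256 * 1024, /*limit=*/25.0);
  std::printf("relocate %d page(s) into at most %d page(s), %zu live bytes\n",
              s.from, s.to, s.live);
  return 0;
}
```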
+ const int npages = _live_pages.length(); + int selected_from = 0; + int selected_to = 0; + size_t npages_selected = 0; + size_t selected_live_bytes = 0; + size_t selected_forwarding_entries = 0; + size_t from_live_bytes = 0; + size_t from_forwarding_entries = 0; + + semi_sort(); + + for (int from = 1; from <= npages; from++) { + // Add page to the candidate relocation set + XPage* const page = _live_pages.at(from - 1); + from_live_bytes += page->live_bytes(); + from_forwarding_entries += XForwarding::nentries(page); + + // Calculate the maximum number of pages needed by the candidate relocation set. + // By subtracting the object size limit from the pages size we get the maximum + // number of pages that the relocation set is guaranteed to fit in, regardless + // of in which order the objects are relocated. + const int to = ceil((double)(from_live_bytes) / (double)(_page_size - _object_size_limit)); + + // Calculate the relative difference in reclaimable space compared to our + // currently selected final relocation set. If this number is larger than the + // acceptable fragmentation limit, then the current candidate relocation set + // becomes our new final relocation set. + const int diff_from = from - selected_from; + const int diff_to = to - selected_to; + const double diff_reclaimable = 100 - percent_of(diff_to, diff_from); + if (diff_reclaimable > ZFragmentationLimit) { + selected_from = from; + selected_to = to; + selected_live_bytes = from_live_bytes; + npages_selected += 1; + selected_forwarding_entries = from_forwarding_entries; + } + + log_trace(gc, reloc)("Candidate Relocation Set (%s Pages): %d->%d, " + "%.1f%% relative defragmentation, " SIZE_FORMAT " forwarding entries, %s", + _name, from, to, diff_reclaimable, from_forwarding_entries, + (selected_from == from) ? "Selected" : "Rejected"); + } + + // Finalize selection + _live_pages.trunc_to(selected_from); + _forwarding_entries = selected_forwarding_entries; + + // Update statistics + _stats._relocate = selected_live_bytes; + _stats._npages_selected = npages_selected; + + log_trace(gc, reloc)("Relocation Set (%s Pages): %d->%d, %d skipped, " SIZE_FORMAT " forwarding entries", + _name, selected_from, selected_to, npages - selected_from, selected_forwarding_entries); +} + +void XRelocationSetSelectorGroup::select() { + if (is_disabled()) { + return; + } + + EventZRelocationSetGroup event; + + if (is_selectable()) { + select_inner(); + } + + // Send event + event.commit(_page_type, _stats.npages_candidates(), _stats.total(), _stats.empty(), _stats.npages_selected(), _stats.relocate()); +} + +XRelocationSetSelector::XRelocationSetSelector() : + _small("Small", XPageTypeSmall, XPageSizeSmall, XObjectSizeLimitSmall), + _medium("Medium", XPageTypeMedium, XPageSizeMedium, XObjectSizeLimitMedium), + _large("Large", XPageTypeLarge, 0 /* page_size */, 0 /* object_size_limit */), + _empty_pages() {} + +void XRelocationSetSelector::select() { + // Select pages to relocate. The resulting relocation set will be + // sorted such that medium pages comes first, followed by small + // pages. Pages within each page group will be semi-sorted by live + // bytes in ascending order. Relocating pages in this order allows + // us to start reclaiming memory more quickly. 
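The "semi-sorted by live bytes" ordering mentioned above comes from semi_sort(): pages are binned into a fixed number of live-byte partitions, partition start slots are computed with a prefix sum, and pages are scattered into a destination array in one pass. The sketch below shows that counting-sort pass on an invented Page type; it assumes live_bytes stays below the page size.

```cpp
#include <cstdio>
#include <vector>

// Illustrative only: one-pass bucket ("semi") sort of pages by live bytes.
// Pages within a bucket keep no particular order; buckets are ascending.
struct Page { size_t live_bytes; };

std::vector<Page*> semi_sort(const std::vector<Page*>& pages,
                             size_t page_size,
                             size_t npartitions) {
  const size_t partition_size = page_size / npartitions;
  std::vector<int> fingers(npartitions, 0);

  // Count pages per partition
  for (Page* p : pages) {
    fingers[p->live_bytes / partition_size]++;
  }

  // Exclusive prefix sum: each partition's first slot in the output
  int finger = 0;
  for (size_t i = 0; i < npartitions; i++) {
    const int count = fingers[i];
    fingers[i] = finger;
    finger += count;
  }

  // Scatter pages to their partition slots
  std::vector<Page*> sorted(pages.size(), nullptr);
  for (Page* p : pages) {
    sorted[fingers[p->live_bytes / partition_size]++] = p;
  }
  return sorted;
}

int main() {
  Page a{900}, b{100}, c{500}, d{120};
  std::vector<Page*> pages = {&a, &b, &c, &d};
  for (Page* p : semi_sort(pages, /*page_size=*/1024, /*npartitions=*/8)) {
    std::printf("%zu ", p->live_bytes);   // roughly ascending: 100 120 500 900
  }
  std::printf("\n");
  return 0;
}
```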
+ + EventZRelocationSet event; + + // Select pages from each group + _large.select(); + _medium.select(); + _small.select(); + + // Send event + event.commit(total(), empty(), relocate()); +} + +XRelocationSetSelectorStats XRelocationSetSelector::stats() const { + XRelocationSetSelectorStats stats; + stats._small = _small.stats(); + stats._medium = _medium.stats(); + stats._large = _large.stats(); + return stats; +} diff --git a/src/hotspot/share/gc/x/xRelocationSetSelector.hpp b/src/hotspot/share/gc/x/xRelocationSetSelector.hpp new file mode 100644 index 0000000000000..75e40eeea8c31 --- /dev/null +++ b/src/hotspot/share/gc/x/xRelocationSetSelector.hpp @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XRELOCATIONSETSELECTOR_HPP +#define SHARE_GC_X_XRELOCATIONSETSELECTOR_HPP + +#include "gc/x/xArray.hpp" +#include "memory/allocation.hpp" + +class XPage; + +class XRelocationSetSelectorGroupStats { + friend class XRelocationSetSelectorGroup; + +private: + // Candidate set + size_t _npages_candidates; + size_t _total; + size_t _live; + size_t _empty; + + // Selected set + size_t _npages_selected; + size_t _relocate; + +public: + XRelocationSetSelectorGroupStats(); + + size_t npages_candidates() const; + size_t total() const; + size_t live() const; + size_t empty() const; + + size_t npages_selected() const; + size_t relocate() const; +}; + +class XRelocationSetSelectorStats { + friend class XRelocationSetSelector; + +private: + XRelocationSetSelectorGroupStats _small; + XRelocationSetSelectorGroupStats _medium; + XRelocationSetSelectorGroupStats _large; + +public: + const XRelocationSetSelectorGroupStats& small() const; + const XRelocationSetSelectorGroupStats& medium() const; + const XRelocationSetSelectorGroupStats& large() const; +}; + +class XRelocationSetSelectorGroup { +private: + const char* const _name; + const uint8_t _page_type; + const size_t _page_size; + const size_t _object_size_limit; + const size_t _fragmentation_limit; + XArray _live_pages; + size_t _forwarding_entries; + XRelocationSetSelectorGroupStats _stats; + + bool is_disabled(); + bool is_selectable(); + void semi_sort(); + void select_inner(); + +public: + XRelocationSetSelectorGroup(const char* name, + uint8_t page_type, + size_t page_size, + size_t object_size_limit); + + void register_live_page(XPage* page); + void register_empty_page(XPage* page); + void select(); + + const XArray* selected() const; + size_t forwarding_entries() const; + + const 
XRelocationSetSelectorGroupStats& stats() const; +}; + +class XRelocationSetSelector : public StackObj { +private: + XRelocationSetSelectorGroup _small; + XRelocationSetSelectorGroup _medium; + XRelocationSetSelectorGroup _large; + XArray _empty_pages; + + size_t total() const; + size_t empty() const; + size_t relocate() const; + +public: + XRelocationSetSelector(); + + void register_live_page(XPage* page); + void register_empty_page(XPage* page); + + bool should_free_empty_pages(int bulk) const; + const XArray* empty_pages() const; + void clear_empty_pages(); + + void select(); + + const XArray* small() const; + const XArray* medium() const; + size_t forwarding_entries() const; + + XRelocationSetSelectorStats stats() const; +}; + +#endif // SHARE_GC_X_XRELOCATIONSETSELECTOR_HPP diff --git a/src/hotspot/share/gc/x/xRelocationSetSelector.inline.hpp b/src/hotspot/share/gc/x/xRelocationSetSelector.inline.hpp new file mode 100644 index 0000000000000..25e0ede835de0 --- /dev/null +++ b/src/hotspot/share/gc/x/xRelocationSetSelector.inline.hpp @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XRELOCATIONSETSELECTOR_INLINE_HPP +#define SHARE_GC_X_XRELOCATIONSETSELECTOR_INLINE_HPP + +#include "gc/x/xRelocationSetSelector.hpp" + +#include "gc/x/xArray.inline.hpp" +#include "gc/x/xPage.inline.hpp" + +inline size_t XRelocationSetSelectorGroupStats::npages_candidates() const { + return _npages_candidates; +} + +inline size_t XRelocationSetSelectorGroupStats::total() const { + return _total; +} + +inline size_t XRelocationSetSelectorGroupStats::live() const { + return _live; +} + +inline size_t XRelocationSetSelectorGroupStats::empty() const { + return _empty; +} + +inline size_t XRelocationSetSelectorGroupStats::npages_selected() const { + return _npages_selected; +} + +inline size_t XRelocationSetSelectorGroupStats::relocate() const { + return _relocate; +} + +inline const XRelocationSetSelectorGroupStats& XRelocationSetSelectorStats::small() const { + return _small; +} + +inline const XRelocationSetSelectorGroupStats& XRelocationSetSelectorStats::medium() const { + return _medium; +} + +inline const XRelocationSetSelectorGroupStats& XRelocationSetSelectorStats::large() const { + return _large; +} + +inline void XRelocationSetSelectorGroup::register_live_page(XPage* page) { + const uint8_t type = page->type(); + const size_t size = page->size(); + const size_t live = page->live_bytes(); + const size_t garbage = size - live; + + if (garbage > _fragmentation_limit) { + _live_pages.append(page); + } + + _stats._npages_candidates++; + _stats._total += size; + _stats._live += live; +} + +inline void XRelocationSetSelectorGroup::register_empty_page(XPage* page) { + const size_t size = page->size(); + + _stats._npages_candidates++; + _stats._total += size; + _stats._empty += size; +} + +inline const XArray* XRelocationSetSelectorGroup::selected() const { + return &_live_pages; +} + +inline size_t XRelocationSetSelectorGroup::forwarding_entries() const { + return _forwarding_entries; +} + +inline const XRelocationSetSelectorGroupStats& XRelocationSetSelectorGroup::stats() const { + return _stats; +} + +inline void XRelocationSetSelector::register_live_page(XPage* page) { + const uint8_t type = page->type(); + + if (type == XPageTypeSmall) { + _small.register_live_page(page); + } else if (type == XPageTypeMedium) { + _medium.register_live_page(page); + } else { + _large.register_live_page(page); + } +} + +inline void XRelocationSetSelector::register_empty_page(XPage* page) { + const uint8_t type = page->type(); + + if (type == XPageTypeSmall) { + _small.register_empty_page(page); + } else if (type == XPageTypeMedium) { + _medium.register_empty_page(page); + } else { + _large.register_empty_page(page); + } + + _empty_pages.append(page); +} + +inline bool XRelocationSetSelector::should_free_empty_pages(int bulk) const { + return _empty_pages.length() >= bulk && _empty_pages.is_nonempty(); +} + +inline const XArray* XRelocationSetSelector::empty_pages() const { + return &_empty_pages; +} + +inline void XRelocationSetSelector::clear_empty_pages() { + return _empty_pages.clear(); +} + +inline size_t XRelocationSetSelector::total() const { + return _small.stats().total() + _medium.stats().total() + _large.stats().total(); +} + +inline size_t XRelocationSetSelector::empty() const { + return _small.stats().empty() + _medium.stats().empty() + _large.stats().empty(); +} + +inline size_t XRelocationSetSelector::relocate() const { + return _small.stats().relocate() + _medium.stats().relocate() + _large.stats().relocate(); +} + +inline const XArray* 
XRelocationSetSelector::small() const { + return _small.selected(); +} + +inline const XArray* XRelocationSetSelector::medium() const { + return _medium.selected(); +} + +inline size_t XRelocationSetSelector::forwarding_entries() const { + return _small.forwarding_entries() + _medium.forwarding_entries(); +} + +#endif // SHARE_GC_X_XRELOCATIONSETSELECTOR_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xResurrection.cpp b/src/hotspot/share/gc/x/xResurrection.cpp new file mode 100644 index 0000000000000..486f1f8db82e0 --- /dev/null +++ b/src/hotspot/share/gc/x/xResurrection.cpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xResurrection.hpp" +#include "runtime/atomic.hpp" +#include "runtime/safepoint.hpp" +#include "utilities/debug.hpp" + +volatile bool XResurrection::_blocked = false; + +void XResurrection::block() { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + _blocked = true; +} + +void XResurrection::unblock() { + // No need for anything stronger than a relaxed store here. + // The preceding handshake makes sure that all non-strong + // oops have already been healed at this point. + Atomic::store(&_blocked, false); +} diff --git a/src/hotspot/share/gc/x/xResurrection.hpp b/src/hotspot/share/gc/x/xResurrection.hpp new file mode 100644 index 0000000000000..d6ce9820e02fe --- /dev/null +++ b/src/hotspot/share/gc/x/xResurrection.hpp @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XRESURRECTION_HPP +#define SHARE_GC_X_XRESURRECTION_HPP + +#include "memory/allStatic.hpp" + +class XResurrection : public AllStatic { +private: + static volatile bool _blocked; + +public: + static bool is_blocked(); + static void block(); + static void unblock(); +}; + +#endif // SHARE_GC_X_XRESURRECTION_HPP diff --git a/src/hotspot/share/gc/x/xResurrection.inline.hpp b/src/hotspot/share/gc/x/xResurrection.inline.hpp new file mode 100644 index 0000000000000..af1993945cc41 --- /dev/null +++ b/src/hotspot/share/gc/x/xResurrection.inline.hpp @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XRESURRECTION_INLINE_HPP +#define SHARE_GC_X_XRESURRECTION_INLINE_HPP + +#include "gc/x/xResurrection.hpp" + +#include "runtime/atomic.hpp" + +inline bool XResurrection::is_blocked() { + return Atomic::load(&_blocked); +} + +#endif // SHARE_GC_X_XRESURRECTION_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xRootsIterator.cpp b/src/hotspot/share/gc/x/xRootsIterator.cpp new file mode 100644 index 0000000000000..4eaeb8e77c2a2 --- /dev/null +++ b/src/hotspot/share/gc/x/xRootsIterator.cpp @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "classfile/classLoaderDataGraph.hpp" +#include "gc/shared/oopStorageSetParState.inline.hpp" +#include "gc/x/xNMethod.hpp" +#include "gc/x/xNMethodTable.hpp" +#include "gc/x/xRootsIterator.hpp" +#include "gc/x/xStat.hpp" +#include "memory/resourceArea.hpp" +#include "prims/jvmtiTagMap.hpp" +#include "runtime/atomic.hpp" +#include "runtime/globals.hpp" +#include "runtime/safepoint.hpp" +#include "utilities/debug.hpp" + +static const XStatSubPhase XSubPhaseConcurrentRootsOopStorageSet("Concurrent Roots OopStorageSet"); +static const XStatSubPhase XSubPhaseConcurrentRootsClassLoaderDataGraph("Concurrent Roots ClassLoaderDataGraph"); +static const XStatSubPhase XSubPhaseConcurrentRootsJavaThreads("Concurrent Roots JavaThreads"); +static const XStatSubPhase XSubPhaseConcurrentRootsCodeCache("Concurrent Roots CodeCache"); +static const XStatSubPhase XSubPhaseConcurrentWeakRootsOopStorageSet("Concurrent Weak Roots OopStorageSet"); + +template +template +void XParallelApply::apply(ClosureType* cl) { + if (!Atomic::load(&_completed)) { + _iter.apply(cl); + if (!Atomic::load(&_completed)) { + Atomic::store(&_completed, true); + } + } +} + +XStrongOopStorageSetIterator::XStrongOopStorageSetIterator() : + _iter() {} + +void XStrongOopStorageSetIterator::apply(OopClosure* cl) { + XStatTimer timer(XSubPhaseConcurrentRootsOopStorageSet); + _iter.oops_do(cl); +} + +void XStrongCLDsIterator::apply(CLDClosure* cl) { + XStatTimer timer(XSubPhaseConcurrentRootsClassLoaderDataGraph); + ClassLoaderDataGraph::always_strong_cld_do(cl); +} + +XJavaThreadsIterator::XJavaThreadsIterator() : + _threads(), + _claimed(0) {} + +uint XJavaThreadsIterator::claim() { + return Atomic::fetch_then_add(&_claimed, 1u); +} + +void XJavaThreadsIterator::apply(ThreadClosure* cl) { + XStatTimer timer(XSubPhaseConcurrentRootsJavaThreads); + + // The resource mark is needed because interpreter oop maps are + // not reused in concurrent mode. Instead, they are temporary and + // resource allocated. 
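XJavaThreadsIterator::claim() above distributes work by handing out indices into one ThreadsListHandle snapshot with an atomic fetch-and-add, so any number of workers can share the walk without pre-partitioning it, and XParallelApply layers a "completed" flag on top of the same idea. The sketch below shows the claim loop with std::atomic and a few std::thread workers on an invented item type.

```cpp
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// Illustrative only: several workers claim indices from one shared snapshot
// with fetch_add; every index is visited exactly once.
struct Item { int id; };

void process_all(const std::vector<Item>& snapshot,
                 std::atomic<unsigned>& claimed,
                 std::atomic<int>& visited) {
  for (unsigned i = claimed.fetch_add(1, std::memory_order_relaxed);
       i < snapshot.size();
       i = claimed.fetch_add(1, std::memory_order_relaxed)) {
    visited.fetch_add(1, std::memory_order_relaxed);   // stands in for do_thread(snapshot[i])
  }
}

int main() {
  std::vector<Item> snapshot(100);
  std::atomic<unsigned> claimed{0};
  std::atomic<int> visited{0};

  std::vector<std::thread> workers;
  for (int w = 0; w < 4; w++) {
    workers.emplace_back(process_all, std::cref(snapshot), std::ref(claimed), std::ref(visited));
  }
  for (std::thread& t : workers) {
    t.join();
  }
  std::printf("visited %d of %zu items\n", visited.load(), snapshot.size());  // 100 of 100
  return 0;
}
```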
+ ResourceMark _rm; + + for (uint i = claim(); i < _threads.length(); i = claim()) { + cl->do_thread(_threads.thread_at(i)); + } +} + +XNMethodsIterator::XNMethodsIterator() { + if (!ClassUnloading) { + XNMethod::nmethods_do_begin(); + } +} + +XNMethodsIterator::~XNMethodsIterator() { + if (!ClassUnloading) { + XNMethod::nmethods_do_end(); + } +} + +void XNMethodsIterator::apply(NMethodClosure* cl) { + XStatTimer timer(XSubPhaseConcurrentRootsCodeCache); + XNMethod::nmethods_do(cl); +} + +XRootsIterator::XRootsIterator(int cld_claim) { + if (cld_claim != ClassLoaderData::_claim_none) { + ClassLoaderDataGraph::verify_claimed_marks_cleared(cld_claim); + } +} + +void XRootsIterator::apply(OopClosure* cl, + CLDClosure* cld_cl, + ThreadClosure* thread_cl, + NMethodClosure* nm_cl) { + _oop_storage_set.apply(cl); + _class_loader_data_graph.apply(cld_cl); + _java_threads.apply(thread_cl); + if (!ClassUnloading) { + _nmethods.apply(nm_cl); + } +} + +XWeakOopStorageSetIterator::XWeakOopStorageSetIterator() : + _iter() {} + +void XWeakOopStorageSetIterator::apply(OopClosure* cl) { + XStatTimer timer(XSubPhaseConcurrentWeakRootsOopStorageSet); + _iter.oops_do(cl); +} + +void XWeakOopStorageSetIterator::report_num_dead() { + _iter.report_num_dead(); +} + +void XWeakRootsIterator::report_num_dead() { + _oop_storage_set.iter().report_num_dead(); +} + +void XWeakRootsIterator::apply(OopClosure* cl) { + _oop_storage_set.apply(cl); +} diff --git a/src/hotspot/share/gc/x/xRootsIterator.hpp b/src/hotspot/share/gc/x/xRootsIterator.hpp new file mode 100644 index 0000000000000..9adc4c0293868 --- /dev/null +++ b/src/hotspot/share/gc/x/xRootsIterator.hpp @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+#ifndef SHARE_GC_X_XROOTSITERATOR_HPP
+#define SHARE_GC_X_XROOTSITERATOR_HPP
+
+#include "gc/shared/oopStorageSetParState.hpp"
+#include "logging/log.hpp"
+#include "memory/iterator.hpp"
+#include "runtime/threadSMR.hpp"
+
+template <typename Iterator>
+class XParallelApply {
+private:
+  Iterator      _iter;
+  volatile bool _completed;
+
+public:
+  XParallelApply() :
+      _iter(),
+      _completed(false) {}
+
+  template <typename ClosureType>
+  void apply(ClosureType* cl);
+
+  Iterator& iter() {
+    return _iter;
+  }
+};
+
+class XStrongOopStorageSetIterator {
+  OopStorageSetStrongParState<true /* concurrent */, false /* is_const */> _iter;
+
+public:
+  XStrongOopStorageSetIterator();
+
+  void apply(OopClosure* cl);
+};
+
+class XStrongCLDsIterator {
+public:
+  void apply(CLDClosure* cl);
+};
+
+class XJavaThreadsIterator {
+private:
+  ThreadsListHandle _threads;
+  volatile uint     _claimed;
+
+  uint claim();
+
+public:
+  XJavaThreadsIterator();
+
+  void apply(ThreadClosure* cl);
+};
+
+class XNMethodsIterator {
+public:
+  XNMethodsIterator();
+  ~XNMethodsIterator();
+
+  void apply(NMethodClosure* cl);
+};
+
+class XRootsIterator {
+private:
+  XParallelApply<XStrongOopStorageSetIterator> _oop_storage_set;
+  XParallelApply<XStrongCLDsIterator>          _class_loader_data_graph;
+  XParallelApply<XJavaThreadsIterator>         _java_threads;
+  XParallelApply<XNMethodsIterator>            _nmethods;
+
+public:
+  XRootsIterator(int cld_claim);
+
+  void apply(OopClosure* cl,
+             CLDClosure* cld_cl,
+             ThreadClosure* thread_cl,
+             NMethodClosure* nm_cl);
+};
+
+class XWeakOopStorageSetIterator {
+private:
+  OopStorageSetWeakParState<true /* concurrent */, false /* is_const */> _iter;
+
+public:
+  XWeakOopStorageSetIterator();
+
+  void apply(OopClosure* cl);
+
+  void report_num_dead();
+};
+
+class XWeakRootsIterator {
+private:
+  XParallelApply<XWeakOopStorageSetIterator> _oop_storage_set;
+
+public:
+  void apply(OopClosure* cl);
+
+  void report_num_dead();
+};
+
+#endif // SHARE_GC_X_XROOTSITERATOR_HPP
diff --git a/src/hotspot/share/gc/x/xRuntimeWorkers.cpp b/src/hotspot/share/gc/x/xRuntimeWorkers.cpp
new file mode 100644
index 0000000000000..d7e4a1262fcbd
--- /dev/null
+++ b/src/hotspot/share/gc/x/xRuntimeWorkers.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
+#include "gc/shared/gc_globals.hpp"
+#include "gc/x/xLock.inline.hpp"
+#include "gc/x/xRuntimeWorkers.hpp"
+#include "gc/x/xTask.hpp"
+#include "gc/x/xThread.hpp"
+#include "runtime/java.hpp"
+
+class XRuntimeWorkersInitializeTask : public WorkerTask {
+private:
+  const uint     _nworkers;
+  uint           _started;
+  XConditionLock _lock;
+
+public:
+  XRuntimeWorkersInitializeTask(uint nworkers) :
+      WorkerTask("XRuntimeWorkersInitializeTask"),
+      _nworkers(nworkers),
+      _started(0),
+      _lock() {}
+
+  virtual void work(uint worker_id) {
+    // Wait for all threads to start
+    XLocker<XConditionLock> locker(&_lock);
+    if (++_started == _nworkers) {
+      // All threads started
+      _lock.notify_all();
+    } else {
+      while (_started != _nworkers) {
+        _lock.wait();
+      }
+    }
+  }
+};
+
+XRuntimeWorkers::XRuntimeWorkers() :
+    _workers("RuntimeWorker",
+             ParallelGCThreads) {
+
+  log_info_p(gc, init)("Runtime Workers: %u", _workers.max_workers());
+
+  // Initialize worker threads
+  _workers.initialize_workers();
+  _workers.set_active_workers(_workers.max_workers());
+  if (_workers.active_workers() != _workers.max_workers()) {
+    vm_exit_during_initialization("Failed to create XRuntimeWorkers");
+  }
+
+  // Execute task to reduce latency in early safepoints,
+  // which otherwise would have to take on any warmup costs.
+  XRuntimeWorkersInitializeTask task(_workers.max_workers());
+  _workers.run_task(&task);
+}
+
+WorkerThreads* XRuntimeWorkers::workers() {
+  return &_workers;
+}
+
+void XRuntimeWorkers::threads_do(ThreadClosure* tc) const {
+  _workers.threads_do(tc);
+}
diff --git a/src/hotspot/share/gc/x/xRuntimeWorkers.hpp b/src/hotspot/share/gc/x/xRuntimeWorkers.hpp
new file mode 100644
index 0000000000000..114521d65067e
--- /dev/null
+++ b/src/hotspot/share/gc/x/xRuntimeWorkers.hpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_X_XRUNTIMEWORKERS_HPP
+#define SHARE_GC_X_XRUNTIMEWORKERS_HPP
+
+#include "gc/shared/workerThread.hpp"
+
+class ThreadClosure;
+
+class XRuntimeWorkers {
+private:
+  WorkerThreads _workers;
+
+public:
+  XRuntimeWorkers();
+
+  WorkerThreads* workers();
+
+  void threads_do(ThreadClosure* tc) const;
+};
+
+#endif // SHARE_GC_X_XRUNTIMEWORKERS_HPP
diff --git a/src/hotspot/share/gc/x/xSafeDelete.hpp b/src/hotspot/share/gc/x/xSafeDelete.hpp
new file mode 100644
index 0000000000000..c41a38ce1873a
--- /dev/null
+++ b/src/hotspot/share/gc/x/xSafeDelete.hpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_X_XSAFEDELETE_HPP
+#define SHARE_GC_X_XSAFEDELETE_HPP
+
+#include "gc/x/xArray.hpp"
+#include "gc/x/xLock.hpp"
+
+#include <type_traits>
+
+template <typename T>
+class XSafeDeleteImpl {
+private:
+  using ItemT = std::remove_extent_t<T>;
+
+  XLock*         _lock;
+  uint64_t       _enabled;
+  XArray<ItemT*> _deferred;
+
+  bool deferred_delete(ItemT* item);
+  void immediate_delete(ItemT* item);
+
+public:
+  XSafeDeleteImpl(XLock* lock);
+
+  void enable_deferred_delete();
+  void disable_deferred_delete();
+
+  void operator()(ItemT* item);
+};
+
+template <typename T>
+class XSafeDelete : public XSafeDeleteImpl<T> {
+private:
+  XLock _lock;
+
+public:
+  XSafeDelete();
+};
+
+template <typename T>
+class XSafeDeleteNoLock : public XSafeDeleteImpl<T> {
+public:
+  XSafeDeleteNoLock();
+};
+
+#endif // SHARE_GC_X_XSAFEDELETE_HPP
diff --git a/src/hotspot/share/gc/x/xSafeDelete.inline.hpp b/src/hotspot/share/gc/x/xSafeDelete.inline.hpp
new file mode 100644
index 0000000000000..6c8417593d442
--- /dev/null
+++ b/src/hotspot/share/gc/x/xSafeDelete.inline.hpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_X_XSAFEDELETE_INLINE_HPP
+#define SHARE_GC_X_XSAFEDELETE_INLINE_HPP
+
+#include "gc/x/xSafeDelete.hpp"
+
+#include "gc/x/xArray.inline.hpp"
+#include "utilities/debug.hpp"
+
+#include <type_traits>
+
+template <typename T>
+XSafeDeleteImpl<T>::XSafeDeleteImpl(XLock* lock) :
+    _lock(lock),
+    _enabled(0),
+    _deferred() {}
+
+template <typename T>
+bool XSafeDeleteImpl<T>::deferred_delete(ItemT* item) {
+  XLocker<XLock> locker(_lock);
+  if (_enabled > 0) {
+    _deferred.append(item);
+    return true;
+  }
+
+  return false;
+}
+
+template <typename T>
+void XSafeDeleteImpl<T>::immediate_delete(ItemT* item) {
+  if (std::is_array<T>::value) {
+    delete [] item;
+  } else {
+    delete item;
+  }
+}
+
+template <typename T>
+void XSafeDeleteImpl<T>::enable_deferred_delete() {
+  XLocker<XLock> locker(_lock);
+  _enabled++;
+}
+
+template <typename T>
+void XSafeDeleteImpl<T>::disable_deferred_delete() {
+  XArray<ItemT*> deferred;
+
+  {
+    XLocker<XLock> locker(_lock);
+    assert(_enabled > 0, "Invalid state");
+    if (--_enabled == 0) {
+      deferred.swap(&_deferred);
+    }
+  }
+
+  XArrayIterator<ItemT*> iter(&deferred);
+  for (ItemT* item; iter.next(&item);) {
+    immediate_delete(item);
+  }
+}
+
+template <typename T>
+void XSafeDeleteImpl<T>::operator()(ItemT* item) {
+  if (!deferred_delete(item)) {
+    immediate_delete(item);
+  }
+}
+
+template <typename T>
+XSafeDelete<T>::XSafeDelete() :
+    XSafeDeleteImpl<T>(&_lock),
+    _lock() {}
+
+template <typename T>
+XSafeDeleteNoLock<T>::XSafeDeleteNoLock() :
+    XSafeDeleteImpl<T>(NULL) {}
+
+#endif // SHARE_GC_X_XSAFEDELETE_INLINE_HPP
diff --git a/src/hotspot/share/gc/x/xServiceability.cpp b/src/hotspot/share/gc/x/xServiceability.cpp
new file mode 100644
index 0000000000000..6882896dfa94f
--- /dev/null
+++ b/src/hotspot/share/gc/x/xServiceability.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */ + +#include "precompiled.hpp" +#include "gc/shared/generationCounters.hpp" +#include "gc/shared/hSpaceCounters.hpp" +#include "gc/x/xCollectedHeap.hpp" +#include "gc/x/xHeap.inline.hpp" +#include "gc/x/xServiceability.hpp" +#include "memory/metaspaceCounters.hpp" +#include "runtime/perfData.hpp" + +class XGenerationCounters : public GenerationCounters { +public: + XGenerationCounters(const char* name, int ordinal, int spaces, + size_t min_capacity, size_t max_capacity, size_t curr_capacity) : + GenerationCounters(name, ordinal, spaces, + min_capacity, max_capacity, curr_capacity) {} + + void update_capacity(size_t capacity) { + _current_size->set_value(capacity); + } +}; + +// Class to expose perf counters used by jstat. +class XServiceabilityCounters : public CHeapObj { +private: + XGenerationCounters _generation_counters; + HSpaceCounters _space_counters; + CollectorCounters _collector_counters; + +public: + XServiceabilityCounters(size_t min_capacity, size_t max_capacity); + + CollectorCounters* collector_counters(); + + void update_sizes(); +}; + +XServiceabilityCounters::XServiceabilityCounters(size_t min_capacity, size_t max_capacity) : + // generation.1 + _generation_counters("old" /* name */, + 1 /* ordinal */, + 1 /* spaces */, + min_capacity /* min_capacity */, + max_capacity /* max_capacity */, + min_capacity /* curr_capacity */), + // generation.1.space.0 + _space_counters(_generation_counters.name_space(), + "space" /* name */, + 0 /* ordinal */, + max_capacity /* max_capacity */, + min_capacity /* init_capacity */), + // gc.collector.2 + _collector_counters("Z concurrent cycle pauses" /* name */, + 2 /* ordinal */) {} + +CollectorCounters* XServiceabilityCounters::collector_counters() { + return &_collector_counters; +} + +void XServiceabilityCounters::update_sizes() { + if (UsePerfData) { + const size_t capacity = XHeap::heap()->capacity(); + const size_t used = MIN2(XHeap::heap()->used(), capacity); + + _generation_counters.update_capacity(capacity); + _space_counters.update_capacity(capacity); + _space_counters.update_used(used); + + MetaspaceCounters::update_performance_counters(); + } +} + +XServiceabilityMemoryPool::XServiceabilityMemoryPool(size_t min_capacity, size_t max_capacity) : + CollectedMemoryPool("ZHeap", + min_capacity, + max_capacity, + true /* support_usage_threshold */) {} + +size_t XServiceabilityMemoryPool::used_in_bytes() { + return XHeap::heap()->used(); +} + +MemoryUsage XServiceabilityMemoryPool::get_memory_usage() { + const size_t committed = XHeap::heap()->capacity(); + const size_t used = MIN2(XHeap::heap()->used(), committed); + + return MemoryUsage(initial_size(), used, committed, max_size()); +} + +XServiceabilityMemoryManager::XServiceabilityMemoryManager(const char* name, + XServiceabilityMemoryPool* pool) : + GCMemoryManager(name) { + add_pool(pool); +} + +XServiceability::XServiceability(size_t min_capacity, size_t max_capacity) : + _min_capacity(min_capacity), + _max_capacity(max_capacity), + _memory_pool(_min_capacity, _max_capacity), + _cycle_memory_manager("ZGC Cycles", &_memory_pool), + _pause_memory_manager("ZGC Pauses", &_memory_pool), + _counters(NULL) {} + +void XServiceability::initialize() { + _counters = new XServiceabilityCounters(_min_capacity, _max_capacity); +} + +MemoryPool* XServiceability::memory_pool() { + return &_memory_pool; +} + +GCMemoryManager* XServiceability::cycle_memory_manager() { + return &_cycle_memory_manager; +} + +GCMemoryManager* XServiceability::pause_memory_manager() { + return 
&_pause_memory_manager; +} + +XServiceabilityCounters* XServiceability::counters() { + return _counters; +} + +XServiceabilityCycleTracer::XServiceabilityCycleTracer() : + _memory_manager_stats(XHeap::heap()->serviceability_cycle_memory_manager(), + XCollectedHeap::heap()->gc_cause(), + "end of GC cycle", + true /* allMemoryPoolsAffected */, + true /* recordGCBeginTime */, + true /* recordPreGCUsage */, + true /* recordPeakUsage */, + true /* recordPostGCUsage */, + true /* recordAccumulatedGCTime */, + true /* recordGCEndTime */, + true /* countCollection */) {} + +XServiceabilityPauseTracer::XServiceabilityPauseTracer() : + _svc_gc_marker(SvcGCMarker::CONCURRENT), + _counters_stats(XHeap::heap()->serviceability_counters()->collector_counters()), + _memory_manager_stats(XHeap::heap()->serviceability_pause_memory_manager(), + XCollectedHeap::heap()->gc_cause(), + "end of GC pause", + true /* allMemoryPoolsAffected */, + true /* recordGCBeginTime */, + false /* recordPreGCUsage */, + false /* recordPeakUsage */, + false /* recordPostGCUsage */, + true /* recordAccumulatedGCTime */, + true /* recordGCEndTime */, + true /* countCollection */) {} + +XServiceabilityPauseTracer::~XServiceabilityPauseTracer() { + XHeap::heap()->serviceability_counters()->update_sizes(); + MemoryService::track_memory_usage(); +} diff --git a/src/hotspot/share/gc/x/xServiceability.hpp b/src/hotspot/share/gc/x/xServiceability.hpp new file mode 100644 index 0000000000000..d8e2fc9ba7973 --- /dev/null +++ b/src/hotspot/share/gc/x/xServiceability.hpp @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XSERVICEABILITY_HPP +#define SHARE_GC_X_XSERVICEABILITY_HPP + +#include "gc/shared/collectorCounters.hpp" +#include "gc/shared/gcVMOperations.hpp" +#include "memory/allocation.hpp" +#include "services/memoryManager.hpp" +#include "services/memoryPool.hpp" +#include "services/memoryService.hpp" + +class XServiceabilityCounters; + +class XServiceabilityMemoryPool : public CollectedMemoryPool { +public: + XServiceabilityMemoryPool(size_t min_capacity, size_t max_capacity); + + virtual size_t used_in_bytes(); + virtual MemoryUsage get_memory_usage(); +}; + +class XServiceabilityMemoryManager : public GCMemoryManager { +public: + XServiceabilityMemoryManager(const char* name, + XServiceabilityMemoryPool* pool); +}; + +class XServiceability { +private: + const size_t _min_capacity; + const size_t _max_capacity; + XServiceabilityMemoryPool _memory_pool; + XServiceabilityMemoryManager _cycle_memory_manager; + XServiceabilityMemoryManager _pause_memory_manager; + XServiceabilityCounters* _counters; + +public: + XServiceability(size_t min_capacity, size_t max_capacity); + + void initialize(); + + MemoryPool* memory_pool(); + GCMemoryManager* cycle_memory_manager(); + GCMemoryManager* pause_memory_manager(); + XServiceabilityCounters* counters(); +}; + +class XServiceabilityCycleTracer : public StackObj { +private: + TraceMemoryManagerStats _memory_manager_stats; + +public: + XServiceabilityCycleTracer(); +}; + +class XServiceabilityPauseTracer : public StackObj { +private: + SvcGCMarker _svc_gc_marker; + TraceCollectorStats _counters_stats; + TraceMemoryManagerStats _memory_manager_stats; + +public: + XServiceabilityPauseTracer(); + ~XServiceabilityPauseTracer(); +}; + +#endif // SHARE_GC_X_XSERVICEABILITY_HPP diff --git a/src/hotspot/share/gc/x/xStackWatermark.cpp b/src/hotspot/share/gc/x/xStackWatermark.cpp new file mode 100644 index 0000000000000..7be799f74f0de --- /dev/null +++ b/src/hotspot/share/gc/x/xStackWatermark.cpp @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/x/xAddress.hpp" +#include "gc/x/xBarrier.inline.hpp" +#include "gc/x/xStackWatermark.hpp" +#include "gc/x/xThread.inline.hpp" +#include "gc/x/xThreadLocalAllocBuffer.hpp" +#include "gc/x/xThreadLocalData.hpp" +#include "gc/x/xVerify.hpp" +#include "memory/resourceArea.inline.hpp" +#include "runtime/frame.inline.hpp" +#include "utilities/preserveException.hpp" + +XOnStackCodeBlobClosure::XOnStackCodeBlobClosure() : + _bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {} + +void XOnStackCodeBlobClosure::do_code_blob(CodeBlob* cb) { + nmethod* const nm = cb->as_nmethod_or_null(); + if (nm != NULL) { + const bool result = _bs_nm->nmethod_entry_barrier(nm); + assert(result, "NMethod on-stack must be alive"); + } +} + +ThreadLocalAllocStats& XStackWatermark::stats() { + return _stats; +} + +uint32_t XStackWatermark::epoch_id() const { + return *XAddressBadMaskHighOrderBitsAddr; +} + +XStackWatermark::XStackWatermark(JavaThread* jt) : + StackWatermark(jt, StackWatermarkKind::gc, *XAddressBadMaskHighOrderBitsAddr), + _jt_cl(), + _cb_cl(), + _stats() {} + +OopClosure* XStackWatermark::closure_from_context(void* context) { + if (context != NULL) { + assert(XThread::is_worker(), "Unexpected thread passing in context: " PTR_FORMAT, p2i(context)); + return reinterpret_cast(context); + } else { + return &_jt_cl; + } +} + +void XStackWatermark::start_processing_impl(void* context) { + // Verify the head (no_frames) of the thread is bad before fixing it. + XVerify::verify_thread_head_bad(_jt); + + // Process the non-frame part of the thread + _jt->oops_do_no_frames(closure_from_context(context), &_cb_cl); + XThreadLocalData::do_invisible_root(_jt, XBarrier::load_barrier_on_invisible_root_oop_field); + + // Verification of frames is done after processing of the "head" (no_frames). + // The reason is that the exception oop is fiddled with during frame processing. + XVerify::verify_thread_frames_bad(_jt); + + // Update thread local address bad mask + XThreadLocalData::set_address_bad_mask(_jt, XAddressBadMask); + + // Retire TLAB + if (XGlobalPhase == XPhaseMark) { + XThreadLocalAllocBuffer::retire(_jt, &_stats); + } else { + XThreadLocalAllocBuffer::remap(_jt); + } + + // Publishes the processing start to concurrent threads + StackWatermark::start_processing_impl(context); +} + +void XStackWatermark::process(const frame& fr, RegisterMap& register_map, void* context) { + XVerify::verify_frame_bad(fr, register_map); + fr.oops_do(closure_from_context(context), &_cb_cl, ®ister_map, DerivedPointerIterationMode::_directly); +} diff --git a/src/hotspot/share/gc/x/xStackWatermark.hpp b/src/hotspot/share/gc/x/xStackWatermark.hpp new file mode 100644 index 0000000000000..b9f39d0d1d87d --- /dev/null +++ b/src/hotspot/share/gc/x/xStackWatermark.hpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XSTACKWATERMARK_HPP +#define SHARE_GC_X_XSTACKWATERMARK_HPP + +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/barrierSetNMethod.hpp" +#include "gc/shared/threadLocalAllocBuffer.hpp" +#include "gc/x/xBarrier.hpp" +#include "memory/allocation.hpp" +#include "memory/iterator.hpp" +#include "oops/oopsHierarchy.hpp" +#include "runtime/stackWatermark.hpp" +#include "utilities/globalDefinitions.hpp" + +class frame; +class JavaThread; + +class XOnStackCodeBlobClosure : public CodeBlobClosure { +private: + BarrierSetNMethod* _bs_nm; + + virtual void do_code_blob(CodeBlob* cb); + +public: + XOnStackCodeBlobClosure(); +}; + +class XStackWatermark : public StackWatermark { +private: + XLoadBarrierOopClosure _jt_cl; + XOnStackCodeBlobClosure _cb_cl; + ThreadLocalAllocStats _stats; + + OopClosure* closure_from_context(void* context); + + virtual uint32_t epoch_id() const; + virtual void start_processing_impl(void* context); + virtual void process(const frame& fr, RegisterMap& register_map, void* context); + +public: + XStackWatermark(JavaThread* jt); + + ThreadLocalAllocStats& stats(); +}; + +#endif // SHARE_GC_X_XSTACKWATERMARK_HPP diff --git a/src/hotspot/share/gc/x/xStat.cpp b/src/hotspot/share/gc/x/xStat.cpp new file mode 100644 index 0000000000000..beb90cc1740c1 --- /dev/null +++ b/src/hotspot/share/gc/x/xStat.cpp @@ -0,0 +1,1513 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/x/xAbort.inline.hpp" +#include "gc/x/xCollectedHeap.hpp" +#include "gc/x/xCPU.inline.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xNMethodTable.hpp" +#include "gc/x/xPageAllocator.inline.hpp" +#include "gc/x/xRelocationSetSelector.inline.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xThread.inline.hpp" +#include "gc/x/xTracer.inline.hpp" +#include "gc/x/xUtils.hpp" +#include "memory/metaspaceUtils.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/atomic.hpp" +#include "runtime/os.hpp" +#include "runtime/timer.hpp" +#include "utilities/align.hpp" +#include "utilities/debug.hpp" +#include "utilities/ticks.hpp" + +#define XSIZE_FMT SIZE_FORMAT "M(%.0f%%)" +#define XSIZE_ARGS_WITH_MAX(size, max) ((size) / M), (percent_of(size, max)) +#define XSIZE_ARGS(size) XSIZE_ARGS_WITH_MAX(size, XStatHeap::max_capacity()) + +#define XTABLE_ARGS_NA "%9s", "-" +#define XTABLE_ARGS(size) SIZE_FORMAT_W(8) "M (%.0f%%)", \ + ((size) / M), (percent_of(size, XStatHeap::max_capacity())) + +// +// Stat sampler/counter data +// +struct XStatSamplerData { + uint64_t _nsamples; + uint64_t _sum; + uint64_t _max; + + XStatSamplerData() : + _nsamples(0), + _sum(0), + _max(0) {} + + void add(const XStatSamplerData& new_sample) { + _nsamples += new_sample._nsamples; + _sum += new_sample._sum; + _max = MAX2(_max, new_sample._max); + } +}; + +struct XStatCounterData { + uint64_t _counter; + + XStatCounterData() : + _counter(0) {} +}; + +// +// Stat sampler history +// +template +class XStatSamplerHistoryInterval { +private: + size_t _next; + XStatSamplerData _samples[size]; + XStatSamplerData _accumulated; + XStatSamplerData _total; + +public: + XStatSamplerHistoryInterval() : + _next(0), + _samples(), + _accumulated(), + _total() {} + + bool add(const XStatSamplerData& new_sample) { + // Insert sample + const XStatSamplerData old_sample = _samples[_next]; + _samples[_next] = new_sample; + + // Adjust accumulated + _accumulated._nsamples += new_sample._nsamples; + _accumulated._sum += new_sample._sum; + _accumulated._max = MAX2(_accumulated._max, new_sample._max); + + // Adjust total + _total._nsamples -= old_sample._nsamples; + _total._sum -= old_sample._sum; + _total._nsamples += new_sample._nsamples; + _total._sum += new_sample._sum; + if (_total._max < new_sample._max) { + // Found new max + _total._max = new_sample._max; + } else if (_total._max == old_sample._max) { + // Removed old max, reset and find new max + _total._max = 0; + for (size_t i = 0; i < size; i++) { + if (_total._max < _samples[i]._max) { + _total._max = _samples[i]._max; + } + } + } + + // Adjust next + if (++_next == size) { + _next = 0; + + // Clear accumulated + const XStatSamplerData zero; + _accumulated = zero; + + // Became full + return true; + } + + // Not yet full + return false; + } + + const XStatSamplerData& total() const { + return _total; + } + + const XStatSamplerData& accumulated() const { + return _accumulated; + } +}; + +class XStatSamplerHistory : public CHeapObj { +private: + XStatSamplerHistoryInterval<10> _10seconds; + XStatSamplerHistoryInterval<60> _10minutes; + XStatSamplerHistoryInterval<60> _10hours; + XStatSamplerData _total; + + uint64_t avg(uint64_t sum, uint64_t nsamples) const { + return (nsamples > 0) ? 
sum / nsamples : 0; + } + +public: + XStatSamplerHistory() : + _10seconds(), + _10minutes(), + _10hours(), + _total() {} + + void add(const XStatSamplerData& new_sample) { + if (_10seconds.add(new_sample)) { + if (_10minutes.add(_10seconds.total())) { + if (_10hours.add(_10minutes.total())) { + _total.add(_10hours.total()); + } + } + } + } + + uint64_t avg_10_seconds() const { + const uint64_t sum = _10seconds.total()._sum; + const uint64_t nsamples = _10seconds.total()._nsamples; + return avg(sum, nsamples); + } + + uint64_t avg_10_minutes() const { + const uint64_t sum = _10seconds.accumulated()._sum + + _10minutes.total()._sum; + const uint64_t nsamples = _10seconds.accumulated()._nsamples + + _10minutes.total()._nsamples; + return avg(sum, nsamples); + } + + uint64_t avg_10_hours() const { + const uint64_t sum = _10seconds.accumulated()._sum + + _10minutes.accumulated()._sum + + _10hours.total()._sum; + const uint64_t nsamples = _10seconds.accumulated()._nsamples + + _10minutes.accumulated()._nsamples + + _10hours.total()._nsamples; + return avg(sum, nsamples); + } + + uint64_t avg_total() const { + const uint64_t sum = _10seconds.accumulated()._sum + + _10minutes.accumulated()._sum + + _10hours.accumulated()._sum + + _total._sum; + const uint64_t nsamples = _10seconds.accumulated()._nsamples + + _10minutes.accumulated()._nsamples + + _10hours.accumulated()._nsamples + + _total._nsamples; + return avg(sum, nsamples); + } + + uint64_t max_10_seconds() const { + return _10seconds.total()._max; + } + + uint64_t max_10_minutes() const { + return MAX2(_10seconds.accumulated()._max, + _10minutes.total()._max); + } + + uint64_t max_10_hours() const { + return MAX3(_10seconds.accumulated()._max, + _10minutes.accumulated()._max, + _10hours.total()._max); + } + + uint64_t max_total() const { + return MAX4(_10seconds.accumulated()._max, + _10minutes.accumulated()._max, + _10hours.accumulated()._max, + _total._max); + } +}; + +// +// Stat unit printers +// +void XStatUnitTime(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history) { + log.print(" %10s: %-41s " + "%9.3f / %-9.3f " + "%9.3f / %-9.3f " + "%9.3f / %-9.3f " + "%9.3f / %-9.3f ms", + sampler.group(), + sampler.name(), + TimeHelper::counter_to_millis(history.avg_10_seconds()), + TimeHelper::counter_to_millis(history.max_10_seconds()), + TimeHelper::counter_to_millis(history.avg_10_minutes()), + TimeHelper::counter_to_millis(history.max_10_minutes()), + TimeHelper::counter_to_millis(history.avg_10_hours()), + TimeHelper::counter_to_millis(history.max_10_hours()), + TimeHelper::counter_to_millis(history.avg_total()), + TimeHelper::counter_to_millis(history.max_total())); +} + +void XStatUnitBytes(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history) { + log.print(" %10s: %-41s " + UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " + UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " + UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " + UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " MB", + sampler.group(), + sampler.name(), + history.avg_10_seconds() / M, + history.max_10_seconds() / M, + history.avg_10_minutes() / M, + history.max_10_minutes() / M, + history.avg_10_hours() / M, + history.max_10_hours() / M, + history.avg_total() / M, + history.max_total() / M); +} + +void XStatUnitThreads(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history) { + log.print(" %10s: %-41s " + UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " + UINT64_FORMAT_W(9) " / " 
UINT64_FORMAT_W(-9) " " + UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " + UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " threads", + sampler.group(), + sampler.name(), + history.avg_10_seconds(), + history.max_10_seconds(), + history.avg_10_minutes(), + history.max_10_minutes(), + history.avg_10_hours(), + history.max_10_hours(), + history.avg_total(), + history.max_total()); +} + +void XStatUnitBytesPerSecond(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history) { + log.print(" %10s: %-41s " + UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " + UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " + UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " + UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " MB/s", + sampler.group(), + sampler.name(), + history.avg_10_seconds() / M, + history.max_10_seconds() / M, + history.avg_10_minutes() / M, + history.max_10_minutes() / M, + history.avg_10_hours() / M, + history.max_10_hours() / M, + history.avg_total() / M, + history.max_total() / M); +} + +void XStatUnitOpsPerSecond(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history) { + log.print(" %10s: %-41s " + UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " + UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " + UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " + UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " ops/s", + sampler.group(), + sampler.name(), + history.avg_10_seconds(), + history.max_10_seconds(), + history.avg_10_minutes(), + history.max_10_minutes(), + history.avg_10_hours(), + history.max_10_hours(), + history.avg_total(), + history.max_total()); +} + +// +// Stat value +// +uintptr_t XStatValue::_base = 0; +uint32_t XStatValue::_cpu_offset = 0; + +XStatValue::XStatValue(const char* group, + const char* name, + uint32_t id, + uint32_t size) : + _group(group), + _name(name), + _id(id), + _offset(_cpu_offset) { + assert(_base == 0, "Already initialized"); + _cpu_offset += size; +} + +template +T* XStatValue::get_cpu_local(uint32_t cpu) const { + assert(_base != 0, "Not initialized"); + const uintptr_t cpu_base = _base + (_cpu_offset * cpu); + const uintptr_t value_addr = cpu_base + _offset; + return (T*)value_addr; +} + +void XStatValue::initialize() { + // Finalize and align CPU offset + _cpu_offset = align_up(_cpu_offset, (uint32_t)XCacheLineSize); + + // Allocation aligned memory + const size_t size = _cpu_offset * XCPU::count(); + _base = XUtils::alloc_aligned(XCacheLineSize, size); +} + +const char* XStatValue::group() const { + return _group; +} + +const char* XStatValue::name() const { + return _name; +} + +uint32_t XStatValue::id() const { + return _id; +} + +// +// Stat iterable value +// + +template +XStatIterableValue::XStatIterableValue(const char* group, + const char* name, + uint32_t size) : + XStatValue(group, name, _count++, size), + _next(insert()) {} + +template +T* XStatIterableValue::insert() const { + T* const next = _first; + _first = (T*)this; + return next; +} + +template +void XStatIterableValue::sort() { + T* first_unsorted = _first; + _first = NULL; + + while (first_unsorted != NULL) { + T* const value = first_unsorted; + first_unsorted = value->_next; + value->_next = NULL; + + T** current = &_first; + + while (*current != NULL) { + // First sort by group, then by name + const int group_cmp = strcmp((*current)->group(), value->group()); + if ((group_cmp > 0) || (group_cmp == 0 && strcmp((*current)->name(), value->name()) > 0)) { + break; + } + + current = &(*current)->_next; + } + value->_next = *current; + 
*current = value; + } +} + +// +// Stat sampler +// +XStatSampler::XStatSampler(const char* group, const char* name, XStatUnitPrinter printer) : + XStatIterableValue(group, name, sizeof(XStatSamplerData)), + _printer(printer) {} + +XStatSamplerData* XStatSampler::get() const { + return get_cpu_local(XCPU::id()); +} + +XStatSamplerData XStatSampler::collect_and_reset() const { + XStatSamplerData all; + + const uint32_t ncpus = XCPU::count(); + for (uint32_t i = 0; i < ncpus; i++) { + XStatSamplerData* const cpu_data = get_cpu_local(i); + if (cpu_data->_nsamples > 0) { + const uint64_t nsamples = Atomic::xchg(&cpu_data->_nsamples, (uint64_t)0); + const uint64_t sum = Atomic::xchg(&cpu_data->_sum, (uint64_t)0); + const uint64_t max = Atomic::xchg(&cpu_data->_max, (uint64_t)0); + all._nsamples += nsamples; + all._sum += sum; + if (all._max < max) { + all._max = max; + } + } + } + + return all; +} + +XStatUnitPrinter XStatSampler::printer() const { + return _printer; +} + +// +// Stat counter +// +XStatCounter::XStatCounter(const char* group, const char* name, XStatUnitPrinter printer) : + XStatIterableValue(group, name, sizeof(XStatCounterData)), + _sampler(group, name, printer) {} + +XStatCounterData* XStatCounter::get() const { + return get_cpu_local(XCPU::id()); +} + +void XStatCounter::sample_and_reset() const { + uint64_t counter = 0; + + const uint32_t ncpus = XCPU::count(); + for (uint32_t i = 0; i < ncpus; i++) { + XStatCounterData* const cpu_data = get_cpu_local(i); + counter += Atomic::xchg(&cpu_data->_counter, (uint64_t)0); + } + + XStatSample(_sampler, counter); +} + +// +// Stat unsampled counter +// +XStatUnsampledCounter::XStatUnsampledCounter(const char* name) : + XStatIterableValue("Unsampled", name, sizeof(XStatCounterData)) {} + +XStatCounterData* XStatUnsampledCounter::get() const { + return get_cpu_local(XCPU::id()); +} + +XStatCounterData XStatUnsampledCounter::collect_and_reset() const { + XStatCounterData all; + + const uint32_t ncpus = XCPU::count(); + for (uint32_t i = 0; i < ncpus; i++) { + XStatCounterData* const cpu_data = get_cpu_local(i); + all._counter += Atomic::xchg(&cpu_data->_counter, (uint64_t)0); + } + + return all; +} + +// +// Stat MMU (Minimum Mutator Utilization) +// +XStatMMUPause::XStatMMUPause() : + _start(0.0), + _end(0.0) {} + +XStatMMUPause::XStatMMUPause(const Ticks& start, const Ticks& end) : + _start(TimeHelper::counter_to_millis(start.value())), + _end(TimeHelper::counter_to_millis(end.value())) {} + +double XStatMMUPause::end() const { + return _end; +} + +double XStatMMUPause::overlap(double start, double end) const { + const double start_max = MAX2(start, _start); + const double end_min = MIN2(end, _end); + + if (end_min > start_max) { + // Overlap found + return end_min - start_max; + } + + // No overlap + return 0.0; +} + +size_t XStatMMU::_next = 0; +size_t XStatMMU::_npauses = 0; +XStatMMUPause XStatMMU::_pauses[200]; +double XStatMMU::_mmu_2ms = 100.0; +double XStatMMU::_mmu_5ms = 100.0; +double XStatMMU::_mmu_10ms = 100.0; +double XStatMMU::_mmu_20ms = 100.0; +double XStatMMU::_mmu_50ms = 100.0; +double XStatMMU::_mmu_100ms = 100.0; + +const XStatMMUPause& XStatMMU::pause(size_t index) { + return _pauses[(_next - index - 1) % ARRAY_SIZE(_pauses)]; +} + +double XStatMMU::calculate_mmu(double time_slice) { + const double end = pause(0).end(); + const double start = end - time_slice; + double time_paused = 0.0; + + // Find all overlapping pauses + for (size_t i = 0; i < _npauses; i++) { + const double overlap = pause(i).overlap(start, 
end); + if (overlap == 0.0) { + // No overlap + break; + } + + time_paused += overlap; + } + + // Calculate MMU + const double time_mutator = time_slice - time_paused; + return percent_of(time_mutator, time_slice); +} + +void XStatMMU::register_pause(const Ticks& start, const Ticks& end) { + // Add pause + const size_t index = _next++ % ARRAY_SIZE(_pauses); + _pauses[index] = XStatMMUPause(start, end); + _npauses = MIN2(_npauses + 1, ARRAY_SIZE(_pauses)); + + // Recalculate MMUs + _mmu_2ms = MIN2(_mmu_2ms, calculate_mmu(2)); + _mmu_5ms = MIN2(_mmu_5ms, calculate_mmu(5)); + _mmu_10ms = MIN2(_mmu_10ms, calculate_mmu(10)); + _mmu_20ms = MIN2(_mmu_20ms, calculate_mmu(20)); + _mmu_50ms = MIN2(_mmu_50ms, calculate_mmu(50)); + _mmu_100ms = MIN2(_mmu_100ms, calculate_mmu(100)); +} + +void XStatMMU::print() { + log_info(gc, mmu)("MMU: 2ms/%.1f%%, 5ms/%.1f%%, 10ms/%.1f%%, 20ms/%.1f%%, 50ms/%.1f%%, 100ms/%.1f%%", + _mmu_2ms, _mmu_5ms, _mmu_10ms, _mmu_20ms, _mmu_50ms, _mmu_100ms); +} + +// +// Stat phases +// +ConcurrentGCTimer XStatPhase::_timer; + +XStatPhase::XStatPhase(const char* group, const char* name) : + _sampler(group, name, XStatUnitTime) {} + +void XStatPhase::log_start(LogTargetHandle log, bool thread) const { + if (!log.is_enabled()) { + return; + } + + if (thread) { + ResourceMark rm; + log.print("%s (%s)", name(), Thread::current()->name()); + } else { + log.print("%s", name()); + } +} + +void XStatPhase::log_end(LogTargetHandle log, const Tickspan& duration, bool thread) const { + if (!log.is_enabled()) { + return; + } + + if (thread) { + ResourceMark rm; + log.print("%s (%s) %.3fms", name(), Thread::current()->name(), TimeHelper::counter_to_millis(duration.value())); + } else { + log.print("%s %.3fms", name(), TimeHelper::counter_to_millis(duration.value())); + } +} + +ConcurrentGCTimer* XStatPhase::timer() { + return &_timer; +} + +const char* XStatPhase::name() const { + return _sampler.name(); +} + +XStatPhaseCycle::XStatPhaseCycle(const char* name) : + XStatPhase("Collector", name) {} + +void XStatPhaseCycle::register_start(const Ticks& start) const { + timer()->register_gc_start(start); + + XTracer::tracer()->report_gc_start(XCollectedHeap::heap()->gc_cause(), start); + + XCollectedHeap::heap()->print_heap_before_gc(); + XCollectedHeap::heap()->trace_heap_before_gc(XTracer::tracer()); + + log_info(gc, start)("Garbage Collection (%s)", + GCCause::to_string(XCollectedHeap::heap()->gc_cause())); +} + +void XStatPhaseCycle::register_end(const Ticks& start, const Ticks& end) const { + if (XAbort::should_abort()) { + log_info(gc)("Garbage Collection (%s) Aborted", + GCCause::to_string(XCollectedHeap::heap()->gc_cause())); + return; + } + + timer()->register_gc_end(end); + + XCollectedHeap::heap()->print_heap_after_gc(); + XCollectedHeap::heap()->trace_heap_after_gc(XTracer::tracer()); + + XTracer::tracer()->report_gc_end(end, timer()->time_partitions()); + + const Tickspan duration = end - start; + XStatSample(_sampler, duration.value()); + + XStatLoad::print(); + XStatMMU::print(); + XStatMark::print(); + XStatNMethods::print(); + XStatMetaspace::print(); + XStatReferences::print(); + XStatRelocation::print(); + XStatHeap::print(); + + log_info(gc)("Garbage Collection (%s) " XSIZE_FMT "->" XSIZE_FMT, + GCCause::to_string(XCollectedHeap::heap()->gc_cause()), + XSIZE_ARGS(XStatHeap::used_at_mark_start()), + XSIZE_ARGS(XStatHeap::used_at_relocate_end())); +} + +Tickspan XStatPhasePause::_max; + +XStatPhasePause::XStatPhasePause(const char* name) : + XStatPhase("Phase", name) {} + +const 
Tickspan& XStatPhasePause::max() { + return _max; +} + +void XStatPhasePause::register_start(const Ticks& start) const { + timer()->register_gc_pause_start(name(), start); + + LogTarget(Debug, gc, phases, start) log; + log_start(log); +} + +void XStatPhasePause::register_end(const Ticks& start, const Ticks& end) const { + timer()->register_gc_pause_end(end); + + const Tickspan duration = end - start; + XStatSample(_sampler, duration.value()); + + // Track max pause time + if (_max < duration) { + _max = duration; + } + + // Track minimum mutator utilization + XStatMMU::register_pause(start, end); + + LogTarget(Info, gc, phases) log; + log_end(log, duration); +} + +XStatPhaseConcurrent::XStatPhaseConcurrent(const char* name) : + XStatPhase("Phase", name) {} + +void XStatPhaseConcurrent::register_start(const Ticks& start) const { + timer()->register_gc_concurrent_start(name(), start); + + LogTarget(Debug, gc, phases, start) log; + log_start(log); +} + +void XStatPhaseConcurrent::register_end(const Ticks& start, const Ticks& end) const { + if (XAbort::should_abort()) { + return; + } + + timer()->register_gc_concurrent_end(end); + + const Tickspan duration = end - start; + XStatSample(_sampler, duration.value()); + + LogTarget(Info, gc, phases) log; + log_end(log, duration); +} + +XStatSubPhase::XStatSubPhase(const char* name) : + XStatPhase("Subphase", name) {} + +void XStatSubPhase::register_start(const Ticks& start) const { + if (XThread::is_worker()) { + LogTarget(Trace, gc, phases, start) log; + log_start(log, true /* thread */); + } else { + LogTarget(Debug, gc, phases, start) log; + log_start(log, false /* thread */); + } +} + +void XStatSubPhase::register_end(const Ticks& start, const Ticks& end) const { + if (XAbort::should_abort()) { + return; + } + + XTracer::tracer()->report_thread_phase(name(), start, end); + + const Tickspan duration = end - start; + XStatSample(_sampler, duration.value()); + + if (XThread::is_worker()) { + LogTarget(Trace, gc, phases) log; + log_end(log, duration, true /* thread */); + } else { + LogTarget(Debug, gc, phases) log; + log_end(log, duration, false /* thread */); + } +} + +XStatCriticalPhase::XStatCriticalPhase(const char* name, bool verbose) : + XStatPhase("Critical", name), + _counter("Critical", name, XStatUnitOpsPerSecond), + _verbose(verbose) {} + +void XStatCriticalPhase::register_start(const Ticks& start) const { + // This is called from sensitive contexts, for example before an allocation stall + // has been resolved. This means we must not access any oops in here since that + // could lead to infinite recursion. Without access to the thread name we can't + // really log anything useful here. 
+} + +void XStatCriticalPhase::register_end(const Ticks& start, const Ticks& end) const { + XTracer::tracer()->report_thread_phase(name(), start, end); + + const Tickspan duration = end - start; + XStatSample(_sampler, duration.value()); + XStatInc(_counter); + + if (_verbose) { + LogTarget(Info, gc) log; + log_end(log, duration, true /* thread */); + } else { + LogTarget(Debug, gc) log; + log_end(log, duration, true /* thread */); + } +} + +// +// Stat timer +// +THREAD_LOCAL uint32_t XStatTimerDisable::_active = 0; + +// +// Stat sample/inc +// +void XStatSample(const XStatSampler& sampler, uint64_t value) { + XStatSamplerData* const cpu_data = sampler.get(); + Atomic::add(&cpu_data->_nsamples, 1u); + Atomic::add(&cpu_data->_sum, value); + + uint64_t max = cpu_data->_max; + for (;;) { + if (max >= value) { + // Not max + break; + } + + const uint64_t new_max = value; + const uint64_t prev_max = Atomic::cmpxchg(&cpu_data->_max, max, new_max); + if (prev_max == max) { + // Success + break; + } + + // Retry + max = prev_max; + } + + XTracer::tracer()->report_stat_sampler(sampler, value); +} + +void XStatInc(const XStatCounter& counter, uint64_t increment) { + XStatCounterData* const cpu_data = counter.get(); + const uint64_t value = Atomic::add(&cpu_data->_counter, increment); + + XTracer::tracer()->report_stat_counter(counter, increment, value); +} + +void XStatInc(const XStatUnsampledCounter& counter, uint64_t increment) { + XStatCounterData* const cpu_data = counter.get(); + Atomic::add(&cpu_data->_counter, increment); +} + +// +// Stat allocation rate +// +const XStatUnsampledCounter XStatAllocRate::_counter("Allocation Rate"); +TruncatedSeq XStatAllocRate::_samples(XStatAllocRate::sample_hz); +TruncatedSeq XStatAllocRate::_rate(XStatAllocRate::sample_hz); + +const XStatUnsampledCounter& XStatAllocRate::counter() { + return _counter; +} + +uint64_t XStatAllocRate::sample_and_reset() { + const XStatCounterData bytes_per_sample = _counter.collect_and_reset(); + _samples.add(bytes_per_sample._counter); + + const uint64_t bytes_per_second = _samples.sum(); + _rate.add(bytes_per_second); + + return bytes_per_second; +} + +double XStatAllocRate::predict() { + return _rate.predict_next(); +} + +double XStatAllocRate::avg() { + return _rate.avg(); +} + +double XStatAllocRate::sd() { + return _rate.sd(); +} + +// +// Stat thread +// +XStat::XStat() : + _metronome(sample_hz) { + set_name("XStat"); + create_and_start(); +} + +void XStat::sample_and_collect(XStatSamplerHistory* history) const { + // Sample counters + for (const XStatCounter* counter = XStatCounter::first(); counter != NULL; counter = counter->next()) { + counter->sample_and_reset(); + } + + // Collect samples + for (const XStatSampler* sampler = XStatSampler::first(); sampler != NULL; sampler = sampler->next()) { + XStatSamplerHistory& sampler_history = history[sampler->id()]; + sampler_history.add(sampler->collect_and_reset()); + } +} + +bool XStat::should_print(LogTargetHandle log) const { + static uint64_t print_at = ZStatisticsInterval; + const uint64_t now = os::elapsedTime(); + + if (now < print_at) { + return false; + } + + print_at = ((now / ZStatisticsInterval) * ZStatisticsInterval) + ZStatisticsInterval; + + return log.is_enabled(); +} + +void XStat::print(LogTargetHandle log, const XStatSamplerHistory* history) const { + // Print + log.print("=== Garbage Collection Statistics ======================================================================================================================="); + log.print(" Last 10s 
Last 10m Last 10h Total"); + log.print(" Avg / Max Avg / Max Avg / Max Avg / Max"); + + for (const XStatSampler* sampler = XStatSampler::first(); sampler != NULL; sampler = sampler->next()) { + const XStatSamplerHistory& sampler_history = history[sampler->id()]; + const XStatUnitPrinter printer = sampler->printer(); + printer(log, *sampler, sampler_history); + } + + log.print("========================================================================================================================================================="); +} + +void XStat::run_service() { + XStatSamplerHistory* const history = new XStatSamplerHistory[XStatSampler::count()]; + LogTarget(Info, gc, stats) log; + + XStatSampler::sort(); + + // Main loop + while (_metronome.wait_for_tick()) { + sample_and_collect(history); + if (should_print(log)) { + print(log, history); + } + } + + delete [] history; +} + +void XStat::stop_service() { + _metronome.stop(); +} + +// +// Stat table +// +class XStatTablePrinter { +private: + static const size_t _buffer_size = 256; + + const size_t _column0_width; + const size_t _columnN_width; + char _buffer[_buffer_size]; + +public: + class XColumn { + private: + char* const _buffer; + const size_t _position; + const size_t _width; + const size_t _width_next; + + XColumn next() const { + // Insert space between columns + _buffer[_position + _width] = ' '; + return XColumn(_buffer, _position + _width + 1, _width_next, _width_next); + } + + size_t print(size_t position, const char* fmt, va_list va) { + const int res = jio_vsnprintf(_buffer + position, _buffer_size - position, fmt, va); + if (res < 0) { + return 0; + } + + return (size_t)res; + } + + public: + XColumn(char* buffer, size_t position, size_t width, size_t width_next) : + _buffer(buffer), + _position(position), + _width(width), + _width_next(width_next) {} + + XColumn left(const char* fmt, ...) ATTRIBUTE_PRINTF(2, 3) { + va_list va; + + va_start(va, fmt); + const size_t written = print(_position, fmt, va); + va_end(va); + + if (written < _width) { + // Fill empty space + memset(_buffer + _position + written, ' ', _width - written); + } + + return next(); + } + + XColumn right(const char* fmt, ...) ATTRIBUTE_PRINTF(2, 3) { + va_list va; + + va_start(va, fmt); + const size_t written = print(_position, fmt, va); + va_end(va); + + if (written > _width) { + // Line too long + return fill('?'); + } + + if (written < _width) { + // Short line, move all to right + memmove(_buffer + _position + _width - written, _buffer + _position, written); + + // Fill empty space + memset(_buffer + _position, ' ', _width - written); + } + + return next(); + } + + XColumn center(const char* fmt, ...) 
ATTRIBUTE_PRINTF(2, 3) { + va_list va; + + va_start(va, fmt); + const size_t written = print(_position, fmt, va); + va_end(va); + + if (written > _width) { + // Line too long + return fill('?'); + } + + if (written < _width) { + // Short line, move all to center + const size_t start_space = (_width - written) / 2; + const size_t end_space = _width - written - start_space; + memmove(_buffer + _position + start_space, _buffer + _position, written); + + // Fill empty spaces + memset(_buffer + _position, ' ', start_space); + memset(_buffer + _position + start_space + written, ' ', end_space); + } + + return next(); + } + + XColumn fill(char filler = ' ') { + memset(_buffer + _position, filler, _width); + return next(); + } + + const char* end() { + _buffer[_position] = '\0'; + return _buffer; + } + }; + +public: + XStatTablePrinter(size_t column0_width, size_t columnN_width) : + _column0_width(column0_width), + _columnN_width(columnN_width) {} + + XColumn operator()() { + return XColumn(_buffer, 0, _column0_width, _columnN_width); + } +}; + +// +// Stat cycle +// +uint64_t XStatCycle::_nwarmup_cycles = 0; +Ticks XStatCycle::_start_of_last; +Ticks XStatCycle::_end_of_last; +NumberSeq XStatCycle::_serial_time(0.7 /* alpha */); +NumberSeq XStatCycle::_parallelizable_time(0.7 /* alpha */); +uint XStatCycle::_last_active_workers = 0; + +void XStatCycle::at_start() { + _start_of_last = Ticks::now(); +} + +void XStatCycle::at_end(GCCause::Cause cause, uint active_workers) { + _end_of_last = Ticks::now(); + + if (cause == GCCause::_z_warmup) { + _nwarmup_cycles++; + } + + _last_active_workers = active_workers; + + // Calculate serial and parallelizable GC cycle times + const double duration = (_end_of_last - _start_of_last).seconds(); + const double workers_duration = XStatWorkers::get_and_reset_duration(); + const double serial_time = duration - workers_duration; + const double parallelizable_time = workers_duration * active_workers; + _serial_time.add(serial_time); + _parallelizable_time.add(parallelizable_time); +} + +bool XStatCycle::is_warm() { + return _nwarmup_cycles >= 3; +} + +uint64_t XStatCycle::nwarmup_cycles() { + return _nwarmup_cycles; +} + +bool XStatCycle::is_time_trustable() { + // The times are considered trustable if we + // have completed at least one warmup cycle. 
+ return _nwarmup_cycles > 0; +} + +const AbsSeq& XStatCycle::serial_time() { + return _serial_time; +} + +const AbsSeq& XStatCycle::parallelizable_time() { + return _parallelizable_time; +} + +uint XStatCycle::last_active_workers() { + return _last_active_workers; +} + +double XStatCycle::time_since_last() { + if (_end_of_last.value() == 0) { + // No end recorded yet, return time since VM start + return os::elapsedTime(); + } + + const Ticks now = Ticks::now(); + const Tickspan time_since_last = now - _end_of_last; + return time_since_last.seconds(); +} + +// +// Stat workers +// +Ticks XStatWorkers::_start_of_last; +Tickspan XStatWorkers::_accumulated_duration; + +void XStatWorkers::at_start() { + _start_of_last = Ticks::now(); +} + +void XStatWorkers::at_end() { + const Ticks now = Ticks::now(); + const Tickspan duration = now - _start_of_last; + _accumulated_duration += duration; +} + +double XStatWorkers::get_and_reset_duration() { + const double duration = _accumulated_duration.seconds(); + const Ticks now = Ticks::now(); + _accumulated_duration = now - now; + return duration; +} + +// +// Stat load +// +void XStatLoad::print() { + double loadavg[3] = {}; + os::loadavg(loadavg, ARRAY_SIZE(loadavg)); + log_info(gc, load)("Load: %.2f/%.2f/%.2f", loadavg[0], loadavg[1], loadavg[2]); +} + +// +// Stat mark +// +size_t XStatMark::_nstripes; +size_t XStatMark::_nproactiveflush; +size_t XStatMark::_nterminateflush; +size_t XStatMark::_ntrycomplete; +size_t XStatMark::_ncontinue; +size_t XStatMark::_mark_stack_usage; + +void XStatMark::set_at_mark_start(size_t nstripes) { + _nstripes = nstripes; +} + +void XStatMark::set_at_mark_end(size_t nproactiveflush, + size_t nterminateflush, + size_t ntrycomplete, + size_t ncontinue) { + _nproactiveflush = nproactiveflush; + _nterminateflush = nterminateflush; + _ntrycomplete = ntrycomplete; + _ncontinue = ncontinue; +} + +void XStatMark::set_at_mark_free(size_t mark_stack_usage) { + _mark_stack_usage = mark_stack_usage; +} + +void XStatMark::print() { + log_info(gc, marking)("Mark: " + SIZE_FORMAT " stripe(s), " + SIZE_FORMAT " proactive flush(es), " + SIZE_FORMAT " terminate flush(es), " + SIZE_FORMAT " completion(s), " + SIZE_FORMAT " continuation(s) ", + _nstripes, + _nproactiveflush, + _nterminateflush, + _ntrycomplete, + _ncontinue); + + log_info(gc, marking)("Mark Stack Usage: " SIZE_FORMAT "M", _mark_stack_usage / M); +} + +// +// Stat relocation +// +XRelocationSetSelectorStats XStatRelocation::_selector_stats; +size_t XStatRelocation::_forwarding_usage; +size_t XStatRelocation::_small_in_place_count; +size_t XStatRelocation::_medium_in_place_count; + +void XStatRelocation::set_at_select_relocation_set(const XRelocationSetSelectorStats& selector_stats) { + _selector_stats = selector_stats; +} + +void XStatRelocation::set_at_install_relocation_set(size_t forwarding_usage) { + _forwarding_usage = forwarding_usage; +} + +void XStatRelocation::set_at_relocate_end(size_t small_in_place_count, size_t medium_in_place_count) { + _small_in_place_count = small_in_place_count; + _medium_in_place_count = medium_in_place_count; +} + +void XStatRelocation::print(const char* name, + const XRelocationSetSelectorGroupStats& selector_group, + size_t in_place_count) { + log_info(gc, reloc)("%s Pages: " SIZE_FORMAT " / " SIZE_FORMAT "M, Empty: " SIZE_FORMAT "M, " + "Relocated: " SIZE_FORMAT "M, In-Place: " SIZE_FORMAT, + name, + selector_group.npages_candidates(), + selector_group.total() / M, + selector_group.empty() / M, + selector_group.relocate() / M, + 
in_place_count); +} + +void XStatRelocation::print() { + print("Small", _selector_stats.small(), _small_in_place_count); + if (XPageSizeMedium != 0) { + print("Medium", _selector_stats.medium(), _medium_in_place_count); + } + print("Large", _selector_stats.large(), 0 /* in_place_count */); + + log_info(gc, reloc)("Forwarding Usage: " SIZE_FORMAT "M", _forwarding_usage / M); +} + +// +// Stat nmethods +// +void XStatNMethods::print() { + log_info(gc, nmethod)("NMethods: " SIZE_FORMAT " registered, " SIZE_FORMAT " unregistered", + XNMethodTable::registered_nmethods(), + XNMethodTable::unregistered_nmethods()); +} + +// +// Stat metaspace +// +void XStatMetaspace::print() { + MetaspaceCombinedStats stats = MetaspaceUtils::get_combined_statistics(); + log_info(gc, metaspace)("Metaspace: " + SIZE_FORMAT "M used, " + SIZE_FORMAT "M committed, " SIZE_FORMAT "M reserved", + stats.used() / M, + stats.committed() / M, + stats.reserved() / M); +} + +// +// Stat references +// +XStatReferences::XCount XStatReferences::_soft; +XStatReferences::XCount XStatReferences::_weak; +XStatReferences::XCount XStatReferences::_final; +XStatReferences::XCount XStatReferences::_phantom; + +void XStatReferences::set(XCount* count, size_t encountered, size_t discovered, size_t enqueued) { + count->encountered = encountered; + count->discovered = discovered; + count->enqueued = enqueued; +} + +void XStatReferences::set_soft(size_t encountered, size_t discovered, size_t enqueued) { + set(&_soft, encountered, discovered, enqueued); +} + +void XStatReferences::set_weak(size_t encountered, size_t discovered, size_t enqueued) { + set(&_weak, encountered, discovered, enqueued); +} + +void XStatReferences::set_final(size_t encountered, size_t discovered, size_t enqueued) { + set(&_final, encountered, discovered, enqueued); +} + +void XStatReferences::set_phantom(size_t encountered, size_t discovered, size_t enqueued) { + set(&_phantom, encountered, discovered, enqueued); +} + +void XStatReferences::print(const char* name, const XStatReferences::XCount& ref) { + log_info(gc, ref)("%s: " + SIZE_FORMAT " encountered, " + SIZE_FORMAT " discovered, " + SIZE_FORMAT " enqueued", + name, + ref.encountered, + ref.discovered, + ref.enqueued); +} + +void XStatReferences::print() { + print("Soft", _soft); + print("Weak", _weak); + print("Final", _final); + print("Phantom", _phantom); +} + +// +// Stat heap +// +XStatHeap::XAtInitialize XStatHeap::_at_initialize; +XStatHeap::XAtMarkStart XStatHeap::_at_mark_start; +XStatHeap::XAtMarkEnd XStatHeap::_at_mark_end; +XStatHeap::XAtRelocateStart XStatHeap::_at_relocate_start; +XStatHeap::XAtRelocateEnd XStatHeap::_at_relocate_end; + +size_t XStatHeap::capacity_high() { + return MAX4(_at_mark_start.capacity, + _at_mark_end.capacity, + _at_relocate_start.capacity, + _at_relocate_end.capacity); +} + +size_t XStatHeap::capacity_low() { + return MIN4(_at_mark_start.capacity, + _at_mark_end.capacity, + _at_relocate_start.capacity, + _at_relocate_end.capacity); +} + +size_t XStatHeap::free(size_t used) { + return _at_initialize.max_capacity - used; +} + +size_t XStatHeap::allocated(size_t used, size_t reclaimed) { + // The amount of allocated memory between point A and B is used(B) - used(A). + // However, we might also have reclaimed memory between point A and B. This + // means the current amount of used memory must be incremented by the amount + // reclaimed, so that used(B) represents the amount of used memory we would + // have had if we had not reclaimed anything. 
+ return (used + reclaimed) - _at_mark_start.used; +} + +size_t XStatHeap::garbage(size_t reclaimed) { + return _at_mark_end.garbage - reclaimed; +} + +void XStatHeap::set_at_initialize(const XPageAllocatorStats& stats) { + _at_initialize.min_capacity = stats.min_capacity(); + _at_initialize.max_capacity = stats.max_capacity(); +} + +void XStatHeap::set_at_mark_start(const XPageAllocatorStats& stats) { + _at_mark_start.soft_max_capacity = stats.soft_max_capacity(); + _at_mark_start.capacity = stats.capacity(); + _at_mark_start.free = free(stats.used()); + _at_mark_start.used = stats.used(); +} + +void XStatHeap::set_at_mark_end(const XPageAllocatorStats& stats) { + _at_mark_end.capacity = stats.capacity(); + _at_mark_end.free = free(stats.used()); + _at_mark_end.used = stats.used(); + _at_mark_end.allocated = allocated(stats.used(), 0 /* reclaimed */); +} + +void XStatHeap::set_at_select_relocation_set(const XRelocationSetSelectorStats& stats) { + const size_t live = stats.small().live() + stats.medium().live() + stats.large().live(); + _at_mark_end.live = live; + _at_mark_end.garbage = _at_mark_start.used - live; +} + +void XStatHeap::set_at_relocate_start(const XPageAllocatorStats& stats) { + _at_relocate_start.capacity = stats.capacity(); + _at_relocate_start.free = free(stats.used()); + _at_relocate_start.used = stats.used(); + _at_relocate_start.allocated = allocated(stats.used(), stats.reclaimed()); + _at_relocate_start.garbage = garbage(stats.reclaimed()); + _at_relocate_start.reclaimed = stats.reclaimed(); +} + +void XStatHeap::set_at_relocate_end(const XPageAllocatorStats& stats, size_t non_worker_relocated) { + const size_t reclaimed = stats.reclaimed() - MIN2(non_worker_relocated, stats.reclaimed()); + + _at_relocate_end.capacity = stats.capacity(); + _at_relocate_end.capacity_high = capacity_high(); + _at_relocate_end.capacity_low = capacity_low(); + _at_relocate_end.free = free(stats.used()); + _at_relocate_end.free_high = free(stats.used_low()); + _at_relocate_end.free_low = free(stats.used_high()); + _at_relocate_end.used = stats.used(); + _at_relocate_end.used_high = stats.used_high(); + _at_relocate_end.used_low = stats.used_low(); + _at_relocate_end.allocated = allocated(stats.used(), reclaimed); + _at_relocate_end.garbage = garbage(reclaimed); + _at_relocate_end.reclaimed = reclaimed; +} + +size_t XStatHeap::max_capacity() { + return _at_initialize.max_capacity; +} + +size_t XStatHeap::used_at_mark_start() { + return _at_mark_start.used; +} + +size_t XStatHeap::used_at_relocate_end() { + return _at_relocate_end.used; +} + +void XStatHeap::print() { + log_info(gc, heap)("Min Capacity: " + XSIZE_FMT, XSIZE_ARGS(_at_initialize.min_capacity)); + log_info(gc, heap)("Max Capacity: " + XSIZE_FMT, XSIZE_ARGS(_at_initialize.max_capacity)); + log_info(gc, heap)("Soft Max Capacity: " + XSIZE_FMT, XSIZE_ARGS(_at_mark_start.soft_max_capacity)); + + XStatTablePrinter table(10, 18); + log_info(gc, heap)("%s", table() + .fill() + .center("Mark Start") + .center("Mark End") + .center("Relocate Start") + .center("Relocate End") + .center("High") + .center("Low") + .end()); + log_info(gc, heap)("%s", table() + .right("Capacity:") + .left(XTABLE_ARGS(_at_mark_start.capacity)) + .left(XTABLE_ARGS(_at_mark_end.capacity)) + .left(XTABLE_ARGS(_at_relocate_start.capacity)) + .left(XTABLE_ARGS(_at_relocate_end.capacity)) + .left(XTABLE_ARGS(_at_relocate_end.capacity_high)) + .left(XTABLE_ARGS(_at_relocate_end.capacity_low)) + .end()); + log_info(gc, heap)("%s", table() + .right("Free:") + 
.left(XTABLE_ARGS(_at_mark_start.free)) + .left(XTABLE_ARGS(_at_mark_end.free)) + .left(XTABLE_ARGS(_at_relocate_start.free)) + .left(XTABLE_ARGS(_at_relocate_end.free)) + .left(XTABLE_ARGS(_at_relocate_end.free_high)) + .left(XTABLE_ARGS(_at_relocate_end.free_low)) + .end()); + log_info(gc, heap)("%s", table() + .right("Used:") + .left(XTABLE_ARGS(_at_mark_start.used)) + .left(XTABLE_ARGS(_at_mark_end.used)) + .left(XTABLE_ARGS(_at_relocate_start.used)) + .left(XTABLE_ARGS(_at_relocate_end.used)) + .left(XTABLE_ARGS(_at_relocate_end.used_high)) + .left(XTABLE_ARGS(_at_relocate_end.used_low)) + .end()); + log_info(gc, heap)("%s", table() + .right("Live:") + .left(XTABLE_ARGS_NA) + .left(XTABLE_ARGS(_at_mark_end.live)) + .left(XTABLE_ARGS(_at_mark_end.live /* Same as at mark end */)) + .left(XTABLE_ARGS(_at_mark_end.live /* Same as at mark end */)) + .left(XTABLE_ARGS_NA) + .left(XTABLE_ARGS_NA) + .end()); + log_info(gc, heap)("%s", table() + .right("Allocated:") + .left(XTABLE_ARGS_NA) + .left(XTABLE_ARGS(_at_mark_end.allocated)) + .left(XTABLE_ARGS(_at_relocate_start.allocated)) + .left(XTABLE_ARGS(_at_relocate_end.allocated)) + .left(XTABLE_ARGS_NA) + .left(XTABLE_ARGS_NA) + .end()); + log_info(gc, heap)("%s", table() + .right("Garbage:") + .left(XTABLE_ARGS_NA) + .left(XTABLE_ARGS(_at_mark_end.garbage)) + .left(XTABLE_ARGS(_at_relocate_start.garbage)) + .left(XTABLE_ARGS(_at_relocate_end.garbage)) + .left(XTABLE_ARGS_NA) + .left(XTABLE_ARGS_NA) + .end()); + log_info(gc, heap)("%s", table() + .right("Reclaimed:") + .left(XTABLE_ARGS_NA) + .left(XTABLE_ARGS_NA) + .left(XTABLE_ARGS(_at_relocate_start.reclaimed)) + .left(XTABLE_ARGS(_at_relocate_end.reclaimed)) + .left(XTABLE_ARGS_NA) + .left(XTABLE_ARGS_NA) + .end()); +} diff --git a/src/hotspot/share/gc/x/xStat.hpp b/src/hotspot/share/gc/x/xStat.hpp new file mode 100644 index 0000000000000..1ecaf9df49230 --- /dev/null +++ b/src/hotspot/share/gc/x/xStat.hpp @@ -0,0 +1,578 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+#ifndef SHARE_GC_X_XSTAT_HPP
+#define SHARE_GC_X_XSTAT_HPP
+
+#include "gc/shared/concurrentGCThread.hpp"
+#include "gc/shared/gcCause.hpp"
+#include "gc/shared/gcTimer.hpp"
+#include "gc/x/xMetronome.hpp"
+#include "logging/logHandle.hpp"
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/numberSeq.hpp"
+#include "utilities/ticks.hpp"
+
+class XPage;
+class XPageAllocatorStats;
+class XRelocationSetSelectorGroupStats;
+class XRelocationSetSelectorStats;
+class XStatSampler;
+class XStatSamplerHistory;
+struct XStatCounterData;
+struct XStatSamplerData;
+
+//
+// Stat unit printers
+//
+typedef void (*XStatUnitPrinter)(LogTargetHandle log, const XStatSampler&, const XStatSamplerHistory&);
+
+void XStatUnitTime(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history);
+void XStatUnitBytes(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history);
+void XStatUnitThreads(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history);
+void XStatUnitBytesPerSecond(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history);
+void XStatUnitOpsPerSecond(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history);
+
+//
+// Stat value
+//
+class XStatValue {
+private:
+  static uintptr_t _base;
+  static uint32_t  _cpu_offset;
+
+  const char* const _group;
+  const char* const _name;
+  const uint32_t    _id;
+  const uint32_t    _offset;
+
+protected:
+  XStatValue(const char* group,
+             const char* name,
+             uint32_t id,
+             uint32_t size);
+
+  template <typename T> T* get_cpu_local(uint32_t cpu) const;
+
+public:
+  static void initialize();
+
+  const char* group() const;
+  const char* name() const;
+  uint32_t id() const;
+};
+
+//
+// Stat iterable value
+//
+template <typename T>
+class XStatIterableValue : public XStatValue {
+private:
+  static uint32_t _count;
+  static T*       _first;
+
+  T* _next;
+
+  T* insert() const;
+
+protected:
+  XStatIterableValue(const char* group,
+                     const char* name,
+                     uint32_t size);
+
+public:
+  static void sort();
+
+  static uint32_t count() {
+    return _count;
+  }
+
+  static T* first() {
+    return _first;
+  }
+
+  T* next() const {
+    return _next;
+  }
+};
+
+template <typename T> uint32_t XStatIterableValue<T>::_count = 0;
+template <typename T> T* XStatIterableValue<T>::_first = NULL;
+
+//
+// Stat sampler
+//
+class XStatSampler : public XStatIterableValue<XStatSampler> {
+private:
+  const XStatUnitPrinter _printer;
+
+public:
+  XStatSampler(const char* group,
+               const char* name,
+               XStatUnitPrinter printer);
+
+  XStatSamplerData* get() const;
+  XStatSamplerData collect_and_reset() const;
+
+  XStatUnitPrinter printer() const;
+};
+
+//
+// Stat counter
+//
+class XStatCounter : public XStatIterableValue<XStatCounter> {
+private:
+  const XStatSampler _sampler;
+
+public:
+  XStatCounter(const char* group,
+               const char* name,
+               XStatUnitPrinter printer);
+
+  XStatCounterData* get() const;
+  void sample_and_reset() const;
+};
+
+//
+// Stat unsampled counter
+//
+class XStatUnsampledCounter : public XStatIterableValue<XStatUnsampledCounter> {
+public:
+  XStatUnsampledCounter(const char* name);
+
+  XStatCounterData* get() const;
+  XStatCounterData collect_and_reset() const;
+};
+
+//
+// Stat MMU (Minimum Mutator Utilization)
+//
+class XStatMMUPause {
+private:
+  double _start;
+  double _end;
+
+public:
+  XStatMMUPause();
+  XStatMMUPause(const Ticks& start, const Ticks& end);
+
+  double end() const;
+  double overlap(double start, double end) const;
+};
+
+class XStatMMU {
+private:
+  static size_t _next;
+ 
static size_t _npauses; + static XStatMMUPause _pauses[200]; // Record the last 200 pauses + + static double _mmu_2ms; + static double _mmu_5ms; + static double _mmu_10ms; + static double _mmu_20ms; + static double _mmu_50ms; + static double _mmu_100ms; + + static const XStatMMUPause& pause(size_t index); + static double calculate_mmu(double time_slice); + +public: + static void register_pause(const Ticks& start, const Ticks& end); + + static void print(); +}; + +// +// Stat phases +// +class XStatPhase { +private: + static ConcurrentGCTimer _timer; + +protected: + const XStatSampler _sampler; + + XStatPhase(const char* group, const char* name); + + void log_start(LogTargetHandle log, bool thread = false) const; + void log_end(LogTargetHandle log, const Tickspan& duration, bool thread = false) const; + +public: + static ConcurrentGCTimer* timer(); + + const char* name() const; + + virtual void register_start(const Ticks& start) const = 0; + virtual void register_end(const Ticks& start, const Ticks& end) const = 0; +}; + +class XStatPhaseCycle : public XStatPhase { +public: + XStatPhaseCycle(const char* name); + + virtual void register_start(const Ticks& start) const; + virtual void register_end(const Ticks& start, const Ticks& end) const; +}; + +class XStatPhasePause : public XStatPhase { +private: + static Tickspan _max; // Max pause time + +public: + XStatPhasePause(const char* name); + + static const Tickspan& max(); + + virtual void register_start(const Ticks& start) const; + virtual void register_end(const Ticks& start, const Ticks& end) const; +}; + +class XStatPhaseConcurrent : public XStatPhase { +public: + XStatPhaseConcurrent(const char* name); + + virtual void register_start(const Ticks& start) const; + virtual void register_end(const Ticks& start, const Ticks& end) const; +}; + +class XStatSubPhase : public XStatPhase { +public: + XStatSubPhase(const char* name); + + virtual void register_start(const Ticks& start) const; + virtual void register_end(const Ticks& start, const Ticks& end) const; +}; + +class XStatCriticalPhase : public XStatPhase { +private: + const XStatCounter _counter; + const bool _verbose; + +public: + XStatCriticalPhase(const char* name, bool verbose = true); + + virtual void register_start(const Ticks& start) const; + virtual void register_end(const Ticks& start, const Ticks& end) const; +}; + +// +// Stat timer +// +class XStatTimerDisable : public StackObj { +private: + static THREAD_LOCAL uint32_t _active; + +public: + XStatTimerDisable() { + _active++; + } + + ~XStatTimerDisable() { + _active--; + } + + static bool is_active() { + return _active > 0; + } +}; + +class XStatTimer : public StackObj { +private: + const bool _enabled; + const XStatPhase& _phase; + const Ticks _start; + +public: + XStatTimer(const XStatPhase& phase) : + _enabled(!XStatTimerDisable::is_active()), + _phase(phase), + _start(Ticks::now()) { + if (_enabled) { + _phase.register_start(_start); + } + } + + ~XStatTimer() { + if (_enabled) { + const Ticks end = Ticks::now(); + _phase.register_end(_start, end); + } + } +}; + +// +// Stat sample/increment +// +void XStatSample(const XStatSampler& sampler, uint64_t value); +void XStatInc(const XStatCounter& counter, uint64_t increment = 1); +void XStatInc(const XStatUnsampledCounter& counter, uint64_t increment = 1); + +// +// Stat allocation rate +// +class XStatAllocRate : public AllStatic { +private: + static const XStatUnsampledCounter _counter; + static TruncatedSeq _samples; + static TruncatedSeq _rate; + +public: + static const 
uint64_t sample_hz = 10; + + static const XStatUnsampledCounter& counter(); + static uint64_t sample_and_reset(); + + static double predict(); + static double avg(); + static double sd(); +}; + +// +// Stat thread +// +class XStat : public ConcurrentGCThread { +private: + static const uint64_t sample_hz = 1; + + XMetronome _metronome; + + void sample_and_collect(XStatSamplerHistory* history) const; + bool should_print(LogTargetHandle log) const; + void print(LogTargetHandle log, const XStatSamplerHistory* history) const; + +protected: + virtual void run_service(); + virtual void stop_service(); + +public: + XStat(); +}; + +// +// Stat cycle +// +class XStatCycle : public AllStatic { +private: + static uint64_t _nwarmup_cycles; + static Ticks _start_of_last; + static Ticks _end_of_last; + static NumberSeq _serial_time; + static NumberSeq _parallelizable_time; + static uint _last_active_workers; + +public: + static void at_start(); + static void at_end(GCCause::Cause cause, uint active_workers); + + static bool is_warm(); + static uint64_t nwarmup_cycles(); + + static bool is_time_trustable(); + static const AbsSeq& serial_time(); + static const AbsSeq& parallelizable_time(); + + static uint last_active_workers(); + + static double time_since_last(); +}; + +// +// Stat workers +// +class XStatWorkers : public AllStatic { +private: + static Ticks _start_of_last; + static Tickspan _accumulated_duration; + +public: + static void at_start(); + static void at_end(); + + static double get_and_reset_duration(); +}; + +// +// Stat load +// +class XStatLoad : public AllStatic { +public: + static void print(); +}; + +// +// Stat mark +// +class XStatMark : public AllStatic { +private: + static size_t _nstripes; + static size_t _nproactiveflush; + static size_t _nterminateflush; + static size_t _ntrycomplete; + static size_t _ncontinue; + static size_t _mark_stack_usage; + +public: + static void set_at_mark_start(size_t nstripes); + static void set_at_mark_end(size_t nproactiveflush, + size_t nterminateflush, + size_t ntrycomplete, + size_t ncontinue); + static void set_at_mark_free(size_t mark_stack_usage); + + static void print(); +}; + +// +// Stat relocation +// +class XStatRelocation : public AllStatic { +private: + static XRelocationSetSelectorStats _selector_stats; + static size_t _forwarding_usage; + static size_t _small_in_place_count; + static size_t _medium_in_place_count; + + static void print(const char* name, + const XRelocationSetSelectorGroupStats& selector_group, + size_t in_place_count); + +public: + static void set_at_select_relocation_set(const XRelocationSetSelectorStats& selector_stats); + static void set_at_install_relocation_set(size_t forwarding_usage); + static void set_at_relocate_end(size_t small_in_place_count, size_t medium_in_place_count); + + static void print(); +}; + +// +// Stat nmethods +// +class XStatNMethods : public AllStatic { +public: + static void print(); +}; + +// +// Stat metaspace +// +class XStatMetaspace : public AllStatic { +public: + static void print(); +}; + +// +// Stat references +// +class XStatReferences : public AllStatic { +private: + static struct XCount { + size_t encountered; + size_t discovered; + size_t enqueued; + } _soft, _weak, _final, _phantom; + + static void set(XCount* count, size_t encountered, size_t discovered, size_t enqueued); + static void print(const char* name, const XCount& ref); + +public: + static void set_soft(size_t encountered, size_t discovered, size_t enqueued); + static void set_weak(size_t encountered, size_t 
discovered, size_t enqueued); + static void set_final(size_t encountered, size_t discovered, size_t enqueued); + static void set_phantom(size_t encountered, size_t discovered, size_t enqueued); + + static void print(); +}; + +// +// Stat heap +// +class XStatHeap : public AllStatic { +private: + static struct XAtInitialize { + size_t min_capacity; + size_t max_capacity; + } _at_initialize; + + static struct XAtMarkStart { + size_t soft_max_capacity; + size_t capacity; + size_t free; + size_t used; + } _at_mark_start; + + static struct XAtMarkEnd { + size_t capacity; + size_t free; + size_t used; + size_t live; + size_t allocated; + size_t garbage; + } _at_mark_end; + + static struct XAtRelocateStart { + size_t capacity; + size_t free; + size_t used; + size_t allocated; + size_t garbage; + size_t reclaimed; + } _at_relocate_start; + + static struct XAtRelocateEnd { + size_t capacity; + size_t capacity_high; + size_t capacity_low; + size_t free; + size_t free_high; + size_t free_low; + size_t used; + size_t used_high; + size_t used_low; + size_t allocated; + size_t garbage; + size_t reclaimed; + } _at_relocate_end; + + static size_t capacity_high(); + static size_t capacity_low(); + static size_t free(size_t used); + static size_t allocated(size_t used, size_t reclaimed); + static size_t garbage(size_t reclaimed); + +public: + static void set_at_initialize(const XPageAllocatorStats& stats); + static void set_at_mark_start(const XPageAllocatorStats& stats); + static void set_at_mark_end(const XPageAllocatorStats& stats); + static void set_at_select_relocation_set(const XRelocationSetSelectorStats& stats); + static void set_at_relocate_start(const XPageAllocatorStats& stats); + static void set_at_relocate_end(const XPageAllocatorStats& stats, size_t non_worker_relocated); + + static size_t max_capacity(); + static size_t used_at_mark_start(); + static size_t used_at_relocate_end(); + + static void print(); +}; + +#endif // SHARE_GC_X_XSTAT_HPP diff --git a/src/hotspot/share/gc/x/xTask.cpp b/src/hotspot/share/gc/x/xTask.cpp new file mode 100644 index 0000000000000..25f6d12f33dbb --- /dev/null +++ b/src/hotspot/share/gc/x/xTask.cpp @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/x/xTask.hpp" +#include "gc/x/xThread.hpp" + +XTask::Task::Task(XTask* task, const char* name) : + WorkerTask(name), + _task(task) {} + +void XTask::Task::work(uint worker_id) { + XThread::set_worker_id(worker_id); + _task->work(); + XThread::clear_worker_id(); +} + +XTask::XTask(const char* name) : + _worker_task(this, name) {} + +const char* XTask::name() const { + return _worker_task.name(); +} + +WorkerTask* XTask::worker_task() { + return &_worker_task; +} diff --git a/src/hotspot/share/gc/x/xTask.hpp b/src/hotspot/share/gc/x/xTask.hpp new file mode 100644 index 0000000000000..08adaed83e596 --- /dev/null +++ b/src/hotspot/share/gc/x/xTask.hpp @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XTASK_HPP +#define SHARE_GC_X_XTASK_HPP + +#include "gc/shared/workerThread.hpp" +#include "memory/allocation.hpp" + +class XTask : public StackObj { +private: + class Task : public WorkerTask { + private: + XTask* const _task; + + public: + Task(XTask* task, const char* name); + + virtual void work(uint worker_id); + }; + + Task _worker_task; + +public: + XTask(const char* name); + + const char* name() const; + WorkerTask* worker_task(); + + virtual void work() = 0; +}; + +#endif // SHARE_GC_X_XTASK_HPP diff --git a/src/hotspot/share/gc/x/xThread.cpp b/src/hotspot/share/gc/x/xThread.cpp new file mode 100644 index 0000000000000..fb9785690cff3 --- /dev/null +++ b/src/hotspot/share/gc/x/xThread.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/x/xThread.inline.hpp" +#include "runtime/javaThread.hpp" +#include "runtime/nonJavaThread.hpp" +#include "utilities/debug.hpp" + +THREAD_LOCAL bool XThread::_initialized; +THREAD_LOCAL uintptr_t XThread::_id; +THREAD_LOCAL bool XThread::_is_vm; +THREAD_LOCAL bool XThread::_is_java; +THREAD_LOCAL bool XThread::_is_worker; +THREAD_LOCAL uint XThread::_worker_id; + +void XThread::initialize() { + assert(!_initialized, "Already initialized"); + const Thread* const thread = Thread::current(); + _initialized = true; + _id = (uintptr_t)thread; + _is_vm = thread->is_VM_thread(); + _is_java = thread->is_Java_thread(); + _is_worker = false; + _worker_id = (uint)-1; +} + +const char* XThread::name() { + const Thread* const thread = Thread::current(); + if (thread->is_Named_thread()) { + const NamedThread* const named = (const NamedThread*)thread; + return named->name(); + } else if (thread->is_Java_thread()) { + return "Java"; + } + + return "Unknown"; +} + +void XThread::set_worker() { + ensure_initialized(); + _is_worker = true; +} + +bool XThread::has_worker_id() { + return _initialized && + _is_worker && + _worker_id != (uint)-1; +} + +void XThread::set_worker_id(uint worker_id) { + ensure_initialized(); + assert(!has_worker_id(), "Worker id already initialized"); + _worker_id = worker_id; +} + +void XThread::clear_worker_id() { + assert(has_worker_id(), "Worker id not initialized"); + _worker_id = (uint)-1; +} diff --git a/src/hotspot/share/gc/x/xThread.hpp b/src/hotspot/share/gc/x/xThread.hpp new file mode 100644 index 0000000000000..24df6ce1ca24d --- /dev/null +++ b/src/hotspot/share/gc/x/xThread.hpp @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XTHREAD_HPP +#define SHARE_GC_X_XTHREAD_HPP + +#include "memory/allStatic.hpp" +#include "utilities/globalDefinitions.hpp" + +class XThread : public AllStatic { + friend class XTask; + friend class XWorkersInitializeTask; + friend class XRuntimeWorkersInitializeTask; + +private: + static THREAD_LOCAL bool _initialized; + static THREAD_LOCAL uintptr_t _id; + static THREAD_LOCAL bool _is_vm; + static THREAD_LOCAL bool _is_java; + static THREAD_LOCAL bool _is_worker; + static THREAD_LOCAL uint _worker_id; + + static void initialize(); + static void ensure_initialized(); + + static void set_worker(); + + static bool has_worker_id(); + static void set_worker_id(uint worker_id); + static void clear_worker_id(); + +public: + static const char* name(); + static uintptr_t id(); + static bool is_vm(); + static bool is_java(); + static bool is_worker(); + static uint worker_id(); +}; + +#endif // SHARE_GC_X_XTHREAD_HPP diff --git a/src/hotspot/share/gc/z/zThread.inline.hpp b/src/hotspot/share/gc/x/xThread.inline.hpp similarity index 79% rename from src/hotspot/share/gc/z/zThread.inline.hpp rename to src/hotspot/share/gc/x/xThread.inline.hpp index 1a88648fc753a..eb6ff63e5f7be 100644 --- a/src/hotspot/share/gc/z/zThread.inline.hpp +++ b/src/hotspot/share/gc/x/xThread.inline.hpp @@ -21,42 +21,42 @@ * questions. */ -#ifndef SHARE_GC_Z_ZTHREAD_INLINE_HPP -#define SHARE_GC_Z_ZTHREAD_INLINE_HPP +#ifndef SHARE_GC_X_XTHREAD_INLINE_HPP +#define SHARE_GC_X_XTHREAD_INLINE_HPP -#include "gc/z/zThread.hpp" +#include "gc/x/xThread.hpp" #include "utilities/debug.hpp" -inline void ZThread::ensure_initialized() { +inline void XThread::ensure_initialized() { if (!_initialized) { initialize(); } } -inline uintptr_t ZThread::id() { +inline uintptr_t XThread::id() { ensure_initialized(); return _id; } -inline bool ZThread::is_vm() { +inline bool XThread::is_vm() { ensure_initialized(); return _is_vm; } -inline bool ZThread::is_java() { +inline bool XThread::is_java() { ensure_initialized(); return _is_java; } -inline bool ZThread::is_worker() { +inline bool XThread::is_worker() { ensure_initialized(); return _is_worker; } -inline uint ZThread::worker_id() { +inline uint XThread::worker_id() { assert(has_worker_id(), "Worker id not initialized"); return _worker_id; } -#endif // SHARE_GC_Z_ZTHREAD_INLINE_HPP +#endif // SHARE_GC_X_XTHREAD_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xThreadLocalAllocBuffer.cpp b/src/hotspot/share/gc/x/xThreadLocalAllocBuffer.cpp new file mode 100644 index 0000000000000..594a7799d7d7f --- /dev/null +++ b/src/hotspot/share/gc/x/xThreadLocalAllocBuffer.cpp @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/tlab_globals.hpp"
+#include "gc/x/xAddress.inline.hpp"
+#include "gc/x/xStackWatermark.hpp"
+#include "gc/x/xThreadLocalAllocBuffer.hpp"
+#include "gc/x/xValue.inline.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/javaThread.hpp"
+#include "runtime/stackWatermarkSet.inline.hpp"
+
+XPerWorker<ThreadLocalAllocStats>* XThreadLocalAllocBuffer::_stats = NULL;
+
+void XThreadLocalAllocBuffer::initialize() {
+  if (UseTLAB) {
+    assert(_stats == NULL, "Already initialized");
+    _stats = new XPerWorker<ThreadLocalAllocStats>();
+    reset_statistics();
+  }
+}
+
+void XThreadLocalAllocBuffer::reset_statistics() {
+  if (UseTLAB) {
+    XPerWorkerIterator<ThreadLocalAllocStats> iter(_stats);
+    for (ThreadLocalAllocStats* stats; iter.next(&stats);) {
+      stats->reset();
+    }
+  }
+}
+
+void XThreadLocalAllocBuffer::publish_statistics() {
+  if (UseTLAB) {
+    ThreadLocalAllocStats total;
+
+    XPerWorkerIterator<ThreadLocalAllocStats> iter(_stats);
+    for (ThreadLocalAllocStats* stats; iter.next(&stats);) {
+      total.update(*stats);
+    }
+
+    total.publish();
+  }
+}
+
+static void fixup_address(HeapWord** p) {
+  *p = (HeapWord*)XAddress::good_or_null((uintptr_t)*p);
+}
+
+void XThreadLocalAllocBuffer::retire(JavaThread* thread, ThreadLocalAllocStats* stats) {
+  if (UseTLAB) {
+    stats->reset();
+    thread->tlab().addresses_do(fixup_address);
+    thread->tlab().retire(stats);
+    if (ResizeTLAB) {
+      thread->tlab().resize();
+    }
+  }
+}
+
+void XThreadLocalAllocBuffer::remap(JavaThread* thread) {
+  if (UseTLAB) {
+    thread->tlab().addresses_do(fixup_address);
+  }
+}
+
+void XThreadLocalAllocBuffer::update_stats(JavaThread* thread) {
+  if (UseTLAB) {
+    XStackWatermark* const watermark = StackWatermarkSet::get<XStackWatermark>(thread, StackWatermarkKind::gc);
+    _stats->addr()->update(watermark->stats());
+  }
+}
diff --git a/src/hotspot/share/gc/x/xThreadLocalAllocBuffer.hpp b/src/hotspot/share/gc/x/xThreadLocalAllocBuffer.hpp
new file mode 100644
index 0000000000000..521f4da19096a
--- /dev/null
+++ b/src/hotspot/share/gc/x/xThreadLocalAllocBuffer.hpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_X_XTHREADLOCALALLOCBUFFER_HPP
+#define SHARE_GC_X_XTHREADLOCALALLOCBUFFER_HPP
+
+#include "gc/shared/threadLocalAllocBuffer.hpp"
+#include "gc/x/xValue.hpp"
+#include "memory/allStatic.hpp"
+
+class JavaThread;
+
+class XThreadLocalAllocBuffer : public AllStatic {
+private:
+  static XPerWorker<ThreadLocalAllocStats>* _stats;
+
+public:
+  static void initialize();
+
+  static void reset_statistics();
+  static void publish_statistics();
+
+  static void retire(JavaThread* thread, ThreadLocalAllocStats* stats);
+  static void remap(JavaThread* thread);
+  static void update_stats(JavaThread* thread);
+};
+
+#endif // SHARE_GC_X_XTHREADLOCALALLOCBUFFER_HPP
diff --git a/src/hotspot/share/gc/x/xThreadLocalData.hpp b/src/hotspot/share/gc/x/xThreadLocalData.hpp
new file mode 100644
index 0000000000000..b4abaadd09ce2
--- /dev/null
+++ b/src/hotspot/share/gc/x/xThreadLocalData.hpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_X_XTHREADLOCALDATA_HPP
+#define SHARE_GC_X_XTHREADLOCALDATA_HPP
+
+#include "gc/x/xMarkStack.hpp"
+#include "gc/x/xGlobals.hpp"
+#include "runtime/javaThread.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/sizes.hpp"
+
+class XThreadLocalData {
+private:
+  uintptr_t              _address_bad_mask;
+  XMarkThreadLocalStacks _stacks;
+  oop*                   _invisible_root;
+
+  XThreadLocalData() :
+      _address_bad_mask(0),
+      _stacks(),
+      _invisible_root(NULL) {}
+
+  static XThreadLocalData* data(Thread* thread) {
+    return thread->gc_data<XThreadLocalData>();
+  }
+
+public:
+  static void create(Thread* thread) {
+    new (data(thread)) XThreadLocalData();
+  }
+
+  static void destroy(Thread* thread) {
+    data(thread)->~XThreadLocalData();
+  }
+
+  static void set_address_bad_mask(Thread* thread, uintptr_t mask) {
+    data(thread)->_address_bad_mask = mask;
+  }
+
+  static XMarkThreadLocalStacks* stacks(Thread* thread) {
+    return &data(thread)->_stacks;
+  }
+
+  static void set_invisible_root(Thread* thread, oop* root) {
+    assert(data(thread)->_invisible_root == NULL, "Already set");
+    data(thread)->_invisible_root = root;
+  }
+
+  static void clear_invisible_root(Thread* thread) {
+    assert(data(thread)->_invisible_root != NULL, "Should be set");
+    data(thread)->_invisible_root = NULL;
+  }
+
+  template <typename T>
+  static void do_invisible_root(Thread* thread, T f) {
+    if (data(thread)->_invisible_root != NULL) {
+      f(data(thread)->_invisible_root);
+    }
+  }
+
+  static ByteSize address_bad_mask_offset() {
+    return Thread::gc_data_offset() + byte_offset_of(XThreadLocalData, _address_bad_mask);
+  }
+
+  static ByteSize nmethod_disarmed_offset() {
+    return address_bad_mask_offset() + in_ByteSize(XAddressBadMaskHighOrderBitsOffset);
+  }
+};
+
+#endif // SHARE_GC_X_XTHREADLOCALDATA_HPP
diff --git a/src/hotspot/share/gc/x/xTracer.cpp b/src/hotspot/share/gc/x/xTracer.cpp
new file mode 100644
index 0000000000000..6db2e0bcc9a22
--- /dev/null
+++ b/src/hotspot/share/gc/x/xTracer.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */ + +#include "precompiled.hpp" +#include "gc/shared/gcId.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xTracer.hpp" +#include "jfr/jfrEvents.hpp" +#include "runtime/safepointVerifiers.hpp" +#include "utilities/debug.hpp" +#include "utilities/macros.hpp" +#if INCLUDE_JFR +#include "jfr/metadata/jfrSerializer.hpp" +#endif + +#if INCLUDE_JFR + +class XPageTypeConstant : public JfrSerializer { +public: + virtual void serialize(JfrCheckpointWriter& writer) { + writer.write_count(3); + writer.write_key(XPageTypeSmall); + writer.write("Small"); + writer.write_key(XPageTypeMedium); + writer.write("Medium"); + writer.write_key(XPageTypeLarge); + writer.write("Large"); + } +}; + +class XStatisticsCounterTypeConstant : public JfrSerializer { +public: + virtual void serialize(JfrCheckpointWriter& writer) { + writer.write_count(XStatCounter::count()); + for (XStatCounter* counter = XStatCounter::first(); counter != NULL; counter = counter->next()) { + writer.write_key(counter->id()); + writer.write(counter->name()); + } + } +}; + +class XStatisticsSamplerTypeConstant : public JfrSerializer { +public: + virtual void serialize(JfrCheckpointWriter& writer) { + writer.write_count(XStatSampler::count()); + for (XStatSampler* sampler = XStatSampler::first(); sampler != NULL; sampler = sampler->next()) { + writer.write_key(sampler->id()); + writer.write(sampler->name()); + } + } +}; + +static void register_jfr_type_serializers() { + JfrSerializer::register_serializer(TYPE_ZPAGETYPETYPE, + true /* permit_cache */, + new XPageTypeConstant()); + JfrSerializer::register_serializer(TYPE_ZSTATISTICSCOUNTERTYPE, + true /* permit_cache */, + new XStatisticsCounterTypeConstant()); + JfrSerializer::register_serializer(TYPE_ZSTATISTICSSAMPLERTYPE, + true /* permit_cache */, + new XStatisticsSamplerTypeConstant()); +} + +#endif // INCLUDE_JFR + +XTracer* XTracer::_tracer = NULL; + +XTracer::XTracer() : + GCTracer(Z) {} + +void XTracer::initialize() { + assert(_tracer == NULL, "Already initialized"); + _tracer = new XTracer(); + JFR_ONLY(register_jfr_type_serializers()); +} + +void XTracer::send_stat_counter(const XStatCounter& counter, uint64_t increment, uint64_t value) { + NoSafepointVerifier nsv; + + EventZStatisticsCounter e; + if (e.should_commit()) { + e.set_id(counter.id()); + e.set_increment(increment); + e.set_value(value); + e.commit(); + } +} + +void XTracer::send_stat_sampler(const XStatSampler& sampler, uint64_t value) { + NoSafepointVerifier nsv; + + EventZStatisticsSampler e; + if (e.should_commit()) { + e.set_id(sampler.id()); + e.set_value(value); + e.commit(); + } +} + +void XTracer::send_thread_phase(const char* name, const Ticks& start, const Ticks& end) { + NoSafepointVerifier nsv; + + EventZThreadPhase e(UNTIMED); + if (e.should_commit()) { + e.set_gcId(GCId::current_or_undefined()); + e.set_name(name); + e.set_starttime(start); + e.set_endtime(end); + e.commit(); + } +} + +void XTracer::send_thread_debug(const char* name, const Ticks& start, const Ticks& end) { + NoSafepointVerifier nsv; + + EventZThreadDebug e(UNTIMED); + if (e.should_commit()) { + e.set_gcId(GCId::current_or_undefined()); + e.set_name(name); + e.set_starttime(start); + e.set_endtime(end); + e.commit(); + } +} diff --git a/src/hotspot/share/gc/x/xTracer.hpp b/src/hotspot/share/gc/x/xTracer.hpp new file mode 100644 index 0000000000000..d9219d79c51f3 --- /dev/null +++ b/src/hotspot/share/gc/x/xTracer.hpp @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2016, 2020, Oracle and/or its affiliates. 
All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_X_XTRACER_HPP
+#define SHARE_GC_X_XTRACER_HPP
+
+#include "gc/shared/gcTrace.hpp"
+
+class XStatCounter;
+class XStatPhase;
+class XStatSampler;
+
+class XTracer : public GCTracer, public CHeapObj<mtGC> {
+private:
+  static XTracer* _tracer;
+
+  XTracer();
+
+  void send_stat_counter(const XStatCounter& counter, uint64_t increment, uint64_t value);
+  void send_stat_sampler(const XStatSampler& sampler, uint64_t value);
+  void send_thread_phase(const char* name, const Ticks& start, const Ticks& end);
+  void send_thread_debug(const char* name, const Ticks& start, const Ticks& end);
+
+public:
+  static XTracer* tracer();
+  static void initialize();
+
+  void report_stat_counter(const XStatCounter& counter, uint64_t increment, uint64_t value);
+  void report_stat_sampler(const XStatSampler& sampler, uint64_t value);
+  void report_thread_phase(const char* name, const Ticks& start, const Ticks& end);
+  void report_thread_debug(const char* name, const Ticks& start, const Ticks& end);
+};
+
+// For temporary latency measurements during development and debugging
+class XTraceThreadDebug : public StackObj {
+private:
+  const Ticks       _start;
+  const char* const _name;
+
+public:
+  XTraceThreadDebug(const char* name);
+  ~XTraceThreadDebug();
+};
+
+#endif // SHARE_GC_X_XTRACER_HPP
diff --git a/src/hotspot/share/gc/x/xTracer.inline.hpp b/src/hotspot/share/gc/x/xTracer.inline.hpp
new file mode 100644
index 0000000000000..22dd2e2b6fb43
--- /dev/null
+++ b/src/hotspot/share/gc/x/xTracer.inline.hpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XTRACER_INLINE_HPP +#define SHARE_GC_X_XTRACER_INLINE_HPP + +#include "gc/x/xTracer.hpp" + +#include "jfr/jfrEvents.hpp" + +inline XTracer* XTracer::tracer() { + return _tracer; +} + +inline void XTracer::report_stat_counter(const XStatCounter& counter, uint64_t increment, uint64_t value) { + if (EventZStatisticsCounter::is_enabled()) { + send_stat_counter(counter, increment, value); + } +} + +inline void XTracer::report_stat_sampler(const XStatSampler& sampler, uint64_t value) { + if (EventZStatisticsSampler::is_enabled()) { + send_stat_sampler(sampler, value); + } +} + +inline void XTracer::report_thread_phase(const char* name, const Ticks& start, const Ticks& end) { + if (EventZThreadPhase::is_enabled()) { + send_thread_phase(name, start, end); + } +} + +inline void XTracer::report_thread_debug(const char* name, const Ticks& start, const Ticks& end) { + if (EventZThreadDebug::is_enabled()) { + send_thread_debug(name, start, end); + } +} + +inline XTraceThreadDebug::XTraceThreadDebug(const char* name) : + _start(Ticks::now()), + _name(name) {} + +inline XTraceThreadDebug::~XTraceThreadDebug() { + XTracer::tracer()->report_thread_debug(_name, _start, Ticks::now()); +} + +#endif // SHARE_GC_X_XTRACER_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xUncommitter.cpp b/src/hotspot/share/gc/x/xUncommitter.cpp new file mode 100644 index 0000000000000..ffd57b8c2a8e1 --- /dev/null +++ b/src/hotspot/share/gc/x/xUncommitter.cpp @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/x/xHeap.inline.hpp" +#include "gc/x/xLock.inline.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xUncommitter.hpp" +#include "jfr/jfrEvents.hpp" +#include "logging/log.hpp" + +static const XStatCounter XCounterUncommit("Memory", "Uncommit", XStatUnitBytesPerSecond); + +XUncommitter::XUncommitter(XPageAllocator* page_allocator) : + _page_allocator(page_allocator), + _lock(), + _stop(false) { + set_name("XUncommitter"); + create_and_start(); +} + +bool XUncommitter::wait(uint64_t timeout) const { + XLocker locker(&_lock); + while (!ZUncommit && !_stop) { + _lock.wait(); + } + + if (!_stop && timeout > 0) { + log_debug(gc, heap)("Uncommit Timeout: " UINT64_FORMAT "s", timeout); + _lock.wait(timeout * MILLIUNITS); + } + + return !_stop; +} + +bool XUncommitter::should_continue() const { + XLocker locker(&_lock); + return !_stop; +} + +void XUncommitter::run_service() { + uint64_t timeout = 0; + + while (wait(timeout)) { + EventZUncommit event; + size_t uncommitted = 0; + + while (should_continue()) { + // Uncommit chunk + const size_t flushed = _page_allocator->uncommit(&timeout); + if (flushed == 0) { + // Done + break; + } + + uncommitted += flushed; + } + + if (uncommitted > 0) { + // Update statistics + XStatInc(XCounterUncommit, uncommitted); + log_info(gc, heap)("Uncommitted: " SIZE_FORMAT "M(%.0f%%)", + uncommitted / M, percent_of(uncommitted, XHeap::heap()->max_capacity())); + + // Send event + event.commit(uncommitted); + } + } +} + +void XUncommitter::stop_service() { + XLocker locker(&_lock); + _stop = true; + _lock.notify_all(); +} diff --git a/src/hotspot/share/gc/x/xUncommitter.hpp b/src/hotspot/share/gc/x/xUncommitter.hpp new file mode 100644 index 0000000000000..9f6212fa98db9 --- /dev/null +++ b/src/hotspot/share/gc/x/xUncommitter.hpp @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+#ifndef SHARE_GC_X_XUNCOMMITTER_HPP
+#define SHARE_GC_X_XUNCOMMITTER_HPP
+
+#include "gc/shared/concurrentGCThread.hpp"
+#include "gc/x/xLock.hpp"
+
+class XPageAllocator;
+
+class XUncommitter : public ConcurrentGCThread {
+private:
+  XPageAllocator* const _page_allocator;
+  mutable XConditionLock _lock;
+  bool _stop;
+
+  bool wait(uint64_t timeout) const;
+  bool should_continue() const;
+
+protected:
+  virtual void run_service();
+  virtual void stop_service();
+
+public:
+  XUncommitter(XPageAllocator* page_allocator);
+};
+
+#endif // SHARE_GC_X_XUNCOMMITTER_HPP
diff --git a/src/hotspot/share/gc/x/xUnload.cpp b/src/hotspot/share/gc/x/xUnload.cpp
new file mode 100644
index 0000000000000..66bdf2e222b72
--- /dev/null
+++ b/src/hotspot/share/gc/x/xUnload.cpp
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */ + +#include "precompiled.hpp" +#include "classfile/classLoaderDataGraph.hpp" +#include "classfile/systemDictionary.hpp" +#include "code/codeBehaviours.hpp" +#include "code/codeCache.hpp" +#include "code/dependencyContext.hpp" +#include "gc/shared/gcBehaviours.hpp" +#include "gc/shared/suspendibleThreadSet.hpp" +#include "gc/x/xBarrier.inline.hpp" +#include "gc/x/xLock.inline.hpp" +#include "gc/x/xNMethod.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xUnload.hpp" +#include "memory/metaspaceUtils.hpp" +#include "oops/access.inline.hpp" + +static const XStatSubPhase XSubPhaseConcurrentClassesUnlink("Concurrent Classes Unlink"); +static const XStatSubPhase XSubPhaseConcurrentClassesPurge("Concurrent Classes Purge"); + +class XPhantomIsAliveObjectClosure : public BoolObjectClosure { +public: + virtual bool do_object_b(oop o) { + return XBarrier::is_alive_barrier_on_phantom_oop(o); + } +}; + +class XIsUnloadingOopClosure : public OopClosure { +private: + XPhantomIsAliveObjectClosure _is_alive; + bool _is_unloading; + +public: + XIsUnloadingOopClosure() : + _is_alive(), + _is_unloading(false) {} + + virtual void do_oop(oop* p) { + const oop o = RawAccess<>::oop_load(p); + if (o != NULL && !_is_alive.do_object_b(o)) { + _is_unloading = true; + } + } + + virtual void do_oop(narrowOop* p) { + ShouldNotReachHere(); + } + + bool is_unloading() const { + return _is_unloading; + } +}; + +class XIsUnloadingBehaviour : public IsUnloadingBehaviour { +public: + virtual bool has_dead_oop(CompiledMethod* method) const { + nmethod* const nm = method->as_nmethod(); + XReentrantLock* const lock = XNMethod::lock_for_nmethod(nm); + XLocker locker(lock); + XIsUnloadingOopClosure cl; + XNMethod::nmethod_oops_do_inner(nm, &cl); + return cl.is_unloading(); + } +}; + +class XCompiledICProtectionBehaviour : public CompiledICProtectionBehaviour { +public: + virtual bool lock(CompiledMethod* method) { + nmethod* const nm = method->as_nmethod(); + XReentrantLock* const lock = XNMethod::lock_for_nmethod(nm); + lock->lock(); + return true; + } + + virtual void unlock(CompiledMethod* method) { + nmethod* const nm = method->as_nmethod(); + XReentrantLock* const lock = XNMethod::lock_for_nmethod(nm); + lock->unlock(); + } + + virtual bool is_safe(CompiledMethod* method) { + if (SafepointSynchronize::is_at_safepoint()) { + return true; + } + + nmethod* const nm = method->as_nmethod(); + XReentrantLock* const lock = XNMethod::lock_for_nmethod(nm); + return lock->is_owned(); + } +}; + +XUnload::XUnload(XWorkers* workers) : + _workers(workers) { + + if (!ClassUnloading) { + return; + } + + static XIsUnloadingBehaviour is_unloading_behaviour; + IsUnloadingBehaviour::set_current(&is_unloading_behaviour); + + static XCompiledICProtectionBehaviour ic_protection_behaviour; + CompiledICProtectionBehaviour::set_current(&ic_protection_behaviour); +} + +void XUnload::prepare() { + if (!ClassUnloading) { + return; + } + + CodeCache::increment_unloading_cycle(); + DependencyContext::cleaning_start(); +} + +void XUnload::unlink() { + if (!ClassUnloading) { + return; + } + + XStatTimer timer(XSubPhaseConcurrentClassesUnlink); + SuspendibleThreadSetJoiner sts; + bool unloading_occurred; + + { + MutexLocker ml(ClassLoaderDataGraph_lock); + unloading_occurred = SystemDictionary::do_unloading(XStatPhase::timer()); + } + + Klass::clean_weak_klass_links(unloading_occurred); + XNMethod::unlink(_workers, unloading_occurred); + DependencyContext::cleaning_end(); +} + +void XUnload::purge() { + if (!ClassUnloading) { + return; + } + + XStatTimer 
timer(XSubPhaseConcurrentClassesPurge); + + { + SuspendibleThreadSetJoiner sts; + XNMethod::purge(); + } + + ClassLoaderDataGraph::purge(/*at_safepoint*/false); + CodeCache::purge_exception_caches(); +} + +void XUnload::finish() { + // Resize and verify metaspace + MetaspaceGC::compute_new_size(); + DEBUG_ONLY(MetaspaceUtils::verify();) +} diff --git a/src/hotspot/share/gc/x/xUnload.hpp b/src/hotspot/share/gc/x/xUnload.hpp new file mode 100644 index 0000000000000..df6ba7ed2eb71 --- /dev/null +++ b/src/hotspot/share/gc/x/xUnload.hpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XUNLOAD_HPP +#define SHARE_GC_X_XUNLOAD_HPP + +class XWorkers; + +class XUnload { +private: + XWorkers* const _workers; + +public: + XUnload(XWorkers* workers); + + void prepare(); + void unlink(); + void purge(); + void finish(); +}; + +#endif // SHARE_GC_X_XUNLOAD_HPP diff --git a/src/hotspot/share/gc/x/xUnmapper.cpp b/src/hotspot/share/gc/x/xUnmapper.cpp new file mode 100644 index 0000000000000..baa09769074a6 --- /dev/null +++ b/src/hotspot/share/gc/x/xUnmapper.cpp @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/x/xList.inline.hpp" +#include "gc/x/xLock.inline.hpp" +#include "gc/x/xPage.inline.hpp" +#include "gc/x/xPageAllocator.hpp" +#include "gc/x/xUnmapper.hpp" +#include "jfr/jfrEvents.hpp" +#include "runtime/globals.hpp" + +XUnmapper::XUnmapper(XPageAllocator* page_allocator) : + _page_allocator(page_allocator), + _lock(), + _queue(), + _stop(false) { + set_name("XUnmapper"); + create_and_start(); +} + +XPage* XUnmapper::dequeue() { + XLocker locker(&_lock); + + for (;;) { + if (_stop) { + return NULL; + } + + XPage* const page = _queue.remove_first(); + if (page != NULL) { + return page; + } + + _lock.wait(); + } +} + +void XUnmapper::do_unmap_and_destroy_page(XPage* page) const { + EventZUnmap event; + const size_t unmapped = page->size(); + + // Unmap and destroy + _page_allocator->unmap_page(page); + _page_allocator->destroy_page(page); + + // Send event + event.commit(unmapped); +} + +void XUnmapper::unmap_and_destroy_page(XPage* page) { + // Asynchronous unmap and destroy is not supported with ZVerifyViews + if (ZVerifyViews) { + // Immediately unmap and destroy + do_unmap_and_destroy_page(page); + } else { + // Enqueue for asynchronous unmap and destroy + XLocker locker(&_lock); + _queue.insert_last(page); + _lock.notify_all(); + } +} + +void XUnmapper::run_service() { + for (;;) { + XPage* const page = dequeue(); + if (page == NULL) { + // Stop + return; + } + + do_unmap_and_destroy_page(page); + } +} + +void XUnmapper::stop_service() { + XLocker locker(&_lock); + _stop = true; + _lock.notify_all(); +} diff --git a/src/hotspot/share/gc/x/xUnmapper.hpp b/src/hotspot/share/gc/x/xUnmapper.hpp new file mode 100644 index 0000000000000..9a2651ea6ebc7 --- /dev/null +++ b/src/hotspot/share/gc/x/xUnmapper.hpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+#ifndef SHARE_GC_X_XUNMAPPER_HPP
+#define SHARE_GC_X_XUNMAPPER_HPP
+
+#include "gc/shared/concurrentGCThread.hpp"
+#include "gc/x/xList.hpp"
+#include "gc/x/xLock.hpp"
+
+class XPage;
+class XPageAllocator;
+
+class XUnmapper : public ConcurrentGCThread {
+private:
+  XPageAllocator* const _page_allocator;
+  XConditionLock _lock;
+  XList<XPage> _queue;
+  bool _stop;
+
+  XPage* dequeue();
+  void do_unmap_and_destroy_page(XPage* page) const;
+
+protected:
+  virtual void run_service();
+  virtual void stop_service();
+
+public:
+  XUnmapper(XPageAllocator* page_allocator);
+
+  void unmap_and_destroy_page(XPage* page);
+};
+
+#endif // SHARE_GC_X_XUNMAPPER_HPP
diff --git a/src/hotspot/share/gc/x/xUtils.hpp b/src/hotspot/share/gc/x/xUtils.hpp
new file mode 100644
index 0000000000000..26f14c0e98f78
--- /dev/null
+++ b/src/hotspot/share/gc/x/xUtils.hpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_X_XUTILS_HPP
+#define SHARE_GC_X_XUTILS_HPP
+
+#include "memory/allStatic.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class XUtils : public AllStatic {
+public:
+  // Allocation
+  static uintptr_t alloc_aligned(size_t alignment, size_t size);
+
+  // Size conversion
+  static size_t bytes_to_words(size_t size_in_words);
+  static size_t words_to_bytes(size_t size_in_words);
+
+  // Object
+  static size_t object_size(uintptr_t addr);
+  static void object_copy_disjoint(uintptr_t from, uintptr_t to, size_t size);
+  static void object_copy_conjoint(uintptr_t from, uintptr_t to, size_t size);
+};
+
+#endif // SHARE_GC_X_XUTILS_HPP
diff --git a/src/hotspot/share/gc/x/xUtils.inline.hpp b/src/hotspot/share/gc/x/xUtils.inline.hpp
new file mode 100644
index 0000000000000..09180959311d8
--- /dev/null
+++ b/src/hotspot/share/gc/x/xUtils.inline.hpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XUTILS_INLINE_HPP +#define SHARE_GC_X_XUTILS_INLINE_HPP + +#include "gc/x/xUtils.hpp" + +#include "gc/x/xOop.inline.hpp" +#include "oops/oop.inline.hpp" +#include "utilities/align.hpp" +#include "utilities/copy.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" + +inline size_t XUtils::bytes_to_words(size_t size_in_bytes) { + assert(is_aligned(size_in_bytes, BytesPerWord), "Size not word aligned"); + return size_in_bytes >> LogBytesPerWord; +} + +inline size_t XUtils::words_to_bytes(size_t size_in_words) { + return size_in_words << LogBytesPerWord; +} + +inline size_t XUtils::object_size(uintptr_t addr) { + return words_to_bytes(XOop::from_address(addr)->size()); +} + +inline void XUtils::object_copy_disjoint(uintptr_t from, uintptr_t to, size_t size) { + Copy::aligned_disjoint_words((HeapWord*)from, (HeapWord*)to, bytes_to_words(size)); +} + +inline void XUtils::object_copy_conjoint(uintptr_t from, uintptr_t to, size_t size) { + if (from != to) { + Copy::aligned_conjoint_words((HeapWord*)from, (HeapWord*)to, bytes_to_words(size)); + } +} + +#endif // SHARE_GC_X_XUTILS_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xValue.hpp b/src/hotspot/share/gc/x/xValue.hpp new file mode 100644 index 0000000000000..4b2838c8a2c28 --- /dev/null +++ b/src/hotspot/share/gc/x/xValue.hpp @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XVALUE_HPP +#define SHARE_GC_X_XVALUE_HPP + +#include "memory/allStatic.hpp" +#include "utilities/globalDefinitions.hpp" + +// +// Storage +// + +template +class XValueStorage : public AllStatic { +private: + static uintptr_t _top; + static uintptr_t _end; + +public: + static const size_t offset = 4 * K; + + static uintptr_t alloc(size_t size); +}; + +class XContendedStorage : public XValueStorage { +public: + static size_t alignment(); + static uint32_t count(); + static uint32_t id(); +}; + +class XPerCPUStorage : public XValueStorage { +public: + static size_t alignment(); + static uint32_t count(); + static uint32_t id(); +}; + +class XPerNUMAStorage : public XValueStorage { +public: + static size_t alignment(); + static uint32_t count(); + static uint32_t id(); +}; + +class XPerWorkerStorage : public XValueStorage { +public: + static size_t alignment(); + static uint32_t count(); + static uint32_t id(); +}; + +// +// Value +// + +template +class XValue : public CHeapObj { +private: + const uintptr_t _addr; + + uintptr_t value_addr(uint32_t value_id) const; + +public: + XValue(); + XValue(const T& value); + + const T* addr(uint32_t value_id = S::id()) const; + T* addr(uint32_t value_id = S::id()); + + const T& get(uint32_t value_id = S::id()) const; + T& get(uint32_t value_id = S::id()); + + void set(const T& value, uint32_t value_id = S::id()); + void set_all(const T& value); +}; + +template using XContended = XValue; +template using XPerCPU = XValue; +template using XPerNUMA = XValue; +template using XPerWorker = XValue; + +// +// Iterator +// + +template +class XValueIterator { +private: + XValue* const _value; + uint32_t _value_id; + +public: + XValueIterator(XValue* value); + + bool next(T** value); +}; + +template using XPerCPUIterator = XValueIterator; +template using XPerNUMAIterator = XValueIterator; +template using XPerWorkerIterator = XValueIterator; + +template +class XValueConstIterator { +private: + const XValue* const _value; + uint32_t _value_id; + +public: + XValueConstIterator(const XValue* value); + + bool next(const T** value); +}; + +template using XPerCPUConstIterator = XValueConstIterator; +template using XPerNUMAConstIterator = XValueConstIterator; +template using XPerWorkerConstIterator = XValueConstIterator; + +#endif // SHARE_GC_X_XVALUE_HPP diff --git a/src/hotspot/share/gc/x/xValue.inline.hpp b/src/hotspot/share/gc/x/xValue.inline.hpp new file mode 100644 index 0000000000000..1b12eb7d55525 --- /dev/null +++ b/src/hotspot/share/gc/x/xValue.inline.hpp @@ -0,0 +1,210 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XVALUE_INLINE_HPP +#define SHARE_GC_X_XVALUE_INLINE_HPP + +#include "gc/x/xValue.hpp" + +#include "gc/shared/gc_globals.hpp" +#include "gc/x/xCPU.inline.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xNUMA.hpp" +#include "gc/x/xThread.inline.hpp" +#include "gc/x/xUtils.hpp" +#include "runtime/globals.hpp" +#include "utilities/align.hpp" + +// +// Storage +// + +template uintptr_t XValueStorage::_end = 0; +template uintptr_t XValueStorage::_top = 0; + +template +uintptr_t XValueStorage::alloc(size_t size) { + assert(size <= offset, "Allocation too large"); + + // Allocate entry in existing memory block + const uintptr_t addr = align_up(_top, S::alignment()); + _top = addr + size; + + if (_top < _end) { + // Success + return addr; + } + + // Allocate new block of memory + const size_t block_alignment = offset; + const size_t block_size = offset * S::count(); + _top = XUtils::alloc_aligned(block_alignment, block_size); + _end = _top + offset; + + // Retry allocation + return alloc(size); +} + +inline size_t XContendedStorage::alignment() { + return XCacheLineSize; +} + +inline uint32_t XContendedStorage::count() { + return 1; +} + +inline uint32_t XContendedStorage::id() { + return 0; +} + +inline size_t XPerCPUStorage::alignment() { + return sizeof(uintptr_t); +} + +inline uint32_t XPerCPUStorage::count() { + return XCPU::count(); +} + +inline uint32_t XPerCPUStorage::id() { + return XCPU::id(); +} + +inline size_t XPerNUMAStorage::alignment() { + return sizeof(uintptr_t); +} + +inline uint32_t XPerNUMAStorage::count() { + return XNUMA::count(); +} + +inline uint32_t XPerNUMAStorage::id() { + return XNUMA::id(); +} + +inline size_t XPerWorkerStorage::alignment() { + return sizeof(uintptr_t); +} + +inline uint32_t XPerWorkerStorage::count() { + return UseDynamicNumberOfGCThreads ? 
ConcGCThreads : MAX2(ConcGCThreads, ParallelGCThreads); +} + +inline uint32_t XPerWorkerStorage::id() { + return XThread::worker_id(); +} + +// +// Value +// + +template +inline uintptr_t XValue::value_addr(uint32_t value_id) const { + return _addr + (value_id * S::offset); +} + +template +inline XValue::XValue() : + _addr(S::alloc(sizeof(T))) { + // Initialize all instances + XValueIterator iter(this); + for (T* addr; iter.next(&addr);) { + ::new (addr) T; + } +} + +template +inline XValue::XValue(const T& value) : + _addr(S::alloc(sizeof(T))) { + // Initialize all instances + XValueIterator iter(this); + for (T* addr; iter.next(&addr);) { + ::new (addr) T(value); + } +} + +template +inline const T* XValue::addr(uint32_t value_id) const { + return reinterpret_cast(value_addr(value_id)); +} + +template +inline T* XValue::addr(uint32_t value_id) { + return reinterpret_cast(value_addr(value_id)); +} + +template +inline const T& XValue::get(uint32_t value_id) const { + return *addr(value_id); +} + +template +inline T& XValue::get(uint32_t value_id) { + return *addr(value_id); +} + +template +inline void XValue::set(const T& value, uint32_t value_id) { + get(value_id) = value; +} + +template +inline void XValue::set_all(const T& value) { + XValueIterator iter(this); + for (T* addr; iter.next(&addr);) { + *addr = value; + } +} + +// +// Iterator +// + +template +inline XValueIterator::XValueIterator(XValue* value) : + _value(value), + _value_id(0) {} + +template +inline bool XValueIterator::next(T** value) { + if (_value_id < S::count()) { + *value = _value->addr(_value_id++); + return true; + } + return false; +} + +template +inline XValueConstIterator::XValueConstIterator(const XValue* value) : + _value(value), + _value_id(0) {} + +template +inline bool XValueConstIterator::next(const T** value) { + if (_value_id < S::count()) { + *value = _value->addr(_value_id++); + return true; + } + return false; +} + +#endif // SHARE_GC_X_XVALUE_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xVerify.cpp b/src/hotspot/share/gc/x/xVerify.cpp new file mode 100644 index 0000000000000..ed3e224091c6b --- /dev/null +++ b/src/hotspot/share/gc/x/xVerify.cpp @@ -0,0 +1,415 @@ +/* + * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "classfile/classLoaderData.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/x/xAddress.inline.hpp" +#include "gc/x/xHeap.inline.hpp" +#include "gc/x/xNMethod.hpp" +#include "gc/x/xOop.hpp" +#include "gc/x/xPageAllocator.hpp" +#include "gc/x/xResurrection.hpp" +#include "gc/x/xRootsIterator.hpp" +#include "gc/x/xStackWatermark.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xVerify.hpp" +#include "memory/iterator.inline.hpp" +#include "memory/resourceArea.hpp" +#include "oops/oop.hpp" +#include "runtime/frame.inline.hpp" +#include "runtime/globals.hpp" +#include "runtime/handles.hpp" +#include "runtime/javaThread.hpp" +#include "runtime/safepoint.hpp" +#include "runtime/stackFrameStream.inline.hpp" +#include "runtime/stackWatermark.inline.hpp" +#include "runtime/stackWatermarkSet.inline.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/preserveException.hpp" + +#define BAD_OOP_ARG(o, p) "Bad oop " PTR_FORMAT " found at " PTR_FORMAT, p2i(o), p2i(p) + +static void z_verify_oop(oop* p) { + const oop o = RawAccess<>::oop_load(p); + if (o != NULL) { + const uintptr_t addr = XOop::to_address(o); + guarantee(XAddress::is_good(addr), BAD_OOP_ARG(o, p)); + guarantee(oopDesc::is_oop(XOop::from_address(addr)), BAD_OOP_ARG(o, p)); + } +} + +static void z_verify_possibly_weak_oop(oop* p) { + const oop o = RawAccess<>::oop_load(p); + if (o != NULL) { + const uintptr_t addr = XOop::to_address(o); + guarantee(XAddress::is_good(addr) || XAddress::is_finalizable_good(addr), BAD_OOP_ARG(o, p)); + guarantee(oopDesc::is_oop(XOop::from_address(XAddress::good(addr))), BAD_OOP_ARG(o, p)); + } +} + +class XVerifyRootClosure : public OopClosure { +private: + const bool _verify_fixed; + +public: + XVerifyRootClosure(bool verify_fixed) : + _verify_fixed(verify_fixed) {} + + virtual void do_oop(oop* p) { + if (_verify_fixed) { + z_verify_oop(p); + } else { + // Don't know the state of the oop. + oop obj = *p; + obj = NativeAccess::oop_load(&obj); + z_verify_oop(&obj); + } + } + + virtual void do_oop(narrowOop*) { + ShouldNotReachHere(); + } + + bool verify_fixed() const { + return _verify_fixed; + } +}; + +class XVerifyCodeBlobClosure : public CodeBlobToOopClosure { +public: + XVerifyCodeBlobClosure(XVerifyRootClosure* _cl) : + CodeBlobToOopClosure(_cl, false /* fix_relocations */) {} + + virtual void do_code_blob(CodeBlob* cb) { + CodeBlobToOopClosure::do_code_blob(cb); + } +}; + +class XVerifyStack : public OopClosure { +private: + XVerifyRootClosure* const _cl; + JavaThread* const _jt; + uint64_t _last_good; + bool _verifying_bad_frames; + +public: + XVerifyStack(XVerifyRootClosure* cl, JavaThread* jt) : + _cl(cl), + _jt(jt), + _last_good(0), + _verifying_bad_frames(false) { + XStackWatermark* const stack_watermark = StackWatermarkSet::get(jt, StackWatermarkKind::gc); + + if (_cl->verify_fixed()) { + assert(stack_watermark->processing_started(), "Should already have been fixed"); + assert(stack_watermark->processing_completed(), "Should already have been fixed"); + } else { + // We don't really know the state of the stack, verify watermark. 
+ if (!stack_watermark->processing_started()) { + _verifying_bad_frames = true; + } else { + // Not time yet to verify bad frames + _last_good = stack_watermark->last_processed(); + } + } + } + + void do_oop(oop* p) { + if (_verifying_bad_frames) { + const oop obj = *p; + guarantee(!XAddress::is_good(XOop::to_address(obj)), BAD_OOP_ARG(obj, p)); + } + _cl->do_oop(p); + } + + void do_oop(narrowOop* p) { + ShouldNotReachHere(); + } + + void prepare_next_frame(frame& frame) { + if (_cl->verify_fixed()) { + // All frames need to be good + return; + } + + // The verification has two modes, depending on whether we have reached the + // last processed frame or not. Before it is reached, we expect everything to + // be good. After reaching it, we expect everything to be bad. + const uintptr_t sp = reinterpret_cast(frame.sp()); + + if (!_verifying_bad_frames && sp == _last_good) { + // Found the last good frame, now verify the bad ones + _verifying_bad_frames = true; + } + } + + void verify_frames() { + XVerifyCodeBlobClosure cb_cl(_cl); + for (StackFrameStream frames(_jt, true /* update */, false /* process_frames */); + !frames.is_done(); + frames.next()) { + frame& frame = *frames.current(); + frame.oops_do(this, &cb_cl, frames.register_map(), DerivedPointerIterationMode::_ignore); + prepare_next_frame(frame); + } + } +}; + +class XVerifyOopClosure : public ClaimMetadataVisitingOopIterateClosure { +private: + const bool _verify_weaks; + +public: + XVerifyOopClosure(bool verify_weaks) : + ClaimMetadataVisitingOopIterateClosure(ClassLoaderData::_claim_other), + _verify_weaks(verify_weaks) {} + + virtual void do_oop(oop* p) { + if (_verify_weaks) { + z_verify_possibly_weak_oop(p); + } else { + // We should never encounter finalizable oops through strong + // paths. This assumes we have only visited strong roots. + z_verify_oop(p); + } + } + + virtual void do_oop(narrowOop* p) { + ShouldNotReachHere(); + } + + virtual ReferenceIterationMode reference_iteration_mode() { + return _verify_weaks ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT; + } + + // Don't follow this metadata when verifying oops + virtual void do_method(Method* m) {} + virtual void do_nmethod(nmethod* nm) {} +}; + +typedef ClaimingCLDToOopClosure XVerifyCLDClosure; + +class XVerifyThreadClosure : public ThreadClosure { +private: + XVerifyRootClosure* const _cl; + +public: + XVerifyThreadClosure(XVerifyRootClosure* cl) : + _cl(cl) {} + + virtual void do_thread(Thread* thread) { + thread->oops_do_no_frames(_cl, NULL); + + JavaThread* const jt = JavaThread::cast(thread); + if (!jt->has_last_Java_frame()) { + return; + } + + XVerifyStack verify_stack(_cl, jt); + verify_stack.verify_frames(); + } +}; + +class XVerifyNMethodClosure : public NMethodClosure { +private: + OopClosure* const _cl; + BarrierSetNMethod* const _bs_nm; + const bool _verify_fixed; + + bool trust_nmethod_state() const { + // The root iterator will visit non-processed + // nmethods class unloading is turned off. 
+ return ClassUnloading || _verify_fixed; + } + +public: + XVerifyNMethodClosure(OopClosure* cl, bool verify_fixed) : + _cl(cl), + _bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()), + _verify_fixed(verify_fixed) {} + + virtual void do_nmethod(nmethod* nm) { + assert(!trust_nmethod_state() || !_bs_nm->is_armed(nm), "Should not encounter any armed nmethods"); + + XNMethod::nmethod_oops_do(nm, _cl); + } +}; + +void XVerify::roots_strong(bool verify_fixed) { + assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); + assert(!XResurrection::is_blocked(), "Invalid phase"); + + XVerifyRootClosure cl(verify_fixed); + XVerifyCLDClosure cld_cl(&cl); + XVerifyThreadClosure thread_cl(&cl); + XVerifyNMethodClosure nm_cl(&cl, verify_fixed); + + XRootsIterator iter(ClassLoaderData::_claim_none); + iter.apply(&cl, + &cld_cl, + &thread_cl, + &nm_cl); +} + +void XVerify::roots_weak() { + assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); + assert(!XResurrection::is_blocked(), "Invalid phase"); + + XVerifyRootClosure cl(true /* verify_fixed */); + XWeakRootsIterator iter; + iter.apply(&cl); +} + +void XVerify::objects(bool verify_weaks) { + assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); + assert(XGlobalPhase == XPhaseMarkCompleted, "Invalid phase"); + assert(!XResurrection::is_blocked(), "Invalid phase"); + + XVerifyOopClosure cl(verify_weaks); + ObjectToOopClosure object_cl(&cl); + XHeap::heap()->object_iterate(&object_cl, verify_weaks); +} + +void XVerify::before_zoperation() { + // Verify strong roots + XStatTimerDisable disable; + if (ZVerifyRoots) { + roots_strong(false /* verify_fixed */); + } +} + +void XVerify::after_mark() { + // Verify all strong roots and strong references + XStatTimerDisable disable; + if (ZVerifyRoots) { + roots_strong(true /* verify_fixed */); + } + if (ZVerifyObjects) { + objects(false /* verify_weaks */); + } +} + +void XVerify::after_weak_processing() { + // Verify all roots and all references + XStatTimerDisable disable; + if (ZVerifyRoots) { + roots_strong(true /* verify_fixed */); + roots_weak(); + } + if (ZVerifyObjects) { + objects(true /* verify_weaks */); + } +} + +template +class XPageDebugMapOrUnmapClosure : public XPageClosure { +private: + const XPageAllocator* const _allocator; + +public: + XPageDebugMapOrUnmapClosure(const XPageAllocator* allocator) : + _allocator(allocator) {} + + void do_page(const XPage* page) { + if (Map) { + _allocator->debug_map_page(page); + } else { + _allocator->debug_unmap_page(page); + } + } +}; + +XVerifyViewsFlip::XVerifyViewsFlip(const XPageAllocator* allocator) : + _allocator(allocator) { + if (ZVerifyViews) { + // Unmap all pages + XPageDebugMapOrUnmapClosure cl(_allocator); + XHeap::heap()->pages_do(&cl); + } +} + +XVerifyViewsFlip::~XVerifyViewsFlip() { + if (ZVerifyViews) { + // Map all pages + XPageDebugMapOrUnmapClosure cl(_allocator); + XHeap::heap()->pages_do(&cl); + } +} + +#ifdef ASSERT + +class XVerifyBadOopClosure : public OopClosure { +public: + virtual void do_oop(oop* p) { + const oop o = *p; + assert(!XAddress::is_good(XOop::to_address(o)), "Should not be good: " PTR_FORMAT, p2i(o)); + } + + virtual void do_oop(narrowOop* p) { + ShouldNotReachHere(); + } +}; + +// This class encapsulates various marks we need to deal with calling the +// frame iteration code from arbitrary points in the runtime. 
It is mostly +// due to problems that we might want to eventually clean up inside of the +// frame iteration code, such as creating random handles even though there +// is no safepoint to protect against, and fiddling around with exceptions. +class StackWatermarkProcessingMark { + ResetNoHandleMark _rnhm; + HandleMark _hm; + PreserveExceptionMark _pem; + ResourceMark _rm; + +public: + StackWatermarkProcessingMark(Thread* thread) : + _rnhm(), + _hm(thread), + _pem(thread), + _rm(thread) {} +}; + +void XVerify::verify_frame_bad(const frame& fr, RegisterMap& register_map) { + XVerifyBadOopClosure verify_cl; + fr.oops_do(&verify_cl, NULL, ®ister_map, DerivedPointerIterationMode::_ignore); +} + +void XVerify::verify_thread_head_bad(JavaThread* jt) { + XVerifyBadOopClosure verify_cl; + jt->oops_do_no_frames(&verify_cl, NULL); +} + +void XVerify::verify_thread_frames_bad(JavaThread* jt) { + if (jt->has_last_Java_frame()) { + XVerifyBadOopClosure verify_cl; + StackWatermarkProcessingMark swpm(Thread::current()); + // Traverse the execution stack + for (StackFrameStream fst(jt, true /* update */, false /* process_frames */); !fst.is_done(); fst.next()) { + fst.current()->oops_do(&verify_cl, NULL /* code_cl */, fst.register_map(), DerivedPointerIterationMode::_ignore); + } + } +} + +#endif // ASSERT diff --git a/src/hotspot/share/gc/x/xVerify.hpp b/src/hotspot/share/gc/x/xVerify.hpp new file mode 100644 index 0000000000000..bbe10f376fa6a --- /dev/null +++ b/src/hotspot/share/gc/x/xVerify.hpp @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XVERIFY_HPP +#define SHARE_GC_X_XVERIFY_HPP + +#include "memory/allStatic.hpp" + +class frame; +class XPageAllocator; + +class XVerify : public AllStatic { +private: + static void roots_strong(bool verify_fixed); + static void roots_weak(); + + static void objects(bool verify_weaks); + +public: + static void before_zoperation(); + static void after_mark(); + static void after_weak_processing(); + + static void verify_thread_head_bad(JavaThread* thread) NOT_DEBUG_RETURN; + static void verify_thread_frames_bad(JavaThread* thread) NOT_DEBUG_RETURN; + static void verify_frame_bad(const frame& fr, RegisterMap& register_map) NOT_DEBUG_RETURN; +}; + +class XVerifyViewsFlip { +private: + const XPageAllocator* const _allocator; + +public: + XVerifyViewsFlip(const XPageAllocator* allocator); + ~XVerifyViewsFlip(); +}; + +#endif // SHARE_GC_X_XVERIFY_HPP diff --git a/src/hotspot/share/gc/x/xVirtualMemory.cpp b/src/hotspot/share/gc/x/xVirtualMemory.cpp new file mode 100644 index 0000000000000..650f22f328392 --- /dev/null +++ b/src/hotspot/share/gc/x/xVirtualMemory.cpp @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/x/xAddress.inline.hpp" +#include "gc/x/xAddressSpaceLimit.hpp" +#include "gc/x/xGlobals.hpp" +#include "gc/x/xVirtualMemory.inline.hpp" +#include "services/memTracker.hpp" +#include "utilities/align.hpp" +#include "utilities/debug.hpp" + +XVirtualMemoryManager::XVirtualMemoryManager(size_t max_capacity) : + _manager(), + _reserved(0), + _initialized(false) { + + // Check max supported heap size + if (max_capacity > XAddressOffsetMax) { + log_error_p(gc)("Java heap too large (max supported heap size is " SIZE_FORMAT "G)", + XAddressOffsetMax / G); + return; + } + + // Initialize platform specific parts before reserving address space + pd_initialize_before_reserve(); + + // Reserve address space + if (!reserve(max_capacity)) { + log_error_pd(gc)("Failed to reserve enough address space for Java heap"); + return; + } + + // Initialize platform specific parts after reserving address space + pd_initialize_after_reserve(); + + // Successfully initialized + _initialized = true; +} + +size_t XVirtualMemoryManager::reserve_discontiguous(uintptr_t start, size_t size, size_t min_range) { + if (size < min_range) { + // Too small + return 0; + } + + assert(is_aligned(size, XGranuleSize), "Misaligned"); + + if (reserve_contiguous(start, size)) { + return size; + } + + const size_t half = size / 2; + if (half < min_range) { + // Too small + return 0; + } + + // Divide and conquer + const size_t first_part = align_down(half, XGranuleSize); + const size_t second_part = size - first_part; + return reserve_discontiguous(start, first_part, min_range) + + reserve_discontiguous(start + first_part, second_part, min_range); +} + +size_t XVirtualMemoryManager::reserve_discontiguous(size_t size) { + // Don't try to reserve address ranges smaller than 1% of the requested size. + // This avoids an explosion of reservation attempts in case large parts of the + // address space is already occupied. 
+ const size_t min_range = align_up(size / 100, XGranuleSize); + size_t start = 0; + size_t reserved = 0; + + // Reserve size somewhere between [0, XAddressOffsetMax) + while (reserved < size && start < XAddressOffsetMax) { + const size_t remaining = MIN2(size - reserved, XAddressOffsetMax - start); + reserved += reserve_discontiguous(start, remaining, min_range); + start += remaining; + } + + return reserved; +} + +bool XVirtualMemoryManager::reserve_contiguous(uintptr_t start, size_t size) { + assert(is_aligned(size, XGranuleSize), "Must be granule aligned"); + + // Reserve address views + const uintptr_t marked0 = XAddress::marked0(start); + const uintptr_t marked1 = XAddress::marked1(start); + const uintptr_t remapped = XAddress::remapped(start); + + // Reserve address space + if (!pd_reserve(marked0, size)) { + return false; + } + + if (!pd_reserve(marked1, size)) { + pd_unreserve(marked0, size); + return false; + } + + if (!pd_reserve(remapped, size)) { + pd_unreserve(marked0, size); + pd_unreserve(marked1, size); + return false; + } + + // Register address views with native memory tracker + nmt_reserve(marked0, size); + nmt_reserve(marked1, size); + nmt_reserve(remapped, size); + + // Make the address range free + _manager.free(start, size); + + return true; +} + +bool XVirtualMemoryManager::reserve_contiguous(size_t size) { + // Allow at most 8192 attempts spread evenly across [0, XAddressOffsetMax) + const size_t unused = XAddressOffsetMax - size; + const size_t increment = MAX2(align_up(unused / 8192, XGranuleSize), XGranuleSize); + + for (size_t start = 0; start + size <= XAddressOffsetMax; start += increment) { + if (reserve_contiguous(start, size)) { + // Success + return true; + } + } + + // Failed + return false; +} + +bool XVirtualMemoryManager::reserve(size_t max_capacity) { + const size_t limit = MIN2(XAddressOffsetMax, XAddressSpaceLimit::heap_view()); + const size_t size = MIN2(max_capacity * XVirtualToPhysicalRatio, limit); + + size_t reserved = size; + bool contiguous = true; + + // Prefer a contiguous address space + if (!reserve_contiguous(size)) { + // Fall back to a discontiguous address space + reserved = reserve_discontiguous(size); + contiguous = false; + } + + log_info_p(gc, init)("Address Space Type: %s/%s/%s", + (contiguous ? "Contiguous" : "Discontiguous"), + (limit == XAddressOffsetMax ? "Unrestricted" : "Restricted"), + (reserved == size ? "Complete" : "Degraded")); + log_info_p(gc, init)("Address Space Size: " SIZE_FORMAT "M x " SIZE_FORMAT " = " SIZE_FORMAT "M", + reserved / M, XHeapViews, (reserved * XHeapViews) / M); + + // Record reserved + _reserved = reserved; + + return reserved >= max_capacity; +} + +void XVirtualMemoryManager::nmt_reserve(uintptr_t start, size_t size) { + MemTracker::record_virtual_memory_reserve((void*)start, size, CALLER_PC); + MemTracker::record_virtual_memory_type((void*)start, mtJavaHeap); +} + +bool XVirtualMemoryManager::is_initialized() const { + return _initialized; +} + +XVirtualMemory XVirtualMemoryManager::alloc(size_t size, bool force_low_address) { + uintptr_t start; + + // Small pages are allocated at low addresses, while medium/large pages + // are allocated at high addresses (unless forced to be at a low address). 
+ if (force_low_address || size <= XPageSizeSmall) { + start = _manager.alloc_low_address(size); + } else { + start = _manager.alloc_high_address(size); + } + + return XVirtualMemory(start, size); +} + +void XVirtualMemoryManager::free(const XVirtualMemory& vmem) { + _manager.free(vmem.start(), vmem.size()); +} diff --git a/src/hotspot/share/gc/x/xVirtualMemory.hpp b/src/hotspot/share/gc/x/xVirtualMemory.hpp new file mode 100644 index 0000000000000..c9e5c67ea5750 --- /dev/null +++ b/src/hotspot/share/gc/x/xVirtualMemory.hpp @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XVIRTUALMEMORY_HPP +#define SHARE_GC_X_XVIRTUALMEMORY_HPP + +#include "gc/x/xMemory.hpp" + +class VMStructs; + +class XVirtualMemory { + friend class ::VMStructs; + +private: + uintptr_t _start; + uintptr_t _end; + +public: + XVirtualMemory(); + XVirtualMemory(uintptr_t start, size_t size); + + bool is_null() const; + uintptr_t start() const; + uintptr_t end() const; + size_t size() const; + + XVirtualMemory split(size_t size); +}; + +class XVirtualMemoryManager { +private: + XMemoryManager _manager; + uintptr_t _reserved; + bool _initialized; + + // Platform specific implementation + void pd_initialize_before_reserve(); + void pd_initialize_after_reserve(); + bool pd_reserve(uintptr_t addr, size_t size); + void pd_unreserve(uintptr_t addr, size_t size); + + bool reserve_contiguous(uintptr_t start, size_t size); + bool reserve_contiguous(size_t size); + size_t reserve_discontiguous(uintptr_t start, size_t size, size_t min_range); + size_t reserve_discontiguous(size_t size); + bool reserve(size_t max_capacity); + + void nmt_reserve(uintptr_t start, size_t size); + +public: + XVirtualMemoryManager(size_t max_capacity); + + bool is_initialized() const; + + size_t reserved() const; + uintptr_t lowest_available_address() const; + + XVirtualMemory alloc(size_t size, bool force_low_address); + void free(const XVirtualMemory& vmem); +}; + +#endif // SHARE_GC_X_XVIRTUALMEMORY_HPP diff --git a/src/hotspot/share/gc/x/xVirtualMemory.inline.hpp b/src/hotspot/share/gc/x/xVirtualMemory.inline.hpp new file mode 100644 index 0000000000000..8c834b42c7f47 --- /dev/null +++ b/src/hotspot/share/gc/x/xVirtualMemory.inline.hpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XVIRTUALMEMORY_INLINE_HPP +#define SHARE_GC_X_XVIRTUALMEMORY_INLINE_HPP + +#include "gc/x/xVirtualMemory.hpp" + +#include "gc/x/xMemory.inline.hpp" + +inline XVirtualMemory::XVirtualMemory() : + _start(UINTPTR_MAX), + _end(UINTPTR_MAX) {} + +inline XVirtualMemory::XVirtualMemory(uintptr_t start, size_t size) : + _start(start), + _end(start + size) {} + +inline bool XVirtualMemory::is_null() const { + return _start == UINTPTR_MAX; +} + +inline uintptr_t XVirtualMemory::start() const { + return _start; +} + +inline uintptr_t XVirtualMemory::end() const { + return _end; +} + +inline size_t XVirtualMemory::size() const { + return _end - _start; +} + +inline XVirtualMemory XVirtualMemory::split(size_t size) { + _start += size; + return XVirtualMemory(_start - size, size); +} + +inline size_t XVirtualMemoryManager::reserved() const { + return _reserved; +} + +inline uintptr_t XVirtualMemoryManager::lowest_available_address() const { + return _manager.peek_low_address(); +} + +#endif // SHARE_GC_X_XVIRTUALMEMORY_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xWeakRootsProcessor.cpp b/src/hotspot/share/gc/x/xWeakRootsProcessor.cpp new file mode 100644 index 0000000000000..27eaead98fe44 --- /dev/null +++ b/src/hotspot/share/gc/x/xWeakRootsProcessor.cpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/x/xBarrier.inline.hpp" +#include "gc/x/xRootsIterator.hpp" +#include "gc/x/xTask.hpp" +#include "gc/x/xWeakRootsProcessor.hpp" +#include "gc/x/xWorkers.hpp" + +class XPhantomCleanOopClosure : public OopClosure { +public: + virtual void do_oop(oop* p) { + // Read the oop once, to make sure the liveness check + // and the later clearing uses the same value. + const oop obj = Atomic::load(p); + if (XBarrier::is_alive_barrier_on_phantom_oop(obj)) { + XBarrier::keep_alive_barrier_on_phantom_oop_field(p); + } else { + // The destination could have been modified/reused, in which case + // we don't want to clear it. However, no one could write the same + // oop here again (the object would be strongly live and we would + // not consider clearing such oops), so therefore we don't have an + // ABA problem here. + Atomic::cmpxchg(p, obj, oop(NULL)); + } + } + + virtual void do_oop(narrowOop* p) { + ShouldNotReachHere(); + } +}; + +XWeakRootsProcessor::XWeakRootsProcessor(XWorkers* workers) : + _workers(workers) {} + +class XProcessWeakRootsTask : public XTask { +private: + XWeakRootsIterator _weak_roots; + +public: + XProcessWeakRootsTask() : + XTask("XProcessWeakRootsTask"), + _weak_roots() {} + + ~XProcessWeakRootsTask() { + _weak_roots.report_num_dead(); + } + + virtual void work() { + XPhantomCleanOopClosure cl; + _weak_roots.apply(&cl); + } +}; + +void XWeakRootsProcessor::process_weak_roots() { + XProcessWeakRootsTask task; + _workers->run(&task); +} diff --git a/src/hotspot/share/gc/x/xWeakRootsProcessor.hpp b/src/hotspot/share/gc/x/xWeakRootsProcessor.hpp new file mode 100644 index 0000000000000..c63b2702374b2 --- /dev/null +++ b/src/hotspot/share/gc/x/xWeakRootsProcessor.hpp @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_XWEAKROOTSPROCESSOR_HPP +#define SHARE_GC_X_XWEAKROOTSPROCESSOR_HPP + +class XWorkers; + +class XWeakRootsProcessor { +private: + XWorkers* const _workers; + +public: + XWeakRootsProcessor(XWorkers* workers); + + void process_weak_roots(); +}; + +#endif // SHARE_GC_X_XWEAKROOTSPROCESSOR_HPP diff --git a/src/hotspot/share/gc/x/xWorkers.cpp b/src/hotspot/share/gc/x/xWorkers.cpp new file mode 100644 index 0000000000000..642c63f0531e5 --- /dev/null +++ b/src/hotspot/share/gc/x/xWorkers.cpp @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/x/xLock.inline.hpp" +#include "gc/x/xStat.hpp" +#include "gc/x/xTask.hpp" +#include "gc/x/xThread.hpp" +#include "gc/x/xWorkers.hpp" +#include "runtime/java.hpp" + +class XWorkersInitializeTask : public WorkerTask { +private: + const uint _nworkers; + uint _started; + XConditionLock _lock; + +public: + XWorkersInitializeTask(uint nworkers) : + WorkerTask("XWorkersInitializeTask"), + _nworkers(nworkers), + _started(0), + _lock() {} + + virtual void work(uint worker_id) { + // Register as worker + XThread::set_worker(); + + // Wait for all threads to start + XLocker locker(&_lock); + if (++_started == _nworkers) { + // All threads started + _lock.notify_all(); + } else { + while (_started != _nworkers) { + _lock.wait(); + } + } + } +}; + +XWorkers::XWorkers() : + _workers("XWorker", + UseDynamicNumberOfGCThreads ? 
ConcGCThreads : MAX2(ConcGCThreads, ParallelGCThreads)) { + + if (UseDynamicNumberOfGCThreads) { + log_info_p(gc, init)("GC Workers: %u (dynamic)", _workers.max_workers()); + } else { + log_info_p(gc, init)("GC Workers: %u/%u (static)", ConcGCThreads, _workers.max_workers()); + } + + // Initialize worker threads + _workers.initialize_workers(); + _workers.set_active_workers(_workers.max_workers()); + if (_workers.active_workers() != _workers.max_workers()) { + vm_exit_during_initialization("Failed to create XWorkers"); + } + + // Execute task to register threads as workers + XWorkersInitializeTask task(_workers.max_workers()); + _workers.run_task(&task); +} + +uint XWorkers::active_workers() const { + return _workers.active_workers(); +} + +void XWorkers::set_active_workers(uint nworkers) { + log_info(gc, task)("Using %u workers", nworkers); + _workers.set_active_workers(nworkers); +} + +void XWorkers::run(XTask* task) { + log_debug(gc, task)("Executing Task: %s, Active Workers: %u", task->name(), active_workers()); + XStatWorkers::at_start(); + _workers.run_task(task->worker_task()); + XStatWorkers::at_end(); +} + +void XWorkers::run_all(XTask* task) { + // Save number of active workers + const uint prev_active_workers = _workers.active_workers(); + + // Execute task using all workers + _workers.set_active_workers(_workers.max_workers()); + log_debug(gc, task)("Executing Task: %s, Active Workers: %u", task->name(), active_workers()); + _workers.run_task(task->worker_task()); + + // Restore number of active workers + _workers.set_active_workers(prev_active_workers); +} + +void XWorkers::threads_do(ThreadClosure* tc) const { + _workers.threads_do(tc); +} diff --git a/src/hotspot/share/gc/x/xWorkers.hpp b/src/hotspot/share/gc/x/xWorkers.hpp new file mode 100644 index 0000000000000..33c49bb7fef5c --- /dev/null +++ b/src/hotspot/share/gc/x/xWorkers.hpp @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_X_XWORKERS_HPP +#define SHARE_GC_X_XWORKERS_HPP + +#include "gc/shared/workerThread.hpp" + +class ThreadClosure; +class XTask; + +class XWorkers { +private: + WorkerThreads _workers; + +public: + XWorkers(); + + uint active_workers() const; + void set_active_workers(uint nworkers); + + void run(XTask* task); + void run_all(XTask* task); + + void threads_do(ThreadClosure* tc) const; +}; + +#endif // SHARE_GC_X_XWORKERS_HPP diff --git a/src/hotspot/share/gc/x/x_globals.hpp b/src/hotspot/share/gc/x/x_globals.hpp new file mode 100644 index 0000000000000..a23c1353a6842 --- /dev/null +++ b/src/hotspot/share/gc/x/x_globals.hpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_X_X_GLOBALS_HPP +#define SHARE_GC_X_X_GLOBALS_HPP + +#define GC_X_FLAGS(develop, \ + develop_pd, \ + product, \ + product_pd, \ + notproduct, \ + range, \ + constraint) \ + \ + product(bool, ZVerifyViews, false, DIAGNOSTIC, \ + "Verify heap view accesses") \ + \ +// end of GC_X_FLAGS + +#endif // SHARE_GC_X_X_GLOBALS_HPP diff --git a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp index 93001511f88f1..5f2ed9c304f1f 100644 --- a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp +++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,8 +22,11 @@ */ #include "precompiled.hpp" +#include "c1/c1_FrameMap.hpp" #include "c1/c1_LIR.hpp" +#include "c1/c1_LIRAssembler.hpp" #include "c1/c1_LIRGenerator.hpp" +#include "c1/c1_MacroAssembler.hpp" #include "c1/c1_CodeStubs.hpp" #include "gc/z/c1/zBarrierSetC1.hpp" #include "gc/z/zBarrierSet.hpp" @@ -75,6 +78,7 @@ address ZLoadBarrierStubC1::runtime_stub() const { void ZLoadBarrierStubC1::visit(LIR_OpVisitState* visitor) { visitor->do_slow_case(); visitor->do_input(_ref_addr); + visitor->do_input(_ref); visitor->do_output(_ref); if (_tmp->is_valid()) { visitor->do_temp(_tmp); @@ -91,21 +95,78 @@ void ZLoadBarrierStubC1::print_name(outputStream* out) const { } #endif // PRODUCT -class LIR_OpZLoadBarrierTest : public LIR_Op { +ZStoreBarrierStubC1::ZStoreBarrierStubC1(LIRAccess& access, + LIR_Opr new_zaddress, + LIR_Opr new_zpointer, + LIR_Opr tmp, + bool is_atomic, + address runtime_stub) : + _ref_addr(access.resolved_addr()), + _new_zaddress(new_zaddress), + _new_zpointer(new_zpointer), + _tmp(tmp), + _is_atomic(is_atomic), + _runtime_stub(runtime_stub) { + assert(_ref_addr->is_address(), "Must be an address"); +} + +LIR_Opr ZStoreBarrierStubC1::ref_addr() const { + return _ref_addr; +} + +LIR_Opr ZStoreBarrierStubC1::new_zaddress() const { + return _new_zaddress; +} + +LIR_Opr ZStoreBarrierStubC1::new_zpointer() const { + return _new_zpointer; +} + +LIR_Opr ZStoreBarrierStubC1::tmp() const { + return _tmp; +} + +bool ZStoreBarrierStubC1::is_atomic() const { + return _is_atomic; +} + +address ZStoreBarrierStubC1::runtime_stub() const { + return _runtime_stub; +} + +void ZStoreBarrierStubC1::visit(LIR_OpVisitState* visitor) { + visitor->do_slow_case(); + visitor->do_input(_ref_addr); + visitor->do_temp(_new_zpointer); + visitor->do_temp(_tmp); +} + +void ZStoreBarrierStubC1::emit_code(LIR_Assembler* ce) { + ZBarrierSet::assembler()->generate_c1_store_barrier_stub(ce, this); +} + +#ifndef PRODUCT +void ZStoreBarrierStubC1::print_name(outputStream* out) const { + out->print("ZStoreBarrierStubC1"); +} +#endif // PRODUCT + +class LIR_OpZUncolor : public LIR_Op { private: LIR_Opr _opr; public: - LIR_OpZLoadBarrierTest(LIR_Opr opr) : - LIR_Op(lir_zloadbarrier_test, LIR_OprFact::illegalOpr, NULL), + LIR_OpZUncolor(LIR_Opr opr) : + LIR_Op(), _opr(opr) {} virtual void visit(LIR_OpVisitState* state) { state->do_input(_opr); + state->do_output(_opr); } virtual void emit_code(LIR_Assembler* ce) { - ZBarrierSet::assembler()->generate_c1_load_barrier_test(ce, _opr); + ZBarrierSet::assembler()->generate_c1_uncolor(ce, _opr); } virtual void print_instr(outputStream* out) const { @@ -115,7 +176,45 @@ class LIR_OpZLoadBarrierTest : public LIR_Op { #ifndef PRODUCT virtual const char* name() const { - return "lir_z_load_barrier_test"; + return "lir_z_uncolor"; + } +#endif // PRODUCT +}; + +class LIR_OpZLoadBarrier : public LIR_Op { +private: + LIR_Opr _opr; + ZLoadBarrierStubC1* const _stub; + const bool _on_non_strong; + +public: + LIR_OpZLoadBarrier(LIR_Opr opr, ZLoadBarrierStubC1* stub, bool on_non_strong) : + LIR_Op(), + _opr(opr), + _stub(stub), + _on_non_strong(on_non_strong) { + assert(stub != nullptr, "The stub is the load barrier slow path."); + } + + virtual void visit(LIR_OpVisitState* state) { + state->do_input(_opr); + state->do_output(_opr); + state->do_stub(_stub); + } + + virtual void emit_code(LIR_Assembler* ce) { + ZBarrierSet::assembler()->generate_c1_load_barrier(ce, _opr, _stub, _on_non_strong); + 
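+ // Register the stub with the assembler so its slow-path code is emitted out of line, together with the other code stubs.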
ce->append_code_stub(_stub); + } + + virtual void print_instr(outputStream* out) const { + _opr->print(out); + out->print(" "); + } + +#ifndef PRODUCT + virtual const char* name() const { + return "lir_z_load_barrier"; } #endif // PRODUCT }; @@ -125,8 +224,10 @@ static bool barrier_needed(LIRAccess& access) { } ZBarrierSetC1::ZBarrierSetC1() : - _load_barrier_on_oop_field_preloaded_runtime_stub(NULL), - _load_barrier_on_weak_oop_field_preloaded_runtime_stub(NULL) {} + _load_barrier_on_oop_field_preloaded_runtime_stub(nullptr), + _load_barrier_on_weak_oop_field_preloaded_runtime_stub(nullptr), + _store_barrier_on_oop_field_with_healing(nullptr), + _store_barrier_on_oop_field_without_healing(nullptr) {} address ZBarrierSetC1::load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const { assert((decorators & ON_PHANTOM_OOP_REF) == 0, "Unsupported decorator"); @@ -139,21 +240,169 @@ address ZBarrierSetC1::load_barrier_on_oop_field_preloaded_runtime_stub(Decorato } } +address ZBarrierSetC1::store_barrier_on_oop_field_runtime_stub(bool self_healing) const { + if (self_healing) { + return _store_barrier_on_oop_field_with_healing; + } else { + return _store_barrier_on_oop_field_without_healing; + } +} + +class LIR_OpZColor : public LIR_Op { + friend class LIR_OpVisitState; + +private: + LIR_Opr _opr; + +public: + LIR_OpZColor(LIR_Opr opr) : + LIR_Op(lir_none, opr, nullptr /* info */), + _opr(opr) {} + + virtual void visit(LIR_OpVisitState* state) { + state->do_input(_opr); + state->do_output(_opr); + } + + virtual void emit_code(LIR_Assembler* ce) { + ZBarrierSet::assembler()->generate_c1_color(ce, _opr); + } + + virtual void print_instr(outputStream* out) const { + _opr->print(out); out->print(" "); + } + +#ifndef PRODUCT + virtual const char* name() const { + return "lir_z_color"; + } +#endif // PRODUCT +}; + +class LIR_OpZStoreBarrier : public LIR_Op { + friend class LIR_OpVisitState; + +private: + LIR_Opr _addr; + LIR_Opr _new_zaddress; + LIR_Opr _new_zpointer; + CodeStub* _stub; + CodeEmitInfo* _info; + +public: + LIR_OpZStoreBarrier(LIR_Opr addr, + LIR_Opr new_zaddress, + LIR_Opr new_zpointer, + CodeStub* stub, + CodeEmitInfo* info) : + LIR_Op(lir_none, new_zpointer, nullptr /* info */), + _addr(addr), + _new_zaddress(new_zaddress), + _new_zpointer(new_zpointer), + _stub(stub), + _info(info) {} + + virtual void visit(LIR_OpVisitState* state) { + state->do_input(_new_zaddress); + state->do_input(_addr); + + // Use temp registers to ensure these they use different registers. 
+ state->do_temp(_addr); + state->do_temp(_new_zaddress); + + state->do_output(_new_zpointer); + state->do_stub(_stub); + + if (_info != nullptr) { + state->do_info(_info); + } + } + + virtual void emit_code(LIR_Assembler* ce) { + const ZBarrierSetAssembler* const bs_asm = + (const ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler(); + if (_info != nullptr) { + ce->add_debug_info_for_null_check_here(_info); + } + bs_asm->generate_c1_store_barrier(ce, + _addr->as_address_ptr(), + _new_zaddress, + _new_zpointer, + (ZStoreBarrierStubC1*)_stub); + ce->append_code_stub(_stub); + } + + virtual void print_instr(outputStream* out) const { + _addr->print(out); out->print(" "); + _new_zaddress->print(out); out->print(" "); + _new_zpointer->print(out); out->print(" "); + } + +#ifndef PRODUCT + virtual const char* name() const { + return "lir_z_store_barrier"; + } +#endif // PRODUCT +}; + #ifdef ASSERT #define __ access.gen()->lir(__FILE__, __LINE__)-> #else #define __ access.gen()->lir()-> #endif -void ZBarrierSetC1::load_barrier(LIRAccess& access, LIR_Opr result) const { - // Fast path - __ append(new LIR_OpZLoadBarrierTest(result)); +LIR_Opr ZBarrierSetC1::color(LIRAccess& access, LIR_Opr ref) const { + // Only used from CAS where we have control over the used register + assert(ref->is_single_cpu(), "Should be using a register"); + + __ append(new LIR_OpZColor(ref)); + return ref; +} + +void ZBarrierSetC1::load_barrier(LIRAccess& access, LIR_Opr result) const { // Slow path const address runtime_stub = load_barrier_on_oop_field_preloaded_runtime_stub(access.decorators()); - CodeStub* const stub = new ZLoadBarrierStubC1(access, result, runtime_stub); - __ branch(lir_cond_notEqual, stub); - __ branch_destination(stub->continuation()); + auto stub = new ZLoadBarrierStubC1(access, result, runtime_stub); + + const bool on_non_strong = + (access.decorators() & ON_WEAK_OOP_REF) != 0 || + (access.decorators() & ON_PHANTOM_OOP_REF) != 0; + + __ append(new LIR_OpZLoadBarrier(result, stub, on_non_strong)); +} + +LIR_Opr ZBarrierSetC1::store_barrier(LIRAccess& access, LIR_Opr new_zaddress, bool is_atomic) const { + LIRGenerator* gen = access.gen(); + + LIR_Opr new_zaddress_reg; + if (new_zaddress->is_single_cpu()) { + new_zaddress_reg = new_zaddress; + } else if (new_zaddress->is_constant()) { + new_zaddress_reg = gen->new_register(access.type()); + gen->lir()->move(new_zaddress, new_zaddress_reg); + } else { + ShouldNotReachHere(); + } + + LIR_Opr new_zpointer = gen->new_register(T_OBJECT); + LIR_Opr tmp = gen->new_pointer_register(); + ZStoreBarrierStubC1* const stub = + new ZStoreBarrierStubC1(access, + new_zaddress_reg, + new_zpointer, + tmp, + is_atomic, + store_barrier_on_oop_field_runtime_stub(is_atomic)); + + __ append(new LIR_OpZStoreBarrier(access.resolved_addr(), + new_zaddress_reg, + new_zpointer, + stub, + access.access_emit_info())); + access.access_emit_info() = nullptr; + + return new_zpointer; } LIR_Opr ZBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) { @@ -164,51 +413,86 @@ LIR_Opr ZBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_regist return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier); } -#undef __ - void ZBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) { + if (!barrier_needed(access)) { + BarrierSetC1::load_at_resolved(access, result); + return; + } + BarrierSetC1::load_at_resolved(access, result); + load_barrier(access, result); +} - if (barrier_needed(access)) { - 
load_barrier(access, result); +void ZBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) { + if (!barrier_needed(access)) { + BarrierSetC1::store_at_resolved(access, value); + return; } + + value = store_barrier(access, value, false /* is_atomic */); + + BarrierSetC1::store_at_resolved(access, value); } -static void pre_load_barrier(LIRAccess& access) { - DecoratorSet decorators = access.decorators(); +LIR_Opr ZBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) { + if (!barrier_needed(access)) { + return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value); + } - // Downgrade access to MO_UNORDERED - decorators = (decorators & ~MO_DECORATOR_MASK) | MO_UNORDERED; + new_value.load_item(); + const LIR_Opr new_value_zpointer = store_barrier(access, new_value.result(), true /* is_atomic */); - // Remove ACCESS_WRITE - decorators = (decorators & ~ACCESS_WRITE); + cmp_value.load_item(); + cmp_value.set_destroys_register(); + color(access, cmp_value.result()); - // Generate synthetic load at - access.gen()->access_load_at(decorators, - access.type(), - access.base().item(), - access.offset().opr(), - access.gen()->new_register(access.type()), - NULL /* patch_emit_info */, - NULL /* load_emit_info */); +#ifdef AMD64 + const LIR_Opr cmp_value_opr = FrameMap::rax_oop_opr; +#else + const LIR_Opr cmp_value_opr = access.gen()->new_register(T_OBJECT); +#endif + access.gen()->lir()->move(cmp_value.result(), cmp_value_opr); + + __ cas_obj(access.resolved_addr()->as_address_ptr()->base(), + cmp_value_opr, + new_value_zpointer, +#ifdef RISCV + access.gen()->new_register(T_OBJECT), + access.gen()->new_register(T_OBJECT), + access.gen()->new_register(T_OBJECT)); +#else + LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr); +#endif + LIR_Opr result = access.gen()->new_register(T_INT); + __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), + result, T_INT); + + return result; } LIR_Opr ZBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) { - if (barrier_needed(access)) { - pre_load_barrier(access); + if (!barrier_needed(access)) { + return BarrierSetC1::atomic_xchg_at_resolved(access, value); } - return BarrierSetC1::atomic_xchg_at_resolved(access, value); -} + value.load_item(); -LIR_Opr ZBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) { - if (barrier_needed(access)) { - pre_load_barrier(access); - } + LIR_Opr value_zpointer = store_barrier(access, value.result(), true /* is_atomic */); - return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value); + // The parent class expects the in-parameter and out-parameter to be the same. + // Move the colored pointer to the expected register. 
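+ // The previous value read by the exchange is a colored pointer; it is uncolored below before being returned to the caller.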
+#ifdef AMD64 + __ xchg(access.resolved_addr(), value_zpointer, value_zpointer, LIR_OprFact::illegalOpr); +#else + __ xchg(access.resolved_addr(), value_zpointer, value_zpointer, access.gen()->new_register(T_INT)); +#endif + __ append(new LIR_OpZUncolor(value_zpointer)); + + return value_zpointer; } +#undef __ + class ZLoadBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosure { private: const DecoratorSet _decorators; @@ -219,19 +503,44 @@ class ZLoadBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosure virtual OopMapSet* generate_code(StubAssembler* sasm) { ZBarrierSet::assembler()->generate_c1_load_barrier_runtime_stub(sasm, _decorators); - return NULL; + return nullptr; } }; -static address generate_c1_runtime_stub(BufferBlob* blob, DecoratorSet decorators, const char* name) { +static address generate_c1_load_runtime_stub(BufferBlob* blob, DecoratorSet decorators, const char* name) { ZLoadBarrierRuntimeStubCodeGenClosure cl(decorators); CodeBlob* const code_blob = Runtime1::generate_blob(blob, -1 /* stub_id */, name, false /* expect_oop_map*/, &cl); return code_blob->code_begin(); } +class ZStoreBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosure { +private: + const bool _self_healing; + +public: + ZStoreBarrierRuntimeStubCodeGenClosure(bool self_healing) : + _self_healing(self_healing) {} + + virtual OopMapSet* generate_code(StubAssembler* sasm) { + ZBarrierSet::assembler()->generate_c1_store_barrier_runtime_stub(sasm, _self_healing); + return nullptr; + } +}; + +static address generate_c1_store_runtime_stub(BufferBlob* blob, bool self_healing, const char* name) { + ZStoreBarrierRuntimeStubCodeGenClosure cl(self_healing); + CodeBlob* const code_blob = Runtime1::generate_blob(blob, -1 /* stub_id */, name, false /* expect_oop_map*/, &cl); + return code_blob->code_begin(); +} + void ZBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* blob) { _load_barrier_on_oop_field_preloaded_runtime_stub = - generate_c1_runtime_stub(blob, ON_STRONG_OOP_REF, "load_barrier_on_oop_field_preloaded_runtime_stub"); + generate_c1_load_runtime_stub(blob, ON_STRONG_OOP_REF, "load_barrier_on_oop_field_preloaded_runtime_stub"); _load_barrier_on_weak_oop_field_preloaded_runtime_stub = - generate_c1_runtime_stub(blob, ON_WEAK_OOP_REF, "load_barrier_on_weak_oop_field_preloaded_runtime_stub"); + generate_c1_load_runtime_stub(blob, ON_WEAK_OOP_REF, "load_barrier_on_weak_oop_field_preloaded_runtime_stub"); + + _store_barrier_on_oop_field_with_healing = + generate_c1_store_runtime_stub(blob, true /* self_healing */, "store_barrier_on_oop_field_with_healing"); + _store_barrier_on_oop_field_without_healing = + generate_c1_store_runtime_stub(blob, false /* self_healing */, "store_barrier_on_oop_field_without_healing"); } diff --git a/src/hotspot/share/gc/z/c1/zBarrierSetC1.hpp b/src/hotspot/share/gc/z/c1/zBarrierSetC1.hpp index 2eec7366c53df..a17c3f7628c32 100644 --- a/src/hotspot/share/gc/z/c1/zBarrierSetC1.hpp +++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -55,19 +55,59 @@ class ZLoadBarrierStubC1 : public CodeStub { #endif // PRODUCT }; +class ZStoreBarrierStubC1 : public CodeStub { +private: + LIR_Opr _ref_addr; + LIR_Opr _new_zaddress; + LIR_Opr _new_zpointer; + LIR_Opr _tmp; + bool _is_atomic; + address _runtime_stub; + +public: + ZStoreBarrierStubC1(LIRAccess& access, + LIR_Opr new_zaddress, + LIR_Opr new_zpointer, + LIR_Opr tmp, + bool is_atomic, + address runtime_stub); + + LIR_Opr ref_addr() const; + LIR_Opr new_zaddress() const; + LIR_Opr new_zpointer() const; + LIR_Opr tmp() const; + bool is_atomic() const; + address runtime_stub() const; + + virtual void emit_code(LIR_Assembler* ce); + virtual void visit(LIR_OpVisitState* visitor); + +#ifndef PRODUCT + virtual void print_name(outputStream* out) const; +#endif // PRODUCT +}; + class ZBarrierSetC1 : public BarrierSetC1 { private: address _load_barrier_on_oop_field_preloaded_runtime_stub; address _load_barrier_on_weak_oop_field_preloaded_runtime_stub; + address _store_barrier_on_oop_field_with_healing; + address _store_barrier_on_oop_field_without_healing; address load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const; + address store_barrier_on_oop_field_runtime_stub(bool self_healing) const; + + LIR_Opr color(LIRAccess& access, LIR_Opr ref) const; + void load_barrier(LIRAccess& access, LIR_Opr result) const; + LIR_Opr store_barrier(LIRAccess& access, LIR_Opr new_zaddress, bool is_atomic) const; protected: virtual LIR_Opr resolve_address(LIRAccess& access, bool resolve_in_register); virtual void load_at_resolved(LIRAccess& access, LIR_Opr result); - virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value); + virtual void store_at_resolved(LIRAccess& access, LIR_Opr value); virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value); + virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value); public: ZBarrierSetC1(); diff --git a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp index 8d729d296e324..1903c28a6fc1a 100644 --- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp +++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp @@ -22,6 +22,7 @@ */ #include "precompiled.hpp" +#include "asm/macroAssembler.hpp" #include "classfile/javaClasses.hpp" #include "gc/z/c2/zBarrierSetC2.hpp" #include "gc/z/zBarrierSet.hpp" @@ -44,63 +45,197 @@ #include "utilities/growableArray.hpp" #include "utilities/macros.hpp" +template +class ZArenaHashtable : public ResourceObj { + class ZArenaHashtableEntry : public ResourceObj { + public: + ZArenaHashtableEntry* _next; + K _key; + V _value; + }; + + static const size_t _table_mask = _table_size - 1; + + Arena* _arena; + ZArenaHashtableEntry* _table[_table_size]; + +public: + class Iterator { + ZArenaHashtable* _table; + ZArenaHashtableEntry* _current_entry; + size_t _current_index; + + public: + Iterator(ZArenaHashtable* table) : + _table(table), + _current_entry(table->_table[0]), + _current_index(0) { + if (_current_entry == nullptr) { + next(); + } + } + + bool has_next() { return _current_entry != nullptr; } + K key() { return _current_entry->_key; } + V value() { return _current_entry->_value; } + + void next() { + if (_current_entry != nullptr) { + _current_entry = _current_entry->_next; + } + while (_current_entry == nullptr && ++_current_index < _table_size) { + _current_entry = _table->_table[_current_index]; + } + } + }; + + 
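+ // Entries are allocated in the arena and never removed; add() links new entries at the head of their bucket.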
ZArenaHashtable(Arena* arena) : + _arena(arena), + _table() { + Copy::zero_to_bytes(&_table, sizeof(_table)); + } + + void add(K key, V value) { + ZArenaHashtableEntry* entry = new (_arena) ZArenaHashtableEntry(); + entry->_key = key; + entry->_value = value; + entry->_next = _table[key & _table_mask]; + _table[key & _table_mask] = entry; + } + + V* get(K key) const { + for (ZArenaHashtableEntry* e = _table[key & _table_mask]; e != nullptr; e = e->_next) { + if (e->_key == key) { + return &(e->_value); + } + } + return nullptr; + } + + Iterator iterator() { + return Iterator(this); + } +}; + +typedef ZArenaHashtable ZOffsetTable; + class ZBarrierSetC2State : public ArenaObj { private: - GrowableArray* _stubs; - Node_Array _live; + GrowableArray* _stubs; + Node_Array _live; + int _trampoline_stubs_count; + int _stubs_start_offset; public: ZBarrierSetC2State(Arena* arena) : - _stubs(new (arena) GrowableArray(arena, 8, 0, NULL)), - _live(arena) {} + _stubs(new (arena) GrowableArray(arena, 8, 0, nullptr)), + _live(arena), + _trampoline_stubs_count(0), + _stubs_start_offset(0) {} - GrowableArray* stubs() { + GrowableArray* stubs() { return _stubs; } RegMask* live(const Node* node) { if (!node->is_Mach()) { // Don't need liveness for non-MachNodes - return NULL; + return nullptr; } const MachNode* const mach = node->as_Mach(); - if (mach->barrier_data() == ZLoadBarrierElided) { + if (mach->barrier_data() == ZBarrierElided) { // Don't need liveness data for nodes without barriers - return NULL; + return nullptr; } RegMask* live = (RegMask*)_live[node->_idx]; - if (live == NULL) { + if (live == nullptr) { live = new (Compile::current()->comp_arena()->AmallocWords(sizeof(RegMask))) RegMask(); _live.map(node->_idx, (Node*)live); } return live; } + + void inc_trampoline_stubs_count() { + assert(_trampoline_stubs_count != INT_MAX, "Overflow"); + ++_trampoline_stubs_count; + } + + int trampoline_stubs_count() { + return _trampoline_stubs_count; + } + + void set_stubs_start_offset(int offset) { + _stubs_start_offset = offset; + } + + int stubs_start_offset() { + return _stubs_start_offset; + } }; static ZBarrierSetC2State* barrier_set_state() { return reinterpret_cast(Compile::current()->barrier_set_state()); } -ZLoadBarrierStubC2* ZLoadBarrierStubC2::create(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) { - ZLoadBarrierStubC2* const stub = new (Compile::current()->comp_arena()) ZLoadBarrierStubC2(node, ref_addr, ref, tmp, barrier_data); +void ZBarrierStubC2::register_stub(ZBarrierStubC2* stub) { if (!Compile::current()->output()->in_scratch_emit_size()) { barrier_set_state()->stubs()->append(stub); } +} - return stub; +void ZBarrierStubC2::inc_trampoline_stubs_count() { + if (!Compile::current()->output()->in_scratch_emit_size()) { + barrier_set_state()->inc_trampoline_stubs_count(); + } +} + +int ZBarrierStubC2::trampoline_stubs_count() { + return barrier_set_state()->trampoline_stubs_count(); } -ZLoadBarrierStubC2::ZLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) : +int ZBarrierStubC2::stubs_start_offset() { + return barrier_set_state()->stubs_start_offset(); +} + +ZBarrierStubC2::ZBarrierStubC2(const MachNode* node) : _node(node), - _ref_addr(ref_addr), - _ref(ref), - _tmp(tmp), - _barrier_data(barrier_data), _entry(), - _continuation() { + _continuation() {} + +Register ZBarrierStubC2::result() const { + return noreg; +} + +RegMask& ZBarrierStubC2::live() const { + return 
*barrier_set_state()->live(_node); +} + +Label* ZBarrierStubC2::entry() { + // The _entry will never be bound when in_scratch_emit_size() is true. + // However, we still need to return a label that is not bound now, but + // will eventually be bound. Any eventually bound label will do, as it + // will only act as a placeholder, so we return the _continuation label. + return Compile::current()->output()->in_scratch_emit_size() ? &_continuation : &_entry; +} + +Label* ZBarrierStubC2::continuation() { + return &_continuation; +} + +ZLoadBarrierStubC2* ZLoadBarrierStubC2::create(const MachNode* node, Address ref_addr, Register ref) { + ZLoadBarrierStubC2* const stub = new (Compile::current()->comp_arena()) ZLoadBarrierStubC2(node, ref_addr, ref); + register_stub(stub); + + return stub; +} + +ZLoadBarrierStubC2::ZLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref) : + ZBarrierStubC2(node), + _ref_addr(ref_addr), + _ref(ref) { assert_different_registers(ref, ref_addr.base()); assert_different_registers(ref, ref_addr.index()); } @@ -113,43 +248,74 @@ Register ZLoadBarrierStubC2::ref() const { return _ref; } -Register ZLoadBarrierStubC2::tmp() const { - return _tmp; +Register ZLoadBarrierStubC2::result() const { + return ref(); } address ZLoadBarrierStubC2::slow_path() const { + const uint8_t barrier_data = _node->barrier_data(); DecoratorSet decorators = DECORATORS_NONE; - if (_barrier_data & ZLoadBarrierStrong) { + if (barrier_data & ZBarrierStrong) { decorators |= ON_STRONG_OOP_REF; } - if (_barrier_data & ZLoadBarrierWeak) { + if (barrier_data & ZBarrierWeak) { decorators |= ON_WEAK_OOP_REF; } - if (_barrier_data & ZLoadBarrierPhantom) { + if (barrier_data & ZBarrierPhantom) { decorators |= ON_PHANTOM_OOP_REF; } - if (_barrier_data & ZLoadBarrierNoKeepalive) { + if (barrier_data & ZBarrierNoKeepalive) { decorators |= AS_NO_KEEPALIVE; } return ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators); } -RegMask& ZLoadBarrierStubC2::live() const { - RegMask* mask = barrier_set_state()->live(_node); - assert(mask != NULL, "must be mach-node with barrier"); - return *mask; +void ZLoadBarrierStubC2::emit_code(MacroAssembler& masm) { + ZBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, static_cast(this)); } -Label* ZLoadBarrierStubC2::entry() { - // The _entry will never be bound when in_scratch_emit_size() is true. - // However, we still need to return a label that is not bound now, but - // will eventually be bound. Any label will do, as it will only act as - // a placeholder, so we return the _continuation label. - return Compile::current()->output()->in_scratch_emit_size() ? 
&_continuation : &_entry; +ZStoreBarrierStubC2* ZStoreBarrierStubC2::create(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic) { + ZStoreBarrierStubC2* const stub = new (Compile::current()->comp_arena()) ZStoreBarrierStubC2(node, ref_addr, new_zaddress, new_zpointer, is_native, is_atomic); + register_stub(stub); + + return stub; } -Label* ZLoadBarrierStubC2::continuation() { - return &_continuation; +ZStoreBarrierStubC2::ZStoreBarrierStubC2(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic) : + ZBarrierStubC2(node), + _ref_addr(ref_addr), + _new_zaddress(new_zaddress), + _new_zpointer(new_zpointer), + _is_native(is_native), + _is_atomic(is_atomic) { +} + +Address ZStoreBarrierStubC2::ref_addr() const { + return _ref_addr; +} + +Register ZStoreBarrierStubC2::new_zaddress() const { + return _new_zaddress; +} + +Register ZStoreBarrierStubC2::new_zpointer() const { + return _new_zpointer; +} + +bool ZStoreBarrierStubC2::is_native() const { + return _is_native; +} + +bool ZStoreBarrierStubC2::is_atomic() const { + return _is_atomic; +} + +Register ZStoreBarrierStubC2::result() const { + return noreg; +} + +void ZStoreBarrierStubC2::emit_code(MacroAssembler& masm) { + ZBarrierSet::assembler()->generate_c2_store_barrier_stub(&masm, static_cast(this)); } void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const { @@ -157,22 +323,23 @@ void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const { } void ZBarrierSetC2::late_barrier_analysis() const { - analyze_dominating_barriers(); compute_liveness_at_stubs(); + analyze_dominating_barriers(); } void ZBarrierSetC2::emit_stubs(CodeBuffer& cb) const { MacroAssembler masm(&cb); - GrowableArray* const stubs = barrier_set_state()->stubs(); + GrowableArray* const stubs = barrier_set_state()->stubs(); + barrier_set_state()->set_stubs_start_offset(masm.offset()); for (int i = 0; i < stubs->length(); i++) { // Make sure there is enough space in the code buffer - if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == NULL) { + if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == nullptr) { ciEnv::current()->record_failure("CodeCache is full"); return; } - ZBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i)); + stubs->at(i)->emit_code(masm); } masm.flush(); @@ -181,13 +348,13 @@ void ZBarrierSetC2::emit_stubs(CodeBuffer& cb) const { int ZBarrierSetC2::estimate_stub_size() const { Compile* const C = Compile::current(); BufferBlob* const blob = C->output()->scratch_buffer_blob(); - GrowableArray* const stubs = barrier_set_state()->stubs(); + GrowableArray* const stubs = barrier_set_state()->stubs(); int size = 0; for (int i = 0; i < stubs->length(); i++) { CodeBuffer cb(blob->content_begin(), (address)C->output()->scratch_locs_memory() - blob->content_begin()); MacroAssembler masm(&cb); - ZBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i)); + stubs->at(i)->emit_code(masm); size += cb.insts_size(); } @@ -195,23 +362,39 @@ int ZBarrierSetC2::estimate_stub_size() const { } static void set_barrier_data(C2Access& access) { - if (ZBarrierSet::barrier_needed(access.decorators(), access.type())) { - uint8_t barrier_data = 0; + if (!ZBarrierSet::barrier_needed(access.decorators(), access.type())) { + return; + } - if (access.decorators() & ON_PHANTOM_OOP_REF) { - barrier_data |= ZLoadBarrierPhantom; 
- } else if (access.decorators() & ON_WEAK_OOP_REF) { - barrier_data |= ZLoadBarrierWeak; - } else { - barrier_data |= ZLoadBarrierStrong; - } + if (access.decorators() & C2_TIGHTLY_COUPLED_ALLOC) { + access.set_barrier_data(ZBarrierElided); + return; + } - if (access.decorators() & AS_NO_KEEPALIVE) { - barrier_data |= ZLoadBarrierNoKeepalive; - } + uint8_t barrier_data = 0; - access.set_barrier_data(barrier_data); + if (access.decorators() & ON_PHANTOM_OOP_REF) { + barrier_data |= ZBarrierPhantom; + } else if (access.decorators() & ON_WEAK_OOP_REF) { + barrier_data |= ZBarrierWeak; + } else { + barrier_data |= ZBarrierStrong; } + + if (access.decorators() & IN_NATIVE) { + barrier_data |= ZBarrierNative; + } + + if (access.decorators() & AS_NO_KEEPALIVE) { + barrier_data |= ZBarrierNoKeepalive; + } + + access.set_barrier_data(barrier_data); +} + +Node* ZBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const { + set_barrier_data(access); + return BarrierSetC2::store_at_resolved(access, val); } Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const { @@ -252,16 +435,16 @@ bool ZBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, // This TypeFunc assumes a 64bit system static const TypeFunc* clone_type() { // Create input type (domain) - const Type** domain_fields = TypeTuple::fields(4); + const Type** const domain_fields = TypeTuple::fields(4); domain_fields[TypeFunc::Parms + 0] = TypeInstPtr::NOTNULL; // src domain_fields[TypeFunc::Parms + 1] = TypeInstPtr::NOTNULL; // dst domain_fields[TypeFunc::Parms + 2] = TypeLong::LONG; // size lower domain_fields[TypeFunc::Parms + 3] = Type::HALF; // size upper - const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + 4, domain_fields); + const TypeTuple* const domain = TypeTuple::make(TypeFunc::Parms + 4, domain_fields); // Create result type (range) - const Type** range_fields = TypeTuple::fields(0); - const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 0, range_fields); + const Type** const range_fields = TypeTuple::fields(0); + const TypeTuple* const range = TypeTuple::make(TypeFunc::Parms + 0, range_fields); return TypeFunc::make(domain, range); } @@ -270,9 +453,9 @@ static const TypeFunc* clone_type() { void ZBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const { Node* const src = ac->in(ArrayCopyNode::Src); - const TypeAryPtr* ary_ptr = src->get_ptr_type()->isa_aryptr(); + const TypeAryPtr* const ary_ptr = src->get_ptr_type()->isa_aryptr(); - if (ac->is_clone_array() && ary_ptr != NULL) { + if (ac->is_clone_array() && ary_ptr != nullptr) { BasicType bt = ary_ptr->elem()->array_element_basic_type(); if (is_reference_type(bt)) { // Clone object array @@ -282,11 +465,11 @@ void ZBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* a bt = T_LONG; } - Node* ctrl = ac->in(TypeFunc::Control); - Node* mem = ac->in(TypeFunc::Memory); - Node* src = ac->in(ArrayCopyNode::Src); + Node* const ctrl = ac->in(TypeFunc::Control); + Node* const mem = ac->in(TypeFunc::Memory); + Node* const src = ac->in(ArrayCopyNode::Src); Node* src_offset = ac->in(ArrayCopyNode::SrcPos); - Node* dest = ac->in(ArrayCopyNode::Dest); + Node* const dest = ac->in(ArrayCopyNode::Dest); Node* dest_offset = ac->in(ArrayCopyNode::DestPos); Node* length = ac->in(ArrayCopyNode::Length); @@ -296,7 +479,7 @@ void ZBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* a // to the first element in the array when cloning object arrays. 
Otherwise, load // barriers are applied to parts of the header. Also adjust the length accordingly. assert(src_offset == dest_offset, "should be equal"); - jlong offset = src_offset->get_long(); + const jlong offset = src_offset->get_long(); if (offset != arrayOopDesc::base_offset_in_bytes(T_OBJECT)) { assert(!UseCompressedClassPointers, "should only happen without compressed class pointers"); assert((arrayOopDesc::base_offset_in_bytes(T_OBJECT) - offset) == BytesPerLong, "unexpected offset"); @@ -305,16 +488,16 @@ void ZBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* a dest_offset = src_offset; } } - Node* payload_src = phase->basic_plus_adr(src, src_offset); - Node* payload_dst = phase->basic_plus_adr(dest, dest_offset); + Node* const payload_src = phase->basic_plus_adr(src, src_offset); + Node* const payload_dst = phase->basic_plus_adr(dest, dest_offset); - const char* copyfunc_name = "arraycopy"; - address copyfunc_addr = phase->basictype2arraycopy(bt, NULL, NULL, true, copyfunc_name, true); + const char* copyfunc_name = "arraycopy"; + const address copyfunc_addr = phase->basictype2arraycopy(bt, nullptr, nullptr, true, copyfunc_name, true); - const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; - const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type(); + const TypePtr* const raw_adr_type = TypeRawPtr::BOTTOM; + const TypeFunc* const call_type = OptoRuntime::fast_arraycopy_Type(); - Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP); + Node* const call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP); phase->transform_later(call); phase->igvn().replace_node(ac, call); @@ -378,92 +561,213 @@ static uint block_index(const Block* block, const Node* node) { return 0; } -void ZBarrierSetC2::analyze_dominating_barriers() const { - ResourceMark rm; - Compile* const C = Compile::current(); - PhaseCFG* const cfg = C->cfg(); - Block_List worklist; - Node_List mem_ops; - Node_List barrier_loads; +// Look through various node aliases +static const Node* look_through_node(const Node* node) { + while (node != nullptr) { + const Node* new_node = node; + if (node->is_Mach()) { + const MachNode* const node_mach = node->as_Mach(); + if (node_mach->ideal_Opcode() == Op_CheckCastPP) { + new_node = node->in(1); + } + if (node_mach->is_SpillCopy()) { + new_node = node->in(1); + } + } + if (new_node == node || new_node == nullptr) { + break; + } else { + node = new_node; + } + } - // Step 1 - Find accesses, and track them in lists - for (uint i = 0; i < cfg->number_of_blocks(); ++i) { - const Block* const block = cfg->get_block(i); - for (uint j = 0; j < block->number_of_nodes(); ++j) { - const Node* const node = block->get_node(j); - if (!node->is_Mach()) { + return node; +} + +// Whether the given offset is undefined. +static bool is_undefined(intptr_t offset) { + return offset == Type::OffsetTop; +} + +// Whether the given offset is unknown. +static bool is_unknown(intptr_t offset) { + return offset == Type::OffsetBot; +} + +// Whether the given offset is concrete (defined and compile-time known). +static bool is_concrete(intptr_t offset) { + return !is_undefined(offset) && !is_unknown(offset); +} + +// Compute base + offset components of the memory address accessed by mach. 
+// Return a node representing the base address, or null if the base cannot be +// found or the offset is undefined or a concrete negative value. If a non-null +// base is returned, the offset is a concrete, nonnegative value or unknown. +static const Node* get_base_and_offset(const MachNode* mach, intptr_t& offset) { + const TypePtr* adr_type = nullptr; + offset = 0; + const Node* base = mach->get_base_and_disp(offset, adr_type); + + if (base == nullptr || base == NodeSentinel) { + return nullptr; + } + + if (offset == 0 && base->is_Mach() && base->as_Mach()->ideal_Opcode() == Op_AddP) { + // The memory address is computed by 'base' and fed to 'mach' via an + // indirect memory operand (indicated by offset == 0). The ultimate base and + // offset can be fetched directly from the inputs and Ideal type of 'base'. + offset = base->bottom_type()->isa_oopptr()->offset(); + // Even if 'base' is not an Ideal AddP node anymore, Matcher::ReduceInst() + // guarantees that the base address is still available at the same slot. + base = base->in(AddPNode::Base); + assert(base != nullptr, ""); + } + + if (is_undefined(offset) || (is_concrete(offset) && offset < 0)) { + return nullptr; + } + + return look_through_node(base); +} + +// Whether a phi node corresponds to an array allocation. +// This test is incomplete: in some edge cases, it might return false even +// though the node does correspond to an array allocation. +static bool is_array_allocation(const Node* phi) { + precond(phi->is_Phi()); + // Check whether phi has a successor cast (CheckCastPP) to Java array pointer, + // possibly below spill copies and other cast nodes. Limit the exploration to + // a single path from the phi node consisting of these node types. + const Node* current = phi; + while (true) { + const Node* next = nullptr; + for (DUIterator_Fast imax, i = current->fast_outs(imax); i < imax; i++) { + if (!current->fast_out(i)->isa_Mach()) { continue; } - - MachNode* const mach = node->as_Mach(); - switch (mach->ideal_Opcode()) { - case Op_LoadP: - if ((mach->barrier_data() & ZLoadBarrierStrong) != 0) { - barrier_loads.push(mach); - } - if ((mach->barrier_data() & (ZLoadBarrierStrong | ZLoadBarrierNoKeepalive)) == - ZLoadBarrierStrong) { - mem_ops.push(mach); - } - break; - case Op_CompareAndExchangeP: - case Op_CompareAndSwapP: - case Op_GetAndSetP: - if ((mach->barrier_data() & ZLoadBarrierStrong) != 0) { - barrier_loads.push(mach); + const MachNode* succ = current->fast_out(i)->as_Mach(); + if (succ->ideal_Opcode() == Op_CheckCastPP) { + if (succ->get_ptr_type()->isa_aryptr()) { + // Cast to Java array pointer: phi corresponds to an array allocation. + return true; } - case Op_StoreP: - mem_ops.push(mach); - break; - - default: - break; + // Other cast: record as candidate for further exploration. + next = succ; + } else if (succ->is_SpillCopy() && next == nullptr) { + // Spill copy, and no better candidate found: record as candidate. + next = succ; } } + if (next == nullptr) { + // No evidence found that phi corresponds to an array allocation, and no + // candidates available to continue exploring. + return false; + } + // Continue exploring from the best candidate found. 
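+ // (The recorded candidate is either a non-array CheckCastPP or a spill copy, as selected above.)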
+ current = next; } + ShouldNotReachHere(); +} - // Step 2 - Find dominating accesses for each load - for (uint i = 0; i < barrier_loads.size(); i++) { - MachNode* const load = barrier_loads.at(i)->as_Mach(); - const TypePtr* load_adr_type = NULL; - intptr_t load_offset = 0; - const Node* const load_obj = load->get_base_and_disp(load_offset, load_adr_type); - Block* const load_block = cfg->get_block_for_node(load); - const uint load_index = block_index(load_block, load); - - for (uint j = 0; j < mem_ops.size(); j++) { - MachNode* mem = mem_ops.at(j)->as_Mach(); - const TypePtr* mem_adr_type = NULL; - intptr_t mem_offset = 0; - const Node* mem_obj = mem->get_base_and_disp(mem_offset, mem_adr_type); - Block* mem_block = cfg->get_block_for_node(mem); - uint mem_index = block_index(mem_block, mem); +// Match the phi node that connects a TLAB allocation fast path with its slowpath +static bool is_allocation(const Node* node) { + if (node->req() != 3) { + return false; + } + const Node* const fast_node = node->in(2); + if (!fast_node->is_Mach()) { + return false; + } + const MachNode* const fast_mach = fast_node->as_Mach(); + if (fast_mach->ideal_Opcode() != Op_LoadP) { + return false; + } + const TypePtr* const adr_type = nullptr; + intptr_t offset; + const Node* const base = get_base_and_offset(fast_mach, offset); + if (base == nullptr || !base->is_Mach() || !is_concrete(offset)) { + return false; + } + const MachNode* const base_mach = base->as_Mach(); + if (base_mach->ideal_Opcode() != Op_ThreadLocal) { + return false; + } + return offset == in_bytes(Thread::tlab_top_offset()); +} - if (load_obj == NodeSentinel || mem_obj == NodeSentinel || - load_obj == NULL || mem_obj == NULL || - load_offset < 0 || mem_offset < 0) { - continue; - } +static void elide_mach_barrier(MachNode* mach) { + mach->set_barrier_data(ZBarrierElided); +} - if (mem_obj != load_obj || mem_offset != load_offset) { - // Not the same addresses, not a candidate - continue; +void ZBarrierSetC2::analyze_dominating_barriers_impl(Node_List& accesses, Node_List& access_dominators) const { + Compile* const C = Compile::current(); + PhaseCFG* const cfg = C->cfg(); + + for (uint i = 0; i < accesses.size(); i++) { + MachNode* const access = accesses.at(i)->as_Mach(); + intptr_t access_offset; + const Node* const access_obj = get_base_and_offset(access, access_offset); + Block* const access_block = cfg->get_block_for_node(access); + const uint access_index = block_index(access_block, access); + + if (access_obj == nullptr) { + // No information available + continue; + } + + for (uint j = 0; j < access_dominators.size(); j++) { + const Node* const mem = access_dominators.at(j); + if (mem->is_Phi()) { + // Allocation node + if (mem != access_obj) { + continue; + } + if (is_unknown(access_offset) && !is_array_allocation(mem)) { + // The accessed address has an unknown offset, but the allocated + // object cannot be determined to be an array. Avoid eliding in this + // case, to be on the safe side. 
+ continue; + } + assert((is_concrete(access_offset) && access_offset >= 0) || (is_unknown(access_offset) && is_array_allocation(mem)), + "candidate allocation-dominated access offsets must be either concrete and nonnegative, or unknown (for array allocations only)"); + } else { + // Access node + const MachNode* const mem_mach = mem->as_Mach(); + intptr_t mem_offset; + const Node* const mem_obj = get_base_and_offset(mem_mach, mem_offset); + + if (mem_obj == nullptr || + !is_concrete(access_offset) || + !is_concrete(mem_offset)) { + // No information available + continue; + } + + if (mem_obj != access_obj || mem_offset != access_offset) { + // Not the same addresses, not a candidate + continue; + } + assert(is_concrete(access_offset) && access_offset >= 0, + "candidate non-allocation-dominated access offsets must be concrete and nonnegative"); } - if (load_block == mem_block) { + Block* mem_block = cfg->get_block_for_node(mem); + const uint mem_index = block_index(mem_block, mem); + + if (access_block == mem_block) { // Earlier accesses in the same block - if (mem_index < load_index && !block_has_safepoint(mem_block, mem_index + 1, load_index)) { - load->set_barrier_data(ZLoadBarrierElided); + if (mem_index < access_index && !block_has_safepoint(mem_block, mem_index + 1, access_index)) { + elide_mach_barrier(access); } - } else if (mem_block->dominates(load_block)) { + } else if (mem_block->dominates(access_block)) { // Dominating block? Look around for safepoints ResourceMark rm; Block_List stack; VectorSet visited; - stack.push(load_block); - bool safepoint_found = block_has_safepoint(load_block); + stack.push(access_block); + bool safepoint_found = block_has_safepoint(access_block); while (!safepoint_found && stack.size() > 0) { - Block* block = stack.pop(); + const Block* const block = stack.pop(); if (visited.test_set(block->_pre_order)) { continue; } @@ -477,19 +781,93 @@ void ZBarrierSetC2::analyze_dominating_barriers() const { // Push predecessor blocks for (uint p = 1; p < block->num_preds(); ++p) { - Block* pred = cfg->get_block_for_node(block->pred(p)); + Block* const pred = cfg->get_block_for_node(block->pred(p)); stack.push(pred); } } if (!safepoint_found) { - load->set_barrier_data(ZLoadBarrierElided); + elide_mach_barrier(access); } } } } } +void ZBarrierSetC2::analyze_dominating_barriers() const { + ResourceMark rm; + Compile* const C = Compile::current(); + PhaseCFG* const cfg = C->cfg(); + + Node_List loads; + Node_List load_dominators; + + Node_List stores; + Node_List store_dominators; + + Node_List atomics; + Node_List atomic_dominators; + + // Step 1 - Find accesses and allocations, and track them in lists + for (uint i = 0; i < cfg->number_of_blocks(); ++i) { + const Block* const block = cfg->get_block(i); + for (uint j = 0; j < block->number_of_nodes(); ++j) { + Node* const node = block->get_node(j); + if (node->is_Phi()) { + if (is_allocation(node)) { + load_dominators.push(node); + store_dominators.push(node); + // An allocation can't be considered to "dominate" an atomic operation. + // For example a CAS requires the memory location to be store-good. + // When you have a dominating store or atomic instruction, that is + // indeed ensured to be the case. However, as for allocations, the + // initialized memory location could be raw null, which isn't store-good. 
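+ // For that reason, allocations are pushed to load_dominators and store_dominators only, not to atomic_dominators.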
+ } + continue; + } else if (!node->is_Mach()) { + continue; + } + + MachNode* const mach = node->as_Mach(); + switch (mach->ideal_Opcode()) { + case Op_LoadP: + if ((mach->barrier_data() & ZBarrierStrong) != 0 && + (mach->barrier_data() & ZBarrierNoKeepalive) == 0) { + loads.push(mach); + load_dominators.push(mach); + } + break; + case Op_StoreP: + if (mach->barrier_data() != 0) { + stores.push(mach); + load_dominators.push(mach); + store_dominators.push(mach); + atomic_dominators.push(mach); + } + break; + case Op_CompareAndExchangeP: + case Op_CompareAndSwapP: + case Op_GetAndSetP: + if (mach->barrier_data() != 0) { + atomics.push(mach); + load_dominators.push(mach); + store_dominators.push(mach); + atomic_dominators.push(mach); + } + break; + + default: + break; + } + } + } + + // Step 2 - Find dominating accesses or allocations for each access + analyze_dominating_barriers_impl(loads, load_dominators); + analyze_dominating_barriers_impl(stores, store_dominators); + analyze_dominating_barriers_impl(atomics, atomic_dominators); +} + // == Reduced spilling optimization == void ZBarrierSetC2::compute_liveness_at_stubs() const { @@ -547,7 +925,7 @@ void ZBarrierSetC2::compute_liveness_at_stubs() const { // If this node tracks liveness, update it RegMask* const regs = barrier_set_state()->live(node); - if (regs != NULL) { + if (regs != nullptr) { regs->OR(new_live); } } @@ -565,19 +943,39 @@ void ZBarrierSetC2::compute_liveness_at_stubs() const { } } +void ZBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { + eliminate_gc_barrier_data(node); +} + +void ZBarrierSetC2::eliminate_gc_barrier_data(Node* node) const { + if (node->is_LoadStore()) { + LoadStoreNode* loadstore = node->as_LoadStore(); + loadstore->set_barrier_data(ZBarrierElided); + } else if (node->is_Mem()) { + MemNode* mem = node->as_Mem(); + mem->set_barrier_data(ZBarrierElided); + } +} + #ifndef PRODUCT void ZBarrierSetC2::dump_barrier_data(const MachNode* mach, outputStream* st) const { - if ((mach->barrier_data() & ZLoadBarrierStrong) != 0) { + if ((mach->barrier_data() & ZBarrierStrong) != 0) { st->print("strong "); } - if ((mach->barrier_data() & ZLoadBarrierWeak) != 0) { + if ((mach->barrier_data() & ZBarrierWeak) != 0) { st->print("weak "); } - if ((mach->barrier_data() & ZLoadBarrierPhantom) != 0) { + if ((mach->barrier_data() & ZBarrierPhantom) != 0) { st->print("phantom "); } - if ((mach->barrier_data() & ZLoadBarrierNoKeepalive) != 0) { + if ((mach->barrier_data() & ZBarrierNoKeepalive) != 0) { st->print("nokeepalive "); } + if ((mach->barrier_data() & ZBarrierNative) != 0) { + st->print("native "); + } + if ((mach->barrier_data() & ZBarrierElided) != 0) { + st->print("elided "); + } } #endif // !PRODUCT diff --git a/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp index 144ffb26bc420..a0f29fbc51076 100644 --- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp +++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp @@ -29,42 +29,91 @@ #include "opto/node.hpp" #include "utilities/growableArray.hpp" -const uint8_t ZLoadBarrierElided = 0; -const uint8_t ZLoadBarrierStrong = 1; -const uint8_t ZLoadBarrierWeak = 2; -const uint8_t ZLoadBarrierPhantom = 4; -const uint8_t ZLoadBarrierNoKeepalive = 8; +const uint8_t ZBarrierStrong = 1; +const uint8_t ZBarrierWeak = 2; +const uint8_t ZBarrierPhantom = 4; +const uint8_t ZBarrierNoKeepalive = 8; +const uint8_t ZBarrierNative = 16; +const uint8_t ZBarrierElided = 32; -class ZLoadBarrierStubC2 : public ArenaObj { -private: 
+class Block; +class MachNode; + +class MacroAssembler; + +class ZBarrierStubC2 : public ArenaObj { +protected: const MachNode* _node; - const Address _ref_addr; - const Register _ref; - const Register _tmp; - const uint8_t _barrier_data; Label _entry; Label _continuation; - ZLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data); +static void register_stub(ZBarrierStubC2* stub); +static void inc_trampoline_stubs_count(); +static int trampoline_stubs_count(); +static int stubs_start_offset(); public: - static ZLoadBarrierStubC2* create(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data); + ZBarrierStubC2(const MachNode* node); - Address ref_addr() const; - Register ref() const; - Register tmp() const; - address slow_path() const; RegMask& live() const; Label* entry(); Label* continuation(); + + virtual Register result() const = 0; + virtual void emit_code(MacroAssembler& masm) = 0; +}; + +class ZLoadBarrierStubC2 : public ZBarrierStubC2 { +private: + const Address _ref_addr; + const Register _ref; + +protected: + ZLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref); + +public: + static ZLoadBarrierStubC2* create(const MachNode* node, Address ref_addr, Register ref); + + Address ref_addr() const; + Register ref() const; + address slow_path() const; + + virtual Register result() const; + virtual void emit_code(MacroAssembler& masm); +}; + +class ZStoreBarrierStubC2 : public ZBarrierStubC2 { +private: + const Address _ref_addr; + const Register _new_zaddress; + const Register _new_zpointer; + const bool _is_native; + const bool _is_atomic; + +protected: + ZStoreBarrierStubC2(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic); + +public: + static ZStoreBarrierStubC2* create(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic); + + Address ref_addr() const; + Register new_zaddress() const; + Register new_zpointer() const; + bool is_native() const; + bool is_atomic() const; + + virtual Register result() const; + virtual void emit_code(MacroAssembler& masm); }; class ZBarrierSetC2 : public BarrierSetC2 { private: void compute_liveness_at_stubs() const; + void analyze_dominating_barriers_impl(Node_List& accesses, Node_List& access_dominators) const; void analyze_dominating_barriers() const; protected: + virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const; virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const; virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val, @@ -91,6 +140,8 @@ class ZBarrierSetC2 : public BarrierSetC2 { virtual void late_barrier_analysis() const; virtual int estimate_stub_size() const; virtual void emit_stubs(CodeBuffer& cb) const; + virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const; + virtual void eliminate_gc_barrier_data(Node* node) const; #ifndef PRODUCT virtual void dump_barrier_data(const MachNode* mach, outputStream* st) const; diff --git a/src/hotspot/share/gc/z/shared/vmStructs_z_shared.hpp b/src/hotspot/share/gc/z/shared/vmStructs_z_shared.hpp new file mode 100644 index 0000000000000..f0c03abbf7ac7 --- /dev/null +++ b/src/hotspot/share/gc/z/shared/vmStructs_z_shared.hpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_SHARED_VMSTRUCTS_Z_SHARED_HPP +#define SHARE_GC_Z_SHARED_VMSTRUCTS_Z_SHARED_HPP + +#include "gc/x/vmStructs_x.hpp" +#include "gc/z/vmStructs_z.hpp" + +#define VM_STRUCTS_Z_SHARED(nonstatic_field, volatile_nonstatic_field, static_field) \ + VM_STRUCTS_X( \ + nonstatic_field, \ + volatile_nonstatic_field, \ + static_field) \ + \ + VM_STRUCTS_Z( \ + nonstatic_field, \ + volatile_nonstatic_field, \ + static_field) + +#define VM_INT_CONSTANTS_Z_SHARED(declare_constant, declare_constant_with_value) \ + VM_INT_CONSTANTS_X( \ + declare_constant, \ + declare_constant_with_value) \ + \ + VM_INT_CONSTANTS_Z( \ + declare_constant, \ + declare_constant_with_value) + +#define VM_LONG_CONSTANTS_Z_SHARED(declare_constant) \ + VM_LONG_CONSTANTS_X( \ + declare_constant) \ + \ + VM_LONG_CONSTANTS_Z( \ + declare_constant) + +#define VM_TYPES_Z_SHARED(declare_type, declare_toplevel_type, declare_integer_type) \ + VM_TYPES_X( \ + declare_type, \ + declare_toplevel_type, \ + declare_integer_type) \ + \ + VM_TYPES_Z( \ + declare_type, \ + declare_toplevel_type, \ + declare_integer_type) + +#endif // SHARE_GC_Z_SHARED_VMSTRUCTS_Z_SHARED_HPP diff --git a/src/hotspot/share/gc/z/shared/zSharedArguments.cpp b/src/hotspot/share/gc/z/shared/zSharedArguments.cpp new file mode 100644 index 0000000000000..8a00a851acb08 --- /dev/null +++ b/src/hotspot/share/gc/z/shared/zSharedArguments.cpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/shared/gcArguments.hpp" +#include "gc/x/xArguments.hpp" +#include "gc/z/shared/zSharedArguments.hpp" +#include "gc/z/zArguments.hpp" +#include "runtime/globals.hpp" +#include "runtime/globals_extension.hpp" +#include "runtime/java.hpp" + +void ZSharedArguments::initialize_alignments() { + if (ZGenerational) { + ZArguments::initialize_alignments(); + } else { + XArguments::initialize_alignments(); + } +} + +void ZSharedArguments::initialize() { + GCArguments::initialize(); + + if (ZGenerational) { + ZArguments::initialize(); + } else { + XArguments::initialize(); + } +} + +size_t ZSharedArguments::heap_virtual_to_physical_ratio() { + if (ZGenerational) { + return ZArguments::heap_virtual_to_physical_ratio(); + } else { + return XArguments::heap_virtual_to_physical_ratio(); + } +} + +size_t ZSharedArguments::conservative_max_heap_alignment() { + return 0; +} + +CollectedHeap* ZSharedArguments::create_heap() { + if (ZGenerational) { + return ZArguments::create_heap(); + } else { + return XArguments::create_heap(); + } +} + +bool ZSharedArguments::is_supported() const { + if (ZGenerational) { + return ZArguments::is_os_supported(); + } else { + return XArguments::is_os_supported(); + } +} diff --git a/src/hotspot/share/gc/z/shared/zSharedArguments.hpp b/src/hotspot/share/gc/z/shared/zSharedArguments.hpp new file mode 100644 index 0000000000000..74659f581b918 --- /dev/null +++ b/src/hotspot/share/gc/z/shared/zSharedArguments.hpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_SHARED_ZSHAREDARGUMENTS_HPP +#define SHARE_GC_Z_SHARED_ZSHAREDARGUMENTS_HPP + +#include "gc/shared/gcArguments.hpp" + +class CollectedHeap; + +class ZSharedArguments : public GCArguments { +private: + virtual void initialize_alignments(); + + virtual void initialize(); + virtual size_t conservative_max_heap_alignment(); + virtual size_t heap_virtual_to_physical_ratio(); + virtual CollectedHeap* create_heap(); + + virtual bool is_supported() const; + + bool is_os_supported() const; +}; + +#endif // SHARE_GC_Z_SHARED_ZSHAREDARGUMENTS_HPP diff --git a/src/hotspot/share/gc/z/shared/z_shared_globals.hpp b/src/hotspot/share/gc/z/shared/z_shared_globals.hpp new file mode 100644 index 0000000000000..34e5e875d4880 --- /dev/null +++ b/src/hotspot/share/gc/z/shared/z_shared_globals.hpp @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_SHARED_Z_SHARED_GLOBALS_HPP +#define SHARE_GC_Z_SHARED_Z_SHARED_GLOBALS_HPP + +#include "gc/x/x_globals.hpp" +#include "gc/z/z_globals.hpp" + +#define GC_Z_SHARED_FLAGS(develop, \ + develop_pd, \ + product, \ + product_pd, \ + notproduct, \ + range, \ + constraint) \ + \ + product(double, ZAllocationSpikeTolerance, 2.0, \ + "Allocation spike tolerance factor") \ + \ + /* Updated in arguments parsing to ZGenerational ? 5.0 : 25.0 */ \ + product(double, ZFragmentationLimit, 0 /* ignored */, \ + "Maximum allowed heap fragmentation") \ + range(0, 100) \ + \ + product(size_t, ZMarkStackSpaceLimit, 8*G, \ + "Maximum number of bytes allocated for mark stacks") \ + range(32*M, 1024*G) \ + \ + product(double, ZCollectionInterval, 0, \ + "Force GC at a fixed time interval (in seconds). " \ + "Backwards compatible alias for ZCollectionIntervalMajor") \ + \ + product(bool, ZProactive, true, \ + "Enable proactive GC cycles") \ + \ + product(bool, ZUncommit, true, \ + "Uncommit unused memory") \ + \ + product(uintx, ZUncommitDelay, 5 * 60, \ + "Uncommit memory if it has been unused for the specified " \ + "amount of time (in seconds)") \ + \ + product(uint, ZStatisticsInterval, 10, DIAGNOSTIC, \ + "Time between statistics print outs (in seconds)") \ + range(1, (uint)-1) \ + \ + product(bool, ZStressRelocateInPlace, false, DIAGNOSTIC, \ + "Always relocate pages in-place") \ + \ + product(bool, ZVerifyRoots, trueInDebug, DIAGNOSTIC, \ + "Verify roots") \ + \ + product(bool, ZVerifyObjects, false, DIAGNOSTIC, \ + "Verify objects") \ + \ + product(bool, ZVerifyMarking, trueInDebug, DIAGNOSTIC, \ + "Verify marking stacks") \ + \ + product(bool, ZVerifyForwarding, false, DIAGNOSTIC, \ + "Verify forwarding tables") \ + \ + GC_X_FLAGS( \ + develop, \ + develop_pd, \ + product, \ + product_pd, \ + notproduct, \ + range, \ + constraint) \ + \ + GC_Z_FLAGS( \ + develop, \ + develop_pd, \ + product, \ + product_pd, \ + notproduct, \ + range, \ + constraint) + +// end of GC_Z_SHARED_FLAGS + +#endif // SHARE_GC_Z_SHARED_Z_SHARED_GLOBALS_HPP diff --git a/src/hotspot/share/gc/z/vmStructs_z.cpp b/src/hotspot/share/gc/z/vmStructs_z.cpp index c86d11c8189b0..cb3948851d604 100644 --- a/src/hotspot/share/gc/z/vmStructs_z.cpp +++ b/src/hotspot/share/gc/z/vmStructs_z.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,16 +23,17 @@ #include "precompiled.hpp" #include "gc/z/vmStructs_z.hpp" +#include "gc/z/zAddress.hpp" ZGlobalsForVMStructs::ZGlobalsForVMStructs() : - _ZGlobalPhase(&ZGlobalPhase), - _ZGlobalSeqNum(&ZGlobalSeqNum), _ZAddressOffsetMask(&ZAddressOffsetMask), - _ZAddressMetadataMask(&ZAddressMetadataMask), - _ZAddressMetadataFinalizable(&ZAddressMetadataFinalizable), - _ZAddressGoodMask(&ZAddressGoodMask), - _ZAddressBadMask(&ZAddressBadMask), - _ZAddressWeakBadMask(&ZAddressWeakBadMask), + _ZPointerLoadGoodMask(&ZPointerLoadGoodMask), + _ZPointerLoadBadMask(&ZPointerLoadBadMask), + _ZPointerLoadShift(const_cast(&ZPointerLoadShift)), + _ZPointerMarkGoodMask(&ZPointerMarkGoodMask), + _ZPointerMarkBadMask(&ZPointerMarkBadMask), + _ZPointerStoreGoodMask(&ZPointerStoreGoodMask), + _ZPointerStoreBadMask(&ZPointerStoreBadMask), _ZObjectAlignmentSmallShift(&ZObjectAlignmentSmallShift), _ZObjectAlignmentSmall(&ZObjectAlignmentSmall) { } diff --git a/src/hotspot/share/gc/z/vmStructs_z.hpp b/src/hotspot/share/gc/z/vmStructs_z.hpp index 3c0eb9f74ea88..47fa6ac3021e6 100644 --- a/src/hotspot/share/gc/z/vmStructs_z.hpp +++ b/src/hotspot/share/gc/z/vmStructs_z.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ #include "gc/z/zGranuleMap.hpp" #include "gc/z/zHeap.hpp" #include "gc/z/zPageAllocator.hpp" +#include "gc/z/zPageType.hpp" #include "utilities/macros.hpp" // Expose some ZGC globals to the SA agent. 
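A minimal sketch of the invariants a consumer of these exported globals (such as the SA agent) can assume, mirroring ZGlobalsPointers::set_good_masks() later in this patch; the function name and parameter list are placeholders for illustration, not part of the patch.

#include <cstdint>

// Good masks are nested (load-good is a subset of mark-good, which is a
// subset of store-good), and each bad mask is the good mask XOR:ed with
// that barrier's metadata bit mask.
inline bool masks_are_consistent(uintptr_t load_good,  uintptr_t load_bad,  uintptr_t load_metadata,
                                 uintptr_t mark_good,  uintptr_t mark_bad,  uintptr_t mark_metadata,
                                 uintptr_t store_good, uintptr_t store_bad, uintptr_t store_metadata) {
  return (mark_good & load_good) == load_good
      && (store_good & mark_good) == mark_good
      && load_bad  == (load_good  ^ load_metadata)
      && mark_bad  == (mark_good  ^ mark_metadata)
      && store_bad == (store_good ^ store_metadata);
}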
@@ -41,16 +42,17 @@ class ZGlobalsForVMStructs { ZGlobalsForVMStructs(); - uint32_t* _ZGlobalPhase; + uintptr_t* _ZAddressOffsetMask; - uint32_t* _ZGlobalSeqNum; + uintptr_t* _ZPointerLoadGoodMask; + uintptr_t* _ZPointerLoadBadMask; + size_t* _ZPointerLoadShift; - uintptr_t* _ZAddressOffsetMask; - uintptr_t* _ZAddressMetadataMask; - uintptr_t* _ZAddressMetadataFinalizable; - uintptr_t* _ZAddressGoodMask; - uintptr_t* _ZAddressBadMask; - uintptr_t* _ZAddressWeakBadMask; + uintptr_t* _ZPointerMarkGoodMask; + uintptr_t* _ZPointerMarkBadMask; + + uintptr_t* _ZPointerStoreGoodMask; + uintptr_t* _ZPointerStoreBadMask; const int* _ZObjectAlignmentSmallShift; const int* _ZObjectAlignmentSmall; @@ -60,30 +62,29 @@ typedef ZGranuleMap ZGranuleMapForPageTable; typedef ZGranuleMap ZGranuleMapForForwarding; typedef ZAttachedArray ZAttachedArrayForForwarding; -#define VM_STRUCTS_ZGC(nonstatic_field, volatile_nonstatic_field, static_field) \ +#define VM_STRUCTS_Z(nonstatic_field, volatile_nonstatic_field, static_field) \ static_field(ZGlobalsForVMStructs, _instance_p, ZGlobalsForVMStructs*) \ - nonstatic_field(ZGlobalsForVMStructs, _ZGlobalPhase, uint32_t*) \ - nonstatic_field(ZGlobalsForVMStructs, _ZGlobalSeqNum, uint32_t*) \ - nonstatic_field(ZGlobalsForVMStructs, _ZAddressOffsetMask, uintptr_t*) \ - nonstatic_field(ZGlobalsForVMStructs, _ZAddressMetadataMask, uintptr_t*) \ - nonstatic_field(ZGlobalsForVMStructs, _ZAddressMetadataFinalizable, uintptr_t*) \ - nonstatic_field(ZGlobalsForVMStructs, _ZAddressGoodMask, uintptr_t*) \ - nonstatic_field(ZGlobalsForVMStructs, _ZAddressBadMask, uintptr_t*) \ - nonstatic_field(ZGlobalsForVMStructs, _ZAddressWeakBadMask, uintptr_t*) \ + \ + nonstatic_field(ZGlobalsForVMStructs, _ZAddressOffsetMask, uintptr_t*) \ + nonstatic_field(ZGlobalsForVMStructs, _ZPointerLoadGoodMask, uintptr_t*) \ + nonstatic_field(ZGlobalsForVMStructs, _ZPointerLoadBadMask, uintptr_t*) \ + nonstatic_field(ZGlobalsForVMStructs, _ZPointerLoadShift, size_t*) \ + nonstatic_field(ZGlobalsForVMStructs, _ZPointerMarkGoodMask, uintptr_t*) \ + nonstatic_field(ZGlobalsForVMStructs, _ZPointerMarkBadMask, uintptr_t*) \ + nonstatic_field(ZGlobalsForVMStructs, _ZPointerStoreGoodMask, uintptr_t*) \ + nonstatic_field(ZGlobalsForVMStructs, _ZPointerStoreBadMask, uintptr_t*) \ nonstatic_field(ZGlobalsForVMStructs, _ZObjectAlignmentSmallShift, const int*) \ - nonstatic_field(ZGlobalsForVMStructs, _ZObjectAlignmentSmall, const int*) \ + nonstatic_field(ZGlobalsForVMStructs, _ZObjectAlignmentSmall, const int*) \ \ nonstatic_field(ZCollectedHeap, _heap, ZHeap) \ \ nonstatic_field(ZHeap, _page_allocator, ZPageAllocator) \ nonstatic_field(ZHeap, _page_table, ZPageTable) \ - nonstatic_field(ZHeap, _forwarding_table, ZForwardingTable) \ - nonstatic_field(ZHeap, _relocate, ZRelocate) \ \ - nonstatic_field(ZPage, _type, const uint8_t) \ - nonstatic_field(ZPage, _seqnum, uint32_t) \ + nonstatic_field(ZPage, _type, const ZPageType) \ + volatile_nonstatic_field(ZPage, _seqnum, uint32_t) \ nonstatic_field(ZPage, _virtual, const ZVirtualMemory) \ - volatile_nonstatic_field(ZPage, _top, uintptr_t) \ + volatile_nonstatic_field(ZPage, _top, zoffset_end) \ \ nonstatic_field(ZPageAllocator, _max_capacity, const size_t) \ volatile_nonstatic_field(ZPageAllocator, _capacity, size_t) \ @@ -96,8 +97,8 @@ typedef ZAttachedArray ZAttachedArrayForForwardin \ nonstatic_field(ZForwardingTable, _map, ZGranuleMapForForwarding) \ \ - nonstatic_field(ZVirtualMemory, _start, const uintptr_t) \ - nonstatic_field(ZVirtualMemory, _end, const 
uintptr_t) \ + nonstatic_field(ZVirtualMemory, _start, const zoffset) \ + nonstatic_field(ZVirtualMemory, _end, const zoffset_end) \ \ nonstatic_field(ZForwarding, _virtual, const ZVirtualMemory) \ nonstatic_field(ZForwarding, _object_alignment_shift, const size_t) \ @@ -106,15 +107,14 @@ typedef ZAttachedArray ZAttachedArrayForForwardin nonstatic_field(ZForwardingEntry, _entry, uint64_t) \ nonstatic_field(ZAttachedArrayForForwarding, _length, const size_t) -#define VM_INT_CONSTANTS_ZGC(declare_constant, declare_constant_with_value) \ - declare_constant(ZPhaseRelocate) \ - declare_constant(ZPageTypeSmall) \ - declare_constant(ZPageTypeMedium) \ - declare_constant(ZPageTypeLarge) \ +#define VM_INT_CONSTANTS_Z(declare_constant, declare_constant_with_value) \ + declare_constant(ZPageType::small) \ + declare_constant(ZPageType::medium) \ + declare_constant(ZPageType::large) \ declare_constant(ZObjectAlignmentMediumShift) \ declare_constant(ZObjectAlignmentLargeShift) -#define VM_LONG_CONSTANTS_ZGC(declare_constant) \ +#define VM_LONG_CONSTANTS_Z(declare_constant) \ declare_constant(ZGranuleSizeShift) \ declare_constant(ZPageSizeSmallShift) \ declare_constant(ZPageSizeMediumShift) \ @@ -123,12 +123,15 @@ typedef ZAttachedArray ZAttachedArrayForForwardin declare_constant(ZAddressOffsetMask) \ declare_constant(ZAddressOffsetMax) -#define VM_TYPES_ZGC(declare_type, declare_toplevel_type, declare_integer_type) \ +#define VM_TYPES_Z(declare_type, declare_toplevel_type, declare_integer_type) \ + declare_toplevel_type(zoffset) \ + declare_toplevel_type(zoffset_end) \ declare_toplevel_type(ZGlobalsForVMStructs) \ declare_type(ZCollectedHeap, CollectedHeap) \ declare_toplevel_type(ZHeap) \ declare_toplevel_type(ZRelocate) \ declare_toplevel_type(ZPage) \ + declare_toplevel_type(ZPageType) \ declare_toplevel_type(ZPageAllocator) \ declare_toplevel_type(ZPageTable) \ declare_toplevel_type(ZAttachedArrayForForwarding) \ diff --git a/src/hotspot/share/gc/z/zAbort.cpp b/src/hotspot/share/gc/z/zAbort.cpp index 1ac18ce997066..938e77b7d8007 100644 --- a/src/hotspot/share/gc/z/zAbort.cpp +++ b/src/hotspot/share/gc/z/zAbort.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,5 +28,5 @@ volatile bool ZAbort::_should_abort = false; void ZAbort::abort() { - Atomic::release_store_fence(&_should_abort, true); + Atomic::store(&_should_abort, true); } diff --git a/src/hotspot/share/gc/z/zAbort.hpp b/src/hotspot/share/gc/z/zAbort.hpp index f87bca8c0a389..925b0a79ac371 100644 --- a/src/hotspot/share/gc/z/zAbort.hpp +++ b/src/hotspot/share/gc/z/zAbort.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -35,4 +35,12 @@ class ZAbort : public AllStatic { static void abort(); }; +// Macro to execute a abortion check +#define abortpoint() \ + do { \ + if (ZAbort::should_abort()) { \ + return; \ + } \ + } while (false) + #endif // SHARE_GC_Z_ZABORT_HPP diff --git a/src/hotspot/share/gc/z/zAbort.inline.hpp b/src/hotspot/share/gc/z/zAbort.inline.hpp index a8af1b7f720c8..0037f7ec4488d 100644 --- a/src/hotspot/share/gc/z/zAbort.inline.hpp +++ b/src/hotspot/share/gc/z/zAbort.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ #include "runtime/atomic.hpp" inline bool ZAbort::should_abort() { - return Atomic::load_acquire(&_should_abort); + return Atomic::load(&_should_abort); } #endif // SHARE_GC_Z_ZABORT_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zAddress.cpp b/src/hotspot/share/gc/z/zAddress.cpp index cfa7c04d3d261..d1c199fad070c 100644 --- a/src/hotspot/share/gc/z/zAddress.cpp +++ b/src/hotspot/share/gc/z/zAddress.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,37 +22,129 @@ */ #include "precompiled.hpp" -#include "gc/z/zAddress.hpp" -#include "gc/z/zGlobals.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zVerify.hpp" +#include "oops/oopsHierarchy.hpp" +#include "runtime/java.hpp" +#include "utilities/formatBuffer.hpp" -void ZAddress::set_good_mask(uintptr_t mask) { - ZAddressGoodMask = mask; - ZAddressBadMask = ZAddressGoodMask ^ ZAddressMetadataMask; - ZAddressWeakBadMask = (ZAddressGoodMask | ZAddressMetadataRemapped | ZAddressMetadataFinalizable) ^ ZAddressMetadataMask; +size_t ZAddressHeapBaseShift; +size_t ZAddressHeapBase; + +size_t ZAddressOffsetBits; +uintptr_t ZAddressOffsetMask; +size_t ZAddressOffsetMax; + +uintptr_t ZPointerRemapped; +uintptr_t ZPointerRemappedYoungMask; +uintptr_t ZPointerRemappedOldMask; +uintptr_t ZPointerMarkedYoung; +uintptr_t ZPointerMarkedOld; +uintptr_t ZPointerFinalizable; +uintptr_t ZPointerRemembered; + +uintptr_t ZPointerLoadGoodMask; +uintptr_t ZPointerLoadBadMask; + +uintptr_t ZPointerMarkGoodMask; +uintptr_t ZPointerMarkBadMask; + +uintptr_t ZPointerStoreGoodMask; +uintptr_t ZPointerStoreBadMask; + +uintptr_t ZPointerVectorLoadBadMask[8]; +uintptr_t ZPointerVectorStoreBadMask[8]; +uintptr_t ZPointerVectorStoreGoodMask[8]; + +static uint32_t* ZPointerCalculateStoreGoodMaskLowOrderBitsAddr() { + const uintptr_t addr = reinterpret_cast(&ZPointerStoreGoodMask); + return reinterpret_cast(addr + ZPointerStoreGoodMaskLowOrderBitsOffset); +} + +uint32_t* ZPointerStoreGoodMaskLowOrderBitsAddr = ZPointerCalculateStoreGoodMaskLowOrderBitsAddr(); + +static void set_vector_mask(uintptr_t vector_mask[], uintptr_t mask) { + for (int i = 0; i < 8; ++i) { + vector_mask[i] = mask; + } +} + +void ZGlobalsPointers::set_good_masks() { + ZPointerRemapped = ZPointerRemappedOldMask & ZPointerRemappedYoungMask; + + ZPointerLoadGoodMask = 
ZPointer::remap_bits(ZPointerRemapped); + ZPointerMarkGoodMask = ZPointerLoadGoodMask | ZPointerMarkedYoung | ZPointerMarkedOld; + ZPointerStoreGoodMask = ZPointerMarkGoodMask | ZPointerRemembered; + + ZPointerLoadBadMask = ZPointerLoadGoodMask ^ ZPointerLoadMetadataMask; + ZPointerMarkBadMask = ZPointerMarkGoodMask ^ ZPointerMarkMetadataMask; + ZPointerStoreBadMask = ZPointerStoreGoodMask ^ ZPointerStoreMetadataMask; + + set_vector_mask(ZPointerVectorLoadBadMask, ZPointerLoadBadMask); + set_vector_mask(ZPointerVectorStoreBadMask, ZPointerStoreBadMask); + set_vector_mask(ZPointerVectorStoreGoodMask, ZPointerStoreGoodMask); + + pd_set_good_masks(); +} + +static void initialize_check_oop_function() { +#ifdef CHECK_UNHANDLED_OOPS + if (ZVerifyOops) { + // Enable extra verification of usages of oops in oopsHierarchy.hpp + check_oop_function = [](oopDesc* obj) { + (void)to_zaddress(obj); + }; + } +#endif } -void ZAddress::initialize() { +void ZGlobalsPointers::initialize() { ZAddressOffsetBits = ZPlatformAddressOffsetBits(); ZAddressOffsetMask = (((uintptr_t)1 << ZAddressOffsetBits) - 1) << ZAddressOffsetShift; ZAddressOffsetMax = (uintptr_t)1 << ZAddressOffsetBits; - ZAddressMetadataShift = ZPlatformAddressMetadataShift(); - ZAddressMetadataMask = (((uintptr_t)1 << ZAddressMetadataBits) - 1) << ZAddressMetadataShift; + // Check max supported heap size + if (MaxHeapSize > ZAddressOffsetMax) { + vm_exit_during_initialization( + err_msg("Java heap too large (max supported heap size is " SIZE_FORMAT "G)", + ZAddressOffsetMax / G)); + } - ZAddressMetadataMarked0 = (uintptr_t)1 << (ZAddressMetadataShift + 0); - ZAddressMetadataMarked1 = (uintptr_t)1 << (ZAddressMetadataShift + 1); - ZAddressMetadataRemapped = (uintptr_t)1 << (ZAddressMetadataShift + 2); - ZAddressMetadataFinalizable = (uintptr_t)1 << (ZAddressMetadataShift + 3); + ZAddressHeapBaseShift = ZPlatformAddressHeapBaseShift(); + ZAddressHeapBase = (uintptr_t)1 << ZAddressHeapBaseShift; + + ZPointerRemappedYoungMask = ZPointerRemapped10 | ZPointerRemapped00; + ZPointerRemappedOldMask = ZPointerRemapped01 | ZPointerRemapped00; + ZPointerMarkedYoung = ZPointerMarkedYoung0; + ZPointerMarkedOld = ZPointerMarkedOld0; + ZPointerFinalizable = ZPointerFinalizable0; + ZPointerRemembered = ZPointerRemembered0; + + set_good_masks(); + + initialize_check_oop_function(); +} + +void ZGlobalsPointers::flip_young_mark_start() { + ZPointerMarkedYoung ^= (ZPointerMarkedYoung0 | ZPointerMarkedYoung1); + ZPointerRemembered ^= (ZPointerRemembered0 | ZPointerRemembered1); + set_good_masks(); +} - ZAddressMetadataMarked = ZAddressMetadataMarked0; - set_good_mask(ZAddressMetadataRemapped); +void ZGlobalsPointers::flip_young_relocate_start() { + ZPointerRemappedYoungMask ^= ZPointerRemappedMask; + set_good_masks(); } -void ZAddress::flip_to_marked() { - ZAddressMetadataMarked ^= (ZAddressMetadataMarked0 | ZAddressMetadataMarked1); - set_good_mask(ZAddressMetadataMarked); +void ZGlobalsPointers::flip_old_mark_start() { + ZPointerMarkedOld ^= (ZPointerMarkedOld0 | ZPointerMarkedOld1); + ZPointerFinalizable ^= (ZPointerFinalizable0 | ZPointerFinalizable1); + set_good_masks(); } -void ZAddress::flip_to_remapped() { - set_good_mask(ZAddressMetadataRemapped); +void ZGlobalsPointers::flip_old_relocate_start() { + ZPointerRemappedOldMask ^= ZPointerRemappedMask; + set_good_masks(); } diff --git a/src/hotspot/share/gc/z/zAddress.hpp b/src/hotspot/share/gc/z/zAddress.hpp index 2908c37bbe6f1..86b9550c04374 100644 --- a/src/hotspot/share/gc/z/zAddress.hpp +++ 
b/src/hotspot/share/gc/z/zAddress.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,42 +26,288 @@ #include "memory/allStatic.hpp" #include "utilities/globalDefinitions.hpp" +#include CPU_HEADER(gc/z/zAddress) + +// One bit that denotes where the heap start. All uncolored +// oops have this bit set, plus an offset within the heap. +extern uintptr_t ZAddressHeapBase; +extern uintptr_t ZAddressHeapBaseShift; + +// Describes the maximal offset inside the heap. +extern size_t ZAddressOffsetBits; +const size_t ZAddressOffsetShift = 0; +extern uintptr_t ZAddressOffsetMask; +extern size_t ZAddressOffsetMax; + +// Layout of metadata bits in colored pointer / zpointer. +// +// A zpointer is a combination of the address bits (heap base bit + offset) +// and two low-order metadata bytes, with the following layout: +// +// RRRRMMmmFFrr0000 +// **** : Used by load barrier +// ********** : Used by mark barrier +// ************ : Used by store barrier +// **** : Reserved bits +// +// The table below describes what each color does. +// +// +-------------+-------------------+--------------------------+ +// | Bit pattern | Description | Included colors | +// +-------------+-------------------+--------------------------+ +// | rr | Remembered bits | Remembered[0, 1] | +// +-------------+-------------------+--------------------------+ +// | FF | Finalizable bits | Finalizable[0, 1] | +// +-------------+-------------------+--------------------------+ +// | mm | Marked young bits | MarkedYoung[0, 1] | +// +-------------+-------------------+--------------------------+ +// | MM | Marked old bits | MarkedOld[0, 1] | +// +-------------+-------------------+--------------------------+ +// | RRRR | Remapped bits | Remapped[00, 01, 10, 11] | +// +-------------+-------------------+--------------------------+ +// +// The low order zero address bits sometimes overlap with the high order zero metadata +// bits, depending on the remapped bit being set. +// +// vvv- overlapping address and metadata zeros +// aaa...aaa0001MMmmFFrr0000 = Remapped00 zpointer +// +// vv-- overlapping address and metadata zeros +// aaa...aaa00010MMmmFFrr0000 = Remapped01 zpointer +// +// v--- overlapping address and metadata zero +// aaa...aaa000100MMmmFFrr0000 = Remapped10 zpointer +// +// ---- no overlapping address and metadata zeros +// aaa...aaa0001000MMmmFFrr0000 = Remapped11 zpointer +// +// The overlapping is performed because the x86 JIT-compiled load barriers expect the +// address bits to start right after the load-good bit. It allows combining the good +// bit check and unmasking into a single speculative shift instruction. On AArch64 we +// don't do this, and hence there are no overlapping address and metadata zeros there. +// +// The remapped bits are notably not grouped into two sets of bits, one for the young +// collection and one for the old collection, like the other bits. The reason is that +// the load barrier is only compatible with bit patterns where there is a single zero in +// its bits of operation (the load metadata bit mask). Instead, the single bit that we +// set encodes the combined state of a conceptual RemappedYoung[0, 1] and +// RemappedOld[0, 1] pair. 
The encoding scheme is that the shift of the load good bit, +// minus the shift of the load metadata bit start encodes the numbers 0, 1, 2 and 3. +// These numbers in binary correspond to 00, 01, 10 and 11. The low order bit in said +// numbers correspond to the simulated RemappedYoung[0, 1] value, and the high order bit +// corresponds to the simulated RemappedOld[0, 1] value. On AArch64, the remap bits +// of zpointers are the complement of this bit. So there are 3 good bits and one bad bit +// instead. This lends itself better to AArch64 instructions. +// +// We decide the bit to be taken by having the RemappedYoungMask and RemappedOldMask +// variables, which alternate between what two bits they accept for their corresponding +// old and young phase. The Remapped bit is chosen by taking the intersection of those +// two variables. +// +// RemappedOldMask alternates between these two bit patterns: +// +// RemappedOld0 => 0011 +// RemappedOld1 => 1100 +// +// RemappedYoungMask alternates between these two bit patterns: +// +// RemappedYoung0 => 0101 +// RemappedYoung1 => 1010 +// +// The corresponding intersections look like this: +// +// RemappedOld0 & RemappedYoung0 = 0001 = Remapped00 +// RemappedOld0 & RemappedYoung1 = 0010 = Remapped01 +// RemappedOld1 & RemappedYoung0 = 0100 = Remapped10 +// RemappedOld1 & RemappedYoung1 = 1000 = Remapped11 + +constexpr uintptr_t z_pointer_mask(size_t shift, size_t bits) { + return (((uintptr_t)1 << bits) - 1) << shift; +} + +constexpr uintptr_t z_pointer_bit(size_t shift, size_t offset) { + return (uintptr_t)1 << (shift + offset); +} + +// Reserved bits +const size_t ZPointerReservedShift = 0; +const size_t ZPointerReservedBits = 4; +const uintptr_t ZPointerReservedMask = z_pointer_mask(ZPointerReservedShift, ZPointerReservedBits); + +const uintptr_t ZPointerReserved0 = z_pointer_bit(ZPointerReservedShift, 0); +const uintptr_t ZPointerReserved1 = z_pointer_bit(ZPointerReservedShift, 1); +const uintptr_t ZPointerReserved2 = z_pointer_bit(ZPointerReservedShift, 2); +const uintptr_t ZPointerReserved3 = z_pointer_bit(ZPointerReservedShift, 3); + +// Remembered set bits +const size_t ZPointerRememberedShift = ZPointerReservedShift + ZPointerReservedBits; +const size_t ZPointerRememberedBits = 2; +const uintptr_t ZPointerRememberedMask = z_pointer_mask(ZPointerRememberedShift, ZPointerRememberedBits); + +const uintptr_t ZPointerRemembered0 = z_pointer_bit(ZPointerRememberedShift, 0); +const uintptr_t ZPointerRemembered1 = z_pointer_bit(ZPointerRememberedShift, 1); + +// Marked bits +const size_t ZPointerMarkedShift = ZPointerRememberedShift + ZPointerRememberedBits; +const size_t ZPointerMarkedBits = 6; +const uintptr_t ZPointerMarkedMask = z_pointer_mask(ZPointerMarkedShift, ZPointerMarkedBits); + +const uintptr_t ZPointerFinalizable0 = z_pointer_bit(ZPointerMarkedShift, 0); +const uintptr_t ZPointerFinalizable1 = z_pointer_bit(ZPointerMarkedShift, 1); +const uintptr_t ZPointerMarkedYoung0 = z_pointer_bit(ZPointerMarkedShift, 2); +const uintptr_t ZPointerMarkedYoung1 = z_pointer_bit(ZPointerMarkedShift, 3); +const uintptr_t ZPointerMarkedOld0 = z_pointer_bit(ZPointerMarkedShift, 4); +const uintptr_t ZPointerMarkedOld1 = z_pointer_bit(ZPointerMarkedShift, 5); + +// Remapped bits +const size_t ZPointerRemappedShift = ZPointerMarkedShift + ZPointerMarkedBits; +const size_t ZPointerRemappedBits = 4; +const uintptr_t ZPointerRemappedMask = z_pointer_mask(ZPointerRemappedShift, ZPointerRemappedBits); + +const uintptr_t ZPointerRemapped00 = 
z_pointer_bit(ZPointerRemappedShift, 0); +const uintptr_t ZPointerRemapped01 = z_pointer_bit(ZPointerRemappedShift, 1); +const uintptr_t ZPointerRemapped10 = z_pointer_bit(ZPointerRemappedShift, 2); +const uintptr_t ZPointerRemapped11 = z_pointer_bit(ZPointerRemappedShift, 3); + +// The shift table is tightly coupled with the zpointer layout given above +constexpr int ZPointerLoadShiftTable[] = { + ZPointerRemappedShift + ZPointerRemappedShift, // [0] Null + ZPointerRemappedShift + 1, // [1] Remapped00 + ZPointerRemappedShift + 2, // [2] Remapped01 + 0, + ZPointerRemappedShift + 3, // [4] Remapped10 + 0, + 0, + 0, + ZPointerRemappedShift + 4 // [8] Remapped11 +}; + +// Barrier metadata masks +const uintptr_t ZPointerLoadMetadataMask = ZPointerRemappedMask; +const uintptr_t ZPointerMarkMetadataMask = ZPointerLoadMetadataMask | ZPointerMarkedMask; +const uintptr_t ZPointerStoreMetadataMask = ZPointerMarkMetadataMask | ZPointerRememberedMask; +const uintptr_t ZPointerAllMetadataMask = ZPointerStoreMetadataMask; + +// The current expected bit +extern uintptr_t ZPointerRemapped; +extern uintptr_t ZPointerMarkedOld; +extern uintptr_t ZPointerMarkedYoung; +extern uintptr_t ZPointerFinalizable; +extern uintptr_t ZPointerRemembered; + +// The current expected remap bit for the young (or old) collection is either of two bits. +// The other collection alternates the bits, so we need to use a mask. +extern uintptr_t ZPointerRemappedYoungMask; +extern uintptr_t ZPointerRemappedOldMask; + +// Good/bad masks +extern uintptr_t ZPointerLoadGoodMask; +extern uintptr_t ZPointerLoadBadMask; + +extern uintptr_t ZPointerMarkGoodMask; +extern uintptr_t ZPointerMarkBadMask; + +extern uintptr_t ZPointerStoreGoodMask; +extern uintptr_t ZPointerStoreBadMask; + +extern uintptr_t ZPointerVectorLoadBadMask[8]; +extern uintptr_t ZPointerVectorStoreBadMask[8]; +extern uintptr_t ZPointerVectorStoreGoodMask[8]; + +// The bad mask is 64 bit. Its low order 32 bits contain all possible value combinations +// that this mask will have. Therefore, the memory where the 32 low order bits are stored +// can be used as a 32 bit GC epoch counter, that has a different bit pattern every time +// the bad mask is flipped. This provides a pointer to such 32 bits. 
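A minimal sketch of why ZPointerStoreGoodMaskLowOrderBitsOffset is 0 on little-endian and 4 on big-endian targets, mirroring ZPointerCalculateStoreGoodMaskLowOrderBitsAddr() in zAddress.cpp earlier in this patch; the function name below is a placeholder, and the extern declared just after this sketch is the pointer it computes.

#include <cstdint>

// The low 32 bits of a 64-bit word start at byte offset 0 on little-endian
// targets and at byte offset 4 on big-endian targets, so reading through the
// returned pointer observes a value that changes whenever the store-good
// mask (and with it the GC epoch) flips.
inline uint32_t* low_order_bits_addr(uint64_t* word, bool big_endian) {
  const uintptr_t base = reinterpret_cast<uintptr_t>(word);
  return reinterpret_cast<uint32_t*>(base + (big_endian ? 4 : 0));
}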
+extern uint32_t* ZPointerStoreGoodMaskLowOrderBitsAddr; +const int ZPointerStoreGoodMaskLowOrderBitsOffset = LITTLE_ENDIAN_ONLY(0) BIG_ENDIAN_ONLY(4); + +// Offsets +// - Virtual address range offsets +// - Physical memory offsets +enum class zoffset : uintptr_t {}; +// Offsets including end of offset range +enum class zoffset_end : uintptr_t {}; + +// Colored oop +enum class zpointer : uintptr_t { null = 0 }; + +// Uncolored oop - safe to dereference +enum class zaddress : uintptr_t { null = 0 }; + +// Uncolored oop - not safe to dereference, could point uncommitted memory +enum class zaddress_unsafe : uintptr_t { null = 0 }; + +class ZOffset : public AllStatic { +public: + static zaddress address(zoffset offset); + static zaddress_unsafe address_unsafe(zoffset offset); +}; + +class ZPointer : public AllStatic { +public: + static zaddress uncolor(zpointer ptr); + static zaddress uncolor_store_good(zpointer ptr); + static zaddress_unsafe uncolor_unsafe(zpointer ptr); + static zpointer set_remset_bits(zpointer ptr); + + static bool is_load_bad(zpointer ptr); + static bool is_load_good(zpointer ptr); + static bool is_load_good_or_null(zpointer ptr); + + static bool is_old_load_good(zpointer ptr); + static bool is_young_load_good(zpointer ptr); + + static bool is_mark_bad(zpointer ptr); + static bool is_mark_good(zpointer ptr); + static bool is_mark_good_or_null(zpointer ptr); + + static bool is_store_bad(zpointer ptr); + static bool is_store_good(zpointer ptr); + static bool is_store_good_or_null(zpointer ptr); + + static bool is_marked_finalizable(zpointer ptr); + static bool is_marked_old(zpointer ptr); + static bool is_marked_young(zpointer ptr); + static bool is_marked_any_old(zpointer ptr); + static bool is_remapped(zpointer ptr); + static bool is_remembered_exact(zpointer ptr); + + static constexpr int load_shift_lookup_index(uintptr_t value); + static constexpr int load_shift_lookup(uintptr_t value); + static uintptr_t remap_bits(uintptr_t colored); +}; class ZAddress : public AllStatic { +public: + static zpointer color(zaddress addr, uintptr_t color); + static zpointer color(zaddress_unsafe addr, uintptr_t color); + + static zoffset offset(zaddress addr); + static zoffset offset(zaddress_unsafe addr); + + static zpointer load_good(zaddress addr, zpointer prev); + static zpointer finalizable_good(zaddress addr, zpointer prev); + static zpointer mark_good(zaddress addr, zpointer prev); + static zpointer mark_old_good(zaddress addr, zpointer prev); + static zpointer mark_young_good(zaddress addr, zpointer prev); + static zpointer store_good(zaddress addr); + static zpointer store_good_or_null(zaddress addr); +}; + +class ZGlobalsPointers : public AllStatic { friend class ZAddressTest; private: - static void set_good_mask(uintptr_t mask); + static void set_good_masks(); + static void pd_set_good_masks(); public: static void initialize(); - static void flip_to_marked(); - static void flip_to_remapped(); - - static bool is_null(uintptr_t value); - static bool is_bad(uintptr_t value); - static bool is_good(uintptr_t value); - static bool is_good_or_null(uintptr_t value); - static bool is_weak_bad(uintptr_t value); - static bool is_weak_good(uintptr_t value); - static bool is_weak_good_or_null(uintptr_t value); - static bool is_marked(uintptr_t value); - static bool is_marked_or_null(uintptr_t value); - static bool is_finalizable(uintptr_t value); - static bool is_finalizable_good(uintptr_t value); - static bool is_remapped(uintptr_t value); - static bool is_in(uintptr_t value); - - static 
uintptr_t offset(uintptr_t value); - static uintptr_t good(uintptr_t value); - static uintptr_t good_or_null(uintptr_t value); - static uintptr_t finalizable_good(uintptr_t value); - static uintptr_t marked(uintptr_t value); - static uintptr_t marked0(uintptr_t value); - static uintptr_t marked1(uintptr_t value); - static uintptr_t remapped(uintptr_t value); - static uintptr_t remapped_or_null(uintptr_t value); + static void flip_young_mark_start(); + static void flip_young_relocate_start(); + static void flip_old_mark_start(); + static void flip_old_relocate_start(); }; #endif // SHARE_GC_Z_ZADDRESS_HPP diff --git a/src/hotspot/share/gc/z/zAddress.inline.hpp b/src/hotspot/share/gc/z/zAddress.inline.hpp index a151e7182e49a..7088a71ef7b0c 100644 --- a/src/hotspot/share/gc/z/zAddress.inline.hpp +++ b/src/hotspot/share/gc/z/zAddress.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,24 +26,450 @@ #include "gc/z/zAddress.hpp" -#include "gc/z/zGlobals.hpp" +#include "gc/shared/gc_globals.hpp" +#include "oops/oop.hpp" +#include "oops/oopsHierarchy.hpp" +#include "runtime/atomic.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" #include "utilities/powerOfTwo.hpp" +#include CPU_HEADER_INLINE(gc/z/zAddress) -inline bool ZAddress::is_null(uintptr_t value) { - return value == 0; +// zoffset functions + +inline uintptr_t untype(zoffset offset) { + const uintptr_t value = static_cast(offset); + assert(value < ZAddressOffsetMax, "must have no other bits"); + return value; +} + +inline uintptr_t untype(zoffset_end offset) { + const uintptr_t value = static_cast(offset); + assert(value <= ZAddressOffsetMax, "must have no other bits"); + return value; +} + +inline zoffset to_zoffset(uintptr_t value) { + assert(value < ZAddressOffsetMax, "must have no other bits"); + return zoffset(value); +} + +inline zoffset to_zoffset(zoffset_end offset) { + const uintptr_t value = untype(offset); + return to_zoffset(value); +} + +inline zoffset operator+(zoffset offset, size_t size) { + return to_zoffset(untype(offset) + size); +} + +inline zoffset& operator+=(zoffset& offset, size_t size) { + offset = to_zoffset(untype(offset) + size); + return offset; +} + +inline zoffset operator-(zoffset offset, size_t size) { + const uintptr_t value = untype(offset) - size; + return to_zoffset(value); +} + +inline size_t operator-(zoffset left, zoffset right) { + const size_t diff = untype(left) - untype(right); + assert(diff < ZAddressOffsetMax, "Underflow"); + return diff; +} + +inline zoffset& operator-=(zoffset& offset, size_t size) { + offset = to_zoffset(untype(offset) - size); + return offset; +} + +inline bool to_zoffset_end(zoffset_end* result, zoffset_end start, size_t size) { + const uintptr_t value = untype(start) + size; + if (value <= ZAddressOffsetMax) { + *result = zoffset_end(value); + return true; + } + return false; +} + +inline zoffset_end to_zoffset_end(zoffset start, size_t size) { + const uintptr_t value = untype(start) + size; + assert(value <= ZAddressOffsetMax, "Overflow start: " PTR_FORMAT " size: " PTR_FORMAT " value: " PTR_FORMAT, + untype(start), size, value); + return zoffset_end(value); +} + +inline zoffset_end to_zoffset_end(uintptr_t value) { + assert(value <= ZAddressOffsetMax, 
"Overflow"); + return zoffset_end(value); +} + +inline zoffset_end to_zoffset_end(zoffset offset) { + return zoffset_end(untype(offset)); +} + +inline bool operator!=(zoffset first, zoffset_end second) { + return untype(first) != untype(second); } -inline bool ZAddress::is_bad(uintptr_t value) { - return value & ZAddressBadMask; +inline bool operator!=(zoffset_end first, zoffset second) { + return untype(first) != untype(second); } -inline bool ZAddress::is_good(uintptr_t value) { - return !is_bad(value) && !is_null(value); +inline bool operator==(zoffset first, zoffset_end second) { + return untype(first) == untype(second); } -inline bool ZAddress::is_good_or_null(uintptr_t value) { +inline bool operator==(zoffset_end first, zoffset second) { + return untype(first) == untype(second); +} + +inline bool operator<(zoffset_end first, zoffset second) { + return untype(first) < untype(second); +} + +inline bool operator<(zoffset first, zoffset_end second) { + return untype(first) < untype(second); +} + +inline bool operator>(zoffset first, zoffset_end second) { + return untype(first) > untype(second); +} + +inline bool operator>=(zoffset first, zoffset_end second) { + return untype(first) >= untype(second); +} + +inline size_t operator-(zoffset_end first, zoffset second) { + return untype(first) - untype(second); +} + +inline zoffset_end operator-(zoffset_end first, size_t second) { + return to_zoffset_end(untype(first) - second); +} + +inline size_t operator-(zoffset_end first, zoffset_end second) { + return untype(first) - untype(second); +} + +inline zoffset_end& operator-=(zoffset_end& offset, size_t size) { + offset = to_zoffset_end(untype(offset) - size); + return offset; +} + +inline zoffset_end& operator+=(zoffset_end& offset, size_t size) { + offset = to_zoffset_end(untype(offset) + size); + return offset; +} + +// zpointer functions + +#define report_is_valid_failure(str) assert(!assert_on_failure, "%s: " PTR_FORMAT, str, value); + +inline bool is_valid(zpointer ptr, bool assert_on_failure = false) { + if (assert_on_failure && !ZVerifyOops) { + return true; + } + + const uintptr_t value = static_cast(ptr); + + if (value == 0) { + // Accept raw null + return false; + } + + if ((value & ~ZPointerStoreMetadataMask) != 0) { +#ifndef AARCH64 + const int index = ZPointer::load_shift_lookup_index(value); + if (index != 0 && !is_power_of_2(index)) { + report_is_valid_failure("Invalid remap bits"); + return false; + } +#endif + + const int shift = ZPointer::load_shift_lookup(value); + if (!is_power_of_2(value & (ZAddressHeapBase << shift))) { + report_is_valid_failure("Missing heap base"); + return false; + } + + if (((value >> shift) & 7) != 0) { + report_is_valid_failure("Alignment bits should not be set"); + return false; + } + } + + const uintptr_t load_metadata = ZPointer::remap_bits(value); + if (!is_power_of_2(load_metadata)) { + report_is_valid_failure("Must have exactly one load metadata bit"); + return false; + } + + const uintptr_t store_metadata = (value & (ZPointerStoreMetadataMask ^ ZPointerLoadMetadataMask)); + const uintptr_t marked_young_metadata = store_metadata & (ZPointerMarkedYoung0 | ZPointerMarkedYoung1); + const uintptr_t marked_old_metadata = store_metadata & (ZPointerMarkedOld0 | ZPointerMarkedOld1 | + ZPointerFinalizable0 | ZPointerFinalizable1); + const uintptr_t remembered_metadata = store_metadata & (ZPointerRemembered0 | ZPointerRemembered1); + if (!is_power_of_2(marked_young_metadata)) { + report_is_valid_failure("Must have exactly one marked young metadata 
bit"); + return false; + } + + if (!is_power_of_2(marked_old_metadata)) { + report_is_valid_failure("Must have exactly one marked old metadata bit"); + return false; + } + + if (remembered_metadata == 0) { + report_is_valid_failure("Must have at least one remembered metadata bit set"); + return false; + } + + if ((marked_young_metadata | marked_old_metadata | remembered_metadata) != store_metadata) { + report_is_valid_failure("Must have exactly three sets of store metadata bits"); + return false; + } + + if ((value & ZPointerReservedMask) != 0) { + report_is_valid_failure("Dirty reserved bits"); + return false; + } + + return true; +} + +inline void assert_is_valid(zpointer ptr) { + DEBUG_ONLY(is_valid(ptr, true /* assert_on_failure */);) +} + +inline uintptr_t untype(zpointer ptr) { + return static_cast(ptr); +} + +inline zpointer to_zpointer(uintptr_t value) { + assert_is_valid(zpointer(value)); + return zpointer(value); +} + +inline zpointer to_zpointer(oopDesc* o) { + return ::to_zpointer(uintptr_t(o)); +} + +// Is it exactly null? +inline bool is_null(zpointer ptr) { + return ptr == zpointer::null; +} + +inline bool is_null_any(zpointer ptr) { + const uintptr_t raw_addr = untype(ptr); + return (raw_addr & ~ZPointerAllMetadataMask) == 0; +} + +// Is it null - colored or not? +inline bool is_null_assert_load_good(zpointer ptr) { + const bool result = is_null_any(ptr); + assert(!result || ZPointer::is_load_good(ptr), "Got bad colored null"); + return result; +} + +// zaddress functions + +inline bool is_null(zaddress addr) { + return addr == zaddress::null; +} + +inline bool is_valid(zaddress addr, bool assert_on_failure = false) { + if (assert_on_failure && !ZVerifyOops) { + return true; + } + + if (is_null(addr)) { + // Null is valid + return true; + } + + const uintptr_t value = static_cast(addr); + + if (value & 0x7) { + // No low order bits + report_is_valid_failure("Has low-order bits set"); + return false; + } + + if ((value & ZAddressHeapBase) == 0) { + // Must have a heap base bit + report_is_valid_failure("Missing heap base"); + return false; + } + + if (value >= (ZAddressHeapBase + ZAddressOffsetMax)) { + // Must not point outside of the heap's virtual address range + report_is_valid_failure("Address outside of the heap"); + return false; + } + + return true; +} + +inline void assert_is_valid(zaddress addr) { + DEBUG_ONLY(is_valid(addr, true /* assert_on_failure */);) +} + +inline uintptr_t untype(zaddress addr) { + return static_cast(addr); +} + +#ifdef ASSERT +inline void dereferenceable_test(zaddress addr) { + if (ZVerifyOops && !is_null(addr)) { + // Intentionally crash if the address is pointing into unmapped memory + (void)Atomic::load((int*)(uintptr_t)addr); + } +} +#endif + +inline zaddress to_zaddress(uintptr_t value) { + const zaddress addr = zaddress(value); + assert_is_valid(addr); + DEBUG_ONLY(dereferenceable_test(addr)); + return addr; +} + +inline zaddress to_zaddress(oopDesc* o) { + return to_zaddress(uintptr_t(o)); +} + +inline oop to_oop(zaddress addr) { + const oop obj = cast_to_oop(addr); + assert(!ZVerifyOops || oopDesc::is_oop_or_null(obj), "Broken oop: " PTR_FORMAT " [" PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT "]", + p2i(obj), + *(uintptr_t*)(untype(addr) + 0x00), + *(uintptr_t*)(untype(addr) + 0x08), + *(uintptr_t*)(untype(addr) + 0x10), + *(uintptr_t*)(untype(addr) + 0x18)); + return obj; +} + +inline zaddress operator+(zaddress addr, size_t size) { + return to_zaddress(untype(addr) + size); +} + +inline size_t operator-(zaddress left, 
zaddress right) { + assert(left >= right, "Unexpected order - left: " PTR_FORMAT " right: " PTR_FORMAT, untype(left), untype(right)); + return untype(left) - untype(right); +} + +// zaddress_unsafe functions + +inline bool is_null(zaddress_unsafe addr) { + return addr == zaddress_unsafe::null; +} + +inline bool is_valid(zaddress_unsafe addr, bool assert_on_failure = false) { + return is_valid(zaddress(addr), assert_on_failure); +} + +inline void assert_is_valid(zaddress_unsafe addr) { + DEBUG_ONLY(is_valid(addr, true /* assert_on_failure */);) +} + + +inline uintptr_t untype(zaddress_unsafe addr) { + return static_cast(addr); +} + +// The zaddress_unsafe type denotes that this +// memory isn't guaranteed to be dereferenceable. +// The containing page could have been reclaimed +// and/or uncommitted. +// +// The zaddress type denotes that this memory can +// be dereferenced (runtime verified). +// +// This function can be used when the caller guarantees +// that addr points to dereferenceable memory. Examples +// of cases after which this function can be used: +// +// 1) A load good check on the colored pointer that addr was created from +// 2) A load barrier has self-healed the pointer in addr +// 3) A check that the addr doesn't belong to a relocation set. Since addr +// could denote two different objects in the two generations, a check +// against the colored pointer, that addr was created from, is needed to +// figure out what relocation set to look in. +// 4) From the relocation code +inline zaddress safe(zaddress_unsafe addr) { + return to_zaddress(untype(addr)); +} + +inline zaddress_unsafe to_zaddress_unsafe(uintptr_t value) { + const zaddress_unsafe addr = zaddress_unsafe(value); + assert_is_valid(addr); + return addr; +} + +inline zaddress_unsafe unsafe(zaddress addr) { + return to_zaddress_unsafe(untype(addr)); +} + +inline zaddress_unsafe to_zaddress_unsafe(oop o) { + return to_zaddress_unsafe(cast_from_oop(o)); +} + +inline zaddress_unsafe operator+(zaddress_unsafe offset, size_t size) { + return to_zaddress_unsafe(untype(offset) + size); +} + +inline size_t operator-(zaddress_unsafe left, zaddress_unsafe right) { + return untype(left) - untype(right); +} + +// ZOffset functions + +inline zaddress ZOffset::address(zoffset offset) { + return to_zaddress(untype(offset) | ZAddressHeapBase); +} + +inline zaddress_unsafe ZOffset::address_unsafe(zoffset offset) { + return to_zaddress_unsafe(untype(offset) | ZAddressHeapBase); +} + +// ZPointer functions + +inline zaddress ZPointer::uncolor(zpointer ptr) { + assert(ZPointer::is_load_good(ptr) || is_null_any(ptr), + "Should be load good when handed out: " PTR_FORMAT, untype(ptr)); + const uintptr_t raw_addr = untype(ptr); + return to_zaddress(raw_addr >> ZPointer::load_shift_lookup(raw_addr)); +} + +inline zaddress ZPointer::uncolor_store_good(zpointer ptr) { + assert(ZPointer::is_store_good(ptr), "Should be store good: " PTR_FORMAT, untype(ptr)); + return uncolor(ptr); +} + +inline zaddress_unsafe ZPointer::uncolor_unsafe(zpointer ptr) { + assert(ZPointer::is_store_bad(ptr), "Unexpected ptr"); + const uintptr_t raw_addr = untype(ptr); + return to_zaddress_unsafe(raw_addr >> ZPointer::load_shift_lookup(raw_addr)); +} + +inline zpointer ZPointer::set_remset_bits(zpointer ptr) { + uintptr_t raw_addr = untype(ptr); + assert(raw_addr != 0, "raw nulls should have been purged in promotion to old gen"); + raw_addr |= ZPointerRemembered0 | ZPointerRemembered1; + return to_zpointer(raw_addr); +} + +inline bool ZPointer::is_load_bad(zpointer 
ptr) { + return untype(ptr) & ZPointerLoadBadMask; +} + +inline bool ZPointer::is_load_good(zpointer ptr) { + return !is_load_bad(ptr) && !is_null(ptr); +} + +inline bool ZPointer::is_load_good_or_null(zpointer ptr) { // Checking if an address is "not bad" is an optimized version of // checking if it's "good or null", which eliminates an explicit // null check. However, the implicit null check only checks that @@ -51,87 +477,179 @@ inline bool ZAddress::is_good_or_null(uintptr_t value) { // This means that an address without mask bits would pass through // the barrier as if it was null. This should be harmless as such // addresses should ever be passed through the barrier. - const bool result = !is_bad(value); - assert((is_good(value) || is_null(value)) == result, "Bad address"); + const bool result = !is_load_bad(ptr); + assert((is_load_good(ptr) || is_null(ptr)) == result, "Bad address"); return result; } -inline bool ZAddress::is_weak_bad(uintptr_t value) { - return value & ZAddressWeakBadMask; +inline bool ZPointer::is_young_load_good(zpointer ptr) { + assert(!is_null(ptr), "not supported"); + return (remap_bits(untype(ptr)) & ZPointerRemappedYoungMask) != 0; +} + +inline bool ZPointer::is_old_load_good(zpointer ptr) { + assert(!is_null(ptr), "not supported"); + return (remap_bits(untype(ptr)) & ZPointerRemappedOldMask) != 0; } -inline bool ZAddress::is_weak_good(uintptr_t value) { - return !is_weak_bad(value) && !is_null(value); +inline bool ZPointer::is_mark_bad(zpointer ptr) { + return untype(ptr) & ZPointerMarkBadMask; } -inline bool ZAddress::is_weak_good_or_null(uintptr_t value) { - return !is_weak_bad(value); +inline bool ZPointer::is_mark_good(zpointer ptr) { + return !is_mark_bad(ptr) && !is_null(ptr); } -inline bool ZAddress::is_marked(uintptr_t value) { - return value & ZAddressMetadataMarked; +inline bool ZPointer::is_mark_good_or_null(zpointer ptr) { + // Checking if an address is "not bad" is an optimized version of + // checking if it's "good or null", which eliminates an explicit + // null check. However, the implicit null check only checks that + // the mask bits are zero, not that the entire address is zero. + // This means that an address without mask bits would pass through + // the barrier as if it was null. This should be harmless as such + // addresses should ever be passed through the barrier. + const bool result = !is_mark_bad(ptr); + assert((is_mark_good(ptr) || is_null(ptr)) == result, "Bad address"); + return result; } -inline bool ZAddress::is_marked_or_null(uintptr_t value) { - return is_marked(value) || is_null(value); +inline bool ZPointer::is_store_bad(zpointer ptr) { + return untype(ptr) & ZPointerStoreBadMask; } -inline bool ZAddress::is_finalizable(uintptr_t value) { - return value & ZAddressMetadataFinalizable; +inline bool ZPointer::is_store_good(zpointer ptr) { + return !is_store_bad(ptr) && !is_null(ptr); } -inline bool ZAddress::is_finalizable_good(uintptr_t value) { - return is_finalizable(value) && is_good(value ^ ZAddressMetadataFinalizable); +inline bool ZPointer::is_store_good_or_null(zpointer ptr) { + // Checking if an address is "not bad" is an optimized version of + // checking if it's "good or null", which eliminates an explicit + // null check. However, the implicit null check only checks that + // the mask bits are zero, not that the entire address is zero. + // This means that an address without mask bits would pass through + // the barrier as if it was null. 
This should be harmless as such + // addresses should ever be passed through the barrier. + const bool result = !is_store_bad(ptr); + assert((is_store_good(ptr) || is_null(ptr)) == result, "Bad address"); + return result; } -inline bool ZAddress::is_remapped(uintptr_t value) { - return value & ZAddressMetadataRemapped; +inline bool ZPointer::is_marked_finalizable(zpointer ptr) { + assert(!is_null(ptr), "must not be null"); + return untype(ptr) & ZPointerFinalizable; } -inline bool ZAddress::is_in(uintptr_t value) { - // Check that exactly one non-offset bit is set - if (!is_power_of_2(value & ~ZAddressOffsetMask)) { - return false; - } +inline bool ZPointer::is_marked_old(zpointer ptr) { + return untype(ptr) & (ZPointerMarkedOld); +} - // Check that one of the non-finalizable metadata is set - return value & (ZAddressMetadataMask & ~ZAddressMetadataFinalizable); +inline bool ZPointer::is_marked_young(zpointer ptr) { + return untype(ptr) & (ZPointerMarkedYoung); } -inline uintptr_t ZAddress::offset(uintptr_t value) { - return value & ZAddressOffsetMask; +inline bool ZPointer::is_marked_any_old(zpointer ptr) { + return untype(ptr) & (ZPointerMarkedOld | + ZPointerFinalizable); } -inline uintptr_t ZAddress::good(uintptr_t value) { - return offset(value) | ZAddressGoodMask; +inline bool ZPointer::is_remapped(zpointer ptr) { + assert(!is_null(ptr), "must not be null"); + return remap_bits(untype(ptr)) & ZPointerRemapped; } -inline uintptr_t ZAddress::good_or_null(uintptr_t value) { - return is_null(value) ? 0 : good(value); +inline bool ZPointer::is_remembered_exact(zpointer ptr) { + assert(!is_null(ptr), "must not be null"); + return (untype(ptr) & ZPointerRemembered) == ZPointerRemembered; } -inline uintptr_t ZAddress::finalizable_good(uintptr_t value) { - return offset(value) | ZAddressMetadataFinalizable | ZAddressGoodMask; +inline constexpr int ZPointer::load_shift_lookup_index(uintptr_t value) { + return (value >> ZPointerRemappedShift) & ((1 << ZPointerRemappedBits) - 1); } -inline uintptr_t ZAddress::marked(uintptr_t value) { - return offset(value) | ZAddressMetadataMarked; +// ZAddress functions + +inline zpointer ZAddress::color(zaddress addr, uintptr_t color) { + return to_zpointer((untype(addr) << ZPointer::load_shift_lookup(color)) | color); } -inline uintptr_t ZAddress::marked0(uintptr_t value) { - return offset(value) | ZAddressMetadataMarked0; +inline zpointer ZAddress::color(zaddress_unsafe addr, uintptr_t color) { + return to_zpointer((untype(addr) << ZPointer::load_shift_lookup(color)) | color); } -inline uintptr_t ZAddress::marked1(uintptr_t value) { - return offset(value) | ZAddressMetadataMarked1; +inline zoffset ZAddress::offset(zaddress addr) { + return to_zoffset(untype(addr) & ZAddressOffsetMask); +} + +inline zoffset ZAddress::offset(zaddress_unsafe addr) { + return to_zoffset(untype(addr) & ZAddressOffsetMask); +} + +inline zpointer color_null() { + return ZAddress::color(zaddress::null, ZPointerStoreGoodMask | ZPointerRememberedMask); +} + +inline zpointer ZAddress::load_good(zaddress addr, zpointer prev) { + if (is_null_any(prev)) { + return color_null(); + } + + const uintptr_t non_load_bits_mask = ZPointerLoadMetadataMask ^ ZPointerAllMetadataMask; + const uintptr_t non_load_prev_bits = untype(prev) & non_load_bits_mask; + return color(addr, ZPointerLoadGoodMask | non_load_prev_bits | ZPointerRememberedMask); +} + +inline zpointer ZAddress::finalizable_good(zaddress addr, zpointer prev) { + if (is_null_any(prev)) { + return color_null(); + } + + const uintptr_t 
non_mark_bits_mask = ZPointerMarkMetadataMask ^ ZPointerAllMetadataMask; + const uintptr_t non_mark_prev_bits = untype(prev) & non_mark_bits_mask; + return color(addr, ZPointerLoadGoodMask | ZPointerMarkedYoung | ZPointerFinalizable | non_mark_prev_bits | ZPointerRememberedMask); +} + +inline zpointer ZAddress::mark_good(zaddress addr, zpointer prev) { + if (is_null_any(prev)) { + return color_null(); + } + + const uintptr_t non_mark_bits_mask = ZPointerMarkMetadataMask ^ ZPointerAllMetadataMask; + const uintptr_t non_mark_prev_bits = untype(prev) & non_mark_bits_mask; + return color(addr, ZPointerLoadGoodMask | ZPointerMarkedYoung | ZPointerMarkedOld | non_mark_prev_bits | ZPointerRememberedMask); +} + +inline zpointer ZAddress::mark_old_good(zaddress addr, zpointer prev) { + if (is_null_any(prev)) { + return color_null(); + } + + const uintptr_t prev_color = untype(prev); + + const uintptr_t young_marked_mask = ZPointerMarkedYoung0 | ZPointerMarkedYoung1; + const uintptr_t young_marked = prev_color & young_marked_mask; + + return color(addr, ZPointerLoadGoodMask | ZPointerMarkedOld | young_marked | ZPointerRememberedMask); +} + +inline zpointer ZAddress::mark_young_good(zaddress addr, zpointer prev) { + if (is_null_any(prev)) { + return color_null(); + } + + const uintptr_t prev_color = untype(prev); + + const uintptr_t old_marked_mask = ZPointerMarkedMask ^ (ZPointerMarkedYoung0 | ZPointerMarkedYoung1); + const uintptr_t old_marked = prev_color & old_marked_mask; + + return color(addr, ZPointerLoadGoodMask | ZPointerMarkedYoung | old_marked | ZPointerRememberedMask); } -inline uintptr_t ZAddress::remapped(uintptr_t value) { - return offset(value) | ZAddressMetadataRemapped; +inline zpointer ZAddress::store_good(zaddress addr) { + return color(addr, ZPointerStoreGoodMask); } -inline uintptr_t ZAddress::remapped_or_null(uintptr_t value) { - return is_null(value) ? 0 : remapped(value); +inline zpointer ZAddress::store_good_or_null(zaddress addr) { + return is_null(addr) ? zpointer::null : store_good(addr); } #endif // SHARE_GC_Z_ZADDRESS_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zAddressSpaceLimit.cpp b/src/hotspot/share/gc/z/zAddressSpaceLimit.cpp index 518fa07e0d4d5..41ae19566b980 100644 --- a/src/hotspot/share/gc/z/zAddressSpaceLimit.cpp +++ b/src/hotspot/share/gc/z/zAddressSpaceLimit.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,8 +46,8 @@ size_t ZAddressSpaceLimit::mark_stack() { return align_up(limit, ZMarkStackSpaceExpandSize); } -size_t ZAddressSpaceLimit::heap_view() { - // Allow all heap views to occupy 50% of the address space - const size_t limit = address_space_limit() / MaxVirtMemFraction / ZHeapViews; +size_t ZAddressSpaceLimit::heap() { + // Allow the heap to occupy 50% of the address space + const size_t limit = address_space_limit() / MaxVirtMemFraction; return align_up(limit, ZGranuleSize); } diff --git a/src/hotspot/share/gc/z/zAddressSpaceLimit.hpp b/src/hotspot/share/gc/z/zAddressSpaceLimit.hpp index ec0faf2c0870a..d8e7e7cfd3617 100644 --- a/src/hotspot/share/gc/z/zAddressSpaceLimit.hpp +++ b/src/hotspot/share/gc/z/zAddressSpaceLimit.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ class ZAddressSpaceLimit : public AllStatic { public: static size_t mark_stack(); - static size_t heap_view(); + static size_t heap(); }; #endif // SHARE_GC_Z_ZADDRESSSPACELIMIT_HPP diff --git a/src/hotspot/share/gc/z/zAllocationFlags.hpp b/src/hotspot/share/gc/z/zAllocationFlags.hpp index 86b195efafd62..44d6ea740ea12 100644 --- a/src/hotspot/share/gc/z/zAllocationFlags.hpp +++ b/src/hotspot/share/gc/z/zAllocationFlags.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,7 @@ // | | | | // | | | * 0-0 Non-Blocking Flag (1-bit) // | | | -// | | * 1-1 Worker Relocation Flag (1-bit) +// | | * 1-1 GC Relocation Flag (1-bit) // | | // | * 2-2 Low Address Flag (1-bit) // | @@ -48,7 +48,7 @@ class ZAllocationFlags { private: typedef ZBitField field_non_blocking; - typedef ZBitField field_worker_relocation; + typedef ZBitField field_gc_relocation; typedef ZBitField field_low_address; uint8_t _flags; @@ -61,8 +61,8 @@ class ZAllocationFlags { _flags |= field_non_blocking::encode(true); } - void set_worker_relocation() { - _flags |= field_worker_relocation::encode(true); + void set_gc_relocation() { + _flags |= field_gc_relocation::encode(true); } void set_low_address() { @@ -73,8 +73,8 @@ class ZAllocationFlags { return field_non_blocking::decode(_flags); } - bool worker_relocation() const { - return field_worker_relocation::decode(_flags); + bool gc_relocation() const { + return field_gc_relocation::decode(_flags); } bool low_address() const { diff --git a/src/hotspot/share/gc/z/zAllocator.cpp b/src/hotspot/share/gc/z/zAllocator.cpp new file mode 100644 index 0000000000000..9fdb811a89a99 --- /dev/null +++ b/src/hotspot/share/gc/z/zAllocator.cpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "gc/z/zAllocator.hpp" +#include "gc/z/zObjectAllocator.hpp" + +ZAllocatorEden* ZAllocator::_eden; +ZAllocatorForRelocation* ZAllocator::_relocation[ZAllocator::_relocation_allocators]; + +ZAllocator::ZAllocator(ZPageAge age) : + _object_allocator(age) {} + +void ZAllocator::retire_pages() { + _object_allocator.retire_pages(); +} + +ZAllocatorEden::ZAllocatorEden() : + ZAllocator(ZPageAge::eden) { + ZAllocator::_eden = this; +} + +size_t ZAllocatorEden::tlab_used() const { + return _object_allocator.used(); +} + +size_t ZAllocatorEden::remaining() const { + return _object_allocator.remaining(); +} + +ZPageAge ZAllocatorForRelocation::install() { + for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) { + if (_relocation[i] == nullptr) { + _relocation[i] = this; + return static_cast(i + 1); + } + } + + ShouldNotReachHere(); + return ZPageAge::eden; +} + +ZAllocatorForRelocation::ZAllocatorForRelocation() : + ZAllocator(install()) { +} + +zaddress ZAllocatorForRelocation::alloc_object(size_t size) { + return _object_allocator.alloc_object_for_relocation(size); +} + +void ZAllocatorForRelocation::undo_alloc_object(zaddress addr, size_t size) { + _object_allocator.undo_alloc_object_for_relocation(addr, size); +} + +ZPage* ZAllocatorForRelocation::alloc_page_for_relocation(ZPageType type, size_t size, ZAllocationFlags flags) { + return _object_allocator.alloc_page_for_relocation(type, size, flags); +} diff --git a/src/hotspot/share/gc/z/zAllocator.hpp b/src/hotspot/share/gc/z/zAllocator.hpp new file mode 100644 index 0000000000000..3457f2b3c1837 --- /dev/null +++ b/src/hotspot/share/gc/z/zAllocator.hpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_Z_ZALLOCATOR_HPP +#define SHARE_GC_Z_ZALLOCATOR_HPP + +#include "gc/z/zAllocationFlags.hpp" +#include "gc/z/zObjectAllocator.hpp" +#include "gc/z/zPageAge.hpp" +#include "gc/z/zPageType.hpp" + +class ZAllocatorEden; +class ZAllocatorForRelocation; +class ZPage; + +class ZAllocator { + friend class ZAllocatorEden; + friend class ZAllocatorSurvivor; + friend class ZAllocatorOld; + +public: + static constexpr uint _relocation_allocators = static_cast(ZPageAge::old); + +protected: + ZObjectAllocator _object_allocator; + + static ZAllocatorEden* _eden; + static ZAllocatorForRelocation* _relocation[ZAllocator::_relocation_allocators]; + +public: + static ZAllocatorEden* eden(); + static ZAllocatorForRelocation* relocation(ZPageAge page_age); + static ZAllocatorForRelocation* old(); + + ZAllocator(ZPageAge age); + + void retire_pages(); +}; + +class ZAllocatorEden : public ZAllocator { +public: + ZAllocatorEden(); + + // Mutator allocation + zaddress alloc_tlab(size_t size); + zaddress alloc_object(size_t size); + + // Statistics + size_t tlab_used() const; + size_t remaining() const; +}; + +class ZAllocatorForRelocation : public ZAllocator { +private: + ZPageAge install(); + +public: + ZAllocatorForRelocation(); + + // Relocation + zaddress alloc_object(size_t size); + void undo_alloc_object(zaddress addr, size_t size); + + ZPage* alloc_page_for_relocation(ZPageType type, size_t size, ZAllocationFlags flags); +}; + +#endif // SHARE_GC_Z_ZALLOCATOR_HPP diff --git a/src/hotspot/share/gc/z/zAllocator.inline.hpp b/src/hotspot/share/gc/z/zAllocator.inline.hpp new file mode 100644 index 0000000000000..ba558a1b0f38f --- /dev/null +++ b/src/hotspot/share/gc/z/zAllocator.inline.hpp @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_Z_ZALLOCATOR_INLINE_HPP +#define SHARE_GC_Z_ZALLOCATOR_INLINE_HPP + +#include "gc/z/zAllocator.hpp" + +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zHeap.hpp" + +inline ZAllocatorEden* ZAllocator::eden() { + return _eden; +} + +inline ZAllocatorForRelocation* ZAllocator::relocation(ZPageAge page_age) { + return _relocation[static_cast(page_age) - 1]; +} + +inline ZAllocatorForRelocation* ZAllocator::old() { + return relocation(ZPageAge::old); +} + +inline zaddress ZAllocatorEden::alloc_tlab(size_t size) { + guarantee(size <= ZHeap::heap()->max_tlab_size(), "TLAB too large"); + return _object_allocator.alloc_object(size); +} + +inline zaddress ZAllocatorEden::alloc_object(size_t size) { + const zaddress addr = _object_allocator.alloc_object(size); + + if (is_null(addr)) { + ZHeap::heap()->out_of_memory(); + } + + return addr; +} + +#endif // SHARE_GC_Z_ZALLOCATOR_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zArguments.cpp b/src/hotspot/share/gc/z/zArguments.cpp index ad59d2fbcecfb..01ecf8f3fc4b8 100644 --- a/src/hotspot/share/gc/z/zArguments.cpp +++ b/src/hotspot/share/gc/z/zArguments.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,9 +37,74 @@ void ZArguments::initialize_alignments() { HeapAlignment = SpaceAlignment; } -void ZArguments::initialize() { - GCArguments::initialize(); +void ZArguments::select_max_gc_threads() { + // Select number of parallel threads + if (FLAG_IS_DEFAULT(ParallelGCThreads)) { + FLAG_SET_DEFAULT(ParallelGCThreads, ZHeuristics::nparallel_workers()); + } + + if (ParallelGCThreads == 0) { + vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ParallelGCThreads=0"); + } + + // The max number of concurrent threads we heuristically want for a generation + uint max_nworkers_generation; + + if (FLAG_IS_DEFAULT(ConcGCThreads)) { + max_nworkers_generation = ZHeuristics::nconcurrent_workers(); + + // Computed max number of GC threads at a time in the machine + uint max_nworkers = max_nworkers_generation; + + if (!FLAG_IS_DEFAULT(ZYoungGCThreads)) { + max_nworkers = MAX2(max_nworkers, ZYoungGCThreads); + } + + if (!FLAG_IS_DEFAULT(ZOldGCThreads)) { + max_nworkers = MAX2(max_nworkers, ZOldGCThreads); + } + + FLAG_SET_DEFAULT(ConcGCThreads, max_nworkers); + } else { + max_nworkers_generation = ConcGCThreads; + } + + if (FLAG_IS_DEFAULT(ZYoungGCThreads)) { + if (UseDynamicNumberOfGCThreads) { + FLAG_SET_ERGO(ZYoungGCThreads, max_nworkers_generation); + } else { + const uint static_young_threads = MAX2(uint(max_nworkers_generation * 0.9), 1u); + FLAG_SET_ERGO(ZYoungGCThreads, static_young_threads); + } + } + + if (FLAG_IS_DEFAULT(ZOldGCThreads)) { + if (UseDynamicNumberOfGCThreads) { + FLAG_SET_ERGO(ZOldGCThreads, max_nworkers_generation); + } else { + const uint static_old_threads = MAX2(ConcGCThreads - ZYoungGCThreads, 1u); + FLAG_SET_ERGO(ZOldGCThreads, static_old_threads); + } + } + + if (ConcGCThreads == 0) { + vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ConcGCThreads=0"); + } + if (ZYoungGCThreads > ConcGCThreads) { + vm_exit_during_initialization("The flag -XX:ZYoungGCThreads can't be higher than -XX:ConcGCThreads"); + } else if (ZYoungGCThreads == 0) { + vm_exit_during_initialization("The flag 
-XX:ZYoungGCThreads can't be lower than 1"); + } + + if (ZOldGCThreads > ConcGCThreads) { + vm_exit_during_initialization("The flag -XX:ZOldGCThreads can't be higher than -XX:ConcGCThreads"); + } else if (ZOldGCThreads == 0) { + vm_exit_during_initialization("The flag -XX:ZOldGCThreads can't be lower than 1"); + } +} + +void ZArguments::initialize() { // Check mark stack size const size_t mark_stack_space_limit = ZAddressSpaceLimit::mark_stack(); if (ZMarkStackSpaceLimit > mark_stack_space_limit) { @@ -54,22 +119,53 @@ void ZArguments::initialize() { FLAG_SET_DEFAULT(UseNUMA, true); } - // Select number of parallel threads - if (FLAG_IS_DEFAULT(ParallelGCThreads)) { - FLAG_SET_DEFAULT(ParallelGCThreads, ZHeuristics::nparallel_workers()); + select_max_gc_threads(); + + // Backwards compatible alias for ZCollectionIntervalMajor + if (!FLAG_IS_DEFAULT(ZCollectionInterval)) { + FLAG_SET_ERGO_IF_DEFAULT(ZCollectionIntervalMajor, ZCollectionInterval); } - if (ParallelGCThreads == 0) { - vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ParallelGCThreads=0"); + if (!FLAG_IS_CMDLINE(MaxHeapSize) && + !FLAG_IS_CMDLINE(MaxRAMFraction) && + !FLAG_IS_CMDLINE(MaxRAMPercentage)) { + // We are really just guessing how much memory the program needs. + // When that is the case, we don't want the soft and hard limits to be the same + // as it can cause flakyness in the number of GC threads used, in order to keep + // to a random number we just pulled out of thin air. + FLAG_SET_ERGO_IF_DEFAULT(SoftMaxHeapSize, MaxHeapSize * 90 / 100); } - // Select number of concurrent threads - if (FLAG_IS_DEFAULT(ConcGCThreads)) { - FLAG_SET_DEFAULT(ConcGCThreads, ZHeuristics::nconcurrent_workers()); + if (FLAG_IS_DEFAULT(ZFragmentationLimit)) { + FLAG_SET_DEFAULT(ZFragmentationLimit, 5.0); } - if (ConcGCThreads == 0) { - vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ConcGCThreads=0"); + if (!FLAG_IS_DEFAULT(ZTenuringThreshold) && ZTenuringThreshold != -1) { + FLAG_SET_ERGO_IF_DEFAULT(MaxTenuringThreshold, ZTenuringThreshold); + if (MaxTenuringThreshold == 0) { + FLAG_SET_ERGO_IF_DEFAULT(AlwaysTenure, true); + } + } + + if (FLAG_IS_DEFAULT(MaxTenuringThreshold)) { + uint tenuring_threshold; + for (tenuring_threshold = 0; tenuring_threshold < MaxTenuringThreshold; ++tenuring_threshold) { + // Reduce the number of object ages, if the resulting garbage is too high + const size_t medium_page_overhead = ZPageSizeMedium * tenuring_threshold; + const size_t small_page_overhead = ZPageSizeSmall * ConcGCThreads * tenuring_threshold; + if (small_page_overhead + medium_page_overhead >= ZHeuristics::significant_young_overhead()) { + break; + } + } + FLAG_SET_DEFAULT(MaxTenuringThreshold, tenuring_threshold); + if (tenuring_threshold == 0 && FLAG_IS_DEFAULT(AlwaysTenure)) { + // Some flag constraint function says AlwaysTenure must be true iff MaxTenuringThreshold == 0 + FLAG_SET_DEFAULT(AlwaysTenure, true); + } + } + + if (!FLAG_IS_DEFAULT(ZTenuringThreshold) && NeverTenure) { + vm_exit_during_initialization(err_msg("ZTenuringThreshold and NeverTenure are incompatible")); } // Large page size must match granule size @@ -79,10 +175,9 @@ void ZArguments::initialize() { ZGranuleSize / M)); } - // The heuristics used when UseDynamicNumberOfGCThreads is - // enabled defaults to using a ZAllocationSpikeTolerance of 1. 
- if (UseDynamicNumberOfGCThreads && FLAG_IS_DEFAULT(ZAllocationSpikeTolerance)) { - FLAG_SET_DEFAULT(ZAllocationSpikeTolerance, 1); + if (!FLAG_IS_DEFAULT(ZTenuringThreshold) && ZTenuringThreshold > static_cast(MaxTenuringThreshold)) { + vm_exit_during_initialization(err_msg("ZTenuringThreshold must be be within bounds of " + "MaxTenuringThreshold")); } #ifdef COMPILER2 @@ -98,6 +193,11 @@ void ZArguments::initialize() { // CompressedOops not supported FLAG_SET_DEFAULT(UseCompressedOops, false); + // More events + if (FLAG_IS_DEFAULT(LogEventsBufferEntries)) { + FLAG_SET_DEFAULT(LogEventsBufferEntries, 250); + } + // Verification before startup and after exit not (yet) supported FLAG_SET_DEFAULT(VerifyDuringStartup, false); FLAG_SET_DEFAULT(VerifyBeforeExit, false); @@ -106,20 +206,23 @@ void ZArguments::initialize() { FLAG_SET_DEFAULT(ZVerifyRoots, true); FLAG_SET_DEFAULT(ZVerifyObjects, true); } -} -size_t ZArguments::heap_virtual_to_physical_ratio() { - return ZHeapViews * ZVirtualToPhysicalRatio; +#ifdef ASSERT + // This check slows down testing too much. Turn it off for now. + if (FLAG_IS_DEFAULT(VerifyDependencies)) { + FLAG_SET_DEFAULT(VerifyDependencies, false); + } +#endif } -size_t ZArguments::conservative_max_heap_alignment() { - return 0; +size_t ZArguments::heap_virtual_to_physical_ratio() { + return ZVirtualToPhysicalRatio; } CollectedHeap* ZArguments::create_heap() { return new ZCollectedHeap(); } -bool ZArguments::is_supported() const { +bool ZArguments::is_supported() { return is_os_supported(); } diff --git a/src/hotspot/share/gc/z/zArguments.hpp b/src/hotspot/share/gc/z/zArguments.hpp index cc177e25d90a4..ac1e613d4ccbb 100644 --- a/src/hotspot/share/gc/z/zArguments.hpp +++ b/src/hotspot/share/gc/z/zArguments.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,18 +28,19 @@ class CollectedHeap; -class ZArguments : public GCArguments { +class ZArguments : AllStatic { private: - virtual void initialize_alignments(); + static void select_max_gc_threads(); - virtual void initialize(); - virtual size_t conservative_max_heap_alignment(); - virtual size_t heap_virtual_to_physical_ratio(); - virtual CollectedHeap* create_heap(); +public: + static void initialize_alignments(); + static void initialize(); + static size_t heap_virtual_to_physical_ratio(); + static CollectedHeap* create_heap(); - virtual bool is_supported() const; + static bool is_supported(); - bool is_os_supported() const; + static bool is_os_supported(); }; #endif // SHARE_GC_Z_ZARGUMENTS_HPP diff --git a/src/hotspot/share/gc/z/zArray.hpp b/src/hotspot/share/gc/z/zArray.hpp index f7bd3967d7913..7bcd4f59eebe2 100644 --- a/src/hotspot/share/gc/z/zArray.hpp +++ b/src/hotspot/share/gc/z/zArray.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,27 +25,59 @@ #define SHARE_GC_Z_ZARRAY_HPP #include "memory/allocation.hpp" +#include "runtime/atomic.hpp" +#include "runtime/os.hpp" +#include "runtime/thread.hpp" #include "utilities/growableArray.hpp" +#include + +class ZLock; + template using ZArray = GrowableArrayCHeap; template class ZArrayIteratorImpl : public StackObj { private: - const T* _next; - const T* const _end; + size_t _next; + const size_t _end; + const T* const _array; - bool next_serial(T* elem); - bool next_parallel(T* elem); + bool next_serial(size_t* index); + bool next_parallel(size_t* index); public: ZArrayIteratorImpl(const T* array, size_t length); ZArrayIteratorImpl(const ZArray* array); bool next(T* elem); + bool next_index(size_t* index); + + T index_to_elem(size_t index); }; template using ZArrayIterator = ZArrayIteratorImpl; template using ZArrayParallelIterator = ZArrayIteratorImpl; +template +class ZActivatedArray { +private: + typedef typename std::remove_extent::type ItemT; + + ZLock* _lock; + uint64_t _count; + ZArray _array; + +public: + explicit ZActivatedArray(bool locked = true); + ~ZActivatedArray(); + + void activate(); + template + void deactivate_and_apply(Function function); + + bool is_activated() const; + bool add_if_activated(ItemT* item); +}; + #endif // SHARE_GC_Z_ZARRAY_HPP diff --git a/src/hotspot/share/gc/z/zArray.inline.hpp b/src/hotspot/share/gc/z/zArray.inline.hpp index b7e5bba9adb68..e86bc7383789d 100644 --- a/src/hotspot/share/gc/z/zArray.inline.hpp +++ b/src/hotspot/share/gc/z/zArray.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,55 +26,121 @@ #include "gc/z/zArray.hpp" +#include "gc/z/zLock.inline.hpp" #include "runtime/atomic.hpp" template -inline bool ZArrayIteratorImpl::next_serial(T* elem) { +inline bool ZArrayIteratorImpl::next_serial(size_t* index) { if (_next == _end) { return false; } - *elem = *_next; + *index = _next; _next++; return true; } template -inline bool ZArrayIteratorImpl::next_parallel(T* elem) { - const T* old_next = Atomic::load(&_next); +inline bool ZArrayIteratorImpl::next_parallel(size_t* index) { + const size_t claimed_index = Atomic::fetch_then_add(&_next, 1u, memory_order_relaxed); - for (;;) { - if (old_next == _end) { - return false; - } - - const T* const new_next = old_next + 1; - const T* const prev_next = Atomic::cmpxchg(&_next, old_next, new_next); - if (prev_next == old_next) { - *elem = *old_next; - return true; - } - - old_next = prev_next; + if (claimed_index < _end) { + *index = claimed_index; + return true; } + + return false; } template inline ZArrayIteratorImpl::ZArrayIteratorImpl(const T* array, size_t length) : - _next(array), - _end(array + length) {} + _next(0), + _end(length), + _array(array) {} template inline ZArrayIteratorImpl::ZArrayIteratorImpl(const ZArray* array) : - ZArrayIteratorImpl(array->is_empty() ? NULL : array->adr_at(0), array->length()) {} + ZArrayIteratorImpl(array->is_empty() ? 
nullptr : array->adr_at(0), array->length()) {} template inline bool ZArrayIteratorImpl::next(T* elem) { + size_t index; + if (next_index(&index)) { + *elem = index_to_elem(index); + return true; + } + + return false; +} + +template +inline bool ZArrayIteratorImpl::next_index(size_t* index) { if (Parallel) { - return next_parallel(elem); + return next_parallel(index); } else { - return next_serial(elem); + return next_serial(index); + } +} + +template +inline T ZArrayIteratorImpl::index_to_elem(size_t index) { + assert(index < _end, "Out of bounds"); + return _array[index]; +} + +template +ZActivatedArray::ZActivatedArray(bool locked) : + _lock(locked ? new ZLock() : nullptr), + _count(0), + _array() {} + +template +ZActivatedArray::~ZActivatedArray() { + FreeHeap(_lock); +} + +template +bool ZActivatedArray::is_activated() const { + ZLocker locker(_lock); + return _count > 0; +} + +template +bool ZActivatedArray::add_if_activated(ItemT* item) { + ZLocker locker(_lock); + if (_count > 0) { + _array.append(item); + return true; + } + + return false; +} + +template +void ZActivatedArray::activate() { + ZLocker locker(_lock); + _count++; +} + +template +template +void ZActivatedArray::deactivate_and_apply(Function function) { + ZArray array; + + { + ZLocker locker(_lock); + assert(_count > 0, "Invalid state"); + if (--_count == 0u) { + // Fully deactivated - remove all elements + array.swap(&_array); + } + } + + // Apply function to all elements - if fully deactivated + ZArrayIterator iter(&array); + for (ItemT* item; iter.next(&item);) { + function(item); } } diff --git a/src/hotspot/share/gc/z/zBarrier.cpp b/src/hotspot/share/gc/z/zBarrier.cpp index 63730c58b19e0..6e1d3ee009365 100644 --- a/src/hotspot/share/gc/z/zBarrier.cpp +++ b/src/hotspot/share/gc/z/zBarrier.cpp @@ -23,253 +23,291 @@ #include "precompiled.hpp" #include "classfile/javaClasses.hpp" +#include "gc/z/zAddress.inline.hpp" #include "gc/z/zBarrier.inline.hpp" +#include "gc/z/zGeneration.inline.hpp" #include "gc/z/zHeap.inline.hpp" -#include "gc/z/zOop.inline.hpp" -#include "gc/z/zThread.inline.hpp" +#include "gc/z/zStoreBarrierBuffer.inline.hpp" #include "memory/iterator.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/safepoint.hpp" #include "utilities/debug.hpp" -template -bool ZBarrier::should_mark_through(uintptr_t addr) { - // Finalizable marked oops can still exists on the heap after marking - // has completed, in which case we just want to convert this into a - // good oop and not push it on the mark stack. - if (!during_mark()) { - assert(ZAddress::is_marked(addr), "Should be marked"); - assert(ZAddress::is_finalizable(addr), "Should be finalizable"); - return false; - } +#ifdef ASSERT +static bool during_young_mark() { + return ZGeneration::young()->is_phase_mark(); +} - // During marking, we mark through already marked oops to avoid having - // some large part of the object graph hidden behind a pushed, but not - // yet flushed, entry on a mutator mark stack. Always marking through - // allows the GC workers to proceed through the object graph even if a - // mutator touched an oop first, which in turn will reduce the risk of - // having to flush mark stacks multiple times to terminate marking. - // - // However, when doing finalizable marking we don't always want to mark - // through. First, marking through an already strongly marked oop would - // be wasteful, since we will then proceed to do finalizable marking on - // an object which is, or will be, marked strongly. 
Second, marking - // through an already finalizable marked oop would also be wasteful, - // since such oops can never end up on a mutator mark stack and can - // therefore not hide some part of the object graph from GC workers. - if (finalizable) { - return !ZAddress::is_marked(addr); - } +static bool during_old_mark() { + return ZGeneration::old()->is_phase_mark(); +} - // Mark through - return true; +static bool during_any_mark() { + return during_young_mark() || during_old_mark(); } +#endif -template -uintptr_t ZBarrier::mark(uintptr_t addr) { - uintptr_t good_addr; +zaddress ZBarrier::relocate_or_remap(zaddress_unsafe addr, ZGeneration* generation) { + return generation->relocate_or_remap_object(addr); +} - if (ZAddress::is_marked(addr)) { - // Already marked, but try to mark though anyway - good_addr = ZAddress::good(addr); - } else if (ZAddress::is_remapped(addr)) { - // Already remapped, but also needs to be marked - good_addr = ZAddress::good(addr); - } else { - // Needs to be both remapped and marked - good_addr = remap(addr); +zaddress ZBarrier::remap(zaddress_unsafe addr, ZGeneration* generation) { + return generation->remap_object(addr); +} + +// +// Weak load barrier +// + +static void keep_alive_young(zaddress addr) { + if (ZGeneration::young()->is_phase_mark()) { + ZBarrier::mark_young(addr); } +} - // Mark - if (should_mark_through(addr)) { - ZHeap::heap()->mark_object(good_addr); +zaddress ZBarrier::blocking_keep_alive_on_weak_slow_path(volatile zpointer* p, zaddress addr) { + if (is_null(addr)) { + return zaddress::null; } - if (finalizable) { - // Make the oop finalizable marked/good, instead of normal marked/good. - // This is needed because an object might first becomes finalizable - // marked by the GC, and then loaded by a mutator thread. In this case, - // the mutator thread must be able to tell that the object needs to be - // strongly marked. The finalizable bit in the oop exists to make sure - // that a load of a finalizable marked oop will fall into the barrier - // slow path so that we can mark the object as strongly reachable. - return ZAddress::finalizable_good(good_addr); + if (ZHeap::heap()->is_old(addr)) { + if (!ZHeap::heap()->is_object_strongly_live(addr)) { + return zaddress::null; + } + } else { + // Young gen objects are never blocked, need to keep alive + keep_alive_young(addr); } - return good_addr; + // Strongly live + return addr; } -uintptr_t ZBarrier::remap(uintptr_t addr) { - assert(!ZAddress::is_good(addr), "Should not be good"); - assert(!ZAddress::is_weak_good(addr), "Should not be weak good"); - return ZHeap::heap()->remap_object(addr); -} +zaddress ZBarrier::blocking_keep_alive_on_phantom_slow_path(volatile zpointer* p, zaddress addr) { + if (is_null(addr)) { + return zaddress::null; + } -uintptr_t ZBarrier::relocate(uintptr_t addr) { - assert(!ZAddress::is_good(addr), "Should not be good"); - assert(!ZAddress::is_weak_good(addr), "Should not be weak good"); - return ZHeap::heap()->relocate_object(addr); -} + if (ZHeap::heap()->is_old(addr)) { + if (!ZHeap::heap()->is_object_live(addr)) { + return zaddress::null; + } + } else { + // Young gen objects are never blocked, need to keep alive + keep_alive_young(addr); + } -uintptr_t ZBarrier::relocate_or_mark(uintptr_t addr) { - return during_relocate() ? relocate(addr) : mark(addr); + // Strongly live + return addr; } -uintptr_t ZBarrier::relocate_or_mark_no_follow(uintptr_t addr) { - return during_relocate() ? 
relocate(addr) : mark(addr); +zaddress ZBarrier::blocking_load_barrier_on_weak_slow_path(volatile zpointer* p, zaddress addr) { + if (is_null(addr)) { + return zaddress::null; + } + + if (ZHeap::heap()->is_old(addr)) { + if (!ZHeap::heap()->is_object_strongly_live(addr)) { + return zaddress::null; + } + } else { + // Young objects are never considered non-strong + // Note: Should not need to keep object alive in this operation, + // but the barrier colors the pointer mark good, so we need + // to mark the object accordingly. + keep_alive_young(addr); + } + + return addr; } -uintptr_t ZBarrier::relocate_or_remap(uintptr_t addr) { - return during_relocate() ? relocate(addr) : remap(addr); +zaddress ZBarrier::blocking_load_barrier_on_phantom_slow_path(volatile zpointer* p, zaddress addr) { + if (is_null(addr)) { + return zaddress::null; + } + + if (ZHeap::heap()->is_old(addr)) { + if (!ZHeap::heap()->is_object_live(addr)) { + return zaddress::null; + } + } else { + // Young objects are never considered non-strong + // Note: Should not need to keep object alive in this operation, + // but the barrier colors the pointer mark good, so we need + // to mark the object accordingly. + keep_alive_young(addr); + } + + return addr; } // -// Load barrier +// Clean barrier // -uintptr_t ZBarrier::load_barrier_on_oop_slow_path(uintptr_t addr) { - return relocate_or_mark(addr); -} -uintptr_t ZBarrier::load_barrier_on_invisible_root_oop_slow_path(uintptr_t addr) { - return relocate_or_mark_no_follow(addr); -} +zaddress ZBarrier::verify_old_object_live_slow_path(zaddress addr) { + // Verify that the object was indeed alive + assert(ZHeap::heap()->is_young(addr) || ZHeap::heap()->is_object_live(addr), "Should be live"); -void ZBarrier::load_barrier_on_oop_fields(oop o) { - assert(ZAddress::is_good(ZOop::to_address(o)), "Should be good"); - ZLoadBarrierOopClosure cl; - o->oop_iterate(&cl); + return addr; } // -// Weak load barrier +// Mark barrier // -uintptr_t ZBarrier::weak_load_barrier_on_oop_slow_path(uintptr_t addr) { - return ZAddress::is_weak_good(addr) ? ZAddress::good(addr) : relocate_or_remap(addr); -} -uintptr_t ZBarrier::weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr) { - const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr); - if (ZHeap::heap()->is_object_strongly_live(good_addr)) { - return good_addr; +zaddress ZBarrier::mark_slow_path(zaddress addr) { + assert(during_any_mark(), "Invalid phase"); + + if (is_null(addr)) { + return addr; } - // Not strongly live - return 0; + mark(addr); + + return addr; } -uintptr_t ZBarrier::weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr) { - const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr); - if (ZHeap::heap()->is_object_live(good_addr)) { - return good_addr; +zaddress ZBarrier::mark_from_young_slow_path(zaddress addr) { + assert(during_young_mark(), "Invalid phase"); + + if (is_null(addr)) { + return addr; } - // Not live - return 0; -} + if (ZHeap::heap()->is_young(addr)) { + ZGeneration::young()->mark_object(addr); + return addr; + } -// -// Keep alive barrier -// -uintptr_t ZBarrier::keep_alive_barrier_on_oop_slow_path(uintptr_t addr) { - assert(during_mark(), "Invalid phase"); + if (ZGeneration::young()->type() == ZYoungType::major_full_roots || + ZGeneration::young()->type() == ZYoungType::major_partial_roots) { + // The initial major young collection is responsible for finding roots + // from the young generation to the old generation. 
+ ZGeneration::old()->mark_object(addr); + return addr; + } - // Mark - return mark(addr); -} + // Don't mark pointers to the old generation for minor during major; + // the initial young collection pushed the young-to-old pointers that + // were part of the SATB. All other young-to-old pointers are irrelevant. -uintptr_t ZBarrier::keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr) { - assert(ZResurrection::is_blocked(), "This operation is only valid when resurrection is blocked"); - const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr); - assert(ZHeap::heap()->is_object_strongly_live(good_addr), "Should be live"); - return good_addr; + // We still want to heal the pointers so they become store_good, so that + // after a young collection, all young pointers are store good. + return addr; } -uintptr_t ZBarrier::keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr) { - assert(ZResurrection::is_blocked(), "This operation is only valid when resurrection is blocked"); - const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr); - assert(ZHeap::heap()->is_object_live(good_addr), "Should be live"); - return good_addr; -} +zaddress ZBarrier::mark_from_old_slow_path(zaddress addr) { + assert(during_old_mark(), "Invalid phase"); -// -// Mark barrier -// -uintptr_t ZBarrier::mark_barrier_on_oop_slow_path(uintptr_t addr) { - assert(during_mark(), "Invalid phase"); - assert(ZThread::is_worker(), "Invalid thread"); + if (is_null(addr)) { + return addr; + } - // Mark - return mark(addr); -} + if (ZHeap::heap()->is_old(addr)) { + ZGeneration::old()->mark_object(addr); + return addr; + } -uintptr_t ZBarrier::mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr) { - assert(during_mark(), "Invalid phase"); - assert(ZThread::is_worker(), "Invalid thread"); + // Don't mark pointers to the young generation; they will be + // processed by the remembered set scanning. - // Mark - return mark(addr); + // Returning null means this location is not self healed by the caller. + return zaddress::null; } -// -// Narrow oop variants, never used. -// -oop ZBarrier::load_barrier_on_oop_field(volatile narrowOop* p) { - ShouldNotReachHere(); - return NULL; -} +zaddress ZBarrier::mark_young_slow_path(zaddress addr) { + assert(during_young_mark(), "Invalid phase"); + + if (is_null(addr)) { + return addr; + } -oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) { - ShouldNotReachHere(); - return NULL; + mark_if_young(addr); + + return addr; } -void ZBarrier::load_barrier_on_oop_array(volatile narrowOop* p, size_t length) { - ShouldNotReachHere(); +zaddress ZBarrier::mark_finalizable_slow_path(zaddress addr) { + assert(during_any_mark(), "Invalid phase"); + + if (is_null(addr)) { + return addr; + } + + if (ZHeap::heap()->is_old(addr)) { + ZGeneration::old()->mark_object(addr); + return addr; + } + + ZGeneration::young()->mark_object_if_active(addr); + return addr; } -oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) { - ShouldNotReachHere(); - return NULL; +zaddress ZBarrier::mark_finalizable_from_old_slow_path(zaddress addr) { + assert(during_any_mark(), "Invalid phase"); + + if (is_null(addr)) { + return addr; + } + + if (ZHeap::heap()->is_old(addr)) { + ZGeneration::old()->mark_object(addr); + return addr; + } + + // Don't mark pointers to the young generation; they will be + // processed by the remembered set scanning. + + // Returning null means this location is not self healed by the caller. 
+ return zaddress::null; } -oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) { - ShouldNotReachHere(); - return NULL; +zaddress ZBarrier::heap_store_slow_path(volatile zpointer* p, zaddress addr, zpointer prev, bool heal) { + ZStoreBarrierBuffer* buffer = ZStoreBarrierBuffer::buffer_for_store(heal); + + if (buffer != nullptr) { + // Buffer store barriers whenever possible + buffer->add(p, prev); + } else { + mark_and_remember(p, addr); + } + + return addr; } -oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) { - ShouldNotReachHere(); - return NULL; +zaddress ZBarrier::no_keep_alive_heap_store_slow_path(volatile zpointer* p, zaddress addr) { + remember(p); + + return addr; } -oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) { - ShouldNotReachHere(); - return NULL; +zaddress ZBarrier::native_store_slow_path(zaddress addr) { + if (!is_null(addr)) { + mark(addr); + } + + return addr; } -oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) { - ShouldNotReachHere(); - return NULL; +zaddress ZBarrier::keep_alive_slow_path(zaddress addr) { + if (!is_null(addr)) { + mark(addr); + } + + return addr; } #ifdef ASSERT // ON_WEAK barriers should only ever be applied to j.l.r.Reference.referents. -void ZBarrier::verify_on_weak(volatile oop* referent_addr) { - if (referent_addr != NULL) { - uintptr_t base = (uintptr_t)referent_addr - java_lang_ref_Reference::referent_offset(); - oop obj = cast_to_oop(base); +void ZBarrier::verify_on_weak(volatile zpointer* referent_addr) { + if (referent_addr != nullptr) { + const uintptr_t base = (uintptr_t)referent_addr - java_lang_ref_Reference::referent_offset(); + const oop obj = cast_to_oop(base); assert(oopDesc::is_oop(obj), "Verification failed for: ref " PTR_FORMAT " obj: " PTR_FORMAT, (uintptr_t)referent_addr, base); assert(java_lang_ref_Reference::is_referent_field(obj, java_lang_ref_Reference::referent_offset()), "Sanity"); } } #endif - -void ZLoadBarrierOopClosure::do_oop(oop* p) { - ZBarrier::load_barrier_on_oop_field(p); -} - -void ZLoadBarrierOopClosure::do_oop(narrowOop* p) { - ShouldNotReachHere(); -} diff --git a/src/hotspot/share/gc/z/zBarrier.hpp b/src/hotspot/share/gc/z/zBarrier.hpp index 2dfc1591888fa..14ad0a655f774 100644 --- a/src/hotspot/share/gc/z/zBarrier.hpp +++ b/src/hotspot/share/gc/z/zBarrier.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,112 +24,162 @@ #ifndef SHARE_GC_Z_ZBARRIER_HPP #define SHARE_GC_Z_ZBARRIER_HPP +#include "gc/z/zAddress.hpp" #include "memory/allStatic.hpp" #include "memory/iterator.hpp" -#include "oops/oop.hpp" -typedef bool (*ZBarrierFastPath)(uintptr_t); -typedef uintptr_t (*ZBarrierSlowPath)(uintptr_t); +// == Shift based load barrier == +// +// The load barriers of ZGC check if a loaded value is safe to expose or not, and +// then shifts the pointer to remove metadata bits, such that it points to mapped +// memory. +// +// A pointer is safe to expose if it does not have any load-bad bits set in its +// metadata bits. 
In the C++ code and non-nmethod generated code, that is checked +// by testing the pointer value against a load-bad mask, checking that no bad bit +// is set, followed by a shift, removing the metadata bits if they were good. +// However, for nmethod code, the test + shift sequence is optimized in such +// a way that the shift both tests if the pointer is exposable or not, and removes +// the metadata bits, with the same instruction. This is a speculative optimization +// that assumes that the loaded pointer is frequently going to be load-good or null +// when checked. Therefore, the nmethod load barriers just apply the shift with the +// current "good" shift (which is patched with nmethod entry barriers for each GC +// phase). If the result of that shift was a raw null value, then the ZF flag is set. +// If the result is a good pointer, then the very last bit that was removed by the +// shift, must have been a 1, which would have set the CF flag. Therefore, the "above" +// branch condition code is used to take a slowpath only iff CF == 0 and ZF == 0. +// CF == 0 implies it was not a good pointer, and ZF == 0 implies the resulting address +// was not a null value. Then we decide that the pointer is bad. This optimization +// is necessary to get satisfactory performance, but does come with a few constraints: +// +// 1) The load barrier can only recognize 4 different good patterns across all GC phases. +// The reason is that when a load barrier applies the currently good shift, then +// the value of said shift may differ only by 3, until we risk shifting away more +// than the low order three zeroes of an address, given a bad pointer, which would +// yield spurious false positives. +// +// 2) Those bit patterns must have only a single bit set. We achieve that by moving +// non-relocation work to store barriers. +// +// Another consequence of this speculative optimization, is that when the compiled code +// takes a slow path, it needs to reload the oop, because the shifted oop is now +// broken after being shifted with a different shift to what was used when the oop +// was stored. 
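// A minimal, self-contained sketch of the "test + shift" sequence described in
// the comment above. The constants and helper names here are illustrative
// assumptions only and do not match the real ZGC masks or shifts; the point is
// to contrast the explicit C++-style check with the speculative nmethod variant,
// where the shift itself doubles as the test.

#include <cstdint>

namespace zpointer_sketch {

// Assume a small low-order "color" field whose good bit is the last bit
// removed by the uncoloring shift.
constexpr unsigned  kColorBits   = 4;
constexpr uintptr_t kLoadGoodBit = uintptr_t(1) << (kColorBits - 1);
constexpr uintptr_t kLoadBadMask = ((uintptr_t(1) << kColorBits) - 1) & ~kLoadGoodBit;

// C++ / non-nmethod style: explicit test against the load-bad mask, then shift.
inline bool is_load_good_or_null(uintptr_t colored) {
  return (colored & kLoadBadMask) == 0;   // raw null also passes, as noted above
}

inline uintptr_t uncolor(uintptr_t colored) {
  return colored >> kColorBits;           // strip the metadata bits
}

// nmethod style, modeled in plain C++: shift first, then inspect what the shift
// produced. A zero result plays the role of ZF (the loaded value was null); the
// last bit shifted out plays the role of CF (it is 1 only for a good pattern).
// Only the "above" case (CF == 0 && ZF == 0) falls into the slow path.
inline bool take_slow_path_after_shift(uintptr_t colored, uintptr_t* uncolored) {
  const uintptr_t shifted      = colored >> kColorBits;
  const bool      last_bit_out = ((colored >> (kColorBits - 1)) & 1) != 0;
  *uncolored = shifted;
  return shifted != 0 && !last_bit_out;
}

} // namespace zpointer_sketch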
+ +typedef bool (*ZBarrierFastPath)(zpointer); +typedef zpointer (*ZBarrierColor)(zaddress, zpointer); + +class ZGeneration; + +void z_assert_is_barrier_safe(); class ZBarrier : public AllStatic { -private: - static const bool GCThread = true; - static const bool AnyThread = false; - - static const bool Follow = true; - static const bool DontFollow = false; - - static const bool Strong = false; - static const bool Finalizable = true; + friend class ZContinuation; + friend class ZStoreBarrierBuffer; + friend class ZUncoloredRoot; - static const bool Publish = true; - static const bool Overflow = false; - - template static void self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr); +private: + static void assert_transition_monotonicity(zpointer ptr, zpointer heal_ptr); + static void self_heal(ZBarrierFastPath fast_path, volatile zpointer* p, zpointer ptr, zpointer heal_ptr, bool allow_null); + + template + static zaddress barrier(ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path, ZBarrierColor color, volatile zpointer* p, zpointer o, bool allow_null = false); + + static zaddress make_load_good(zpointer ptr); + static zaddress make_load_good_no_relocate(zpointer ptr); + static zaddress relocate_or_remap(zaddress_unsafe addr, ZGeneration* generation); + static zaddress remap(zaddress_unsafe addr, ZGeneration* generation); + static void remember(volatile zpointer* p); + static void mark_and_remember(volatile zpointer* p, zaddress addr); + + // Fast paths in increasing strength level + static bool is_load_good_or_null_fast_path(zpointer ptr); + static bool is_mark_good_fast_path(zpointer ptr); + static bool is_store_good_fast_path(zpointer ptr); + static bool is_store_good_or_null_fast_path(zpointer ptr); + static bool is_store_good_or_null_any_fast_path(zpointer ptr); + + static bool is_mark_young_good_fast_path(zpointer ptr); + static bool is_finalizable_good_fast_path(zpointer ptr); + + // Slow paths + static zaddress blocking_keep_alive_on_weak_slow_path(volatile zpointer* p, zaddress addr); + static zaddress blocking_keep_alive_on_phantom_slow_path(volatile zpointer* p, zaddress addr); + static zaddress blocking_load_barrier_on_weak_slow_path(volatile zpointer* p, zaddress addr); + static zaddress blocking_load_barrier_on_phantom_slow_path(volatile zpointer* p, zaddress addr); + + static zaddress verify_old_object_live_slow_path(zaddress addr); + + static zaddress mark_slow_path(zaddress addr); + static zaddress mark_young_slow_path(zaddress addr); + static zaddress mark_from_young_slow_path(zaddress addr); + static zaddress mark_from_old_slow_path(zaddress addr); + static zaddress mark_finalizable_slow_path(zaddress addr); + static zaddress mark_finalizable_from_old_slow_path(zaddress addr); + + static zaddress keep_alive_slow_path(zaddress addr); + static zaddress heap_store_slow_path(volatile zpointer* p, zaddress addr, zpointer prev, bool heal); + static zaddress native_store_slow_path(zaddress addr); + static zaddress no_keep_alive_heap_store_slow_path(volatile zpointer* p, zaddress addr); + + static zaddress promote_slow_path(zaddress addr); + + // Helpers for non-strong oop refs barriers + static zaddress blocking_keep_alive_load_barrier_on_weak_oop_field_preloaded(volatile zpointer* p, zpointer o); + static zaddress blocking_keep_alive_load_barrier_on_phantom_oop_field_preloaded(volatile zpointer* p, zpointer o); + static zaddress blocking_load_barrier_on_weak_oop_field_preloaded(volatile zpointer* p, zpointer o); + static zaddress 
blocking_load_barrier_on_phantom_oop_field_preloaded(volatile zpointer* p, zpointer o); + + // Verification + static void verify_on_weak(volatile zpointer* referent_addr) NOT_DEBUG_RETURN; - template static oop barrier(volatile oop* p, oop o); - template static oop weak_barrier(volatile oop* p, oop o); - template static void root_barrier(oop* p, oop o); +public: - static bool is_good_or_null_fast_path(uintptr_t addr); - static bool is_weak_good_or_null_fast_path(uintptr_t addr); - static bool is_marked_or_null_fast_path(uintptr_t addr); + static zpointer load_atomic(volatile zpointer* p); - static bool during_mark(); - static bool during_relocate(); - template static bool should_mark_through(uintptr_t addr); - template static uintptr_t mark(uintptr_t addr); - static uintptr_t remap(uintptr_t addr); - static uintptr_t relocate(uintptr_t addr); - static uintptr_t relocate_or_mark(uintptr_t addr); - static uintptr_t relocate_or_mark_no_follow(uintptr_t addr); - static uintptr_t relocate_or_remap(uintptr_t addr); + // Helpers for relocation + static ZGeneration* remap_generation(zpointer ptr); + static void remap_young_relocated(volatile zpointer* p, zpointer o); - static uintptr_t load_barrier_on_oop_slow_path(uintptr_t addr); - static uintptr_t load_barrier_on_invisible_root_oop_slow_path(uintptr_t addr); + // Helpers for marking + template + static void mark(zaddress addr); + template + static void mark_young(zaddress addr); + template + static void mark_if_young(zaddress addr); - static uintptr_t weak_load_barrier_on_oop_slow_path(uintptr_t addr); - static uintptr_t weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr); - static uintptr_t weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr); + // Load barrier + static zaddress load_barrier_on_oop_field(volatile zpointer* p); + static zaddress load_barrier_on_oop_field_preloaded(volatile zpointer* p, zpointer o); - static uintptr_t keep_alive_barrier_on_oop_slow_path(uintptr_t addr); - static uintptr_t keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr); - static uintptr_t keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr); + static zaddress keep_alive_load_barrier_on_oop_field_preloaded(volatile zpointer* p, zpointer o); - static uintptr_t mark_barrier_on_oop_slow_path(uintptr_t addr); - static uintptr_t mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr); + // Load barriers on non-strong oop refs + static zaddress load_barrier_on_weak_oop_field_preloaded(volatile zpointer* p, zpointer o); + static zaddress load_barrier_on_phantom_oop_field_preloaded(volatile zpointer* p, zpointer o); - static void verify_on_weak(volatile oop* referent_addr) NOT_DEBUG_RETURN; + static zaddress no_keep_alive_load_barrier_on_weak_oop_field_preloaded(volatile zpointer* p, zpointer o); + static zaddress no_keep_alive_load_barrier_on_phantom_oop_field_preloaded(volatile zpointer* p, zpointer o); -public: - // Load barrier - static oop load_barrier_on_oop(oop o); - static oop load_barrier_on_oop_field(volatile oop* p); - static oop load_barrier_on_oop_field_preloaded(volatile oop* p, oop o); - static void load_barrier_on_oop_array(volatile oop* p, size_t length); - static void load_barrier_on_oop_fields(oop o); - static oop load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o); - static oop load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o); - static void load_barrier_on_root_oop_field(oop* p); - static void load_barrier_on_invisible_root_oop_field(oop* p); - - // Weak load barrier - static oop 
weak_load_barrier_on_oop_field(volatile oop* p); - static oop weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o); - static oop weak_load_barrier_on_weak_oop(oop o); - static oop weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o); - static oop weak_load_barrier_on_phantom_oop(oop o); - static oop weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o); - - // Is alive barrier - static bool is_alive_barrier_on_weak_oop(oop o); - static bool is_alive_barrier_on_phantom_oop(oop o); - - // Keep alive barrier - static void keep_alive_barrier_on_oop(oop o); - static void keep_alive_barrier_on_weak_oop_field(volatile oop* p); - static void keep_alive_barrier_on_phantom_oop_field(volatile oop* p); - static void keep_alive_barrier_on_phantom_root_oop_field(oop* p); + // Reference processor / weak cleaning barriers + static bool clean_barrier_on_weak_oop_field(volatile zpointer* p); + static bool clean_barrier_on_phantom_oop_field(volatile zpointer* p); + static bool clean_barrier_on_final_oop_field(volatile zpointer* p); // Mark barrier - static void mark_barrier_on_oop_field(volatile oop* p, bool finalizable); - static void mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable); - - // Narrow oop variants, never used. - static oop load_barrier_on_oop_field(volatile narrowOop* p); - static oop load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o); - static void load_barrier_on_oop_array(volatile narrowOop* p, size_t length); - static oop load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o); - static oop load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o); - static oop weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o); - static oop weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o); - static oop weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o); -}; - -class ZLoadBarrierOopClosure : public BasicOopIterateClosure { -public: - virtual void do_oop(oop* p); - virtual void do_oop(narrowOop* p); + static void mark_barrier_on_young_oop_field(volatile zpointer* p); + static void mark_barrier_on_old_oop_field(volatile zpointer* p, bool finalizable); + static void mark_barrier_on_oop_field(volatile zpointer* p, bool finalizable); + static void mark_young_good_barrier_on_oop_field(volatile zpointer* p); + static zaddress remset_barrier_on_oop_field(volatile zpointer* p); + static void promote_barrier_on_young_oop_field(volatile zpointer* p); + + // Store barrier + static void store_barrier_on_heap_oop_field(volatile zpointer* p, bool heal); + static void store_barrier_on_native_oop_field(volatile zpointer* p, bool heal); + + static void no_keep_alive_store_barrier_on_heap_oop_field(volatile zpointer* p); }; #endif // SHARE_GC_Z_ZBARRIER_HPP diff --git a/src/hotspot/share/gc/z/zBarrier.inline.hpp b/src/hotspot/share/gc/z/zBarrier.inline.hpp index 391dd09a8c99d..e0d83619934da 100644 --- a/src/hotspot/share/gc/z/zBarrier.inline.hpp +++ b/src/hotspot/share/gc/z/zBarrier.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,85 +28,50 @@ #include "code/codeCache.hpp" #include "gc/z/zAddress.inline.hpp" -#include "gc/z/zOop.inline.hpp" +#include "gc/z/zGeneration.inline.hpp" +#include "gc/z/zHeap.inline.hpp" #include "gc/z/zResurrection.inline.hpp" #include "oops/oop.hpp" #include "runtime/atomic.hpp" #include "runtime/continuation.hpp" // A self heal must always "upgrade" the address metadata bits in -// accordance with the metadata bits state machine, which has the -// valid state transitions as described below (where N is the GC -// cycle). -// -// Note the subtleness of overlapping GC cycles. Specifically that -// oops are colored Remapped(N) starting at relocation N and ending -// at marking N + 1. -// -// +--- Mark Start -// | +--- Mark End -// | | +--- Relocate Start -// | | | +--- Relocate End -// | | | | -// Marked |---N---|--N+1--|--N+2--|---- -// Finalizable |---N---|--N+1--|--N+2--|---- -// Remapped ----|---N---|--N+1--|--N+2--| -// -// VALID STATE TRANSITIONS -// -// Marked(N) -> Remapped(N) -// -> Marked(N + 1) -// -> Finalizable(N + 1) -// -// Finalizable(N) -> Marked(N) -// -> Remapped(N) -// -> Marked(N + 1) -// -> Finalizable(N + 1) -// -// Remapped(N) -> Marked(N + 1) -// -> Finalizable(N + 1) -// -// PHASE VIEW -// -// ZPhaseMark -// Load & Mark -// Marked(N) <- Marked(N - 1) -// <- Finalizable(N - 1) -// <- Remapped(N - 1) -// <- Finalizable(N) -// -// Mark(Finalizable) -// Finalizable(N) <- Marked(N - 1) -// <- Finalizable(N - 1) -// <- Remapped(N - 1) -// -// Load(AS_NO_KEEPALIVE) -// Remapped(N - 1) <- Marked(N - 1) -// <- Finalizable(N - 1) -// -// ZPhaseMarkCompleted (Resurrection blocked) -// Load & Load(ON_WEAK/PHANTOM_OOP_REF | AS_NO_KEEPALIVE) & KeepAlive -// Marked(N) <- Marked(N - 1) -// <- Finalizable(N - 1) -// <- Remapped(N - 1) -// <- Finalizable(N) -// -// Load(ON_STRONG_OOP_REF | AS_NO_KEEPALIVE) -// Remapped(N - 1) <- Marked(N - 1) -// <- Finalizable(N - 1) -// -// ZPhaseMarkCompleted (Resurrection unblocked) -// Load -// Marked(N) <- Finalizable(N) -// -// ZPhaseRelocate -// Load & Load(AS_NO_KEEPALIVE) -// Remapped(N) <- Marked(N) -// <- Finalizable(N) - -template -inline void ZBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr) { - if (heal_addr == 0) { +// accordance with the metadata bits state machine. The following +// assert verifies the monotonicity of the transitions. 
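// One compact way to read the monotonicity requirement above: treat pointer
// "goodness" as a small ordered set (bad < load-good < mark-good < store-good)
// and require that self healing never moves an oop location down that order.
// The enum and helper below are an illustrative assumption only; the real
// check in assert_transition_monotonicity() just below verifies each property
// bit individually rather than a single combined level.

#include <cassert>

namespace zpointer_sketch {

enum class Goodness { bad = 0, load_good = 1, mark_good = 2, store_good = 3 };

inline void assert_monotonic_heal(Goodness before, Goodness after) {
  // Healing may upgrade the metadata bits of an oop location, never downgrade them.
  assert(static_cast<int>(after) >= static_cast<int>(before) && "non-monotonic heal");
}

} // namespace zpointer_sketch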
+ +inline void ZBarrier::assert_transition_monotonicity(zpointer old_ptr, zpointer new_ptr) { + const bool old_is_load_good = ZPointer::is_load_good(old_ptr); + const bool old_is_mark_good = ZPointer::is_mark_good(old_ptr); + const bool old_is_store_good = ZPointer::is_store_good(old_ptr); + + const bool new_is_load_good = ZPointer::is_load_good(new_ptr); + const bool new_is_mark_good = ZPointer::is_mark_good(new_ptr); + const bool new_is_store_good = ZPointer::is_store_good(new_ptr); + + assert(!old_is_load_good || new_is_load_good, "non-monotonic load good transition"); + assert(!old_is_mark_good || new_is_mark_good, "non-monotonic mark good transition"); + assert(!old_is_store_good || new_is_store_good, "non-monotonic store good transition"); + + if (is_null_any(new_ptr)) { + // Null is good enough at this point + return; + } + + const bool old_is_marked_young = ZPointer::is_marked_young(old_ptr); + const bool old_is_marked_old = ZPointer::is_marked_old(old_ptr); + const bool old_is_marked_finalizable = ZPointer::is_marked_finalizable(old_ptr); + + const bool new_is_marked_young = ZPointer::is_marked_young(new_ptr); + const bool new_is_marked_old = ZPointer::is_marked_old(new_ptr); + const bool new_is_marked_finalizable = ZPointer::is_marked_finalizable(new_ptr); + + assert(!old_is_marked_young || new_is_marked_young, "non-monotonic marked young transition"); + assert(!old_is_marked_old || new_is_marked_old, "non-monotonic marked old transition"); + assert(!old_is_marked_finalizable || new_is_marked_finalizable || new_is_marked_old, "non-monotonic marked final transition"); +} + +inline void ZBarrier::self_heal(ZBarrierFastPath fast_path, volatile zpointer* p, zpointer ptr, zpointer heal_ptr, bool allow_null) { + if (!allow_null && is_null_assert_load_good(heal_ptr) && !is_null_any(ptr)) { // Never heal with null since it interacts badly with reference processing. // A mutator clearing an oop would be similar to calling Reference.clear(), // which would make the reference non-discoverable or silently dropped @@ -114,18 +79,28 @@ inline void ZBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_ return; } - assert(!fast_path(addr), "Invalid self heal"); - assert(fast_path(heal_addr), "Invalid self heal"); + assert_is_valid(ptr); + assert_is_valid(heal_ptr); + assert(!fast_path(ptr), "Invalid self heal"); + assert(fast_path(heal_ptr), "Invalid self heal"); + + assert(ZPointer::is_remapped(heal_ptr), "invariant"); for (;;) { + if (ptr == zpointer::null) { + assert(!ZVerifyOops || !ZHeap::heap()->is_in(uintptr_t(p)) || !ZHeap::heap()->is_old(p), "No raw null in old"); + } + + assert_transition_monotonicity(ptr, heal_ptr); + // Heal - const uintptr_t prev_addr = Atomic::cmpxchg((volatile uintptr_t*)p, addr, heal_addr, memory_order_relaxed); - if (prev_addr == addr) { + const zpointer prev_ptr = Atomic::cmpxchg(p, ptr, heal_ptr, memory_order_relaxed); + if (prev_ptr == ptr) { // Success return; } - if (fast_path(prev_addr)) { + if (fast_path(prev_ptr)) { // Must not self heal return; } @@ -133,261 +108,668 @@ inline void ZBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_ // The oop location was healed by another barrier, but still needs upgrading. // Re-apply healing to make sure the oop is not left with weaker (remapped or // finalizable) metadata bits than what this barrier tried to apply. 
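The re-apply-healing behaviour described in the comment above follows the classic compare-and-swap upgrade loop. A self-contained editorial sketch of that pattern (illustration only, using std::atomic rather than HotSpot's Atomic, with a hypothetical is_good predicate standing in for the fast_path check):

#include <atomic>
#include <cstdint>

// Hypothetical predicate standing in for the barrier's fast_path check:
// returns true if the stored value is already at least as good as what
// we intend to heal with.
using GoodPredicate = bool (*)(uintptr_t);

// Keep trying to install 'heal' at *p, starting from the observed value
// 'seen'. Stop once we succeed, or once another thread has installed a
// value that already satisfies the predicate and must not be overwritten.
static void self_heal_sketch(std::atomic<uintptr_t>* p, uintptr_t seen,
                             uintptr_t heal, GoodPredicate is_good) {
  for (;;) {
    if (p->compare_exchange_strong(seen, heal, std::memory_order_relaxed)) {
      return; // we installed the healed value
    }
    // 'seen' now holds the competing value written by another thread.
    if (is_good(seen)) {
      return; // already good enough, must not self heal
    }
    // Healed by another barrier, but with weaker bits: retry the upgrade.
  }
}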
- assert(ZAddress::offset(prev_addr) == ZAddress::offset(heal_addr), "Invalid offset"); - addr = prev_addr; + ptr = prev_ptr; } } -template -inline oop ZBarrier::barrier(volatile oop* p, oop o) { - const uintptr_t addr = ZOop::to_address(o); +inline ZGeneration* ZBarrier::remap_generation(zpointer ptr) { + assert(!ZPointer::is_load_good(ptr), "no need to remap load-good pointer"); - // Fast path - if (fast_path(addr)) { - return ZOop::from_address(addr); + if (ZPointer::is_old_load_good(ptr)) { + return ZGeneration::young(); } - // Slow path - const uintptr_t good_addr = slow_path(addr); + if (ZPointer::is_young_load_good(ptr)) { + return ZGeneration::old(); + } + + // Double remap bad - the pointer is neither old load good nor + // young load good. First the code ... + + const uintptr_t remembered_bits = untype(ptr) & ZPointerRememberedMask; + const bool old_to_old_ptr = remembered_bits == ZPointerRememberedMask; - if (p != NULL) { - self_heal(p, addr, good_addr); + if (old_to_old_ptr) { + return ZGeneration::old(); + } + + const zaddress_unsafe addr = ZPointer::uncolor_unsafe(ptr); + if (ZGeneration::young()->forwarding(addr) != nullptr) { + assert(ZGeneration::old()->forwarding(addr) == nullptr, "Mutually exclusive"); + return ZGeneration::young(); + } else { + return ZGeneration::old(); } - return ZOop::from_address(good_addr); + // ... then the explanation. Time to put your seat belt on. + + // In this context we only have access to the ptr (colored oop), but we + // don't know if this refers to a stale young gen or old gen object. + // However, by being careful with when we run young and old collections, + // and by explicitly remapping roots we can figure this out by looking + // at the metadata bits in the pointer. + + // *Roots (including remset)*: + // + // will never have double remap bit errors, + // and will never enter this path. The reason is that there's always a + // phase that remaps all roots between all relocation phases: + // + // 1) Young marking remaps the roots, before the young relocation runs + // + // 2) The old roots_remap phase blocks out young collections and runs just + // before old relocation starts + + // *Heap object fields*: + // + // could have double remap bit errors, and may enter this path. We are using + // knowledge about how *remember* bits are set, to narrow down the + // possibilities. + + // Short summary: + // + // If both remember bits are set, when we have a double + // remap bit error, then we know that we are dealing with + // an old-to-old pointer. + // + // Otherwise, we are dealing with a young-to-any pointer, + // and the address that contained the pointed-to object, is + // guaranteed to have only been used by either the young gen + // or the old gen. + + // Longer explanation: + + // Double remap bad pointers in young gen: + // + // After young relocation, the young gen objects were promoted to old gen, + // and we keep track of those old-to-young pointers via the remset + // (described above in the roots section). + // + // However, when young marking started, the current set of young gen objects + // are snapshotted, and subsequent allocations end up in the next young + // collection. Between young mark start, and young relocate start, stores + // can happen to either the "young allocating" objects, or objects that + // are about to become survivors. For both survivors and young-allocating + // objects, it is true that their zpointers will be store good when + // young marking finishes, and can not get demoted. 
These pointers will become + // young remap bad after young relocate start. We don't maintain a remset + // for the young allocating objects, so we don't have the same guarantee as + // we have for roots (including remset). Pointers in these objects are + // therefore susceptible to become double remap bad. + // + // The scenario that can happen is: + // - Store in young allocating or future survivor happens between young mark + // start and young relocate start + // - Young relocate start makes this pointer young remap bad + // - It is NOT fixed in roots_remap (it is not part of the remset or roots) + // - Old relocate start makes this pointer also old remap bad + + // Double remap bad pointers in old gen: + // + // When an object is promoted, all oop*s are added to the remset. (Could + // have either double or single remember bits at this point) + // + // As long as we have a remset entry for the oop*, we ensure that the pointer + // is not double remap bad. See the roots section. + // + // However, at some point the GC notices that the pointer points to an old + // object, and that there's no need for a remset entry. Because of that, + // the young collection will not visit the pointer, and the pointer can + // become double remap bad. + // + // The scenario that can happen is: + // - Old marking visits the object + // - Old relocation starts and then young relocation starts + // or + // - Young relocation starts and then old relocation starts + + // About double *remember* bits: + // + // Whenever we: + // - perform a store barrier, we heal with one remember bit. + // - mark objects in young gen, we heal with one remember bit. + // - perform a non-store barrier outside of young gen, we heal with + // double remember bits. + // - "remset forget" a pointer in an old object, we heal with double + // remember bits. + // + // Double remember bits ensures that *every* store that encounters it takes + // a slow path. + // + // If we encounter a pointer that is both double remap bad *and* has double + // remember bits, we know that it can't be young and it has to be old! + // + // Pointers in young objects: + // + // The only double remap bad young pointers are inside "young allocating" + // objects and survivors, as described above. When such a pointer was written + // into the young allocating memory, or marked in young gen, the pointer was + // remap good and the store/young mark barrier healed with a single remember bit. + // No other barrier could replace that bit, because store good is the greatest + // barrier, and all other barriers will take the fast-path. This is true until + // the young relocation starts. + // + // After the young relocation has started, the pointer became young remap + // bad, and maybe we even started an old relocation, and the pointer became + // double remap bad. When the next load barrier triggers, it will self heal + // with double remember bits, but *importantly* it will at the same time + // heal with good remap bits. + // + // So, if we have entered this "double remap bad" path, and the pointer was + // located in young gen, then it was young allocating or a survivor, and it + // must only have one remember bit set! + // + // Pointers in old objects: + // + // When pointers become forgotten, they are tagged with double remembered + // bits. Only way to convert the pointer into having only one remembered + // bit, is to perform a store.
When that happens, the pointer becomes both + // remap good and remembered again, and will be handled as the roots + // described above. + + // With the above information: + // + // Iff we find a double remap bad pointer with *double remember bits*, + // then we know that it is an old-to-old pointer, and we should use the + // forwarding table of the old generation. + // + // Iff we find a double remap bad pointer with a *single remember bit*, + // then we know that it is a young-to-any pointer. We still don't know + // if the pointed-to object is young or old. + + // Figuring out if a double remap bad pointer in young pointed at + // young or old: + // + // The scenario that created a double remap bad pointer in the young + // allocating or survivor memory is that it was written during the last + // young marking before the old relocation started. At that point, the old + // generation collection has already taken its marking snapshot, and + // determined what pages will be marked and therefore eligible to become + // part of the old relocation set. If the young generation relocated/freed + // a page (address range), and that address range was then reused for an old + // page, it won't be part of the old snapshot and it therefore won't be + // selected for old relocation. + // + // Because of this, we know that the object written into the young + // allocating page will at most belong to one of the two relocation sets, + // and we can therefore simply check in which table we installed + // ZForwarding. } -template -inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) { - const uintptr_t addr = ZOop::to_address(o); +inline zaddress ZBarrier::make_load_good(zpointer o) { + if (is_null_any(o)) { + return zaddress::null; + } - // Fast path - if (fast_path(addr)) { - // Return the good address instead of the weak good address - // to ensure that the currently active heap view is used. - return ZOop::from_address(ZAddress::good_or_null(addr)); + if (ZPointer::is_load_good_or_null(o)) { + return ZPointer::uncolor(o); } - // Slow path - const uintptr_t good_addr = slow_path(addr); + return relocate_or_remap(ZPointer::uncolor_unsafe(o), remap_generation(o)); +} - if (p != NULL) { - // The slow path returns a good/marked address or null, but we never mark - // oops in a weak load barrier so we always heal with the remapped address. 
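The remap_generation() reasoning above reduces to a short decision: a double remap bad pointer carrying both remember bits must be an old-to-old pointer, and otherwise the young generation's forwarding table disambiguates. The sketch below is an editorial illustration only, with an invented toy bit layout rather than the patch's real zpointer encoding:

#include <cstdint>

// Toy encoding with two "remember" bits, mirroring the description above.
// All names here are illustrative stand-ins, not HotSpot identifiers.
constexpr uintptr_t kRememberedBit0 = uintptr_t(1) << 0;
constexpr uintptr_t kRememberedBit1 = uintptr_t(1) << 1;
constexpr uintptr_t kRememberedMask = kRememberedBit0 | kRememberedBit1;

enum class Gen { young, old };

// Decide which generation's forwarding table to consult for a pointer that
// is double remap bad. 'young_has_forwarding' models the lookup in the
// young generation's forwarding table.
static Gen remap_generation_sketch(uintptr_t colored_ptr, bool young_has_forwarding) {
  if ((colored_ptr & kRememberedMask) == kRememberedMask) {
    // Both remember bits set: only old-to-old pointers can look like this.
    return Gen::old;
  }
  // A single remember bit means a young-to-any pointer. The pointed-to
  // address was used by at most one of the two relocation sets, so the
  // forwarding table lookup is unambiguous.
  return young_has_forwarding ? Gen::young : Gen::old;
}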
- self_heal(p, addr, ZAddress::remapped_or_null(good_addr)); +inline zaddress ZBarrier::make_load_good_no_relocate(zpointer o) { + if (is_null_any(o)) { + return zaddress::null; } - return ZOop::from_address(good_addr); + if (ZPointer::is_load_good_or_null(o)) { + return ZPointer::uncolor(o); + } + + return remap(ZPointer::uncolor_unsafe(o), remap_generation(o)); +} + +inline void z_assert_is_barrier_safe() { + assert(!Thread::current()->is_ConcurrentGC_thread() || /* Need extra checks for ConcurrentGCThreads */ + Thread::current()->is_suspendible_thread() || /* Thread prevents safepoints */ + Thread::current()->is_indirectly_suspendible_thread() || /* Coordinator thread prevents safepoints */ + SafepointSynchronize::is_at_safepoint(), /* Is at safepoint */ + "Shouldn't perform load barrier"); } -template -inline void ZBarrier::root_barrier(oop* p, oop o) { - const uintptr_t addr = ZOop::to_address(o); +template +inline zaddress ZBarrier::barrier(ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path, ZBarrierColor color, volatile zpointer* p, zpointer o, bool allow_null) { + z_assert_is_barrier_safe(); // Fast path - if (fast_path(addr)) { - return; + if (fast_path(o)) { + return ZPointer::uncolor(o); } + // Make load good + const zaddress load_good_addr = make_load_good(o); + // Slow path - const uintptr_t good_addr = slow_path(addr); + const zaddress good_addr = slow_path(load_good_addr); + + // Self heal + if (p != nullptr) { + // Color + const zpointer good_ptr = color(good_addr, o); + + assert(!is_null(good_ptr), "Always block raw null"); + + self_heal(fast_path, p, o, good_ptr, allow_null); + } + + return good_addr; +} + +inline void ZBarrier::remap_young_relocated(volatile zpointer* p, zpointer o) { + assert(ZPointer::is_old_load_good(o), "Should be old load good"); + assert(!ZPointer::is_young_load_good(o), "Should not be young load good"); + + // Make load good + const zaddress load_good_addr = make_load_good_no_relocate(o); + + // Color + const zpointer good_ptr = ZAddress::load_good(load_good_addr, o); - // Non-atomic healing helps speed up root scanning. This is safe to do - // since we are always healing roots in a safepoint, or under a lock, - // which ensures we are never racing with mutators modifying roots while - // we are healing them. It's also safe in case multiple GC threads try - // to heal the same root if it is aligned, since they would always heal - // the root in the same way and it does not matter in which order it - // happens. For misaligned oops, there needs to be mutual exclusion. - *p = ZOop::from_address(good_addr); + assert(!is_null(good_ptr), "Always block raw null"); + + // Despite knowing good_ptr isn't null in this context, we use the + // load_good_or_null fast path, because it is faster. 
+ self_heal(is_load_good_or_null_fast_path, p, o, good_ptr, false /* allow_null */); +} + +inline zpointer ZBarrier::load_atomic(volatile zpointer* p) { + const zpointer ptr = Atomic::load(p); + assert_is_valid(ptr); + return ptr; } -inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) { - return ZAddress::is_good_or_null(addr); +// +// Fast paths +// + +inline bool ZBarrier::is_load_good_or_null_fast_path(zpointer ptr) { + return ZPointer::is_load_good_or_null(ptr); } -inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) { - return ZAddress::is_weak_good_or_null(addr); +inline bool ZBarrier::is_mark_good_fast_path(zpointer ptr) { + return ZPointer::is_mark_good(ptr); } -inline bool ZBarrier::is_marked_or_null_fast_path(uintptr_t addr) { - return ZAddress::is_marked_or_null(addr); +inline bool ZBarrier::is_store_good_fast_path(zpointer ptr) { + return ZPointer::is_store_good(ptr); } -inline bool ZBarrier::during_mark() { - return ZGlobalPhase == ZPhaseMark; +inline bool ZBarrier::is_store_good_or_null_fast_path(zpointer ptr) { + return ZPointer::is_store_good_or_null(ptr); } -inline bool ZBarrier::during_relocate() { - return ZGlobalPhase == ZPhaseRelocate; +inline bool ZBarrier::is_store_good_or_null_any_fast_path(zpointer ptr) { + return is_null_any(ptr) || !ZPointer::is_store_bad(ptr); +} + +inline bool ZBarrier::is_mark_young_good_fast_path(zpointer ptr) { + return ZPointer::is_load_good(ptr) && ZPointer::is_marked_young(ptr); +} + +inline bool ZBarrier::is_finalizable_good_fast_path(zpointer ptr) { + return ZPointer::is_load_good(ptr) && ZPointer::is_marked_any_old(ptr); } // -// Load barrier +// Slow paths +// + +inline zaddress ZBarrier::promote_slow_path(zaddress addr) { + // No need to do anything + return addr; +} + // -inline oop ZBarrier::load_barrier_on_oop(oop o) { - return load_barrier_on_oop_field_preloaded((oop*)NULL, o); +// Color functions +// + +inline zpointer color_load_good(zaddress new_addr, zpointer old_ptr) { + return ZAddress::load_good(new_addr, old_ptr); +} + +inline zpointer color_finalizable_good(zaddress new_addr, zpointer old_ptr) { + if (ZPointer::is_marked_old(old_ptr)) { + // Don't down-grade pointers + return ZAddress::mark_old_good(new_addr, old_ptr); + } else { + return ZAddress::finalizable_good(new_addr, old_ptr); + } +} + +inline zpointer color_mark_good(zaddress new_addr, zpointer old_ptr) { + return ZAddress::mark_good(new_addr, old_ptr); +} + +inline zpointer color_mark_young_good(zaddress new_addr, zpointer old_ptr) { + return ZAddress::mark_young_good(new_addr, old_ptr); +} + +inline zpointer color_remset_good(zaddress new_addr, zpointer old_ptr) { + if (new_addr == zaddress::null || ZHeap::heap()->is_young(new_addr)) { + return ZAddress::mark_good(new_addr, old_ptr); + } else { + // If remembered set scanning finds an old-to-old pointer, we won't mark it + // and hence only really care about setting remembered bits to 11 so that + // subsequent stores trip on the store-bad bit pattern. However, the contract + // with the fast path check, is that the pointer should invariantly be young + // mark good at least, so we color it as such. 
+ return ZAddress::mark_young_good(new_addr, old_ptr); + } +} + +inline zpointer color_store_good(zaddress new_addr, zpointer old_ptr) { + return ZAddress::store_good(new_addr); } -inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) { - const oop o = Atomic::load(p); +// +// Load barrier +// + +inline zaddress ZBarrier::load_barrier_on_oop_field(volatile zpointer* p) { + const zpointer o = load_atomic(p); return load_barrier_on_oop_field_preloaded(p, o); } -inline oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) { - return barrier(p, o); +inline zaddress ZBarrier::load_barrier_on_oop_field_preloaded(volatile zpointer* p, zpointer o) { + auto slow_path = [](zaddress addr) -> zaddress { + return addr; + }; + + return barrier(is_load_good_or_null_fast_path, slow_path, color_load_good, p, o); +} + +inline zaddress ZBarrier::keep_alive_load_barrier_on_oop_field_preloaded(volatile zpointer* p, zpointer o) { + assert(!ZResurrection::is_blocked(), "This operation is only valid when resurrection is not blocked"); + return barrier(is_mark_good_fast_path, keep_alive_slow_path, color_mark_good, p, o); +} + +// +// Load barrier on non-strong oop refs +// + +inline zaddress ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile zpointer* p, zpointer o) { + verify_on_weak(p); + + if (ZResurrection::is_blocked()) { + return blocking_keep_alive_load_barrier_on_weak_oop_field_preloaded(p, o); + } + + return keep_alive_load_barrier_on_oop_field_preloaded(p, o); } -inline void ZBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) { - for (volatile const oop* const end = p + length; p < end; p++) { - load_barrier_on_oop_field(p); +inline zaddress ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile zpointer* p, zpointer o) { + if (ZResurrection::is_blocked()) { + return blocking_keep_alive_load_barrier_on_phantom_oop_field_preloaded(p, o); } + + return keep_alive_load_barrier_on_oop_field_preloaded(p, o); } -inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) { +inline zaddress ZBarrier::no_keep_alive_load_barrier_on_weak_oop_field_preloaded(volatile zpointer* p, zpointer o) { verify_on_weak(p); if (ZResurrection::is_blocked()) { - return barrier(p, o); + return blocking_load_barrier_on_weak_oop_field_preloaded(p, o); } + // Normal load barrier doesn't keep the object alive return load_barrier_on_oop_field_preloaded(p, o); } -inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) { +inline zaddress ZBarrier::no_keep_alive_load_barrier_on_phantom_oop_field_preloaded(volatile zpointer* p, zpointer o) { if (ZResurrection::is_blocked()) { - return barrier(p, o); + return blocking_load_barrier_on_phantom_oop_field_preloaded(p, o); } + // Normal load barrier doesn't keep the object alive return load_barrier_on_oop_field_preloaded(p, o); } -inline void ZBarrier::load_barrier_on_root_oop_field(oop* p) { - const oop o = *p; - root_barrier(p, o); +inline zaddress ZBarrier::blocking_keep_alive_load_barrier_on_weak_oop_field_preloaded(volatile zpointer* p, zpointer o) { + auto slow_path = [=](zaddress addr) -> zaddress { + return ZBarrier::blocking_keep_alive_on_weak_slow_path(p, addr); + }; + return barrier(is_mark_good_fast_path, slow_path, color_mark_good, p, o); +} + +inline zaddress ZBarrier::blocking_keep_alive_load_barrier_on_phantom_oop_field_preloaded(volatile zpointer* p, zpointer o) { + auto slow_path = [=](zaddress addr) -> zaddress { + return 
ZBarrier::blocking_keep_alive_on_phantom_slow_path(p, addr); + }; + return barrier(is_mark_good_fast_path, slow_path, color_mark_good, p, o); +} + +inline zaddress ZBarrier::blocking_load_barrier_on_weak_oop_field_preloaded(volatile zpointer* p, zpointer o) { + auto slow_path = [=](zaddress addr) -> zaddress { + return ZBarrier::blocking_load_barrier_on_weak_slow_path(p, addr); + }; + return barrier(is_mark_good_fast_path, slow_path, color_mark_good, p, o); } -inline void ZBarrier::load_barrier_on_invisible_root_oop_field(oop* p) { - const oop o = *p; - root_barrier(p, o); +inline zaddress ZBarrier::blocking_load_barrier_on_phantom_oop_field_preloaded(volatile zpointer* p, zpointer o) { + auto slow_path = [=](zaddress addr) -> zaddress { + return ZBarrier::blocking_load_barrier_on_phantom_slow_path(p, addr); + }; + return barrier(is_mark_good_fast_path, slow_path, color_mark_good, p, o); } // -// Weak load barrier +// Clean barrier // -inline oop ZBarrier::weak_load_barrier_on_oop_field(volatile oop* p) { - assert(!ZResurrection::is_blocked(), "Should not be called during resurrection blocked phase"); - const oop o = Atomic::load(p); - return weak_load_barrier_on_oop_field_preloaded(p, o); + +inline bool ZBarrier::clean_barrier_on_weak_oop_field(volatile zpointer* p) { + assert(ZResurrection::is_blocked(), "This operation is only valid when resurrection is blocked"); + const zpointer o = load_atomic(p); + auto slow_path = [=](zaddress addr) -> zaddress { + return ZBarrier::blocking_load_barrier_on_weak_slow_path(p, addr); + }; + return is_null(barrier(is_mark_good_fast_path, slow_path, color_mark_good, p, o, true /* allow_null */)); } -inline oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) { - return weak_barrier(p, o); +inline bool ZBarrier::clean_barrier_on_phantom_oop_field(volatile zpointer* p) { + assert(ZResurrection::is_blocked(), "This operation is only valid when resurrection is blocked"); + const zpointer o = load_atomic(p); + auto slow_path = [=](zaddress addr) -> zaddress { + return ZBarrier::blocking_load_barrier_on_phantom_slow_path(p, addr); + }; + return is_null(barrier(is_mark_good_fast_path, slow_path, color_mark_good, p, o, true /* allow_null */)); } -inline oop ZBarrier::weak_load_barrier_on_weak_oop(oop o) { - return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o); +inline bool ZBarrier::clean_barrier_on_final_oop_field(volatile zpointer* p) { + assert(ZResurrection::is_blocked(), "Invalid phase"); + + // The referent in a FinalReference should never be cleared by the GC. Instead + // it should just be healed (as if it was a phantom oop) and this function should + // return true if the object pointed to by the referent is not strongly reachable.
+ const zpointer o = load_atomic(p); + auto slow_path = [=](zaddress addr) -> zaddress { + return ZBarrier::blocking_load_barrier_on_phantom_slow_path(p, addr); + }; + const zaddress addr = barrier(is_mark_good_fast_path, slow_path, color_mark_good, p, o); + assert(!is_null(addr), "Should be finalizable marked"); + + return is_null(blocking_load_barrier_on_weak_slow_path(p, addr)); } -inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) { - verify_on_weak(p); +// +// Mark barrier +// +inline void ZBarrier::mark_barrier_on_oop_field(volatile zpointer* p, bool finalizable) { + const zpointer o = load_atomic(p); - if (ZResurrection::is_blocked()) { - return barrier(p, o); + if (finalizable) { + // During marking, we mark through already marked oops to avoid having + // some large part of the object graph hidden behind a pushed, but not + // yet flushed, entry on a mutator mark stack. Always marking through + // allows the GC workers to proceed through the object graph even if a + // mutator touched an oop first, which in turn will reduce the risk of + // having to flush mark stacks multiple times to terminate marking. + // + // However, when doing finalizable marking we don't always want to mark + // through. First, marking through an already strongly marked oop would + // be wasteful, since we will then proceed to do finalizable marking on + // an object which is, or will be, marked strongly. Second, marking + // through an already finalizable marked oop would also be wasteful, + // since such oops can never end up on a mutator mark stack and can + // therefore not hide some part of the object graph from GC workers. + + // Make the oop finalizable marked/good, instead of normal marked/good. + // This is needed because an object might first becomes finalizable + // marked by the GC, and then loaded by a mutator thread. In this case, + // the mutator thread must be able to tell that the object needs to be + // strongly marked. The finalizable bit in the oop exists to make sure + // that a load of a finalizable marked oop will fall into the barrier + // slow path so that we can mark the object as strongly reachable. + + // Note: that this does not color the pointer finalizable marked if it + // is already colored marked old good. + barrier(is_finalizable_good_fast_path, mark_finalizable_slow_path, color_finalizable_good, p, o); + } else { + barrier(is_mark_good_fast_path, mark_slow_path, color_mark_good, p, o); } +} + +inline void ZBarrier::mark_barrier_on_old_oop_field(volatile zpointer* p, bool finalizable) { + assert(ZHeap::heap()->is_old(p), "Should be from old"); + const zpointer o = load_atomic(p); - return weak_load_barrier_on_oop_field_preloaded(p, o); + if (finalizable) { + // During marking, we mark through already marked oops to avoid having + // some large part of the object graph hidden behind a pushed, but not + // yet flushed, entry on a mutator mark stack. Always marking through + // allows the GC workers to proceed through the object graph even if a + // mutator touched an oop first, which in turn will reduce the risk of + // having to flush mark stacks multiple times to terminate marking. + // + // However, when doing finalizable marking we don't always want to mark + // through. First, marking through an already strongly marked oop would + // be wasteful, since we will then proceed to do finalizable marking on + // an object which is, or will be, marked strongly. 
Second, marking + // through an already finalizable marked oop would also be wasteful, + // since such oops can never end up on a mutator mark stack and can + // therefore not hide some part of the object graph from GC workers. + + // Make the oop finalizable marked/good, instead of normal marked/good. + // This is needed because an object might first becomes finalizable + // marked by the GC, and then loaded by a mutator thread. In this case, + // the mutator thread must be able to tell that the object needs to be + // strongly marked. The finalizable bit in the oop exists to make sure + // that a load of a finalizable marked oop will fall into the barrier + // slow path so that we can mark the object as strongly reachable. + + // Note: that this does not color the pointer finalizable marked if it + // is already colored marked old good. + barrier(is_finalizable_good_fast_path, mark_finalizable_from_old_slow_path, color_finalizable_good, p, o); + } else { + barrier(is_mark_good_fast_path, mark_from_old_slow_path, color_mark_good, p, o); + } } -inline oop ZBarrier::weak_load_barrier_on_phantom_oop(oop o) { - return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o); +inline void ZBarrier::mark_barrier_on_young_oop_field(volatile zpointer* p) { + assert(ZHeap::heap()->is_young(p), "Should be from young"); + const zpointer o = load_atomic(p); + barrier(is_store_good_or_null_any_fast_path, mark_from_young_slow_path, color_store_good, p, o); } -inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) { - if (ZResurrection::is_blocked()) { - return barrier(p, o); - } +inline void ZBarrier::promote_barrier_on_young_oop_field(volatile zpointer* p) { + const zpointer o = load_atomic(p); + // Objects that get promoted to the old generation, must invariantly contain + // only store good pointers. However, the young marking code above filters + // out null pointers, so we need to explicitly ensure even null pointers are + // store good, before objects may get promoted (and before relocate start). + // This barrier ensures that. + // This could simply be ensured in the marking above, but promotion rates + // are typically rather low, and fixing all null pointers strictly, when + // only a few had to be store good due to promotions, is generally not favourable + barrier(is_store_good_fast_path, promote_slow_path, color_store_good, p, o); +} + +inline zaddress ZBarrier::remset_barrier_on_oop_field(volatile zpointer* p) { + const zpointer o = load_atomic(p); + return barrier(is_mark_young_good_fast_path, mark_young_slow_path, color_remset_good, p, o); +} - return weak_load_barrier_on_oop_field_preloaded(p, o); +inline void ZBarrier::mark_young_good_barrier_on_oop_field(volatile zpointer* p) { + const zpointer o = load_atomic(p); + barrier(is_mark_young_good_fast_path, mark_young_slow_path, color_mark_young_good, p, o); } // -// Is alive barrier +// Store barrier // -inline bool ZBarrier::is_alive_barrier_on_weak_oop(oop o) { - // Check if oop is logically non-null. This operation - // is only valid when resurrection is blocked. 
- assert(ZResurrection::is_blocked(), "Invalid phase"); - return weak_load_barrier_on_weak_oop(o) != NULL; + +inline void ZBarrier::store_barrier_on_heap_oop_field(volatile zpointer* p, bool heal) { + const zpointer prev = load_atomic(p); + + auto slow_path = [=](zaddress addr) -> zaddress { + return ZBarrier::heap_store_slow_path(p, addr, prev, heal); + }; + + if (heal) { + barrier(is_store_good_fast_path, slow_path, color_store_good, p, prev); + } else { + barrier(is_store_good_or_null_fast_path, slow_path, color_store_good, nullptr, prev); + } } -inline bool ZBarrier::is_alive_barrier_on_phantom_oop(oop o) { - // Check if oop is logically non-null. This operation - // is only valid when resurrection is blocked. - assert(ZResurrection::is_blocked(), "Invalid phase"); - return weak_load_barrier_on_phantom_oop(o) != NULL; +inline void ZBarrier::store_barrier_on_native_oop_field(volatile zpointer* p, bool heal) { + const zpointer prev = load_atomic(p); + + if (heal) { + barrier(is_store_good_fast_path, native_store_slow_path, color_store_good, p, prev); + } else { + barrier(is_store_good_or_null_fast_path, native_store_slow_path, color_store_good, nullptr, prev); + } } -// -// Keep alive barrier -// -inline void ZBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) { - assert(ZResurrection::is_blocked(), "This operation is only valid when resurrection is blocked"); - const oop o = Atomic::load(p); - barrier(p, o); +inline void ZBarrier::no_keep_alive_store_barrier_on_heap_oop_field(volatile zpointer* p) { + const zpointer prev = load_atomic(p); + + auto slow_path = [=](zaddress addr) -> zaddress { + return ZBarrier::no_keep_alive_heap_store_slow_path(p, addr); + }; + + barrier(is_store_good_fast_path, slow_path, color_store_good, nullptr, prev); } -inline void ZBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) { - assert(ZResurrection::is_blocked(), "This operation is only valid when resurrection is blocked"); - const oop o = Atomic::load(p); - barrier(p, o); +inline void ZBarrier::remember(volatile zpointer* p) { + if (ZHeap::heap()->is_old(p)) { + ZGeneration::young()->remember(p); + } } -inline void ZBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) { - // The keep alive operation is only valid when resurrection is blocked. - // - // Except with Loom, where we intentionally trigger arms nmethods after - // unlinking, to get a sense of what nmethods are alive. This will trigger - // the keep alive barriers, but the oops are healed and the slow-paths - // will not trigger. We have stronger checks in the slow-paths. 
- assert(ZResurrection::is_blocked() || (CodeCache::contains((void*)p)), - "This operation is only valid when resurrection is blocked"); - const oop o = *p; - root_barrier(p, o); -} - -inline void ZBarrier::keep_alive_barrier_on_oop(oop o) { - const uintptr_t addr = ZOop::to_address(o); - assert(ZAddress::is_good(addr), "Invalid address"); - - if (during_mark()) { - keep_alive_barrier_on_oop_slow_path(addr); +inline void ZBarrier::mark_and_remember(volatile zpointer* p, zaddress addr) { + if (!is_null(addr)) { + mark(addr); } + remember(p); } -// -// Mark barrier -// -inline void ZBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) { - const oop o = Atomic::load(p); +template +inline void ZBarrier::mark(zaddress addr) { + assert(!ZVerifyOops || oopDesc::is_oop(to_oop(addr), false), "must be oop"); - if (finalizable) { - barrier(p, o); + if (ZHeap::heap()->is_old(addr)) { + ZGeneration::old()->mark_object_if_active(addr); } else { - const uintptr_t addr = ZOop::to_address(o); - if (ZAddress::is_good(addr)) { - // Mark through good oop - mark_barrier_on_oop_slow_path(addr); - } else { - // Mark through bad oop - barrier(p, o); - } + ZGeneration::young()->mark_object_if_active(addr); } } -inline void ZBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) { - for (volatile const oop* const end = p + length; p < end; p++) { - mark_barrier_on_oop_field(p, finalizable); +template +inline void ZBarrier::mark_young(zaddress addr) { + assert(ZGeneration::young()->is_phase_mark(), "Should only be called during marking"); + assert(!ZVerifyOops || oopDesc::is_oop(to_oop(addr), false), "must be oop"); + assert(ZHeap::heap()->is_young(addr), "Must be young"); + + ZGeneration::young()->mark_object(addr); +} + +template +inline void ZBarrier::mark_if_young(zaddress addr) { + if (ZHeap::heap()->is_young(addr)) { + mark_young(addr); } } diff --git a/src/hotspot/share/gc/z/zBarrierSet.cpp b/src/hotspot/share/gc/z/zBarrierSet.cpp index d37623b862457..c2922f54fcc99 100644 --- a/src/hotspot/share/gc/z/zBarrierSet.cpp +++ b/src/hotspot/share/gc/z/zBarrierSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,11 +26,16 @@ #include "gc/z/zBarrierSetAssembler.hpp" #include "gc/z/zBarrierSetNMethod.hpp" #include "gc/z/zBarrierSetStackChunk.hpp" +#include "gc/z/zGeneration.inline.hpp" #include "gc/z/zGlobals.hpp" #include "gc/z/zHeap.inline.hpp" #include "gc/z/zStackWatermark.hpp" #include "gc/z/zThreadLocalData.hpp" +#include "runtime/deoptimization.hpp" +#include "runtime/frame.inline.hpp" #include "runtime/javaThread.hpp" +#include "runtime/registerMap.hpp" +#include "runtime/stackWatermarkSet.hpp" #include "utilities/macros.hpp" #ifdef COMPILER1 #include "gc/z/c1/zBarrierSetC1.hpp" @@ -80,12 +85,18 @@ void ZBarrierSet::on_thread_destroy(Thread* thread) { } void ZBarrierSet::on_thread_attach(Thread* thread) { - // Set thread local address bad mask - ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask); + // Set thread local masks + ZThreadLocalData::set_load_bad_mask(thread, ZPointerLoadBadMask); + ZThreadLocalData::set_load_good_mask(thread, ZPointerLoadGoodMask); + ZThreadLocalData::set_mark_bad_mask(thread, ZPointerMarkBadMask); + ZThreadLocalData::set_store_bad_mask(thread, ZPointerStoreBadMask); + ZThreadLocalData::set_store_good_mask(thread, ZPointerStoreGoodMask); + ZThreadLocalData::set_nmethod_disarmed(thread, ZPointerStoreGoodMask); if (thread->is_Java_thread()) { JavaThread* const jt = JavaThread::cast(thread); StackWatermark* const watermark = new ZStackWatermark(jt); StackWatermarkSet::add_watermark(jt, watermark); + ZThreadLocalData::store_barrier_buffer(jt)->initialize(); } } @@ -94,6 +105,53 @@ void ZBarrierSet::on_thread_detach(Thread* thread) { ZHeap::heap()->mark_flush_and_free(thread); } +static void deoptimize_allocation(JavaThread* thread) { + RegisterMap reg_map(thread, RegisterMap::UpdateMap::skip, + RegisterMap::ProcessFrames::include, + RegisterMap::WalkContinuation::skip); + const frame runtime_frame = thread->last_frame(); + assert(runtime_frame.is_runtime_frame(), "must be runtime frame"); + + const frame caller_frame = runtime_frame.sender(®_map); + assert(caller_frame.is_compiled_frame(), "must be compiled"); + + const nmethod* const nm = caller_frame.cb()->as_nmethod(); + if (nm->is_compiled_by_c2() && !caller_frame.is_deoptimized_frame()) { + Deoptimization::deoptimize_frame(thread, caller_frame.id()); + } +} + +void ZBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) { + const ZPage* const page = ZHeap::heap()->page(to_zaddress(new_obj)); + const ZPageAge age = page->age(); + if (age == ZPageAge::old) { + // We promised C2 that its allocations would end up in young gen. This object + // breaks that promise. Take a few steps in the interpreter instead, which has + // no such assumptions about where an object resides. + deoptimize_allocation(thread); + return; + } + + if (!ZGeneration::young()->is_phase_mark_complete()) { + return; + } + + if (!page->is_relocatable()) { + return; + } + + if (ZRelocate::compute_to_age(age) != ZPageAge::old) { + return; + } + + // If the object is young, we have to still be careful that it isn't racingly + // about to get promoted to the old generation. That causes issues when null + // pointers are supposed to be coloured, but the JIT is a bit sloppy and + // reinitializes memory with raw nulls. We detect this situation and detune + // rather than relying on the JIT to never be sloppy with redundant initialization. 
+ deoptimize_allocation(thread); +} + void ZBarrierSet::print_on(outputStream* st) const { st->print_cr("ZBarrierSet"); } diff --git a/src/hotspot/share/gc/z/zBarrierSet.hpp b/src/hotspot/share/gc/z/zBarrierSet.hpp index ebb80e106af57..213f85dcea8c7 100644 --- a/src/hotspot/share/gc/z/zBarrierSet.hpp +++ b/src/hotspot/share/gc/z/zBarrierSet.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,10 +25,14 @@ #define SHARE_GC_Z_ZBARRIERSET_HPP #include "gc/shared/barrierSet.hpp" +#include "gc/z/zAddress.hpp" class ZBarrierSetAssembler; class ZBarrierSet : public BarrierSet { +private: + static zpointer store_good(oop obj); + public: ZBarrierSet(); @@ -40,6 +44,8 @@ class ZBarrierSet : public BarrierSet { virtual void on_thread_attach(Thread* thread); virtual void on_thread_detach(Thread* thread); + virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj); + virtual void print_on(outputStream* st) const; template @@ -53,48 +59,96 @@ class ZBarrierSet : public BarrierSet { template static void verify_decorators_absent(); - static oop* field_addr(oop base, ptrdiff_t offset); + static zpointer* field_addr(oop base, ptrdiff_t offset); + + static zaddress load_barrier(zpointer* p, zpointer o); + static zaddress load_barrier_on_unknown_oop_ref(oop base, ptrdiff_t offset, zpointer* p, zpointer o); + + static void store_barrier_heap_with_healing(zpointer* p); + static void store_barrier_heap_without_healing(zpointer* p); + static void no_keep_alive_store_barrier_heap(zpointer* p); + + static void store_barrier_native_with_healing(zpointer* p); + static void store_barrier_native_without_healing(zpointer* p); + + static void unsupported(); + static zaddress load_barrier(narrowOop* p, zpointer o) { unsupported(); return zaddress::null; } + static zaddress load_barrier_on_unknown_oop_ref(oop base, ptrdiff_t offset, narrowOop* p, zpointer o) { unsupported(); return zaddress::null; } + static void store_barrier_heap_with_healing(narrowOop* p) { unsupported(); } + static void store_barrier_heap_without_healing(narrowOop* p) { unsupported(); } + static void no_keep_alive_store_barrier_heap(narrowOop* p) { unsupported(); } + static void store_barrier_native_with_healing(narrowOop* p) { unsupported(); } + static void store_barrier_native_without_healing(narrowOop* p) { unsupported(); } - template - static oop load_barrier_on_oop_field_preloaded(T* addr, oop o); + static zaddress oop_copy_one_barriers(zpointer* dst, zpointer* src); + static bool oop_copy_one_check_cast(zpointer* dst, zpointer* src, Klass* dst_klass); + static void oop_copy_one(zpointer* dst, zpointer* src); - template - static oop load_barrier_on_unknown_oop_field_preloaded(oop base, ptrdiff_t offset, T* addr, oop o); + static bool oop_arraycopy_in_heap_check_cast(zpointer* dst, zpointer* src, size_t length, Klass* dst_klass); + static bool oop_arraycopy_in_heap_no_check_cast(zpointer* dst, zpointer* src, size_t length); public: // // In heap // - template - static oop oop_load_in_heap(T* addr); + static oop oop_load_in_heap(zpointer* p); + static oop oop_load_in_heap(oop* p) { return oop_load_in_heap((zpointer*)p); }; + static oop oop_load_in_heap(narrowOop* p) { unsupported(); return nullptr; } + static oop oop_load_in_heap_at(oop base, ptrdiff_t offset); - 
template - static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value); + static void oop_store_in_heap(zpointer* p, oop value); + static void oop_store_in_heap(oop* p, oop value) { oop_store_in_heap((zpointer*)p, value); } + static void oop_store_in_heap(narrowOop* p, oop value) { unsupported(); } + static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value); + + static void oop_store_not_in_heap(zpointer* p, oop value); + static void oop_store_not_in_heap(oop* p, oop value) { oop_store_not_in_heap((zpointer*)p, value); } + static void oop_store_not_in_heap(narrowOop* p, oop value) { unsupported(); } + static void oop_store_not_in_heap_at(oop base, ptrdiff_t offset, oop value); + + static oop oop_atomic_cmpxchg_in_heap(zpointer* p, oop compare_value, oop new_value); + static oop oop_atomic_cmpxchg_in_heap(oop* p, oop compare_value, oop new_value) { return oop_atomic_cmpxchg_in_heap((zpointer*)p, compare_value, new_value); } + static oop oop_atomic_cmpxchg_in_heap(narrowOop* p, oop compare_value, oop new_value) { unsupported(); return nullptr; } static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value); - template - static oop oop_atomic_xchg_in_heap(T* addr, oop new_value); + static oop oop_atomic_xchg_in_heap(zpointer* p, oop new_value); + static oop oop_atomic_xchg_in_heap(oop* p, oop new_value) { return oop_atomic_xchg_in_heap((zpointer*)p, new_value); } + static oop oop_atomic_xchg_in_heap(narrowOop* p, oop new_value) { unsupported(); return nullptr; } static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value); - template - static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw, - arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, + static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, zpointer* src_raw, + arrayOop dst_obj, size_t dst_offset_in_bytes, zpointer* dst_raw, size_t length); + static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, oop* src_raw, + arrayOop dst_obj, size_t dst_offset_in_bytes, oop* dst_raw, + size_t length) { + return oop_arraycopy_in_heap(src_obj, src_offset_in_bytes, (zpointer*)src_raw, + dst_obj, dst_offset_in_bytes, (zpointer*)dst_raw, + length); + } + static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw, + arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw, + size_t length) { unsupported(); return false; } static void clone_in_heap(oop src, oop dst, size_t size); // // Not in heap // - template - static oop oop_load_not_in_heap(T* addr); - - template - static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value); - - template - static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value); + static oop oop_load_not_in_heap(zpointer* p); + static oop oop_load_not_in_heap(oop* p); + static oop oop_load_not_in_heap(narrowOop* p) { unsupported(); return nullptr; } + + static oop oop_atomic_cmpxchg_not_in_heap(zpointer* p, oop compare_value, oop new_value); + static oop oop_atomic_cmpxchg_not_in_heap(oop* p, oop compare_value, oop new_value) { + return oop_atomic_cmpxchg_not_in_heap((zpointer*)p, compare_value, new_value); + } + static oop oop_atomic_cmpxchg_not_in_heap(narrowOop* addr, oop compare_value, oop new_value) { unsupported(); return nullptr; } + + static oop oop_atomic_xchg_not_in_heap(zpointer* p, oop new_value); + static oop oop_atomic_xchg_not_in_heap(oop* p, oop 
new_value) { return oop_atomic_xchg_not_in_heap((zpointer*)p, new_value); } + static oop oop_atomic_xchg_not_in_heap(narrowOop* p, oop new_value) { unsupported(); return nullptr; } }; }; diff --git a/src/hotspot/share/gc/z/zBarrierSet.inline.hpp b/src/hotspot/share/gc/z/zBarrierSet.inline.hpp index 0970b519e106d..bfbae74972d80 100644 --- a/src/hotspot/share/gc/z/zBarrierSet.inline.hpp +++ b/src/hotspot/share/gc/z/zBarrierSet.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,11 @@ #include "gc/z/zBarrierSet.hpp" #include "gc/shared/accessBarrierSupport.inline.hpp" +#include "gc/z/zAddress.inline.hpp" #include "gc/z/zBarrier.inline.hpp" +#include "gc/z/zIterator.inline.hpp" +#include "gc/z/zNMethod.hpp" +#include "memory/iterator.inline.hpp" #include "utilities/debug.hpp" template @@ -47,40 +51,44 @@ inline void ZBarrierSet::AccessBarrier::verify_decorato } template -inline oop* ZBarrierSet::AccessBarrier::field_addr(oop base, ptrdiff_t offset) { - assert(base != NULL, "Invalid base"); - return reinterpret_cast(reinterpret_cast((void*)base) + offset); +inline void ZBarrierSet::AccessBarrier::unsupported() { + ShouldNotReachHere(); } template -template -inline oop ZBarrierSet::AccessBarrier::load_barrier_on_oop_field_preloaded(T* addr, oop o) { +inline zpointer* ZBarrierSet::AccessBarrier::field_addr(oop base, ptrdiff_t offset) { + assert(base != nullptr, "Invalid base"); + return reinterpret_cast(reinterpret_cast((void*)base) + offset); +} + +template +inline zaddress ZBarrierSet::AccessBarrier::load_barrier(zpointer* p, zpointer o) { verify_decorators_absent(); if (HasDecorator::value) { if (HasDecorator::value) { - return ZBarrier::weak_load_barrier_on_oop_field_preloaded(addr, o); + // Load barriers on strong oop refs don't keep objects alive + return ZBarrier::load_barrier_on_oop_field_preloaded(p, o); } else if (HasDecorator::value) { - return ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o); + return ZBarrier::no_keep_alive_load_barrier_on_weak_oop_field_preloaded(p, o); } else { assert((HasDecorator::value), "Must be"); - return ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, o); + return ZBarrier::no_keep_alive_load_barrier_on_phantom_oop_field_preloaded(p, o); } } else { if (HasDecorator::value) { - return ZBarrier::load_barrier_on_oop_field_preloaded(addr, o); + return ZBarrier::load_barrier_on_oop_field_preloaded(p, o); } else if (HasDecorator::value) { - return ZBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o); + return ZBarrier::load_barrier_on_weak_oop_field_preloaded(p, o); } else { assert((HasDecorator::value), "Must be"); - return ZBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o); + return ZBarrier::load_barrier_on_phantom_oop_field_preloaded(p, o); } } } template -template -inline oop ZBarrierSet::AccessBarrier::load_barrier_on_unknown_oop_field_preloaded(oop base, ptrdiff_t offset, T* addr, oop o) { +inline zaddress ZBarrierSet::AccessBarrier::load_barrier_on_unknown_oop_ref(oop base, ptrdiff_t offset, zpointer* p, zpointer o) { verify_decorators_present(); const DecoratorSet decorators_known_strength = @@ -88,57 +96,182 @@ inline oop ZBarrierSet::AccessBarrier::load_barrier_on_ if (HasDecorator::value) { if 
(decorators_known_strength & ON_STRONG_OOP_REF) { - return ZBarrier::weak_load_barrier_on_oop_field_preloaded(addr, o); + // Load barriers on strong oop refs don't keep objects alive + return ZBarrier::load_barrier_on_oop_field_preloaded(p, o); } else if (decorators_known_strength & ON_WEAK_OOP_REF) { - return ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o); + return ZBarrier::no_keep_alive_load_barrier_on_weak_oop_field_preloaded(p, o); } else { assert(decorators_known_strength & ON_PHANTOM_OOP_REF, "Must be"); - return ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, o); + return ZBarrier::no_keep_alive_load_barrier_on_phantom_oop_field_preloaded(p, o); } } else { if (decorators_known_strength & ON_STRONG_OOP_REF) { - return ZBarrier::load_barrier_on_oop_field_preloaded(addr, o); + return ZBarrier::load_barrier_on_oop_field_preloaded(p, o); } else if (decorators_known_strength & ON_WEAK_OOP_REF) { - return ZBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o); + return ZBarrier::load_barrier_on_weak_oop_field_preloaded(p, o); } else { assert(decorators_known_strength & ON_PHANTOM_OOP_REF, "Must be"); - return ZBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o); + return ZBarrier::load_barrier_on_phantom_oop_field_preloaded(p, o); } } } +inline zpointer ZBarrierSet::store_good(oop obj) { + assert(ZPointerStoreGoodMask != 0, "sanity"); + + const zaddress addr = to_zaddress(obj); + return ZAddress::store_good(addr); +} + +template +inline void ZBarrierSet::AccessBarrier::store_barrier_heap_with_healing(zpointer* p) { + if (!HasDecorator::value) { + ZBarrier::store_barrier_on_heap_oop_field(p, true /* heal */); + } else { + assert(false, "Should not be used on uninitialized memory"); + } +} + +template +inline void ZBarrierSet::AccessBarrier::store_barrier_heap_without_healing(zpointer* p) { + if (!HasDecorator::value) { + ZBarrier::store_barrier_on_heap_oop_field(p, false /* heal */); + } +} + +template +inline void ZBarrierSet::AccessBarrier::no_keep_alive_store_barrier_heap(zpointer* p) { + if (!HasDecorator::value) { + ZBarrier::no_keep_alive_store_barrier_on_heap_oop_field(p); + } +} + +template +inline void ZBarrierSet::AccessBarrier::store_barrier_native_with_healing(zpointer* p) { + if (!HasDecorator::value) { + ZBarrier::store_barrier_on_native_oop_field(p, true /* heal */); + } else { + assert(false, "Should not be used on uninitialized memory"); + } +} + +template +inline void ZBarrierSet::AccessBarrier::store_barrier_native_without_healing(zpointer* p) { + if (!HasDecorator::value) { + ZBarrier::store_barrier_on_native_oop_field(p, false /* heal */); + } +} + // // In heap // template -template -inline oop ZBarrierSet::AccessBarrier::oop_load_in_heap(T* addr) { +inline oop ZBarrierSet::AccessBarrier::oop_load_in_heap(zpointer* p) { verify_decorators_absent(); - const oop o = Raw::oop_load_in_heap(addr); - return load_barrier_on_oop_field_preloaded(addr, o); + const zpointer o = Raw::load_in_heap(p); + assert_is_valid(o); + + return to_oop(load_barrier(p, o)); } template inline oop ZBarrierSet::AccessBarrier::oop_load_in_heap_at(oop base, ptrdiff_t offset) { - oop* const addr = field_addr(base, offset); - const oop o = Raw::oop_load_in_heap(addr); + zpointer* const p = field_addr(base, offset); + + const zpointer o = Raw::load_in_heap(p); + assert_is_valid(o); if (HasDecorator::value) { - return load_barrier_on_unknown_oop_field_preloaded(base, offset, addr, o); + return to_oop(load_barrier_on_unknown_oop_ref(base, offset, p, o)); 
+ } + + return to_oop(load_barrier(p, o)); +} + +template +bool is_store_barrier_no_keep_alive() { + if (HasDecorator::value) { + return HasDecorator::value; } - return load_barrier_on_oop_field_preloaded(addr, o); + if (HasDecorator::value) { + return true; + } + + assert((decorators & ON_PHANTOM_OOP_REF) != 0, "Must be"); + return true; +} + +template +inline bool is_store_barrier_no_keep_alive(oop base, ptrdiff_t offset) { + if (!HasDecorator::value) { + return is_store_barrier_no_keep_alive(); + } + + const DecoratorSet decorators_known_strength = + AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength(base, offset); + + if ((decorators_known_strength & ON_STRONG_OOP_REF) != 0) { + return (decorators & AS_NO_KEEPALIVE) != 0; + } + + if ((decorators_known_strength & ON_WEAK_OOP_REF) != 0) { + return true; + } + + assert((decorators_known_strength & ON_PHANTOM_OOP_REF) != 0, "Must be"); + return true; +} + +template +inline void ZBarrierSet::AccessBarrier::oop_store_in_heap(zpointer* p, oop value) { + verify_decorators_absent(); + + if (is_store_barrier_no_keep_alive()) { + no_keep_alive_store_barrier_heap(p); + } else { + store_barrier_heap_without_healing(p); + } + + Raw::store_in_heap(p, store_good(value)); } template -template -inline oop ZBarrierSet::AccessBarrier::oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) { +inline void ZBarrierSet::AccessBarrier::oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) { + zpointer* const p = field_addr(base, offset); + + if (is_store_barrier_no_keep_alive(base, offset)) { + no_keep_alive_store_barrier_heap(p); + } else { + store_barrier_heap_without_healing(p); + } + + Raw::store_in_heap(p, store_good(value)); +} + +template +inline void ZBarrierSet::AccessBarrier::oop_store_not_in_heap(zpointer* p, oop value) { + verify_decorators_absent(); + + if (!is_store_barrier_no_keep_alive()) { + store_barrier_native_without_healing(p); + } + + Raw::store(p, store_good(value)); +} + +template +inline oop ZBarrierSet::AccessBarrier::oop_atomic_cmpxchg_in_heap(zpointer* p, oop compare_value, oop new_value) { verify_decorators_present(); verify_decorators_absent(); - ZBarrier::load_barrier_on_oop_field(addr); - return Raw::oop_atomic_cmpxchg_in_heap(addr, compare_value, new_value); + store_barrier_heap_with_healing(p); + + const zpointer o = Raw::atomic_cmpxchg_in_heap(p, store_good(compare_value), store_good(new_value)); + assert_is_valid(o); + + return to_oop(ZPointer::uncolor_store_good(o)); } template @@ -150,18 +283,27 @@ inline oop ZBarrierSet::AccessBarrier::oop_atomic_cmpxc // calls with ON_UNKNOWN_OOP_REF set. However, we treat these as ON_STRONG_OOP_REF, // with the motivation that if you're doing Unsafe operations on a Reference.referent // field, then you're on your own anyway. 
- ZBarrier::load_barrier_on_oop_field(field_addr(base, offset)); - return Raw::oop_atomic_cmpxchg_in_heap_at(base, offset, compare_value, new_value); + zpointer* const p = field_addr(base, offset); + + store_barrier_heap_with_healing(p); + + const zpointer o = Raw::atomic_cmpxchg_in_heap(p, store_good(compare_value), store_good(new_value)); + assert_is_valid(o); + + return to_oop(ZPointer::uncolor_store_good(o)); } template -template -inline oop ZBarrierSet::AccessBarrier::oop_atomic_xchg_in_heap(T* addr, oop new_value) { +inline oop ZBarrierSet::AccessBarrier::oop_atomic_xchg_in_heap(zpointer* p, oop new_value) { verify_decorators_present(); verify_decorators_absent(); - const oop o = Raw::oop_atomic_xchg_in_heap(addr, new_value); - return ZBarrier::load_barrier_on_oop(o); + store_barrier_heap_with_healing(p); + + const zpointer o = Raw::atomic_xchg_in_heap(p, store_good(new_value)); + assert_is_valid(o); + + return to_oop(ZPointer::uncolor_store_good(o)); } template @@ -169,74 +311,189 @@ inline oop ZBarrierSet::AccessBarrier::oop_atomic_xchg_ verify_decorators_present(); verify_decorators_absent(); - const oop o = Raw::oop_atomic_xchg_in_heap_at(base, offset, new_value); - return ZBarrier::load_barrier_on_oop(o); + zpointer* const p = field_addr(base, offset); + + store_barrier_heap_with_healing(p); + + const zpointer o = Raw::atomic_xchg_in_heap(p, store_good(new_value)); + assert_is_valid(o); + + return to_oop(ZPointer::uncolor_store_good(o)); } template -template -inline bool ZBarrierSet::AccessBarrier::oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw, - arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, - size_t length) { - T* src = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw); - T* dst = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw); +inline zaddress ZBarrierSet::AccessBarrier::oop_copy_one_barriers(zpointer* dst, zpointer* src) { + store_barrier_heap_without_healing(dst); + + return ZBarrier::load_barrier_on_oop_field(src); +} + +template +inline void ZBarrierSet::AccessBarrier::oop_copy_one(zpointer* dst, zpointer* src) { + const zaddress obj = oop_copy_one_barriers(dst, src); - if (!HasDecorator::value) { - // No check cast, bulk barrier and bulk copy - ZBarrier::load_barrier_on_oop_array(src, length); - return Raw::oop_arraycopy_in_heap(NULL, 0, src, NULL, 0, dst, length); + Atomic::store(dst, ZAddress::store_good(obj)); +} + +template +inline bool ZBarrierSet::AccessBarrier::oop_copy_one_check_cast(zpointer* dst, zpointer* src, Klass* dst_klass) { + const zaddress obj = oop_copy_one_barriers(dst, src); + + if (!oopDesc::is_instanceof_or_null(to_oop(obj), dst_klass)) { + // Check cast failed + return false; } + Atomic::store(dst, ZAddress::store_good(obj)); + + return true; +} + + +template +inline bool ZBarrierSet::AccessBarrier::oop_arraycopy_in_heap_check_cast(zpointer* dst, zpointer* src, size_t length, Klass* dst_klass) { // Check cast and copy each elements - Klass* const dst_klass = objArrayOop(dst_obj)->element_klass(); - for (const T* const end = src + length; src < end; src++, dst++) { - const oop elem = ZBarrier::load_barrier_on_oop_field(src); - if (!oopDesc::is_instanceof_or_null(elem, dst_klass)) { + for (const zpointer* const end = src + length; src < end; src++, dst++) { + if (!oop_copy_one_check_cast(dst, src, dst_klass)) { // Check cast failed return false; } + } + + return true; +} + +template +inline bool 
ZBarrierSet::AccessBarrier::oop_arraycopy_in_heap_no_check_cast(zpointer* dst, zpointer* src, size_t length) { + const bool is_disjoint = HasDecorator::value; - // Cast is safe, since we know it's never a narrowOop - *(oop*)dst = elem; + if (is_disjoint || src > dst) { + for (const zpointer* const end = src + length; src < end; src++, dst++) { + oop_copy_one(dst, src); + } + return true; } + if (src < dst) { + const zpointer* const end = src; + src += length - 1; + dst += length - 1; + for ( ; src >= end; src--, dst--) { + oop_copy_one(dst, src); + } + return true; + } + + // src and dst are the same; nothing to do return true; } +template +inline bool ZBarrierSet::AccessBarrier::oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, zpointer* src_raw, + arrayOop dst_obj, size_t dst_offset_in_bytes, zpointer* dst_raw, + size_t length) { + zpointer* const src = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw); + zpointer* const dst = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw); + + if (HasDecorator::value) { + Klass* const dst_klass = objArrayOop(dst_obj)->element_klass(); + return oop_arraycopy_in_heap_check_cast(dst, src, length, dst_klass); + } + + return oop_arraycopy_in_heap_no_check_cast(dst, src, length); +} + +class ZStoreBarrierOopClosure : public BasicOopIterateClosure { +public: + virtual void do_oop(oop* p_) { + volatile zpointer* const p = (volatile zpointer*)p_; + const zpointer ptr = ZBarrier::load_atomic(p); + const zaddress addr = ZPointer::uncolor(ptr); + ZBarrier::store_barrier_on_heap_oop_field(p, false /* heal */); + *p = ZAddress::store_good(addr); + } + + virtual void do_oop(narrowOop* p) { + ShouldNotReachHere(); + } +}; + +class ZLoadBarrierOopClosure : public BasicOopIterateClosure { +public: + virtual void do_oop(oop* p) { + ZBarrier::load_barrier_on_oop_field((zpointer*)p); + } + + virtual void do_oop(narrowOop* p) { + ShouldNotReachHere(); + } +}; + template inline void ZBarrierSet::AccessBarrier::clone_in_heap(oop src, oop dst, size_t size) { - ZBarrier::load_barrier_on_oop_fields(src); + assert_is_valid(to_zaddress(src)); + + // Fix the oops + ZLoadBarrierOopClosure cl; + ZIterator::oop_iterate(src, &cl); + + // Clone the object Raw::clone_in_heap(src, dst, size); + + assert(ZHeap::heap()->is_young(to_zaddress(dst)), "ZColorStoreGoodOopClosure is only valid for young objects"); + + // Color store good before handing out + ZStoreBarrierOopClosure cl_sg; + ZIterator::oop_iterate(dst, &cl_sg); } // // Not in heap // template -template -inline oop ZBarrierSet::AccessBarrier::oop_load_not_in_heap(T* addr) { +inline oop ZBarrierSet::AccessBarrier::oop_load_not_in_heap(zpointer* p) { + verify_decorators_absent(); + + const zpointer o = Raw::template load(p); + assert_is_valid(o); + return to_oop(load_barrier(p, o)); +} + +template +inline oop ZBarrierSet::AccessBarrier::oop_load_not_in_heap(oop* p) { verify_decorators_absent(); - const oop o = Raw::oop_load_not_in_heap(addr); - return load_barrier_on_oop_field_preloaded(addr, o); + if (HasDecorator::value) { + return ZNMethod::load_oop(p, decorators); + } else { + return oop_load_not_in_heap((zpointer*)p); + } } template -template -inline oop ZBarrierSet::AccessBarrier::oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) { +inline oop ZBarrierSet::AccessBarrier::oop_atomic_cmpxchg_not_in_heap(zpointer* p, oop compare_value, oop new_value) { verify_decorators_present(); verify_decorators_absent(); - return 
Raw::oop_atomic_cmpxchg_not_in_heap(addr, compare_value, new_value); + store_barrier_native_with_healing(p); + + const zpointer o = Raw::atomic_cmpxchg(p, store_good(compare_value), store_good(new_value)); + assert_is_valid(o); + + return to_oop(ZPointer::uncolor_store_good(o)); } template -template -inline oop ZBarrierSet::AccessBarrier::oop_atomic_xchg_not_in_heap(T* addr, oop new_value) { +inline oop ZBarrierSet::AccessBarrier::oop_atomic_xchg_not_in_heap(zpointer* p, oop new_value) { verify_decorators_present(); verify_decorators_absent(); - return Raw::oop_atomic_xchg_not_in_heap(addr, new_value); + store_barrier_native_with_healing(p); + + const zpointer o = Raw::atomic_xchg(p, store_good(new_value)); + assert_is_valid(o); + + return to_oop(ZPointer::uncolor_store_good(o)); } #endif // SHARE_GC_Z_ZBARRIERSET_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zBarrierSetAssembler.cpp b/src/hotspot/share/gc/z/zBarrierSetAssembler.cpp index 8127ff61079ab..e68142f0778ba 100644 --- a/src/hotspot/share/gc/z/zBarrierSetAssembler.cpp +++ b/src/hotspot/share/gc/z/zBarrierSetAssembler.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,10 +26,18 @@ #include "gc/z/zThreadLocalData.hpp" #include "runtime/javaThread.hpp" -Address ZBarrierSetAssemblerBase::address_bad_mask_from_thread(Register thread) { - return Address(thread, ZThreadLocalData::address_bad_mask_offset()); +Address ZBarrierSetAssemblerBase::load_bad_mask_from_thread(Register thread) { + return Address(thread, ZThreadLocalData::load_bad_mask_offset()); } -Address ZBarrierSetAssemblerBase::address_bad_mask_from_jni_env(Register env) { - return Address(env, ZThreadLocalData::address_bad_mask_offset() - JavaThread::jni_environment_offset()); +Address ZBarrierSetAssemblerBase::mark_bad_mask_from_thread(Register thread) { + return Address(thread, ZThreadLocalData::mark_bad_mask_offset()); +} + +Address ZBarrierSetAssemblerBase::load_bad_mask_from_jni_env(Register env) { + return Address(env, ZThreadLocalData::load_bad_mask_offset() - JavaThread::jni_environment_offset()); +} + +Address ZBarrierSetAssemblerBase::mark_bad_mask_from_jni_env(Register env) { + return Address(env, ZThreadLocalData::mark_bad_mask_offset() - JavaThread::jni_environment_offset()); } diff --git a/src/hotspot/share/gc/z/zBarrierSetAssembler.hpp b/src/hotspot/share/gc/z/zBarrierSetAssembler.hpp index e63c7a557d4c3..bcc757d6132f2 100644 --- a/src/hotspot/share/gc/z/zBarrierSetAssembler.hpp +++ b/src/hotspot/share/gc/z/zBarrierSetAssembler.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,8 +29,11 @@ class ZBarrierSetAssemblerBase : public BarrierSetAssembler { public: - static Address address_bad_mask_from_thread(Register thread); - static Address address_bad_mask_from_jni_env(Register env); + static Address load_bad_mask_from_thread(Register thread); + static Address mark_bad_mask_from_thread(Register thread); + + static Address load_bad_mask_from_jni_env(Register env); + static Address mark_bad_mask_from_jni_env(Register env); }; // Needs to be included after definition of ZBarrierSetAssemblerBase diff --git a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp index 57c0a421f958c..2c58b0155648a 100644 --- a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp +++ b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,11 +23,16 @@ #include "precompiled.hpp" #include "code/nmethod.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/z/zAddress.hpp" +#include "gc/z/zBarrier.inline.hpp" +#include "gc/z/zBarrierSetAssembler.hpp" #include "gc/z/zBarrierSetNMethod.hpp" -#include "gc/z/zGlobals.hpp" #include "gc/z/zLock.inline.hpp" #include "gc/z/zNMethod.hpp" +#include "gc/z/zResurrection.inline.hpp" #include "gc/z/zThreadLocalData.hpp" +#include "gc/z/zUncoloredRoot.inline.hpp" #include "logging/log.hpp" #include "runtime/threadWXSetters.inline.hpp" @@ -35,7 +40,10 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) { ZLocker locker(ZNMethod::lock_for_nmethod(nm)); log_trace(nmethod, barrier)("Entered critical zone for %p", nm); + log_develop_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by entry (try)", p2i(nm)); + if (!is_armed(nm)) { + log_develop_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by entry (disarmed)", p2i(nm)); // Some other thread got here first and healed the oops // and disarmed the nmethod. return true; @@ -44,6 +52,7 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) { MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current())); if (nm->is_unloading()) { + log_develop_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by entry (unloading)", p2i(nm)); // We don't need to take the lock when unlinking nmethods from // the Method, because it is only concurrently unlinked by // the entry barrier, which acquires the per nmethod lock. @@ -51,13 +60,20 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) { // We can end up calling nmethods that are unloading // since we clear compiled ICs lazily. Returning false - // will re-resovle the call and update the compiled IC. + // will re-resolve the call and update the compiled IC. 
return false; } + // Heal barriers + ZNMethod::nmethod_patch_barriers(nm); + // Heal oops - ZNMethod::nmethod_oops_barrier(nm); + ZUncoloredRootProcessWeakOopClosure cl(ZNMethod::color(nm)); + ZNMethod::nmethod_oops_do_inner(nm, &cl); + const uintptr_t prev_color = ZNMethod::color(nm); + const uintptr_t new_color = *(int*)ZPointerStoreGoodMaskLowOrderBitsAddr; + log_develop_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by entry (complete) [" PTR_FORMAT " -> " PTR_FORMAT "]", p2i(nm), prev_color, new_color); // CodeCache unloading support nm->mark_as_maybe_on_stack(); @@ -69,7 +85,7 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) { } int* ZBarrierSetNMethod::disarmed_guard_value_address() const { - return (int*)ZAddressBadMaskHighOrderBitsAddr; + return (int*)ZPointerStoreGoodMaskLowOrderBitsAddr; } ByteSize ZBarrierSetNMethod::thread_disarmed_guard_value_offset() const { diff --git a/src/hotspot/share/gc/z/zBarrierSetRuntime.cpp b/src/hotspot/share/gc/z/zBarrierSetRuntime.cpp index 895068c8cfdc8..da7adf7cc3a80 100644 --- a/src/hotspot/share/gc/z/zBarrierSetRuntime.cpp +++ b/src/hotspot/share/gc/z/zBarrierSetRuntime.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,31 +28,39 @@ #include "runtime/interfaceSupport.inline.hpp" JRT_LEAF(oopDesc*, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p)) - return ZBarrier::load_barrier_on_oop_field_preloaded(p, o); + return to_oop(ZBarrier::load_barrier_on_oop_field_preloaded((zpointer*)p, to_zpointer(o))); JRT_END -JRT_LEAF(oopDesc*, ZBarrierSetRuntime::weak_load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p)) - return ZBarrier::weak_load_barrier_on_oop_field_preloaded(p, o); +JRT_LEAF(zpointer, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_store_good(oopDesc* o, oop* p)) + return ZAddress::color(ZBarrier::load_barrier_on_oop_field_preloaded((zpointer*)p, to_zpointer(o)), ZPointerStoreGoodMask); JRT_END -JRT_LEAF(oopDesc*, ZBarrierSetRuntime::weak_load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p)) - return ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(p, o); +JRT_LEAF(oopDesc*, ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p)) + return to_oop(ZBarrier::load_barrier_on_weak_oop_field_preloaded((zpointer*)p, to_zpointer(o))); JRT_END -JRT_LEAF(oopDesc*, ZBarrierSetRuntime::weak_load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p)) - return ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(p, o); +JRT_LEAF(oopDesc*, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p)) + return to_oop(ZBarrier::load_barrier_on_phantom_oop_field_preloaded((zpointer*)p, to_zpointer(o))); JRT_END -JRT_LEAF(oopDesc*, ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p)) - return ZBarrier::load_barrier_on_weak_oop_field_preloaded(p, o); +JRT_LEAF(oopDesc*, ZBarrierSetRuntime::no_keepalive_load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p)) + return to_oop(ZBarrier::no_keep_alive_load_barrier_on_weak_oop_field_preloaded((zpointer*)p, to_zpointer(o))); JRT_END -JRT_LEAF(oopDesc*, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p)) - return 
ZBarrier::load_barrier_on_phantom_oop_field_preloaded(p, o); +JRT_LEAF(oopDesc*, ZBarrierSetRuntime::no_keepalive_load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p)) + return to_oop(ZBarrier::no_keep_alive_load_barrier_on_phantom_oop_field_preloaded((zpointer*)p, to_zpointer(o))); +JRT_END + +JRT_LEAF(void, ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing(oop* p)) + ZBarrier::store_barrier_on_heap_oop_field((zpointer*)p, true /* heal */); +JRT_END + +JRT_LEAF(void, ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing(oop* p)) + ZBarrier::store_barrier_on_heap_oop_field((zpointer*)p, false /* heal */); JRT_END -JRT_LEAF(void, ZBarrierSetRuntime::load_barrier_on_oop_array(oop* p, size_t length)) - ZBarrier::load_barrier_on_oop_array(p, length); +JRT_LEAF(void, ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing(oop* p)) + ZBarrier::store_barrier_on_native_oop_field((zpointer*)p, false /* heal */); JRT_END JRT_LEAF(void, ZBarrierSetRuntime::clone(oopDesc* src, oopDesc* dst, size_t size)) @@ -60,22 +68,23 @@ JRT_LEAF(void, ZBarrierSetRuntime::clone(oopDesc* src, oopDesc* dst, size_t size JRT_END address ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(DecoratorSet decorators) { - if (decorators & ON_PHANTOM_OOP_REF) { - if (decorators & AS_NO_KEEPALIVE) { - return weak_load_barrier_on_phantom_oop_field_preloaded_addr(); + if (decorators & AS_NO_KEEPALIVE) { + if (decorators & ON_PHANTOM_OOP_REF) { + return no_keepalive_load_barrier_on_phantom_oop_field_preloaded_addr(); + } else if (decorators & ON_WEAK_OOP_REF) { + return no_keepalive_load_barrier_on_weak_oop_field_preloaded_addr(); } else { - return load_barrier_on_phantom_oop_field_preloaded_addr(); - } - } else if (decorators & ON_WEAK_OOP_REF) { - if (decorators & AS_NO_KEEPALIVE) { - return weak_load_barrier_on_weak_oop_field_preloaded_addr(); - } else { - return load_barrier_on_weak_oop_field_preloaded_addr(); + assert((decorators & ON_STRONG_OOP_REF), "Expected type"); + // Normal loads on strong oop never keep objects alive + return load_barrier_on_oop_field_preloaded_addr(); } } else { - if (decorators & AS_NO_KEEPALIVE) { - return weak_load_barrier_on_oop_field_preloaded_addr(); + if (decorators & ON_PHANTOM_OOP_REF) { + return load_barrier_on_phantom_oop_field_preloaded_addr(); + } else if (decorators & ON_WEAK_OOP_REF) { + return load_barrier_on_weak_oop_field_preloaded_addr(); } else { + assert((decorators & ON_STRONG_OOP_REF), "Expected type"); return load_barrier_on_oop_field_preloaded_addr(); } } @@ -85,6 +94,10 @@ address ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr() { return reinterpret_cast
<address>(load_barrier_on_oop_field_preloaded); } +address ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_store_good_addr() { + return reinterpret_cast
<address>(load_barrier_on_oop_field_preloaded_store_good); +} + address ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr() { return reinterpret_cast
<address>(load_barrier_on_weak_oop_field_preloaded); } @@ -93,20 +106,24 @@ address ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr() { return reinterpret_cast
<address>(load_barrier_on_phantom_oop_field_preloaded); } -address ZBarrierSetRuntime::weak_load_barrier_on_oop_field_preloaded_addr() { - return reinterpret_cast
<address>(weak_load_barrier_on_oop_field_preloaded); +address ZBarrierSetRuntime::no_keepalive_load_barrier_on_weak_oop_field_preloaded_addr() { + return reinterpret_cast
<address>(no_keepalive_load_barrier_on_weak_oop_field_preloaded); +} + +address ZBarrierSetRuntime::no_keepalive_load_barrier_on_phantom_oop_field_preloaded_addr() { + return reinterpret_cast
<address>(no_keepalive_load_barrier_on_phantom_oop_field_preloaded); } -address ZBarrierSetRuntime::weak_load_barrier_on_weak_oop_field_preloaded_addr() { - return reinterpret_cast
<address>(weak_load_barrier_on_weak_oop_field_preloaded); +address ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr() { + return reinterpret_cast
<address>(store_barrier_on_oop_field_with_healing); } -address ZBarrierSetRuntime::weak_load_barrier_on_phantom_oop_field_preloaded_addr() { - return reinterpret_cast
<address>(weak_load_barrier_on_phantom_oop_field_preloaded); +address ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr() { + return reinterpret_cast
<address>(store_barrier_on_oop_field_without_healing); } -address ZBarrierSetRuntime::load_barrier_on_oop_array_addr() { - return reinterpret_cast
<address>(load_barrier_on_oop_array); +address ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr() { + return reinterpret_cast<address>
(store_barrier_on_native_oop_field_without_healing); } address ZBarrierSetRuntime::clone_addr() { diff --git a/src/hotspot/share/gc/z/zBarrierSetRuntime.hpp b/src/hotspot/share/gc/z/zBarrierSetRuntime.hpp index b3a143de3e971..a569ff3c15818 100644 --- a/src/hotspot/share/gc/z/zBarrierSetRuntime.hpp +++ b/src/hotspot/share/gc/z/zBarrierSetRuntime.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,32 +24,36 @@ #ifndef SHARE_GC_Z_ZBARRIERSETRUNTIME_HPP #define SHARE_GC_Z_ZBARRIERSETRUNTIME_HPP +#include "gc/z/zAddress.hpp" #include "memory/allStatic.hpp" #include "oops/accessDecorators.hpp" +#include "oops/oopsHierarchy.hpp" #include "utilities/globalDefinitions.hpp" -class oopDesc; - class ZBarrierSetRuntime : public AllStatic { private: static oopDesc* load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p); + static zpointer load_barrier_on_oop_field_preloaded_store_good(oopDesc* o, oop* p); static oopDesc* load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p); static oopDesc* load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p); - static oopDesc* weak_load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p); - static oopDesc* weak_load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p); - static oopDesc* weak_load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p); - static void load_barrier_on_oop_array(oop* p, size_t length); + static oopDesc* no_keepalive_load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p); + static oopDesc* no_keepalive_load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p); + static void store_barrier_on_oop_field_with_healing(oop* p); + static void store_barrier_on_oop_field_without_healing(oop* p); + static void store_barrier_on_native_oop_field_without_healing(oop* p); static void clone(oopDesc* src, oopDesc* dst, size_t size); public: static address load_barrier_on_oop_field_preloaded_addr(DecoratorSet decorators); static address load_barrier_on_oop_field_preloaded_addr(); + static address load_barrier_on_oop_field_preloaded_store_good_addr(); static address load_barrier_on_weak_oop_field_preloaded_addr(); static address load_barrier_on_phantom_oop_field_preloaded_addr(); - static address weak_load_barrier_on_oop_field_preloaded_addr(); - static address weak_load_barrier_on_weak_oop_field_preloaded_addr(); - static address weak_load_barrier_on_phantom_oop_field_preloaded_addr(); - static address load_barrier_on_oop_array_addr(); + static address no_keepalive_load_barrier_on_weak_oop_field_preloaded_addr(); + static address no_keepalive_load_barrier_on_phantom_oop_field_preloaded_addr(); + static address store_barrier_on_oop_field_with_healing_addr(); + static address store_barrier_on_oop_field_without_healing_addr(); + static address store_barrier_on_native_oop_field_without_healing_addr(); static address clone_addr(); }; diff --git a/src/hotspot/share/gc/z/zBarrierSetStackChunk.cpp b/src/hotspot/share/gc/z/zBarrierSetStackChunk.cpp index 0b1452746f247..e33b86ef22b85 100644 --- a/src/hotspot/share/gc/z/zBarrierSetStackChunk.cpp +++ b/src/hotspot/share/gc/z/zBarrierSetStackChunk.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,25 +23,25 @@ */ #include "precompiled.hpp" -#include "gc/z/zBarrier.inline.hpp" #include "gc/z/zBarrierSetStackChunk.hpp" -#include "runtime/atomic.hpp" +#include "gc/z/zContinuation.hpp" #include "utilities/debug.hpp" void ZBarrierSetStackChunk::encode_gc_mode(stackChunkOop chunk, OopIterator* iterator) { - // Do nothing + ZContinuation::ZColorStackOopClosure cl(chunk); + iterator->oops_do(&cl); } void ZBarrierSetStackChunk::decode_gc_mode(stackChunkOop chunk, OopIterator* iterator) { - // Do nothing + ZContinuation::ZUncolorStackOopClosure cl; + iterator->oops_do(&cl); } oop ZBarrierSetStackChunk::load_oop(stackChunkOop chunk, oop* addr) { - oop obj = Atomic::load(addr); - return ZBarrier::load_barrier_on_oop_field_preloaded((volatile oop*)NULL, obj); + return ZContinuation::load_oop(chunk, addr); } oop ZBarrierSetStackChunk::load_oop(stackChunkOop chunk, narrowOop* addr) { ShouldNotReachHere(); - return NULL; + return nullptr; } diff --git a/src/hotspot/share/gc/z/zBitMap.hpp b/src/hotspot/share/gc/z/zBitMap.hpp index dc00fc199812e..3568723383c7f 100644 --- a/src/hotspot/share/gc/z/zBitMap.hpp +++ b/src/hotspot/share/gc/z/zBitMap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,12 @@ #include "utilities/bitMap.hpp" +class ZMovableBitMap : public CHeapBitMap { +public: + ZMovableBitMap(); + ZMovableBitMap(ZMovableBitMap&& bitmap); +}; + class ZBitMap : public CHeapBitMap { private: static bm_word_t bit_mask_pair(idx_t bit); @@ -35,8 +41,26 @@ class ZBitMap : public CHeapBitMap { public: ZBitMap(idx_t size_in_bits); + ZBitMap(const ZBitMap& other); bool par_set_bit_pair(idx_t bit, bool finalizable, bool& inc_live); + + class ReverseIterator; +}; + +class ZBitMap::ReverseIterator { + BitMap* const _bitmap; + BitMap::idx_t _beg; + BitMap::idx_t _end; + +public: + ReverseIterator(BitMap* bitmap); + ReverseIterator(BitMap* bitmap, BitMap::idx_t beg, BitMap::idx_t end); + + void reset(BitMap::idx_t beg, BitMap::idx_t end); + void reset(BitMap::idx_t end); + + bool next(BitMap::idx_t* index); }; #endif // SHARE_GC_Z_ZBITMAP_HPP diff --git a/src/hotspot/share/gc/z/zBitMap.inline.hpp b/src/hotspot/share/gc/z/zBitMap.inline.hpp index f757cec270625..79d786040878c 100644 --- a/src/hotspot/share/gc/z/zBitMap.inline.hpp +++ b/src/hotspot/share/gc/z/zBitMap.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,9 +30,23 @@ #include "utilities/bitMap.inline.hpp" #include "utilities/debug.hpp" +inline ZMovableBitMap::ZMovableBitMap() : + CHeapBitMap(mtGC) {} + +inline ZMovableBitMap::ZMovableBitMap(ZMovableBitMap&& bitmap) : + CHeapBitMap(mtGC) { + update(bitmap.map(), bitmap.size()); + bitmap.update(nullptr, 0); +} + inline ZBitMap::ZBitMap(idx_t size_in_bits) : CHeapBitMap(size_in_bits, mtGC, false /* clear */) {} +inline ZBitMap::ZBitMap(const ZBitMap& other) : + CHeapBitMap(other.size(), mtGC, false /* clear */) { + memcpy(map(), other.map(), size_in_bytes()); +} + inline BitMap::bm_word_t ZBitMap::bit_mask_pair(idx_t bit) { assert(bit_in_word(bit) < BitsPerWord - 1, "Invalid bit index"); return (bm_word_t)3 << bit_in_word(bit); @@ -77,4 +91,34 @@ inline bool ZBitMap::par_set_bit_pair(idx_t bit, bool finalizable, bool& inc_liv } } +inline ZBitMap::ReverseIterator::ReverseIterator(BitMap* bitmap) : + ZBitMap::ReverseIterator(bitmap, 0, bitmap->size()) {} + +inline ZBitMap::ReverseIterator::ReverseIterator(BitMap* bitmap, BitMap::idx_t beg, BitMap::idx_t end) : + _bitmap(bitmap), + _beg(beg), + _end(end) {} + +inline void ZBitMap::ReverseIterator::reset(BitMap::idx_t beg, BitMap::idx_t end) { + assert(beg < _bitmap->size(), "beg index out of bounds"); + assert(end >= beg && end <= _bitmap->size(), "end index out of bounds"); + _beg = beg; + _end = end; +} + +inline void ZBitMap::ReverseIterator::reset(BitMap::idx_t end) { + assert(end >= _beg && end <= _bitmap->size(), "end index out of bounds"); + _end = end; +} + +inline bool ZBitMap::ReverseIterator::next(BitMap::idx_t *index) { + BitMap::ReverseIterator iter(*_bitmap, _beg, _end); + if (iter.is_empty()) { + return false; + } + + *index = _end = iter.index(); + return true; +} + #endif // SHARE_GC_Z_ZBITMAP_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zCPU.cpp b/src/hotspot/share/gc/z/zCPU.cpp index ba9da1d6e3398..f6aaa96476f75 100644 --- a/src/hotspot/share/gc/z/zCPU.cpp +++ b/src/hotspot/share/gc/z/zCPU.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,12 +32,12 @@ #define ZCPU_UNKNOWN_AFFINITY ((Thread*)-1) #define ZCPU_UNKNOWN_SELF ((Thread*)-2) -PaddedEnd* ZCPU::_affinity = NULL; +PaddedEnd* ZCPU::_affinity = nullptr; THREAD_LOCAL Thread* ZCPU::_self = ZCPU_UNKNOWN_SELF; THREAD_LOCAL uint32_t ZCPU::_cpu = 0; void ZCPU::initialize() { - assert(_affinity == NULL, "Already initialized"); + assert(_affinity == nullptr, "Already initialized"); const uint32_t ncpus = count(); _affinity = PaddedArray::create_unfreeable(ncpus); diff --git a/src/hotspot/share/gc/z/zCPU.inline.hpp b/src/hotspot/share/gc/z/zCPU.inline.hpp index 45c7e36568e3d..67d26f4c2e17c 100644 --- a/src/hotspot/share/gc/z/zCPU.inline.hpp +++ b/src/hotspot/share/gc/z/zCPU.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ inline uint32_t ZCPU::count() { } inline uint32_t ZCPU::id() { - assert(_affinity != NULL, "Not initialized"); + assert(_affinity != nullptr, "Not initialized"); // Fast path if (_affinity[_cpu]._thread == _self) { diff --git a/src/hotspot/share/gc/z/zCollectedHeap.cpp b/src/hotspot/share/gc/z/zCollectedHeap.cpp index c4547cb2fa4f8..749179b26cf54 100644 --- a/src/hotspot/share/gc/z/zCollectedHeap.cpp +++ b/src/hotspot/share/gc/z/zCollectedHeap.cpp @@ -21,20 +21,27 @@ * questions. */ +#include "gc/z/zAddress.hpp" #include "precompiled.hpp" #include "classfile/classLoaderData.hpp" #include "gc/shared/gcHeapSummary.hpp" -#include "gc/shared/gcLocker.inline.hpp" +#include "gc/shared/gcLogPrecious.hpp" #include "gc/shared/suspendibleThreadSet.hpp" +#include "gc/z/zAbort.hpp" +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zAllocator.inline.hpp" #include "gc/z/zCollectedHeap.hpp" +#include "gc/z/zContinuation.inline.hpp" #include "gc/z/zDirector.hpp" #include "gc/z/zDriver.hpp" +#include "gc/z/zGeneration.inline.hpp" #include "gc/z/zGlobals.hpp" #include "gc/z/zHeap.inline.hpp" +#include "gc/z/zJNICritical.hpp" #include "gc/z/zNMethod.hpp" #include "gc/z/zObjArrayAllocator.hpp" -#include "gc/z/zOop.inline.hpp" #include "gc/z/zServiceability.hpp" +#include "gc/z/zStackChunkGCData.inline.hpp" #include "gc/z/zStat.hpp" #include "gc/z/zUtils.inline.hpp" #include "memory/classLoaderMetaspace.hpp" @@ -43,6 +50,8 @@ #include "memory/universe.hpp" #include "oops/stackChunkOop.hpp" #include "runtime/continuationJavaClasses.hpp" +#include "runtime/jniHandles.inline.hpp" +#include "services/memoryUsage.hpp" #include "utilities/align.hpp" ZCollectedHeap* ZCollectedHeap::heap() { @@ -54,8 +63,9 @@ ZCollectedHeap::ZCollectedHeap() : _barrier_set(), _initialize(&_barrier_set), _heap(), - _driver(new ZDriver()), - _director(new ZDirector(_driver)), + _driver_minor(new ZDriverMinor()), + _driver_major(new ZDriverMajor()), + _director(new ZDirector()), _stat(new ZStat()), _runtime_workers() {} @@ -72,7 +82,7 @@ jint ZCollectedHeap::initialize() { return JNI_ENOMEM; } - Universe::calculate_verify_data((HeapWord*)0, (HeapWord*)UINTPTR_MAX); + Universe::set_verify_data(~(ZAddressHeapBase - 1) | 0x7, ZAddressHeapBase); return JNI_OK; } @@ -91,6 +101,8 @@ class ZStopConcurrentGCThreadClosure : public ThreadClosure { }; void ZCollectedHeap::stop() { + log_info_p(gc, exit)("Stopping ZGC"); + ZAbort::abort(); ZStopConcurrentGCThreadClosure cl; gc_threads_do(&cl); } @@ -126,45 +138,28 @@ bool ZCollectedHeap::is_in(const void* p) const { } bool ZCollectedHeap::requires_barriers(stackChunkOop obj) const { - uintptr_t* cont_addr = obj->field_addr(jdk_internal_vm_StackChunk::cont_offset()); - - if (!_heap.is_allocating(cast_from_oop(obj))) { - // An object that isn't allocating, is visible from GC tracing. Such - // stack chunks require barriers. - return true; - } - - if (!ZAddress::is_good_or_null(*cont_addr)) { - // If a chunk is allocated after a GC started, but before relocate start - // we can have an allocating chunk that isn't deeply good. That means that - // the contained oops might be bad and require GC barriers. - return true; - } - - // The chunk is allocating and its pointers are good. 
This chunk needs no - // GC barriers - return false; + return ZContinuation::requires_barriers(&_heap, obj); } HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) { const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size)); - const uintptr_t addr = _heap.alloc_tlab(size_in_bytes); + const zaddress addr = ZAllocator::eden()->alloc_tlab(size_in_bytes); - if (addr != 0) { + if (!is_null(addr)) { *actual_size = requested_size; } - return (HeapWord*)addr; + return (HeapWord*)untype(addr); } oop ZCollectedHeap::array_allocate(Klass* klass, size_t size, int length, bool do_zero, TRAPS) { - ZObjArrayAllocator allocator(klass, size, length, do_zero, THREAD); + const ZObjArrayAllocator allocator(klass, size, length, do_zero, THREAD); return allocator.allocate(); } HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) { const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size)); - return (HeapWord*)_heap.alloc_object(size_in_bytes); + return (HeapWord*)ZAllocator::eden()->alloc_object(size_in_bytes); } MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, @@ -175,7 +170,7 @@ MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* lo // Expand and retry allocation MetaWord* const result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype); - if (result != NULL) { + if (result != nullptr) { return result; } @@ -184,17 +179,47 @@ MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* lo } void ZCollectedHeap::collect(GCCause::Cause cause) { - _driver->collect(cause); + // Handle external collection requests + switch (cause) { + case GCCause::_wb_young_gc: + case GCCause::_scavenge_alot: + // Start urgent minor GC + _driver_minor->collect(ZDriverRequest(cause, ZYoungGCThreads, 0)); + break; + + case GCCause::_heap_dump: + case GCCause::_heap_inspection: + case GCCause::_wb_full_gc: + case GCCause::_wb_breakpoint: + case GCCause::_dcmd_gc_run: + case GCCause::_java_lang_system_gc: + case GCCause::_full_gc_alot: + case GCCause::_jvmti_force_gc: + case GCCause::_metadata_GC_clear_soft_refs: + case GCCause::_codecache_GC_aggressive: + // Start urgent major GC + _driver_major->collect(ZDriverRequest(cause, ZYoungGCThreads, ZOldGCThreads)); + break; + + case GCCause::_metadata_GC_threshold: + case GCCause::_codecache_GC_threshold: + // Start not urgent major GC + _driver_major->collect(ZDriverRequest(cause, 1, 1)); + break; + + default: + fatal("Unsupported GC cause (%s)", GCCause::to_string(cause)); + break; + } } void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { // These collection requests are ignored since ZGC can't run a synchronous // GC cycle from within the VM thread. This is considered benign, since the // only GC causes coming in here should be heap dumper and heap inspector. - // However, neither the heap dumper nor the heap inspector really need a GC - // to happen, but the result of their heap iterations might in that case be - // less accurate since they might include objects that would otherwise have - // been collected by a GC. + // If the heap dumper or heap inspector explicitly requests a gc and the + // caller is not the VM thread a synchronous GC cycle is performed from the + // caller thread in the prologue. 
assert(Thread::current()->is_VM_thread(), "Should be the VM thread"); guarantee(cause == GCCause::_heap_dump || cause == GCCause::_heap_inspection, "Invalid cause"); @@ -226,19 +251,27 @@ bool ZCollectedHeap::uses_stack_watermark_barrier() const { } MemoryUsage ZCollectedHeap::memory_usage() { - return _heap.serviceability_memory_pool()->get_memory_usage(); + const size_t initial_size = ZHeap::heap()->initial_capacity(); + const size_t committed = ZHeap::heap()->capacity(); + const size_t used = MIN2(ZHeap::heap()->used(), committed); + const size_t max_size = ZHeap::heap()->max_capacity(); + + return MemoryUsage(initial_size, used, committed, max_size); } GrowableArray ZCollectedHeap::memory_managers() { - GrowableArray memory_managers(2); - memory_managers.append(_heap.serviceability_cycle_memory_manager()); - memory_managers.append(_heap.serviceability_pause_memory_manager()); + GrowableArray memory_managers(4); + memory_managers.append(_heap.serviceability_cycle_memory_manager(true /* minor */)); + memory_managers.append(_heap.serviceability_cycle_memory_manager(false /* minor */)); + memory_managers.append(_heap.serviceability_pause_memory_manager(true /* minor */)); + memory_managers.append(_heap.serviceability_pause_memory_manager(false /* minor */)); return memory_managers; } GrowableArray ZCollectedHeap::memory_pools() { - GrowableArray memory_pools(1); - memory_pools.append(_heap.serviceability_memory_pool()); + GrowableArray memory_pools(2); + memory_pools.append(_heap.serviceability_memory_pool(ZGenerationId::young)); + memory_pools.append(_heap.serviceability_memory_pool(ZGenerationId::old)); return memory_pools; } @@ -250,6 +283,14 @@ ParallelObjectIteratorImpl* ZCollectedHeap::parallel_object_iterator(uint nworke return _heap.parallel_object_iterator(nworkers, true /* visit_weaks */); } +void ZCollectedHeap::pin_object(JavaThread* thread, oop obj) { + ZJNICritical::enter(thread); +} + +void ZCollectedHeap::unpin_object(JavaThread* thread, oop obj) { + ZJNICritical::exit(thread); +} + void ZCollectedHeap::keep_alive(oop obj) { _heap.keep_alive(obj); } @@ -259,7 +300,14 @@ void ZCollectedHeap::register_nmethod(nmethod* nm) { } void ZCollectedHeap::unregister_nmethod(nmethod* nm) { - ZNMethod::unregister_nmethod(nm); + // ZGC follows the 'unlink | handshake | purge', where nmethods are unlinked + // from the system, threads are handshaked so that no reference to the + // unlinked nmethods exist, then the nmethods are deleted in the purge phase. + // + // CollectedHeap::unregister_nmethod is called during the flush phase, which + // is too late for ZGC. + + ZNMethod::purge_nmethod(nm); } void ZCollectedHeap::verify_nmethod(nmethod* nm) { @@ -272,30 +320,39 @@ WorkerThreads* ZCollectedHeap::safepoint_workers() { void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const { tc->do_thread(_director); - tc->do_thread(_driver); + tc->do_thread(_driver_major); + tc->do_thread(_driver_minor); tc->do_thread(_stat); _heap.threads_do(tc); _runtime_workers.threads_do(tc); } VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() { - return VirtualSpaceSummary((HeapWord*)0, (HeapWord*)capacity(), (HeapWord*)max_capacity()); + const uintptr_t start = ZAddressHeapBase; + + // Fake values. ZGC does not commit memory contiguously in the reserved + // address space, and the reserved space is larger than MaxHeapSize. 
+ const uintptr_t committed_end = ZAddressHeapBase + capacity(); + const uintptr_t reserved_end = ZAddressHeapBase + max_capacity(); + + return VirtualSpaceSummary((HeapWord*)start, (HeapWord*)committed_end, (HeapWord*)reserved_end); +} + +bool ZCollectedHeap::contains_null(const oop* p) const { + const zpointer* const ptr = (const zpointer*)p; + return is_null_any(*ptr); } void ZCollectedHeap::safepoint_synchronize_begin() { + ZGeneration::young()->synchronize_relocation(); + ZGeneration::old()->synchronize_relocation(); SuspendibleThreadSet::synchronize(); } void ZCollectedHeap::safepoint_synchronize_end() { SuspendibleThreadSet::desynchronize(); -} - -void ZCollectedHeap::pin_object(JavaThread* thread, oop obj) { - GCLocker::lock_critical(thread); -} - -void ZCollectedHeap::unpin_object(JavaThread* thread, oop obj) { - GCLocker::unlock_critical(thread); + ZGeneration::old()->desynchronize_relocation(); + ZGeneration::young()->desynchronize_relocation(); } void ZCollectedHeap::prepare_for_verify() { @@ -308,21 +365,29 @@ void ZCollectedHeap::print_on(outputStream* st) const { void ZCollectedHeap::print_on_error(outputStream* st) const { st->print_cr("ZGC Globals:"); - st->print_cr(" GlobalPhase: %u (%s)", ZGlobalPhase, ZGlobalPhaseToString()); - st->print_cr(" GlobalSeqNum: %u", ZGlobalSeqNum); - st->print_cr(" Offset Max: " SIZE_FORMAT "%s (" PTR_FORMAT ")", + st->print_cr(" Young Collection: %s/%u", ZGeneration::young()->phase_to_string(), ZGeneration::young()->seqnum()); + st->print_cr(" Old Collection: %s/%u", ZGeneration::old()->phase_to_string(), ZGeneration::old()->seqnum()); + st->print_cr(" Offset Max: " SIZE_FORMAT "%s (" PTR_FORMAT ")", byte_size_in_exact_unit(ZAddressOffsetMax), exact_unit_for_byte_size(ZAddressOffsetMax), ZAddressOffsetMax); - st->print_cr(" Page Size Small: " SIZE_FORMAT "M", ZPageSizeSmall / M); - st->print_cr(" Page Size Medium: " SIZE_FORMAT "M", ZPageSizeMedium / M); + st->print_cr(" Page Size Small: " SIZE_FORMAT "M", ZPageSizeSmall / M); + st->print_cr(" Page Size Medium: " SIZE_FORMAT "M", ZPageSizeMedium / M); st->cr(); st->print_cr("ZGC Metadata Bits:"); - st->print_cr(" Good: " PTR_FORMAT, ZAddressGoodMask); - st->print_cr(" Bad: " PTR_FORMAT, ZAddressBadMask); - st->print_cr(" WeakBad: " PTR_FORMAT, ZAddressWeakBadMask); - st->print_cr(" Marked: " PTR_FORMAT, ZAddressMetadataMarked); - st->print_cr(" Remapped: " PTR_FORMAT, ZAddressMetadataRemapped); + st->print_cr(" LoadGood: " PTR_FORMAT, ZPointerLoadGoodMask); + st->print_cr(" LoadBad: " PTR_FORMAT, ZPointerLoadBadMask); + st->print_cr(" MarkGood: " PTR_FORMAT, ZPointerMarkGoodMask); + st->print_cr(" MarkBad: " PTR_FORMAT, ZPointerMarkBadMask); + st->print_cr(" StoreGood: " PTR_FORMAT, ZPointerStoreGoodMask); + st->print_cr(" StoreBad: " PTR_FORMAT, ZPointerStoreBadMask); + st->print_cr(" ------------------- "); + st->print_cr(" Remapped: " PTR_FORMAT, ZPointerRemapped); + st->print_cr(" RemappedYoung: " PTR_FORMAT, ZPointerRemappedYoungMask); + st->print_cr(" RemappedOld: " PTR_FORMAT, ZPointerRemappedOldMask); + st->print_cr(" MarkedYoung: " PTR_FORMAT, ZPointerMarkedYoung); + st->print_cr(" MarkedOld: " PTR_FORMAT, ZPointerMarkedOld); + st->print_cr(" Remembered: " PTR_FORMAT, ZPointerRemembered); st->cr(); CollectedHeap::print_on_error(st); } @@ -340,11 +405,11 @@ bool ZCollectedHeap::print_location(outputStream* st, void* addr) const { } void ZCollectedHeap::verify(VerifyOption option /* ignored */) { - _heap.verify(); + fatal("Externally triggered verification not supported"); } bool 
ZCollectedHeap::is_oop(oop object) const { - return _heap.is_oop(ZOop::to_address(object)); + return _heap.is_oop(cast_from_oop(object)); } bool ZCollectedHeap::supports_concurrent_gc_breakpoints() const { diff --git a/src/hotspot/share/gc/z/zCollectedHeap.hpp b/src/hotspot/share/gc/z/zCollectedHeap.hpp index d89d6917667fc..2b83f61400958 100644 --- a/src/hotspot/share/gc/z/zCollectedHeap.hpp +++ b/src/hotspot/share/gc/z/zCollectedHeap.hpp @@ -34,7 +34,8 @@ #include "services/memoryUsage.hpp" class ZDirector; -class ZDriver; +class ZDriverMajor; +class ZDriverMinor; class ZStat; class ZCollectedHeap : public CollectedHeap { @@ -45,7 +46,8 @@ class ZCollectedHeap : public CollectedHeap { ZBarrierSet _barrier_set; ZInitialize _initialize; ZHeap _heap; - ZDriver* _driver; + ZDriverMinor* _driver_minor; + ZDriverMajor* _driver_major; ZDirector* _director; ZStat* _stat; ZRuntimeWorkers _runtime_workers; @@ -110,6 +112,8 @@ class ZCollectedHeap : public CollectedHeap { VirtualSpaceSummary create_heap_space_summary() override; + bool contains_null(const oop* p) const override; + void safepoint_synchronize_begin() override; void safepoint_synchronize_end() override; diff --git a/src/hotspot/share/gc/z/zContinuation.cpp b/src/hotspot/share/gc/z/zContinuation.cpp new file mode 100644 index 0000000000000..9deb583f813d4 --- /dev/null +++ b/src/hotspot/share/gc/z/zContinuation.cpp @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zBarrier.inline.hpp" +#include "gc/z/zContinuation.inline.hpp" +#include "gc/z/zStackChunkGCData.inline.hpp" +#include "runtime/atomic.hpp" + +static zpointer materialize_zpointer(stackChunkOop chunk, void* addr) { + volatile uintptr_t* const value_addr = (volatile uintptr_t*)addr; + + // A stack chunk has two modes: + // + // 1) It's recently allocated and the contents is a copy of the native stack. + // All oops have the format of oops in the stack. That is, they are + // zaddresses, and don't have any colored metadata bits. + // + // 2) It has lived long enough that the GC needs to visit the oops. + // Before the GC visits the oops, they are converted into zpointers, + // and become colored pointers. + // + // The load_oop function supports loading oops from chunks in either of the + // two modes. It even supports loading oops, while another thread is + // converting the chunk to "gc mode" [transition from (1) to (2)]. 
So, we + // load the oop once and perform all checks on that loaded copy. + + // Load once + const uintptr_t value = Atomic::load(value_addr); + + if ((value & ~ZPointerAllMetadataMask) == 0) { + // Must be null of some sort - either zaddress or zpointer + return zpointer::null; + } + + const uintptr_t impossible_zaddress_mask = ~((ZAddressHeapBase - 1) | ZAddressHeapBase); + if ((value & impossible_zaddress_mask) != 0) { + // Must be a zpointer - it has bits forbidden in zaddresses + return to_zpointer(value); + } + + // Must be zaddress + const zaddress_unsafe zaddr = to_zaddress_unsafe(value); + + // A zaddress means that the chunk was recently allocated, and the layout is + // that of a native stack. That means that oops are uncolored (zaddress). But + // the oops still have an implicit color, saved away in the chunk. + + // Use the implicit color, and create a zpointer that is equivalent to + // what we would have written if we were to eagerly create the zpointer + // when the stack frames were copied into the chunk. + const uintptr_t color = ZStackChunkGCData::color(chunk); + return ZAddress::color(zaddr, color); +} + +oop ZContinuation::load_oop(stackChunkOop chunk, void* addr) { + // addr could contain either a zpointer or a zaddress + const zpointer zptr = materialize_zpointer(chunk, addr); + + // Apply the load barrier, without healing the zaddress/zpointer + return to_oop(ZBarrier::load_barrier_on_oop_field_preloaded(nullptr /* p */, zptr)); +} + +ZContinuation::ZColorStackOopClosure::ZColorStackOopClosure(stackChunkOop chunk) : + _color(ZStackChunkGCData::color(chunk)) { +} + +void ZContinuation::ZColorStackOopClosure::do_oop(oop* p) { + // Convert zaddress to zpointer + // TODO: Comment why this is safe and non volatile + zaddress_unsafe* const p_zaddress_unsafe = (zaddress_unsafe*)p; + zpointer* const p_zpointer = (zpointer*)p; + *p_zpointer = ZAddress::color(*p_zaddress_unsafe, _color); +} + +void ZContinuation::ZColorStackOopClosure::do_oop(narrowOop* p) { + ShouldNotReachHere(); +} + +void ZContinuation::ZUncolorStackOopClosure::do_oop(oop* p) { + const zpointer ptr = *(volatile zpointer*)p; + const zaddress addr = ZPointer::uncolor(ptr); + *(volatile zaddress*)p = addr; +} + +void ZContinuation::ZUncolorStackOopClosure::do_oop(narrowOop* p) { + ShouldNotReachHere(); +} diff --git a/src/hotspot/share/gc/z/zContinuation.hpp b/src/hotspot/share/gc/z/zContinuation.hpp new file mode 100644 index 0000000000000..4ae1a29fb2b65 --- /dev/null +++ b/src/hotspot/share/gc/z/zContinuation.hpp @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZCONTINUATION_HPP +#define SHARE_GC_Z_ZCONTINUATION_HPP + +#include "memory/allStatic.hpp" +#include "memory/iterator.hpp" +#include "oops/oopsHierarchy.hpp" + +class OopClosure; +class ZHeap; + +class ZContinuation : public AllStatic { +public: + static bool requires_barriers(const ZHeap* heap, stackChunkOop chunk); + + static oop load_oop(stackChunkOop chunk, void* addr); + + class ZColorStackOopClosure : public OopClosure { + private: + uintptr_t _color; + + public: + ZColorStackOopClosure(stackChunkOop chunk); + virtual void do_oop(oop* p) override; + virtual void do_oop(narrowOop* p) override; + }; + + class ZUncolorStackOopClosure : public OopClosure { + public: + virtual void do_oop(oop* p) override; + virtual void do_oop(narrowOop* p) override; + }; +}; + +#endif // SHARE_GC_Z_ZCONTINUATION_HPP diff --git a/src/hotspot/share/gc/z/zContinuation.inline.hpp b/src/hotspot/share/gc/z/zContinuation.inline.hpp new file mode 100644 index 0000000000000..1e7c09e4da57a --- /dev/null +++ b/src/hotspot/share/gc/z/zContinuation.inline.hpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZCONTINUATION_INLINE_HPP +#define SHARE_GC_Z_ZCONTINUATION_INLINE_HPP + +#include "gc/z/zContinuation.hpp" + +#include "classfile/javaClasses.hpp" +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zHeap.inline.hpp" +#include "gc/z/zStackChunkGCData.inline.hpp" +#include "oops/oop.inline.hpp" + +inline bool ZContinuation::requires_barriers(const ZHeap* heap, stackChunkOop chunk) { + if (!heap->is_allocating(to_zaddress(chunk))) { + // An object that isn't allocating, is visible from GC tracing. Such + // stack chunks require barriers. + return true; + } + + if (ZStackChunkGCData::color(chunk) != ZPointerStoreGoodMask) { + // If a chunk is allocated after a GC started, but before relocate start + // we can have an allocating chunk that isn't deeply good. That means that + // the contained oops might be bad and require GC barriers. + return true; + } + + // The chunk is allocating and its pointers are good. 
This chunk needs no + // GC barriers + return false; +} + +#endif // SHARE_GC_Z_ZCONTINUATION_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zDebug.gdb b/src/hotspot/share/gc/z/zDebug.gdb index 210063c3ac3f4..181da66b9c924 100644 --- a/src/hotspot/share/gc/z/zDebug.gdb +++ b/src/hotspot/share/gc/z/zDebug.gdb @@ -14,30 +14,30 @@ define zpo set $obj = (oopDesc*)($arg0) printf "Oop: 0x%016llx\tState: ", (uintptr_t)$obj - if ((uintptr_t)$obj & (uintptr_t)ZAddressGoodMask) + if ((uintptr_t)$obj & (uintptr_t)ZPointerStoreGoodMask) printf "Good " - if ((uintptr_t)$obj & (uintptr_t)ZAddressMetadataRemapped) + if ((uintptr_t)$obj & (uintptr_t)ZPointerRemapped) printf "(Remapped)" else - if ((uintptr_t)$obj & (uintptr_t)ZAddressMetadataMarked) - printf "(Marked)" + if ((uintptr_t)$obj & (uintptr_t)ZPointerMarkedOld) + printf "(MarkedOld)" else printf "(Unknown)" end end else printf "Bad " - if ((uintptr_t)ZAddressGoodMask & (uintptr_t)ZAddressMetadataMarked) + if ((uintptr_t)ZPointerStoreGoodMask & (uintptr_t)ZPointerMarkedOld) # Should be marked - if ((uintptr_t)$obj & (uintptr_t)ZAddressMetadataRemapped) + if ((uintptr_t)$obj & (uintptr_t)ZPointerRemapped) printf "(Not Marked, Remapped)" else printf "(Not Marked, Not Remapped)" end else - if ((uintptr_t)ZAddressGoodMask & (uintptr_t)ZAddressMetadataRemapped) + if ((uintptr_t)ZPointerStoreGoodMask & (uintptr_t)ZPointerRemapped) # Should be remapped - if ((uintptr_t)$obj & (uintptr_t)ZAddressMetadataMarked) + if ((uintptr_t)$obj & (uintptr_t)ZPointerMarkedOld) printf "(Marked, Not Remapped)" else printf "(Not Marked, Not Remapped)" @@ -129,20 +129,99 @@ define zmarked end end +# For some reason gdb doesn't like ZGeneration::ZPhase::Mark etc. +# Use hard-coded values instead. +define z_print_phase + if $arg0 == 0 + printf "Mark" + else + if $arg0 == 1 + printf "MarkComplete" + else + if $arg0 == 2 + printf "Relocate" + else + printf "Unknown" + end + end + end +end + +define z_print_generation + printf "%u", $arg0->_seqnum + printf "/" + z_print_phase $arg0->_phase +end + +define zz + printf "Old: " + z_print_generation ZHeap::_heap->_old + + printf " | " + + printf "Young: " + z_print_generation ZHeap::_heap->_young + + printf "\n" +end + # Print heap information define zph printf "Heap\n" - printf " GlobalPhase: %u\n", ZGlobalPhase - printf " GlobalSeqNum: %u\n", ZGlobalSeqNum + printf " Young Phase: %u\n", ZHeap::_heap->_young->_phase + printf " Old Phase: %u\n", ZHeap::_heap->_old->_phase + printf " Young SeqNum: %u\n", ZHeap::_heap->_young->_seqnum + printf " Old SeqNum: %u\n", ZHeap::_heap->_old->_seqnum printf " Offset Max: %-15llu (0x%llx)\n", ZAddressOffsetMax, ZAddressOffsetMax printf " Page Size Small: %-15llu (0x%llx)\n", ZPageSizeSmall, ZPageSizeSmall printf " Page Size Medium: %-15llu (0x%llx)\n", ZPageSizeMedium, ZPageSizeMedium printf "Metadata Bits\n" - printf " Good: 0x%016llx\n", ZAddressGoodMask - printf " Bad: 0x%016llx\n", ZAddressBadMask - printf " WeakBad: 0x%016llx\n", ZAddressWeakBadMask - printf " Marked: 0x%016llx\n", ZAddressMetadataMarked - printf " Remapped: 0x%016llx\n", ZAddressMetadataRemapped + printf " Good: 0x%016llx\n", ZPointerStoreGoodMask + printf " Bad: 0x%016llx\n", ZPointerStoreBadMask + printf " MarkedYoung: 0x%016llx\n", ZPointerMarkedYoung + printf " MarkedOld: 0x%016llx\n", ZPointerMarkedOld + printf " Remapped: 0x%016llx\n", ZPointerRemapped +end + +define print_bits + set $value=$arg0 + set $bits=$arg1 + + set $bit=0 + while ($bit < $bits) + set $bit_pos = (1ull << ($bits - 1 - $bit)) + printf "%d", 
($arg0 & $bit_pos) != 0 + set $bit = $bit + 1 + end + + printf " <%lX>", $value +end + +define print_bits8 + print_bits $arg0 8 +end + +define print_s_bits8 + printf $arg0 + print_bits8 $arg1 +end + +# Print metadata information +define zpm + printf "Metadata Load Bits " + print_s_bits8 "\n Mask: " ZPointerLoadMetadataMask + print_s_bits8 "\n Good: " ZPointerLoadGoodMask + print_s_bits8 "\n Remapped: " ZPointerRemapped + print_s_bits8 "\n Bad: " ZPointerLoadBadMask + printf "\n " + printf "\nMetadata Store Bits " + print_s_bits8 "\n Mask: " ZPointerStoreMetadataMask + print_s_bits8 "\n Good: " ZPointerStoreGoodMask + print_s_bits8 "\n Bad: " ZPointerStoreBadMask + print_s_bits8 "\n MarkedYoung: " ZPointerMarkedYoung + print_s_bits8 "\n MarkedOld: " ZPointerMarkedOld + print_s_bits8 "\n Finalizable: " ZPointerFinalizable + printf "\n" end # End of file diff --git a/src/hotspot/share/gc/z/zDirector.cpp b/src/hotspot/share/gc/z/zDirector.cpp index a9668a6a77f04..ac2c0ccfe4f54 100644 --- a/src/hotspot/share/gc/z/zDirector.cpp +++ b/src/hotspot/share/gc/z/zDirector.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,89 +23,77 @@ #include "precompiled.hpp" #include "gc/shared/gc_globals.hpp" +#include "gc/z/zCollectedHeap.hpp" #include "gc/z/zDirector.hpp" #include "gc/z/zDriver.hpp" +#include "gc/z/zGeneration.inline.hpp" #include "gc/z/zHeap.inline.hpp" #include "gc/z/zHeuristics.hpp" +#include "gc/z/zLock.inline.hpp" #include "gc/z/zStat.hpp" #include "logging/log.hpp" +ZDirector* ZDirector::_director; + constexpr double one_in_1000 = 3.290527; -constexpr double sample_interval = 1.0 / ZStatAllocRate::sample_hz; -ZDirector::ZDirector(ZDriver* driver) : - _driver(driver), - _metronome(ZStatAllocRate::sample_hz) { +struct ZWorkerResizeStats { + bool _is_active; + double _serial_gc_time_passed; + double _parallel_gc_time_passed; + uint _nworkers_current; +}; + +struct ZDirectorHeapStats { + size_t _soft_max_heap_size; + size_t _used; + uint _total_collections; +}; + +struct ZDirectorGenerationGeneralStats { + size_t _used; + uint _total_collections_at_start; +}; + +struct ZDirectorGenerationStats { + ZStatCycleStats _cycle; + ZStatWorkersStats _workers; + ZWorkerResizeStats _resize; + ZStatHeapStats _stat_heap; + ZDirectorGenerationGeneralStats _general; +}; + +struct ZDirectorStats { + ZStatMutatorAllocRateStats _mutator_alloc_rate; + ZDirectorHeapStats _heap; + ZDirectorGenerationStats _young_stats; + ZDirectorGenerationStats _old_stats; +}; + +ZDirector::ZDirector() : + _monitor(), + _stopped(false) { + _director = this; set_name("ZDirector"); create_and_start(); } -static void sample_allocation_rate() { - // Sample allocation rate. This is needed by rule_allocation_rate() - // below to estimate the time we have until we run out of memory. - const double bytes_per_second = ZStatAllocRate::sample_and_reset(); - - log_debug(gc, alloc)("Allocation Rate: %.1fMB/s, Predicted: %.1fMB/s, Avg: %.1f(+/-%.1f)MB/s", - bytes_per_second / M, - ZStatAllocRate::predict() / M, - ZStatAllocRate::avg() / M, - ZStatAllocRate::sd() / M); -} - -static ZDriverRequest rule_allocation_stall() { - // Perform GC if we've observed at least one allocation stall since - // the last GC started. 
- if (!ZHeap::heap()->has_alloc_stalled()) { - return GCCause::_no_gc; - } - - log_debug(gc, director)("Rule: Allocation Stall Observed"); +// Minor GC rules - return GCCause::_z_allocation_stall; -} - -static ZDriverRequest rule_warmup() { - if (ZStatCycle::is_warm()) { +static bool rule_minor_timer(const ZDirectorStats& stats) { + if (ZCollectionIntervalMinor <= 0) { // Rule disabled - return GCCause::_no_gc; - } - - // Perform GC if heap usage passes 10/20/30% and no other GC has been - // performed yet. This allows us to get some early samples of the GC - // duration, which is needed by the other rules. - const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity(); - const size_t used = ZHeap::heap()->used(); - const double used_threshold_percent = (ZStatCycle::nwarmup_cycles() + 1) * 0.1; - const size_t used_threshold = soft_max_capacity * used_threshold_percent; - - log_debug(gc, director)("Rule: Warmup %.0f%%, Used: " SIZE_FORMAT "MB, UsedThreshold: " SIZE_FORMAT "MB", - used_threshold_percent * 100, used / M, used_threshold / M); - - if (used < used_threshold) { - return GCCause::_no_gc; - } - - return GCCause::_z_warmup; -} - -static ZDriverRequest rule_timer() { - if (ZCollectionInterval <= 0) { - // Rule disabled - return GCCause::_no_gc; + return false; } // Perform GC if timer has expired. - const double time_since_last_gc = ZStatCycle::time_since_last(); - const double time_until_gc = ZCollectionInterval - time_since_last_gc; - - log_debug(gc, director)("Rule: Timer, Interval: %.3fs, TimeUntilGC: %.3fs", - ZCollectionInterval, time_until_gc); + const double time_since_last_gc = stats._young_stats._cycle._time_since_last; + const double time_until_gc = ZCollectionIntervalMinor - time_since_last_gc; - if (time_until_gc > 0) { - return GCCause::_no_gc; - } + log_debug(gc, director)("Rule Minor: Timer, Interval: %.3fs, TimeUntilGC: %.3fs", + ZCollectionIntervalMinor, time_until_gc); - return GCCause::_z_timer; + return time_until_gc <= 0; } static double estimated_gc_workers(double serial_gc_time, double parallelizable_gc_time, double time_until_deadline) { @@ -113,42 +101,29 @@ static double estimated_gc_workers(double serial_gc_time, double parallelizable_ return parallelizable_gc_time / parallelizable_time_until_deadline; } -static uint discrete_gc_workers(double gc_workers) { - return clamp(ceil(gc_workers), 1, ConcGCThreads); +static uint discrete_young_gc_workers(double gc_workers) { + return clamp(ceil(gc_workers), 1, ZYoungGCThreads); } -static double select_gc_workers(double serial_gc_time, double parallelizable_gc_time, double alloc_rate_sd_percent, double time_until_oom) { +static double select_young_gc_workers(const ZDirectorStats& stats, double serial_gc_time, double parallelizable_gc_time, double alloc_rate_sd_percent, double time_until_oom) { // Use all workers until we're warm - if (!ZStatCycle::is_warm()) { - const double not_warm_gc_workers = ConcGCThreads; - log_debug(gc, director)("Select GC Workers (Not Warm), GCWorkers: %.3f", not_warm_gc_workers); + if (!stats._old_stats._cycle._is_warm) { + const double not_warm_gc_workers = ZYoungGCThreads; + log_debug(gc, director)("Select Minor GC Workers (Not Warm), GCWorkers: %.3f", not_warm_gc_workers); return not_warm_gc_workers; } - // Calculate number of GC workers needed to avoid a long GC cycle and to avoid OOM. 
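To get a feel for the numbers, here is a minimal standalone sketch of the worker-estimation arithmetic used by the minor-GC rules above (plain C++ with made-up inputs, not HotSpot code; the helper names and the clamping of the parallelizable deadline are assumptions, not taken from the patch):

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    static const unsigned max_young_gc_threads = 4; // stand-in for ZYoungGCThreads

    static double estimated_workers(double serial_gc_time, double parallelizable_gc_time, double time_until_deadline) {
      // Assumption: the serial part eats into the deadline first; keep the remainder positive.
      const double parallelizable_time_until_deadline = std::max(time_until_deadline - serial_gc_time, 0.001);
      return parallelizable_gc_time / parallelizable_time_until_deadline;
    }

    static unsigned discrete_young_workers(double gc_workers) {
      // Round up and clamp to [1, max_young_gc_threads], mirroring discrete_young_gc_workers().
      const unsigned rounded = (unsigned)std::ceil(gc_workers);
      return std::min(std::max(rounded, 1u), max_young_gc_threads);
    }

    int main() {
      // 0.2s of serial work, 2.0s of parallelizable work, 5s until the projected OOM:
      // 2.0 / (5.0 - 0.2) ~= 0.42 workers, which rounds up to a single worker.
      const double workers = estimated_workers(0.2, 2.0, 5.0);
      std::printf("estimated: %.2f -> discrete: %u\n", workers, discrete_young_workers(workers));
      return 0;
    }

With these inputs the estimate is well below one worker, so the discrete count clamps to a single young GC thread.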
- const double avoid_long_gc_workers = estimated_gc_workers(serial_gc_time, parallelizable_gc_time, 10 /* seconds */); - const double avoid_oom_gc_workers = estimated_gc_workers(serial_gc_time, parallelizable_gc_time, time_until_oom); - - const double gc_workers = MAX2(avoid_long_gc_workers, avoid_oom_gc_workers); - const uint actual_gc_workers = discrete_gc_workers(gc_workers); - const uint last_gc_workers = ZStatCycle::last_active_workers(); - - // More than 15% division from the average is considered unsteady - if (alloc_rate_sd_percent >= 0.15) { - const double half_gc_workers = ConcGCThreads / 2.0; - const double unsteady_gc_workers = MAX3(gc_workers, last_gc_workers, half_gc_workers); - log_debug(gc, director)("Select GC Workers (Unsteady), " - "AvoidLongGCWorkers: %.3f, AvoidOOMGCWorkers: %.3f, LastGCWorkers: %.3f, HalfGCWorkers: %.3f, GCWorkers: %.3f", - avoid_long_gc_workers, avoid_oom_gc_workers, (double)last_gc_workers, half_gc_workers, unsteady_gc_workers); - return unsteady_gc_workers; - } + // Calculate number of GC workers needed to avoid OOM. + const double gc_workers = estimated_gc_workers(serial_gc_time, parallelizable_gc_time, time_until_oom); + const uint actual_gc_workers = discrete_young_gc_workers(gc_workers); + const double last_gc_workers = stats._young_stats._cycle._last_active_workers; - if (actual_gc_workers < last_gc_workers) { + if ((double)actual_gc_workers < last_gc_workers) { // Before decreasing number of GC workers compared to the previous GC cycle, check if the // next GC cycle will need to increase it again. If so, use the same number of GC workers // that will be needed in the next cycle. const double gc_duration_delta = (parallelizable_gc_time / actual_gc_workers) - (parallelizable_gc_time / last_gc_workers); - const double additional_time_for_allocations = ZStatCycle::time_since_last() - gc_duration_delta - sample_interval; + const double additional_time_for_allocations = stats._young_stats._cycle._time_since_last - gc_duration_delta; const double next_time_until_oom = time_until_oom + additional_time_for_allocations; const double next_avoid_oom_gc_workers = estimated_gc_workers(serial_gc_time, parallelizable_gc_time, next_time_until_oom); @@ -156,29 +131,32 @@ static double select_gc_workers(double serial_gc_time, double parallelizable_gc_ const double next_gc_workers = next_avoid_oom_gc_workers + 0.5; const double try_lowering_gc_workers = clamp(next_gc_workers, actual_gc_workers, last_gc_workers); - log_debug(gc, director)("Select GC Workers (Try Lowering), " - "AvoidLongGCWorkers: %.3f, AvoidOOMGCWorkers: %.3f, NextAvoidOOMGCWorkers: %.3f, LastGCWorkers: %.3f, GCWorkers: %.3f", - avoid_long_gc_workers, avoid_oom_gc_workers, next_avoid_oom_gc_workers, (double)last_gc_workers, try_lowering_gc_workers); + log_debug(gc, director)("Select Minor GC Workers (Try Lowering), " + "AvoidOOMGCWorkers: %.3f, NextAvoidOOMGCWorkers: %.3f, LastGCWorkers: %.3f, GCWorkers: %.3f", + gc_workers, next_avoid_oom_gc_workers, last_gc_workers, try_lowering_gc_workers); return try_lowering_gc_workers; } - log_debug(gc, director)("Select GC Workers (Normal), " - "AvoidLongGCWorkers: %.3f, AvoidOOMGCWorkers: %.3f, LastGCWorkers: %.3f, GCWorkers: %.3f", - avoid_long_gc_workers, avoid_oom_gc_workers, (double)last_gc_workers, gc_workers); + log_debug(gc, director)("Select Minor GC Workers (Normal), " + "AvoidOOMGCWorkers: %.3f, LastGCWorkers: %.3f, GCWorkers: %.3f", + gc_workers, last_gc_workers, gc_workers); return gc_workers; } -ZDriverRequest rule_allocation_rate_dynamic() 
{ - if (!ZStatCycle::is_time_trustable()) { +ZDriverRequest rule_minor_allocation_rate_dynamic(const ZDirectorStats& stats, + double serial_gc_time_passed, + double parallel_gc_time_passed, + bool conservative_alloc_rate, + size_t capacity) { + if (!stats._old_stats._cycle._is_time_trustable) { // Rule disabled - return GCCause::_no_gc; + return ZDriverRequest(GCCause::_no_gc, ZYoungGCThreads, 0); } // Calculate amount of free memory available. Note that we take the // relocation headroom into account to avoid in-place relocation. - const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity(); - const size_t used = ZHeap::heap()->used(); - const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used); + const size_t used = stats._heap._used; + const size_t free_including_headroom = capacity - MIN2(capacity, used); const size_t free = free_including_headroom - MIN2(free_including_headroom, ZHeuristics::relocation_headroom()); // Calculate time until OOM given the max allocation rate and the amount @@ -187,36 +165,35 @@ ZDriverRequest rule_allocation_rate_dynamic() { // phase changes in the allocate rate. We then add ~3.3 sigma to account for // the allocation rate variance, which means the probability is 1 in 1000 // that a sample is outside of the confidence interval. - const double alloc_rate_predict = ZStatAllocRate::predict(); - const double alloc_rate_avg = ZStatAllocRate::avg(); - const double alloc_rate_sd = ZStatAllocRate::sd(); + const ZStatMutatorAllocRateStats alloc_rate_stats = stats._mutator_alloc_rate; + const double alloc_rate_predict = alloc_rate_stats._predict; + const double alloc_rate_avg = alloc_rate_stats._avg; + const double alloc_rate_sd = alloc_rate_stats._sd; const double alloc_rate_sd_percent = alloc_rate_sd / (alloc_rate_avg + 1.0); - const double alloc_rate = (MAX2(alloc_rate_predict, alloc_rate_avg) * ZAllocationSpikeTolerance) + (alloc_rate_sd * one_in_1000) + 1.0; + const double alloc_rate_conservative = (MAX2(alloc_rate_predict, alloc_rate_avg) * ZAllocationSpikeTolerance) + (alloc_rate_sd * one_in_1000) + 1.0; + const double alloc_rate = conservative_alloc_rate ? alloc_rate_conservative : alloc_rate_stats._avg; const double time_until_oom = (free / alloc_rate) / (1.0 + alloc_rate_sd_percent); // Calculate max serial/parallel times of a GC cycle. The times are // moving averages, we add ~3.3 sigma to account for the variance. - const double serial_gc_time = ZStatCycle::serial_time().davg() + (ZStatCycle::serial_time().dsd() * one_in_1000); - const double parallelizable_gc_time = ZStatCycle::parallelizable_time().davg() + (ZStatCycle::parallelizable_time().dsd() * one_in_1000); + const double serial_gc_time = fabsd(stats._young_stats._cycle._avg_serial_time + (stats._young_stats._cycle._sd_serial_time * one_in_1000) - serial_gc_time_passed); + const double parallelizable_gc_time = fabsd(stats._young_stats._cycle._avg_parallelizable_time + (stats._young_stats._cycle._sd_parallelizable_time * one_in_1000) - parallel_gc_time_passed); // Calculate number of GC workers needed to avoid OOM. - const double gc_workers = select_gc_workers(serial_gc_time, parallelizable_gc_time, alloc_rate_sd_percent, time_until_oom); + const double gc_workers = select_young_gc_workers(stats, serial_gc_time, parallelizable_gc_time, alloc_rate_sd_percent, time_until_oom); // Convert to a discrete number of GC workers within limits. 
- const uint actual_gc_workers = discrete_gc_workers(gc_workers); + const uint actual_gc_workers = discrete_young_gc_workers(gc_workers); // Calculate GC duration given number of GC workers needed. const double actual_gc_duration = serial_gc_time + (parallelizable_gc_time / actual_gc_workers); - const uint last_gc_workers = ZStatCycle::last_active_workers(); // Calculate time until GC given the time until OOM and GC duration. - // We also subtract the sample interval, so that we don't overshoot the - // target time and end up starting the GC too late in the next interval. - const double time_until_gc = time_until_oom - actual_gc_duration - sample_interval; + const double time_until_gc = time_until_oom - actual_gc_duration; - log_debug(gc, director)("Rule: Allocation Rate (Dynamic GC Workers), " + log_debug(gc, director)("Rule Minor: Allocation Rate (Dynamic GC Workers), " "MaxAllocRate: %.1fMB/s (+/-%.1f%%), Free: " SIZE_FORMAT "MB, GCCPUTime: %.3f, " - "GCDuration: %.3fs, TimeUntilOOM: %.3fs, TimeUntilGC: %.3fs, GCWorkers: %u -> %u", + "GCDuration: %.3fs, TimeUntilOOM: %.3fs, TimeUntilGC: %.3fs, GCWorkers: %u", alloc_rate / M, alloc_rate_sd_percent * 100, free / M, @@ -224,20 +201,53 @@ ZDriverRequest rule_allocation_rate_dynamic() { serial_gc_time + (parallelizable_gc_time / actual_gc_workers), time_until_oom, time_until_gc, - last_gc_workers, actual_gc_workers); - if (actual_gc_workers <= last_gc_workers && time_until_gc > 0) { - return ZDriverRequest(GCCause::_no_gc, actual_gc_workers); + // Bail out if we are not "close" to needing the GC to start yet, where + // close is 5% of the time left until OOM. If we don't check that we + // are "close", then the heuristics instead add more threads and we + // end up not triggering GCs until we have the max number of threads. 
+ if (time_until_gc > time_until_oom * 0.05) { + return ZDriverRequest(GCCause::_no_gc, actual_gc_workers, 0); } - return ZDriverRequest(GCCause::_z_allocation_rate, actual_gc_workers); + return ZDriverRequest(GCCause::_z_allocation_rate, actual_gc_workers, 0); +} + +ZDriverRequest rule_soft_minor_allocation_rate_dynamic(const ZDirectorStats& stats, + double serial_gc_time_passed, + double parallel_gc_time_passed) { + return rule_minor_allocation_rate_dynamic(stats, + 0.0 /* serial_gc_time_passed */, + 0.0 /* parallel_gc_time_passed */, + false /* conservative_alloc_rate */, + stats._heap._soft_max_heap_size /* capacity */); +} + +ZDriverRequest rule_semi_hard_minor_allocation_rate_dynamic(const ZDirectorStats& stats, + double serial_gc_time_passed, + double parallel_gc_time_passed) { + return rule_minor_allocation_rate_dynamic(stats, + 0.0 /* serial_gc_time_passed */, + 0.0 /* parallel_gc_time_passed */, + false /* conservative_alloc_rate */, + ZHeap::heap()->max_capacity() /* capacity */); +} + +ZDriverRequest rule_hard_minor_allocation_rate_dynamic(const ZDirectorStats& stats, + double serial_gc_time_passed, + double parallel_gc_time_passed) { + return rule_minor_allocation_rate_dynamic(stats, + 0.0 /* serial_gc_time_passed */, + 0.0 /* parallel_gc_time_passed */, + true /* conservative_alloc_rate */, + ZHeap::heap()->max_capacity() /* capacity */); } -static ZDriverRequest rule_allocation_rate_static() { - if (!ZStatCycle::is_time_trustable()) { +static bool rule_minor_allocation_rate_static(const ZDirectorStats& stats) { + if (!stats._old_stats._cycle._is_time_trustable) { // Rule disabled - return GCCause::_no_gc; + return false; } // Perform GC if the estimated max allocation rate indicates that we @@ -248,8 +258,8 @@ static ZDriverRequest rule_allocation_rate_static() { // Calculate amount of free memory available. Note that we take the // relocation headroom into account to avoid in-place relocation. - const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity(); - const size_t used = ZHeap::heap()->used(); + const size_t soft_max_capacity = stats._heap._soft_max_heap_size; + const size_t used = stats._heap._used; const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used); const size_t free = free_including_headroom - MIN2(free_including_headroom, ZHeuristics::relocation_headroom()); @@ -259,69 +269,286 @@ static ZDriverRequest rule_allocation_rate_static() { // phase changes in the allocate rate. We then add ~3.3 sigma to account for // the allocation rate variance, which means the probability is 1 in 1000 // that a sample is outside of the confidence interval. - const double max_alloc_rate = (ZStatAllocRate::avg() * ZAllocationSpikeTolerance) + (ZStatAllocRate::sd() * one_in_1000); + const ZStatMutatorAllocRateStats alloc_rate_stats = stats._mutator_alloc_rate; + const double max_alloc_rate = (alloc_rate_stats._avg * ZAllocationSpikeTolerance) + (alloc_rate_stats._sd * one_in_1000); const double time_until_oom = free / (max_alloc_rate + 1.0); // Plus 1.0B/s to avoid division by zero // Calculate max serial/parallel times of a GC cycle. The times are // moving averages, we add ~3.3 sigma to account for the variance. 
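The dynamic allocation-rate rule above boils down to a handful of arithmetic steps; the following is a self-contained sketch with invented sample statistics (all constants and inputs are placeholders, not values taken from ZGC):

    #include <algorithm>
    #include <cstdio>

    int main() {
      const double one_in_1000 = 3.290527;  // ~3.3 sigma, as in the rule above
      const double spike_tolerance = 2.0;   // plays the role of ZAllocationSpikeTolerance
      const double M = 1024.0 * 1024.0;

      // Sampled mutator allocation rate: 300 MB/s average, 350 MB/s predicted, 40 MB/s stddev.
      const double alloc_rate_avg = 300 * M;
      const double alloc_rate_predict = 350 * M;
      const double alloc_rate_sd = 40 * M;
      const double alloc_rate_sd_percent = alloc_rate_sd / (alloc_rate_avg + 1.0);

      // Conservative rate: spike tolerance on the max of avg/predicted, plus ~3.3 sigma.
      const double alloc_rate = (std::max(alloc_rate_predict, alloc_rate_avg) * spike_tolerance) +
                                (alloc_rate_sd * one_in_1000) + 1.0;

      // 4 GB free after subtracting the relocation headroom.
      const double free_bytes = 4096 * M;
      const double time_until_oom = (free_bytes / alloc_rate) / (1.0 + alloc_rate_sd_percent);

      // GC duration with 2 workers: 0.1s serial plus 1.2s of parallelizable work.
      const double gc_duration = 0.1 + (1.2 / 2.0);
      const double time_until_gc = time_until_oom - gc_duration;

      // The dynamic rule only triggers once we are within 5% of the time left until OOM.
      std::printf("time_until_oom: %.2fs, time_until_gc: %.2fs, trigger: %s\n",
                  time_until_oom, time_until_gc,
                  time_until_gc <= time_until_oom * 0.05 ? "yes" : "no");
      return 0;
    }

With these particular numbers the projected OOM is roughly 4.3s away and the rule stays quiet, since the time until GC is still far more than 5% of the time until OOM.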
-  const double serial_gc_time = ZStatCycle::serial_time().davg() + (ZStatCycle::serial_time().dsd() * one_in_1000);
-  const double parallelizable_gc_time = ZStatCycle::parallelizable_time().davg() + (ZStatCycle::parallelizable_time().dsd() * one_in_1000);
+  const double serial_gc_time = stats._young_stats._cycle._avg_serial_time + (stats._young_stats._cycle._sd_serial_time * one_in_1000);
+  const double parallelizable_gc_time = stats._young_stats._cycle._avg_parallelizable_time + (stats._young_stats._cycle._sd_parallelizable_time * one_in_1000);
 
   // Calculate GC duration given number of GC workers needed.
-  const double gc_duration = serial_gc_time + (parallelizable_gc_time / ConcGCThreads);
+  const double gc_duration = serial_gc_time + (parallelizable_gc_time / ZYoungGCThreads);
 
   // Calculate time until GC given the time until OOM and max duration of GC.
   // We also deduct the sample interval, so that we don't overshoot the target
   // time and end up starting the GC too late in the next interval.
-  const double time_until_gc = time_until_oom - gc_duration - sample_interval;
+  const double time_until_gc = time_until_oom - gc_duration;
 
-  log_debug(gc, director)("Rule: Allocation Rate (Static GC Workers), MaxAllocRate: %.1fMB/s, Free: " SIZE_FORMAT "MB, GCDuration: %.3fs, TimeUntilGC: %.3fs",
+  log_debug(gc, director)("Rule Minor: Allocation Rate (Static GC Workers), MaxAllocRate: %.1fMB/s, Free: " SIZE_FORMAT "MB, GCDuration: %.3fs, TimeUntilGC: %.3fs",
                           max_alloc_rate / M, free / M, gc_duration, time_until_gc);
 
-  if (time_until_gc > 0) {
-    return GCCause::_no_gc;
+  return time_until_gc <= 0;
+}
+
+static bool is_young_small(const ZDirectorStats& stats) {
+  // Calculate amount of freeable memory available.
+  const size_t soft_max_capacity = stats._heap._soft_max_heap_size;
+  const size_t young_used = stats._young_stats._general._used;
+
+  const double young_used_percent = percent_of(young_used, soft_max_capacity);
+
+  // If the freeable memory isn't even 5% of the heap, we can't expect to free up
+  // all that much memory, so let's not even try - it will likely be a wasted effort
+  // that takes away CPU power from the hopefully more profitable major collection.
+  return young_used_percent <= 5.0;
+}
+
+template <typename PrintFn>
+static bool is_high_usage(const ZDirectorStats& stats, PrintFn* print_function = nullptr) {
+  // Calculate amount of free memory available. Note that we take the
+  // relocation headroom into account to avoid in-place relocation. 
+ const size_t soft_max_capacity = stats._heap._soft_max_heap_size; + const size_t used = stats._heap._used; + const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used); + const size_t free = free_including_headroom - MIN2(free_including_headroom, ZHeuristics::relocation_headroom()); + const double free_percent = percent_of(free, soft_max_capacity); + + if (print_function != nullptr) { + (*print_function)(free, free_percent); } - return GCCause::_z_allocation_rate; + // The heap has high usage if there is less than 5% free memory left + return free_percent <= 5.0; +} + +static bool is_major_urgent(const ZDirectorStats& stats) { + return is_young_small(stats) && is_high_usage(stats); } -static ZDriverRequest rule_allocation_rate() { +static bool rule_minor_allocation_rate(const ZDirectorStats& stats) { + if (ZCollectionIntervalOnly) { + // Rule disabled + return false; + } + + if (ZHeap::heap()->is_alloc_stalling_for_old()) { + // Don't collect young if we have threads stalled waiting for an old collection + return false; + } + + if (is_young_small(stats)) { + return false; + } + if (UseDynamicNumberOfGCThreads) { - return rule_allocation_rate_dynamic(); - } else { - return rule_allocation_rate_static(); + if (rule_soft_minor_allocation_rate_dynamic(stats, + 0.0 /* serial_gc_time_passed */, + 0.0 /* parallel_gc_time_passed */).cause() != GCCause::_no_gc) { + return true; + } + + if (rule_hard_minor_allocation_rate_dynamic(stats, + 0.0 /* serial_gc_time_passed */, + 0.0 /* parallel_gc_time_passed */).cause() != GCCause::_no_gc) { + return true; + } + + return false; } + + return rule_minor_allocation_rate_static(stats); } -static ZDriverRequest rule_high_usage() { - // Perform GC if the amount of free memory is 5% or less. This is a preventive - // meassure in the case where the application has a very low allocation rate, +static bool rule_minor_high_usage(const ZDirectorStats& stats) { + if (ZCollectionIntervalOnly) { + // Rule disabled + return false; + } + + if (is_young_small(stats)) { + return false; + } + + // Perform GC if the amount of free memory is small. This is a preventive + // measure in the case where the application has a very low allocation rate, // such that the allocation rate rule doesn't trigger, but the amount of free // memory is still slowly but surely heading towards zero. In this situation, // we start a GC cycle to avoid a potential allocation stall later. - // Calculate amount of free memory available. Note that we take the - // relocation headroom into account to avoid in-place relocation. 
- const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity(); - const size_t used = ZHeap::heap()->used(); + const size_t soft_max_capacity = stats._heap._soft_max_heap_size; + const size_t used = stats._heap._used; const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used); const size_t free = free_including_headroom - MIN2(free_including_headroom, ZHeuristics::relocation_headroom()); const double free_percent = percent_of(free, soft_max_capacity); - log_debug(gc, director)("Rule: High Usage, Free: " SIZE_FORMAT "MB(%.1f%%)", - free / M, free_percent); + auto print_function = [&](size_t free, double free_percent) { + log_debug(gc, director)("Rule Minor: High Usage, Free: " SIZE_FORMAT "MB(%.1f%%)", + free / M, free_percent); + }; - if (free_percent > 5.0) { - return GCCause::_no_gc; + return is_high_usage(stats, &print_function); +} + +// Major GC rules + +static bool rule_major_timer(const ZDirectorStats& stats) { + if (ZCollectionIntervalMajor <= 0) { + // Rule disabled + return false; } - return GCCause::_z_high_usage; + // Perform GC if timer has expired. + const double time_since_last_gc = stats._old_stats._cycle._time_since_last; + const double time_until_gc = ZCollectionIntervalMajor - time_since_last_gc; + + log_debug(gc, director)("Rule Major: Timer, Interval: %.3fs, TimeUntilGC: %.3fs", + ZCollectionIntervalMajor, time_until_gc); + + return time_until_gc <= 0; } -static ZDriverRequest rule_proactive() { - if (!ZProactive || !ZStatCycle::is_warm()) { +static bool rule_major_warmup(const ZDirectorStats& stats) { + if (ZCollectionIntervalOnly) { // Rule disabled - return GCCause::_no_gc; + return false; + } + + if (stats._old_stats._cycle._is_warm) { + // Rule disabled + return false; + } + + // Perform GC if heap usage passes 10/20/30% and no other GC has been + // performed yet. This allows us to get some early samples of the GC + // duration, which is needed by the other rules. + const size_t soft_max_capacity = stats._heap._soft_max_heap_size; + const size_t used = stats._heap._used; + const double used_threshold_percent = (stats._old_stats._cycle._nwarmup_cycles + 1) * 0.1; + const size_t used_threshold = soft_max_capacity * used_threshold_percent; + + log_debug(gc, director)("Rule Major: Warmup %.0f%%, Used: " SIZE_FORMAT "MB, UsedThreshold: " SIZE_FORMAT "MB", + used_threshold_percent * 100, used / M, used_threshold / M); + + return used >= used_threshold; +} + +static double gc_time(ZDirectorGenerationStats generation_stats) { + // Calculate max serial/parallel times of a generation GC cycle. The times are + // moving averages, we add ~3.3 sigma to account for the variance. + const double serial_gc_time = generation_stats._cycle._avg_serial_time + (generation_stats._cycle._sd_serial_time * one_in_1000); + const double parallelizable_gc_time = generation_stats._cycle._avg_parallelizable_time + (generation_stats._cycle._sd_parallelizable_time * one_in_1000); + + // Calculate young GC time and duration given number of GC workers needed. + return serial_gc_time + parallelizable_gc_time; +} + +static double calculate_extra_young_gc_time(const ZDirectorStats& stats) { + if (!stats._old_stats._cycle._is_time_trustable) { + return 0.0; + } + + // Calculate amount of free memory available. Note that we take the + // relocation headroom into account to avoid in-place relocation. 
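As a rough illustration of gc_time() and the warmup thresholds above, here is a small standalone sketch (hypothetical inputs only, not HotSpot code):

    #include <cstdio>

    int main() {
      // gc_time() above: moving average plus ~3.3 sigma of the serial and parallelizable parts.
      const double one_in_1000 = 3.290527;
      const double avg_serial = 0.05, sd_serial = 0.01;
      const double avg_parallel = 1.0, sd_parallel = 0.2;
      const double serial_gc_time = avg_serial + sd_serial * one_in_1000;
      const double parallelizable_gc_time = avg_parallel + sd_parallel * one_in_1000;
      const double gc_time = serial_gc_time + parallelizable_gc_time;

      // rule_major_warmup above: trigger at 10/20/30% heap usage for the first three cycles.
      const double soft_max_capacity_mb = 8192.0;
      const unsigned nwarmup_cycles = 1; // one warmup cycle already done
      const double used_threshold_percent = (nwarmup_cycles + 1) * 0.1;
      const double used_threshold_mb = soft_max_capacity_mb * used_threshold_percent;

      std::printf("gc_time: %.3fs, warmup threshold: %.0f%% (%.0f MB)\n",
                  gc_time, used_threshold_percent * 100.0, used_threshold_mb);
      return 0;
    }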
+ const size_t old_used = stats._old_stats._general._used; + const size_t old_live = stats._old_stats._stat_heap._live_at_mark_end; + const size_t old_garbage = old_used - old_live; + + const double young_gc_time = gc_time(stats._young_stats); + + // Calculate how much memory young collections are predicted to free. + const size_t reclaimed_per_young_gc = stats._young_stats._stat_heap._reclaimed_avg; + + // Calculate current YC time and predicted YC time after an old collection. + const double current_young_gc_time_per_bytes_freed = double(young_gc_time) / double(reclaimed_per_young_gc); + const double potential_young_gc_time_per_bytes_freed = double(young_gc_time) / double(reclaimed_per_young_gc + old_garbage); + + // Calculate extra time per young collection inflicted by *not* doing an + // old collection that frees up memory in the old generation. + const double extra_young_gc_time_per_bytes_freed = current_young_gc_time_per_bytes_freed - potential_young_gc_time_per_bytes_freed; + const double extra_young_gc_time = extra_young_gc_time_per_bytes_freed * (reclaimed_per_young_gc + old_garbage); + + return extra_young_gc_time; +} + +static bool rule_major_allocation_rate(const ZDirectorStats& stats) { + if (!stats._old_stats._cycle._is_time_trustable) { + // Rule disabled + return false; + } + + // Calculate GC time. + const double old_gc_time = gc_time(stats._old_stats); + const double young_gc_time = gc_time(stats._young_stats); + + // Calculate how much memory collections are predicted to free. + const size_t reclaimed_per_young_gc = stats._young_stats._stat_heap._reclaimed_avg; + const size_t reclaimed_per_old_gc = stats._old_stats._stat_heap._reclaimed_avg; + + // Calculate the GC cost for each reclaimed byte + const double current_young_gc_time_per_bytes_freed = double(young_gc_time) / double(reclaimed_per_young_gc); + const double current_old_gc_time_per_bytes_freed = double(old_gc_time) / double(reclaimed_per_old_gc); + + // Calculate extra time per young collection inflicted by *not* doing an + // old collection that frees up memory in the old generation. + const double extra_young_gc_time = calculate_extra_young_gc_time(stats); + + // Doing an old collection makes subsequent young collections more efficient. + // Calculate the number of young collections ahead that we will try to amortize + // the cost of doing an old collection for. + const int lookahead = stats._heap._total_collections - stats._old_stats._general._total_collections_at_start; + + // Calculate extra young collection overhead predicted for a number of future + // young collections, due to not freeing up memory in the old generation. + const double extra_young_gc_time_for_lookahead = extra_young_gc_time * lookahead; + + log_debug(gc, director)("Rule Major: Allocation Rate, ExtraYoungGCTime: %.3fs, OldGCTime: %.3fs, Lookahead: %d, ExtraYoungGCTimeForLookahead: %.3fs", + extra_young_gc_time, old_gc_time, lookahead, extra_young_gc_time_for_lookahead); + + // If we continue doing as many minor collections as we already did since the + // last major collection (N), without doing a major collection, then the minor + // GC effort of freeing up memory for another N cycles, plus the effort of doing, + // a major GC combined, is lower compared to the extra GC overhead per minor + // collection, freeing an equal amount of memory, at a higher GC frequency. 
+ // In other words, the cost for minor collections of not doing a major collection + // will seemingly be greater than the cost of doing a major collection and getting + // cheaper minor collections for a time to come. + const bool can_amortize_time_cost = extra_young_gc_time_for_lookahead > old_gc_time; + + // If the garbage is cheaper to reap in the old generation, then it makes sense + // to upgrade minor collections to major collections. + const bool old_garbage_is_cheaper = current_old_gc_time_per_bytes_freed < current_young_gc_time_per_bytes_freed; + + return can_amortize_time_cost || old_garbage_is_cheaper || is_major_urgent(stats); +} + +static double calculate_young_to_old_worker_ratio(const ZDirectorStats& stats) { + const double young_gc_time = gc_time(stats._young_stats); + const double old_gc_time = gc_time(stats._old_stats); + const size_t reclaimed_per_young_gc = stats._young_stats._stat_heap._reclaimed_avg; + const size_t reclaimed_per_old_gc = stats._old_stats._stat_heap._reclaimed_avg; + const double current_young_bytes_freed_per_gc_time = double(reclaimed_per_young_gc) / double(young_gc_time); + const double current_old_bytes_freed_per_gc_time = double(reclaimed_per_old_gc) / double(old_gc_time); + const double old_vs_young_efficiency_ratio = current_old_bytes_freed_per_gc_time / current_young_bytes_freed_per_gc_time; + + return old_vs_young_efficiency_ratio; +} + +static bool rule_major_proactive(const ZDirectorStats& stats) { + if (ZCollectionIntervalOnly) { + // Rule disabled + return false; + } + + if (!ZProactive) { + // Rule disabled + return false; + } + + if (!stats._old_stats._cycle._is_warm) { + // Rule disabled + return false; } // Perform GC if the impact of doing so, in terms of application throughput @@ -333,74 +560,359 @@ static ZDriverRequest rule_proactive() { // 10% of the max capacity since the previous GC, or more than 5 minutes has // passed since the previous GC. This helps avoid superfluous GCs when running // applications with very low allocation rate. 
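The cost/benefit reasoning in rule_major_allocation_rate above can be followed with concrete numbers; the sketch below uses invented statistics and simplified units (MB instead of bytes) and is not HotSpot code:

    #include <cstdio>

    int main() {
      const double young_gc_time = 0.8;            // seconds per young collection
      const double old_gc_time   = 6.0;            // seconds per old collection
      const double reclaimed_per_young_gc = 512.0; // MB freed per young collection
      const double reclaimed_per_old_gc   = 2048.0;// MB freed per old collection
      const double old_garbage            = 1024.0;// MB of garbage sitting in the old generation
      const int    lookahead              = 12;    // young collections since the last major collection

      // GC cost per MB reclaimed, for each generation.
      const double young_time_per_mb = young_gc_time / reclaimed_per_young_gc;
      const double old_time_per_mb   = old_gc_time / reclaimed_per_old_gc;

      // Extra young GC time per cycle caused by *not* freeing the old garbage: the same
      // young cycle would effectively also cover old_garbage MB if an old collection had run.
      const double potential_young_time_per_mb = young_gc_time / (reclaimed_per_young_gc + old_garbage);
      const double extra_young_gc_time = (young_time_per_mb - potential_young_time_per_mb) *
                                         (reclaimed_per_young_gc + old_garbage);

      // Trigger a major collection if the extra young-GC time over the lookahead window
      // amortizes the cost of the old collection, or if old garbage is simply cheaper to reap.
      const bool can_amortize = extra_young_gc_time * lookahead > old_gc_time;
      const bool old_is_cheaper = old_time_per_mb < young_time_per_mb;

      std::printf("extra young GC time/cycle: %.3fs, amortize: %s, old cheaper: %s\n",
                  extra_young_gc_time, can_amortize ? "yes" : "no", old_is_cheaper ? "yes" : "no");
      return 0;
    }

Here the extra young-GC time over twelve cycles (about 19s) easily amortizes a 6s old collection, so a major collection would be triggered even though old garbage happens to be more expensive to reclaim per MB in this example.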
- const size_t used_after_last_gc = ZStatHeap::used_at_relocate_end(); - const size_t used_increase_threshold = ZHeap::heap()->soft_max_capacity() * 0.10; // 10% + const size_t used_after_last_gc = stats._old_stats._stat_heap._used_at_relocate_end; + const size_t used_increase_threshold = stats._heap._soft_max_heap_size * 0.10; // 10% const size_t used_threshold = used_after_last_gc + used_increase_threshold; - const size_t used = ZHeap::heap()->used(); - const double time_since_last_gc = ZStatCycle::time_since_last(); + const size_t used = stats._heap._used; + const double time_since_last_gc = stats._old_stats._cycle._time_since_last; const double time_since_last_gc_threshold = 5 * 60; // 5 minutes if (used < used_threshold && time_since_last_gc < time_since_last_gc_threshold) { // Don't even consider doing a proactive GC - log_debug(gc, director)("Rule: Proactive, UsedUntilEnabled: " SIZE_FORMAT "MB, TimeUntilEnabled: %.3fs", + log_debug(gc, director)("Rule Major: Proactive, UsedUntilEnabled: " SIZE_FORMAT "MB, TimeUntilEnabled: %.3fs", (used_threshold - used) / M, time_since_last_gc_threshold - time_since_last_gc); - return GCCause::_no_gc; + return false; } const double assumed_throughput_drop_during_gc = 0.50; // 50% const double acceptable_throughput_drop = 0.01; // 1% - const double serial_gc_time = ZStatCycle::serial_time().davg() + (ZStatCycle::serial_time().dsd() * one_in_1000); - const double parallelizable_gc_time = ZStatCycle::parallelizable_time().davg() + (ZStatCycle::parallelizable_time().dsd() * one_in_1000); - const double gc_duration = serial_gc_time + (parallelizable_gc_time / ConcGCThreads); + const double serial_old_gc_time = stats._old_stats._cycle._avg_serial_time + (stats._old_stats._cycle._sd_serial_time * one_in_1000); + const double parallelizable_old_gc_time = stats._old_stats._cycle._avg_parallelizable_time + (stats._old_stats._cycle._sd_parallelizable_time * one_in_1000); + const double serial_young_gc_time = stats._young_stats._cycle._avg_serial_time + (stats._young_stats._cycle._sd_serial_time * one_in_1000); + const double parallelizable_young_gc_time = stats._young_stats._cycle._avg_parallelizable_time + (stats._young_stats._cycle._sd_parallelizable_time * one_in_1000); + const double serial_gc_time = serial_old_gc_time + serial_young_gc_time; + const double parallelizable_gc_time = parallelizable_old_gc_time + parallelizable_young_gc_time; + const double gc_duration = serial_gc_time + parallelizable_gc_time; const double acceptable_gc_interval = gc_duration * ((assumed_throughput_drop_during_gc / acceptable_throughput_drop) - 1.0); const double time_until_gc = acceptable_gc_interval - time_since_last_gc; - log_debug(gc, director)("Rule: Proactive, AcceptableGCInterval: %.3fs, TimeSinceLastGC: %.3fs, TimeUntilGC: %.3fs", + log_debug(gc, director)("Rule Major: Proactive, AcceptableGCInterval: %.3fs, TimeSinceLastGC: %.3fs, TimeUntilGC: %.3fs", acceptable_gc_interval, time_since_last_gc, time_until_gc); - if (time_until_gc > 0) { + return time_until_gc <= 0; +} + +static GCCause::Cause make_minor_gc_decision(const ZDirectorStats& stats) { + if (ZDriver::minor()->is_busy()) { + return GCCause::_no_gc; + } + + if (ZDriver::major()->is_busy() && !stats._old_stats._resize._is_active) { return GCCause::_no_gc; } - return GCCause::_z_proactive; + if (rule_minor_timer(stats)) { + return GCCause::_z_timer; + } + + if (rule_minor_allocation_rate(stats)) { + return GCCause::_z_allocation_rate; + } + + if (rule_minor_high_usage(stats)) { + return 
GCCause::_z_high_usage; + } + + return GCCause::_no_gc; } -static ZDriverRequest make_gc_decision() { - // List of rules - using ZDirectorRule = ZDriverRequest (*)(); - const ZDirectorRule rules[] = { - rule_allocation_stall, - rule_warmup, - rule_timer, - rule_allocation_rate, - rule_high_usage, - rule_proactive, +static GCCause::Cause make_major_gc_decision(const ZDirectorStats& stats) { + if (ZDriver::major()->is_busy()) { + return GCCause::_no_gc; + } + + if (rule_major_timer(stats)) { + return GCCause::_z_timer; + } + + if (rule_major_warmup(stats)) { + return GCCause::_z_warmup; + } + + if (rule_major_proactive(stats)) { + return GCCause::_z_proactive; + } + + return GCCause::_no_gc; +} + +static ZWorkerResizeStats sample_worker_resize_stats(ZStatCycleStats& cycle_stats, ZStatWorkersStats& worker_stats, ZWorkers* workers) { + ZLocker locker(workers->resizing_lock()); + + if (!workers->is_active()) { + // If the workers are not active, it isn't safe to read stats + // from the stat_cycle, so return early. + return { + false, // _is_active + 0.0, // _serial_gc_time_passed + 0.0, // _parallel_gc_time_passed + 0 // _nworkers_current + }; + } + + const double parallel_gc_duration_passed = worker_stats._accumulated_duration; + const double parallel_gc_time_passed = worker_stats._accumulated_time; + const double serial_gc_time_passed = cycle_stats._duration_since_start - parallel_gc_duration_passed; + const uint active_nworkers = workers->active_workers(); + + return { + true, // _is_active + serial_gc_time_passed, // _serial_gc_time_passed + parallel_gc_time_passed, // _parallel_gc_time_passed + active_nworkers // _nworkers_current }; +} - // Execute rules - for (size_t i = 0; i < ARRAY_SIZE(rules); i++) { - const ZDriverRequest request = rules[i](); - if (request.cause() != GCCause::_no_gc) { - return request; +// Output information for select_worker_threads +struct ZWorkerCounts { + uint _young_workers; + uint _old_workers; +}; + +enum class ZWorkerSelectionType { + start_major, + minor_during_old, + normal +}; + +static ZWorkerCounts select_worker_threads(const ZDirectorStats& stats, uint young_workers, ZWorkerSelectionType type) { + const uint active_young_workers = stats._young_stats._resize._nworkers_current; + const uint active_old_workers = stats._old_stats._resize._nworkers_current; + + if (ZHeap::heap()->is_alloc_stalling()) { + // Boost GC threads when stalling + return {ZYoungGCThreads, ZOldGCThreads}; + } else if (active_young_workers + active_old_workers > ConcGCThreads) { + // Threads are boosted, due to stalling recently; retain that boosting + return {active_young_workers, active_old_workers}; + } + + const double young_to_old_ratio = calculate_young_to_old_worker_ratio(stats); + uint old_workers = clamp(uint(young_workers * young_to_old_ratio), 1u, ZOldGCThreads); + + if (type != ZWorkerSelectionType::normal && old_workers + young_workers > ConcGCThreads) { + // We need to somehow clamp the GC threads so the two generations don't exceed ConcGCThreads + const double old_ratio = (young_to_old_ratio / (1.0 + young_to_old_ratio)); + const double young_ratio = 1.0 - old_ratio; + const uint young_workers_clamped = clamp(uint(ConcGCThreads * young_ratio), 1u, ZYoungGCThreads); + const uint old_workers_clamped = clamp(ConcGCThreads - young_workers_clamped, 1u, ZOldGCThreads); + + if (type == ZWorkerSelectionType::start_major) { + // Adjust down the old workers so the next minor during major will be less sad + old_workers = old_workers_clamped; + // Since collecting the old 
generation depends on the initial young collection + // finishing, we don't want it to have fewer workers than the old generation. + young_workers = MAX2(old_workers, young_workers); + } else if (type == ZWorkerSelectionType::minor_during_old) { + // Adjust young and old workers for minor during old to fit within ConcGCThreads + young_workers = young_workers_clamped; + old_workers = old_workers_clamped; } } - return GCCause::_no_gc; + return {young_workers, old_workers}; +} + +static void adjust_gc(const ZDirectorStats& stats) { + if (!UseDynamicNumberOfGCThreads) { + return; + } + + const ZWorkerResizeStats young_resize_stats = stats._young_stats._resize; + const ZWorkerResizeStats old_resize_stats = stats._old_stats._resize; + + if (!young_resize_stats._is_active) { + // Young generation collection is not running. We only resize the number + // of threads when the young generation is running. The number of threads + // for the old generation is modelled as a ratio of the number of threads + // needed in the young generation. If we don't need to GC the young generation + // at all, then we don't have anything to scale with, and the allocation + // pressure on the GC can't be that high. If it is, a minor collection will + // start, and inform us how to scale the old threads. + return; + } + + const ZDriverRequest request = rule_semi_hard_minor_allocation_rate_dynamic(stats, + young_resize_stats._serial_gc_time_passed, + young_resize_stats._parallel_gc_time_passed); + if (request.cause() == GCCause::_no_gc) { + // No urgency + return; + } + + uint desired_young_workers = MAX2(request.young_nworkers(), young_resize_stats._nworkers_current); + + if (desired_young_workers > young_resize_stats._nworkers_current) { + // We need to increase workers + const uint needed_young_increase = desired_young_workers - young_resize_stats._nworkers_current; + // We want to increase by more than the minimum amount to ensure that + // there are enough margins, but also to avoid too frequent resizing. + const uint desired_young_increase = needed_young_increase * 2; + desired_young_workers = MIN2(young_resize_stats._nworkers_current + desired_young_increase, ZYoungGCThreads); + } + + const uint young_current_workers = young_resize_stats._nworkers_current; + const uint old_current_workers = old_resize_stats._nworkers_current; + + const bool minor_during_old = old_resize_stats._is_active; + ZWorkerSelectionType type = minor_during_old ? 
ZWorkerSelectionType::minor_during_old + : ZWorkerSelectionType::normal; + + const ZWorkerCounts selection = select_worker_threads(stats, desired_young_workers, type); + + if (old_resize_stats._is_active && old_current_workers != selection._old_workers) { + ZGeneration::old()->workers()->request_resize_workers(selection._old_workers); + } + if (young_current_workers != selection._young_workers) { + ZGeneration::young()->workers()->request_resize_workers(selection._young_workers); + } +} + +static ZWorkerCounts initial_workers(const ZDirectorStats& stats, ZWorkerSelectionType type) { + if (!UseDynamicNumberOfGCThreads) { + return {ZYoungGCThreads, ZOldGCThreads}; + } + + const ZDriverRequest soft_request = rule_soft_minor_allocation_rate_dynamic(stats, 0.0 /* serial_gc_time_passed */, 0.0 /* parallel_gc_time_passed */); + const ZDriverRequest hard_request = rule_hard_minor_allocation_rate_dynamic(stats, 0.0 /* serial_gc_time_passed */, 0.0 /* parallel_gc_time_passed */); + const uint young_workers = MAX3(1u, soft_request.young_nworkers(), hard_request.young_nworkers()); + + return select_worker_threads(stats, young_workers, type); +} + +static void start_major_gc(const ZDirectorStats& stats, GCCause::Cause cause) { + const ZWorkerCounts selection = initial_workers(stats, ZWorkerSelectionType::start_major); + const ZDriverRequest request(cause, selection._young_workers, selection._old_workers); + ZDriver::major()->collect(request); +} + +static void start_minor_gc(const ZDirectorStats& stats, GCCause::Cause cause) { + const ZWorkerSelectionType type = ZDriver::major()->is_busy() + ? ZWorkerSelectionType::minor_during_old + : ZWorkerSelectionType::normal; + const ZWorkerCounts selection = initial_workers(stats, type); + + if (UseDynamicNumberOfGCThreads && ZDriver::major()->is_busy()) { + const ZWorkerResizeStats old_resize_stats = stats._old_stats._resize; + const uint old_current_workers = old_resize_stats._nworkers_current; + + if (old_current_workers != selection._old_workers) { + ZGeneration::old()->workers()->request_resize_workers(selection._old_workers); + } + } + + const ZDriverRequest request(cause, selection._young_workers, 0); + ZDriver::minor()->collect(request); +} + +static bool start_gc(const ZDirectorStats& stats) { + // Try start major collections first as they include a minor collection + const GCCause::Cause major_cause = make_major_gc_decision(stats); + if (major_cause != GCCause::_no_gc) { + start_major_gc(stats, major_cause); + return true; + } + + const GCCause::Cause minor_cause = make_minor_gc_decision(stats); + if (minor_cause != GCCause::_no_gc) { + if (!ZDriver::major()->is_busy() && rule_major_allocation_rate(stats)) { + // Merge minor GC into major GC + start_major_gc(stats, GCCause::_z_allocation_rate); + } else { + start_minor_gc(stats, minor_cause); + } + + return true; + } + + return false; +} + +void ZDirector::evaluate_rules() { + ZLocker locker(&_director->_monitor); + _director->_monitor.notify(); +} + +bool ZDirector::wait_for_tick() { + const uint64_t interval_ms = MILLIUNITS / decision_hz; + + ZLocker locker(&_monitor); + + if (_stopped) { + // Stopped + return false; + } + + // Wait + _monitor.wait(interval_ms); + return true; +} + +static ZDirectorHeapStats sample_heap_stats() { + const ZHeap* const heap = ZHeap::heap(); + const ZCollectedHeap* const collected_heap = ZCollectedHeap::heap(); + return { + heap->soft_max_capacity(), + heap->used(), + collected_heap->total_collections() + }; +} + +// This function samples all the stat values used by the 
heuristics to compute what to do. +// This is where synchronization code goes to ensure that the values we read are valid. +static ZDirectorStats sample_stats() { + ZGenerationYoung* young = ZGeneration::young(); + ZGenerationOld* old = ZGeneration::old(); + const ZStatMutatorAllocRateStats mutator_alloc_rate = ZStatMutatorAllocRate::stats(); + const ZDirectorHeapStats heap = sample_heap_stats(); + + ZStatCycleStats young_cycle = young->stat_cycle()->stats(); + ZStatCycleStats old_cycle = old->stat_cycle()->stats(); + + ZStatWorkersStats young_workers = young->stat_workers()->stats(); + ZStatWorkersStats old_workers = old->stat_workers()->stats(); + + ZWorkerResizeStats young_resize = sample_worker_resize_stats(young_cycle, young_workers, young->workers()); + ZWorkerResizeStats old_resize = sample_worker_resize_stats(old_cycle, old_workers, old->workers()); + + ZStatHeapStats young_stat_heap = young->stat_heap()->stats(); + ZStatHeapStats old_stat_heap = old->stat_heap()->stats(); + + ZDirectorGenerationGeneralStats young_generation = { ZHeap::heap()->used_young(), 0 }; + ZDirectorGenerationGeneralStats old_generation = { ZHeap::heap()->used_old(), old->total_collections_at_start() }; + + return { + mutator_alloc_rate, + heap, + { + young_cycle, + young_workers, + young_resize, + young_stat_heap, + young_generation + }, + { + old_cycle, + old_workers, + old_resize, + old_stat_heap, + old_generation + } + }; } -void ZDirector::run_service() { +void ZDirector::run_thread() { // Main loop - while (_metronome.wait_for_tick()) { - sample_allocation_rate(); - if (!_driver->is_busy()) { - const ZDriverRequest request = make_gc_decision(); - if (request.cause() != GCCause::_no_gc) { - _driver->collect(request); - } + while (wait_for_tick()) { + ZDirectorStats stats = sample_stats(); + if (!start_gc(stats)) { + adjust_gc(stats); } } } -void ZDirector::stop_service() { - _metronome.stop(); +void ZDirector::terminate() { + ZLocker locker(&_monitor); + _stopped = true; + _monitor.notify(); } diff --git a/src/hotspot/share/gc/z/zDirector.hpp b/src/hotspot/share/gc/z/zDirector.hpp index a46c4ba770211..73c556a2fbd46 100644 --- a/src/hotspot/share/gc/z/zDirector.hpp +++ b/src/hotspot/share/gc/z/zDirector.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,22 +24,27 @@ #ifndef SHARE_GC_Z_ZDIRECTOR_HPP #define SHARE_GC_Z_ZDIRECTOR_HPP -#include "gc/shared/concurrentGCThread.hpp" -#include "gc/z/zMetronome.hpp" +#include "gc/z/zLock.hpp" +#include "gc/z/zThread.hpp" -class ZDriver; - -class ZDirector : public ConcurrentGCThread { +class ZDirector : public ZThread { private: - ZDriver* const _driver; - ZMetronome _metronome; + static const uint64_t decision_hz = 100; + static ZDirector* _director; + + ZConditionLock _monitor; + bool _stopped; + + bool wait_for_tick(); protected: - virtual void run_service(); - virtual void stop_service(); + virtual void run_thread(); + virtual void terminate(); public: - ZDirector(ZDriver* driver); + ZDirector(); + + static void evaluate_rules(); }; #endif // SHARE_GC_Z_ZDIRECTOR_HPP diff --git a/src/hotspot/share/gc/z/zDriver.cpp b/src/hotspot/share/gc/z/zDriver.cpp index 4388febe2809b..f8ae47399e506 100644 --- a/src/hotspot/share/gc/z/zDriver.cpp +++ b/src/hotspot/share/gc/z/zDriver.cpp @@ -22,216 +22,319 @@ */ #include "precompiled.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/shared/gcCause.hpp" #include "gc/shared/gcId.hpp" -#include "gc/shared/gcLocker.hpp" -#include "gc/shared/gcVMOperations.hpp" -#include "gc/shared/isGCActiveMark.hpp" #include "gc/z/zAbort.inline.hpp" #include "gc/z/zBreakpoint.hpp" #include "gc/z/zCollectedHeap.hpp" +#include "gc/z/zDirector.hpp" #include "gc/z/zDriver.hpp" +#include "gc/z/zGCIdPrinter.hpp" +#include "gc/z/zGeneration.inline.hpp" #include "gc/z/zHeap.inline.hpp" -#include "gc/z/zMessagePort.inline.hpp" +#include "gc/z/zLock.inline.hpp" #include "gc/z/zServiceability.hpp" #include "gc/z/zStat.hpp" -#include "gc/z/zVerify.hpp" -#include "logging/log.hpp" -#include "memory/universe.hpp" -#include "runtime/threads.hpp" -#include "runtime/vmOperations.hpp" -#include "runtime/vmThread.hpp" -static const ZStatPhaseCycle ZPhaseCycle("Garbage Collection Cycle"); -static const ZStatPhasePause ZPhasePauseMarkStart("Pause Mark Start"); -static const ZStatPhaseConcurrent ZPhaseConcurrentMark("Concurrent Mark"); -static const ZStatPhaseConcurrent ZPhaseConcurrentMarkContinue("Concurrent Mark Continue"); -static const ZStatPhaseConcurrent ZPhaseConcurrentMarkFree("Concurrent Mark Free"); -static const ZStatPhasePause ZPhasePauseMarkEnd("Pause Mark End"); -static const ZStatPhaseConcurrent ZPhaseConcurrentProcessNonStrongReferences("Concurrent Process Non-Strong References"); -static const ZStatPhaseConcurrent ZPhaseConcurrentResetRelocationSet("Concurrent Reset Relocation Set"); -static const ZStatPhaseConcurrent ZPhaseConcurrentSelectRelocationSet("Concurrent Select Relocation Set"); -static const ZStatPhasePause ZPhasePauseRelocateStart("Pause Relocate Start"); -static const ZStatPhaseConcurrent ZPhaseConcurrentRelocated("Concurrent Relocate"); -static const ZStatCriticalPhase ZCriticalPhaseGCLockerStall("GC Locker Stall", false /* verbose */); -static const ZStatSampler ZSamplerJavaThreads("System", "Java Threads", ZStatUnitThreads); +static const ZStatPhaseCollection ZPhaseCollectionMinor("Minor Collection", true /* minor */); +static const ZStatPhaseCollection ZPhaseCollectionMajor("Major Collection", false /* minor */); -ZDriverRequest::ZDriverRequest() : - ZDriverRequest(GCCause::_no_gc) {} +template +class ZGCCauseSetter : public GCCauseSetter { +private: + DriverT* _driver; -ZDriverRequest::ZDriverRequest(GCCause::Cause cause) : - ZDriverRequest(cause, ConcGCThreads) {} +public: + 
ZGCCauseSetter(DriverT* driver, GCCause::Cause cause) : + GCCauseSetter(ZCollectedHeap::heap(), cause), + _driver(driver) { + _driver->set_gc_cause(cause); + } -ZDriverRequest::ZDriverRequest(GCCause::Cause cause, uint nworkers) : - _cause(cause), - _nworkers(nworkers) {} + ~ZGCCauseSetter() { + _driver->set_gc_cause(GCCause::_no_gc); + } +}; + +ZLock* ZDriver::_lock; +ZDriverMinor* ZDriver::_minor; +ZDriverMajor* ZDriver::_major; -bool ZDriverRequest::operator==(const ZDriverRequest& other) const { - return _cause == other._cause; +void ZDriver::initialize() { + _lock = new ZLock(); } -GCCause::Cause ZDriverRequest::cause() const { - return _cause; +void ZDriver::lock() { + _lock->lock(); } -uint ZDriverRequest::nworkers() const { - return _nworkers; +void ZDriver::unlock() { + _lock->unlock(); } -class VM_ZOperation : public VM_Operation { -private: - const uint _gc_id; - bool _gc_locked; - bool _success; +void ZDriver::set_minor(ZDriverMinor* minor) { + _minor = minor; +} -public: - VM_ZOperation() : - _gc_id(GCId::current()), - _gc_locked(false), - _success(false) {} - - virtual bool needs_inactive_gc_locker() const { - // An inactive GC locker is needed in operations where we change the bad - // mask or move objects. Changing the bad mask will invalidate all oops, - // which makes it conceptually the same thing as moving all objects. - return false; - } +void ZDriver::set_major(ZDriverMajor* major) { + _major = major; +} - virtual bool skip_thread_oop_barriers() const { - return true; - } +ZDriverMinor* ZDriver::minor() { + return _minor; +} - virtual bool do_operation() = 0; +ZDriverMajor* ZDriver::major() { + return _major; +} - virtual bool doit_prologue() { - Heap_lock->lock(); - return true; - } +ZDriverLocker::ZDriverLocker() { + ZDriver::lock(); +} - virtual void doit() { - // Abort if GC locker state is incompatible - if (needs_inactive_gc_locker() && GCLocker::check_active_before_gc()) { - _gc_locked = true; - return; - } +ZDriverLocker::~ZDriverLocker() { + ZDriver::unlock(); +} - // Setup GC id and active marker - GCIdMark gc_id_mark(_gc_id); - IsGCActiveMark gc_active_mark; +ZDriverUnlocker::ZDriverUnlocker() { + ZDriver::unlock(); +} - // Verify before operation - ZVerify::before_zoperation(); +ZDriverUnlocker::~ZDriverUnlocker() { + ZDriver::lock(); +} - // Execute operation - _success = do_operation(); +ZDriver::ZDriver() : + _gc_cause(GCCause::_no_gc) { +} - // Update statistics - ZStatSample(ZSamplerJavaThreads, Threads::number_of_threads()); - } +void ZDriver::set_gc_cause(GCCause::Cause cause) { + _gc_cause = cause; +} - virtual void doit_epilogue() { - Heap_lock->unlock(); - } +GCCause::Cause ZDriver::gc_cause() { + return _gc_cause; +} - bool gc_locked() const { - return _gc_locked; - } +ZDriverMinor::ZDriverMinor() : + ZDriver(), + _port(), + _gc_timer(), + _jfr_tracer(), + _used_at_start() { + ZDriver::set_minor(this); + set_name("ZDriverMinor"); + create_and_start(); +} + +bool ZDriverMinor::is_busy() const { + return _port.is_busy(); +} - bool success() const { - return _success; +void ZDriverMinor::collect(const ZDriverRequest& request) { + switch (request.cause()) { + case GCCause::_wb_young_gc: + // Start synchronous GC + _port.send_sync(request); + break; + + case GCCause::_scavenge_alot: + case GCCause::_z_timer: + case GCCause::_z_allocation_rate: + case GCCause::_z_allocation_stall: + case GCCause::_z_high_usage: + // Start asynchronous GC + _port.send_async(request); + break; + + default: + fatal("Unsupported GC cause (%s)", 
GCCause::to_string(request.cause())); + break; } }; -class VM_ZMarkStart : public VM_ZOperation { -public: - virtual VMOp_Type type() const { - return VMOp_ZMarkStart; - } +GCTracer* ZDriverMinor::jfr_tracer() { + return &_jfr_tracer; +} - virtual bool needs_inactive_gc_locker() const { - return true; - } +void ZDriverMinor::set_used_at_start(size_t used) { + _used_at_start = used; +} - virtual bool do_operation() { - ZStatTimer timer(ZPhasePauseMarkStart); - ZServiceabilityPauseTracer tracer; +size_t ZDriverMinor::used_at_start() const { + return _used_at_start; +} - ZCollectedHeap::heap()->increment_total_collections(true /* full */); +class ZDriverScopeMinor : public StackObj { +private: + GCIdMark _gc_id; + GCCause::Cause _gc_cause; + ZGCCauseSetter _gc_cause_setter; + ZStatTimer _stat_timer; + ZServiceabilityCycleTracer _tracer; - ZHeap::heap()->mark_start(); - return true; +public: + ZDriverScopeMinor(const ZDriverRequest& request, ConcurrentGCTimer* gc_timer) : + _gc_id(), + _gc_cause(request.cause()), + _gc_cause_setter(ZDriver::minor(), _gc_cause), + _stat_timer(ZPhaseCollectionMinor, gc_timer), + _tracer(true /* minor */) { + // Select number of worker threads to use + ZGeneration::young()->set_active_workers(request.young_nworkers()); } }; -class VM_ZMarkEnd : public VM_ZOperation { -public: - virtual VMOp_Type type() const { - return VMOp_ZMarkEnd; - } +void ZDriverMinor::gc(const ZDriverRequest& request) { + ZDriverScopeMinor scope(request, &_gc_timer); + ZGCIdMinor minor_id(gc_id()); + ZGeneration::young()->collect(ZYoungType::minor, &_gc_timer); +} - virtual bool do_operation() { - ZStatTimer timer(ZPhasePauseMarkEnd); - ZServiceabilityPauseTracer tracer; - return ZHeap::heap()->mark_end(); - } -}; +static void handle_alloc_stalling_for_young() { + ZHeap::heap()->handle_alloc_stalling_for_young(); +} -class VM_ZRelocateStart : public VM_ZOperation { -public: - virtual VMOp_Type type() const { - return VMOp_ZRelocateStart; +void ZDriverMinor::handle_alloc_stalls() const { + handle_alloc_stalling_for_young(); +} + +void ZDriverMinor::run_thread() { + // Main loop + for (;;) { + // Wait for GC request + const ZDriverRequest request = _port.receive(); + + ZDriverLocker locker; + + abortpoint(); + + // Run GC + gc(request); + + abortpoint(); + + // Notify GC completed + _port.ack(); + + // Handle allocation stalls + handle_alloc_stalls(); + + // Good point to consider back-to-back GC + ZDirector::evaluate_rules(); } +} + +void ZDriverMinor::terminate() { + const ZDriverRequest request(GCCause::_no_gc, 0, 0); + _port.send_async(request); +} - virtual bool needs_inactive_gc_locker() const { +static bool should_clear_soft_references(GCCause::Cause cause) { + // Clear soft references if implied by the GC cause + switch (cause) { + case GCCause::_wb_full_gc: + case GCCause::_metadata_GC_clear_soft_refs: + case GCCause::_z_allocation_stall: return true; + + case GCCause::_heap_dump: + case GCCause::_heap_inspection: + case GCCause::_wb_breakpoint: + case GCCause::_dcmd_gc_run: + case GCCause::_java_lang_system_gc: + case GCCause::_full_gc_alot: + case GCCause::_jvmti_force_gc: + case GCCause::_z_timer: + case GCCause::_z_warmup: + case GCCause::_z_allocation_rate: + case GCCause::_z_proactive: + case GCCause::_metadata_GC_threshold: + case GCCause::_codecache_GC_threshold: + case GCCause::_codecache_GC_aggressive: + break; + + default: + fatal("Unsupported GC cause (%s)", GCCause::to_string(cause)); + break; } - virtual bool do_operation() { - ZStatTimer 
timer(ZPhasePauseRelocateStart); - ZServiceabilityPauseTracer tracer; - ZHeap::heap()->relocate_start(); + // Clear soft references if threads are stalled waiting for an old collection + if (ZHeap::heap()->is_alloc_stalling_for_old()) { return true; } -}; -class VM_ZVerify : public VM_Operation { -public: - virtual VMOp_Type type() const { - return VMOp_ZVerify; - } + // Don't clear + return false; +} - virtual bool skip_thread_oop_barriers() const { +static bool should_preclean_young(GCCause::Cause cause) { + // Preclean young if implied by the GC cause + switch (cause) { + case GCCause::_heap_dump: + case GCCause::_heap_inspection: + case GCCause::_wb_full_gc: + case GCCause::_wb_breakpoint: + case GCCause::_dcmd_gc_run: + case GCCause::_java_lang_system_gc: + case GCCause::_full_gc_alot: + case GCCause::_jvmti_force_gc: + case GCCause::_metadata_GC_clear_soft_refs: + case GCCause::_z_allocation_stall: return true; + + case GCCause::_z_timer: + case GCCause::_z_warmup: + case GCCause::_z_allocation_rate: + case GCCause::_z_proactive: + case GCCause::_metadata_GC_threshold: + case GCCause::_codecache_GC_threshold: + case GCCause::_codecache_GC_aggressive: + break; + + default: + fatal("Unsupported GC cause (%s)", GCCause::to_string(cause)); + break; } - virtual void doit() { - ZVerify::after_weak_processing(); + // Preclean young if threads are stalled waiting for an old collection + if (ZHeap::heap()->is_alloc_stalling_for_old()) { + return true; } -}; -ZDriver::ZDriver() : - _gc_cycle_port(), - _gc_locker_port() { - set_name("ZDriver"); + // Preclean young if implied by configuration + return ScavengeBeforeFullGC; +} + +ZDriverMajor::ZDriverMajor() : + ZDriver(), + _port(), + _gc_timer(), + _jfr_tracer(), + _used_at_start() { + ZDriver::set_major(this); + set_name("ZDriverMajor"); create_and_start(); } -bool ZDriver::is_busy() const { - return _gc_cycle_port.is_busy(); +bool ZDriverMajor::is_busy() const { + return _port.is_busy(); } -void ZDriver::collect(const ZDriverRequest& request) { +void ZDriverMajor::collect(const ZDriverRequest& request) { switch (request.cause()) { - case GCCause::_wb_young_gc: + case GCCause::_heap_dump: + case GCCause::_heap_inspection: case GCCause::_wb_full_gc: case GCCause::_dcmd_gc_run: case GCCause::_java_lang_system_gc: case GCCause::_full_gc_alot: - case GCCause::_scavenge_alot: case GCCause::_jvmti_force_gc: case GCCause::_metadata_GC_clear_soft_refs: case GCCause::_codecache_GC_aggressive: // Start synchronous GC - _gc_cycle_port.send_sync(request); + _port.send_sync(request); break; case GCCause::_z_timer: @@ -239,273 +342,142 @@ void ZDriver::collect(const ZDriverRequest& request) { case GCCause::_z_allocation_rate: case GCCause::_z_allocation_stall: case GCCause::_z_proactive: - case GCCause::_z_high_usage: case GCCause::_codecache_GC_threshold: case GCCause::_metadata_GC_threshold: // Start asynchronous GC - _gc_cycle_port.send_async(request); - break; - - case GCCause::_gc_locker: - // Restart VM operation previously blocked by the GC locker - _gc_locker_port.signal(); + _port.send_async(request); break; case GCCause::_wb_breakpoint: ZBreakpoint::start_gc(); - _gc_cycle_port.send_async(request); + _port.send_async(request); break; default: - // Other causes not supported fatal("Unsupported GC cause (%s)", GCCause::to_string(request.cause())); break; } } -template -bool ZDriver::pause() { - for (;;) { - T op; - VMThread::execute(&op); - if (op.gc_locked()) { - // Wait for GC to become unlocked and restart the VM operation - ZStatTimer 
timer(ZCriticalPhaseGCLockerStall); - _gc_locker_port.wait(); - continue; - } - - // Notify VM operation completed - _gc_locker_port.ack(); - - return op.success(); - } -} - -void ZDriver::pause_mark_start() { - pause(); +GCTracer* ZDriverMajor::jfr_tracer() { + return &_jfr_tracer; } -void ZDriver::concurrent_mark() { - ZStatTimer timer(ZPhaseConcurrentMark); - ZBreakpoint::at_after_marking_started(); - ZHeap::heap()->mark(true /* initial */); - ZBreakpoint::at_before_marking_completed(); +void ZDriverMajor::set_used_at_start(size_t used) { + _used_at_start = used; } -bool ZDriver::pause_mark_end() { - return pause(); +size_t ZDriverMajor::used_at_start() const { + return _used_at_start; } -void ZDriver::concurrent_mark_continue() { - ZStatTimer timer(ZPhaseConcurrentMarkContinue); - ZHeap::heap()->mark(false /* initial */); -} - -void ZDriver::concurrent_mark_free() { - ZStatTimer timer(ZPhaseConcurrentMarkFree); - ZHeap::heap()->mark_free(); -} - -void ZDriver::concurrent_process_non_strong_references() { - ZStatTimer timer(ZPhaseConcurrentProcessNonStrongReferences); - ZBreakpoint::at_after_reference_processing_started(); - ZHeap::heap()->process_non_strong_references(); -} - -void ZDriver::concurrent_reset_relocation_set() { - ZStatTimer timer(ZPhaseConcurrentResetRelocationSet); - ZHeap::heap()->reset_relocation_set(); -} - -void ZDriver::pause_verify() { - if (ZVerifyRoots || ZVerifyObjects) { - VM_ZVerify op; - VMThread::execute(&op); - } -} - -void ZDriver::concurrent_select_relocation_set() { - ZStatTimer timer(ZPhaseConcurrentSelectRelocationSet); - ZHeap::heap()->select_relocation_set(); -} - -void ZDriver::pause_relocate_start() { - pause(); -} - -void ZDriver::concurrent_relocate() { - ZStatTimer timer(ZPhaseConcurrentRelocated); - ZHeap::heap()->relocate(); -} - -void ZDriver::check_out_of_memory() { - ZHeap::heap()->check_out_of_memory(); -} - -static bool should_clear_soft_references(const ZDriverRequest& request) { - // Clear soft references if implied by the GC cause - if (request.cause() == GCCause::_wb_full_gc || - request.cause() == GCCause::_metadata_GC_clear_soft_refs || - request.cause() == GCCause::_z_allocation_stall) { - // Clear - return true; - } - - // Don't clear - return false; -} - -static uint select_active_worker_threads_dynamic(const ZDriverRequest& request) { - // Use requested number of worker threads - return request.nworkers(); -} - -static uint select_active_worker_threads_static(const ZDriverRequest& request) { - const GCCause::Cause cause = request.cause(); - const uint nworkers = request.nworkers(); - - // Boost number of worker threads if implied by the GC cause - if (cause == GCCause::_wb_full_gc || - cause == GCCause::_java_lang_system_gc || - cause == GCCause::_metadata_GC_clear_soft_refs || - cause == GCCause::_z_allocation_stall) { - // Boost - const uint boosted_nworkers = MAX2(nworkers, ParallelGCThreads); - return boosted_nworkers; - } - - // Use requested number of worker threads - return nworkers; -} - -static uint select_active_worker_threads(const ZDriverRequest& request) { - if (UseDynamicNumberOfGCThreads) { - return select_active_worker_threads_dynamic(request); - } else { - return select_active_worker_threads_static(request); - } -} - -class ZDriverGCScope : public StackObj { +class ZDriverScopeMajor : public StackObj { private: - GCIdMark _gc_id; - GCCause::Cause _gc_cause; - GCCauseSetter _gc_cause_setter; - ZStatTimer _timer; - ZServiceabilityCycleTracer _tracer; + GCIdMark _gc_id; + GCCause::Cause _gc_cause; + ZGCCauseSetter 
_gc_cause_setter; + ZStatTimer _stat_timer; + ZServiceabilityCycleTracer _tracer; public: - ZDriverGCScope(const ZDriverRequest& request) : + ZDriverScopeMajor(const ZDriverRequest& request, ConcurrentGCTimer* gc_timer) : _gc_id(), _gc_cause(request.cause()), - _gc_cause_setter(ZCollectedHeap::heap(), _gc_cause), - _timer(ZPhaseCycle), - _tracer() { - // Update statistics - ZStatCycle::at_start(); - + _gc_cause_setter(ZDriver::major(), _gc_cause), + _stat_timer(ZPhaseCollectionMajor, gc_timer), + _tracer(false /* minor */) { // Set up soft reference policy - const bool clear = should_clear_soft_references(request); - ZHeap::heap()->set_soft_reference_policy(clear); + const bool clear = should_clear_soft_references(request.cause()); + ZGeneration::old()->set_soft_reference_policy(clear); // Select number of worker threads to use - const uint nworkers = select_active_worker_threads(request); - ZHeap::heap()->set_active_workers(nworkers); + ZGeneration::young()->set_active_workers(request.young_nworkers()); + ZGeneration::old()->set_active_workers(request.old_nworkers()); } - ~ZDriverGCScope() { - // Update statistics - ZStatCycle::at_end(_gc_cause, ZHeap::heap()->active_workers()); - + ~ZDriverScopeMajor() { // Update data used by soft reference policy - Universe::heap()->update_capacity_and_used_at_gc(); + ZCollectedHeap::heap()->update_capacity_and_used_at_gc(); // Signal that we have completed a visit to all live objects - Universe::heap()->record_whole_heap_examined_timestamp(); + ZCollectedHeap::heap()->record_whole_heap_examined_timestamp(); } }; -// Macro to execute a termination check after a concurrent phase. Note -// that it's important that the termination check comes after the call -// to the function f, since we can't abort between pause_relocate_start() -// and concurrent_relocate(). We need to let concurrent_relocate() call -// abort_page() on the remaining entries in the relocation set. 
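For illustration (this sketch is not part of the patch): the run_thread() loops above serialize the minor and major drivers through the ZDriverLocker/ZDriverUnlocker scopes introduced earlier, holding the driver lock for the cycle and temporarily yielding it in an inverse scope. The following standalone C++ models just that idiom with a plain std::mutex; the names driver_lock, DriverLocker, DriverUnlocker and demo_gc_cycle() are invented for this sketch.

// Sketch only: models the ZDriverLocker/ZDriverUnlocker idiom with std::mutex.
#include <iostream>
#include <mutex>

static std::mutex driver_lock;   // stand-in for the static ZDriver lock

class DriverLocker {             // acquire on entry, release on exit
public:
  DriverLocker()  { driver_lock.lock(); }
  ~DriverLocker() { driver_lock.unlock(); }
};

class DriverUnlocker {           // inverse scope: release on entry, reacquire on exit
public:
  DriverUnlocker()  { driver_lock.unlock(); }
  ~DriverUnlocker() { driver_lock.lock(); }
};

// A driver cycle runs with the lock held, but can open a cooperative
// window where the other driver may run, and is guaranteed to hold the
// lock again once the window closes.
void demo_gc_cycle() {
  DriverLocker locker;
  std::cout << "cycle running under the driver lock\n";
  {
    DriverUnlocker unlocker;
    std::cout << "lock released for a cooperative window\n";
  }
  std::cout << "lock reacquired, cycle continues\n";
}

int main() {
  demo_gc_cycle();
  return 0;
}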
-#define concurrent(f) \ - do { \ - concurrent_##f(); \ - if (should_terminate()) { \ - return; \ - } \ - } while (false) - -void ZDriver::gc(const ZDriverRequest& request) { - ZDriverGCScope scope(request); - - // Phase 1: Pause Mark Start - pause_mark_start(); - - // Phase 2: Concurrent Mark - concurrent(mark); - - // Phase 3: Pause Mark End - while (!pause_mark_end()) { - // Phase 3.5: Concurrent Mark Continue - concurrent(mark_continue); +void ZDriverMajor::collect_young(const ZDriverRequest& request) { + ZGCIdMajor major_id(gc_id(), 'Y'); + if (should_preclean_young(request.cause())) { + // Collect young generation and promote everything to old generation + ZGeneration::young()->collect(ZYoungType::major_full_preclean, &_gc_timer); + + abortpoint(); + + // Collect young generation and gather roots pointing into old generation + ZGeneration::young()->collect(ZYoungType::major_full_roots, &_gc_timer); + } else { + // Collect young generation and gather roots pointing into old generation + ZGeneration::young()->collect(ZYoungType::major_partial_roots, &_gc_timer); } - // Phase 4: Concurrent Mark Free - concurrent(mark_free); + abortpoint(); + + // Handle allocations waiting for a young collection + handle_alloc_stalling_for_young(); +} + +void ZDriverMajor::collect_old() { + ZGCIdMajor major_id(gc_id(), 'O'); + ZGeneration::old()->collect(&_gc_timer); +} - // Phase 5: Concurrent Process Non-Strong References - concurrent(process_non_strong_references); +void ZDriverMajor::gc(const ZDriverRequest& request) { + ZDriverScopeMajor scope(request, &_gc_timer); - // Phase 6: Concurrent Reset Relocation Set - concurrent(reset_relocation_set); + // Collect the young generation + collect_young(request); - // Phase 7: Pause Verify - pause_verify(); + abortpoint(); - // Phase 8: Concurrent Select Relocation Set - concurrent(select_relocation_set); + // Collect the old generation + collect_old(); +} - // Phase 9: Pause Relocate Start - pause_relocate_start(); +static void handle_alloc_stalling_for_old() { + ZHeap::heap()->handle_alloc_stalling_for_old(); +} - // Phase 10: Concurrent Relocate - concurrent(relocate); +void ZDriverMajor::handle_alloc_stalls() const { + handle_alloc_stalling_for_old(); } -void ZDriver::run_service() { +void ZDriverMajor::run_thread() { // Main loop - while (!should_terminate()) { + for (;;) { // Wait for GC request - const ZDriverRequest request = _gc_cycle_port.receive(); - if (request.cause() == GCCause::_no_gc) { - continue; - } + const ZDriverRequest request = _port.receive(); + + ZDriverLocker locker; ZBreakpoint::at_before_gc(); + abortpoint(); + // Run GC gc(request); - if (should_terminate()) { - // Abort - break; - } + abortpoint(); // Notify GC completed - _gc_cycle_port.ack(); + _port.ack(); - // Check for out of memory condition - check_out_of_memory(); + // Handle allocation stalls + handle_alloc_stalls(); ZBreakpoint::at_after_gc(); } } -void ZDriver::stop_service() { - ZAbort::abort(); - _gc_cycle_port.send_async(GCCause::_no_gc); +void ZDriverMajor::terminate() { + const ZDriverRequest request(GCCause::_no_gc, 0, 0); + _port.send_async(request); } diff --git a/src/hotspot/share/gc/z/zDriver.hpp b/src/hotspot/share/gc/z/zDriver.hpp index 08b1b80f2aa35..5f1fe08a0b6ae 100644 --- a/src/hotspot/share/gc/z/zDriver.hpp +++ b/src/hotspot/share/gc/z/zDriver.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,61 +24,123 @@ #ifndef SHARE_GC_Z_ZDRIVER_HPP #define SHARE_GC_Z_ZDRIVER_HPP -#include "gc/shared/concurrentGCThread.hpp" -#include "gc/shared/gcCause.hpp" -#include "gc/z/zMessagePort.hpp" +#include "gc/shared/gcTimer.hpp" +#include "gc/z/zDriverPort.hpp" +#include "gc/z/zThread.hpp" +#include "gc/z/zTracer.hpp" + +// glibc which may get brought in via +// defines the macros minor and major. These keywords are central to +// the GC algorithm. These macros are undefined here so the code may +// use minor and major. +#ifdef minor +#undef minor +#endif +#ifdef major +#undef major +#endif class VM_ZOperation; +class ZDriverMinor; +class ZDriverMajor; +class ZLock; + +class ZDriver : public ZThread { + friend class ZDriverLocker; + friend class ZDriverUnlocker; -class ZDriverRequest { private: - GCCause::Cause _cause; - uint _nworkers; + static ZLock* _lock; + static ZDriverMinor* _minor; + static ZDriverMajor* _major; + + GCCause::Cause _gc_cause; + + static void lock(); + static void unlock(); public: - ZDriverRequest(); - ZDriverRequest(GCCause::Cause cause); - ZDriverRequest(GCCause::Cause cause, uint nworkers); + static void initialize(); + + static void set_minor(ZDriverMinor* minor); + static void set_major(ZDriverMajor* major); - bool operator==(const ZDriverRequest& other) const; + static ZDriverMinor* minor(); + static ZDriverMajor* major(); - GCCause::Cause cause() const; - uint nworkers() const; + ZDriver(); + + void set_gc_cause(GCCause::Cause cause); + GCCause::Cause gc_cause(); }; -class ZDriver : public ConcurrentGCThread { +class ZDriverMinor : public ZDriver { private: - ZMessagePort _gc_cycle_port; - ZRendezvousPort _gc_locker_port; + ZDriverPort _port; + ConcurrentGCTimer _gc_timer; + ZMinorTracer _jfr_tracer; + size_t _used_at_start; + + void gc(const ZDriverRequest& request); + void handle_alloc_stalls() const; + +protected: + virtual void run_thread(); + virtual void terminate(); + +public: + ZDriverMinor(); + + bool is_busy() const; - template bool pause(); + void collect(const ZDriverRequest& request); + + GCTracer* jfr_tracer(); - void pause_mark_start(); - void concurrent_mark(); - bool pause_mark_end(); - void concurrent_mark_continue(); - void concurrent_mark_free(); - void concurrent_process_non_strong_references(); - void concurrent_reset_relocation_set(); - void pause_verify(); - void concurrent_select_relocation_set(); - void pause_relocate_start(); - void concurrent_relocate(); + void set_used_at_start(size_t used); + size_t used_at_start() const; +}; - void check_out_of_memory(); +class ZDriverMajor : public ZDriver { +private: + ZDriverPort _port; + ConcurrentGCTimer _gc_timer; + ZMajorTracer _jfr_tracer; + size_t _used_at_start; + void collect_young(const ZDriverRequest& request); + + void collect_old(); void gc(const ZDriverRequest& request); + void handle_alloc_stalls() const; protected: - virtual void run_service(); - virtual void stop_service(); + virtual void run_thread(); + virtual void terminate(); public: - ZDriver(); + ZDriverMajor(); bool is_busy() const; void collect(const ZDriverRequest& request); + + GCTracer* jfr_tracer(); + + void set_used_at_start(size_t used); + size_t used_at_start() const; +}; + +class ZDriverLocker : public StackObj { +public: + ZDriverLocker(); + ~ZDriverLocker(); +}; + +class ZDriverUnlocker : public StackObj { +public: + ZDriverUnlocker(); + ~ZDriverUnlocker(); }; #endif // 
SHARE_GC_Z_ZDRIVER_HPP diff --git a/src/hotspot/share/gc/z/zDriverPort.cpp b/src/hotspot/share/gc/z/zDriverPort.cpp new file mode 100644 index 0000000000000..1dd6c902a8f4a --- /dev/null +++ b/src/hotspot/share/gc/z/zDriverPort.cpp @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/z/zDriverPort.hpp" +#include "gc/z/zFuture.inline.hpp" +#include "gc/z/zList.inline.hpp" +#include "gc/z/zLock.inline.hpp" +#include "utilities/debug.hpp" + +ZDriverRequest::ZDriverRequest() : + ZDriverRequest(GCCause::_no_gc, 0, 0) {} + +ZDriverRequest::ZDriverRequest(GCCause::Cause cause, uint young_nworkers, uint old_nworkers) : + _cause(cause), + _young_nworkers(young_nworkers), + _old_nworkers(old_nworkers) {} + +bool ZDriverRequest::operator==(const ZDriverRequest& other) const { + return _cause == other._cause; +} + +GCCause::Cause ZDriverRequest::cause() const { + return _cause; +} + +uint ZDriverRequest::young_nworkers() const { + return _young_nworkers; +} + +uint ZDriverRequest::old_nworkers() const { + return _old_nworkers; +} + +class ZDriverPortEntry { + friend class ZList; + +private: + const ZDriverRequest _message; + uint64_t _seqnum; + ZFuture _result; + ZListNode _node; + +public: + ZDriverPortEntry(const ZDriverRequest& message) : + _message(message), + _seqnum(0) {} + + void set_seqnum(uint64_t seqnum) { + _seqnum = seqnum; + } + + uint64_t seqnum() const { + return _seqnum; + } + + ZDriverRequest message() const { + return _message; + } + + void wait() { + const ZDriverRequest message = _result.get(); + assert(message == _message, "Message mismatch"); + } + + void satisfy(const ZDriverRequest& message) { + _result.set(message); + } +}; + +ZDriverPort::ZDriverPort() : + _lock(), + _has_message(false), + _seqnum(0), + _queue() {} + +bool ZDriverPort::is_busy() const { + ZLocker locker(&_lock); + return _has_message; +} + +void ZDriverPort::send_sync(const ZDriverRequest& message) { + ZDriverPortEntry entry(message); + + { + // Enqueue message + ZLocker locker(&_lock); + entry.set_seqnum(_seqnum); + _queue.insert_last(&entry); + _lock.notify(); + } + + // Wait for completion + entry.wait(); + + { + // Guard deletion of underlying semaphore. This is a workaround for a + // bug in sem_post() in glibc < 2.21, where it's not safe to destroy + // the semaphore immediately after returning from sem_wait(). The + // reason is that sem_post() can touch the semaphore after a waiting + // thread have returned from sem_wait(). 
To avoid this race we are + // forcing the waiting thread to acquire/release the lock held by the + // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674 + ZLocker locker(&_lock); + } +} + +void ZDriverPort::send_async(const ZDriverRequest& message) { + ZLocker locker(&_lock); + if (!_has_message) { + // Post message + _message = message; + _has_message = true; + _lock.notify(); + } +} + +ZDriverRequest ZDriverPort::receive() { + ZLocker locker(&_lock); + + // Wait for message + while (!_has_message && _queue.is_empty()) { + _lock.wait(); + } + + // Increment request sequence number + _seqnum++; + + if (!_has_message) { + // Message available in the queue + _message = _queue.first()->message(); + _has_message = true; + } + + return _message; +} + +void ZDriverPort::ack() { + ZLocker locker(&_lock); + + if (!_has_message) { + // Nothing to ack + return; + } + + // Satisfy requests (and duplicates) in queue + ZListIterator iter(&_queue); + for (ZDriverPortEntry* entry; iter.next(&entry);) { + if (entry->message() == _message && entry->seqnum() < _seqnum) { + // Dequeue and satisfy request. Note that the dequeue operation must + // happen first, since the request will immediately be deallocated + // once it has been satisfied. + _queue.remove(entry); + entry->satisfy(_message); + } + } + + if (_queue.is_empty()) { + // Queue is empty + _has_message = false; + } else { + // Post first message in queue + _message = _queue.first()->message(); + } +} diff --git a/src/hotspot/share/gc/z/zDriverPort.hpp b/src/hotspot/share/gc/z/zDriverPort.hpp new file mode 100644 index 0000000000000..6efdc9bf7f42c --- /dev/null +++ b/src/hotspot/share/gc/z/zDriverPort.hpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_Z_ZDRIVERPORT_HPP +#define SHARE_GC_Z_ZDRIVERPORT_HPP + +#include "gc/shared/gcCause.hpp" +#include "gc/z/zList.hpp" +#include "gc/z/zLock.hpp" + +class ZDriverPortEntry; + +class ZDriverRequest { +private: + GCCause::Cause _cause; + uint _young_nworkers; + uint _old_nworkers; + +public: + ZDriverRequest(); + ZDriverRequest(GCCause::Cause cause, uint young_nworkers, uint old_nworkers); + + bool operator==(const ZDriverRequest& other) const; + + GCCause::Cause cause() const; + uint young_nworkers() const; + uint old_nworkers() const; +}; + +class ZDriverPort { +private: + mutable ZConditionLock _lock; + bool _has_message; + ZDriverRequest _message; + uint64_t _seqnum; + ZList _queue; + +public: + ZDriverPort(); + + bool is_busy() const; + + // For use by sender + void send_sync(const ZDriverRequest& request); + void send_async(const ZDriverRequest& request); + + // For use by receiver + ZDriverRequest receive(); + void ack(); +}; + +#endif // SHARE_GC_Z_ZDRIVERPORT_HPP diff --git a/src/hotspot/share/gc/z/zForwarding.cpp b/src/hotspot/share/gc/z/zForwarding.cpp index cabd381cbc142..d8a3913806ca8 100644 --- a/src/hotspot/share/gc/z/zForwarding.cpp +++ b/src/hotspot/share/gc/z/zForwarding.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,10 +22,15 @@ */ #include "precompiled.hpp" +#include "gc/shared/gcLogPrecious.hpp" #include "gc/z/zAddress.inline.hpp" +#include "gc/z/zCollectedHeap.hpp" #include "gc/z/zForwarding.inline.hpp" +#include "gc/z/zPage.inline.hpp" #include "gc/z/zStat.hpp" #include "gc/z/zUtils.inline.hpp" +#include "logging/log.hpp" +#include "runtime/atomic.hpp" #include "utilities/align.hpp" // @@ -45,9 +50,42 @@ // count has become zero (released) or negative one (claimed). // -static const ZStatCriticalPhase ZCriticalPhaseRelocationStall("Relocation Stall"); +bool ZForwarding::claim() { + return Atomic::cmpxchg(&_claimed, false, true) == false; +} + +void ZForwarding::in_place_relocation_start(zoffset relocated_watermark) { + _page->log_msg(" In-place reloc start - relocated to: " PTR_FORMAT, untype(relocated_watermark)); + + _in_place = true; + + // Support for ZHeap::is_in checks of from-space objects + // in a page that is in-place relocating + Atomic::store(&_in_place_thread, Thread::current()); + _in_place_top_at_start = _page->top(); +} + +void ZForwarding::in_place_relocation_finish() { + assert(_in_place, "Must be an in-place relocated page"); + + _page->log_msg(" In-place reloc finish - top at start: " PTR_FORMAT, untype(_in_place_top_at_start)); + + if (_from_age == ZPageAge::old || _to_age != ZPageAge::old) { + // Only do this for non-promoted pages, that still need to reset live map. + // Done with iterating over the "from-page" view, so can now drop the _livemap. + _page->finalize_reset_for_in_place_relocation(); + } + + // Disable relaxed ZHeap::is_in checks + Atomic::store(&_in_place_thread, (Thread*)nullptr); +} + +bool ZForwarding::in_place_relocation_is_below_top_at_start(zoffset offset) const { + // Only the relocating thread is allowed to know about the old relocation top. 
+ return Atomic::load(&_in_place_thread) == Thread::current() && offset < _in_place_top_at_start; +} -bool ZForwarding::retain_page() { +bool ZForwarding::retain_page(ZRelocateQueue* queue) { for (;;) { const int32_t ref_count = Atomic::load_acquire(&_ref_count); @@ -58,8 +96,9 @@ bool ZForwarding::retain_page() { if (ref_count < 0) { // Claimed - const bool success = wait_page_released(); - assert(success, "Should always succeed"); + queue->add_and_wait(this); + + // Released return false; } @@ -70,7 +109,7 @@ bool ZForwarding::retain_page() { } } -ZPage* ZForwarding::claim_page() { +void ZForwarding::in_place_relocation_claim_page() { for (;;) { const int32_t ref_count = Atomic::load(&_ref_count); assert(ref_count > 0, "Invalid state"); @@ -89,7 +128,8 @@ ZPage* ZForwarding::claim_page() { } } - return _page; + // Done + break; } } @@ -130,48 +170,207 @@ void ZForwarding::release_page() { } } -bool ZForwarding::wait_page_released() const { +ZPage* ZForwarding::detach_page() { + // Wait until released if (Atomic::load_acquire(&_ref_count) != 0) { - ZStatTimer timer(ZCriticalPhaseRelocationStall); ZLocker locker(&_ref_lock); while (Atomic::load_acquire(&_ref_count) != 0) { - if (_ref_abort) { - return false; - } - _ref_lock.wait(); } } - return true; + return _page; } -ZPage* ZForwarding::detach_page() { - // Wait until released - if (Atomic::load_acquire(&_ref_count) != 0) { - ZLocker locker(&_ref_lock); - while (Atomic::load_acquire(&_ref_count) != 0) { - _ref_lock.wait(); - } +ZPage* ZForwarding::page() { + assert(Atomic::load(&_ref_count) != 0, "The page has been released/detached"); + return _page; +} + +void ZForwarding::mark_done() { + Atomic::store(&_done, true); +} + +bool ZForwarding::is_done() const { + return Atomic::load(&_done); +} + +// +// The relocated_remembered_fields are used when the old generation +// collection is relocating objects, concurrently with the young +// generation collection's remembered set scanning for the marking. +// +// When the OC is relocating objects, the old remembered set bits +// for the from-space objects need to be moved over to the to-space +// objects. +// +// The YC doesn't want to wait for the OC, so it eagerly helps relocating +// objects with remembered set bits, so that it can perform marking on the +// to-space copy of the object fields that are associated with the remembered +// set bits. +// +// This requires some synchronization between the OC and YC, and this is +// mainly done via the _relocated_remembered_fields_state in each ZForwarding. +// The values corresponds to: +// +// none: Starting state - neither OC nor YC has stated their intentions +// published: The OC has completed relocating all objects, and published an array +// of all to-space fields that should have a remembered set entry. +// reject: The OC relocation of the page happened concurrently with the YC +// remset scanning. Two situations: +// a) The page had not been released yet: The YC eagerly relocated and +// scanned the to-space objects with remset entries. +// b) The page had been released: The YC accepts the array published in +// (published). +// accept: The YC found that the forwarding/page had already been relocated when +// the YC started. +// +// Central to this logic is the ZRemembered::scan_forwarding function, where +// the YC tries to "retain" the forwarding/page. If it succeeds it means that +// the OC has not finished (or maybe not even started) the relocation of all objects. 
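For illustration only (not part of the patch), the handshake described above comes down to both collectors racing on a single atomic state with compare-and-swap; the sketch below models that race with std::atomic before the state-transition tables that follow. The names PublishState, oc_publish() and yc_notify_concurrent_scan() are invented for this sketch.

// Sketch only: the OC/YC handshake as a race on one atomic state variable.
#include <atomic>
#include <iostream>

enum class PublishState { none, published, reject, accept };

std::atomic<PublishState> state{PublishState::none};

// OC side: after relocating the page, try none -> published.
void oc_publish() {
  PublishState expected = PublishState::none;
  if (state.compare_exchange_strong(expected, PublishState::published)) {
    std::cout << "OC published its collected remembered-field array\n";
  } else {
    // The YC already moved the state to reject: it scans the page itself,
    // so the OC simply discards its array.
    std::cout << "OC discards its array; the YC rejected it\n";
  }
}

// YC side: while retaining the page, try none -> reject.
void yc_notify_concurrent_scan() {
  PublishState expected = PublishState::none;
  if (state.compare_exchange_strong(expected, PublishState::reject)) {
    std::cout << "YC scans the relocated fields eagerly itself\n";
  } else if (expected == PublishState::published) {
    // The OC already published; still move to reject so the array is
    // not consumed twice, and take over responsibility for it.
    state.store(PublishState::reject);
    std::cout << "YC rejects the published array and scans eagerly\n";
  }
}

int main() {
  yc_notify_concurrent_scan();  // the YC wins the race in this run
  oc_publish();                 // the OC then observes reject and discards
  return 0;
}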
+// +// When the YC manages to retain the page it will bring the state from: +// none -> reject - Started collecting remembered set info +// published -> reject - Rejected the OC's remembered set info +// reject -> reject - An earlier YC had already handled the remembered set info +// accept -> - Invalid state - will not happen +// +// When the YC fails to retain the page the state transitions are: +// none -> x - The page was relocated before the YC started +// published -> x - The OC completed relocation before YC visited this forwarding. +// The YC will use the remembered set info collected by the OC. +// reject -> x - A previous YC has already handled the remembered set info +// accept -> x - See above +// +// x is: +// reject - if the relocation finished while the current YC was running +// accept - if the relocation finished before the current YC started +// +// Note the subtlety that even though the relocation could have released the page +// and made it non-retainable, the relocation code might not have gotten to +// the point where the page is removed from the page table. It could also be +// the case that the relocated page became in-place relocated, and we therefore +// shouldn't be scanning it during this YC. +// +// The (reject) state is the "dangerous" state, where both OC and YC work on +// the same forwarding/page somewhat concurrently, while (accept) denotes that +// the entire relocation of a page (including freeing/reusing it) was +// completed before the current YC started. +// +// After all remset entries of relocated objects have been scanned, the code +// proceeds to visit all pages in the page table, to scan all pages not part +// of the OC relocation set. Pages with virtual addresses that don't match +// any of the ones in the OC relocation set will be visited. Pages with +// virtual addresses that *do* have a corresponding forwarding entry fall into two +// cases: +// +// a) The forwarding entry is marked with (reject). This means that the +// corresponding page is guaranteed to be one that has been relocated by the +// current OC during the active YC. Any remset entry is guaranteed to have +// already been scanned by the scan_forwarding code. +// +// b) The forwarding entry is marked with (accept). This means that the page was +// *not* created by the OC relocation during this YC, which means that the +// page must be scanned. +// + +void ZForwarding::relocated_remembered_fields_after_relocate() { + assert(from_age() == ZPageAge::old, "Only old pages have remsets"); + + _relocated_remembered_fields_publish_young_seqnum = ZGeneration::young()->seqnum(); + + if (ZGeneration::young()->is_phase_mark()) { + relocated_remembered_fields_publish(); + } +} + +void ZForwarding::relocated_remembered_fields_publish() { + // The OC has relocated all objects and collected all fields that + // used to have remembered set entries. Now publish the fields to + // the YC. + + const ZPublishState res = Atomic::cmpxchg(&_relocated_remembered_fields_state, ZPublishState::none, ZPublishState::published); + + // none: OK to publish + // published: Not possible - this operation makes this transition + // reject: YC started scanning the "from" page concurrently and rejects the fields + // the OC collected.
+ // accept: YC accepted the fields published by this function - not possible + // because they weren't published before the CAS above + + if (res == ZPublishState::none) { + // fields were successfully published + log_debug(gc, remset)("Forwarding remset published : " PTR_FORMAT " " PTR_FORMAT, untype(start()), untype(end())); + + return; + } + + log_debug(gc, remset)("Forwarding remset discarded : " PTR_FORMAT " " PTR_FORMAT, untype(start()), untype(end())); + + // reject: YC scans the remset concurrently + // accept: YC accepted published remset - not possible, we just atomically published it + // YC failed to retain page - not possible, since the current page is retainable + assert(res == ZPublishState::reject, "Unexpected value"); + + // YC has rejected the stored values and will (or have already) find them them itself + _relocated_remembered_fields_array.clear_and_deallocate(); +} + +void ZForwarding::relocated_remembered_fields_notify_concurrent_scan_of() { + // Invariant: The page is being retained + assert(ZGeneration::young()->is_phase_mark(), "Only called when"); + + const ZPublishState res = Atomic::cmpxchg(&_relocated_remembered_fields_state, ZPublishState::none, ZPublishState::reject); + + // none: OC has not completed relocation + // published: OC has completed and published all relocated remembered fields + // reject: A previous YC has already handled the field + // accept: A previous YC has determined that there's no concurrency between + // OC relocation and YC remembered fields scanning - not possible + // since the page has been retained (still being relocated) and + // we are in the process of scanning fields + + if (res == ZPublishState::none) { + // Successfully notified and rejected any collected data from the OC + log_debug(gc, remset)("Forwarding remset eager : " PTR_FORMAT " " PTR_FORMAT, untype(start()), untype(end())); + + return; + } + + if (res == ZPublishState::published) { + // OC relocation already collected and published fields + + // Still notify concurrent scanning and reject the collected data from the OC + const ZPublishState res2 = Atomic::cmpxchg(&_relocated_remembered_fields_state, ZPublishState::published, ZPublishState::reject); + assert(res2 == ZPublishState::published, "Should not fail"); + + log_debug(gc, remset)("Forwarding remset eager and reject: " PTR_FORMAT " " PTR_FORMAT, untype(start()), untype(end())); + + // The YC rejected the publish fields and is responsible for the array + // Eagerly deallocate the memory + _relocated_remembered_fields_array.clear_and_deallocate(); + return; } - // Detach and return page - ZPage* const page = _page; - _page = NULL; - return page; + log_debug(gc, remset)("Forwarding remset redundant : " PTR_FORMAT " " PTR_FORMAT, untype(start()), untype(end())); + + // Previous YC already handled the remembered fields + assert(res == ZPublishState::reject, "Unexpected value"); } -void ZForwarding::abort_page() { - ZLocker locker(&_ref_lock); - assert(Atomic::load(&_ref_count) > 0, "Invalid state"); - assert(!_ref_abort, "Invalid state"); - _ref_abort = true; - _ref_lock.notify_all(); +bool ZForwarding::relocated_remembered_fields_published_contains(volatile zpointer* p) { + for (volatile zpointer* const elem : _relocated_remembered_fields_array) { + if (elem == p) { + return true; + } + } + + return false; } void ZForwarding::verify() const { guarantee(_ref_count != 0, "Invalid reference count"); - guarantee(_page != NULL, "Invalid page"); + guarantee(_page != nullptr, "Invalid page"); uint32_t live_objects = 0; 
size_t live_bytes = 0; @@ -198,7 +397,7 @@ void ZForwarding::verify() const { guarantee(entry.to_offset() != other.to_offset(), "Duplicate to"); } - const uintptr_t to_addr = ZAddress::good(entry.to_offset()); + const zaddress to_addr = ZOffset::address(to_zoffset(entry.to_offset())); const size_t size = ZUtils::object_size(to_addr); const size_t aligned_size = align_up(size, _page->object_alignment()); live_bytes += aligned_size; @@ -206,5 +405,5 @@ void ZForwarding::verify() const { } // Verify number of live objects and bytes - _page->verify_live(live_objects, live_bytes); + _page->verify_live(live_objects, live_bytes, _in_place); } diff --git a/src/hotspot/share/gc/z/zForwarding.hpp b/src/hotspot/share/gc/z/zForwarding.hpp index 8212cb2f81beb..a99473322d44c 100644 --- a/src/hotspot/share/gc/z/zForwarding.hpp +++ b/src/hotspot/share/gc/z/zForwarding.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,14 +24,19 @@ #ifndef SHARE_GC_Z_ZFORWARDING_HPP #define SHARE_GC_Z_ZFORWARDING_HPP +#include "gc/z/zArray.hpp" #include "gc/z/zAttachedArray.hpp" #include "gc/z/zForwardingEntry.hpp" +#include "gc/z/zGenerationId.hpp" #include "gc/z/zLock.hpp" +#include "gc/z/zPageAge.hpp" +#include "gc/z/zPageType.hpp" #include "gc/z/zVirtualMemory.hpp" class ObjectClosure; class ZForwardingAllocator; class ZPage; +class ZRelocateQueue; typedef size_t ZForwardingCursor; @@ -39,47 +44,116 @@ class ZForwarding { friend class VMStructs; friend class ZForwardingTest; + enum class ZPublishState : int8_t { + none, // No publishing done yet + published, // OC published remset field info, which YC will reject or accept + reject, // YC remset scanning accepted OC published remset field info + accept // YC remset scanning rejected OC published remset field info + }; + private: typedef ZAttachedArray AttachedArray; + typedef ZArray PointerArray; const ZVirtualMemory _virtual; const size_t _object_alignment_shift; const AttachedArray _entries; - ZPage* _page; + ZPage* const _page; + ZPageAge _from_age; + ZPageAge _to_age; + volatile bool _claimed; mutable ZConditionLock _ref_lock; volatile int32_t _ref_count; - bool _ref_abort; + volatile bool _done; + + // Relocated remembered set fields support + volatile ZPublishState _relocated_remembered_fields_state; + PointerArray _relocated_remembered_fields_array; + uint32_t _relocated_remembered_fields_publish_young_seqnum; + + // In-place relocation support bool _in_place; + zoffset_end _in_place_top_at_start; + + // Debugging + volatile Thread* _in_place_thread; ZForwardingEntry* entries() const; ZForwardingEntry at(ZForwardingCursor* cursor) const; ZForwardingEntry first(uintptr_t from_index, ZForwardingCursor* cursor) const; ZForwardingEntry next(ZForwardingCursor* cursor) const; - ZForwarding(ZPage* page, size_t nentries); + template + void object_iterate_forwarded_via_livemap(Function function); + + ZForwarding(ZPage* page, ZPageAge to_age, size_t nentries); public: static uint32_t nentries(const ZPage* page); - static ZForwarding* alloc(ZForwardingAllocator* allocator, ZPage* page); + static ZForwarding* alloc(ZForwardingAllocator* allocator, ZPage* page, ZPageAge to_age); - uint8_t type() const; - uintptr_t start() const; + ZPageType type() const; + ZPageAge from_age() const; + ZPageAge to_age() 
const; + zoffset start() const; + zoffset_end end() const; size_t size() const; size_t object_alignment_shift() const; - void object_iterate(ObjectClosure *cl); - bool retain_page(); - ZPage* claim_page(); + bool is_promotion() const; + + // Visit from-objects + template + void object_iterate(Function function); + + template + void address_unsafe_iterate_via_table(Function function); + + // Visit to-objects + template + void object_iterate_forwarded(Function function); + + template + void object_iterate_forwarded_via_table(Function function); + + template + void oops_do_in_forwarded(Function function); + + template + void oops_do_in_forwarded_via_table(Function function); + + bool claim(); + + // In-place relocation support + bool in_place_relocation() const; + void in_place_relocation_claim_page(); + void in_place_relocation_start(zoffset relocated_watermark); + void in_place_relocation_finish(); + bool in_place_relocation_is_below_top_at_start(zoffset addr) const; + + bool retain_page(ZRelocateQueue* queue); void release_page(); - bool wait_page_released() const; + ZPage* detach_page(); - void abort_page(); + ZPage* page(); + + void mark_done(); + bool is_done() const; - void set_in_place(); - bool in_place() const; + zaddress find(zaddress_unsafe addr); ZForwardingEntry find(uintptr_t from_index, ZForwardingCursor* cursor) const; - uintptr_t insert(uintptr_t from_index, uintptr_t to_offset, ZForwardingCursor* cursor); + zoffset insert(uintptr_t from_index, zoffset to_offset, ZForwardingCursor* cursor); + + // Relocated remembered set fields support + void relocated_remembered_fields_register(volatile zpointer* p); + void relocated_remembered_fields_after_relocate(); + void relocated_remembered_fields_publish(); + void relocated_remembered_fields_notify_concurrent_scan_of(); + bool relocated_remembered_fields_is_concurrently_scanned() const; + template + void relocated_remembered_fields_apply_to_published(Function function); + bool relocated_remembered_fields_published_contains(volatile zpointer* p); void verify() const; }; diff --git a/src/hotspot/share/gc/z/zForwarding.inline.hpp b/src/hotspot/share/gc/z/zForwarding.inline.hpp index cff6d7a905c4f..1afd3ac684b17 100644 --- a/src/hotspot/share/gc/z/zForwarding.inline.hpp +++ b/src/hotspot/share/gc/z/zForwarding.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,12 +26,15 @@ #include "gc/z/zForwarding.hpp" +#include "gc/z/zAddress.inline.hpp" #include "gc/z/zAttachedArray.inline.hpp" #include "gc/z/zForwardingAllocator.inline.hpp" #include "gc/z/zHash.inline.hpp" #include "gc/z/zHeap.hpp" +#include "gc/z/zIterator.inline.hpp" #include "gc/z/zLock.inline.hpp" #include "gc/z/zPage.inline.hpp" +#include "gc/z/zUtils.inline.hpp" #include "gc/z/zVirtualMemory.inline.hpp" #include "runtime/atomic.hpp" #include "utilities/debug.hpp" @@ -47,30 +50,50 @@ inline uint32_t ZForwarding::nentries(const ZPage* page) { return round_up_power_of_2(page->live_objects() * 2); } -inline ZForwarding* ZForwarding::alloc(ZForwardingAllocator* allocator, ZPage* page) { +inline ZForwarding* ZForwarding::alloc(ZForwardingAllocator* allocator, ZPage* page, ZPageAge to_age) { const size_t nentries = ZForwarding::nentries(page); void* const addr = AttachedArray::alloc(allocator, nentries); - return ::new (addr) ZForwarding(page, nentries); + return ::new (addr) ZForwarding(page, to_age, nentries); } -inline ZForwarding::ZForwarding(ZPage* page, size_t nentries) : +inline ZForwarding::ZForwarding(ZPage* page, ZPageAge to_age, size_t nentries) : _virtual(page->virtual_memory()), _object_alignment_shift(page->object_alignment_shift()), _entries(nentries), _page(page), + _from_age(page->age()), + _to_age(to_age), + _claimed(false), _ref_lock(), _ref_count(1), - _ref_abort(false), - _in_place(false) {} + _done(false), + _relocated_remembered_fields_state(ZPublishState::none), + _relocated_remembered_fields_array(), + _relocated_remembered_fields_publish_young_seqnum(0), + _in_place(false), + _in_place_top_at_start(), + _in_place_thread(nullptr) {} -inline uint8_t ZForwarding::type() const { +inline ZPageType ZForwarding::type() const { return _page->type(); } -inline uintptr_t ZForwarding::start() const { +inline ZPageAge ZForwarding::from_age() const { + return _from_age; +} + +inline ZPageAge ZForwarding::to_age() const { + return _to_age; +} + +inline zoffset ZForwarding::start() const { return _virtual.start(); } +inline zoffset_end ZForwarding::end() const { + return _virtual.end(); +} + inline size_t ZForwarding::size() const { return _virtual.size(); } @@ -79,15 +102,96 @@ inline size_t ZForwarding::object_alignment_shift() const { return _object_alignment_shift; } -inline void ZForwarding::object_iterate(ObjectClosure *cl) { - return _page->object_iterate(cl); +inline bool ZForwarding::is_promotion() const { + return _from_age != ZPageAge::old && + _to_age == ZPageAge::old; +} + +template +inline void ZForwarding::object_iterate(Function function) { + ZObjectClosure cl(function); + _page->object_iterate(function); +} + +template +inline void ZForwarding::address_unsafe_iterate_via_table(Function function) { + for (ZForwardingCursor i = 0; i < _entries.length(); i++) { + const ZForwardingEntry entry = at(&i); + if (!entry.populated()) { + // Skip empty entries + continue; + } + + // Find to-object + + const zoffset from_offset = start() + (entry.from_index() << object_alignment_shift()); + const zaddress_unsafe from_addr = ZOffset::address_unsafe(from_offset); + + // Apply function + function(from_addr); + } +} + +template +inline void ZForwarding::object_iterate_forwarded_via_livemap(Function function) { + assert(!in_place_relocation(), "Not allowed to use livemap iteration"); + + object_iterate([&](oop obj) { + // Find to-object + const zaddress_unsafe from_addr = to_zaddress_unsafe(obj); + 
const zaddress to_addr = this->find(from_addr); + const oop to_obj = to_oop(to_addr); + + // Apply function + function(to_obj); + }); +} + +template +inline void ZForwarding::object_iterate_forwarded_via_table(Function function) { + for (ZForwardingCursor i = 0; i < _entries.length(); i++) { + const ZForwardingEntry entry = at(&i); + if (!entry.populated()) { + // Skip empty entries + continue; + } + + // Find to-object + const zoffset to_offset = to_zoffset(entry.to_offset()); + const zaddress to_addr = ZOffset::address(to_offset); + const oop to_obj = to_oop(to_addr); + + // Apply function + function(to_obj); + } +} + +template +inline void ZForwarding::object_iterate_forwarded(Function function) { + if (in_place_relocation()) { + // The original objects are not available anymore, can't use the livemap + object_iterate_forwarded_via_table(function); + } else { + object_iterate_forwarded_via_livemap(function); + } +} + +template +void ZForwarding::oops_do_in_forwarded(Function function) { + object_iterate_forwarded([&](oop to_obj) { + ZIterator::basic_oop_iterate_safe(to_obj, function); + }); } -inline void ZForwarding::set_in_place() { - _in_place = true; +template +void ZForwarding::oops_do_in_forwarded_via_table(Function function) { + object_iterate_forwarded_via_table([&](oop to_obj) { + ZIterator::basic_oop_iterate_safe(to_obj, function); + }); } -inline bool ZForwarding::in_place() const { +inline bool ZForwarding::in_place_relocation() const { + assert(Atomic::load(&_ref_count) != 0, "The page has been released/detached"); return _in_place; } @@ -114,6 +218,13 @@ inline ZForwardingEntry ZForwarding::next(ZForwardingCursor* cursor) const { return at(cursor); } +inline zaddress ZForwarding::find(zaddress_unsafe addr) { + const uintptr_t from_index = (ZAddress::offset(addr) - start()) >> object_alignment_shift(); + ZForwardingCursor cursor; + const ZForwardingEntry entry = find(from_index, &cursor); + return entry.populated() ? ZOffset::address(to_zoffset(entry.to_offset())) : zaddress::null; +} + inline ZForwardingEntry ZForwarding::find(uintptr_t from_index, ZForwardingCursor* cursor) const { // Reading entries in the table races with the atomic CAS done for // insertion into the table. 
This is safe because each entry is at @@ -132,8 +243,8 @@ inline ZForwardingEntry ZForwarding::find(uintptr_t from_index, ZForwardingCurso return entry; } -inline uintptr_t ZForwarding::insert(uintptr_t from_index, uintptr_t to_offset, ZForwardingCursor* cursor) { - const ZForwardingEntry new_entry(from_index, to_offset); +inline zoffset ZForwarding::insert(uintptr_t from_index, zoffset to_offset, ZForwardingCursor* cursor) { + const ZForwardingEntry new_entry(from_index, untype(to_offset)); const ZForwardingEntry old_entry; // Empty // Make sure that object copy is finished @@ -152,7 +263,7 @@ inline uintptr_t ZForwarding::insert(uintptr_t from_index, uintptr_t to_offset, while (entry.populated()) { if (entry.from_index() == from_index) { // Match found, return already inserted address - return entry.to_offset(); + return to_zoffset(entry.to_offset()); } entry = next(cursor); @@ -160,4 +271,75 @@ inline uintptr_t ZForwarding::insert(uintptr_t from_index, uintptr_t to_offset, } } +inline void ZForwarding::relocated_remembered_fields_register(volatile zpointer* p) { + // Invariant: Page is being retained + assert(ZGeneration::young()->is_phase_mark(), "Only called when"); + + const ZPublishState res = Atomic::load(&_relocated_remembered_fields_state); + + // none: Gather remembered fields + // published: Have already published fields - not possible since they haven't been + // collected yet + // reject: YC rejected fields collected by the OC + // accept: YC has marked that there's no more concurrent scanning of relocated + // fields - not possible since this code is still relocating objects + + if (res == ZPublishState::none) { + _relocated_remembered_fields_array.push(p); + return; + } + + assert(res == ZPublishState::reject, "Unexpected value"); +} + +// Returns true iff the page is being (or about to be) relocated by the OC +// while the YC gathered the remembered fields of the "from" page. 
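As a side note to the ZForwarding::insert()/find() pair shown above (not part of the patch): the relocation idiom is copy first, then CAS the forwarding entry, and on a lost race abandon the local copy and adopt the winner's. The sketch below models only that per-object agreement on a single slot, ignoring the hashing and linear probing of the real table; ForwardingSlot, insert_forwarding() and kEmpty are invented names.

// Sketch only: agree on one relocated copy per object via compare-and-swap.
#include <atomic>
#include <cstdint>
#include <iostream>

constexpr uintptr_t kEmpty = 0;

struct ForwardingSlot {
  std::atomic<uintptr_t> to_offset{kEmpty};
};

// Publish 'my_copy' as the relocated address, or adopt the winner's copy.
uintptr_t insert_forwarding(ForwardingSlot& slot, uintptr_t my_copy) {
  uintptr_t expected = kEmpty;
  if (slot.to_offset.compare_exchange_strong(expected, my_copy)) {
    return my_copy;   // we won the race; our copy becomes canonical
  }
  return expected;    // lost the race; abandon our copy, use the winner's
}

int main() {
  ForwardingSlot slot;
  const uintptr_t first  = insert_forwarding(slot, 0x1000);  // wins
  const uintptr_t second = insert_forwarding(slot, 0x2000);  // loses, adopts 0x1000
  std::cout << std::hex << first << " == " << second << "\n";
  return 0;
}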
+inline bool ZForwarding::relocated_remembered_fields_is_concurrently_scanned() const { + return Atomic::load(&_relocated_remembered_fields_state) == ZPublishState::reject; +} + +template +inline void ZForwarding::relocated_remembered_fields_apply_to_published(Function function) { + // Invariant: Page is not being retained + assert(ZGeneration::young()->is_phase_mark(), "Only called when"); + + const ZPublishState res = Atomic::load_acquire(&_relocated_remembered_fields_state); + + // none: Nothing published - page had already been relocated before YC started + // published: OC relocated and published relocated remembered fields + // reject: A previous YC concurrently scanned relocated remembered fields of the "from" page + // accept: A previous YC marked that it didn't do (reject) + + if (res == ZPublishState::published) { + log_debug(gc, remset)("Forwarding remset accept : " PTR_FORMAT " " PTR_FORMAT " (" PTR_FORMAT ", %s)", + untype(start()), untype(end()), p2i(this), Thread::current()->name()); + + // OC published relocated remembered fields + ZArrayIterator iter(&_relocated_remembered_fields_array); + for (volatile zpointer* to_field_addr; iter.next(&to_field_addr);) { + function(to_field_addr); + } + + // YC responsible for the array - eagerly deallocate + _relocated_remembered_fields_array.clear_and_deallocate(); + } + + assert(_relocated_remembered_fields_publish_young_seqnum != 0, "Must have been set"); + if (_relocated_remembered_fields_publish_young_seqnum == ZGeneration::young()->seqnum()) { + log_debug(gc, remset)("scan_forwarding failed retain unsafe " PTR_FORMAT, untype(start())); + // The page was relocated concurrently with the current young generation + // collection. Mark that it is unsafe (and unnecessary) to call scan_page + // on the page in the page table. + assert(res != ZPublishState::accept, "Unexpected"); + Atomic::store(&_relocated_remembered_fields_state, ZPublishState::reject); + } else { + log_debug(gc, remset)("scan_forwarding failed retain safe " PTR_FORMAT, untype(start())); + // Guaranteed that the page was fully relocated and removed from page table. + // Because of this we can signal to scan_page that any page found in page table + // of the same slot as the current forwarding is a page that is safe to scan, + // and in fact must be scanned. + Atomic::store(&_relocated_remembered_fields_state, ZPublishState::accept); + } +} + #endif // SHARE_GC_Z_ZFORWARDING_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zForwardingAllocator.cpp b/src/hotspot/share/gc/z/zForwardingAllocator.cpp index 6550259966b32..814d4b83f95f0 100644 --- a/src/hotspot/share/gc/z/zForwardingAllocator.cpp +++ b/src/hotspot/share/gc/z/zForwardingAllocator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,9 +26,9 @@ #include "memory/allocation.inline.hpp" ZForwardingAllocator::ZForwardingAllocator() : - _start(NULL), - _end(NULL), - _top(NULL) {} + _start(nullptr), + _end(nullptr), + _top(nullptr) {} ZForwardingAllocator::~ZForwardingAllocator() { FREE_C_HEAP_ARRAY(char, _start); diff --git a/src/hotspot/share/gc/z/zForwardingAllocator.inline.hpp b/src/hotspot/share/gc/z/zForwardingAllocator.inline.hpp index 7c2e2155b16d0..41d9106843450 100644 --- a/src/hotspot/share/gc/z/zForwardingAllocator.inline.hpp +++ b/src/hotspot/share/gc/z/zForwardingAllocator.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,7 @@ inline bool ZForwardingAllocator::is_full() const { } inline void* ZForwardingAllocator::alloc(size_t size) { - char* const addr = Atomic::fetch_and_add(&_top, size); + char* const addr = Atomic::fetch_then_add(&_top, size); assert(addr + size <= _end, "Allocation should never fail"); return addr; } diff --git a/src/hotspot/share/gc/z/zForwardingEntry.hpp b/src/hotspot/share/gc/z/zForwardingEntry.hpp index eb064ecdf6e95..20a81b6e05ca5 100644 --- a/src/hotspot/share/gc/z/zForwardingEntry.hpp +++ b/src/hotspot/share/gc/z/zForwardingEntry.hpp @@ -27,6 +27,7 @@ #include "gc/z/zBitField.hpp" #include "memory/allocation.hpp" #include "metaprogramming/primitiveConversions.hpp" + #include // diff --git a/src/hotspot/share/gc/z/zForwardingTable.hpp b/src/hotspot/share/gc/z/zForwardingTable.hpp index ad57646373bc0..697756a38fa9f 100644 --- a/src/hotspot/share/gc/z/zForwardingTable.hpp +++ b/src/hotspot/share/gc/z/zForwardingTable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,19 +25,23 @@ #define SHARE_GC_Z_ZFORWARDINGTABLE_HPP #include "gc/z/zGranuleMap.hpp" +#include "gc/z/zIndexDistributor.hpp" class ZForwarding; class ZForwardingTable { + friend class ZRemsetTableIterator; friend class VMStructs; private: ZGranuleMap _map; + ZForwarding* at(size_t index) const; + public: ZForwardingTable(); - ZForwarding* get(uintptr_t addr) const; + ZForwarding* get(zaddress_unsafe addr) const; void insert(ZForwarding* forwarding); void remove(ZForwarding* forwarding); diff --git a/src/hotspot/share/gc/z/zForwardingTable.inline.hpp b/src/hotspot/share/gc/z/zForwardingTable.inline.hpp index 9643247409427..543a61e956022 100644 --- a/src/hotspot/share/gc/z/zForwardingTable.inline.hpp +++ b/src/hotspot/share/gc/z/zForwardingTable.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,30 +30,35 @@ #include "gc/z/zForwarding.inline.hpp" #include "gc/z/zGlobals.hpp" #include "gc/z/zGranuleMap.inline.hpp" +#include "gc/z/zIndexDistributor.inline.hpp" #include "utilities/debug.hpp" inline ZForwardingTable::ZForwardingTable() : _map(ZAddressOffsetMax) {} -inline ZForwarding* ZForwardingTable::get(uintptr_t addr) const { - assert(!ZAddress::is_null(addr), "Invalid address"); +inline ZForwarding* ZForwardingTable::at(size_t index) const { + return _map.at(index); +} + +inline ZForwarding* ZForwardingTable::get(zaddress_unsafe addr) const { + assert(!is_null(addr), "Invalid address"); return _map.get(ZAddress::offset(addr)); } inline void ZForwardingTable::insert(ZForwarding* forwarding) { - const uintptr_t offset = forwarding->start(); + const zoffset offset = forwarding->start(); const size_t size = forwarding->size(); - assert(_map.get(offset) == NULL, "Invalid entry"); + assert(_map.get(offset) == nullptr, "Invalid entry"); _map.put(offset, size, forwarding); } inline void ZForwardingTable::remove(ZForwarding* forwarding) { - const uintptr_t offset = forwarding->start(); + const zoffset offset = forwarding->start(); const size_t size = forwarding->size(); assert(_map.get(offset) == forwarding, "Invalid entry"); - _map.put(offset, size, NULL); + _map.put(offset, size, nullptr); } #endif // SHARE_GC_Z_ZFORWARDINGTABLE_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zGCIdPrinter.cpp b/src/hotspot/share/gc/z/zGCIdPrinter.cpp new file mode 100644 index 0000000000000..ed36d1b9bcf3a --- /dev/null +++ b/src/hotspot/share/gc/z/zGCIdPrinter.cpp @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/z/zGCIdPrinter.hpp" +#include "include/jvm.h" + +ZGCIdPrinter* ZGCIdPrinter::_instance; + +void ZGCIdPrinter::initialize() { + _instance = new ZGCIdPrinter(); + GCId::set_printer(_instance); +} + +int ZGCIdPrinter::print_gc_id_unchecked(uint gc_id, char* buf, size_t len) { + if (gc_id == _minor_gc_id) { + // Minor collections are always tagged with 'y' + return jio_snprintf(buf, len, "GC(%u) y: ", gc_id); + } + + if (gc_id == _major_gc_id) { + // Major collections are either tagged with 'Y' or 'O', + // this is controlled by _major_tag. 
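For illustration (not part of the patch), the log prefixes produced by the tagging scheme described in the comments above look as follows; this sketch uses plain std::snprintf instead of jio_snprintf and hard-codes example GC ids.

// Sketch only: the log prefixes produced by the tagging scheme above.
#include <cstdio>

int main() {
  char buf[32];
  std::snprintf(buf, sizeof(buf), "GC(%u) y: ", 3u);         // minor collection
  std::printf("%s...\n", buf);                               // GC(3) y: ...
  std::snprintf(buf, sizeof(buf), "GC(%u) %c: ", 4u, 'Y');   // major, young phase
  std::printf("%s...\n", buf);                               // GC(4) Y: ...
  std::snprintf(buf, sizeof(buf), "GC(%u) %c: ", 4u, 'O');   // major, old phase
  std::printf("%s...\n", buf);                               // GC(4) O: ...
  return 0;
}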
+ return jio_snprintf(buf, len, "GC(%u) %c: ", gc_id, _major_tag); + } + + // The initial log for each GC should be untagged this + // is handled by not yet having set the current GC id + // for that collection and thus falling through to here. + return jio_snprintf(buf, len, "GC(%u) ", gc_id); +} + +size_t ZGCIdPrinter::print_gc_id(uint gc_id, char* buf, size_t len) { + const int ret = print_gc_id_unchecked(gc_id, buf, len); + assert(ret > 0, "Failed to print prefix. Log buffer too small?"); + return (size_t)ret; +} + +ZGCIdPrinter::ZGCIdPrinter() : + _minor_gc_id(GCId::undefined()), + _major_gc_id(GCId::undefined()), + _major_tag('-') { } + +void ZGCIdPrinter::set_minor_gc_id(uint id) { + _minor_gc_id = id; +} + +void ZGCIdPrinter::set_major_gc_id(uint id) { + _major_gc_id = id; +} + +void ZGCIdPrinter::set_major_tag(char tag) { + _major_tag = tag; +} + +ZGCIdMinor::ZGCIdMinor(uint gc_id) { + ZGCIdPrinter::_instance->set_minor_gc_id(gc_id); +} + +ZGCIdMinor::~ZGCIdMinor() { + ZGCIdPrinter::_instance->set_minor_gc_id(GCId::undefined()); +} + +ZGCIdMajor::ZGCIdMajor(uint gc_id, char tag) { + ZGCIdPrinter::_instance->set_major_gc_id(gc_id); + ZGCIdPrinter::_instance->set_major_tag(tag); +} + +ZGCIdMajor::~ZGCIdMajor() { + ZGCIdPrinter::_instance->set_major_gc_id(GCId::undefined()); + ZGCIdPrinter::_instance->set_major_tag('-'); +} diff --git a/src/hotspot/share/gc/z/zGCIdPrinter.hpp b/src/hotspot/share/gc/z/zGCIdPrinter.hpp new file mode 100644 index 0000000000000..38bebf7ab67b1 --- /dev/null +++ b/src/hotspot/share/gc/z/zGCIdPrinter.hpp @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
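Taken together, zGCIdPrinter.cpp implements a small prefix scheme: a collection registered as the current minor id is tagged 'y', one registered as the current major id gets whatever character is installed as _major_tag ('Y' or 'O'), and anything logged before an id is registered falls through untagged. A self-contained sketch of that logic, with std::snprintf standing in for jio_snprintf and a plain sentinel for GCId::undefined():

    #include <cstddef>
    #include <cstdio>

    static const unsigned UNDEFINED = ~0u;   // stands in for GCId::undefined()

    struct GCIdPrefix {
      unsigned minor_id  = UNDEFINED;
      unsigned major_id  = UNDEFINED;
      char     major_tag = '-';

      int print(unsigned gc_id, char* buf, size_t len) const {
        if (gc_id == minor_id) {
          return std::snprintf(buf, len, "GC(%u) y: ", gc_id);              // minor: always 'y'
        }
        if (gc_id == major_id) {
          return std::snprintf(buf, len, "GC(%u) %c: ", gc_id, major_tag);  // major: 'Y' or 'O'
        }
        return std::snprintf(buf, len, "GC(%u) ", gc_id);                   // no id registered yet
      }
    };

    int main() {
      GCIdPrefix p;
      p.major_id  = 7;
      p.major_tag = 'Y';
      char buf[32];
      p.print(7, buf, sizeof(buf));
      std::printf("%s\n", buf);   // prints "GC(7) Y: "
    }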
+ */ + +#ifndef SHARE_GC_Z_ZGCIDPRINTER_HPP +#define SHARE_GC_Z_ZGCIDPRINTER_HPP + +#include "gc/shared/gcId.hpp" +#include "memory/allocation.hpp" +#include "utilities/globalDefinitions.hpp" + +class ZGCIdPrinter : public GCIdPrinter { + friend class ZGCIdMajor; + friend class ZGCIdMinor; + +private: + static ZGCIdPrinter* _instance; + + uint _minor_gc_id; + uint _major_gc_id; + char _major_tag; + + ZGCIdPrinter(); + + void set_minor_gc_id(uint id); + void set_major_gc_id(uint id); + void set_major_tag(char tag); + + int print_gc_id_unchecked(uint gc_id, char *buf, size_t len); + size_t print_gc_id(uint gc_id, char *buf, size_t len) override; + +public: + static void initialize(); +}; + +class ZGCIdMinor : public StackObj { +public: + ZGCIdMinor(uint gc_id); + ~ZGCIdMinor(); +}; + +class ZGCIdMajor : public StackObj { +public: + ZGCIdMajor(uint gc_id, char tag); + ~ZGCIdMajor(); +}; + +#endif // SHARE_GC_Z_ZGCIDPRINTER_HPP diff --git a/src/hotspot/share/gc/z/zGeneration.cpp b/src/hotspot/share/gc/z/zGeneration.cpp new file mode 100644 index 0000000000000..2fd231484e026 --- /dev/null +++ b/src/hotspot/share/gc/z/zGeneration.cpp @@ -0,0 +1,1486 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "precompiled.hpp" +#include "classfile/classLoaderDataGraph.hpp" +#include "code/nmethod.hpp" +#include "gc/shared/gcLocker.hpp" +#include "gc/shared/gcVMOperations.hpp" +#include "gc/shared/isGCActiveMark.hpp" +#include "gc/shared/suspendibleThreadSet.hpp" +#include "gc/z/zAllocator.inline.hpp" +#include "gc/z/zBarrierSet.hpp" +#include "gc/z/zBarrierSetAssembler.hpp" +#include "gc/z/zBarrierSetNMethod.hpp" +#include "gc/z/zBreakpoint.hpp" +#include "gc/z/zCollectedHeap.hpp" +#include "gc/z/zDriver.hpp" +#include "gc/z/zForwarding.hpp" +#include "gc/z/zForwardingTable.inline.hpp" +#include "gc/z/zGeneration.inline.hpp" +#include "gc/z/zHeap.inline.hpp" +#include "gc/z/zJNICritical.hpp" +#include "gc/z/zMark.inline.hpp" +#include "gc/z/zPageAllocator.hpp" +#include "gc/z/zRelocationSet.inline.hpp" +#include "gc/z/zRelocationSetSelector.inline.hpp" +#include "gc/z/zRemembered.hpp" +#include "gc/z/zRootsIterator.hpp" +#include "gc/z/zStat.hpp" +#include "gc/z/zTask.hpp" +#include "gc/z/zUncoloredRoot.inline.hpp" +#include "gc/z/zVerify.hpp" +#include "gc/z/zWorkers.hpp" +#include "logging/log.hpp" +#include "memory/universe.hpp" +#include "prims/jvmtiTagMap.hpp" +#include "runtime/atomic.hpp" +#include "runtime/continuation.hpp" +#include "runtime/handshake.hpp" +#include "runtime/safepoint.hpp" +#include "runtime/threads.hpp" +#include "runtime/vmOperations.hpp" +#include "runtime/vmThread.hpp" +#include "utilities/debug.hpp" +#include "utilities/events.hpp" + +static const ZStatPhaseGeneration ZPhaseGenerationYoung[] { + ZStatPhaseGeneration("Young Generation", ZGenerationId::young), + ZStatPhaseGeneration("Young Generation (Promote All)", ZGenerationId::young), + ZStatPhaseGeneration("Young Generation (Collect Roots)", ZGenerationId::young), + ZStatPhaseGeneration("Young Generation", ZGenerationId::young) +}; + +static const ZStatPhaseGeneration ZPhaseGenerationOld("Old Generation", ZGenerationId::old); + +static const ZStatPhasePause ZPhasePauseMarkStartYoung("Pause Mark Start", ZGenerationId::young); +static const ZStatPhasePause ZPhasePauseMarkStartYoungAndOld("Pause Mark Start (Major)", ZGenerationId::young); +static const ZStatPhaseConcurrent ZPhaseConcurrentMarkYoung("Concurrent Mark", ZGenerationId::young); +static const ZStatPhaseConcurrent ZPhaseConcurrentMarkContinueYoung("Concurrent Mark Continue", ZGenerationId::young); +static const ZStatPhasePause ZPhasePauseMarkEndYoung("Pause Mark End", ZGenerationId::young); +static const ZStatPhaseConcurrent ZPhaseConcurrentMarkFreeYoung("Concurrent Mark Free", ZGenerationId::young); +static const ZStatPhaseConcurrent ZPhaseConcurrentResetRelocationSetYoung("Concurrent Reset Relocation Set", ZGenerationId::young); +static const ZStatPhaseConcurrent ZPhaseConcurrentSelectRelocationSetYoung("Concurrent Select Relocation Set", ZGenerationId::young); +static const ZStatPhasePause ZPhasePauseRelocateStartYoung("Pause Relocate Start", ZGenerationId::young); +static const ZStatPhaseConcurrent ZPhaseConcurrentRelocatedYoung("Concurrent Relocate", ZGenerationId::young); + +static const ZStatPhaseConcurrent ZPhaseConcurrentMarkOld("Concurrent Mark", ZGenerationId::old); +static const ZStatPhaseConcurrent ZPhaseConcurrentMarkContinueOld("Concurrent Mark Continue", ZGenerationId::old); +static const ZStatPhasePause ZPhasePauseMarkEndOld("Pause Mark End", ZGenerationId::old); +static const ZStatPhaseConcurrent ZPhaseConcurrentMarkFreeOld("Concurrent Mark Free", ZGenerationId::old); +static const ZStatPhaseConcurrent 
ZPhaseConcurrentProcessNonStrongOld("Concurrent Process Non-Strong", ZGenerationId::old); +static const ZStatPhaseConcurrent ZPhaseConcurrentResetRelocationSetOld("Concurrent Reset Relocation Set", ZGenerationId::old); +static const ZStatPhaseConcurrent ZPhaseConcurrentSelectRelocationSetOld("Concurrent Select Relocation Set", ZGenerationId::old); +static const ZStatPhasePause ZPhasePauseRelocateStartOld("Pause Relocate Start", ZGenerationId::old); +static const ZStatPhaseConcurrent ZPhaseConcurrentRelocatedOld("Concurrent Relocate", ZGenerationId::old); +static const ZStatPhaseConcurrent ZPhaseConcurrentRemapRootsOld("Concurrent Remap Roots", ZGenerationId::old); + +static const ZStatSubPhase ZSubPhaseConcurrentMarkRootsYoung("Concurrent Mark Roots", ZGenerationId::young); +static const ZStatSubPhase ZSubPhaseConcurrentMarkFollowYoung("Concurrent Mark Follow", ZGenerationId::young); + +static const ZStatSubPhase ZSubPhaseConcurrentMarkRootsOld("Concurrent Mark Roots", ZGenerationId::old); +static const ZStatSubPhase ZSubPhaseConcurrentMarkFollowOld("Concurrent Mark Follow", ZGenerationId::old); +static const ZStatSubPhase ZSubPhaseConcurrentRemapRootsColoredOld("Concurrent Remap Roots Colored", ZGenerationId::old); +static const ZStatSubPhase ZSubPhaseConcurrentRemapRootsUncoloredOld("Concurrent Remap Roots Uncolored", ZGenerationId::old); +static const ZStatSubPhase ZSubPhaseConcurrentRemapRememberedOld("Concurrent Remap Remembered", ZGenerationId::old); + +static const ZStatSampler ZSamplerJavaThreads("System", "Java Threads", ZStatUnitThreads); + +ZGenerationYoung* ZGeneration::_young; +ZGenerationOld* ZGeneration::_old; + +ZGeneration::ZGeneration(ZGenerationId id, ZPageTable* page_table, ZPageAllocator* page_allocator) : + _id(id), + _page_allocator(page_allocator), + _page_table(page_table), + _forwarding_table(), + _workers(id, &_stat_workers), + _mark(this, page_table), + _relocate(this), + _relocation_set(this), + _freed(0), + _promoted(0), + _compacted(0), + _phase(Phase::Relocate), + _seqnum(1), + _stat_heap(), + _stat_cycle(), + _stat_workers(), + _stat_mark(), + _stat_relocation(), + _gc_timer(nullptr) { +} + +bool ZGeneration::is_initialized() const { + return _mark.is_initialized(); +} + +ZWorkers* ZGeneration::workers() { + return &_workers; +} + +uint ZGeneration::active_workers() const { + return _workers.active_workers(); +} + +void ZGeneration::set_active_workers(uint nworkers) { + _workers.set_active_workers(nworkers); +} + +void ZGeneration::threads_do(ThreadClosure* tc) const { + _workers.threads_do(tc); +} + +void ZGeneration::mark_flush_and_free(Thread* thread) { + _mark.flush_and_free(thread); +} + +void ZGeneration::mark_free() { + _mark.free(); +} + +void ZGeneration::free_empty_pages(ZRelocationSetSelector* selector, int bulk) { + // Freeing empty pages in bulk is an optimization to avoid grabbing + // the page allocator lock, and trying to satisfy stalled allocations + // too frequently. 
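The bulk argument batches empty-page reclamation: pages are handed back in groups so the page allocator lock is taken once per batch rather than once per empty page, and the later call with bulk = 0 flushes whatever remains. A rough sketch of that batching pattern (Page and the mutex are hypothetical stand-ins):

    #include <cstddef>
    #include <mutex>
    #include <vector>

    struct Page { size_t size; };   // hypothetical stand-in for ZPage

    class EmptyPageBatch {
    private:
      std::vector<Page*> _pages;
      std::mutex&        _allocator_lock;   // stands in for the page allocator lock

    public:
      explicit EmptyPageBatch(std::mutex& lock) : _allocator_lock(lock) {}

      void add(Page* page) { _pages.push_back(page); }

      // bulk == 0 means "flush unconditionally", mirroring the final call.
      void free_if_due(size_t bulk) {
        if (_pages.empty() || _pages.size() < bulk) {
          return;
        }
        std::lock_guard<std::mutex> guard(_allocator_lock);   // one acquisition per batch
        // ... hand _pages back to the allocator here ...
        _pages.clear();
      }
    };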
+ if (selector->should_free_empty_pages(bulk)) { + const size_t freed = ZHeap::heap()->free_empty_pages(selector->empty_pages()); + increase_freed(freed); + selector->clear_empty_pages(); + } +} + +void ZGeneration::flip_age_pages(const ZRelocationSetSelector* selector) { + if (is_young()) { + _relocate.flip_age_pages(selector->not_selected_small()); + _relocate.flip_age_pages(selector->not_selected_medium()); + _relocate.flip_age_pages(selector->not_selected_large()); + } +} + +static double fragmentation_limit(ZGenerationId generation) { + if (generation == ZGenerationId::old) { + return ZFragmentationLimit; + } else { + return ZYoungCompactionLimit; + } +} + +void ZGeneration::select_relocation_set(ZGenerationId generation, bool promote_all) { + // Register relocatable pages with selector + ZRelocationSetSelector selector(fragmentation_limit(generation)); + { + ZGenerationPagesIterator pt_iter(_page_table, _id, _page_allocator); + for (ZPage* page; pt_iter.next(&page);) { + if (!page->is_relocatable()) { + // Not relocatable, don't register + // Note that the seqnum can change under our feet here as the page + // can be concurrently freed and recycled by a concurrent generation + // collection. However this property is stable across such transitions. + // If it was not relocatable before recycling, then it won't be + // relocatable after it gets recycled either, as the seqnum atomically + // becomes allocating for the given generation. The opposite property + // also holds: if the page is relocatable, then it can't have been + // concurrently freed; if it was re-allocated it would not be + // relocatable, and if it was not re-allocated we know that it was + // allocated earlier than mark start of the current generation + // collection. + continue; + } + + if (page->is_marked()) { + // Register live page + selector.register_live_page(page); + } else { + // Register empty page + selector.register_empty_page(page); + + // Reclaim empty pages in bulk + + // An active iterator blocks immediate recycle and delete of pages. + // The intent it to allow the code that iterates over the pages to + // safely read the properties of the pages without them being changed + // by another thread. However, this function both iterates over the + // pages AND frees/recycles them. We "yield" the iterator, so that we + // can perform immediate recycling (as long as no other thread is + // iterating over the pages). The contract is that the pages that are + // about to be freed are "owned" by this thread, and no other thread + // will change their states. + pt_iter.yield([&]() { + free_empty_pages(&selector, 64 /* bulk */); + }); + } + } + + // Reclaim remaining empty pages + free_empty_pages(&selector, 0 /* bulk */); + } + + // Select relocation set + selector.select(); + + // Selecting tenuring threshold must be done after select + // which produces the liveness data, but before install, + // which consumes the tenuring threshold. 
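The pt_iter.yield([&]() { ... }) call described above is what lets one loop both iterate the page table and recycle pages: the iterator normally blocks immediate recycling, so it briefly steps aside, runs the callback, and resumes. A minimal sketch of that shape (hypothetical names; a mutex models the "active iterator blocks recycling" rule):

    #include <functional>
    #include <mutex>

    class PageIterator {
    private:
      std::mutex& _recycle_blocker;   // held while iterating; blocks page recycling

    public:
      explicit PageIterator(std::mutex& blocker) : _recycle_blocker(blocker) {
        _recycle_blocker.lock();
      }

      ~PageIterator() {
        _recycle_blocker.unlock();
      }

      // Temporarily step aside so the callback may recycle pages this thread
      // owns, then resume iterating.
      void yield(const std::function<void()>& callback) {
        _recycle_blocker.unlock();
        callback();
        _recycle_blocker.lock();
      }
    };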
+ if (generation == ZGenerationId::young) { + ZGeneration::young()->select_tenuring_threshold(selector.stats(), promote_all); + } + + // Install relocation set + _relocation_set.install(&selector); + + // Flip age young pages that were not selected + flip_age_pages(&selector); + + // Setup forwarding table + ZRelocationSetIterator rs_iter(&_relocation_set); + for (ZForwarding* forwarding; rs_iter.next(&forwarding);) { + _forwarding_table.insert(forwarding); + } + + // Update statistics + stat_relocation()->at_select_relocation_set(selector.stats()); + stat_heap()->at_select_relocation_set(selector.stats()); +} + +ZRelocationSetParallelIterator ZGeneration::relocation_set_parallel_iterator() { + return ZRelocationSetParallelIterator(&_relocation_set); +} + +void ZGeneration::reset_relocation_set() { + // Reset forwarding table + ZRelocationSetIterator iter(&_relocation_set); + for (ZForwarding* forwarding; iter.next(&forwarding);) { + _forwarding_table.remove(forwarding); + } + + // Reset relocation set + _relocation_set.reset(_page_allocator); +} + +void ZGeneration::synchronize_relocation() { + _relocate.synchronize(); +} + +void ZGeneration::desynchronize_relocation() { + _relocate.desynchronize(); +} + +void ZGeneration::reset_statistics() { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + _freed = 0; + _promoted = 0; + _compacted = 0; + _page_allocator->reset_statistics(_id); +} + +ssize_t ZGeneration::freed() const { + return _freed; +} + +void ZGeneration::increase_freed(size_t size) { + Atomic::add(&_freed, size, memory_order_relaxed); +} + +size_t ZGeneration::promoted() const { + return _promoted; +} + +void ZGeneration::increase_promoted(size_t size) { + Atomic::add(&_promoted, size, memory_order_relaxed); +} + +size_t ZGeneration::compacted() const { + return _compacted; +} + +void ZGeneration::increase_compacted(size_t size) { + Atomic::add(&_compacted, size, memory_order_relaxed); +} + +ConcurrentGCTimer* ZGeneration::gc_timer() const { + return _gc_timer; +} + +void ZGeneration::set_gc_timer(ConcurrentGCTimer* gc_timer) { + assert(_gc_timer == nullptr, "Incorrect scoping"); + _gc_timer = gc_timer; +} + +void ZGeneration::clear_gc_timer() { + assert(_gc_timer != nullptr, "Incorrect scoping"); + _gc_timer = nullptr; +} + +void ZGeneration::log_phase_switch(Phase from, Phase to) { + const char* const str[] = { + "Young Mark Start", + "Young Mark End", + "Young Relocate Start", + "Old Mark Start", + "Old Mark End", + "Old Relocate Start" + }; + + size_t index = 0; + + if (is_old()) { + index += 3; + } + + if (to == Phase::Relocate) { + index += 2; + } + + if (from == Phase::Mark && to == Phase::MarkComplete) { + index += 1; + } + + assert(index < ARRAY_SIZE(str), "OOB: " SIZE_FORMAT " < " SIZE_FORMAT, index, ARRAY_SIZE(str)); + + Events::log_zgc_phase_switch("%-21s %4u", str[index], seqnum()); +} + +void ZGeneration::set_phase(Phase new_phase) { + log_phase_switch(_phase, new_phase); + + _phase = new_phase; +} + +void ZGeneration::at_collection_start(ConcurrentGCTimer* gc_timer) { + set_gc_timer(gc_timer); + stat_cycle()->at_start(); + stat_heap()->at_collection_start(_page_allocator->stats(this)); + workers()->set_active(); +} + +void ZGeneration::at_collection_end() { + workers()->set_inactive(); + stat_cycle()->at_end(stat_workers(), should_record_stats()); + // The heap at collection end data is gathered at relocate end + clear_gc_timer(); +} + +const char* ZGeneration::phase_to_string() const { + switch (_phase) { + case Phase::Mark: + return 
"Mark"; + + case Phase::MarkComplete: + return "MarkComplete"; + + case Phase::Relocate: + return "Relocate"; + + default: + return "Unknown"; + } +} + +class VM_ZOperation : public VM_Operation { +private: + const uint _gc_id; + bool _success; + +public: + VM_ZOperation() : + _gc_id(GCId::current()), + _success(false) {} + + virtual bool block_jni_critical() const { + // Blocking JNI critical regions is needed in operations where we change + // the bad mask or move objects. Changing the bad mask will invalidate all + // oops, which makes it conceptually the same thing as moving all objects. + return false; + } + + virtual bool skip_thread_oop_barriers() const { + return true; + } + + virtual bool do_operation() = 0; + + virtual bool doit_prologue() { + Heap_lock->lock(); + return true; + } + + virtual void doit() { + // Setup GC id and active marker + GCIdMark gc_id_mark(_gc_id); + IsGCActiveMark gc_active_mark; + + // Verify before operation + ZVerify::before_zoperation(); + + // Execute operation + _success = do_operation(); + + // Update statistics + ZStatSample(ZSamplerJavaThreads, Threads::number_of_threads()); + } + + virtual void doit_epilogue() { + Heap_lock->unlock(); + } + + bool success() const { + return _success; + } + + bool pause() { + if (block_jni_critical()) { + ZJNICritical::block(); + } + + VMThread::execute(this); + + if (block_jni_critical()) { + ZJNICritical::unblock(); + } + + return _success; + } +}; + +ZYoungTypeSetter::ZYoungTypeSetter(ZYoungType type) { + assert(ZGeneration::young()->_active_type == ZYoungType::none, "Invalid type"); + ZGeneration::young()->_active_type = type; +} + +ZYoungTypeSetter::~ZYoungTypeSetter() { + assert(ZGeneration::young()->_active_type != ZYoungType::none, "Invalid type"); + ZGeneration::young()->_active_type = ZYoungType::none; +} + +ZGenerationYoung::ZGenerationYoung(ZPageTable* page_table, + const ZForwardingTable* old_forwarding_table, + ZPageAllocator* page_allocator) : + ZGeneration(ZGenerationId::young, page_table, page_allocator), + _active_type(ZYoungType::none), + _tenuring_threshold(0), + _remembered(page_table, old_forwarding_table, page_allocator), + _jfr_tracer() { + ZGeneration::_young = this; +} + +uint ZGenerationYoung::tenuring_threshold() { + return _tenuring_threshold; +} + +class ZGenerationCollectionScopeYoung : public StackObj { +private: + ZYoungTypeSetter _type_setter; + ZStatTimer _stat_timer; + +public: + ZGenerationCollectionScopeYoung(ZYoungType type, ConcurrentGCTimer* gc_timer) : + _type_setter(type), + _stat_timer(ZPhaseGenerationYoung[(int)type], gc_timer) { + // Update statistics and set the GC timer + ZGeneration::young()->at_collection_start(gc_timer); + } + + ~ZGenerationCollectionScopeYoung() { + // Update statistics and clear the GC timer + ZGeneration::young()->at_collection_end(); + } +}; + +bool ZGenerationYoung::should_record_stats() { + return type() == ZYoungType::minor || + type() == ZYoungType::major_partial_roots; +} + +void ZGenerationYoung::collect(ZYoungType type, ConcurrentGCTimer* timer) { + ZGenerationCollectionScopeYoung scope(type, timer); + + // Phase 1: Pause Mark Start + pause_mark_start(); + + // Phase 2: Concurrent Mark + concurrent_mark(); + + abortpoint(); + + // Phase 3: Pause Mark End + while (!pause_mark_end()) { + // Phase 3.5: Concurrent Mark Continue + concurrent_mark_continue(); + + abortpoint(); + } + + // Phase 4: Concurrent Mark Free + concurrent_mark_free(); + + abortpoint(); + + // Phase 5: Concurrent Reset Relocation Set + 
concurrent_reset_relocation_set(); + + abortpoint(); + + // Phase 6: Concurrent Select Relocation Set + concurrent_select_relocation_set(); + + abortpoint(); + + // Phase 7: Pause Relocate Start + pause_relocate_start(); + + // Note that we can't have an abortpoint here. We need + // to let concurrent_relocate() call abort_page() + // on the remaining entries in the relocation set. + + // Phase 8: Concurrent Relocate + concurrent_relocate(); +} + +class VM_ZMarkStartYoungAndOld : public VM_ZOperation { +public: + virtual VMOp_Type type() const { + return VMOp_ZMarkStartYoungAndOld; + } + + virtual bool block_jni_critical() const { + return true; + } + + virtual bool do_operation() { + ZStatTimerYoung timer(ZPhasePauseMarkStartYoungAndOld); + ZServiceabilityPauseTracer tracer; + + ZCollectedHeap::heap()->increment_total_collections(true /* full */); + ZGeneration::young()->mark_start(); + ZGeneration::old()->mark_start(); + + return true; + } +}; + +class VM_ZMarkStartYoung : public VM_ZOperation { +public: + virtual VMOp_Type type() const { + return VMOp_ZMarkStartYoung; + } + + virtual bool block_jni_critical() const { + return true; + } + + virtual bool do_operation() { + ZStatTimerYoung timer(ZPhasePauseMarkStartYoung); + ZServiceabilityPauseTracer tracer; + + ZCollectedHeap::heap()->increment_total_collections(false /* full */); + ZGeneration::young()->mark_start(); + + return true; + } +}; + +void ZGenerationYoung::flip_mark_start() { + ZGlobalsPointers::flip_young_mark_start(); + ZBarrierSet::assembler()->patch_barriers(); + ZVerify::on_color_flip(); +} + +void ZGenerationYoung::flip_relocate_start() { + ZGlobalsPointers::flip_young_relocate_start(); + ZBarrierSet::assembler()->patch_barriers(); + ZVerify::on_color_flip(); +} + +void ZGenerationYoung::pause_mark_start() { + if (type() == ZYoungType::major_full_roots || + type() == ZYoungType::major_partial_roots) { + VM_ZMarkStartYoungAndOld().pause(); + } else { + VM_ZMarkStartYoung().pause(); + } +} + +void ZGenerationYoung::concurrent_mark() { + ZStatTimerYoung timer(ZPhaseConcurrentMarkYoung); + mark_roots(); + mark_follow(); +} + +class VM_ZMarkEndYoung : public VM_ZOperation { +public: + virtual VMOp_Type type() const { + return VMOp_ZMarkEndYoung; + } + + virtual bool do_operation() { + ZStatTimerYoung timer(ZPhasePauseMarkEndYoung); + ZServiceabilityPauseTracer tracer; + + return ZGeneration::young()->mark_end(); + } +}; + + +bool ZGenerationYoung::pause_mark_end() { + return VM_ZMarkEndYoung().pause(); +} + +void ZGenerationYoung::concurrent_mark_continue() { + ZStatTimerYoung timer(ZPhaseConcurrentMarkContinueYoung); + mark_follow(); +} + +void ZGenerationYoung::concurrent_mark_free() { + ZStatTimerYoung timer(ZPhaseConcurrentMarkFreeYoung); + mark_free(); +} + +void ZGenerationYoung::concurrent_reset_relocation_set() { + ZStatTimerYoung timer(ZPhaseConcurrentResetRelocationSetYoung); + reset_relocation_set(); +} + +void ZGenerationYoung::select_tenuring_threshold(ZRelocationSetSelectorStats stats, bool promote_all) { + const char* reason = ""; + if (promote_all) { + _tenuring_threshold = 0; + reason = "Promote All"; + } else if (ZTenuringThreshold != -1) { + _tenuring_threshold = static_cast(ZTenuringThreshold); + reason = "ZTenuringThreshold"; + } else { + _tenuring_threshold = compute_tenuring_threshold(stats); + reason = "Computed"; + } + log_info(gc, reloc)("Using tenuring threshold: %d (%s)", _tenuring_threshold, reason); +} + +uint ZGenerationYoung::compute_tenuring_threshold(ZRelocationSetSelectorStats stats) { + 
size_t young_live_total = 0; + size_t young_live_last = 0; + double young_life_expectancy_sum = 0.0; + uint young_life_expectancy_samples = 0; + uint last_populated_age = 0; + size_t last_populated_live = 0; + + for (uint i = 0; i <= ZPageAgeMax; ++i) { + const ZPageAge age = static_cast(i); + const size_t young_live = stats.small(age).live() + stats.medium(age).live() + stats.large(age).live(); + if (young_live > 0) { + last_populated_age = i; + last_populated_live = young_live; + if (young_live_last > 0) { + young_life_expectancy_sum += double(young_live) / double(young_live_last); + young_life_expectancy_samples++; + } + } + young_live_total += young_live; + young_live_last = young_live; + } + + if (young_live_total == 0) { + return 0; + } + + const size_t young_used_at_mark_start = ZGeneration::young()->stat_heap()->used_generation_at_mark_start(); + const size_t young_garbage = ZGeneration::young()->stat_heap()->garbage_at_mark_end(); + const size_t young_allocated = ZGeneration::young()->stat_heap()->allocated_at_mark_end(); + const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity(); + + // The life expectancy shows by what factor on average one age changes between + // two ages in the age table. Values below 1 indicate generational behaviour where + // the live bytes is shrinking from age to age. Values at or above 1 indicate + // anti-generational patterns where the live bytes isn't going down or grows + // from age to age. + const double young_life_expectancy = young_life_expectancy_samples == 0 ? 1.0 : young_life_expectancy_sum / young_life_expectancy_samples; + + // The life decay factor is the reciprocal of the life expectancy. Therefore, + // values at or below 1 indicate anti-generational behaviour where the live + // bytes either stays the same or grows from age to age. Conversely, values + // above 1 indicate generational behaviour where the live bytes shrinks from + // age to age. The more it shrinks from age to age, the higher the value. + // Therefore, the higher this value is, the higher we want the tenuring + // threshold to be, as we exponentially avoid promotions to the old generation. + const double young_life_decay_factor = 1.0 / young_life_expectancy; + + // The young residency reciprocal indicates the inverse of how small the + // resident part of the young generation is compared to the entire heap. Values + // below 1 indicate it is relatively big. Conversely, values above 1 indicate + // it is relatively small. + const double young_residency_reciprocal = double(soft_max_capacity) / double(young_live_total); + + // The old residency factor clamps the old residency reciprocal to + // at least 1. That implies this factor is 1 unless the resident memory of + // the old generation is small compared to the residency of the heap. The + // smaller the old generation is, the higher this value is. The reasoning + // is that the less memory that is resident in the old generation, the less + // point there is in promoting objects to the old generation, as the amount + // of work it removes from the young generation collections becomes less + // and less valuable, the smaller the old generation is. + const double young_residency_factor = MAX2(young_residency_reciprocal, 1.0); + + // The allocated to garbage ratio, compares the ratio of newly allocated + // memory since GC started to how much garbage we are freeing up. The higher + // the value, the harder it is for the YC to keep up with the allocation rate. 
+ const double allocated_garbage_ratio = double(young_allocated) / double(young_garbage + 1); + + // We slow down the young residency factor with a log. A larger log slows + // it down faster. We select a log between 2 - 16 scaled by the allocated + // to garbage factor. This selects a larger log when the GC has a harder + // time keeping up, which causes more promotions to the old generation, + // making the young collections faster so they can catch up. + const double young_log = MAX2(MIN2(allocated_garbage_ratio, 1.0) * 16, 2.0); + + // The young log residency is essentially the young residency factor, but slowed + // down by the log_{young_log}(X) function described above. + const double young_log_residency = log(young_residency_factor) / log(young_log); + + // The tenuring threshold is computed as the young life decay factor times + // the young residency factor. That takes into consideration that the + // value should be higher the more generational the age table is, and higher + // the more insignificant the footprint of young resident memory is, yet breaks + // if the GC is finding it hard to keep up with the allocation rate. + const double tenuring_threshold_raw = young_life_decay_factor * young_log_residency; + + log_trace(gc, reloc)("Young Allocated: " SIZE_FORMAT "M", young_allocated / M); + log_trace(gc, reloc)("Young Garbage: " SIZE_FORMAT "M", young_garbage / M); + log_debug(gc, reloc)("Allocated To Garbage: %.1f", allocated_garbage_ratio); + log_trace(gc, reloc)("Young Log: %.1f", young_log); + log_trace(gc, reloc)("Young Residency Reciprocal: %.1f", young_residency_reciprocal); + log_trace(gc, reloc)("Young Residency Factor: %.1f", young_residency_factor); + log_debug(gc, reloc)("Young Log Residency: %.1f", young_log_residency); + log_debug(gc, reloc)("Life Decay Factor: %.1f", young_life_decay_factor); + + // Round to an integer as we can't have non-integral tenuring threshold. 
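To make the formula concrete, a small standalone calculation with made-up inputs (illustrative numbers, not measurements): live bytes shrinking about 4x per age gives a life expectancy of 0.25 and a decay factor of 4, young live memory at 1/8 of the soft max capacity gives a residency factor of 8, and an allocated-to-garbage ratio of 0.5 selects a log base of 8. The clamping against the bounds computed below is included at the end:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    int main() {
      // Illustrative inputs, not measured values.
      const double young_life_expectancy   = 0.25;  // live bytes shrink ~4x per age
      const double young_residency_factor  = 8.0;   // young live memory is 1/8 of soft max capacity
      const double allocated_garbage_ratio = 0.5;   // allocation at half the reclamation rate

      const double young_life_decay_factor = 1.0 / young_life_expectancy;                          // 4.0
      const double young_log = std::max(std::min(allocated_garbage_ratio, 1.0) * 16, 2.0);         // 8.0
      const double young_log_residency = std::log(young_residency_factor) / std::log(young_log);   // 1.0
      const double tenuring_threshold_raw = young_life_decay_factor * young_log_residency;         // 4.0

      // Clamp as in the code below; 15 stands in for
      // MIN2(last_populated_age + 1, MaxTenuringThreshold).
      const unsigned lower_bound = 1;
      const unsigned upper_bound = 15;
      const unsigned threshold =
          std::clamp((unsigned)std::lround(tenuring_threshold_raw), lower_bound, upper_bound);

      std::printf("tenuring threshold = %u\n", threshold);   // prints 4
    }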
+ const uint upper_bound = MIN2(last_populated_age + 1u, (uint)MaxTenuringThreshold); + const uint lower_bound = MIN2(1u, upper_bound); + const uint tenuring_threshold = clamp((uint)round(tenuring_threshold_raw), lower_bound, upper_bound); + + return tenuring_threshold; +} + +void ZGenerationYoung::concurrent_select_relocation_set() { + ZStatTimerYoung timer(ZPhaseConcurrentSelectRelocationSetYoung); + const bool promote_all = type() == ZYoungType::major_full_preclean; + select_relocation_set(_id, promote_all); +} + +class VM_ZRelocateStartYoung : public VM_ZOperation { +public: + virtual VMOp_Type type() const { + return VMOp_ZRelocateStartYoung; + } + + virtual bool block_jni_critical() const { + return true; + } + + virtual bool do_operation() { + ZStatTimerYoung timer(ZPhasePauseRelocateStartYoung); + ZServiceabilityPauseTracer tracer; + + ZGeneration::young()->relocate_start(); + + return true; + } +}; + +void ZGenerationYoung::pause_relocate_start() { + VM_ZRelocateStartYoung().pause(); +} + +void ZGenerationYoung::concurrent_relocate() { + ZStatTimerYoung timer(ZPhaseConcurrentRelocatedYoung); + relocate(); +} + +void ZGenerationYoung::mark_start() { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + + // Change good colors + flip_mark_start(); + + // Retire allocating pages + ZAllocator::eden()->retire_pages(); + for (ZPageAge i = ZPageAge::survivor1; i <= ZPageAge::survivor14; i = static_cast(static_cast(i) + 1)) { + ZAllocator::relocation(i)->retire_pages(); + } + + // Reset allocated/reclaimed/used statistics + reset_statistics(); + + // Increment sequence number + _seqnum++; + + // Enter mark phase + set_phase(Phase::Mark); + + // Reset marking information and mark roots + _mark.start(); + + // Flip remembered set bits + _remembered.flip(); + + // Update statistics + stat_heap()->at_mark_start(_page_allocator->stats(this)); +} + +void ZGenerationYoung::mark_roots() { + ZStatTimerYoung timer(ZSubPhaseConcurrentMarkRootsYoung); + _mark.mark_young_roots(); +} + +void ZGenerationYoung::mark_follow() { + // Combine following with scanning the remembered set + ZStatTimerYoung timer(ZSubPhaseConcurrentMarkFollowYoung); + _remembered.scan_and_follow(&_mark); +} + +bool ZGenerationYoung::mark_end() { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + + // End marking + if (!_mark.end()) { + // Marking not completed, continue concurrent mark + return false; + } + + // Enter mark completed phase + set_phase(Phase::MarkComplete); + + // Update statistics + stat_heap()->at_mark_end(_page_allocator->stats(this)); + + // Notify JVMTI that some tagmap entry objects may have died. 
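As an aside on the survivor loop in mark_start() above: it steps through the ZPageAge enum by round-tripping through the underlying integer type, since scoped enums have no operator++. The idiom in isolation, with a hypothetical Age enum:

    #include <cstdio>
    #include <type_traits>

    enum class Age : unsigned { eden = 0, survivor1 = 1, survivor2 = 2, old = 3 };

    // Scoped enums have no operator++, so advance via the underlying type.
    static Age next(Age a) {
      using U = std::underlying_type_t<Age>;
      return static_cast<Age>(static_cast<U>(a) + 1);
    }

    int main() {
      for (Age a = Age::survivor1; a <= Age::survivor2; a = next(a)) {
        std::printf("retire pages of age %u\n", static_cast<unsigned>(a));
      }
    }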
+ JvmtiTagMap::set_needs_cleaning(); + + return true; +} + +void ZGenerationYoung::relocate_start() { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + + // Change good colors + flip_relocate_start(); + + // Enter relocate phase + set_phase(Phase::Relocate); + + // Update statistics + stat_heap()->at_relocate_start(_page_allocator->stats(this)); + + _relocate.start(); +} + +void ZGenerationYoung::relocate() { + // Relocate relocation set + _relocate.relocate(&_relocation_set); + + // Update statistics + stat_heap()->at_relocate_end(_page_allocator->stats(this), should_record_stats()); +} + +void ZGenerationYoung::flip_promote(ZPage* from_page, ZPage* to_page) { + _page_table->replace(from_page, to_page); + + // Update statistics + _page_allocator->promote_used(from_page->size()); + increase_freed(from_page->size()); + increase_promoted(from_page->live_bytes()); +} + +void ZGenerationYoung::in_place_relocate_promote(ZPage* from_page, ZPage* to_page) { + _page_table->replace(from_page, to_page); + + // Update statistics + _page_allocator->promote_used(from_page->size()); +} + +void ZGenerationYoung::register_flip_promoted(const ZArray& pages) { + _relocation_set.register_flip_promoted(pages); +} + +void ZGenerationYoung::register_in_place_relocate_promoted(ZPage* page) { + _relocation_set.register_in_place_relocate_promoted(page); +} + +void ZGenerationYoung::register_with_remset(ZPage* page) { + _remembered.register_found_old(page); +} + +ZGenerationTracer* ZGenerationYoung::jfr_tracer() { + return &_jfr_tracer; +} + +ZGenerationOld::ZGenerationOld(ZPageTable* page_table, ZPageAllocator* page_allocator) : + ZGeneration(ZGenerationId::old, page_table, page_allocator), + _reference_processor(&_workers), + _weak_roots_processor(&_workers), + _unload(&_workers), + _total_collections_at_start(0), + _young_seqnum_at_reloc_start(0), + _jfr_tracer() { + ZGeneration::_old = this; +} + +class ZGenerationCollectionScopeOld : public StackObj { +private: + ZStatTimer _stat_timer; + ZDriverUnlocker _unlocker; + +public: + ZGenerationCollectionScopeOld(ConcurrentGCTimer* gc_timer) : + _stat_timer(ZPhaseGenerationOld, gc_timer), + _unlocker() { + // Update statistics and set the GC timer + ZGeneration::old()->at_collection_start(gc_timer); + } + + ~ZGenerationCollectionScopeOld() { + // Update statistics and clear the GC timer + ZGeneration::old()->at_collection_end(); + } +}; + +bool ZGenerationOld::should_record_stats() { + return true; +} + +void ZGenerationOld::collect(ConcurrentGCTimer* timer) { + ZGenerationCollectionScopeOld scope(timer); + + // Phase 1: Concurrent Mark + concurrent_mark(); + + abortpoint(); + + // Phase 2: Pause Mark End + while (!pause_mark_end()) { + // Phase 2.5: Concurrent Mark Continue + concurrent_mark_continue(); + + abortpoint(); + } + + // Phase 3: Concurrent Mark Free + concurrent_mark_free(); + + abortpoint(); + + // Phase 4: Concurrent Process Non-Strong References + concurrent_process_non_strong_references(); + + abortpoint(); + + // Phase 5: Concurrent Reset Relocation Set + concurrent_reset_relocation_set(); + + abortpoint(); + + // Phase 6: Pause Verify + pause_verify(); + + // Phase 7: Concurrent Select Relocation Set + concurrent_select_relocation_set(); + + abortpoint(); + + { + ZDriverLocker locker; + + // Phase 8: Concurrent Remap Roots + concurrent_remap_young_roots(); + + abortpoint(); + + // Phase 9: Pause Relocate Start + pause_relocate_start(); + } + + // Note that we can't have an abortpoint here. 
We need + // to let concurrent_relocate() call abort_page() + // on the remaining entries in the relocation set. + + // Phase 10: Concurrent Relocate + concurrent_relocate(); +} + +void ZGenerationOld::flip_mark_start() { + ZGlobalsPointers::flip_old_mark_start(); + ZBarrierSet::assembler()->patch_barriers(); + ZVerify::on_color_flip(); +} + +void ZGenerationOld::flip_relocate_start() { + ZGlobalsPointers::flip_old_relocate_start(); + ZBarrierSet::assembler()->patch_barriers(); + ZVerify::on_color_flip(); +} + +void ZGenerationOld::concurrent_mark() { + ZStatTimerOld timer(ZPhaseConcurrentMarkOld); + ZBreakpoint::at_after_marking_started(); + mark_roots(); + mark_follow(); + ZBreakpoint::at_before_marking_completed(); +} + +class VM_ZMarkEndOld : public VM_ZOperation { +public: + virtual VMOp_Type type() const { + return VMOp_ZMarkEndOld; + } + + virtual bool do_operation() { + ZStatTimerOld timer(ZPhasePauseMarkEndOld); + ZServiceabilityPauseTracer tracer; + + return ZGeneration::old()->mark_end(); + } +}; + +bool ZGenerationOld::pause_mark_end() { + return VM_ZMarkEndOld().pause(); +} + +void ZGenerationOld::concurrent_mark_continue() { + ZStatTimerOld timer(ZPhaseConcurrentMarkContinueOld); + mark_follow(); +} + +void ZGenerationOld::concurrent_mark_free() { + ZStatTimerOld timer(ZPhaseConcurrentMarkFreeOld); + mark_free(); +} + +void ZGenerationOld::concurrent_process_non_strong_references() { + ZStatTimerOld timer(ZPhaseConcurrentProcessNonStrongOld); + ZBreakpoint::at_after_reference_processing_started(); + process_non_strong_references(); +} + +void ZGenerationOld::concurrent_reset_relocation_set() { + ZStatTimerOld timer(ZPhaseConcurrentResetRelocationSetOld); + reset_relocation_set(); +} + +class VM_ZVerifyOld : public VM_Operation { +public: + virtual VMOp_Type type() const { + return VMOp_ZVerifyOld; + } + + virtual bool skip_thread_oop_barriers() const { + return true; + } + + virtual void doit() { + ZVerify::after_weak_processing(); + } + + void pause() { + VMThread::execute(this); + } +}; + +void ZGenerationOld::pause_verify() { + // Note that we block out concurrent young collections when performing the + // verification. The verification checks that store good oops in the + // old generation have a corresponding remembered set entry, or is in + // a store barrier buffer (hence asynchronously creating such entries). + // That lookup would otherwise race with installation of base pointers + // into the store barrier buffer. We dodge that race by blocking out + // young collections during this verification. 
+ if (ZVerifyRoots || ZVerifyObjects) { + // Limited verification + ZDriverLocker locker; + VM_ZVerifyOld().pause(); + } +} + +void ZGenerationOld::concurrent_select_relocation_set() { + ZStatTimerOld timer(ZPhaseConcurrentSelectRelocationSetOld); + select_relocation_set(_id, false /* promote_all */); +} + +class VM_ZRelocateStartOld : public VM_ZOperation { +public: + virtual VMOp_Type type() const { + return VMOp_ZRelocateStartOld; + } + + virtual bool block_jni_critical() const { + return true; + } + + virtual bool do_operation() { + ZStatTimerOld timer(ZPhasePauseRelocateStartOld); + ZServiceabilityPauseTracer tracer; + + ZGeneration::old()->relocate_start(); + + return true; + } +}; + +void ZGenerationOld::pause_relocate_start() { + VM_ZRelocateStartOld().pause(); +} + +void ZGenerationOld::concurrent_relocate() { + ZStatTimerOld timer(ZPhaseConcurrentRelocatedOld); + relocate(); +} + +void ZGenerationOld::concurrent_remap_young_roots() { + ZStatTimerOld timer(ZPhaseConcurrentRemapRootsOld); + remap_young_roots(); +} + +void ZGenerationOld::mark_start() { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + + // Verification + ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_strong); + + // Change good colors + flip_mark_start(); + + // Retire allocating pages + ZAllocator::old()->retire_pages(); + + // Reset allocated/reclaimed/used statistics + reset_statistics(); + + // Reset encountered/dropped/enqueued statistics + _reference_processor.reset_statistics(); + + // Increment sequence number + _seqnum++; + + // Enter mark phase + set_phase(Phase::Mark); + + // Reset marking information and mark roots + _mark.start(); + + // Update statistics + stat_heap()->at_mark_start(_page_allocator->stats(this)); + + // Note that we start a marking cycle. + // Unlike other GCs, the color switch implicitly changes the nmethods + // to be armed, and the thread-local disarm values are lazily updated + // when JavaThreads wake up from safepoints. + CodeCache::on_gc_marking_cycle_start(); + + _total_collections_at_start = ZCollectedHeap::heap()->total_collections(); +} + +void ZGenerationOld::mark_roots() { + ZStatTimerOld timer(ZSubPhaseConcurrentMarkRootsOld); + _mark.mark_old_roots(); +} + +void ZGenerationOld::mark_follow() { + ZStatTimerOld timer(ZSubPhaseConcurrentMarkFollowOld); + _mark.mark_follow(); +} + +bool ZGenerationOld::mark_end() { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + + // Try end marking + if (!_mark.end()) { + // Marking not completed, continue concurrent mark + return false; + } + + // Enter mark completed phase + set_phase(Phase::MarkComplete); + + // Verify after mark + ZVerify::after_mark(); + + // Update statistics + stat_heap()->at_mark_end(_page_allocator->stats(this)); + + // Block resurrection of weak/phantom references + ZResurrection::block(); + + // Prepare to unload stale metadata and nmethods + _unload.prepare(); + + // Notify JVMTI that some tagmap entry objects may have died. + JvmtiTagMap::set_needs_cleaning(); + + // Note that we finished a marking cycle. + // Unlike other GCs, we do not arm the nmethods + // when marking terminates. 
+ CodeCache::on_gc_marking_cycle_finish(); + + return true; +} + +void ZGenerationOld::set_soft_reference_policy(bool clear) { + _reference_processor.set_soft_reference_policy(clear); +} + +class ZRendezvousHandshakeClosure : public HandshakeClosure { +public: + ZRendezvousHandshakeClosure() : + HandshakeClosure("ZRendezvous") {} + + void do_thread(Thread* thread) { + // Does nothing + } +}; + +class ZRendezvousGCThreads: public VM_Operation { + public: + VMOp_Type type() const { return VMOp_ZRendezvousGCThreads; } + + virtual bool evaluate_at_safepoint() const { + // We only care about synchronizing the GC threads. + // Leave the Java threads running. + return false; + } + + virtual bool skip_thread_oop_barriers() const { + fatal("Concurrent VMOps should not call this"); + return true; + } + + void doit() { + // Light weight "handshake" of the GC threads + SuspendibleThreadSet::synchronize(); + SuspendibleThreadSet::desynchronize(); + }; +}; + + +void ZGenerationOld::process_non_strong_references() { + // Process Soft/Weak/Final/PhantomReferences + _reference_processor.process_references(); + + // Process weak roots + _weak_roots_processor.process_weak_roots(); + + // Unlink stale metadata and nmethods + _unload.unlink(); + + // Perform a handshake. This is needed 1) to make sure that stale + // metadata and nmethods are no longer observable. And 2), to + // prevent the race where a mutator first loads an oop, which is + // logically null but not yet cleared. Then this oop gets cleared + // by the reference processor and resurrection is unblocked. At + // this point the mutator could see the unblocked state and pass + // this invalid oop through the normal barrier path, which would + // incorrectly try to mark the oop. + ZRendezvousHandshakeClosure cl; + Handshake::execute(&cl); + + // GC threads are not part of the handshake above. + // Explicitly "handshake" them. + ZRendezvousGCThreads op; + VMThread::execute(&op); + + // Unblock resurrection of weak/phantom references + ZResurrection::unblock(); + + // Purge stale metadata and nmethods that were unlinked + _unload.purge(); + + // Enqueue Soft/Weak/Final/PhantomReferences. Note that this + // must be done after unblocking resurrection. Otherwise the + // Finalizer thread could call Reference.get() on the Finalizers + // that were just enqueued, which would incorrectly return null + // during the resurrection block window, since such referents + // are only Finalizable marked. + _reference_processor.enqueue_references(); + + // Clear old markings claim bits. + // Note: Clearing _claim_strong also clears _claim_finalizable. 
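Returning to the rendezvous above: the handshake body is intentionally empty, because the synchronization is the handshake itself. Once every thread has passed the checkpoint, none of them can still be acting on pre-handshake state (stale metadata and nmethods, or an oop loaded before resurrection was unblocked). A toy model of that "every thread must pass a checkpoint" rendezvous using a C++20 latch (illustrative only, not HotSpot's handshake mechanism):

    #include <atomic>
    #include <latch>
    #include <thread>
    #include <vector>

    int main() {
      const int nthreads = 4;
      std::atomic<bool> handshake_requested{false};
      std::atomic<bool> stop{false};
      std::latch        arrived(nthreads);

      std::vector<std::thread> workers;
      for (int i = 0; i < nthreads; i++) {
        workers.emplace_back([&] {
          bool acknowledged = false;
          while (!stop.load()) {
            // ... mutator work ...
            if (handshake_requested.load() && !acknowledged) {
              arrived.count_down();   // passing the checkpoint is the whole "operation"
              acknowledged = true;
            }
          }
        });
      }

      handshake_requested.store(true);
      arrived.wait();   // after this, every worker has passed the checkpoint
      stop.store(true);
      for (auto& t : workers) {
        t.join();
      }
    }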
+ ClassLoaderDataGraph::clear_claimed_marks(ClassLoaderData::_claim_strong); +} + +void ZGenerationOld::relocate_start() { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + + // Finish unloading stale metadata and nmethods + _unload.finish(); + + // Change good colors + flip_relocate_start(); + + // Enter relocate phase + set_phase(Phase::Relocate); + + // Update statistics + stat_heap()->at_relocate_start(_page_allocator->stats(this)); + + // Need to know the remset parity when relocating objects + _young_seqnum_at_reloc_start = ZGeneration::young()->seqnum(); + + _relocate.start(); +} + +void ZGenerationOld::relocate() { + // Relocate relocation set + _relocate.relocate(&_relocation_set); + + // Update statistics + stat_heap()->at_relocate_end(_page_allocator->stats(this), should_record_stats()); +} + +class ZRemapOopClosure : public OopClosure { +public: + virtual void do_oop(oop* p) { + ZBarrier::load_barrier_on_oop_field((volatile zpointer*)p); + } + + virtual void do_oop(narrowOop* p) { + ShouldNotReachHere(); + } +}; + +class ZRemapThreadClosure : public ThreadClosure { +public: + virtual void do_thread(Thread* thread) { + JavaThread* const jt = JavaThread::cast(thread); + StackWatermarkSet::finish_processing(jt, nullptr, StackWatermarkKind::gc); + } +}; + +class ZRemapNMethodClosure : public NMethodClosure { +private: + ZBarrierSetNMethod* const _bs_nm; + +public: + ZRemapNMethodClosure() : + _bs_nm(static_cast(BarrierSet::barrier_set()->barrier_set_nmethod())) {} + + virtual void do_nmethod(nmethod* nm) { + ZLocker locker(ZNMethod::lock_for_nmethod(nm)); + if (_bs_nm->is_armed(nm)) { + // Heal barriers + ZNMethod::nmethod_patch_barriers(nm); + + // Heal oops + ZUncoloredRootProcessOopClosure cl(ZNMethod::color(nm)); + ZNMethod::nmethod_oops_do_inner(nm, &cl); + + log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by old remapping", p2i(nm)); + + // Disarm + _bs_nm->disarm(nm); + } + } +}; + +typedef ClaimingCLDToOopClosure ZRemapCLDClosure; + +class ZRemapYoungRootsTask : public ZTask { +private: + ZGenerationPagesParallelIterator _old_pages_parallel_iterator; + + ZRootsIteratorAllColored _roots_colored; + ZRootsIteratorAllUncolored _roots_uncolored; + + ZRemapOopClosure _cl_colored; + ZRemapCLDClosure _cld_cl; + + ZRemapThreadClosure _thread_cl; + ZRemapNMethodClosure _nm_cl; + +public: + ZRemapYoungRootsTask(ZPageTable* page_table, ZPageAllocator* page_allocator) : + ZTask("ZRemapYoungRootsTask"), + _old_pages_parallel_iterator(page_table, ZGenerationId::old, page_allocator), + _roots_colored(ZGenerationIdOptional::old), + _roots_uncolored(ZGenerationIdOptional::old), + _cl_colored(), + _cld_cl(&_cl_colored), + _thread_cl(), + _nm_cl() { + ClassLoaderDataGraph_lock->lock(); + } + + ~ZRemapYoungRootsTask() { + ClassLoaderDataGraph_lock->unlock(); + } + + virtual void work() { + { + ZStatTimerWorker timer(ZSubPhaseConcurrentRemapRootsColoredOld); + _roots_colored.apply(&_cl_colored, + &_cld_cl); + } + + { + ZStatTimerWorker timer(ZSubPhaseConcurrentRemapRootsUncoloredOld); + _roots_uncolored.apply(&_thread_cl, + &_nm_cl); + } + + { + ZStatTimerWorker timer(ZSubPhaseConcurrentRemapRememberedOld); + _old_pages_parallel_iterator.do_pages([&](ZPage* page) { + // Visit all object fields that potentially pointing into young generation + page->oops_do_current_remembered(ZBarrier::load_barrier_on_oop_field); + return true; + }); + } + } +}; + +// This function is used by the old generation to purge roots to the young generation from +// young remap bit 
errors, before the old generation performs old relocate start. By doing +// that, we can know that double remap bit errors don't need to be concerned with double +// remap bit errors, in the young generation roots. That makes it possible to figure out +// which generation table to use when remapping a pointer, without needing an extra adjust +// phase that walks the entire heap. +void ZGenerationOld::remap_young_roots() { + // We upgrade the number of workers to the number last used by the young generation. The + // reason is that this code is run under the driver lock, which means that a young generation + // collection might be waiting for this code to complete. + uint prev_nworkers = _workers.active_workers(); + uint remap_nworkers = clamp(ZGeneration::young()->workers()->active_workers() + prev_nworkers, 1u, ZOldGCThreads); + _workers.set_active_workers(remap_nworkers); + + // TODO: The STS joiner is only needed to satisfy z_assert_is_barrier_safe that doesn't + // understand the driver locker. Consider making the assert aware of the driver locker. + SuspendibleThreadSetJoiner sts_joiner; + + ZRemapYoungRootsTask task(_page_table, _page_allocator); + workers()->run(&task); + _workers.set_active_workers(prev_nworkers); +} + +uint ZGenerationOld::total_collections_at_start() const { + return _total_collections_at_start; +} + +ZGenerationTracer* ZGenerationOld::jfr_tracer() { + return &_jfr_tracer; +} diff --git a/src/hotspot/share/gc/z/zGeneration.hpp b/src/hotspot/share/gc/z/zGeneration.hpp new file mode 100644 index 0000000000000..23736f45b7be7 --- /dev/null +++ b/src/hotspot/share/gc/z/zGeneration.hpp @@ -0,0 +1,322 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_Z_ZGENERATION_HPP +#define SHARE_GC_Z_ZGENERATION_HPP + +#include "gc/z/zForwardingTable.hpp" +#include "gc/z/zGenerationId.hpp" +#include "gc/z/zMark.hpp" +#include "gc/z/zReferenceProcessor.hpp" +#include "gc/z/zRelocate.hpp" +#include "gc/z/zRelocationSet.hpp" +#include "gc/z/zRemembered.hpp" +#include "gc/z/zStat.hpp" +#include "gc/z/zTracer.hpp" +#include "gc/z/zUnload.hpp" +#include "gc/z/zWeakRootsProcessor.hpp" +#include "gc/z/zWorkers.hpp" +#include "memory/allocation.hpp" + +class ThreadClosure; +class ZForwardingTable; +class ZGenerationOld; +class ZGenerationYoung; +class ZPage; +class ZPageAllocator; +class ZPageTable; +class ZRelocationSetSelector; + +class ZGeneration { + friend class ZForwardingTest; + friend class ZLiveMapTest; + +protected: + static ZGenerationYoung* _young; + static ZGenerationOld* _old; + + enum class Phase { + Mark, + MarkComplete, + Relocate + }; + + const ZGenerationId _id; + ZPageAllocator* const _page_allocator; + ZPageTable* const _page_table; + ZForwardingTable _forwarding_table; + ZWorkers _workers; + ZMark _mark; + ZRelocate _relocate; + ZRelocationSet _relocation_set; + + volatile size_t _freed; + volatile size_t _promoted; + volatile size_t _compacted; + + Phase _phase; + uint32_t _seqnum; + + ZStatHeap _stat_heap; + ZStatCycle _stat_cycle; + ZStatWorkers _stat_workers; + ZStatMark _stat_mark; + ZStatRelocation _stat_relocation; + + ConcurrentGCTimer* _gc_timer; + + void free_empty_pages(ZRelocationSetSelector* selector, int bulk); + void flip_age_pages(const ZRelocationSetSelector* selector); + void flip_age_pages(const ZArray* pages); + + void mark_free(); + + void select_relocation_set(ZGenerationId generation, bool promote_all); + void reset_relocation_set(); + + ZGeneration(ZGenerationId id, ZPageTable* page_table, ZPageAllocator* page_allocator); + + void log_phase_switch(Phase from, Phase to); + +public: + bool is_initialized() const; + + // GC phases + void set_phase(Phase new_phase); + bool is_phase_relocate() const; + bool is_phase_mark() const; + bool is_phase_mark_complete() const; + const char* phase_to_string() const; + + uint32_t seqnum() const; + + ZGenerationId id() const; + ZGenerationIdOptional id_optional() const; + bool is_young() const; + bool is_old() const; + + static ZGenerationYoung* young(); + static ZGenerationOld* old(); + static ZGeneration* generation(ZGenerationId id); + + // Statistics + void reset_statistics(); + virtual bool should_record_stats() = 0; + ssize_t freed() const; + void increase_freed(size_t size); + size_t promoted() const; + void increase_promoted(size_t size); + size_t compacted() const; + void increase_compacted(size_t size); + + ConcurrentGCTimer* gc_timer() const; + void set_gc_timer(ConcurrentGCTimer* gc_timer); + void clear_gc_timer(); + + ZStatHeap* stat_heap(); + ZStatCycle* stat_cycle(); + ZStatWorkers* stat_workers(); + ZStatMark* stat_mark(); + ZStatRelocation* stat_relocation(); + + void at_collection_start(ConcurrentGCTimer* gc_timer); + void at_collection_end(); + + // Workers + ZWorkers* workers(); + uint active_workers() const; + void set_active_workers(uint nworkers); + + // Worker resizing + bool should_worker_resize(); + + ZPageTable* page_table() const; + const ZForwardingTable* forwarding_table() const; + ZForwarding* forwarding(zaddress_unsafe addr) const; + + ZRelocationSetParallelIterator relocation_set_parallel_iterator(); + + // Marking + template + void mark_object(zaddress addr); + template + void mark_object_if_active(zaddress addr); + void 
mark_flush_and_free(Thread* thread); + + // Relocation + void synchronize_relocation(); + void desynchronize_relocation(); + zaddress relocate_or_remap_object(zaddress_unsafe addr); + zaddress remap_object(zaddress_unsafe addr); + + // Threads + void threads_do(ThreadClosure* tc) const; +}; + +enum class ZYoungType { + minor, + major_full_preclean, + major_full_roots, + major_partial_roots, + none +}; + +class ZYoungTypeSetter { +public: + ZYoungTypeSetter(ZYoungType type); + ~ZYoungTypeSetter(); +}; + +class ZGenerationYoung : public ZGeneration { + friend class VM_ZMarkEndYoung; + friend class VM_ZMarkStartYoung; + friend class VM_ZMarkStartYoungAndOld; + friend class VM_ZRelocateStartYoung; + friend class ZYoungTypeSetter; + +private: + ZYoungType _active_type; + uint _tenuring_threshold; + ZRemembered _remembered; + ZYoungTracer _jfr_tracer; + + void flip_mark_start(); + void flip_relocate_start(); + + void mark_start(); + void mark_roots(); + void mark_follow(); + bool mark_end(); + void relocate_start(); + void relocate(); + + void pause_mark_start(); + void concurrent_mark(); + bool pause_mark_end(); + void concurrent_mark_continue(); + void concurrent_mark_free(); + void concurrent_reset_relocation_set(); + void concurrent_select_relocation_set(); + void pause_relocate_start(); + void concurrent_relocate(); + +public: + ZGenerationYoung(ZPageTable* page_table, + const ZForwardingTable* old_forwarding_table, + ZPageAllocator* page_allocator); + + ZYoungType type() const; + + void collect(ZYoungType type, ConcurrentGCTimer* timer); + + // Statistics + bool should_record_stats(); + + // Support for promoting object to the old generation + void flip_promote(ZPage* from_page, ZPage* to_page); + void in_place_relocate_promote(ZPage* from_page, ZPage* to_page); + + void register_flip_promoted(const ZArray& pages); + void register_in_place_relocate_promoted(ZPage* page); + + uint tenuring_threshold(); + void select_tenuring_threshold(ZRelocationSetSelectorStats stats, bool promote_all); + uint compute_tenuring_threshold(ZRelocationSetSelectorStats stats); + + // Add remembered set entries + void remember(volatile zpointer* p); + void remember_fields(zaddress addr); + + // Scan a remembered set entry + void scan_remembered_field(volatile zpointer* p); + + // Register old pages with remembered set + void register_with_remset(ZPage* page); + + // Serviceability + ZGenerationTracer* jfr_tracer(); + + // Verification + bool is_remembered(volatile zpointer* p) const; +}; + +class ZGenerationOld : public ZGeneration { + friend class VM_ZMarkEndOld; + friend class VM_ZMarkStartYoungAndOld; + friend class VM_ZRelocateStartOld; + +private: + ZReferenceProcessor _reference_processor; + ZWeakRootsProcessor _weak_roots_processor; + ZUnload _unload; + uint _total_collections_at_start; + uint32_t _young_seqnum_at_reloc_start; + ZOldTracer _jfr_tracer; + + void flip_mark_start(); + void flip_relocate_start(); + + void mark_start(); + void mark_roots(); + void mark_follow(); + bool mark_end(); + void process_non_strong_references(); + void relocate_start(); + void relocate(); + void remap_young_roots(); + + void concurrent_mark(); + bool pause_mark_end(); + void concurrent_mark_continue(); + void concurrent_mark_free(); + void concurrent_process_non_strong_references(); + void concurrent_reset_relocation_set(); + void pause_verify(); + void concurrent_select_relocation_set(); + void pause_relocate_start(); + void concurrent_relocate(); + void concurrent_remap_young_roots(); + +public: + 
ZGenerationOld(ZPageTable* page_table, ZPageAllocator* page_allocator); + + void collect(ConcurrentGCTimer* timer); + + // Statistics + bool should_record_stats(); + + // Reference processing + ReferenceDiscoverer* reference_discoverer(); + void set_soft_reference_policy(bool clear); + + uint total_collections_at_start() const; + + bool active_remset_is_current() const; + + ZRelocateQueue* relocate_queue(); + + // Serviceability + ZGenerationTracer* jfr_tracer(); +}; + +#endif // SHARE_GC_Z_ZGENERATION_HPP diff --git a/src/hotspot/share/gc/z/zGeneration.inline.hpp b/src/hotspot/share/gc/z/zGeneration.inline.hpp new file mode 100644 index 0000000000000..3f138e0acbbb9 --- /dev/null +++ b/src/hotspot/share/gc/z/zGeneration.inline.hpp @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_Z_ZGENERATION_INLINE_HPP +#define SHARE_GC_Z_ZGENERATION_INLINE_HPP + +#include "gc/z/zGeneration.hpp" + +#include "gc/z/zAbort.inline.hpp" +#include "gc/z/zHeap.inline.hpp" +#include "gc/z/zWorkers.inline.hpp" +#include "utilities/debug.hpp" + +inline bool ZGeneration::is_phase_relocate() const { + return _phase == Phase::Relocate; +} + +inline bool ZGeneration::is_phase_mark() const { + return _phase == Phase::Mark; +} + +inline bool ZGeneration::is_phase_mark_complete() const { + return _phase == Phase::MarkComplete; +} + +inline uint32_t ZGeneration::seqnum() const { + return _seqnum; +} + +inline ZGenerationId ZGeneration::id() const { + return _id; +} + +inline ZGenerationIdOptional ZGeneration::id_optional() const { + return static_cast(_id); +} + +inline bool ZGeneration::is_young() const { + return _id == ZGenerationId::young; +} + +inline bool ZGeneration::is_old() const { + return _id == ZGenerationId::old; +} + +inline ZGenerationYoung* ZGeneration::young() { + return _young; +} + +inline ZGenerationOld* ZGeneration::old() { + return _old; +} + +inline ZGeneration* ZGeneration::generation(ZGenerationId id) { + if (id == ZGenerationId::young) { + return _young; + } else { + return _old; + } +} + +inline ZForwarding* ZGeneration::forwarding(zaddress_unsafe addr) const { + return _forwarding_table.get(addr); +} + +inline bool ZGeneration::should_worker_resize() { + return _workers.should_worker_resize(); +} + +inline ZStatHeap* ZGeneration::stat_heap() { + return &_stat_heap; +} + +inline ZStatCycle* ZGeneration::stat_cycle() { + return &_stat_cycle; +} + +inline ZStatWorkers* ZGeneration::stat_workers() { + return &_stat_workers; +} + +inline ZStatMark* ZGeneration::stat_mark() { + return &_stat_mark; +} + +inline ZStatRelocation* ZGeneration::stat_relocation() { + return &_stat_relocation; +} + +inline ZPageTable* ZGeneration::page_table() const { + return _page_table; +} + +inline const ZForwardingTable* ZGeneration::forwarding_table() const { + return &_forwarding_table; +} + +template +inline void ZGeneration::mark_object(zaddress addr) { + assert(is_phase_mark(), "Should be marking"); + _mark.mark_object(addr); +} + +template +inline void ZGeneration::mark_object_if_active(zaddress addr) { + if (is_phase_mark()) { + mark_object(addr); + } +} + +inline zaddress ZGeneration::relocate_or_remap_object(zaddress_unsafe addr) { + ZForwarding* const forwarding = _forwarding_table.get(addr); + if (forwarding == nullptr) { + // Not forwarding + return safe(addr); + } + + // Relocate object + return _relocate.relocate_object(forwarding, addr); +} + +inline zaddress ZGeneration::remap_object(zaddress_unsafe addr) { + ZForwarding* const forwarding = _forwarding_table.get(addr); + if (forwarding == nullptr) { + // Not forwarding + return safe(addr); + } + + // Remap object + return _relocate.forward_object(forwarding, addr); +} + +inline ZYoungType ZGenerationYoung::type() const { + assert(_active_type != ZYoungType::none, "Invalid type"); + return _active_type; +} + +inline void ZGenerationYoung::remember(volatile zpointer* p) { + _remembered.remember(p); +} + +inline void ZGenerationYoung::scan_remembered_field(volatile zpointer* p) { + _remembered.scan_field(p); +} + +inline bool ZGenerationYoung::is_remembered(volatile zpointer* p) const { + return _remembered.is_remembered(p); +} + +inline ReferenceDiscoverer* ZGenerationOld::reference_discoverer() { + return &_reference_processor; +} + +inline bool ZGenerationOld::active_remset_is_current() const { + 
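// Note on the parity check in this function: since the remembered-set bits
// flip every time a new young collection starts, the bits that were
// "current" when old relocation started remain current exactly when an even
// number of young collections has started since then. For example
// (illustrative numbers only): _young_seqnum_at_reloc_start == 7 with a
// current young seqnum of 9 gives a difference of 2, so the recorded bits
// are still current; an odd difference means they have become the
// "previous" bits.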
assert(_young_seqnum_at_reloc_start != 0, "Must be set before used"); + + // The remembered set bits flip every time a new young collection starts + const uint32_t seqnum = ZGeneration::young()->seqnum(); + const uint32_t seqnum_diff = seqnum - _young_seqnum_at_reloc_start; + const bool in_current = (seqnum_diff & 1u) == 0u; + return in_current; +} + +inline ZRelocateQueue* ZGenerationOld::relocate_queue() { + return _relocate.queue(); +} + +#endif // SHARE_GC_Z_ZGENERATION_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zGenerationId.hpp b/src/hotspot/share/gc/z/zGenerationId.hpp new file mode 100644 index 0000000000000..a93d269bbb5fc --- /dev/null +++ b/src/hotspot/share/gc/z/zGenerationId.hpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZGENERATIONID_HPP +#define SHARE_GC_Z_ZGENERATIONID_HPP + +#include "utilities/globalDefinitions.hpp" + +enum class ZGenerationId : uint8_t { + young, + old +}; + +enum class ZGenerationIdOptional : uint8_t { + young, + old, + none +}; + +#endif // SHARE_GC_Z_ZGENERATIONID_HPP diff --git a/src/hotspot/share/gc/z/zGlobals.cpp b/src/hotspot/share/gc/z/zGlobals.cpp index 28200e23b6e1a..460ed4f09fe5d 100644 --- a/src/hotspot/share/gc/z/zGlobals.cpp +++ b/src/hotspot/share/gc/z/zGlobals.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,9 +24,6 @@ #include "precompiled.hpp" #include "gc/z/zGlobals.hpp" -uint32_t ZGlobalPhase = ZPhaseRelocate; -uint32_t ZGlobalSeqNum = 1; - size_t ZPageSizeMediumShift; size_t ZPageSizeMedium; @@ -37,43 +34,3 @@ int ZObjectAlignmentMediumShift; const int& ZObjectAlignmentSmall = MinObjAlignmentInBytes; int ZObjectAlignmentMedium; - -uintptr_t ZAddressGoodMask; -uintptr_t ZAddressBadMask; -uintptr_t ZAddressWeakBadMask; - -static uint32_t* ZAddressCalculateBadMaskHighOrderBitsAddr() { - const uintptr_t addr = reinterpret_cast(&ZAddressBadMask); - return reinterpret_cast(addr + ZAddressBadMaskHighOrderBitsOffset); -} - -uint32_t* ZAddressBadMaskHighOrderBitsAddr = ZAddressCalculateBadMaskHighOrderBitsAddr(); - -size_t ZAddressOffsetBits; -uintptr_t ZAddressOffsetMask; -size_t ZAddressOffsetMax; - -size_t ZAddressMetadataShift; -uintptr_t ZAddressMetadataMask; - -uintptr_t ZAddressMetadataMarked; -uintptr_t ZAddressMetadataMarked0; -uintptr_t ZAddressMetadataMarked1; -uintptr_t ZAddressMetadataRemapped; -uintptr_t ZAddressMetadataFinalizable; - -const char* ZGlobalPhaseToString() { - switch (ZGlobalPhase) { - case ZPhaseMark: - return "Mark"; - - case ZPhaseMarkCompleted: - return "MarkCompleted"; - - case ZPhaseRelocate: - return "Relocate"; - - default: - return "Unknown"; - } -} diff --git a/src/hotspot/share/gc/z/zGlobals.hpp b/src/hotspot/share/gc/z/zGlobals.hpp index 3b65c75076af3..0ca089420806c 100644 --- a/src/hotspot/share/gc/z/zGlobals.hpp +++ b/src/hotspot/share/gc/z/zGlobals.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,31 +31,13 @@ // Collector name const char* const ZName = "The Z Garbage Collector"; -// Global phase state -extern uint32_t ZGlobalPhase; -const uint32_t ZPhaseMark = 0; -const uint32_t ZPhaseMarkCompleted = 1; -const uint32_t ZPhaseRelocate = 2; -const char* ZGlobalPhaseToString(); - -// Global sequence number -extern uint32_t ZGlobalSeqNum; - // Granule shift/size const size_t ZGranuleSizeShift = 21; // 2MB const size_t ZGranuleSize = (size_t)1 << ZGranuleSizeShift; -// Number of heap views -const size_t ZHeapViews = ZPlatformHeapViews; - // Virtual memory to physical memory ratio const size_t ZVirtualToPhysicalRatio = 16; // 16:1 -// Page types -const uint8_t ZPageTypeSmall = 0; -const uint8_t ZPageTypeMedium = 1; -const uint8_t ZPageTypeLarge = 2; - // Page size shifts const size_t ZPageSizeSmallShift = ZGranuleSizeShift; extern size_t ZPageSizeMediumShift; @@ -78,53 +60,11 @@ extern const int& ZObjectAlignmentSmall; extern int ZObjectAlignmentMedium; const int ZObjectAlignmentLarge = 1 << ZObjectAlignmentLargeShift; -// -// Good/Bad mask states -// -------------------- -// -// GoodMask BadMask WeakGoodMask WeakBadMask -// -------------------------------------------------------------- -// Marked0 001 110 101 010 -// Marked1 010 101 110 001 -// Remapped 100 011 100 011 -// - -// Good/bad masks -extern uintptr_t ZAddressGoodMask; -extern uintptr_t ZAddressBadMask; -extern uintptr_t ZAddressWeakBadMask; - -// The bad mask is 64 bit. Its high order 32 bits contain all possible value combinations -// that this mask will have. 
Therefore, the memory where the 32 high order bits are stored, -// can be used as a 32 bit GC epoch counter, that has a different bit pattern every time -// the bad mask is flipped. This provides a pointer to said 32 bits. -extern uint32_t* ZAddressBadMaskHighOrderBitsAddr; -const int ZAddressBadMaskHighOrderBitsOffset = LITTLE_ENDIAN_ONLY(4) BIG_ENDIAN_ONLY(0); - -// Pointer part of address -extern size_t ZAddressOffsetBits; -const size_t ZAddressOffsetShift = 0; -extern uintptr_t ZAddressOffsetMask; -extern size_t ZAddressOffsetMax; - -// Metadata part of address -const size_t ZAddressMetadataBits = 4; -extern size_t ZAddressMetadataShift; -extern uintptr_t ZAddressMetadataMask; - -// Metadata types -extern uintptr_t ZAddressMetadataMarked; -extern uintptr_t ZAddressMetadataMarked0; -extern uintptr_t ZAddressMetadataMarked1; -extern uintptr_t ZAddressMetadataRemapped; -extern uintptr_t ZAddressMetadataFinalizable; - // Cache line size const size_t ZCacheLineSize = ZPlatformCacheLineSize; #define ZCACHE_ALIGNED ATTRIBUTE_ALIGNED(ZCacheLineSize) // Mark stack space -extern uintptr_t ZMarkStackSpaceStart; const size_t ZMarkStackSpaceExpandSize = (size_t)1 << 25; // 32M // Mark stack and magazine sizes @@ -147,10 +87,10 @@ const size_t ZMarkCacheSize = 1024; // Must be a power of tw // Partial array minimum size const size_t ZMarkPartialArrayMinSizeShift = 12; // 4K const size_t ZMarkPartialArrayMinSize = (size_t)1 << ZMarkPartialArrayMinSizeShift; +const size_t ZMarkPartialArrayMinLength = ZMarkPartialArrayMinSize / oopSize; // Max number of proactive/terminate flush attempts const size_t ZMarkProactiveFlushMax = 10; -const size_t ZMarkTerminateFlushMax = 3; // Try complete mark timeout const uint64_t ZMarkCompleteTimeout = 200; // us diff --git a/src/hotspot/share/gc/z/zGranuleMap.hpp b/src/hotspot/share/gc/z/zGranuleMap.hpp index f2cb317c8de22..58c95e331b65a 100644 --- a/src/hotspot/share/gc/z/zGranuleMap.hpp +++ b/src/hotspot/share/gc/z/zGranuleMap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,34 +24,41 @@ #ifndef SHARE_GC_Z_ZGRANULEMAP_HPP #define SHARE_GC_Z_ZGRANULEMAP_HPP +#include "gc/z/zAddress.hpp" #include "gc/z/zArray.hpp" #include "memory/allocation.hpp" template class ZGranuleMap { friend class VMStructs; - template friend class ZGranuleMapIterator; + template friend class ZGranuleMapIterator; + friend class ZForwardingTable; + friend class ZPageTable; + friend class ZRemsetTableIterator; private: const size_t _size; T* const _map; - size_t index_for_offset(uintptr_t offset) const; + size_t index_for_offset(zoffset offset) const; + + T at(size_t index) const; public: ZGranuleMap(size_t max_offset); ~ZGranuleMap(); - T get(uintptr_t offset) const; - void put(uintptr_t offset, T value); - void put(uintptr_t offset, size_t size, T value); + T get(zoffset offset) const; + void put(zoffset offset, T value); + void put(zoffset offset, size_t size, T value); - T get_acquire(uintptr_t offset) const; - void release_put(uintptr_t offset, T value); + T get_acquire(zoffset offset) const; + void release_put(zoffset offset, T value); + void release_put(zoffset offset, size_t size, T value); }; -template -class ZGranuleMapIterator : public ZArrayIteratorImpl { +template +class ZGranuleMapIterator : public ZArrayIteratorImpl { public: ZGranuleMapIterator(const ZGranuleMap* granule_map); }; diff --git a/src/hotspot/share/gc/z/zGranuleMap.inline.hpp b/src/hotspot/share/gc/z/zGranuleMap.inline.hpp index 457bccfa61c80..8265ac9892852 100644 --- a/src/hotspot/share/gc/z/zGranuleMap.inline.hpp +++ b/src/hotspot/share/gc/z/zGranuleMap.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "gc/z/zGranuleMap.hpp" +#include "gc/z/zAddress.inline.hpp" #include "gc/z/zArray.inline.hpp" #include "gc/z/zGlobals.hpp" #include "memory/allocation.inline.hpp" @@ -46,49 +47,62 @@ inline ZGranuleMap::~ZGranuleMap() { } template -inline size_t ZGranuleMap::index_for_offset(uintptr_t offset) const { - const size_t index = offset >> ZGranuleSizeShift; +inline size_t ZGranuleMap::index_for_offset(zoffset offset) const { + const size_t index = untype(offset) >> ZGranuleSizeShift; assert(index < _size, "Invalid index"); + return index; } template -inline T ZGranuleMap::get(uintptr_t offset) const { +inline T ZGranuleMap::at(size_t index) const { + assert(index < _size, "Invalid index"); + return Atomic::load(_map + index); +} + +template +inline T ZGranuleMap::get(zoffset offset) const { const size_t index = index_for_offset(offset); - return _map[index]; + return at(index); } template -inline void ZGranuleMap::put(uintptr_t offset, T value) { +inline void ZGranuleMap::put(zoffset offset, T value) { const size_t index = index_for_offset(offset); - _map[index] = value; + Atomic::store(_map + index, value); } template -inline void ZGranuleMap::put(uintptr_t offset, size_t size, T value) { +inline void ZGranuleMap::put(zoffset offset, size_t size, T value) { assert(is_aligned(size, ZGranuleSize), "Misaligned"); const size_t start_index = index_for_offset(offset); const size_t end_index = start_index + (size >> ZGranuleSizeShift); for (size_t index = start_index; index < end_index; index++) { - _map[index] = value; + Atomic::store(_map + index, value); } } template -inline T ZGranuleMap::get_acquire(uintptr_t offset) const { +inline T ZGranuleMap::get_acquire(zoffset offset) const { const size_t index = index_for_offset(offset); return Atomic::load_acquire(_map + index); } template -inline void ZGranuleMap::release_put(uintptr_t offset, T value) { +inline void ZGranuleMap::release_put(zoffset offset, T value) { const size_t index = index_for_offset(offset); Atomic::release_store(_map + index, value); } template -inline ZGranuleMapIterator::ZGranuleMapIterator(const ZGranuleMap* granule_map) : - ZArrayIteratorImpl(granule_map->_map, granule_map->_size) {} +inline void ZGranuleMap::release_put(zoffset offset, size_t size, T value) { + OrderAccess::release(); + put(offset, size, value); +} + +template +inline ZGranuleMapIterator::ZGranuleMapIterator(const ZGranuleMap* granule_map) : + ZArrayIteratorImpl(granule_map->_map, granule_map->_size) {} #endif // SHARE_GC_Z_ZGRANULEMAP_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zHash.hpp b/src/hotspot/share/gc/z/zHash.hpp index e646969848890..d8903c6f99a78 100644 --- a/src/hotspot/share/gc/z/zHash.hpp +++ b/src/hotspot/share/gc/z/zHash.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,13 +24,15 @@ #ifndef SHARE_GC_Z_ZHASH_HPP #define SHARE_GC_Z_ZHASH_HPP -#include "memory/allStatic.hpp" +#include "gc/z/zAddress.hpp" +#include "memory/allocation.hpp" #include "utilities/globalDefinitions.hpp" class ZHash : public AllStatic { public: static uint32_t uint32_to_uint32(uint32_t key); static uint32_t address_to_uint32(uintptr_t key); + static uint32_t offset_to_uint32(zoffset key); }; #endif // SHARE_GC_Z_ZHASH_HPP diff --git a/src/hotspot/share/gc/z/zHash.inline.hpp b/src/hotspot/share/gc/z/zHash.inline.hpp index 2fda5189da4cb..987501219fbd9 100644 --- a/src/hotspot/share/gc/z/zHash.inline.hpp +++ b/src/hotspot/share/gc/z/zHash.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -74,4 +74,8 @@ inline uint32_t ZHash::address_to_uint32(uintptr_t key) { return uint32_to_uint32((uint32_t)(key >> 3)); } +inline uint32_t ZHash::offset_to_uint32(zoffset key) { + return address_to_uint32(untype(key)); +} + #endif // SHARE_GC_Z_ZHASH_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zHeap.cpp b/src/hotspot/share/gc/z/zHeap.cpp index 8ad31a6073872..7c2eb9707b220 100644 --- a/src/hotspot/share/gc/z/zHeap.cpp +++ b/src/hotspot/share/gc/z/zHeap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,62 +24,78 @@ #include "precompiled.hpp" #include "classfile/classLoaderDataGraph.hpp" #include "gc/shared/gc_globals.hpp" +#include "gc/shared/gcLogPrecious.hpp" #include "gc/shared/locationPrinter.hpp" #include "gc/shared/tlab_globals.hpp" #include "gc/z/zAddress.inline.hpp" #include "gc/z/zArray.inline.hpp" +#include "gc/z/zGeneration.inline.hpp" #include "gc/z/zGlobals.hpp" #include "gc/z/zHeap.inline.hpp" #include "gc/z/zHeapIterator.hpp" #include "gc/z/zHeuristics.hpp" -#include "gc/z/zMark.inline.hpp" #include "gc/z/zPage.inline.hpp" #include "gc/z/zPageTable.inline.hpp" -#include "gc/z/zRelocationSet.inline.hpp" -#include "gc/z/zRelocationSetSelector.inline.hpp" #include "gc/z/zResurrection.hpp" #include "gc/z/zStat.hpp" -#include "gc/z/zThread.inline.hpp" +#include "gc/z/zUncoloredRoot.inline.hpp" +#include "gc/z/zUtils.hpp" #include "gc/z/zVerify.hpp" #include "gc/z/zWorkers.hpp" #include "logging/log.hpp" #include "memory/iterator.hpp" #include "memory/metaspaceUtils.hpp" #include "memory/resourceArea.hpp" -#include "prims/jvmtiTagMap.hpp" -#include "runtime/handshake.hpp" #include "runtime/javaThread.hpp" -#include "runtime/safepoint.hpp" #include "utilities/debug.hpp" static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond); static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond); -ZHeap* ZHeap::_heap = NULL; +ZHeap* ZHeap::_heap = nullptr; ZHeap::ZHeap() : - _workers(), - _object_allocator(), - _page_allocator(&_workers, MinHeapSize, InitialHeapSize, MaxHeapSize), + _page_allocator(MinHeapSize, InitialHeapSize, SoftMaxHeapSize, MaxHeapSize), _page_table(), - 
_forwarding_table(), - _mark(&_workers, &_page_table), - _reference_processor(&_workers), - _weak_roots_processor(&_workers), - _relocate(&_workers), - _relocation_set(&_workers), - _unload(&_workers), - _serviceability(min_capacity(), max_capacity()) { + _allocator_eden(), + _allocator_relocation(), + _serviceability(initial_capacity(), min_capacity(), max_capacity()), + _old(&_page_table, &_page_allocator), + _young(&_page_table, _old.forwarding_table(), &_page_allocator), + _initialized(false) { + // Install global heap instance - assert(_heap == NULL, "Already initialized"); + assert(_heap == nullptr, "Already initialized"); _heap = this; + if (!_page_allocator.is_initialized() || !_young.is_initialized() || !_old.is_initialized()) { + return; + } + + // Prime cache + if (!_page_allocator.prime_cache(_old.workers(), InitialHeapSize)) { + log_error_p(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", InitialHeapSize / M); + return; + } + + if (UseDynamicNumberOfGCThreads) { + log_info_p(gc, init)("GC Workers Max: %u (dynamic)", ConcGCThreads); + } + // Update statistics - ZStatHeap::set_at_initialize(_page_allocator.stats()); + _young.stat_heap()->at_initialize(_page_allocator.min_capacity(), _page_allocator.max_capacity()); + _old.stat_heap()->at_initialize(_page_allocator.min_capacity(), _page_allocator.max_capacity()); + + // Successfully initialized + _initialized = true; } bool ZHeap::is_initialized() const { - return _page_allocator.is_initialized() && _mark.is_initialized(); + return _initialized; +} + +size_t ZHeap::initial_capacity() const { + return _page_allocator.initial_capacity(); } size_t ZHeap::min_capacity() const { @@ -102,6 +118,18 @@ size_t ZHeap::used() const { return _page_allocator.used(); } +size_t ZHeap::used_generation(ZGenerationId id) const { + return _page_allocator.used_generation(id); +} + +size_t ZHeap::used_young() const { + return _page_allocator.used_generation(ZGenerationId::young); +} + +size_t ZHeap::used_old() const { + return _page_allocator.used_generation(ZGenerationId::old); +} + size_t ZHeap::unused() const { return _page_allocator.unused(); } @@ -111,7 +139,7 @@ size_t ZHeap::tlab_capacity() const { } size_t ZHeap::tlab_used() const { - return _object_allocator.used(); + return _allocator_eden.tlab_used(); } size_t ZHeap::max_tlab_size() const { @@ -119,7 +147,7 @@ size_t ZHeap::max_tlab_size() const { } size_t ZHeap::unsafe_max_tlab_alloc() const { - size_t size = _object_allocator.remaining(); + size_t size = _allocator_eden.remaining(); if (size < MinTLABSize) { // The remaining space in the allocator is not enough to @@ -134,33 +162,58 @@ size_t ZHeap::unsafe_max_tlab_alloc() const { } bool ZHeap::is_in(uintptr_t addr) const { + if (addr == 0) { + // Null isn't in the heap. + return false; + } + // An address is considered to be "in the heap" if it points into // the allocated part of a page, regardless of which heap view is // used. Note that an address with the finalizable metadata bit set // is not pointing into a heap view, and therefore not considered // to be "in the heap". 
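// The new implementation expects an uncolored address: valid colored
// zpointers are rejected by the assert below, and anything that is not a
// valid zaddress is trivially not in the heap. For addresses that do map to
// a page, is_in_page_relaxed() additionally accepts the from-address of an
// object that is currently being relocated in place, as long as it lies
// below the page's top at relocation start.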
- if (ZAddress::is_in(addr)) { - const ZPage* const page = _page_table.get(addr); - if (page != NULL) { - return page->is_in(addr); - } + assert(!is_valid(zpointer(addr)), "Don't pass in colored oops"); + + if (!is_valid(zaddress(addr))) { + return false; } - return false; -} + const zaddress o = to_zaddress(addr); + const ZPage* const page = _page_table.get(o); + if (page == nullptr) { + return false; + } -uint ZHeap::active_workers() const { - return _workers.active_workers(); + return is_in_page_relaxed(page, o); } -void ZHeap::set_active_workers(uint nworkers) { - _workers.set_active_workers(nworkers); +bool ZHeap::is_in_page_relaxed(const ZPage* page, zaddress addr) const { + if (page->is_in(addr)) { + return true; + } + + // Could still be a from-object during an in-place relocation + if (_old.is_phase_relocate()) { + const ZForwarding* const forwarding = _old.forwarding(unsafe(addr)); + if (forwarding != nullptr && forwarding->in_place_relocation_is_below_top_at_start(ZAddress::offset(addr))) { + return true; + } + } + if (_young.is_phase_relocate()) { + const ZForwarding* const forwarding = _young.forwarding(unsafe(addr)); + if (forwarding != nullptr && forwarding->in_place_relocation_is_below_top_at_start(ZAddress::offset(addr))) { + return true; + } + } + + return false; } void ZHeap::threads_do(ThreadClosure* tc) const { _page_allocator.threads_do(tc); - _workers.threads_do(tc); + _young.threads_do(tc); + _old.threads_do(tc); } void ZHeap::out_of_memory() { @@ -170,9 +223,9 @@ void ZHeap::out_of_memory() { log_info(gc)("Out Of Memory (%s)", Thread::current()->name()); } -ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) { - ZPage* const page = _page_allocator.alloc_page(type, size, flags); - if (page != NULL) { +ZPage* ZHeap::alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age) { + ZPage* const page = _page_allocator.alloc_page(type, size, flags, age); + if (page != nullptr) { // Insert page table entry _page_table.insert(page); } @@ -185,276 +238,69 @@ void ZHeap::undo_alloc_page(ZPage* page) { ZStatInc(ZCounterUndoPageAllocation); log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT, - ZThread::id(), ZThread::name(), p2i(page), page->size()); + p2i(Thread::current()), ZUtils::thread_name(), p2i(page), page->size()); - free_page(page, false /* reclaimed */); + free_page(page); } -void ZHeap::free_page(ZPage* page, bool reclaimed) { +void ZHeap::free_page(ZPage* page) { // Remove page table entry _page_table.remove(page); + if (page->is_old()) { + page->verify_remset_cleared_current(); + page->verify_remset_cleared_previous(); + } + // Free page - _page_allocator.free_page(page, reclaimed); + _page_allocator.free_page(page); } -void ZHeap::free_pages(const ZArray* pages, bool reclaimed) { +size_t ZHeap::free_empty_pages(const ZArray* pages) { + size_t freed = 0; // Remove page table entries ZArrayIterator iter(pages); for (ZPage* page; iter.next(&page);) { + if (page->is_old()) { + // The remset of pages should be clean when installed into the page + // cache. 
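// Clearing the remset here means a cached page never carries stale entries
// from its previous contents into its next use.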
+ page->remset_clear(); + } _page_table.remove(page); + freed += page->size(); } // Free pages - _page_allocator.free_pages(pages, reclaimed); -} - -void ZHeap::flip_to_marked() { - ZVerifyViewsFlip flip(&_page_allocator); - ZAddress::flip_to_marked(); -} - -void ZHeap::flip_to_remapped() { - ZVerifyViewsFlip flip(&_page_allocator); - ZAddress::flip_to_remapped(); -} + _page_allocator.free_pages(pages); -void ZHeap::mark_start() { - assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - - // Verification - ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_strong); - - if (ZHeap::heap()->has_alloc_stalled()) { - // If there are stalled allocations, ensure that regardless of the - // cause of the GC, we have to clear soft references, as we are just - // about to increment the sequence number, and all previous allocations - // will throw if not presented with enough memory. - ZHeap::heap()->set_soft_reference_policy(true); - } - - // Flip address view - flip_to_marked(); - - // Retire allocating pages - _object_allocator.retire_pages(); - - // Reset allocated/reclaimed/used statistics - _page_allocator.reset_statistics(); - - // Reset encountered/dropped/enqueued statistics - _reference_processor.reset_statistics(); - - // Enter mark phase - ZGlobalPhase = ZPhaseMark; - - // Reset marking information and mark roots - _mark.start(); - - // Update statistics - ZStatHeap::set_at_mark_start(_page_allocator.stats()); -} - -void ZHeap::mark(bool initial) { - _mark.mark(initial); -} - -void ZHeap::mark_flush_and_free(Thread* thread) { - _mark.flush_and_free(thread); -} - -bool ZHeap::mark_end() { - assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - - // Try end marking - if (!_mark.end()) { - // Marking not completed, continue concurrent mark - return false; - } - - // Enter mark completed phase - ZGlobalPhase = ZPhaseMarkCompleted; - - // Verify after mark - ZVerify::after_mark(); - - // Update statistics - ZStatHeap::set_at_mark_end(_page_allocator.stats()); - - // Block resurrection of weak/phantom references - ZResurrection::block(); - - // Prepare to unload stale metadata and nmethods - _unload.prepare(); - - // Notify JVMTI that some tagmap entry objects may have died. - JvmtiTagMap::set_needs_cleaning(); - - return true; -} - -void ZHeap::mark_free() { - _mark.free(); + return freed; } void ZHeap::keep_alive(oop obj) { - ZBarrier::keep_alive_barrier_on_oop(obj); -} - -void ZHeap::set_soft_reference_policy(bool clear) { - _reference_processor.set_soft_reference_policy(clear); -} - -class ZRendezvousClosure : public HandshakeClosure { -public: - ZRendezvousClosure() : - HandshakeClosure("ZRendezvous") {} - - void do_thread(Thread* thread) {} -}; - -void ZHeap::process_non_strong_references() { - // Process Soft/Weak/Final/PhantomReferences - _reference_processor.process_references(); - - // Process weak roots - _weak_roots_processor.process_weak_roots(); - - // Unlink stale metadata and nmethods - _unload.unlink(); - - // Perform a handshake. This is needed 1) to make sure that stale - // metadata and nmethods are no longer observable. And 2), to - // prevent the race where a mutator first loads an oop, which is - // logically null but not yet cleared. Then this oop gets cleared - // by the reference processor and resurrection is unblocked. At - // this point the mutator could see the unblocked state and pass - // this invalid oop through the normal barrier path, which would - // incorrectly try to mark the oop. 
- ZRendezvousClosure cl; - Handshake::execute(&cl); - - // Unblock resurrection of weak/phantom references - ZResurrection::unblock(); - - // Purge stale metadata and nmethods that were unlinked - _unload.purge(); - - // Enqueue Soft/Weak/Final/PhantomReferences. Note that this - // must be done after unblocking resurrection. Otherwise the - // Finalizer thread could call Reference.get() on the Finalizers - // that were just enqueued, which would incorrectly return null - // during the resurrection block window, since such referents - // are only Finalizable marked. - _reference_processor.enqueue_references(); - - // Clear old markings claim bits. - // Note: Clearing _claim_strong also clears _claim_finalizable. - ClassLoaderDataGraph::clear_claimed_marks(ClassLoaderData::_claim_strong); + const zaddress addr = to_zaddress(obj); + ZBarrier::mark(addr); } -void ZHeap::free_empty_pages(ZRelocationSetSelector* selector, int bulk) { - // Freeing empty pages in bulk is an optimization to avoid grabbing - // the page allocator lock, and trying to satisfy stalled allocations - // too frequently. - if (selector->should_free_empty_pages(bulk)) { - free_pages(selector->empty_pages(), true /* reclaimed */); - selector->clear_empty_pages(); - } -} - -void ZHeap::select_relocation_set() { - // Do not allow pages to be deleted - _page_allocator.enable_deferred_delete(); - - // Register relocatable pages with selector - ZRelocationSetSelector selector; - ZPageTableIterator pt_iter(&_page_table); - for (ZPage* page; pt_iter.next(&page);) { - if (!page->is_relocatable()) { - // Not relocatable, don't register - continue; - } - - if (page->is_marked()) { - // Register live page - selector.register_live_page(page); - } else { - // Register empty page - selector.register_empty_page(page); - - // Reclaim empty pages in bulk - free_empty_pages(&selector, 64 /* bulk */); - } - } - - // Reclaim remaining empty pages - free_empty_pages(&selector, 0 /* bulk */); - - // Allow pages to be deleted - _page_allocator.disable_deferred_delete(); - - // Select relocation set - selector.select(); - - // Install relocation set - _relocation_set.install(&selector); - - // Setup forwarding table - ZRelocationSetIterator rs_iter(&_relocation_set); - for (ZForwarding* forwarding; rs_iter.next(&forwarding);) { - _forwarding_table.insert(forwarding); - } - - // Update statistics - ZStatRelocation::set_at_select_relocation_set(selector.stats()); - ZStatHeap::set_at_select_relocation_set(selector.stats()); +void ZHeap::mark_flush_and_free(Thread* thread) { + _young.mark_flush_and_free(thread); + _old.mark_flush_and_free(thread); } -void ZHeap::reset_relocation_set() { - // Reset forwarding table - ZRelocationSetIterator iter(&_relocation_set); - for (ZForwarding* forwarding; iter.next(&forwarding);) { - _forwarding_table.remove(forwarding); - } - - // Reset relocation set - _relocation_set.reset(); +bool ZHeap::is_allocating(zaddress addr) const { + const ZPage* const page = _page_table.get(addr); + return page->is_allocating(); } -void ZHeap::relocate_start() { +void ZHeap::object_iterate(ObjectClosure* object_cl, bool visit_weaks) { assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - - // Finish unloading stale metadata and nmethods - _unload.finish(); - - // Flip address view - flip_to_remapped(); - - // Enter relocate phase - ZGlobalPhase = ZPhaseRelocate; - - // Update statistics - ZStatHeap::set_at_relocate_start(_page_allocator.stats()); -} - -void ZHeap::relocate() { - // Relocate relocation set - 
_relocate.relocate(&_relocation_set); - - // Update statistics - ZStatHeap::set_at_relocate_end(_page_allocator.stats(), _object_allocator.relocated()); -} - -bool ZHeap::is_allocating(uintptr_t addr) const { - const ZPage* const page = _page_table.get(addr); - return page->is_allocating(); + ZHeapIterator iter(1 /* nworkers */, visit_weaks); + iter.object_iterate(object_cl, 0 /* worker_id */); } -void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) { +void ZHeap::object_and_field_iterate(ObjectClosure* object_cl, OopFieldClosure* field_cl, bool visit_weaks) { assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); ZHeapIterator iter(1 /* nworkers */, visit_weaks); - iter.object_iterate(cl, 0 /* worker_id */); + iter.object_and_field_iterate(object_cl, field_cl, 0 /* worker_id */); } ParallelObjectIteratorImpl* ZHeap::parallel_object_iterator(uint nworkers, bool visit_weaks) { @@ -462,28 +308,20 @@ ParallelObjectIteratorImpl* ZHeap::parallel_object_iterator(uint nworkers, bool return new ZHeapIterator(nworkers, visit_weaks); } -void ZHeap::pages_do(ZPageClosure* cl) { - ZPageTableIterator iter(&_page_table); - for (ZPage* page; iter.next(&page);) { - cl->do_page(page); - } - _page_allocator.pages_do(cl); -} - void ZHeap::serviceability_initialize() { _serviceability.initialize(); } -GCMemoryManager* ZHeap::serviceability_cycle_memory_manager() { - return _serviceability.cycle_memory_manager(); +GCMemoryManager* ZHeap::serviceability_cycle_memory_manager(bool minor) { + return _serviceability.cycle_memory_manager(minor); } -GCMemoryManager* ZHeap::serviceability_pause_memory_manager() { - return _serviceability.pause_memory_manager(); +GCMemoryManager* ZHeap::serviceability_pause_memory_manager(bool minor) { + return _serviceability.pause_memory_manager(minor); } -MemoryPool* ZHeap::serviceability_memory_pool() { - return _serviceability.memory_pool(); +MemoryPool* ZHeap::serviceability_memory_pool(ZGenerationId id) { + return _serviceability.memory_pool(id); } ZServiceabilityCounters* ZHeap::serviceability_counters() { @@ -503,7 +341,7 @@ void ZHeap::print_extended_on(outputStream* st) const { st->cr(); // Do not allow pages to be deleted - _page_allocator.enable_deferred_delete(); + _page_allocator.enable_safe_destroy(); // Print all pages st->print_cr("ZGC Page Table:"); @@ -513,24 +351,99 @@ void ZHeap::print_extended_on(outputStream* st) const { } // Allow pages to be deleted - _page_allocator.disable_deferred_delete(); + _page_allocator.disable_safe_destroy(); } bool ZHeap::print_location(outputStream* st, uintptr_t addr) const { - if (LocationPrinter::is_valid_obj((void*)addr)) { - st->print(PTR_FORMAT " is a %s oop: ", addr, ZAddress::is_good(addr) ? "good" : "bad"); - ZOop::from_address(addr)->print_on(st); - return true; + // Intentionally unchecked cast + const bool uncolored = is_valid(zaddress(addr)); + const bool colored = is_valid(zpointer(addr)); + if (colored && uncolored) { + // Should not reach here + return false; + } + + if (colored) { + return print_location(st, zpointer(addr)); + } + + if (uncolored) { + return print_location(st, zaddress(addr)); } return false; } -void ZHeap::verify() { - // Heap verification can only be done between mark end and - // relocate start. This is the only window where all oop are - // good and the whole heap is in a consistent state. 
- guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase"); +bool ZHeap::print_location(outputStream* st, zaddress addr) const { + assert(is_valid(addr), "must be"); + + st->print(PTR_FORMAT " is a zaddress: ", untype(addr)); + + if (addr == zaddress::null) { + st->print_raw_cr("NULL"); + return true; + } + + if (!ZHeap::is_in(untype(addr))) { + st->print_raw_cr("not in heap"); + return false; + } + + if (LocationPrinter::is_valid_obj((void*)untype(addr))) { + to_oop(addr)->print_on(st); + return true; + } + + ZPage* const page = ZHeap::page(addr); + zaddress_unsafe base; + + if (page->is_relocatable() && page->is_marked() && !ZGeneration::generation(page->generation_id())->is_phase_mark()) { + base = page->find_base((volatile zpointer*) addr); + } else { + // TODO: This part is probably broken, but register printing recovers from crashes + st->print_raw("Unreliable "); + base = page->find_base_unsafe((volatile zpointer*) addr); + } - ZVerify::after_weak_processing(); + if (base == zaddress_unsafe::null) { + st->print_raw_cr("Cannot find base"); + return false; + } + + if (untype(base) == untype(addr)) { + st->print_raw_cr("Bad mark info/base"); + return false; + } + + st->print_raw_cr("Internal address"); + print_location(st, untype(base)); + return true; +} + +bool ZHeap::print_location(outputStream* st, zpointer ptr) const { + assert(is_valid(ptr), "must be"); + + st->print(PTR_FORMAT " is %s zpointer: ", untype(ptr), + ZPointer::is_load_good(ptr) ? "a good" : "a bad"); + + if (!ZPointer::is_load_good(ptr)) { + st->print_cr("decoded " PTR_FORMAT, untype(ZPointer::uncolor_unsafe(ptr))); + // ptr is not load good but let us still investigate the uncolored address + return print_location(st, untype(ZPointer::uncolor_unsafe(ptr))); + } + + const zaddress addr = ZPointer::uncolor(ptr); + + if (addr == zaddress::null) { + st->print_raw_cr("NULL"); + return true; + } + + if (LocationPrinter::is_valid_obj((void*)untype(addr))) { + to_oop(addr)->print_on(st); + return true; + } + + st->print_cr("invalid object " PTR_FORMAT, untype(addr)); + return false; } diff --git a/src/hotspot/share/gc/z/zHeap.hpp b/src/hotspot/share/gc/z/zHeap.hpp index 04250ad551faa..38969f1b4f204 100644 --- a/src/hotspot/share/gc/z/zHeap.hpp +++ b/src/hotspot/share/gc/z/zHeap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,49 +25,37 @@ #define SHARE_GC_Z_ZHEAP_HPP #include "gc/z/zAllocationFlags.hpp" +#include "gc/z/zAllocator.hpp" #include "gc/z/zArray.hpp" -#include "gc/z/zForwardingTable.hpp" -#include "gc/z/zMark.hpp" -#include "gc/z/zObjectAllocator.hpp" +#include "gc/z/zGeneration.hpp" +#include "gc/z/zPageAge.hpp" #include "gc/z/zPageAllocator.hpp" #include "gc/z/zPageTable.hpp" -#include "gc/z/zReferenceProcessor.hpp" -#include "gc/z/zRelocate.hpp" -#include "gc/z/zRelocationSet.hpp" -#include "gc/z/zWeakRootsProcessor.hpp" +#include "gc/z/zPageType.hpp" #include "gc/z/zServiceability.hpp" -#include "gc/z/zUnload.hpp" -#include "gc/z/zWorkers.hpp" -class ThreadClosure; -class ZPage; -class ZRelocationSetSelector; +class OopFieldClosure; class ZHeap { + friend class ZForwardingTest; + friend class ZLiveMapTest; friend class VMStructs; private: - static ZHeap* _heap; - - ZWorkers _workers; - ZObjectAllocator _object_allocator; - ZPageAllocator _page_allocator; - ZPageTable _page_table; - ZForwardingTable _forwarding_table; - ZMark _mark; - ZReferenceProcessor _reference_processor; - ZWeakRootsProcessor _weak_roots_processor; - ZRelocate _relocate; - ZRelocationSet _relocation_set; - ZUnload _unload; - ZServiceability _serviceability; - - void flip_to_marked(); - void flip_to_remapped(); - - void free_empty_pages(ZRelocationSetSelector* selector, int bulk); + static ZHeap* _heap; - void out_of_memory(); + ZPageAllocator _page_allocator; + ZPageTable _page_table; + + ZAllocatorEden _allocator_eden; + ZAllocatorForRelocation _allocator_relocation[ZAllocator::_relocation_allocators]; + + ZServiceability _serviceability; + + ZGenerationOld _old; + ZGenerationYoung _young; + + bool _initialized; public: static ZHeap* heap(); @@ -76,12 +64,18 @@ class ZHeap { bool is_initialized() const; + void out_of_memory(); + // Heap metrics + size_t initial_capacity() const; size_t min_capacity() const; size_t max_capacity() const; size_t soft_max_capacity() const; size_t capacity() const; size_t used() const; + size_t used_generation(ZGenerationId id) const; + size_t used_young() const; + size_t used_old() const; size_t unused() const; size_t tlab_capacity() const; @@ -90,77 +84,61 @@ class ZHeap { size_t unsafe_max_tlab_alloc() const; bool is_in(uintptr_t addr) const; + bool is_in_page_relaxed(const ZPage* page, zaddress addr) const; - // Threads - uint active_workers() const; - void set_active_workers(uint nworkers); - void threads_do(ThreadClosure* tc) const; + bool is_young(zaddress addr) const; + bool is_young(volatile zpointer* ptr) const; - // Reference processing - ReferenceDiscoverer* reference_discoverer(); - void set_soft_reference_policy(bool clear); + bool is_old(zaddress addr) const; + bool is_old(volatile zpointer* ptr) const; - // Non-strong reference processing - void process_non_strong_references(); + ZPage* page(zaddress addr) const; + ZPage* page(volatile zpointer* addr) const; + + // Liveness + bool is_object_live(zaddress addr) const; + bool is_object_strongly_live(zaddress addr) const; + void keep_alive(oop obj); + void mark_flush_and_free(Thread* thread); // Page allocation - ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags); + ZPage* alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age); void undo_alloc_page(ZPage* page); - void free_page(ZPage* page, bool reclaimed); - void free_pages(const ZArray* pages, bool reclaimed); + void free_page(ZPage* page); + size_t 
free_empty_pages(const ZArray* pages); // Object allocation - uintptr_t alloc_tlab(size_t size); - uintptr_t alloc_object(size_t size); - uintptr_t alloc_object_for_relocation(size_t size); - void undo_alloc_object_for_relocation(uintptr_t addr, size_t size); - bool has_alloc_stalled() const; - void check_out_of_memory(); - - // Marking - bool is_object_live(uintptr_t addr) const; - bool is_object_strongly_live(uintptr_t addr) const; - template void mark_object(uintptr_t addr); - void mark_start(); - void mark(bool initial); - void mark_flush_and_free(Thread* thread); - bool mark_end(); - void mark_free(); - void keep_alive(oop obj); - - // Relocation set - void select_relocation_set(); - void reset_relocation_set(); - - // Relocation - void relocate_start(); - uintptr_t relocate_object(uintptr_t addr); - uintptr_t remap_object(uintptr_t addr); - void relocate(); + bool is_alloc_stalling() const; + bool is_alloc_stalling_for_old() const; + void handle_alloc_stalling_for_young(); + void handle_alloc_stalling_for_old(); // Continuations - bool is_allocating(uintptr_t addr) const; + bool is_allocating(zaddress addr) const; // Iteration - void object_iterate(ObjectClosure* cl, bool visit_weaks); + void object_iterate(ObjectClosure* object_cl, bool visit_weaks); + void object_and_field_iterate(ObjectClosure* object_cl, OopFieldClosure* field_cl, bool visit_weaks); ParallelObjectIteratorImpl* parallel_object_iterator(uint nworkers, bool visit_weaks); - void pages_do(ZPageClosure* cl); + + void threads_do(ThreadClosure* tc) const; // Serviceability void serviceability_initialize(); - GCMemoryManager* serviceability_cycle_memory_manager(); - GCMemoryManager* serviceability_pause_memory_manager(); - MemoryPool* serviceability_memory_pool(); + GCMemoryManager* serviceability_cycle_memory_manager(bool minor); + GCMemoryManager* serviceability_pause_memory_manager(bool minor); + MemoryPool* serviceability_memory_pool(ZGenerationId id); ZServiceabilityCounters* serviceability_counters(); // Printing void print_on(outputStream* st) const; void print_extended_on(outputStream* st) const; bool print_location(outputStream* st, uintptr_t addr) const; + bool print_location(outputStream* st, zaddress addr) const; + bool print_location(outputStream* st, zpointer ptr) const; // Verification bool is_oop(uintptr_t addr) const; - void verify(); }; #endif // SHARE_GC_Z_ZHEAP_HPP diff --git a/src/hotspot/share/gc/z/zHeap.inline.hpp b/src/hotspot/share/gc/z/zHeap.inline.hpp index 22f780a22cf33..56c274b774aa1 100644 --- a/src/hotspot/share/gc/z/zHeap.inline.hpp +++ b/src/hotspot/share/gc/z/zHeap.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,100 +28,70 @@ #include "gc/z/zAddress.inline.hpp" #include "gc/z/zForwardingTable.inline.hpp" +#include "gc/z/zGenerationId.hpp" #include "gc/z/zMark.inline.hpp" #include "gc/z/zPage.inline.hpp" #include "gc/z/zPageTable.inline.hpp" +#include "gc/z/zRemembered.inline.hpp" #include "utilities/debug.hpp" inline ZHeap* ZHeap::heap() { - assert(_heap != NULL, "Not initialized"); + assert(_heap != nullptr, "Not initialized"); return _heap; } -inline ReferenceDiscoverer* ZHeap::reference_discoverer() { - return &_reference_processor; +inline bool ZHeap::is_young(zaddress addr) const { + return page(addr)->is_young(); } -inline bool ZHeap::is_object_live(uintptr_t addr) const { - ZPage* page = _page_table.get(addr); - return page->is_object_live(addr); +inline bool ZHeap::is_young(volatile zpointer* ptr) const { + return page(ptr)->is_young(); } -inline bool ZHeap::is_object_strongly_live(uintptr_t addr) const { - ZPage* page = _page_table.get(addr); - return page->is_object_strongly_live(addr); +inline bool ZHeap::is_old(zaddress addr) const { + return !is_young(addr); } -template -inline void ZHeap::mark_object(uintptr_t addr) { - assert(ZGlobalPhase == ZPhaseMark, "Mark not allowed"); - _mark.mark_object(addr); +inline bool ZHeap::is_old(volatile zpointer* ptr) const { + return !is_young(ptr); } -inline uintptr_t ZHeap::alloc_tlab(size_t size) { - guarantee(size <= max_tlab_size(), "TLAB too large"); - return _object_allocator.alloc_object(size); +inline ZPage* ZHeap::page(zaddress addr) const { + return _page_table.get(addr); } -inline uintptr_t ZHeap::alloc_object(size_t size) { - uintptr_t addr = _object_allocator.alloc_object(size); - assert(ZAddress::is_good_or_null(addr), "Bad address"); - - if (addr == 0) { - out_of_memory(); - } - - return addr; +inline ZPage* ZHeap::page(volatile zpointer* ptr) const { + return _page_table.get(ptr); } -inline uintptr_t ZHeap::alloc_object_for_relocation(size_t size) { - const uintptr_t addr = _object_allocator.alloc_object_for_relocation(&_page_table, size); - assert(ZAddress::is_good_or_null(addr), "Bad address"); - return addr; +inline bool ZHeap::is_object_live(zaddress addr) const { + const ZPage* const page = _page_table.get(addr); + return page->is_object_live(addr); } -inline void ZHeap::undo_alloc_object_for_relocation(uintptr_t addr, size_t size) { - ZPage* const page = _page_table.get(addr); - _object_allocator.undo_alloc_object_for_relocation(page, addr, size); +inline bool ZHeap::is_object_strongly_live(zaddress addr) const { + const ZPage* const page = _page_table.get(addr); + return page->is_object_strongly_live(addr); } -inline uintptr_t ZHeap::relocate_object(uintptr_t addr) { - assert(ZGlobalPhase == ZPhaseRelocate, "Relocate not allowed"); - - ZForwarding* const forwarding = _forwarding_table.get(addr); - if (forwarding == NULL) { - // Not forwarding - return ZAddress::good(addr); - } - - // Relocate object - return _relocate.relocate_object(forwarding, ZAddress::good(addr)); +inline bool ZHeap::is_alloc_stalling() const { + return _page_allocator.is_alloc_stalling(); } -inline uintptr_t ZHeap::remap_object(uintptr_t addr) { - assert(ZGlobalPhase == ZPhaseMark || - ZGlobalPhase == ZPhaseMarkCompleted, "Forward not allowed"); - - ZForwarding* const forwarding = _forwarding_table.get(addr); - if (forwarding == NULL) { - // Not forwarding - return ZAddress::good(addr); - } - - // Forward object - return _relocate.forward_object(forwarding, 
ZAddress::good(addr)); +inline bool ZHeap::is_alloc_stalling_for_old() const { + return _page_allocator.is_alloc_stalling_for_old(); } -inline bool ZHeap::has_alloc_stalled() const { - return _page_allocator.has_alloc_stalled(); +inline void ZHeap::handle_alloc_stalling_for_young() { + _page_allocator.handle_alloc_stalling_for_young(); } -inline void ZHeap::check_out_of_memory() { - _page_allocator.check_out_of_memory(); +inline void ZHeap::handle_alloc_stalling_for_old() { + _page_allocator.handle_alloc_stalling_for_old(); } inline bool ZHeap::is_oop(uintptr_t addr) const { - return ZAddress::is_good(addr) && is_object_aligned(addr) && is_in(addr); + return is_in(addr); } #endif // SHARE_GC_Z_ZHEAP_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zHeapIterator.cpp b/src/hotspot/share/gc/z/zHeapIterator.cpp index f2eb79c0d55cd..a66ae55798ee5 100644 --- a/src/hotspot/share/gc/z/zHeapIterator.cpp +++ b/src/hotspot/share/gc/z/zHeapIterator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,18 +22,21 @@ */ #include "precompiled.hpp" +#include "classfile/classLoaderData.hpp" #include "classfile/classLoaderDataGraph.hpp" +#include "gc/shared/barrierSet.hpp" #include "gc/shared/barrierSetNMethod.hpp" #include "gc/shared/gc_globals.hpp" #include "gc/shared/taskqueue.inline.hpp" #include "gc/z/zAddress.inline.hpp" #include "gc/z/zCollectedHeap.hpp" +#include "gc/z/zGenerationId.hpp" #include "gc/z/zGlobals.hpp" #include "gc/z/zGranuleMap.inline.hpp" +#include "gc/z/zHeap.inline.hpp" #include "gc/z/zHeapIterator.hpp" #include "gc/z/zLock.inline.hpp" #include "gc/z/zNMethod.hpp" -#include "gc/z/zOop.inline.hpp" #include "memory/iterator.inline.hpp" #include "utilities/bitMap.inline.hpp" @@ -56,17 +59,31 @@ class ZHeapIteratorContext { ZHeapIteratorQueue* const _queue; ZHeapIteratorArrayQueue* const _array_queue; const uint _worker_id; - ZStatTimerDisable _timer_disable; + ObjectClosure* _object_cl; + OopFieldClosure* _field_cl; public: - ZHeapIteratorContext(ZHeapIterator* iter, uint worker_id) : + ZHeapIteratorContext(ZHeapIterator* iter, ObjectClosure* object_cl, OopFieldClosure* field_cl, uint worker_id) : _iter(iter), _queue(_iter->_queues.queue(worker_id)), _array_queue(_iter->_array_queues.queue(worker_id)), - _worker_id(worker_id) {} + _worker_id(worker_id), + _object_cl(object_cl), + _field_cl(field_cl) {} + + void visit_field(oop base, oop* p) const { + if (_field_cl != nullptr) { + _field_cl->do_field(base, p); + } + } + + void visit_object(oop obj) const { + _object_cl->do_object(obj); + } void mark_and_push(oop obj) const { if (_iter->mark_object(obj)) { + visit_object(obj); _queue->push(obj); } } @@ -97,7 +114,7 @@ class ZHeapIteratorContext { }; template -class ZHeapIteratorRootOopClosure : public OopClosure { +class ZHeapIteratorColoredRootOopClosure : public OopClosure { private: const ZHeapIteratorContext& _context; @@ -110,10 +127,36 @@ class ZHeapIteratorRootOopClosure : public OopClosure { } public: - ZHeapIteratorRootOopClosure(const ZHeapIteratorContext& context) : + ZHeapIteratorColoredRootOopClosure(const ZHeapIteratorContext& context) : _context(context) {} virtual void do_oop(oop* p) { + _context.visit_field(nullptr, p); + const oop obj = load_oop(p); + _context.mark_and_push(obj); + } + + virtual void 
do_oop(narrowOop* p) { + ShouldNotReachHere(); + } +}; + +class ZHeapIteratorUncoloredRootOopClosure : public OopClosure { +private: + const ZHeapIteratorContext& _context; + + oop load_oop(oop* p) { + const oop o = Atomic::load(p); + assert_is_valid(to_zaddress(o)); + return RawAccess<>::oop_load(p); + } + +public: + ZHeapIteratorUncoloredRootOopClosure(const ZHeapIteratorContext& context) : + _context(context) {} + + virtual void do_oop(oop* p) { + _context.visit_field(nullptr, p); const oop obj = load_oop(p); _context.mark_and_push(obj); } @@ -150,6 +193,7 @@ class ZHeapIteratorOopClosure : public OopIterateClosure { } virtual void do_oop(oop* p) { + _context.visit_field(_base, p); const oop obj = load_oop(p); _context.mark_and_push(obj); } @@ -198,13 +242,13 @@ class ZHeapIteratorOopClosure : public OopIterateClosure { ZHeapIterator::ZHeapIterator(uint nworkers, bool visit_weaks) : _visit_weaks(visit_weaks), - _timer_disable(), _bitmaps(ZAddressOffsetMax), _bitmaps_lock(), _queues(nworkers), _array_queues(nworkers), - _roots(ClassLoaderData::_claim_other), - _weak_roots(), + _roots_colored(ZGenerationIdOptional::none), + _roots_uncolored(ZGenerationIdOptional::none), + _roots_weak_colored(ZGenerationIdOptional::none), _terminator(nworkers, &_queues) { // Create queues @@ -246,19 +290,19 @@ static size_t object_index_max() { } static size_t object_index(oop obj) { - const uintptr_t addr = ZOop::to_address(obj); - const uintptr_t offset = ZAddress::offset(addr); + const zaddress addr = to_zaddress(obj); + const zoffset offset = ZAddress::offset(addr); const uintptr_t mask = ZGranuleSize - 1; - return (offset & mask) >> ZObjectAlignmentSmallShift; + return (untype(offset) & mask) >> ZObjectAlignmentSmallShift; } ZHeapIteratorBitMap* ZHeapIterator::object_bitmap(oop obj) { - const uintptr_t offset = ZAddress::offset(ZOop::to_address(obj)); + const zoffset offset = ZAddress::offset(to_zaddress(obj)); ZHeapIteratorBitMap* bitmap = _bitmaps.get_acquire(offset); - if (bitmap == NULL) { + if (bitmap == nullptr) { ZLocker locker(&_bitmaps_lock); bitmap = _bitmaps.get(offset); - if (bitmap == NULL) { + if (bitmap == nullptr) { // Install new bitmap bitmap = new ZHeapIteratorBitMap(object_index_max()); _bitmaps.release_put(offset, bitmap); @@ -269,7 +313,7 @@ ZHeapIteratorBitMap* ZHeapIterator::object_bitmap(oop obj) { } bool ZHeapIterator::mark_object(oop obj) { - if (obj == NULL) { + if (obj == nullptr) { return false; } @@ -278,7 +322,7 @@ bool ZHeapIterator::mark_object(oop obj) { return bitmap->try_set_bit(index); } -typedef ClaimingCLDToOopClosure ZHeapIteratorCLDCLosure; +typedef ClaimingCLDToOopClosure ZHeapIteratorCLDClosure; class ZHeapIteratorNMethodClosure : public NMethodClosure { private: @@ -317,20 +361,26 @@ class ZHeapIteratorThreadClosure : public ThreadClosure { }; void ZHeapIterator::push_strong_roots(const ZHeapIteratorContext& context) { - ZHeapIteratorRootOopClosure cl(context); - ZHeapIteratorCLDCLosure cld_cl(&cl); - ZHeapIteratorNMethodClosure nm_cl(&cl); - ZHeapIteratorThreadClosure thread_cl(&cl, &nm_cl); - - _roots.apply(&cl, - &cld_cl, - &thread_cl, - &nm_cl); + { + ZHeapIteratorColoredRootOopClosure cl(context); + ZHeapIteratorCLDClosure cld_cl(&cl); + + _roots_colored.apply(&cl, + &cld_cl); + } + + { + ZHeapIteratorUncoloredRootOopClosure cl(context); + ZHeapIteratorNMethodClosure nm_cl(&cl); + ZHeapIteratorThreadClosure thread_cl(&cl, &nm_cl); + _roots_uncolored.apply(&thread_cl, + &nm_cl); + } } void ZHeapIterator::push_weak_roots(const ZHeapIteratorContext& 
context) { - ZHeapIteratorRootOopClosure cl(context); - _weak_roots.apply(&cl); + ZHeapIteratorColoredRootOopClosure cl(context); + _roots_weak_colored.apply(&cl); } template @@ -344,7 +394,7 @@ void ZHeapIterator::push_roots(const ZHeapIteratorContext& context) { template void ZHeapIterator::follow_object(const ZHeapIteratorContext& context, oop obj) { ZHeapIteratorOopClosure cl(context, obj); - obj->oop_iterate(&cl); + ZIterator::oop_iterate(obj, &cl); } void ZHeapIterator::follow_array(const ZHeapIteratorContext& context, oop obj) { @@ -370,14 +420,11 @@ void ZHeapIterator::follow_array_chunk(const ZHeapIteratorContext& context, cons // Follow array chunk ZHeapIteratorOopClosure cl(context, obj); - obj->oop_iterate_range(&cl, start, end); + ZIterator::oop_iterate_range(obj, &cl, start, end); } template -void ZHeapIterator::visit_and_follow(const ZHeapIteratorContext& context, ObjectClosure* cl, oop obj) { - // Visit - cl->do_object(obj); - +void ZHeapIterator::follow(const ZHeapIteratorContext& context, oop obj) { // Follow if (obj->is_objArray()) { follow_array(context, obj); @@ -387,13 +434,13 @@ void ZHeapIterator::visit_and_follow(const ZHeapIteratorContext& context, Object } template -void ZHeapIterator::drain(const ZHeapIteratorContext& context, ObjectClosure* cl) { +void ZHeapIterator::drain(const ZHeapIteratorContext& context) { ObjArrayTask array; oop obj; do { while (context.pop(obj)) { - visit_and_follow(context, cl, obj); + follow(context, obj); } if (context.pop_array(array)) { @@ -403,37 +450,47 @@ void ZHeapIterator::drain(const ZHeapIteratorContext& context, ObjectClosure* cl } template -void ZHeapIterator::steal(const ZHeapIteratorContext& context, ObjectClosure* cl) { +void ZHeapIterator::steal(const ZHeapIteratorContext& context) { ObjArrayTask array; oop obj; if (context.steal_array(array)) { follow_array_chunk(context, array); } else if (context.steal(obj)) { - visit_and_follow(context, cl, obj); + follow(context, obj); } } template -void ZHeapIterator::drain_and_steal(const ZHeapIteratorContext& context, ObjectClosure* cl) { +void ZHeapIterator::drain_and_steal(const ZHeapIteratorContext& context) { do { - drain(context, cl); - steal(context, cl); + drain(context); + steal(context); } while (!context.is_drained() || !_terminator.offer_termination()); } template -void ZHeapIterator::object_iterate_inner(const ZHeapIteratorContext& context, ObjectClosure* object_cl) { +void ZHeapIterator::object_iterate_inner(const ZHeapIteratorContext& context) { push_roots(context); - drain_and_steal(context, object_cl); + drain_and_steal(context); +} + +void ZHeapIterator::object_iterate(ObjectClosure* object_cl, uint worker_id) { + const ZHeapIteratorContext context(this, object_cl, nullptr /* field_cl */, worker_id); + + if (_visit_weaks) { + object_iterate_inner(context); + } else { + object_iterate_inner(context); + } } -void ZHeapIterator::object_iterate(ObjectClosure* cl, uint worker_id) { - ZHeapIteratorContext context(this, worker_id); +void ZHeapIterator::object_and_field_iterate(ObjectClosure* object_cl, OopFieldClosure* field_cl, uint worker_id) { + const ZHeapIteratorContext context(this, object_cl, field_cl, worker_id); if (_visit_weaks) { - object_iterate_inner(context, cl); + object_iterate_inner(context); } else { - object_iterate_inner(context, cl); + object_iterate_inner(context); } } diff --git a/src/hotspot/share/gc/z/zHeapIterator.hpp b/src/hotspot/share/gc/z/zHeapIterator.hpp index 5c3a82d8bb7d9..27d9e2f0df160 100644 --- 
a/src/hotspot/share/gc/z/zHeapIterator.hpp +++ b/src/hotspot/share/gc/z/zHeapIterator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,7 +36,7 @@ class ZHeapIteratorBitMap; class ZHeapIteratorContext; using ZHeapIteratorBitMaps = ZGranuleMap; -using ZHeapIteratorBitMapsIterator = ZGranuleMapIterator; +using ZHeapIteratorBitMapsIterator = ZGranuleMapIterator; using ZHeapIteratorQueue = OverflowTaskQueue; using ZHeapIteratorQueues = GenericTaskQueueSet; using ZHeapIteratorArrayQueue = OverflowTaskQueue; @@ -44,17 +44,18 @@ using ZHeapIteratorArrayQueues = GenericTaskQueueSet - void visit_and_follow(const ZHeapIteratorContext& context, ObjectClosure* cl, oop obj); + void follow(const ZHeapIteratorContext& context, oop obj); template - void drain(const ZHeapIteratorContext& context, ObjectClosure* cl); + void drain(const ZHeapIteratorContext& context); template - void steal(const ZHeapIteratorContext& context, ObjectClosure* cl); + void steal(const ZHeapIteratorContext& context); template - void drain_and_steal(const ZHeapIteratorContext& context, ObjectClosure* cl); + void drain_and_steal(const ZHeapIteratorContext& context); template - void object_iterate_inner(const ZHeapIteratorContext& context, ObjectClosure* cl); + void object_iterate_inner(const ZHeapIteratorContext& context); public: ZHeapIterator(uint nworkers, bool visit_weaks); virtual ~ZHeapIterator(); - virtual void object_iterate(ObjectClosure* cl, uint worker_id); + virtual void object_iterate(ObjectClosure* object_cl, uint worker_id); + void object_and_field_iterate(ObjectClosure* object_cl, OopFieldClosure* field_cl, uint worker_id); }; #endif // SHARE_GC_Z_ZHEAPITERATOR_HPP diff --git a/src/hotspot/share/gc/z/zHeuristics.cpp b/src/hotspot/share/gc/z/zHeuristics.cpp index a999d2ab4aac3..bcd9dd844052b 100644 --- a/src/hotspot/share/gc/z/zHeuristics.cpp +++ b/src/hotspot/share/gc/z/zHeuristics.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,8 +22,8 @@ */ #include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" #include "gc/shared/gc_globals.hpp" +#include "gc/shared/gcLogPrecious.hpp" #include "gc/z/zCPU.inline.hpp" #include "gc/z/zGlobals.hpp" #include "gc/z/zHeuristics.hpp" @@ -56,15 +56,14 @@ void ZHeuristics::set_medium_page_size() { size_t ZHeuristics::relocation_headroom() { // Calculate headroom needed to avoid in-place relocation. Each worker will try // to allocate a small page, and all workers will share a single medium page. - const uint nworkers = UseDynamicNumberOfGCThreads ? ConcGCThreads : MAX2(ConcGCThreads, ParallelGCThreads); - return (nworkers * ZPageSizeSmall) + ZPageSizeMedium; + return (ConcGCThreads * ZPageSizeSmall) + ZPageSizeMedium; } bool ZHeuristics::use_per_cpu_shared_small_pages() { // Use per-CPU shared small pages only if these pages occupy at most 3.125% // of the max heap size. Otherwise fall back to using a single shared small // page. This is useful when using small heaps on large machines. 
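To make the per-CPU decision above concrete, here is a small standalone sketch of the same arithmetic (illustrative only; the heap size, small page size and overhead share are hypothetical stand-ins for MaxHeapSize, ZPageSizeSmall and significant_heap_overhead(), and are not taken from the patch):

#include <cstdint>
#include <cstdio>

// Hypothetical values, chosen only for illustration.
constexpr uint64_t kMaxHeapSize   = 16ull * 1024 * 1024 * 1024; // 16G
constexpr uint64_t kPageSizeSmall = 2ull * 1024 * 1024;         // 2M
constexpr double   kOverheadShare = 0.03125;                    // 3.125%, as in the comment above

// Mirrors the shape of the decision in ZHeuristics::use_per_cpu_shared_small_pages().
static bool use_per_cpu_shared_small_pages(uint32_t ncpus) {
  const uint64_t overhead      = (uint64_t)(kMaxHeapSize * kOverheadShare); // 512M
  const uint64_t per_cpu_share = overhead / ncpus;
  return per_cpu_share >= kPageSizeSmall;
}

int main() {
  std::printf("%d\n", use_per_cpu_shared_small_pages(16));   // 512M / 16   = 32M  -> 1 (per-CPU pages)
  std::printf("%d\n", use_per_cpu_shared_small_pages(1024)); // 512M / 1024 = 0.5M -> 0 (single shared page)
  return 0;
}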
- const size_t per_cpu_share = (MaxHeapSize * 0.03125) / ZCPU::count(); + const size_t per_cpu_share = significant_heap_overhead() / ZCPU::count(); return per_cpu_share >= ZPageSizeSmall; } @@ -73,8 +72,7 @@ static uint nworkers_based_on_ncpus(double cpu_share_in_percent) { } static uint nworkers_based_on_heap_size(double heap_share_in_percent) { - const int nworkers = (MaxHeapSize * (heap_share_in_percent / 100.0)) / ZPageSizeSmall; - return MAX2(nworkers, 1); + return (MaxHeapSize * (heap_share_in_percent / 100.0)) / ZPageSizeSmall; } static uint nworkers(double cpu_share_in_percent) { @@ -90,15 +88,22 @@ uint ZHeuristics::nparallel_workers() { // close to the number of processors tends to lead to over-provisioning and // scheduling latency issues. Using 60% of the active processors appears to // be a fairly good balance. - return nworkers(60.0); + return MAX2(nworkers(60.0), 1u); } uint ZHeuristics::nconcurrent_workers() { // The number of concurrent threads we would like to use heavily depends // on the type of workload we are running. Using too many threads will have // a negative impact on the application throughput, while using too few - // threads will prolong the GC-cycle and we then risk being out-run by the - // application. When in dynamic mode, use up to 25% of the active processors. - // When in non-dynamic mode, use 12.5% of the active processors. - return nworkers(UseDynamicNumberOfGCThreads ? 25.0 : 12.5); + // threads will prolong the GC cycle and we then risk being out-run by the + // application. + return MAX2(nworkers(25.0), 1u); +} + +size_t ZHeuristics::significant_heap_overhead() { + return MaxHeapSize * ZFragmentationLimit; +} + +size_t ZHeuristics::significant_young_overhead() { + return MaxHeapSize * ZYoungCompactionLimit; } diff --git a/src/hotspot/share/gc/z/zHeuristics.hpp b/src/hotspot/share/gc/z/zHeuristics.hpp index 362fd775f0fa3..0e2d851216c7b 100644 --- a/src/hotspot/share/gc/z/zHeuristics.hpp +++ b/src/hotspot/share/gc/z/zHeuristics.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #define SHARE_GC_Z_ZHEURISTICS_HPP #include "memory/allStatic.hpp" +#include "utilities/globalDefinitions.hpp" class ZHeuristics : public AllStatic { public: @@ -36,6 +37,9 @@ class ZHeuristics : public AllStatic { static uint nparallel_workers(); static uint nconcurrent_workers(); + + static size_t significant_heap_overhead(); + static size_t significant_young_overhead(); }; #endif // SHARE_GC_Z_ZHEURISTICS_HPP diff --git a/src/hotspot/share/gc/z/zIndexDistributor.hpp b/src/hotspot/share/gc/z/zIndexDistributor.hpp new file mode 100644 index 0000000000000..94f146176d6d2 --- /dev/null +++ b/src/hotspot/share/gc/z/zIndexDistributor.hpp @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZINDEXDISTRIBUTOR_HPP +#define SHARE_GC_Z_ZINDEXDISTRIBUTOR_HPP + +class ZIndexDistributor { +private: + void* _strategy; + + template + Strategy* strategy(); + + static void* create_strategy(int count); + +public: + ZIndexDistributor(int count); + ~ZIndexDistributor(); + + template + void do_indices(Function function); +}; + +#endif // SHARE_GC_Z_ZINDEXDISTRIBUTOR_HPP diff --git a/src/hotspot/share/gc/z/zIndexDistributor.inline.hpp b/src/hotspot/share/gc/z/zIndexDistributor.inline.hpp new file mode 100644 index 0000000000000..951eb2600d739 --- /dev/null +++ b/src/hotspot/share/gc/z/zIndexDistributor.inline.hpp @@ -0,0 +1,331 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_Z_ZINDEXDISTRIBUTOR_INLINE_HPP +#define SHARE_GC_Z_ZINDEXDISTRIBUTOR_INLINE_HPP + +#include "gc/z/zIndexDistributor.hpp" + +#include "gc/shared/gc_globals.hpp" +#include "gc/z/zGlobals.hpp" +#include "runtime/atomic.hpp" +#include "runtime/os.hpp" +#include "runtime/thread.hpp" +#include "utilities/align.hpp" + +class ZIndexDistributorStriped : public CHeapObj { + static const int MemSize = 4096; + + const int _max_index; + // For claiming a stripe + volatile int _claim_stripe; + // For claiming inside a stripe + char _mem[MemSize + ZCacheLineSize]; + + int claim_stripe() { + return Atomic::fetch_then_add(&_claim_stripe, 1, memory_order_relaxed); + } + + volatile int* claim_addr(int index) { + return (volatile int*)(align_up(_mem, ZCacheLineSize) + index * ZCacheLineSize); + } + +public: + ZIndexDistributorStriped(int max_index) : + _max_index(max_index), + _claim_stripe(0), + _mem() { + memset(_mem, 0, MemSize + ZCacheLineSize); + } + + template + void do_indices(Function function) { + const int count = MemSize / ZCacheLineSize; + const int stripe_max = _max_index / count; + + // Use claiming + for (int i; (i = claim_stripe()) < count;) { + for (int index; (index = Atomic::fetch_then_add(claim_addr(i), 1, memory_order_relaxed)) < stripe_max;) { + if (!function(i * stripe_max + index)) { + return; + } + } + } + + // Use stealing + for (int i = 0; i < count; i++) { + for (int index; (index = Atomic::fetch_then_add(claim_addr(i), 1, memory_order_relaxed)) < stripe_max;) { + if (!function(i * stripe_max + index)) { + return; + } + } + } + } +}; + +class ZIndexDistributorClaimTree : public CHeapObj { + friend class ZIndexDistributorTest; + +private: + // The N - 1 levels are used to claim a segment in the + // next level the Nth level claims an index. 
+ static constexpr int N = 4; + static constexpr int ClaimLevels = N - 1; + + // Describes the how the number of indices increases when going up from the given level + static constexpr int level_multiplier(int level) { + assert(level < ClaimLevels, "Must be"); + constexpr int array[ClaimLevels]{16, 16, 16}; + return array[level]; + } + + // Number of indices in one segment at the last level + const int _last_level_segment_size_shift; + + // For deallocation + char* _malloced; + + // Contains the tree of claim variables + volatile int* _claim_array; + + // Claim index functions + + // Number of claim entries at the given level + static constexpr int claim_level_size(int level) { + if (level == 0) { + return 1; + } + + return level_multiplier(level - 1) * claim_level_size(level - 1); + } + + // The index the next level starts at + static constexpr int claim_level_end_index(int level) { + if (level == 0) { + + // First level uses padding + return ZCacheLineSize / sizeof(int); + } + + return claim_level_size(level) + claim_level_end_index(level - 1); + } + + static constexpr int claim_level_start_index(int level) { + return claim_level_end_index(level - 1); + } + + // Total size used to hold all claim variables + static size_t claim_variables_size() { + return sizeof(int) * claim_level_end_index(ClaimLevels); + } + + // Returns the index of the start of the current segment of the current level + static constexpr int claim_level_index_accumulate(int* indices, int level, int acc = 1) { + if (level == 0) { + return acc * indices[level]; + } + + return acc * indices[level] + claim_level_index_accumulate(indices, level - 1, acc * level_multiplier(level)); + } + + static constexpr int claim_level_index(int* indices, int level) { + assert(level > 0, "Must be"); + + // The claim index for the current level is found in the previous levels + return claim_level_index_accumulate(indices, level - 1); + } + + static constexpr int claim_index(int* indices, int level) { + if (level == 0) { + return 0; + } + + return claim_level_start_index(level) + claim_level_index(indices, level); + } + + // Claim functions + + int claim(int index) { + return Atomic::fetch_then_add(&_claim_array[index], 1, memory_order_relaxed); + } + + int claim_at(int* indices, int level) { + const int index = claim_index(indices, level); + const int value = claim(index); +#if 0 + if (level == 0) { tty->print_cr("Claim at: %d index: %d got: %d", indices[0], index, value); } + else if (level == 1) { tty->print_cr("Claim at: %d %d index: %d got: %d", indices[0], indices[1], index, value); } + else if (level == 2) { tty->print_cr("Claim at: %d %d %d index: %d got: %d", indices[0], indices[1], indices[2], index, value); } + else if (level == 3) { tty->print_cr("Claim at: %d %d %d %d index: %d got: %d", indices[0], indices[1], indices[2], indices[3], index, value); } + else if (level == 4) { tty->print_cr("Claim at: %d %d %d %d %d index: %d got: %d", indices[0], indices[1], indices[2], indices[3], indices[4], index, value); } +#endif + return value; + } + + template + void claim_and_do(Function function, int* indices, int level) { + if (level < N) { + // Visit ClaimLevels and the last level + const int ci = claim_index(indices, level); + for (indices[level] = 0; (indices[level] = claim(ci)) < level_segment_size(level);) { + claim_and_do(function, indices, level + 1); + } + return; + } + + doit(function, indices); + } + + template + void steal_and_do(Function function, int* indices, int level) { + for (indices[level] = 0; indices[level] < 
level_segment_size(level); indices[level]++) { + const int next_level = level + 1; + // First try to claim at next level + claim_and_do(function, indices, next_level); + // Then steal at next level + if (next_level < ClaimLevels) { + steal_and_do(function, indices, next_level); + } + } + } + + // Functions to map claimed values to an index + + static constexpr int levels_size(int level) { + if (level == 0) { + return level_multiplier(0); + } + + return level_multiplier(level) * levels_size(level - 1); + } + + static int constexpr level_to_last_level_count_coverage(int level) { + return levels_size(ClaimLevels - 1) / levels_size(level); + } + + static int constexpr calculate_last_level_count(int* indices, int level = 0) { + if (level == N - 1) { + return 0; + } + + return indices[level] * level_to_last_level_count_coverage(level) + calculate_last_level_count(indices, level + 1); + } + + int calculate_index(int* indices) { + const int segment_start = calculate_last_level_count(indices) << _last_level_segment_size_shift; + return segment_start + indices[N - 1]; + } + + int level_segment_size(int level) { + if (level == ClaimLevels) { + return 1 << _last_level_segment_size_shift; + } + + return level_multiplier(level); + } + + template + void doit(Function function, int* indices) { + //const int index = first_level * second_level_max * _third_level_max + second_level * _third_level_max + third_level; + const int index = calculate_index(indices); + +#if 0 + tty->print_cr("doit Thread: " PTR_FORMAT ": %d %d %d %d => %d", + p2i(Thread::current()), + indices[0], indices[1], indices[2], indices[3], index); +#endif + + function(index); + } + + static int last_level_segment_size_shift(int count) { + const int last_level_size = count / levels_size(ClaimLevels - 1); + assert(levels_size(ClaimLevels - 1) * last_level_size == count, "Not exactly divisible"); + + return log2i_exact(last_level_size); + } + +public: + ZIndexDistributorClaimTree(int count) : + _last_level_segment_size_shift(last_level_segment_size_shift(count)), + _malloced((char*)os::malloc(claim_variables_size() + os::vm_page_size(), mtGC)), + _claim_array((volatile int*)align_up(_malloced, os::vm_page_size())) { + + assert((levels_size(ClaimLevels - 1) << _last_level_segment_size_shift) == count, "Incorrectly setup"); + +#if 0 + tty->print_cr("ZIndexDistributorClaimTree count: %d byte size: " SIZE_FORMAT, count, claim_variables_size() + os::vm_page_size()); +#endif + + memset(_malloced, 0, claim_variables_size() + os::vm_page_size()); + } + + ~ZIndexDistributorClaimTree() { + os::free(_malloced); + } + + template + void do_indices(Function function) { + int indices[N]; + claim_and_do(function, indices, 0 /* level */); + steal_and_do(function, indices, 0 /* level */); + } +}; + +// Using dynamically allocated objects just to be able to evaluate +// different strategies. Revert when one has been chosen. 
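For readers unfamiliar with the claim/steal pattern used by both strategies, the striped variant above can be approximated by the following standalone sketch (not part of the patch; the stripe count, member names and the std::function signature are invented for illustration, whereas the real code uses cache-line-sized claim counters and a template Function):

#include <atomic>
#include <functional>

// Sketch of the striped claim/steal scheme: each stripe has its own claim
// counter; a thread first claims whole stripes, then sweeps all stripes again
// to steal whatever indices are left.
class StripedIndexDistributorSketch {
  static constexpr int kStripes = 8;
  const int _per_stripe;                    // indices per stripe
  std::atomic<int> _claim_stripe;           // for claiming a stripe
  std::atomic<int> _claim_index[kStripes];  // for claiming inside a stripe

public:
  explicit StripedIndexDistributorSketch(int count)
    : _per_stripe(count / kStripes),
      _claim_stripe(0) {
    for (int s = 0; s < kStripes; s++) {
      _claim_index[s].store(0, std::memory_order_relaxed);
    }
  }

  // Calls function(index) for every index this caller claims or steals;
  // function returns false to stop early, as in ZIndexDistributor::do_indices.
  void do_indices(const std::function<bool(int)>& function) {
    // Use claiming
    for (int s; (s = _claim_stripe.fetch_add(1, std::memory_order_relaxed)) < kStripes; ) {
      for (int i; (i = _claim_index[s].fetch_add(1, std::memory_order_relaxed)) < _per_stripe; ) {
        if (!function(s * _per_stripe + i)) {
          return;
        }
      }
    }
    // Use stealing
    for (int s = 0; s < kStripes; s++) {
      for (int i; (i = _claim_index[s].fetch_add(1, std::memory_order_relaxed)) < _per_stripe; ) {
        if (!function(s * _per_stripe + i)) {
          return;
        }
      }
    }
  }
};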
+ +inline void* ZIndexDistributor::create_strategy(int count) { + switch (ZIndexDistributorStrategy) { + case 0: return new ZIndexDistributorClaimTree(count); + case 1: return new ZIndexDistributorStriped(count); + default: fatal("Unknown ZIndexDistributorStrategy"); return nullptr; + }; +} + +inline ZIndexDistributor::ZIndexDistributor(int count) : + _strategy(create_strategy(count)) {} + +inline ZIndexDistributor::~ZIndexDistributor() { + switch (ZIndexDistributorStrategy) { + case 0: delete static_cast(_strategy); break; + case 1: delete static_cast(_strategy); break; + default: fatal("Unknown ZIndexDistributorStrategy"); break; + }; +} + +template +inline Strategy* ZIndexDistributor::strategy() { + return static_cast(_strategy); +} + +template +inline void ZIndexDistributor::do_indices(Function function) { + switch (ZIndexDistributorStrategy) { + case 0: strategy()->do_indices(function); break; + case 1: strategy()->do_indices(function); break; + default: fatal("Unknown ZIndexDistributorStrategy"); + }; +} + +#endif // SHARE_GC_Z_ZINDEXDISTRIBUTOR_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zInitialize.cpp b/src/hotspot/share/gc/z/zInitialize.cpp index 8bc647953155b..0c0dc6e87a6c4 100644 --- a/src/hotspot/share/gc/z/zInitialize.cpp +++ b/src/hotspot/share/gc/z/zInitialize.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,10 +25,14 @@ #include "gc/z/zAddress.hpp" #include "gc/z/zBarrierSet.hpp" #include "gc/z/zCPU.hpp" +#include "gc/z/zDriver.hpp" +#include "gc/z/zGCIdPrinter.hpp" #include "gc/z/zGlobals.hpp" #include "gc/z/zHeuristics.hpp" #include "gc/z/zInitialize.hpp" +#include "gc/z/zJNICritical.hpp" #include "gc/z/zLargePages.hpp" +#include "gc/z/zMarkStackAllocator.hpp" #include "gc/z/zNUMA.hpp" #include "gc/z/zStat.hpp" #include "gc/z/zThreadLocalAllocBuffer.hpp" @@ -43,7 +47,7 @@ ZInitialize::ZInitialize(ZBarrierSet* barrier_set) { VM_Version::jdk_debug_level()); // Early initialization - ZAddress::initialize(); + ZGlobalsPointers::initialize(); ZNUMA::initialize(); ZCPU::initialize(); ZStatValue::initialize(); @@ -52,6 +56,9 @@ ZInitialize::ZInitialize(ZBarrierSet* barrier_set) { ZLargePages::initialize(); ZHeuristics::set_medium_page_size(); ZBarrierSet::set_barrier_set(barrier_set); + ZJNICritical::initialize(); + ZDriver::initialize(); + ZGCIdPrinter::initialize(); pd_initialize(); } diff --git a/src/hotspot/share/gc/z/zIterator.hpp b/src/hotspot/share/gc/z/zIterator.hpp new file mode 100644 index 0000000000000..3a1de049dd095 --- /dev/null +++ b/src/hotspot/share/gc/z/zIterator.hpp @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZITERATOR_HPP +#define SHARE_GC_Z_ZITERATOR_HPP + +#include "memory/allocation.hpp" +#include "memory/iterator.hpp" + +class ZIterator : AllStatic { +private: + static bool is_invisible_object(oop obj); + static bool is_invisible_object_array(oop obj); + +public: + // This iterator skips invisible roots + template + static void oop_iterate_safe(oop obj, OopClosureT* cl); + + template + static void oop_iterate(oop obj, OopClosureT* cl); + + template + static void oop_iterate_range(objArrayOop obj, OopClosureT* cl, int start, int end); + + // This function skips invisible roots + template + static void basic_oop_iterate_safe(oop obj, Function function); + + template + static void basic_oop_iterate(oop obj, Function function); +}; + +template +class ZObjectClosure : public ObjectClosure { +private: + Function _function; + +public: + ZObjectClosure(Function function); + virtual void do_object(oop obj); +}; + +#endif // SHARE_GC_Z_ZITERATOR_HPP diff --git a/src/hotspot/share/gc/z/zIterator.inline.hpp b/src/hotspot/share/gc/z/zIterator.inline.hpp new file mode 100644 index 0000000000000..c6a388b980956 --- /dev/null +++ b/src/hotspot/share/gc/z/zIterator.inline.hpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZITERATOR_INLINE_HPP +#define SHARE_GC_Z_ZITERATOR_INLINE_HPP + +#include "gc/z/zIterator.hpp" + +#include "memory/iterator.inline.hpp" +#include "oops/objArrayOop.hpp" +#include "oops/oop.inline.hpp" + +inline bool ZIterator::is_invisible_object(oop obj) { + return obj->mark_acquire().is_marked(); +} + +inline bool ZIterator::is_invisible_object_array(oop obj) { + return obj->klass()->is_objArray_klass() && is_invisible_object(obj); +} + +// This iterator skips invisible object arrays +template +void ZIterator::oop_iterate_safe(oop obj, OopClosureT* cl) { + // Skip invisible object arrays - we only filter out *object* arrays, + // because that check is arguably faster than the is_invisible_object + // check, and primitive arrays are cheap to call oop_iterate on. 
+ if (!is_invisible_object_array(obj)) { + obj->oop_iterate(cl); + } +} + +template +void ZIterator::oop_iterate(oop obj, OopClosureT* cl) { + assert(!is_invisible_object_array(obj), "not safe"); + obj->oop_iterate(cl); +} + +template +void ZIterator::oop_iterate_range(objArrayOop obj, OopClosureT* cl, int start, int end) { + assert(!is_invisible_object_array(obj), "not safe"); + obj->oop_iterate_range(cl, start, end); +} + +template +class ZBasicOopIterateClosure : public BasicOopIterateClosure { +private: + Function _function; + +public: + ZBasicOopIterateClosure(Function function) : + _function(function) {} + + virtual void do_oop(oop* p) { + _function((volatile zpointer*)p); + } + + virtual void do_oop(narrowOop* p_) { + ShouldNotReachHere(); + } +}; + +// This function skips invisible roots +template +void ZIterator::basic_oop_iterate_safe(oop obj, Function function) { + ZBasicOopIterateClosure cl(function); + oop_iterate_safe(obj, &cl); +} + +template +void ZIterator::basic_oop_iterate(oop obj, Function function) { + ZBasicOopIterateClosure cl(function); + oop_iterate(obj, &cl); +} + +template +ZObjectClosure::ZObjectClosure(Function function) : + _function(function) {} + +template +void ZObjectClosure::do_object(oop obj) { + _function(obj); +} + +#endif // SHARE_GC_Z_ZITERATOR_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zJNICritical.cpp b/src/hotspot/share/gc/z/zJNICritical.cpp new file mode 100644 index 0000000000000..d096367e12f79 --- /dev/null +++ b/src/hotspot/share/gc/z/zJNICritical.cpp @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/z/zJNICritical.hpp" +#include "gc/z/zLock.inline.hpp" +#include "gc/z/zStat.hpp" +#include "runtime/atomic.hpp" +#include "runtime/interfaceSupport.inline.hpp" +#include "runtime/thread.inline.hpp" +#include "utilities/debug.hpp" + +// +// The JNI critical count reflects number of Java threads currently +// inside a JNI critical region. +// +// * Normal (count >= 0). Java threads are allowed to enter and exit +// a critical region. +// +// * Blocked (count == -1). No Java thread is inside a critical region, +// and no Java thread can enter a critical region. +// +// * Block in progress (count < -1). Java threads are only allowed +// to exit a critical region. Attempts to enter a critical region +// will be blocked. 
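A standalone model of just the three counter states described above may help; it omits all locking, waiting and notification, and the function names and trace are invented for illustration (they simplify what block(), enter() and exit() do):

#include <atomic>
#include <cstdio>

static std::atomic<long long> g_count{0};

// GC side: flip a non-negative count n into the "block in progress" encoding -(n + 1).
static bool try_start_block() {
  long long n = g_count.load(std::memory_order_acquire);
  return n >= 0 && g_count.compare_exchange_strong(n, -(n + 1));
}

// Java thread side: enter only while no block is pending (count >= 0).
static bool try_enter() {
  long long n = g_count.load(std::memory_order_acquire);
  return n >= 0 && g_count.compare_exchange_strong(n, n + 1);
}

// Java thread side: exit moves the count one step toward 0 (normal) or -1 (blocked).
// Assumes the caller actually entered; the real code asserts count != 0.
static void exit_critical() {
  long long n = g_count.load(std::memory_order_acquire);
  while (!g_count.compare_exchange_strong(n, n > 0 ? n - 1 : n + 1)) {}
}

int main() {
  try_enter();                                             // count: 0 -> 1
  try_enter();                                             // count: 1 -> 2
  try_start_block();                                       // count: 2 -> -3 (block in progress)
  std::printf("enter while blocking: %d\n", try_enter());  // -> 0, new entries are refused
  exit_critical();                                         // count: -3 -> -2
  exit_critical();                                         // count: -2 -> -1 (blocked)
  std::printf("count: %lld\n", g_count.load());            // -> -1
  return 0;
}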
+// + +static const ZStatCriticalPhase ZCriticalPhaseJNICriticalStall("JNI Critical Stall", false /* verbose */); + +volatile int64_t ZJNICritical::_count; +ZConditionLock* ZJNICritical::_lock; + +void ZJNICritical::initialize() { + _count = 0; + _lock = new ZConditionLock(); +} + +void ZJNICritical::block() { + for (;;) { + const int64_t count = Atomic::load_acquire(&_count); + + if (count < 0) { + // Already blocked, wait until unblocked + ZLocker locker(_lock); + while (Atomic::load_acquire(&_count) < 0) { + _lock->wait(); + } + + // Unblocked + continue; + } + + // Increment and invert count + if (Atomic::cmpxchg(&_count, count, -(count + 1)) != count) { + continue; + } + + // If the previous count was 0, then we just incremented and inverted + // it to -1 and we have now blocked. Otherwise we wait until all Java + // threads have exited the critical region. + if (count != 0) { + // Wait until blocked + ZLocker locker(_lock); + while (Atomic::load_acquire(&_count) != -1) { + _lock->wait(); + } + } + + // Blocked + return; + } +} + +void ZJNICritical::unblock() { + const int64_t count = Atomic::load_acquire(&_count); + assert(count == -1, "Invalid count"); + + // Notify unblocked + ZLocker locker(_lock); + Atomic::release_store(&_count, (int64_t)0); + _lock->notify_all(); +} + +void ZJNICritical::enter_inner(JavaThread* thread) { + for (;;) { + const int64_t count = Atomic::load_acquire(&_count); + + if (count < 0) { + // Wait until unblocked + ZStatTimer timer(ZCriticalPhaseJNICriticalStall); + + // Transition thread to blocked before locking to avoid deadlock + ThreadBlockInVM tbivm(thread); + + ZLocker locker(_lock); + while (Atomic::load_acquire(&_count) < 0) { + _lock->wait(); + } + + // Unblocked + continue; + } + + // Increment count + if (Atomic::cmpxchg(&_count, count, count + 1) != count) { + continue; + } + + // Entered critical region + return; + } +} + +void ZJNICritical::enter(JavaThread* thread) { + assert(thread == JavaThread::current(), "Must be this thread"); + + if (!thread->in_critical()) { + enter_inner(thread); + } + + thread->enter_critical(); +} + +void ZJNICritical::exit_inner() { + for (;;) { + const int64_t count = Atomic::load_acquire(&_count); + assert(count != 0, "Invalid count"); + + if (count > 0) { + // No block in progress, decrement count + if (Atomic::cmpxchg(&_count, count, count - 1) != count) { + continue; + } + } else { + // Block in progress, increment count + if (Atomic::cmpxchg(&_count, count, count + 1) != count) { + continue; + } + + // If the previous count was -2, then we just incremented it to -1, + // and we should signal that all Java threads have now exited the + // critical region and we are now blocked. + if (count == -2) { + // Nofity blocked + ZLocker locker(_lock); + _lock->notify_all(); + } + } + + // Exited critical region + return; + } +} + +void ZJNICritical::exit(JavaThread* thread) { + assert(thread == JavaThread::current(), "Must be this thread"); + + thread->exit_critical(); + + if (!thread->in_critical()) { + exit_inner(); + } +} diff --git a/src/hotspot/share/gc/z/zJNICritical.hpp b/src/hotspot/share/gc/z/zJNICritical.hpp new file mode 100644 index 0000000000000..d2ba80eddba75 --- /dev/null +++ b/src/hotspot/share/gc/z/zJNICritical.hpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZJNICRITICAL_HPP +#define SHARE_GC_Z_ZJNICRITICAL_HPP + +#include "memory/allocation.hpp" + +class JavaThread; +class ZConditionLock; + +class ZJNICritical : public AllStatic { +private: + static volatile int64_t _count; + static ZConditionLock* _lock; + + static void enter_inner(JavaThread* thread); + static void exit_inner(); + +public: + // For use by GC + static void initialize(); + static void block(); + static void unblock(); + + // For use by Java threads + static void enter(JavaThread* thread); + static void exit(JavaThread* thread); +}; + +#endif // SHARE_GC_Z_ZJNICRITICAL_HPP diff --git a/src/hotspot/share/gc/z/zList.inline.hpp b/src/hotspot/share/gc/z/zList.inline.hpp index f559f3a2d4520..dcbcd87c6c623 100644 --- a/src/hotspot/share/gc/z/zList.inline.hpp +++ b/src/hotspot/share/gc/z/zList.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -110,12 +110,12 @@ inline bool ZList::is_empty() const { template inline T* ZList::first() const { - return is_empty() ? NULL : cast_to_outer(_head._next); + return is_empty() ? nullptr : cast_to_outer(_head._next); } template inline T* ZList::last() const { - return is_empty() ? NULL : cast_to_outer(_head._prev); + return is_empty() ? nullptr : cast_to_outer(_head._prev); } template @@ -128,7 +128,7 @@ inline T* ZList::next(T* elem) const { ZListNode* const next = node->_next; next->verify_links_linked(); - return (next == &_head) ? NULL : cast_to_outer(next); + return (next == &_head) ? nullptr : cast_to_outer(next); } template @@ -141,7 +141,7 @@ inline T* ZList::prev(T* elem) const { ZListNode* const prev = node->_prev; prev->verify_links_linked(); - return (prev == &_head) ? NULL : cast_to_outer(prev); + return (prev == &_head) ? 
nullptr : cast_to_outer(prev); } template @@ -191,7 +191,7 @@ inline void ZList::remove(T* elem) { template inline T* ZList::remove_first() { T* elem = first(); - if (elem != NULL) { + if (elem != nullptr) { remove(elem); } @@ -201,7 +201,7 @@ inline T* ZList::remove_first() { template inline T* ZList::remove_last() { T* elem = last(); - if (elem != NULL) { + if (elem != nullptr) { remove(elem); } @@ -215,7 +215,7 @@ inline ZListIteratorImpl::ZListIteratorImpl(const ZList* list) : template inline bool ZListIteratorImpl::next(T** elem) { - if (_next != NULL) { + if (_next != nullptr) { *elem = _next; _next = Forward ? _list->next(_next) : _list->prev(_next); return true; @@ -232,7 +232,7 @@ inline ZListRemoveIteratorImpl::ZListRemoveIteratorImpl(ZList* li template inline bool ZListRemoveIteratorImpl::next(T** elem) { *elem = Forward ? _list->remove_first() : _list->remove_last(); - return *elem != NULL; + return *elem != nullptr; } #endif // SHARE_GC_Z_ZLIST_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zLiveMap.cpp b/src/hotspot/share/gc/z/zLiveMap.cpp index 7e04b7e8c3793..0a3f61bad4cf5 100644 --- a/src/hotspot/share/gc/z/zLiveMap.cpp +++ b/src/hotspot/share/gc/z/zLiveMap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,10 +22,11 @@ */ #include "precompiled.hpp" +#include "gc/z/zGeneration.inline.hpp" #include "gc/z/zHeap.inline.hpp" #include "gc/z/zLiveMap.inline.hpp" #include "gc/z/zStat.hpp" -#include "gc/z/zThread.inline.hpp" +#include "gc/z/zUtils.hpp" #include "logging/log.hpp" #include "runtime/atomic.hpp" #include "utilities/debug.hpp" @@ -48,14 +49,15 @@ ZLiveMap::ZLiveMap(uint32_t size) : _bitmap(bitmap_size(size, nsegments)), _segment_shift(exact_log2(segment_size())) {} -void ZLiveMap::reset(size_t index) { +void ZLiveMap::reset(ZGenerationId id) { + ZGeneration* const generation = ZGeneration::generation(id); const uint32_t seqnum_initializing = (uint32_t)-1; bool contention = false; // Multiple threads can enter here, make sure only one of them // resets the marking information while the others busy wait. for (uint32_t seqnum = Atomic::load_acquire(&_seqnum); - seqnum != ZGlobalSeqNum; + seqnum != generation->seqnum(); seqnum = Atomic::load_acquire(&_seqnum)) { if ((seqnum != seqnum_initializing) && (Atomic::cmpxchg(&_seqnum, seqnum, seqnum_initializing) == seqnum)) { @@ -73,7 +75,7 @@ void ZLiveMap::reset(size_t index) { // before the update of the page seqnum, such that when the // up-to-date seqnum is load acquired, the bit maps will not // contain stale information. 
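The ordering argument in the comment above is the usual release/acquire publication pattern. A minimal standalone sketch of that pattern (names and data layout are illustrative only, not ZGC code):

#include <atomic>
#include <cstdint>

// The writer clears its data *before* release-storing the new sequence number,
// so a reader that acquire-loads the new number cannot observe stale data.
struct SeqnumPublishSketch {
  std::atomic<uint32_t> _seqnum{0};
  uint64_t _bits[4] = {};

  void reset_for_cycle(uint32_t cycle_seqnum) {
    for (uint64_t& w : _bits) {
      w = 0;                                                // 1: clear marking data
    }
    _seqnum.store(cycle_seqnum, std::memory_order_release); // 2: publish the new cycle
  }

  bool is_current(uint32_t cycle_seqnum) const {
    // Acquire pairs with the release above: if this load observes cycle_seqnum,
    // the cleared data is guaranteed to be visible as well.
    return _seqnum.load(std::memory_order_acquire) == cycle_seqnum;
  }
};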
- Atomic::release_store(&_seqnum, ZGlobalSeqNum); + Atomic::release_store(&_seqnum, generation->seqnum()); break; } @@ -83,8 +85,8 @@ void ZLiveMap::reset(size_t index) { ZStatInc(ZCounterMarkSeqNumResetContention); contention = true; - log_trace(gc)("Mark seqnum reset contention, thread: " PTR_FORMAT " (%s), map: " PTR_FORMAT ", bit: " SIZE_FORMAT, - ZThread::id(), ZThread::name(), p2i(this), index); + log_trace(gc)("Mark seqnum reset contention, thread: " PTR_FORMAT " (%s), map: " PTR_FORMAT, + p2i(Thread::current()), ZUtils::thread_name(), p2i(this)); } } } @@ -102,7 +104,7 @@ void ZLiveMap::reset_segment(BitMap::idx_t segment) { contention = true; log_trace(gc)("Mark segment reset contention, thread: " PTR_FORMAT " (%s), map: " PTR_FORMAT ", segment: " SIZE_FORMAT, - ZThread::id(), ZThread::name(), p2i(this), segment); + p2i(Thread::current()), ZUtils::thread_name(), p2i(this), segment); } } diff --git a/src/hotspot/share/gc/z/zLiveMap.hpp b/src/hotspot/share/gc/z/zLiveMap.hpp index 07ae862876f13..e3bcd2e267ddb 100644 --- a/src/hotspot/share/gc/z/zLiveMap.hpp +++ b/src/hotspot/share/gc/z/zLiveMap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,7 +24,9 @@ #ifndef SHARE_GC_Z_ZLIVEMAP_HPP #define SHARE_GC_Z_ZLIVEMAP_HPP +#include "gc/z/zAddress.hpp" #include "gc/z/zBitMap.hpp" +#include "gc/z/zGenerationId.hpp" #include "memory/allocation.hpp" class ObjectClosure; @@ -63,28 +65,36 @@ class ZLiveMap { bool claim_segment(BitMap::idx_t segment); - void reset(size_t index); + void reset(ZGenerationId id); void reset_segment(BitMap::idx_t segment); - void iterate_segment(ObjectClosure* cl, BitMap::idx_t segment, uintptr_t page_start, size_t page_object_alignment_shift); + size_t do_object(ObjectClosure* cl, zaddress addr) const; + + template + void iterate_segment(BitMap::idx_t segment, Function function); public: ZLiveMap(uint32_t size); + ZLiveMap(const ZLiveMap& other) = delete; void reset(); void resize(uint32_t size); - bool is_marked() const; + bool is_marked(ZGenerationId id) const; uint32_t live_objects() const; size_t live_bytes() const; - bool get(size_t index) const; - bool set(size_t index, bool finalizable, bool& inc_live); + bool get(ZGenerationId id, BitMap::idx_t index) const; + bool set(ZGenerationId id, BitMap::idx_t index, bool finalizable, bool& inc_live); void inc_live(uint32_t objects, size_t bytes); - void iterate(ObjectClosure* cl, uintptr_t page_start, size_t page_object_alignment_shift); + template + void iterate(ZGenerationId id, Function function); + + BitMap::idx_t find_base_bit(BitMap::idx_t index); + BitMap::idx_t find_base_bit_in_segment(BitMap::idx_t start, BitMap::idx_t index); }; #endif // SHARE_GC_Z_ZLIVEMAP_HPP diff --git a/src/hotspot/share/gc/z/zLiveMap.inline.hpp b/src/hotspot/share/gc/z/zLiveMap.inline.hpp index b6d6f13367a70..28390b72a89ca 100644 --- a/src/hotspot/share/gc/z/zLiveMap.inline.hpp +++ b/src/hotspot/share/gc/z/zLiveMap.inline.hpp @@ -26,9 +26,10 @@ #include "gc/z/zLiveMap.hpp" +#include "gc/z/zAddress.inline.hpp" #include "gc/z/zBitMap.inline.hpp" +#include "gc/z/zGeneration.inline.hpp" #include "gc/z/zMark.hpp" -#include "gc/z/zOop.inline.hpp" #include "gc/z/zUtils.inline.hpp" #include "runtime/atomic.hpp" #include "utilities/bitMap.inline.hpp" @@ -38,17 +39,15 
@@ inline void ZLiveMap::reset() { _seqnum = 0; } -inline bool ZLiveMap::is_marked() const { - return Atomic::load_acquire(&_seqnum) == ZGlobalSeqNum; +inline bool ZLiveMap::is_marked(ZGenerationId id) const { + return Atomic::load_acquire(&_seqnum) == ZGeneration::generation(id)->seqnum(); } inline uint32_t ZLiveMap::live_objects() const { - assert(ZGlobalPhase != ZPhaseMark, "Invalid phase"); return _live_objects; } inline size_t ZLiveMap::live_bytes() const { - assert(ZGlobalPhase != ZPhaseMark, "Invalid phase"); return _live_bytes; } @@ -96,18 +95,18 @@ inline BitMap::idx_t ZLiveMap::index_to_segment(BitMap::idx_t index) const { return index >> _segment_shift; } -inline bool ZLiveMap::get(size_t index) const { - BitMap::idx_t segment = index_to_segment(index); - return is_marked() && // Page is marked +inline bool ZLiveMap::get(ZGenerationId id, BitMap::idx_t index) const { + const BitMap::idx_t segment = index_to_segment(index); + return is_marked(id) && // Page is marked is_segment_live(segment) && // Segment is marked _bitmap.par_at(index, memory_order_relaxed); // Object is marked } -inline bool ZLiveMap::set(size_t index, bool finalizable, bool& inc_live) { - if (!is_marked()) { +inline bool ZLiveMap::set(ZGenerationId id, BitMap::idx_t index, bool finalizable, bool& inc_live) { + if (!is_marked(id)) { // First object to be marked during this // cycle, reset marking information. - reset(index); + reset(id); } const BitMap::idx_t segment = index_to_segment(index); @@ -133,43 +132,98 @@ inline BitMap::idx_t ZLiveMap::segment_end(BitMap::idx_t segment) const { return segment_start(segment) + segment_size(); } -inline void ZLiveMap::iterate_segment(ObjectClosure* cl, BitMap::idx_t segment, uintptr_t page_start, size_t page_object_alignment_shift) { +inline size_t ZLiveMap::do_object(ObjectClosure* cl, zaddress addr) const { + // Get the size of the object before calling the closure, which + // might overwrite the object in case we are relocating in-place. + const size_t size = ZUtils::object_size(addr); + + // Apply closure + cl->do_object(to_oop(addr)); + + return size; +} + +template +inline void ZLiveMap::iterate_segment(BitMap::idx_t segment, Function function) { assert(is_segment_live(segment), "Must be"); const BitMap::idx_t start_index = segment_start(segment); const BitMap::idx_t end_index = segment_end(segment); - BitMap::idx_t index = _bitmap.find_first_set_bit(start_index, end_index); - while (index < end_index) { - // Calculate object address - const uintptr_t addr = page_start + ((index / 2) << page_object_alignment_shift); - - // Get the size of the object before calling the closure, which - // might overwrite the object in case we are relocating in-place. 
- const size_t size = ZUtils::object_size(addr); + _bitmap.iterate(function, start_index, end_index); +} - // Apply closure - cl->do_object(ZOop::from_address(addr)); +template +inline void ZLiveMap::iterate(ZGenerationId id, Function function) { + if (!is_marked(id)) { + return; + } - // Find next bit after this object - const uintptr_t next_addr = align_up(addr + size, 1 << page_object_alignment_shift); - const BitMap::idx_t next_index = ((next_addr - page_start) >> page_object_alignment_shift) * 2; - if (next_index >= end_index) { - // End of live map - break; + auto live_only = [&](BitMap::idx_t index) -> bool { + if ((index & 1) == 0) { + return function(index); } + // Don't visit the finalizable bits + return true; + }; - index = _bitmap.find_first_set_bit(next_index, end_index); + for (BitMap::idx_t segment = first_live_segment(); segment < nsegments; segment = next_live_segment(segment)) { + // For each live segment + iterate_segment(segment, live_only); } } -inline void ZLiveMap::iterate(ObjectClosure* cl, uintptr_t page_start, size_t page_object_alignment_shift) { - if (is_marked()) { - for (BitMap::idx_t segment = first_live_segment(); segment < nsegments; segment = next_live_segment(segment)) { - // For each live segment - iterate_segment(cl, segment, page_start, page_object_alignment_shift); +// Find the bit index that corresponds to the start of the object that is lower, +// or equal, to the given index (index is inclusive). +// +// Typically used to find the start of an object when there's only a field +// address available. Note that it's not guaranteed that the found index +// corresponds to an object that spans the given index. This function just +// looks at the bits. The calling code is responsible for checking the object +// at the returned index. +// +// returns -1 if no bit was found +inline BitMap::idx_t ZLiveMap::find_base_bit(BitMap::idx_t index) { + // Check first segment + const BitMap::idx_t start_segment = index_to_segment(index); + if (is_segment_live(start_segment)) { + const BitMap::idx_t res = find_base_bit_in_segment(segment_start(start_segment), index); + if (res != BitMap::idx_t(-1)) { + return res; } } + + // Search earlier segments + for (BitMap::idx_t segment = start_segment; segment-- > 0; ) { + if (is_segment_live(segment)) { + const BitMap::idx_t res = find_base_bit_in_segment(segment_start(segment), segment_end(segment) - 1); + if (res != BitMap::idx_t(-1)) { + return res; + } + } + } + + // Not found + return BitMap::idx_t(-1); +} + +// Find the bit index that corresponds to the start of the object that is lower, +// or equal, to the given index (index is inclusive). Stopping when reaching +// start. +inline BitMap::idx_t ZLiveMap::find_base_bit_in_segment(BitMap::idx_t start, BitMap::idx_t index) { + assert(index_to_segment(start) == index_to_segment(index), "Only supports searches within segments start: %zu index: %zu", start, index); + assert(is_segment_live(index_to_segment(start)), "Must be live"); + + // Search backwards - + 1 to make an exclusive index. + const BitMap::idx_t end = index + 1; + const BitMap::idx_t bit = _bitmap.find_last_set_bit(start, end); + if (bit == end) { + return BitMap::idx_t(-1); + } + + // The bitmaps contain pairs of bits to deal with strongly marked vs only + // finalizable marked. Align down to get the first bit position. 
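To illustrate the bit-pair convention and the backwards search described above, here is a toy single-word model (purely illustrative; the real implementation searches a segmented ZBitMap via find_last_set_bit and checks segment liveness first):

#include <cstdint>
#include <cstdio>

// Toy model: each object occupies two consecutive bits (strong mark at even
// positions, finalizable mark at odd ones). Given a field's bit index, scan
// backwards for the closest set bit and align it down to the even "base" bit.
static int find_base_bit_sketch(uint64_t bits, int index) {
  for (int i = index; i >= 0; i--) {
    if ((bits >> i) & 1) {
      return i & ~1;    // align down to the strong-mark bit of the pair
    }
  }
  return -1;            // no earlier object found
}

int main() {
  // Objects at bit pairs (4,5) and (10,11); for the second one only the finalizable bit 11 is set.
  const uint64_t bits = (1ull << 4) | (1ull << 11);
  std::printf("%d\n", find_base_bit_sketch(bits, 9));   // -> 4
  std::printf("%d\n", find_base_bit_sketch(bits, 13));  // -> 10 (aligned down from bit 11)
  std::printf("%d\n", find_base_bit_sketch(bits, 3));   // -> -1
  return 0;
}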
+ return bit & ~BitMap::idx_t(1); } #endif // SHARE_GC_Z_ZLIVEMAP_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zLock.hpp b/src/hotspot/share/gc/z/zLock.hpp index f069cb62391d8..640a9fb02d3ad 100644 --- a/src/hotspot/share/gc/z/zLock.hpp +++ b/src/hotspot/share/gc/z/zLock.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "memory/allocation.hpp" #include "runtime/mutex.hpp" -class ZLock { +class ZLock : public CHeapObj { private: PlatformMutex _lock; @@ -52,7 +52,7 @@ class ZReentrantLock { bool is_owned() const; }; -class ZConditionLock { +class ZConditionLock : public CHeapObj { private: PlatformMonitor _lock; diff --git a/src/hotspot/share/gc/z/zLock.inline.hpp b/src/hotspot/share/gc/z/zLock.inline.hpp index d0df421b1ec55..fb4b2d91cd78f 100644 --- a/src/hotspot/share/gc/z/zLock.inline.hpp +++ b/src/hotspot/share/gc/z/zLock.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,7 +45,7 @@ inline void ZLock::unlock() { inline ZReentrantLock::ZReentrantLock() : _lock(), - _owner(NULL), + _owner(nullptr), _count(0) {} inline void ZReentrantLock::lock() { @@ -67,7 +67,7 @@ inline void ZReentrantLock::unlock() { _count--; if (_count == 0) { - Atomic::store(&_owner, (Thread*)NULL); + Atomic::store(&_owner, (Thread*)nullptr); _lock.unlock(); } } @@ -105,14 +105,14 @@ inline void ZConditionLock::notify_all() { template inline ZLocker::ZLocker(T* lock) : _lock(lock) { - if (_lock != NULL) { + if (_lock != nullptr) { _lock->lock(); } } template inline ZLocker::~ZLocker() { - if (_lock != NULL) { + if (_lock != nullptr) { _lock->unlock(); } } diff --git a/src/hotspot/share/gc/z/zMark.cpp b/src/hotspot/share/gc/z/zMark.cpp index e5b157f3a51b5..96f1576b83d34 100644 --- a/src/hotspot/share/gc/z/zMark.cpp +++ b/src/hotspot/share/gc/z/zMark.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,8 +30,13 @@ #include "gc/shared/gc_globals.hpp" #include "gc/shared/stringdedup/stringDedup.hpp" #include "gc/shared/suspendibleThreadSet.hpp" +#include "gc/shared/workerThread.hpp" #include "gc/z/zAbort.inline.hpp" +#include "gc/z/zAddress.inline.hpp" #include "gc/z/zBarrier.inline.hpp" +#include "gc/z/zBarrierSetNMethod.hpp" +#include "gc/z/zGeneration.inline.hpp" +#include "gc/z/zGenerationId.hpp" #include "gc/z/zHeap.inline.hpp" #include "gc/z/zLock.inline.hpp" #include "gc/z/zMark.inline.hpp" @@ -40,15 +45,14 @@ #include "gc/z/zMarkStack.inline.hpp" #include "gc/z/zMarkTerminate.inline.hpp" #include "gc/z/zNMethod.hpp" -#include "gc/z/zOop.inline.hpp" #include "gc/z/zPage.hpp" #include "gc/z/zPageTable.inline.hpp" #include "gc/z/zRootsIterator.hpp" #include "gc/z/zStackWatermark.hpp" #include "gc/z/zStat.hpp" #include "gc/z/zTask.hpp" -#include "gc/z/zThread.inline.hpp" #include "gc/z/zThreadLocalAllocBuffer.hpp" +#include "gc/z/zUncoloredRoot.inline.hpp" #include "gc/z/zUtils.inline.hpp" #include "gc/z/zWorkers.hpp" #include "logging/log.hpp" @@ -64,23 +68,23 @@ #include "runtime/stackWatermark.hpp" #include "runtime/stackWatermarkSet.inline.hpp" #include "runtime/threads.hpp" +#include "runtime/vmThread.hpp" #include "utilities/align.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/powerOfTwo.hpp" #include "utilities/ticks.hpp" -static const ZStatSubPhase ZSubPhaseConcurrentMark("Concurrent Mark"); -static const ZStatSubPhase ZSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush"); -static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate"); -static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete"); +static const ZStatSubPhase ZSubPhaseConcurrentMarkRootUncoloredYoung("Concurrent Mark Root Uncolored", ZGenerationId::young); +static const ZStatSubPhase ZSubPhaseConcurrentMarkRootColoredYoung("Concurrent Mark Root Colored", ZGenerationId::young); +static const ZStatSubPhase ZSubPhaseConcurrentMarkRootUncoloredOld("Concurrent Mark Root Uncolored", ZGenerationId::old); +static const ZStatSubPhase ZSubPhaseConcurrentMarkRootColoredOld("Concurrent Mark Root Colored", ZGenerationId::old); -ZMark::ZMark(ZWorkers* workers, ZPageTable* page_table) : - _workers(workers), +ZMark::ZMark(ZGeneration* generation, ZPageTable* page_table) : + _generation(generation), _page_table(page_table), _allocator(), - _stripes(), + _stripes(_allocator.start()), _terminate(), - _work_terminateflush(true), _work_nproactiveflush(0), _work_nterminateflush(0), _nproactiveflush(0), @@ -107,16 +111,6 @@ void ZMark::start() { verify_all_stacks_empty(); } - // Increment global sequence number to invalidate - // marking information for all pages. - ZGlobalSeqNum++; - - // Note that we start a marking cycle. - // Unlike other GCs, the color switch implicitly changes the nmethods - // to be armed, and the thread-local disarm values are lazily updated - // when JavaThreads wake up from safepoints. - CodeCache::on_gc_marking_cycle_start(); - // Reset flush/continue counters _nproactiveflush = 0; _nterminateflush = 0; @@ -124,7 +118,7 @@ void ZMark::start() { _ncontinue = 0; // Set number of workers to use - _nworkers = _workers->active_workers(); + _nworkers = workers()->active_workers(); // Set number of mark stripes to use, based on number // of workers we will use in the concurrent mark phase. 
@@ -132,7 +126,7 @@ void ZMark::start() { _stripes.set_nstripes(nstripes); // Update statistics - ZStatMark::set_at_mark_start(nstripes); + _generation->stat_mark()->at_mark_start(nstripes); // Print worker/stripe distribution LogTarget(Debug, gc, marking) log; @@ -147,15 +141,24 @@ void ZMark::start() { } } +ZWorkers* ZMark::workers() const { + return _generation->workers(); +} + void ZMark::prepare_work() { - assert(_nworkers == _workers->active_workers(), "Invalid number of workers"); + // Set number of workers to use + _nworkers = workers()->active_workers(); + + // Set number of mark stripes to use, based on number + // of workers we will use in the concurrent mark phase. + const size_t nstripes = calculate_nstripes(_nworkers); + _stripes.set_nstripes(nstripes); // Set number of active workers _terminate.reset(_nworkers); // Reset flush counters _work_nproactiveflush = _work_nterminateflush = 0; - _work_terminateflush = true; } void ZMark::finish_work() { @@ -164,143 +167,229 @@ void ZMark::finish_work() { _nterminateflush += _work_nterminateflush; } -bool ZMark::is_array(uintptr_t addr) const { - return ZOop::from_address(addr)->is_objArray(); +void ZMark::follow_work_complete() { + follow_work(false /* partial */); +} + +bool ZMark::follow_work_partial() { + return follow_work(true /* partial */); +} + +bool ZMark::is_array(zaddress addr) const { + return to_oop(addr)->is_objArray(); +} + +static uintptr_t encode_partial_array_offset(zpointer* addr) { + return untype(ZAddress::offset(to_zaddress((uintptr_t)addr))) >> ZMarkPartialArrayMinSizeShift; +} + +static zpointer* decode_partial_array_offset(uintptr_t offset) { + return (zpointer*)ZOffset::address(to_zoffset(offset << ZMarkPartialArrayMinSizeShift)); } -void ZMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) { +void ZMark::push_partial_array(zpointer* addr, size_t length, bool finalizable) { assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned"); - ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current()); - ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr); - const uintptr_t offset = ZAddress::offset(addr) >> ZMarkPartialArrayMinSizeShift; - const uintptr_t length = size / oopSize; + ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::mark_stacks(Thread::current(), _generation->id()); + ZMarkStripe* const stripe = _stripes.stripe_for_addr((uintptr_t)addr); + const uintptr_t offset = encode_partial_array_offset(addr); const ZMarkStackEntry entry(offset, length, finalizable); log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT, - addr, size, _stripes.stripe_id(stripe)); + p2i(addr), length, _stripes.stripe_id(stripe)); + + stacks->push(&_allocator, &_stripes, stripe, &_terminate, entry, false /* publish */); +} - stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */); +static void mark_barrier_on_oop_array(volatile zpointer* p, size_t length, bool finalizable, bool young) { + for (volatile const zpointer* const end = p + length; p < end; p++) { + if (young) { + ZBarrier::mark_barrier_on_young_oop_field(p); + } else { + ZBarrier::mark_barrier_on_old_oop_field(p, finalizable); + } + } } -void ZMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) { - assert(size <= ZMarkPartialArrayMinSize, "Too large, should be split"); - const size_t length = size / oopSize; +void ZMark::follow_array_elements_small(zpointer* addr, size_t length, bool finalizable) { + 
assert(length <= ZMarkPartialArrayMinLength, "Too large, should be split"); - log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size); + log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(addr), length); - ZBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable); + mark_barrier_on_oop_array(addr, length, finalizable, _generation->is_young()); } -void ZMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) { - assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large"); - assert(size > ZMarkPartialArrayMinSize, "Too small, should not be split"); - const uintptr_t start = addr; - const uintptr_t end = start + size; +void ZMark::follow_array_elements_large(zpointer* addr, size_t length, bool finalizable) { + assert(length <= (size_t)arrayOopDesc::max_array_length(T_OBJECT), "Too large"); + assert(length > ZMarkPartialArrayMinLength, "Too small, should not be split"); + + zpointer* const start = addr; + zpointer* const end = start + length; // Calculate the aligned middle start/end/size, where the middle start // should always be greater than the start (hence the +1 below) to make // sure we always do some follow work, not just split the array into pieces. - const uintptr_t middle_start = align_up(start + 1, ZMarkPartialArrayMinSize); - const size_t middle_size = align_down(end - middle_start, ZMarkPartialArrayMinSize); - const uintptr_t middle_end = middle_start + middle_size; + zpointer* const middle_start = align_up(start + 1, ZMarkPartialArrayMinSize); + const size_t middle_length = align_down(end - middle_start, ZMarkPartialArrayMinLength); + zpointer* const middle_end = middle_start + middle_length; log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT" (" SIZE_FORMAT "), " "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")", - start, end, size, middle_start, middle_end, middle_size); + p2i(start), p2i(end), length, p2i(middle_start), p2i(middle_end), middle_length); // Push unaligned trailing part if (end > middle_end) { - const uintptr_t trailing_addr = middle_end; - const size_t trailing_size = end - middle_end; - push_partial_array(trailing_addr, trailing_size, finalizable); + zpointer* const trailing_addr = middle_end; + const size_t trailing_length = end - middle_end; + push_partial_array(trailing_addr, trailing_length, finalizable); } // Push aligned middle part(s) - uintptr_t partial_addr = middle_end; + zpointer* partial_addr = middle_end; while (partial_addr > middle_start) { const size_t parts = 2; - const size_t partial_size = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinSize); - partial_addr -= partial_size; - push_partial_array(partial_addr, partial_size, finalizable); + const size_t partial_length = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinLength); + partial_addr -= partial_length; + push_partial_array(partial_addr, partial_length, finalizable); } // Follow leading part assert(start < middle_start, "Miscalculated middle start"); - const uintptr_t leading_addr = start; - const size_t leading_size = middle_start - start; - follow_small_array(leading_addr, leading_size, finalizable); + zpointer* const leading_addr = start; + const size_t leading_length = middle_start - start; + follow_array_elements_small(leading_addr, leading_length, finalizable); } -void ZMark::follow_array(uintptr_t addr, size_t size, bool finalizable) { - if (size <= 
ZMarkPartialArrayMinSize) { - follow_small_array(addr, size, finalizable); +void ZMark::follow_array_elements(zpointer* addr, size_t length, bool finalizable) { + if (length <= ZMarkPartialArrayMinLength) { + follow_array_elements_small(addr, length, finalizable); } else { - follow_large_array(addr, size, finalizable); + follow_array_elements_large(addr, length, finalizable); } } void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) { - const uintptr_t addr = ZAddress::good(entry.partial_array_offset() << ZMarkPartialArrayMinSizeShift); - const size_t size = entry.partial_array_length() * oopSize; + zpointer* const addr = decode_partial_array_offset(entry.partial_array_offset()); + const size_t length = entry.partial_array_length(); - follow_array(addr, size, finalizable); + follow_array_elements(addr, length, finalizable); } -template -class ZMarkBarrierOopClosure : public ClaimMetadataVisitingOopIterateClosure { +template +class ZMarkBarrierFollowOopClosure : public OopIterateClosure { +private: + static int claim_value() { + return finalizable ? ClassLoaderData::_claim_finalizable + : ClassLoaderData::_claim_strong; + } + + static ReferenceDiscoverer* discoverer() { + if (!finalizable) { + return ZGeneration::old()->reference_discoverer(); + } else { + return nullptr; + } + } + + static bool visit_metadata() { + // Only visit metadata if we're marking through the old generation + return ZGeneration::old()->is_phase_mark(); + } + + const bool _visit_metadata; + public: - ZMarkBarrierOopClosure() : - ClaimMetadataVisitingOopIterateClosure(finalizable - ? ClassLoaderData::_claim_finalizable - : ClassLoaderData::_claim_strong, - finalizable - ? NULL - : ZHeap::heap()->reference_discoverer()) {} + ZMarkBarrierFollowOopClosure() : + OopIterateClosure(discoverer()), + _visit_metadata(visit_metadata()) {} virtual void do_oop(oop* p) { - ZBarrier::mark_barrier_on_oop_field(p, finalizable); + switch (generation) { + case ZGenerationIdOptional::young: + ZBarrier::mark_barrier_on_young_oop_field((volatile zpointer*)p); + break; + case ZGenerationIdOptional::old: + ZBarrier::mark_barrier_on_old_oop_field((volatile zpointer*)p, finalizable); + break; + case ZGenerationIdOptional::none: + ZBarrier::mark_barrier_on_oop_field((volatile zpointer*)p, finalizable); + break; + } } virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } + virtual bool do_metadata() final { + // Only help out with metadata visiting + return _visit_metadata; + } + virtual void do_nmethod(nmethod* nm) { + assert(do_metadata(), "Don't call otherwise"); assert(!finalizable, "Can't handle finalizable marking of nmethods"); nm->run_nmethod_entry_barrier(); } + + virtual void do_method(Method* m) { + // Mark interpreted frames for class redefinition + m->record_gc_epoch(); + } + + virtual void do_klass(Klass* klass) { + ClassLoaderData* cld = klass->class_loader_data(); + ZMarkBarrierFollowOopClosure cl; + cld->oops_do(&cl, claim_value()); + } + + virtual void do_cld(ClassLoaderData* cld) { + ZMarkBarrierFollowOopClosure cl; + cld->oops_do(&cl, claim_value()); + } }; void ZMark::follow_array_object(objArrayOop obj, bool finalizable) { - if (finalizable) { - ZMarkBarrierOopClosure cl; - cl.do_klass(obj->klass()); + if (_generation->is_old()) { + if (finalizable) { + ZMarkBarrierFollowOopClosure cl; + cl.do_klass(obj->klass()); + } else { + ZMarkBarrierFollowOopClosure cl; + cl.do_klass(obj->klass()); + } } else { - ZMarkBarrierOopClosure cl; - cl.do_klass(obj->klass()); + ZMarkBarrierFollowOopClosure cl; + if 
(cl.do_metadata()) { + cl.do_klass(obj->klass()); + } } - const uintptr_t addr = (uintptr_t)obj->base(); - const size_t size = (size_t)obj->length() * oopSize; + // Should be convertible to colorless oop + assert_is_valid(to_zaddress(obj)); - follow_array(addr, size, finalizable); + zpointer* const addr = (zpointer*)obj->base(); + const size_t length = (size_t)obj->length(); + + follow_array_elements(addr, length, finalizable); } void ZMark::follow_object(oop obj, bool finalizable) { - if (ContinuationGCSupport::relativize_stack_chunk(obj)) { - // Loom doesn't support mixing of finalizable marking and strong marking of - // stack chunks. See: RelativizeDerivedOopClosure. - ZMarkBarrierOopClosure cl; - obj->oop_iterate(&cl); - return; - } - - if (finalizable) { - ZMarkBarrierOopClosure cl; - obj->oop_iterate(&cl); + if (_generation->is_old()) { + if (ZHeap::heap()->is_old(to_zaddress(obj))) { + if (finalizable) { + ZMarkBarrierFollowOopClosure cl; + ZIterator::oop_iterate(obj, &cl); + } else { + ZMarkBarrierFollowOopClosure cl; + ZIterator::oop_iterate(obj, &cl); + } + } else { + fatal("Catch me!"); + } } else { - ZMarkBarrierOopClosure cl; - obj->oop_iterate(&cl); + // Young gen must help out with old marking + ZMarkBarrierFollowOopClosure cl; + ZIterator::oop_iterate(obj, &cl); } } @@ -335,7 +424,7 @@ void ZMark::mark_and_follow(ZMarkContext* context, ZMarkStackEntry entry) { } // Decode object address and additional flags - const uintptr_t addr = entry.object_address(); + const zaddress addr = ZOffset::address(to_zoffset(entry.object_address())); const bool mark = entry.mark(); bool inc_live = entry.inc_live(); const bool follow = entry.follow(); @@ -362,9 +451,9 @@ void ZMark::mark_and_follow(ZMarkContext* context, ZMarkStackEntry entry) { // Follow if (follow) { if (is_array(addr)) { - follow_array_object(objArrayOop(ZOop::from_address(addr)), finalizable); + follow_array_object(objArrayOop(to_oop(addr)), finalizable); } else { - const oop obj = ZOop::from_address(addr); + const oop obj = to_oop(addr); follow_object(obj, finalizable); // Try deduplicate @@ -373,25 +462,53 @@ void ZMark::mark_and_follow(ZMarkContext* context, ZMarkStackEntry entry) { } } -template -bool ZMark::drain(ZMarkContext* context, T* timeout) { - ZMarkStripe* const stripe = context->stripe(); +// This function returns true if we need to stop working to resize threads or +// abort marking +bool ZMark::rebalance_work(ZMarkContext* context) { + const size_t assumed_nstripes = context->nstripes(); + const size_t nstripes = _stripes.nstripes(); + + if (assumed_nstripes != nstripes) { + context->set_nstripes(nstripes); + } else if (nstripes < calculate_nstripes(_nworkers) && _allocator.clear_and_get_expanded_recently()) { + const size_t new_nstripes = nstripes << 1; + _stripes.set_nstripes(new_nstripes); + context->set_nstripes(new_nstripes); + } + + ZMarkStripe* stripe = _stripes.stripe_for_worker(_nworkers, WorkerThread::worker_id()); + if (context->stripe() != stripe) { + // Need to switch stripe + context->set_stripe(stripe); + flush_and_free(); + } else if (!_terminate.saturated()) { + // Work imbalance detected; striped marking is likely going to be in the way + flush_and_free(); + } + + SuspendibleThreadSet::yield(); + + return ZAbort::should_abort() || _generation->should_worker_resize(); +} + +bool ZMark::drain(ZMarkContext* context) { ZMarkThreadLocalStacks* const stacks = context->stacks(); ZMarkStackEntry entry; + size_t processed = 0; + + context->set_stripe(_stripes.stripe_for_worker(_nworkers, 
WorkerThread::worker_id())); + context->set_nstripes(_stripes.nstripes()); // Drain stripe stacks - while (stacks->pop(&_allocator, &_stripes, stripe, entry)) { + while (stacks->pop(&_allocator, &_stripes, context->stripe(), entry)) { mark_and_follow(context, entry); - // Check timeout - if (timeout->has_expired()) { - // Timeout + if ((processed++ & 31) == 0 && rebalance_work(context)) { return false; } } - // Success - return !timeout->has_expired(); + return true; } bool ZMark::try_steal_local(ZMarkContext* context) { @@ -403,7 +520,7 @@ bool ZMark::try_steal_local(ZMarkContext* context) { victim_stripe != stripe; victim_stripe = _stripes.stripe_next(victim_stripe)) { ZMarkStack* const stack = stacks->steal(&_stripes, victim_stripe); - if (stack != NULL) { + if (stack != nullptr) { // Success, install the stolen stack stacks->install(&_stripes, stripe, stack); return true; @@ -423,7 +540,7 @@ bool ZMark::try_steal_global(ZMarkContext* context) { victim_stripe != stripe; victim_stripe = _stripes.stripe_next(victim_stripe)) { ZMarkStack* const stack = victim_stripe->steal_stack(); - if (stack != NULL) { + if (stack != nullptr) { // Success, install the stolen stack stacks->install(&_stripes, stripe, stack); return true; @@ -438,10 +555,6 @@ bool ZMark::try_steal(ZMarkContext* context) { return try_steal_local(context) || try_steal_global(context); } -void ZMark::idle() const { - os::naked_short_sleep(1); -} - class ZMarkFlushAndFreeStacksClosure : public HandshakeClosure { private: ZMark* const _mark; @@ -456,6 +569,9 @@ class ZMarkFlushAndFreeStacksClosure : public HandshakeClosure { void do_thread(Thread* thread) { if (_mark->flush_and_free(thread)) { _flushed = true; + if (SafepointSynchronize::is_at_safepoint()) { + log_debug(gc, marking)("Thread broke mark termination %s", thread->name()); + } } } @@ -464,201 +580,125 @@ class ZMarkFlushAndFreeStacksClosure : public HandshakeClosure { } }; -bool ZMark::flush(bool at_safepoint) { - ZMarkFlushAndFreeStacksClosure cl(this); - if (at_safepoint) { - Threads::threads_do(&cl); - } else { - Handshake::execute(&cl); +class VM_ZMarkFlushOperation : public VM_Operation { +private: + ThreadClosure* _cl; + +public: + VM_ZMarkFlushOperation(ThreadClosure* cl) : + _cl(cl) {} + + virtual bool evaluate_at_safepoint() const { + return false; + } + + virtual void doit() { + // Flush VM thread + Thread* const thread = Thread::current(); + _cl->do_thread(thread); + } + + virtual VMOp_Type type() const { + return VMOp_ZMarkFlushOperation; } +}; + +bool ZMark::flush() { + ZMarkFlushAndFreeStacksClosure cl(this); + VM_ZMarkFlushOperation vm_cl(&cl); + Handshake::execute(&cl); + VMThread::execute(&vm_cl); // Returns true if more work is available return cl.flushed() || !_stripes.is_empty(); } -bool ZMark::try_flush(volatile size_t* nflush) { - Atomic::inc(nflush); +bool ZMark::try_terminate_flush() { + Atomic::inc(&_work_nterminateflush); + _terminate.set_resurrected(false); - ZStatTimer timer(ZSubPhaseConcurrentMarkTryFlush); - return flush(false /* at_safepoint */); + if (ZVerifyMarking) { + verify_worker_stacks_empty(); + } + + return flush() || + _terminate.resurrected(); } bool ZMark::try_proactive_flush() { // Only do proactive flushes from worker 0 - if (ZThread::worker_id() != 0) { + if (WorkerThread::worker_id() != 0) { return false; } - if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax || - Atomic::load(&_work_nterminateflush) != 0) { + if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax) { // Limit reached or we're 
trying to terminate return false; } - return try_flush(&_work_nproactiveflush); -} - -bool ZMark::try_terminate() { - ZStatTimer timer(ZSubPhaseConcurrentMarkTryTerminate); - - if (_terminate.enter_stage0()) { - // Last thread entered stage 0, flush - if (Atomic::load(&_work_terminateflush) && - Atomic::load(&_work_nterminateflush) != ZMarkTerminateFlushMax) { - // Exit stage 0 to allow other threads to continue marking - _terminate.exit_stage0(); - - // Flush before termination - if (!try_flush(&_work_nterminateflush)) { - // No more work available, skip further flush attempts - Atomic::store(&_work_terminateflush, false); - } - - // Don't terminate, regardless of whether we successfully - // flushed out more work or not. We've already exited - // termination stage 0, to allow other threads to continue - // marking, so this thread has to return false and also - // make another round of attempted marking. - return false; - } - } - - for (;;) { - if (_terminate.enter_stage1()) { - // Last thread entered stage 1, terminate - return true; - } + Atomic::inc(&_work_nproactiveflush); - // Idle to give the other threads - // a chance to enter termination. - idle(); - - if (!_terminate.try_exit_stage1()) { - // All workers in stage 1, terminate - return true; - } + SuspendibleThreadSetLeaver sts_leaver; + return flush(); +} - if (_terminate.try_exit_stage0()) { - // More work available, don't terminate - return false; - } - } +bool ZMark::try_terminate(ZMarkContext* context) { + return _terminate.try_terminate(&_stripes, context->nstripes()); } -class ZMarkNoTimeout : public StackObj { -public: - bool has_expired() { - // No timeout, but check for signal to abort - return ZAbort::should_abort(); - } -}; +void ZMark::leave() { + _terminate.leave(); +} -void ZMark::work_without_timeout(ZMarkContext* context) { - ZStatTimer timer(ZSubPhaseConcurrentMark); - ZMarkNoTimeout no_timeout; +// Returning true means marking finished successfully after marking as far as it could. +// Returning false means that marking finished unsuccessfully due to abort or resizing. +bool ZMark::follow_work(bool partial) { + ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, WorkerThread::worker_id()); + ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::mark_stacks(Thread::current(), _generation->id()); + ZMarkContext context(ZMarkStripesMax, stripe, stacks); for (;;) { - if (!drain(context, &no_timeout)) { - // Abort - break; + if (!drain(&context)) { + leave(); + return false; } - if (try_steal(context)) { + if (try_steal(&context)) { // Stole work continue; } + if (partial) { + return true; + } + if (try_proactive_flush()) { // Work available continue; } - if (try_terminate()) { + if (try_terminate(&context)) { // Terminate - break; + return true; } } } -class ZMarkTimeout : public StackObj { -private: - const Ticks _start; - const uint64_t _timeout; - const uint64_t _check_interval; - uint64_t _check_at; - uint64_t _check_count; - bool _expired; - +class ZMarkOopClosure : public OopClosure { public: - ZMarkTimeout(uint64_t timeout_in_micros) : - _start(Ticks::now()), - _timeout(_start.value() + TimeHelper::micros_to_counter(timeout_in_micros)), - _check_interval(200), - _check_at(_check_interval), - _check_count(0), - _expired(false) {} - - ~ZMarkTimeout() { - const Tickspan duration = Ticks::now() - _start; - log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms", - ZThread::name(), _expired ? 
"Expired" : "Completed", - _check_count, TimeHelper::counter_to_millis(duration.value())); - } - - bool has_expired() { - if (++_check_count == _check_at) { - _check_at += _check_interval; - if ((uint64_t)Ticks::now().value() >= _timeout) { - // Timeout - _expired = true; - } - } - - return _expired; - } -}; - -void ZMark::work_with_timeout(ZMarkContext* context, uint64_t timeout_in_micros) { - ZStatTimer timer(ZSubPhaseMarkTryComplete); - ZMarkTimeout timeout(timeout_in_micros); - - for (;;) { - if (!drain(context, &timeout)) { - // Timed out - break; - } - - if (try_steal(context)) { - // Stole work - continue; - } - - // Terminate - break; + virtual void do_oop(oop* p) { + ZBarrier::mark_barrier_on_oop_field((zpointer*)p, false /* finalizable */); } -} - -void ZMark::work(uint64_t timeout_in_micros) { - ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, ZThread::worker_id()); - ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current()); - ZMarkContext context(_stripes.nstripes(), stripe, stacks); - if (timeout_in_micros == 0) { - work_without_timeout(&context); - } else { - work_with_timeout(&context, timeout_in_micros); + virtual void do_oop(narrowOop* p) { + ShouldNotReachHere(); } +}; - // Flush and publish stacks - stacks->flush(&_allocator, &_stripes); - - // Free remaining stacks - stacks->free(&_allocator); -} - -class ZMarkOopClosure : public OopClosure { +class ZMarkYoungOopClosure : public OopClosure { +public: virtual void do_oop(oop* p) { - ZBarrier::mark_barrier_on_oop_field(p, false /* finalizable */); + ZBarrier::mark_young_good_barrier_on_oop_field((zpointer*)p); } virtual void do_oop(narrowOop* p) { @@ -668,98 +708,222 @@ class ZMarkOopClosure : public OopClosure { class ZMarkThreadClosure : public ThreadClosure { private: - OopClosure* const _cl; + static ZUncoloredRoot::RootFunction root_function() { + return ZUncoloredRoot::mark; + } public: - ZMarkThreadClosure(OopClosure* cl) : - _cl(cl) { + ZMarkThreadClosure() { ZThreadLocalAllocBuffer::reset_statistics(); } ~ZMarkThreadClosure() { ZThreadLocalAllocBuffer::publish_statistics(); } + virtual void do_thread(Thread* thread) { JavaThread* const jt = JavaThread::cast(thread); - StackWatermarkSet::finish_processing(jt, _cl, StackWatermarkKind::gc); + + StackWatermarkSet::finish_processing(jt, (void*)root_function(), StackWatermarkKind::gc); ZThreadLocalAllocBuffer::update_stats(jt); } }; class ZMarkNMethodClosure : public NMethodClosure { private: - OopClosure* const _cl; + ZBarrierSetNMethod* const _bs_nm; public: - ZMarkNMethodClosure(OopClosure* cl) : - _cl(cl) {} + ZMarkNMethodClosure() : + _bs_nm(static_cast(BarrierSet::barrier_set()->barrier_set_nmethod())) {} virtual void do_nmethod(nmethod* nm) { ZLocker locker(ZNMethod::lock_for_nmethod(nm)); - if (ZNMethod::is_armed(nm)) { - ZNMethod::nmethod_oops_do_inner(nm, _cl); + if (_bs_nm->is_armed(nm)) { + // Heal barriers + ZNMethod::nmethod_patch_barriers(nm); + + // Heal oops + ZUncoloredRootMarkOopClosure cl(ZNMethod::color(nm)); + ZNMethod::nmethod_oops_do_inner(nm, &cl); // CodeCache unloading support nm->mark_as_maybe_on_stack(); - ZNMethod::disarm(nm); + log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by old", p2i(nm)); + + // Disarm + _bs_nm->disarm(nm); + } + } +}; + +class ZMarkYoungNMethodClosure : public NMethodClosure { +private: + ZBarrierSetNMethod* const _bs_nm; + +public: + ZMarkYoungNMethodClosure() : + _bs_nm(static_cast(BarrierSet::barrier_set()->barrier_set_nmethod())) {} + + virtual void 
do_nmethod(nmethod* nm) { + ZLocker locker(ZNMethod::lock_for_nmethod(nm)); + if (nm->is_unloading()) { + return; + } + + if (_bs_nm->is_armed(nm)) { + const uintptr_t prev_color = ZNMethod::color(nm); + + // Heal oops + ZUncoloredRootMarkYoungOopClosure cl(prev_color); + ZNMethod::nmethod_oops_do_inner(nm, &cl); + + // Disarm only the young marking, not any potential old marking cycle + + const uintptr_t old_marked_mask = ZPointerMarkedMask ^ (ZPointerMarkedYoung0 | ZPointerMarkedYoung1); + const uintptr_t old_marked = prev_color & old_marked_mask; + + const zpointer new_disarm_value_ptr = ZAddress::color(zaddress::null, ZPointerLoadGoodMask | ZPointerMarkedYoung | old_marked | ZPointerRemembered); + + // Check if disarming for young mark, completely disarms the nmethod entry barrier + const bool complete_disarm = ZPointer::is_store_good(new_disarm_value_ptr); + + if (complete_disarm) { + // We are about to completely disarm the nmethod, must take responsibility to patch all barriers before disarming + ZNMethod::nmethod_patch_barriers(nm); + } + + _bs_nm->set_guard_value(nm, (int)untype(new_disarm_value_ptr)); + + if (complete_disarm) { + log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by young (complete) [" PTR_FORMAT " -> " PTR_FORMAT "]", p2i(nm), prev_color, untype(new_disarm_value_ptr)); + assert(!_bs_nm->is_armed(nm), "Must not be considered armed anymore"); + } else { + log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by young (incomplete) [" PTR_FORMAT " -> " PTR_FORMAT "]", p2i(nm), prev_color, untype(new_disarm_value_ptr)); + assert(_bs_nm->is_armed(nm), "Must be considered armed"); + } } } }; -typedef ClaimingCLDToOopClosure ZMarkCLDClosure; +typedef ClaimingCLDToOopClosure ZMarkOldCLDClosure; -class ZMarkRootsTask : public ZTask { +class ZMarkOldRootsTask : public ZTask { private: - ZMark* const _mark; - SuspendibleThreadSetJoiner _sts_joiner; - ZRootsIterator _roots; + ZMark* const _mark; + ZRootsIteratorStrongColored _roots_colored; + ZRootsIteratorStrongUncolored _roots_uncolored; - ZMarkOopClosure _cl; - ZMarkCLDClosure _cld_cl; - ZMarkThreadClosure _thread_cl; - ZMarkNMethodClosure _nm_cl; + ZMarkOopClosure _cl_colored; + ZMarkOldCLDClosure _cld_cl; + + ZMarkThreadClosure _thread_cl; + ZMarkNMethodClosure _nm_cl; public: - ZMarkRootsTask(ZMark* mark) : - ZTask("ZMarkRootsTask"), + ZMarkOldRootsTask(ZMark* mark) : + ZTask("ZMarkOldRootsTask"), _mark(mark), - _sts_joiner(), - _roots(ClassLoaderData::_claim_strong), - _cl(), - _cld_cl(&_cl), - _thread_cl(&_cl), - _nm_cl(&_cl) { + _roots_colored(ZGenerationIdOptional::old), + _roots_uncolored(ZGenerationIdOptional::old), + _cl_colored(), + _cld_cl(&_cl_colored), + _thread_cl(), + _nm_cl() { ClassLoaderDataGraph_lock->lock(); } - ~ZMarkRootsTask() { + ~ZMarkOldRootsTask() { ClassLoaderDataGraph_lock->unlock(); } virtual void work() { - _roots.apply(&_cl, - &_cld_cl, - &_thread_cl, - &_nm_cl); + { + ZStatTimerWorker timer(ZSubPhaseConcurrentMarkRootColoredOld); + _roots_colored.apply(&_cl_colored, + &_cld_cl); + } + + { + ZStatTimerWorker timer(ZSubPhaseConcurrentMarkRootUncoloredOld); + _roots_uncolored.apply(&_thread_cl, + &_nm_cl); + } // Flush and free worker stacks. Needed here since // the set of workers executing during root scanning // can be different from the set of workers executing // during mark. 
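To illustrate why that flush is needed (anything left in a worker's thread-local mark stacks would be stranded if a different set of threads runs the next phase), here is a minimal standalone sketch of the publish step. The names and the mutex/vector plumbing are assumptions for the example only; the real code publishes ZMarkStack objects onto lock-free stripe lists, as seen in the zMarkStack changes below.

#include <mutex>
#include <vector>

// Stand-in for a shared mark stripe: the place where work becomes visible to
// whichever workers execute the next phase.
struct SharedStripe {
  std::mutex lock;
  std::vector<int> published;   // entries any worker may steal later

  void publish(std::vector<int>& local) {
    std::lock_guard<std::mutex> guard(lock);
    published.insert(published.end(), local.begin(), local.end());
    local.clear();              // the thread-local buffer is now empty
  }
};

// Called by each worker when its phase ends (for example after root scanning),
// so no marking work stays private to a thread that may not participate later.
void flush_and_free(SharedStripe& stripe, std::vector<int>& local_stack) {
  if (!local_stack.empty()) {
    stripe.publish(local_stack);
  }
}

The actual call that follows in the patch is ZHeap::heap()->mark_flush_and_free(Thread::current()), which performs the same publish-then-release step on the real mark stacks.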
- _mark->flush_and_free(); + ZHeap::heap()->mark_flush_and_free(Thread::current()); } }; -class ZMarkTask : public ZTask { +class ZMarkYoungCLDClosure : public ClaimingCLDToOopClosure { +public: + virtual void do_cld(ClassLoaderData* cld) { + if (!cld->is_alive()) { + // Skip marking through concurrently unloading CLDs + return; + } + ClaimingCLDToOopClosure::do_cld(cld); + } + + ZMarkYoungCLDClosure(OopClosure* cl) : + ClaimingCLDToOopClosure(cl) {} +}; + +class ZMarkYoungRootsTask : public ZTask { private: - ZMark* const _mark; - const uint64_t _timeout_in_micros; + ZMark* const _mark; + ZRootsIteratorAllColored _roots_colored; + ZRootsIteratorAllUncolored _roots_uncolored; + + ZMarkYoungOopClosure _cl_colored; + ZMarkYoungCLDClosure _cld_cl; + + ZMarkThreadClosure _thread_cl; + ZMarkYoungNMethodClosure _nm_cl; public: - ZMarkTask(ZMark* mark, uint64_t timeout_in_micros = 0) : - ZTask("ZMarkTask"), + ZMarkYoungRootsTask(ZMark* mark) : + ZTask("ZMarkYoungRootsTask"), _mark(mark), - _timeout_in_micros(timeout_in_micros) { + _roots_colored(ZGenerationIdOptional::young), + _roots_uncolored(ZGenerationIdOptional::young), + _cl_colored(), + _cld_cl(&_cl_colored), + _thread_cl(), + _nm_cl() {} + + virtual void work() { + { + ZStatTimerWorker timer(ZSubPhaseConcurrentMarkRootColoredYoung); + _roots_colored.apply(&_cl_colored, + &_cld_cl); + } + + { + ZStatTimerWorker timer(ZSubPhaseConcurrentMarkRootUncoloredYoung); + _roots_uncolored.apply(&_thread_cl, + &_nm_cl); + } + + // Flush and free worker stacks. Needed here since + // the set of workers executing during root scanning + // can be different from the set of workers executing + // during mark. + ZHeap::heap()->mark_flush_and_free(Thread::current()); + } +}; + +class ZMarkTask : public ZRestartableTask { +private: + ZMark* const _mark; + +public: + ZMarkTask(ZMark* mark) : + ZRestartableTask("ZMarkTask"), + _mark(mark) { _mark->prepare_work(); } @@ -768,42 +932,66 @@ class ZMarkTask : public ZTask { } virtual void work() { - _mark->work(_timeout_in_micros); + SuspendibleThreadSetJoiner sts_joiner; + _mark->follow_work_complete(); + // We might have found pointers into the other generation, and then we want to + // publish such marking stacks to prevent that generation from getting a mark continue. + // We also flush in case of a resize where a new worker thread continues the marking + // work, causing a mark continue for the collected generation. + ZHeap::heap()->mark_flush_and_free(Thread::current()); } -}; -void ZMark::mark(bool initial) { - if (initial) { - ZMarkRootsTask task(this); - _workers->run(&task); + virtual void resize_workers(uint nworkers) { + _mark->resize_workers(nworkers); } +}; - ZMarkTask task(this); - _workers->run(&task); +void ZMark::resize_workers(uint nworkers) { + _nworkers = nworkers; + const size_t nstripes = calculate_nstripes(nworkers); + _stripes.set_nstripes(nstripes); + _terminate.reset(nworkers); } -bool ZMark::try_complete() { - _ntrycomplete++; +void ZMark::mark_young_roots() { + SuspendibleThreadSetJoiner sts_joiner; + ZMarkYoungRootsTask task(this); + workers()->run(&task); +} - // Use nconcurrent number of worker threads to maintain the - // worker/stripe distribution used during concurrent mark. 
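For contrast with the timeout-bounded completion being removed here, the replacement control flow (ZMark::mark_follow, a few hunks below) simply reruns the marking task until a terminating flush reports no further work. A minimal sketch of that loop, using assumed helper signatures rather than the real ZMark interface:

// Assumed stand-ins for the real ZMark methods:
//   run_mark_task        - drains stripes with the current worker set
//   try_terminate_flush  - flushes every thread's stacks; returns true if
//                          the flush exposed more marking work
//   should_abort         - GC abort requested
void mark_follow_sketch(void (*run_mark_task)(),
                        bool (*try_terminate_flush)(),
                        bool (*should_abort)()) {
  for (;;) {
    run_mark_task();
    if (should_abort() || !try_terminate_flush()) {
      break;  // aborted, or no thread had hidden work left
    }
  }
}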
- ZMarkTask task(this, ZMarkCompleteTimeout); - _workers->run(&task); +void ZMark::mark_old_roots() { + SuspendibleThreadSetJoiner sts_joiner; + ZMarkOldRootsTask task(this); + workers()->run(&task); +} - // Successful if all stripes are empty - return _stripes.is_empty(); +void ZMark::mark_follow() { + for (;;) { + ZMarkTask task(this); + workers()->run(&task); + if (ZAbort::should_abort() || !try_terminate_flush()) { + break; + } + } } bool ZMark::try_end() { - // Flush all mark stacks - if (!flush(true /* at_safepoint */)) { - // Mark completed - return true; + if (_terminate.resurrected()) { + // An oop was resurrected after concurrent termination. + return false; + } + + // Try end marking + ZMarkFlushAndFreeStacksClosure cl(this); + Threads::non_java_threads_do(&cl); + + // Check if non-java threads have any pending marking + if (cl.flushed() || !_stripes.is_empty()) { + return false; } - // Try complete marking by doing a limited - // amount of mark work in this phase. - return try_complete(); + // Mark completed + return true; } bool ZMark::end() { @@ -820,12 +1008,7 @@ bool ZMark::end() { } // Update statistics - ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue); - - // Note that we finished a marking cycle. - // Unlike other GCs, we do not arm the nmethods - // when marking terminates. - CodeCache::on_gc_marking_cycle_finish(); + _generation->stat_mark()->at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue); // Mark completed return true; @@ -836,7 +1019,7 @@ void ZMark::free() { _allocator.free(); // Update statistics - ZStatMark::set_at_mark_free(_allocator.size()); + _generation->stat_mark()->at_mark_free(_allocator.size()); } void ZMark::flush_and_free() { @@ -845,8 +1028,11 @@ void ZMark::flush_and_free() { } bool ZMark::flush_and_free(Thread* thread) { - ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread); - const bool flushed = stacks->flush(&_allocator, &_stripes); + if (thread->is_Java_thread()) { + ZThreadLocalData::store_barrier_buffer(thread)->flush(); + } + ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::mark_stacks(thread, _generation->id()); + const bool flushed = stacks->flush(&_allocator, &_stripes, &_terminate); stacks->free(&_allocator); return flushed; } @@ -854,22 +1040,30 @@ bool ZMark::flush_and_free(Thread* thread) { class ZVerifyMarkStacksEmptyClosure : public ThreadClosure { private: const ZMarkStripeSet* const _stripes; + const ZGenerationId _generation_id; public: - ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes) : - _stripes(stripes) {} + ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes, ZGenerationId id) : + _stripes(stripes), + _generation_id(id) {} void do_thread(Thread* thread) { - ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread); + ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::mark_stacks(thread, _generation_id); guarantee(stacks->is_empty(_stripes), "Should be empty"); } }; void ZMark::verify_all_stacks_empty() const { // Verify thread stacks - ZVerifyMarkStacksEmptyClosure cl(&_stripes); + ZVerifyMarkStacksEmptyClosure cl(&_stripes, _generation->id()); Threads::threads_do(&cl); // Verify stripe stacks guarantee(_stripes.is_empty(), "Should be empty"); } + +void ZMark::verify_worker_stacks_empty() const { + // Verify thread stacks + ZVerifyMarkStacksEmptyClosure cl(&_stripes, _generation->id()); + workers()->threads_do(&cl); +} diff --git a/src/hotspot/share/gc/z/zMark.hpp 
b/src/hotspot/share/gc/z/zMark.hpp index 1de4eb604ea32..552bf3b959ddd 100644 --- a/src/hotspot/share/gc/z/zMark.hpp +++ b/src/hotspot/share/gc/z/zMark.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #ifndef SHARE_GC_Z_ZMARK_HPP #define SHARE_GC_Z_ZMARK_HPP +#include "gc/z/zAddress.hpp" #include "gc/z/zMarkStack.hpp" #include "gc/z/zMarkStackAllocator.hpp" #include "gc/z/zMarkStackEntry.hpp" @@ -32,6 +33,7 @@ #include "utilities/globalDefinitions.hpp" class Thread; +class ZGeneration; class ZMarkContext; class ZPageTable; class ZWorkers; @@ -39,13 +41,25 @@ class ZWorkers; class ZMark { friend class ZMarkTask; +public: + static const bool Resurrect = true; + static const bool DontResurrect = false; + + static const bool GCThread = true; + static const bool AnyThread = false; + + static const bool Follow = true; + static const bool DontFollow = false; + + static const bool Strong = false; + static const bool Finalizable = true; + private: - ZWorkers* const _workers; + ZGeneration* const _generation; ZPageTable* const _page_table; ZMarkStackAllocator _allocator; ZMarkStripeSet _stripes; ZMarkTerminate _terminate; - volatile bool _work_terminateflush; volatile size_t _work_nproactiveflush; volatile size_t _work_nterminateflush; size_t _nproactiveflush; @@ -56,51 +70,59 @@ class ZMark { size_t calculate_nstripes(uint nworkers) const; - bool is_array(uintptr_t addr) const; - void push_partial_array(uintptr_t addr, size_t size, bool finalizable); - void follow_small_array(uintptr_t addr, size_t size, bool finalizable); - void follow_large_array(uintptr_t addr, size_t size, bool finalizable); - void follow_array(uintptr_t addr, size_t size, bool finalizable); + bool is_array(zaddress addr) const; + void push_partial_array(zpointer* addr, size_t length, bool finalizable); + void follow_array_elements_small(zpointer* addr, size_t length, bool finalizable); + void follow_array_elements_large(zpointer* addr, size_t length, bool finalizable); + void follow_array_elements(zpointer* addr, size_t length, bool finalizable); void follow_partial_array(ZMarkStackEntry entry, bool finalizable); void follow_array_object(objArrayOop obj, bool finalizable); void follow_object(oop obj, bool finalizable); void mark_and_follow(ZMarkContext* context, ZMarkStackEntry entry); - template bool drain(ZMarkContext* context, T* timeout); + bool rebalance_work(ZMarkContext* context); + bool drain(ZMarkContext* context); bool try_steal_local(ZMarkContext* context); bool try_steal_global(ZMarkContext* context); bool try_steal(ZMarkContext* context); - void idle() const; - bool flush(bool at_safepoint); + bool flush(); bool try_proactive_flush(); - bool try_flush(volatile size_t* nflush); - bool try_terminate(); - bool try_complete(); + bool try_terminate(ZMarkContext* context); + void leave(); bool try_end(); - void prepare_work(); - void finish_work(); + ZWorkers* workers() const; - void work_without_timeout(ZMarkContext* context); - void work_with_timeout(ZMarkContext* context, uint64_t timeout_in_micros); - void work(uint64_t timeout_in_micros); + bool follow_work(bool partial); void verify_all_stacks_empty() const; + void verify_worker_stacks_empty() const; public: - ZMark(ZWorkers* workers, ZPageTable* page_table); + ZMark(ZGeneration* 
generation, ZPageTable* page_table); bool is_initialized() const; - template void mark_object(uintptr_t addr); + template + void mark_object(zaddress addr); void start(); - void mark(bool initial); + void mark_young_roots(); + void mark_old_roots(); + void mark_follow(); bool end(); void free(); void flush_and_free(); bool flush_and_free(Thread* thread); + + // Following work + void prepare_work(); + void finish_work(); + void resize_workers(uint nworkers); + void follow_work_complete(); + bool follow_work_partial(); + bool try_terminate_flush(); }; #endif // SHARE_GC_Z_ZMARK_HPP diff --git a/src/hotspot/share/gc/z/zMark.inline.hpp b/src/hotspot/share/gc/z/zMark.inline.hpp index 289d8df7db459..b530259361027 100644 --- a/src/hotspot/share/gc/z/zMark.inline.hpp +++ b/src/hotspot/share/gc/z/zMark.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,9 @@ #include "gc/z/zMark.hpp" #include "gc/z/zAddress.inline.hpp" +#include "gc/z/zGeneration.inline.hpp" #include "gc/z/zMarkStack.inline.hpp" +#include "gc/z/zMarkTerminate.inline.hpp" #include "gc/z/zPage.inline.hpp" #include "gc/z/zPageTable.inline.hpp" #include "gc/z/zThreadLocalData.hpp" @@ -43,9 +45,9 @@ // root processing has called ClassLoaderDataGraph::clear_claimed_marks(), // since it otherwise would interact badly with claiming of CLDs. -template -inline void ZMark::mark_object(uintptr_t addr) { - assert(ZAddress::is_marked(addr), "Should be marked"); +template +inline void ZMark::mark_object(zaddress addr) { + assert(!ZVerifyOops || oopDesc::is_oop(to_oop(addr)), "Should be oop"); ZPage* const page = _page_table->get(addr); if (page->is_allocating()) { @@ -64,17 +66,25 @@ inline void ZMark::mark_object(uintptr_t addr) { } } else { // Don't push if already marked - if (page->is_object_marked(addr)) { + if (page->is_object_marked(addr, finalizable)) { // Already marked return; } } + if (resurrect) { + _terminate.set_resurrected(true); + } + // Push - ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current()); - ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr); - ZMarkStackEntry entry(addr, !mark_before_push, inc_live, follow, finalizable); - stacks->push(&_allocator, &_stripes, stripe, entry, publish); + ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::mark_stacks(Thread::current(), _generation->id()); + ZMarkStripe* const stripe = _stripes.stripe_for_addr(untype(addr)); + ZMarkStackEntry entry(untype(ZAddress::offset(addr)), !mark_before_push, inc_live, follow, finalizable); + + assert(ZHeap::heap()->is_young(addr) == _generation->is_young(), "Phase/object mismatch"); + + const bool publish = !gc_thread; + stacks->push(&_allocator, &_stripes, stripe, &_terminate, entry, publish); } #endif // SHARE_GC_Z_ZMARK_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zMarkCache.cpp b/src/hotspot/share/gc/z/zMarkCache.cpp index e41e3f3c3f4d4..f9b48417ac695 100644 --- a/src/hotspot/share/gc/z/zMarkCache.cpp +++ b/src/hotspot/share/gc/z/zMarkCache.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,13 +26,17 @@ #include "utilities/globalDefinitions.hpp" #include "utilities/powerOfTwo.hpp" +static size_t shift_for_stripes(size_t nstripes) { + return ZMarkStripeShift + exact_log2(nstripes); +} + ZMarkCacheEntry::ZMarkCacheEntry() : - _page(NULL), + _page(nullptr), _objects(0), _bytes(0) {} ZMarkCache::ZMarkCache(size_t nstripes) : - _shift(ZMarkStripeShift + exact_log2(nstripes)) {} + _shift(shift_for_stripes(nstripes)) {} ZMarkCache::~ZMarkCache() { // Evict all entries @@ -40,3 +44,7 @@ ZMarkCache::~ZMarkCache() { _cache[i].evict(); } } + +void ZMarkCache::set_nstripes(size_t nstripes) { + _shift = shift_for_stripes(nstripes); +} diff --git a/src/hotspot/share/gc/z/zMarkCache.hpp b/src/hotspot/share/gc/z/zMarkCache.hpp index 8d902088c0584..f866856100d0a 100644 --- a/src/hotspot/share/gc/z/zMarkCache.hpp +++ b/src/hotspot/share/gc/z/zMarkCache.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,13 +44,15 @@ class ZMarkCacheEntry { class ZMarkCache : public StackObj { private: - const size_t _shift; + size_t _shift; ZMarkCacheEntry _cache[ZMarkCacheSize]; public: ZMarkCache(size_t nstripes); ~ZMarkCache(); + void set_nstripes(size_t nstripes); + void inc_live(ZPage* page, size_t bytes); }; diff --git a/src/hotspot/share/gc/z/zMarkCache.inline.hpp b/src/hotspot/share/gc/z/zMarkCache.inline.hpp index ee67d333d59a4..de7203967a769 100644 --- a/src/hotspot/share/gc/z/zMarkCache.inline.hpp +++ b/src/hotspot/share/gc/z/zMarkCache.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,16 +43,16 @@ inline void ZMarkCacheEntry::inc_live(ZPage* page, size_t bytes) { } inline void ZMarkCacheEntry::evict() { - if (_page != NULL) { + if (_page != nullptr) { // Write cached data out to page _page->inc_live(_objects, _bytes); - _page = NULL; + _page = nullptr; } } inline void ZMarkCache::inc_live(ZPage* page, size_t bytes) { const size_t mask = ZMarkCacheSize - 1; - const size_t index = (page->start() >> _shift) & mask; + const size_t index = (untype(page->start()) >> _shift) & mask; _cache[index].inc_live(page, bytes); } diff --git a/src/hotspot/share/gc/z/zMarkContext.hpp b/src/hotspot/share/gc/z/zMarkContext.hpp index 1b5ab76db7af1..009252e524da6 100644 --- a/src/hotspot/share/gc/z/zMarkContext.hpp +++ b/src/hotspot/share/gc/z/zMarkContext.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -34,8 +34,9 @@ class ZMarkThreadLocalStacks; class ZMarkContext : public StackObj { private: ZMarkCache _cache; - ZMarkStripe* const _stripe; + ZMarkStripe* _stripe; ZMarkThreadLocalStacks* const _stacks; + size_t _nstripes; StringDedup::Requests _string_dedup_requests; public: @@ -45,8 +46,12 @@ class ZMarkContext : public StackObj { ZMarkCache* cache(); ZMarkStripe* stripe(); + void set_stripe(ZMarkStripe* stripe); ZMarkThreadLocalStacks* stacks(); StringDedup::Requests* string_dedup_requests(); + + size_t nstripes(); + void set_nstripes(size_t nstripes); }; #endif // SHARE_GC_Z_ZMARKCONTEXT_HPP diff --git a/src/hotspot/share/gc/z/zMarkContext.inline.hpp b/src/hotspot/share/gc/z/zMarkContext.inline.hpp index b104ab61e4f50..4a3237890feb7 100644 --- a/src/hotspot/share/gc/z/zMarkContext.inline.hpp +++ b/src/hotspot/share/gc/z/zMarkContext.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,6 +32,7 @@ inline ZMarkContext::ZMarkContext(size_t nstripes, _cache(nstripes), _stripe(stripe), _stacks(stacks), + _nstripes(nstripes), _string_dedup_requests() {} inline ZMarkCache* ZMarkContext::cache() { @@ -42,6 +43,10 @@ inline ZMarkStripe* ZMarkContext::stripe() { return _stripe; } +inline void ZMarkContext::set_stripe(ZMarkStripe* stripe) { + _stripe = stripe; +} + inline ZMarkThreadLocalStacks* ZMarkContext::stacks() { return _stacks; } @@ -50,4 +55,13 @@ inline StringDedup::Requests* ZMarkContext::string_dedup_requests() { return &_string_dedup_requests; } +inline size_t ZMarkContext::nstripes() { + return _nstripes; +} + +inline void ZMarkContext::set_nstripes(size_t nstripes) { + _cache.set_nstripes(nstripes); + _nstripes = nstripes; +} + #endif // SHARE_GC_Z_ZMARKCACHE_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zMarkStack.cpp b/src/hotspot/share/gc/z/zMarkStack.cpp index b99c89a119a7e..64354fe022e24 100644 --- a/src/hotspot/share/gc/z/zMarkStack.cpp +++ b/src/hotspot/share/gc/z/zMarkStack.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,18 +24,25 @@ #include "precompiled.hpp" #include "gc/z/zMarkStack.inline.hpp" #include "gc/z/zMarkStackAllocator.hpp" +#include "gc/z/zMarkTerminate.inline.hpp" #include "logging/log.hpp" +#include "runtime/atomic.hpp" #include "utilities/debug.hpp" #include "utilities/powerOfTwo.hpp" -ZMarkStripe::ZMarkStripe() : - _published(), - _overflowed() {} +ZMarkStripe::ZMarkStripe(uintptr_t base) : + _published(base), + _overflowed(base) {} -ZMarkStripeSet::ZMarkStripeSet() : - _nstripes(0), +ZMarkStripeSet::ZMarkStripeSet(uintptr_t base) : _nstripes_mask(0), - _stripes() {} + _stripes() { + + // Re-construct array elements with the correct base + for (size_t i = 0; i < ARRAY_SIZE(_stripes); i++) { + _stripes[i] = ZMarkStripe(base); + } +} void ZMarkStripeSet::set_nstripes(size_t nstripes) { assert(is_power_of_2(nstripes), "Must be a power of two"); @@ -43,14 +50,19 @@ void ZMarkStripeSet::set_nstripes(size_t nstripes) { assert(nstripes >= 1, "Invalid number of stripes"); assert(nstripes <= ZMarkStripesMax, "Invalid number of stripes"); - _nstripes = nstripes; - _nstripes_mask = nstripes - 1; + // Mutators may read these values concurrently. It doesn't matter + // if they see the old or new values. + Atomic::store(&_nstripes_mask, nstripes - 1); + + log_debug(gc, marking)("Using " SIZE_FORMAT " mark stripes", nstripes); +} - log_debug(gc, marking)("Using " SIZE_FORMAT " mark stripes", _nstripes); +size_t ZMarkStripeSet::nstripes() const { + return Atomic::load(&_nstripes_mask) + 1; } bool ZMarkStripeSet::is_empty() const { - for (size_t i = 0; i < _nstripes; i++) { + for (size_t i = 0; i < ZMarkStripesMax; i++) { if (!_stripes[i].is_empty()) { return false; } @@ -60,35 +72,38 @@ bool ZMarkStripeSet::is_empty() const { } ZMarkStripe* ZMarkStripeSet::stripe_for_worker(uint nworkers, uint worker_id) { - const size_t spillover_limit = (nworkers / _nstripes) * _nstripes; + const size_t mask = Atomic::load(&_nstripes_mask); + const size_t nstripes = mask + 1; + + const size_t spillover_limit = (nworkers / nstripes) * nstripes; size_t index; if (worker_id < spillover_limit) { // Not a spillover worker, use natural stripe - index = worker_id & _nstripes_mask; + index = worker_id & mask; } else { // Distribute spillover workers evenly across stripes const size_t spillover_nworkers = nworkers - spillover_limit; const size_t spillover_worker_id = worker_id - spillover_limit; - const double spillover_chunk = (double)_nstripes / (double)spillover_nworkers; + const double spillover_chunk = (double)nstripes / (double)spillover_nworkers; index = spillover_worker_id * spillover_chunk; } - assert(index < _nstripes, "Invalid index"); + assert(index < nstripes, "Invalid index"); return &_stripes[index]; } ZMarkThreadLocalStacks::ZMarkThreadLocalStacks() : - _magazine(NULL) { + _magazine(nullptr) { for (size_t i = 0; i < ZMarkStripesMax; i++) { - _stacks[i] = NULL; + _stacks[i] = nullptr; } } bool ZMarkThreadLocalStacks::is_empty(const ZMarkStripeSet* stripes) const { - for (size_t i = 0; i < stripes->nstripes(); i++) { + for (size_t i = 0; i < ZMarkStripesMax; i++) { ZMarkStack* const stack = _stacks[i]; - if (stack != NULL) { + if (stack != nullptr) { return false; } } @@ -97,21 +112,21 @@ bool ZMarkThreadLocalStacks::is_empty(const ZMarkStripeSet* stripes) const { } ZMarkStack* ZMarkThreadLocalStacks::allocate_stack(ZMarkStackAllocator* allocator) { - if (_magazine == NULL) { + if (_magazine == nullptr) { // Allocate new magazine 
_magazine = allocator->alloc_magazine(); - if (_magazine == NULL) { - return NULL; + if (_magazine == nullptr) { + return nullptr; } } - ZMarkStack* stack = NULL; + ZMarkStack* stack = nullptr; if (!_magazine->pop(stack)) { // Magazine is empty, convert magazine into a new stack _magazine->~ZMarkStackMagazine(); stack = new ((void*)_magazine) ZMarkStack(); - _magazine = NULL; + _magazine = nullptr; } return stack; @@ -119,7 +134,7 @@ ZMarkStack* ZMarkThreadLocalStacks::allocate_stack(ZMarkStackAllocator* allocato void ZMarkThreadLocalStacks::free_stack(ZMarkStackAllocator* allocator, ZMarkStack* stack) { for (;;) { - if (_magazine == NULL) { + if (_magazine == nullptr) { // Convert stack into a new magazine stack->~ZMarkStack(); _magazine = new ((void*)stack) ZMarkStackMagazine(); @@ -133,22 +148,23 @@ void ZMarkThreadLocalStacks::free_stack(ZMarkStackAllocator* allocator, ZMarkSta // Free and uninstall full magazine allocator->free_magazine(_magazine); - _magazine = NULL; + _magazine = nullptr; } } bool ZMarkThreadLocalStacks::push_slow(ZMarkStackAllocator* allocator, ZMarkStripe* stripe, ZMarkStack** stackp, + ZMarkTerminate* terminate, ZMarkStackEntry entry, bool publish) { ZMarkStack* stack = *stackp; for (;;) { - if (stack == NULL) { + if (stack == nullptr) { // Allocate and install new stack *stackp = stack = allocate_stack(allocator); - if (stack == NULL) { + if (stack == nullptr) { // Out of mark stack memory return false; } @@ -160,8 +176,8 @@ bool ZMarkThreadLocalStacks::push_slow(ZMarkStackAllocator* allocator, } // Publish/Overflow and uninstall stack - stripe->publish_stack(stack, publish); - *stackp = stack = NULL; + stripe->publish_stack(stack, terminate, publish); + *stackp = stack = nullptr; } } @@ -172,10 +188,10 @@ bool ZMarkThreadLocalStacks::pop_slow(ZMarkStackAllocator* allocator, ZMarkStack* stack = *stackp; for (;;) { - if (stack == NULL) { + if (stack == nullptr) { // Try steal and install stack *stackp = stack = stripe->steal_stack(); - if (stack == NULL) { + if (stack == nullptr) { // Nothing to steal return false; } @@ -188,19 +204,19 @@ bool ZMarkThreadLocalStacks::pop_slow(ZMarkStackAllocator* allocator, // Free and uninstall stack free_stack(allocator, stack); - *stackp = stack = NULL; + *stackp = stack = nullptr; } } -bool ZMarkThreadLocalStacks::flush(ZMarkStackAllocator* allocator, ZMarkStripeSet* stripes) { +bool ZMarkThreadLocalStacks::flush(ZMarkStackAllocator* allocator, ZMarkStripeSet* stripes, ZMarkTerminate* terminate) { bool flushed = false; // Flush all stacks - for (size_t i = 0; i < stripes->nstripes(); i++) { + for (size_t i = 0; i < ZMarkStripesMax; i++) { ZMarkStripe* const stripe = stripes->stripe_at(i); ZMarkStack** const stackp = &_stacks[i]; ZMarkStack* const stack = *stackp; - if (stack == NULL) { + if (stack == nullptr) { continue; } @@ -208,10 +224,10 @@ bool ZMarkThreadLocalStacks::flush(ZMarkStackAllocator* allocator, ZMarkStripeSe if (stack->is_empty()) { free_stack(allocator, stack); } else { - stripe->publish_stack(stack); + stripe->publish_stack(stack, terminate, true /* publish */); flushed = true; } - *stackp = NULL; + *stackp = nullptr; } return flushed; @@ -219,8 +235,8 @@ bool ZMarkThreadLocalStacks::flush(ZMarkStackAllocator* allocator, ZMarkStripeSe void ZMarkThreadLocalStacks::free(ZMarkStackAllocator* allocator) { // Free and uninstall magazine - if (_magazine != NULL) { + if (_magazine != nullptr) { allocator->free_magazine(_magazine); - _magazine = NULL; + _magazine = nullptr; } } diff --git 
a/src/hotspot/share/gc/z/zMarkStack.hpp b/src/hotspot/share/gc/z/zMarkStack.hpp index 6c45d7ef7e0c1..5244830b3e2a0 100644 --- a/src/hotspot/share/gc/z/zMarkStack.hpp +++ b/src/hotspot/share/gc/z/zMarkStack.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,8 @@ #include "gc/z/zMarkStackEntry.hpp" #include "utilities/globalDefinitions.hpp" +class ZMarkTerminate; + template class ZStack { private: @@ -52,13 +54,14 @@ class ZStack { template class ZStackList { private: + uintptr_t _base; T* volatile _head; T* encode_versioned_pointer(const T* stack, uint32_t version) const; void decode_versioned_pointer(const T* vstack, T** stack, uint32_t* version) const; public: - ZStackList(); + explicit ZStackList(uintptr_t base); bool is_empty() const; @@ -82,25 +85,24 @@ class ZMarkStripe { ZCACHE_ALIGNED ZMarkStackList _overflowed; public: - ZMarkStripe(); + explicit ZMarkStripe(uintptr_t base = 0); bool is_empty() const; - void publish_stack(ZMarkStack* stack, bool publish = true); + void publish_stack(ZMarkStack* stack, ZMarkTerminate* terminate, bool publish); ZMarkStack* steal_stack(); }; class ZMarkStripeSet { private: - size_t _nstripes; size_t _nstripes_mask; ZMarkStripe _stripes[ZMarkStripesMax]; public: - ZMarkStripeSet(); + explicit ZMarkStripeSet(uintptr_t base); - size_t nstripes() const; void set_nstripes(size_t nstripes); + size_t nstripes() const; bool is_empty() const; @@ -124,6 +126,7 @@ class ZMarkThreadLocalStacks { bool push_slow(ZMarkStackAllocator* allocator, ZMarkStripe* stripe, ZMarkStack** stackp, + ZMarkTerminate* terminate, ZMarkStackEntry entry, bool publish); @@ -147,6 +150,7 @@ class ZMarkThreadLocalStacks { bool push(ZMarkStackAllocator* allocator, ZMarkStripeSet* stripes, ZMarkStripe* stripe, + ZMarkTerminate* terminate, ZMarkStackEntry entry, bool publish); @@ -156,7 +160,8 @@ class ZMarkThreadLocalStacks { ZMarkStackEntry& entry); bool flush(ZMarkStackAllocator* allocator, - ZMarkStripeSet* stripes); + ZMarkStripeSet* stripes, + ZMarkTerminate* terminate); void free(ZMarkStackAllocator* allocator); }; diff --git a/src/hotspot/share/gc/z/zMarkStack.inline.hpp b/src/hotspot/share/gc/z/zMarkStack.inline.hpp index 5eaff5d704923..0c3329d9c0783 100644 --- a/src/hotspot/share/gc/z/zMarkStack.inline.hpp +++ b/src/hotspot/share/gc/z/zMarkStack.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,13 +26,14 @@ #include "gc/z/zMarkStack.hpp" -#include "utilities/debug.hpp" +#include "gc/z/zMarkTerminate.inline.hpp" #include "runtime/atomic.hpp" +#include "utilities/debug.hpp" template inline ZStack::ZStack() : _top(0), - _next(NULL) {} + _next(nullptr) {} template inline bool ZStack::is_empty() const { @@ -75,17 +76,18 @@ inline ZStack** ZStack::next_addr() { } template -inline ZStackList::ZStackList() : - _head(encode_versioned_pointer(NULL, 0)) {} +inline ZStackList::ZStackList(uintptr_t base) : + _base(base), + _head(encode_versioned_pointer(nullptr, 0)) {} template inline T* ZStackList::encode_versioned_pointer(const T* stack, uint32_t version) const { uint64_t addr; - if (stack == NULL) { + if (stack == nullptr) { addr = (uint32_t)-1; } else { - addr = ((uint64_t)stack - ZMarkStackSpaceStart) >> ZMarkStackSizeShift; + addr = ((uint64_t)stack - _base) >> ZMarkStackSizeShift; } return (T*)((addr << 32) | (uint64_t)version); @@ -96,9 +98,9 @@ inline void ZStackList::decode_versioned_pointer(const T* vstack, T** stack, const uint64_t addr = (uint64_t)vstack >> 32; if (addr == (uint32_t)-1) { - *stack = NULL; + *stack = nullptr; } else { - *stack = (T*)((addr << ZMarkStackSizeShift) + ZMarkStackSpaceStart); + *stack = (T*)((addr << ZMarkStackSizeShift) + _base); } *version = (uint32_t)(uint64_t)vstack; @@ -107,11 +109,11 @@ inline void ZStackList::decode_versioned_pointer(const T* vstack, T** stack, template inline bool ZStackList::is_empty() const { const T* vstack = _head; - T* stack = NULL; + T* stack = nullptr; uint32_t version = 0; decode_versioned_pointer(vstack, &stack, &version); - return stack == NULL; + return stack == nullptr; } template @@ -136,13 +138,13 @@ inline void ZStackList::push(T* stack) { template inline T* ZStackList::pop() { T* vstack = _head; - T* stack = NULL; + T* stack = nullptr; uint32_t version = 0; for (;;) { decode_versioned_pointer(vstack, &stack, &version); - if (stack == NULL) { - return NULL; + if (stack == nullptr) { + return nullptr; } T* const new_vstack = encode_versioned_pointer(stack->next(), version + 1); @@ -159,14 +161,14 @@ inline T* ZStackList::pop() { template inline void ZStackList::clear() { - _head = encode_versioned_pointer(NULL, 0); + _head = encode_versioned_pointer(nullptr, 0); } inline bool ZMarkStripe::is_empty() const { return _published.is_empty() && _overflowed.is_empty(); } -inline void ZMarkStripe::publish_stack(ZMarkStack* stack, bool publish) { +inline void ZMarkStripe::publish_stack(ZMarkStack* stack, ZMarkTerminate* terminate, bool publish) { // A stack is published either on the published list or the overflowed // list. 
The published list is used by mutators publishing stacks for GC // workers to work on, while the overflowed list is used by GC workers @@ -178,42 +180,40 @@ inline void ZMarkStripe::publish_stack(ZMarkStack* stack, bool publish) { } else { _overflowed.push(stack); } + + terminate->wake_up(); } inline ZMarkStack* ZMarkStripe::steal_stack() { // Steal overflowed stacks first, then published stacks ZMarkStack* const stack = _overflowed.pop(); - if (stack != NULL) { + if (stack != nullptr) { return stack; } return _published.pop(); } -inline size_t ZMarkStripeSet::nstripes() const { - return _nstripes; -} - inline size_t ZMarkStripeSet::stripe_id(const ZMarkStripe* stripe) const { const size_t index = ((uintptr_t)stripe - (uintptr_t)_stripes) / sizeof(ZMarkStripe); - assert(index < _nstripes, "Invalid index"); + assert(index < ZMarkStripesMax, "Invalid index"); return index; } inline ZMarkStripe* ZMarkStripeSet::stripe_at(size_t index) { - assert(index < _nstripes, "Invalid index"); + assert(index < ZMarkStripesMax, "Invalid index"); return &_stripes[index]; } inline ZMarkStripe* ZMarkStripeSet::stripe_next(ZMarkStripe* stripe) { - const size_t index = (stripe_id(stripe) + 1) & _nstripes_mask; - assert(index < _nstripes, "Invalid index"); + const size_t index = (stripe_id(stripe) + 1) & (ZMarkStripesMax - 1); + assert(index < ZMarkStripesMax, "Invalid index"); return &_stripes[index]; } inline ZMarkStripe* ZMarkStripeSet::stripe_for_addr(uintptr_t addr) { - const size_t index = (addr >> ZMarkStripeShift) & _nstripes_mask; - assert(index < _nstripes, "Invalid index"); + const size_t index = (addr >> ZMarkStripeShift) & Atomic::load(&_nstripes_mask); + assert(index < ZMarkStripesMax, "Invalid index"); return &_stripes[index]; } @@ -221,7 +221,7 @@ inline void ZMarkThreadLocalStacks::install(ZMarkStripeSet* stripes, ZMarkStripe* stripe, ZMarkStack* stack) { ZMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)]; - assert(*stackp == NULL, "Should be empty"); + assert(*stackp == nullptr, "Should be empty"); *stackp = stack; } @@ -229,8 +229,8 @@ inline ZMarkStack* ZMarkThreadLocalStacks::steal(ZMarkStripeSet* stripes, ZMarkStripe* stripe) { ZMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)]; ZMarkStack* const stack = *stackp; - if (stack != NULL) { - *stackp = NULL; + if (stack != nullptr) { + *stackp = nullptr; } return stack; @@ -239,15 +239,16 @@ inline ZMarkStack* ZMarkThreadLocalStacks::steal(ZMarkStripeSet* stripes, inline bool ZMarkThreadLocalStacks::push(ZMarkStackAllocator* allocator, ZMarkStripeSet* stripes, ZMarkStripe* stripe, + ZMarkTerminate* terminate, ZMarkStackEntry entry, bool publish) { ZMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)]; ZMarkStack* const stack = *stackp; - if (stack != NULL && stack->push(entry)) { + if (stack != nullptr && stack->push(entry)) { return true; } - return push_slow(allocator, stripe, stackp, entry, publish); + return push_slow(allocator, stripe, stackp, terminate, entry, publish); } inline bool ZMarkThreadLocalStacks::pop(ZMarkStackAllocator* allocator, @@ -256,7 +257,7 @@ inline bool ZMarkThreadLocalStacks::pop(ZMarkStackAllocator* allocator, ZMarkStackEntry& entry) { ZMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)]; ZMarkStack* const stack = *stackp; - if (stack != NULL && stack->pop(entry)) { + if (stack != nullptr && stack->pop(entry)) { return true; } diff --git a/src/hotspot/share/gc/z/zMarkStackAllocator.cpp b/src/hotspot/share/gc/z/zMarkStackAllocator.cpp index 
dc38b75f8743e..df75c7b99a53f 100644 --- a/src/hotspot/share/gc/z/zMarkStackAllocator.cpp +++ b/src/hotspot/share/gc/z/zMarkStackAllocator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,8 +32,6 @@ #include "runtime/os.hpp" #include "utilities/debug.hpp" -uintptr_t ZMarkStackSpaceStart; - ZMarkStackSpace::ZMarkStackSpace() : _expand_lock(), _start(0), @@ -52,9 +50,6 @@ ZMarkStackSpace::ZMarkStackSpace() : // Successfully initialized _start = _top = _end = addr; - // Register mark stack space start - ZMarkStackSpaceStart = _start; - // Prime space _end += expand_space(); } @@ -63,6 +58,10 @@ bool ZMarkStackSpace::is_initialized() const { return _start != 0; } +uintptr_t ZMarkStackSpace::start() const { + return _start; +} + size_t ZMarkStackSpace::size() const { return _end - _start; } @@ -147,7 +146,7 @@ uintptr_t ZMarkStackSpace::expand_and_alloc_space(size_t size) { // Increment top before end to make sure another // thread can't steal out newly expanded space. - addr = Atomic::fetch_and_add(&_top, size); + addr = Atomic::fetch_then_add(&_top, size); Atomic::add(&_end, expand_size); return addr; @@ -170,13 +169,18 @@ void ZMarkStackSpace::free() { } ZMarkStackAllocator::ZMarkStackAllocator() : - _freelist(), - _space() {} + _space(), + _freelist(_space.start()), + _expanded_recently(false) {} bool ZMarkStackAllocator::is_initialized() const { return _space.is_initialized(); } +uintptr_t ZMarkStackAllocator::start() const { + return _space.start(); +} + size_t ZMarkStackAllocator::size() const { return _space.size(); } @@ -198,19 +202,31 @@ ZMarkStackMagazine* ZMarkStackAllocator::create_magazine_from_space(uintptr_t ad ZMarkStackMagazine* ZMarkStackAllocator::alloc_magazine() { // Try allocating from the free list first ZMarkStackMagazine* const magazine = _freelist.pop(); - if (magazine != NULL) { + if (magazine != nullptr) { return magazine; } + if (!Atomic::load(&_expanded_recently)) { + Atomic::cmpxchg(&_expanded_recently, false, true); + } + // Allocate new magazine const uintptr_t addr = _space.alloc(ZMarkStackMagazineSize); if (addr == 0) { - return NULL; + return nullptr; } return create_magazine_from_space(addr, ZMarkStackMagazineSize); } +bool ZMarkStackAllocator::clear_and_get_expanded_recently() { + if (!Atomic::load(&_expanded_recently)) { + return false; + } + + return Atomic::cmpxchg(&_expanded_recently, true, false); +} + void ZMarkStackAllocator::free_magazine(ZMarkStackMagazine* magazine) { _freelist.push(magazine); } diff --git a/src/hotspot/share/gc/z/zMarkStackAllocator.hpp b/src/hotspot/share/gc/z/zMarkStackAllocator.hpp index 68fd9d143381c..dc18a23e101e6 100644 --- a/src/hotspot/share/gc/z/zMarkStackAllocator.hpp +++ b/src/hotspot/share/gc/z/zMarkStackAllocator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
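The new _expanded_recently flag is a read-mostly signal: magazine allocators set it whenever they had to grow the mark-stack space, using a CAS guarded by a plain load so the cache line is only written when the state actually changes, and clear_and_get_expanded_recently() atomically reads and clears it. A small standalone sketch of that pattern, with illustrative names:

#include <atomic>

// A sticky "something happened recently" flag that is cheap to poll.
// Writers avoid dirtying the cache line when the flag is already set,
// and the reader clears it atomically so exactly one poll observes
// each "set" episode.
class RecentEventFlag {
private:
  std::atomic<bool> _set{false};

public:
  // Called by e.g. an allocator that just had to expand its backing space.
  void signal() {
    // Only attempt the (write-invalidating) CAS if the flag looks clear.
    if (!_set.load(std::memory_order_relaxed)) {
      bool expected = false;
      _set.compare_exchange_strong(expected, true);
    }
  }

  // Called by e.g. a heuristic that wants to know whether an expansion
  // happened since the previous call; clears the flag if it was set.
  bool clear_and_get() {
    if (!_set.load(std::memory_order_relaxed)) {
      return false;
    }
    bool expected = true;
    return _set.compare_exchange_strong(expected, false);
  }
};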
* * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "gc/z/zGlobals.hpp" #include "gc/z/zLock.hpp" +#include "gc/z/zMarkStack.hpp" #include "utilities/globalDefinitions.hpp" class ZMarkStackSpace { @@ -34,6 +35,7 @@ class ZMarkStackSpace { uintptr_t _start; volatile uintptr_t _top; volatile uintptr_t _end; + volatile bool _recently_expanded; size_t used() const; @@ -48,16 +50,18 @@ class ZMarkStackSpace { bool is_initialized() const; + uintptr_t start() const; size_t size() const; uintptr_t alloc(size_t size); void free(); }; -class ZMarkStackAllocator { +class ZMarkStackAllocator : public CHeapObj { private: - ZCACHE_ALIGNED ZMarkStackMagazineList _freelist; ZCACHE_ALIGNED ZMarkStackSpace _space; + ZCACHE_ALIGNED ZMarkStackMagazineList _freelist; + ZCACHE_ALIGNED volatile bool _expanded_recently; ZMarkStackMagazine* create_magazine_from_space(uintptr_t addr, size_t size); @@ -66,8 +70,11 @@ class ZMarkStackAllocator { bool is_initialized() const; + uintptr_t start() const; size_t size() const; + bool clear_and_get_expanded_recently(); + ZMarkStackMagazine* alloc_magazine(); void free_magazine(ZMarkStackMagazine* magazine); diff --git a/src/hotspot/share/gc/z/zMarkTerminate.hpp b/src/hotspot/share/gc/z/zMarkTerminate.hpp index ca29566d2c17b..cff1f8e73fabf 100644 --- a/src/hotspot/share/gc/z/zMarkTerminate.hpp +++ b/src/hotspot/share/gc/z/zMarkTerminate.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,31 +24,33 @@ #ifndef SHARE_GC_Z_ZMARKTERMINATE_HPP #define SHARE_GC_Z_ZMARKTERMINATE_HPP -#include "gc/z/zGlobals.hpp" -#include "memory/allocation.hpp" +#include "gc/z/zLock.hpp" #include "utilities/globalDefinitions.hpp" +class ZMarkStripeSet; + class ZMarkTerminate { private: - uint _nworkers; - ZCACHE_ALIGNED volatile uint _nworking_stage0; - volatile uint _nworking_stage1; + uint _nworkers; + volatile uint _nworking; + volatile uint _nawakening; + volatile bool _resurrected; + ZConditionLock _lock; - bool enter_stage(volatile uint* nworking_stage); - void exit_stage(volatile uint* nworking_stage); - bool try_exit_stage(volatile uint* nworking_stage); + void maybe_reduce_stripes(ZMarkStripeSet* stripes, size_t used_nstripes); public: ZMarkTerminate(); void reset(uint nworkers); + void leave(); - bool enter_stage0(); - void exit_stage0(); - bool try_exit_stage0(); + bool saturated() const; - bool enter_stage1(); - bool try_exit_stage1(); + void wake_up(); + bool try_terminate(ZMarkStripeSet* stripes, size_t used_nstripes); + void set_resurrected(bool value); + bool resurrected() const; }; #endif // SHARE_GC_Z_ZMARKTERMINATE_HPP diff --git a/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp b/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp index f738b65a1e3b3..49b9ac7aeaa0f 100644 --- a/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp +++ b/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,63 +26,119 @@ #include "gc/z/zMarkTerminate.hpp" +#include "gc/shared/suspendibleThreadSet.hpp" +#include "gc/z/zLock.inline.hpp" +#include "logging/log.hpp" #include "runtime/atomic.hpp" +#include "runtime/osThread.hpp" +#include "runtime/thread.inline.hpp" inline ZMarkTerminate::ZMarkTerminate() : _nworkers(0), - _nworking_stage0(0), - _nworking_stage1(0) {} + _nworking(0), + _nawakening(0), + _resurrected(false), + _lock() {} -inline bool ZMarkTerminate::enter_stage(volatile uint* nworking_stage) { - return Atomic::sub(nworking_stage, 1u) == 0; +inline void ZMarkTerminate::reset(uint nworkers) { + Atomic::store(&_nworkers, nworkers); + Atomic::store(&_nworking, nworkers); + _nawakening = 0; } -inline void ZMarkTerminate::exit_stage(volatile uint* nworking_stage) { - Atomic::add(nworking_stage, 1u); +inline void ZMarkTerminate::leave() { + SuspendibleThreadSetLeaver sts_leaver; + ZLocker locker(&_lock); + + Atomic::store(&_nworking, _nworking - 1); + if (_nworking == 0) { + // Last thread leaving; notify waiters + _lock.notify_all(); + } } -inline bool ZMarkTerminate::try_exit_stage(volatile uint* nworking_stage) { - uint nworking = Atomic::load(nworking_stage); +inline void ZMarkTerminate::maybe_reduce_stripes(ZMarkStripeSet* stripes, size_t used_nstripes) { + size_t nstripes = stripes->nstripes(); + if (used_nstripes == nstripes && nstripes > 1u) { + nstripes >>= 1; + stripes->set_nstripes(nstripes); + } +} - for (;;) { - if (nworking == 0) { - return false; - } +inline bool ZMarkTerminate::try_terminate(ZMarkStripeSet* stripes, size_t used_nstripes) { + SuspendibleThreadSetLeaver sts_leaver; + ZLocker locker(&_lock); - const uint new_nworking = nworking + 1; - const uint prev_nworking = Atomic::cmpxchg(nworking_stage, nworking, new_nworking); - if (prev_nworking == nworking) { - // Success - return true; - } + Atomic::store(&_nworking, _nworking - 1); + if (_nworking == 0) { + // Last thread entering termination: success + _lock.notify_all(); + return true; + } + + // If a worker runs out of work, it might be a sign that we have too many stripes + // hiding work. Try to reduce the number of stripes if possible. 
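maybe_reduce_stripes() relies on the stripe count always being a power of two: halving it only shrinks the index mask, so address-to-stripe mapping keeps producing valid indices and new work simply funnels into the low stripes while the abandoned ones drain. A standalone sketch of that invariant; StripeSet, MaxStripes and StripeShift are illustrative, not the HotSpot constants.

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>

// Striped work distribution where the stripe count is always a power of two.
// Halving the count merely shrinks the index mask, so the mapping never
// produces an out-of-range index.
class StripeSet {
private:
  static const size_t MaxStripes  = 16;  // power of two
  static const size_t StripeShift = 21;  // stripe selection granule (2M here)

  std::atomic<size_t> _nstripes_mask;    // nstripes - 1

public:
  explicit StripeSet(size_t nstripes)
    : _nstripes_mask(nstripes - 1) {
    assert(nstripes >= 1 && nstripes <= MaxStripes, "range");
    assert((nstripes & (nstripes - 1)) == 0, "power of two");
  }

  size_t nstripes() const {
    return _nstripes_mask.load(std::memory_order_relaxed) + 1;
  }

  void set_nstripes(size_t nstripes) {
    assert(nstripes >= 1 && (nstripes & (nstripes - 1)) == 0, "power of two");
    _nstripes_mask.store(nstripes - 1, std::memory_order_relaxed);
  }

  // Always yields an index < MaxStripes, even if the mask is concurrently
  // reduced by another thread.
  size_t stripe_for_addr(uintptr_t addr) const {
    return (addr >> StripeShift) & _nstripes_mask.load(std::memory_order_relaxed);
  }

  // The heuristic from the patch: a worker that ran dry while every
  // configured stripe was in use halves the stripe count, concentrating
  // the remaining work so it is easier to find.
  void maybe_reduce(size_t used_nstripes) {
    const size_t n = nstripes();
    if (used_nstripes == n && n > 1) {
      set_nstripes(n >> 1);
    }
  }
};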
+ maybe_reduce_stripes(stripes, used_nstripes); + _lock.wait(); - // Retry - nworking = prev_nworking; + // We either got notification about more work + // or got a spurious wakeup; don't terminate + if (_nawakening > 0) { + Atomic::store(&_nawakening, _nawakening - 1); } -} -inline void ZMarkTerminate::reset(uint nworkers) { - _nworkers = _nworking_stage0 = _nworking_stage1 = nworkers; -} + if (_nworking == 0) { + // We got notified all work is done; terminate + return true; + } + + Atomic::store(&_nworking, _nworking + 1); -inline bool ZMarkTerminate::enter_stage0() { - return enter_stage(&_nworking_stage0); + return false; } -inline void ZMarkTerminate::exit_stage0() { - exit_stage(&_nworking_stage0); +inline void ZMarkTerminate::wake_up() { + uint nworking = Atomic::load(&_nworking); + uint nawakening = Atomic::load(&_nawakening); + if (nworking + nawakening == Atomic::load(&_nworkers)) { + // Everyone is working or about to + return; + } + + if (nworking == 0) { + // Marking when marking task is not active + return; + } + + ZLocker locker(&_lock); + if (_nworking + _nawakening != _nworkers) { + // Everyone is not working + Atomic::store(&_nawakening, _nawakening + 1); + _lock.notify(); + } } -inline bool ZMarkTerminate::try_exit_stage0() { - return try_exit_stage(&_nworking_stage0); +inline bool ZMarkTerminate::saturated() const { + uint nworking = Atomic::load(&_nworking); + uint nawakening = Atomic::load(&_nawakening); + + return nworking + nawakening == Atomic::load(&_nworkers); } -inline bool ZMarkTerminate::enter_stage1() { - return enter_stage(&_nworking_stage1); +inline void ZMarkTerminate::set_resurrected(bool value) { + // Update resurrected if it changed + if (resurrected() != value) { + Atomic::store(&_resurrected, value); + if (value) { + log_debug(gc, marking)("Resurrection broke termination"); + } else { + log_debug(gc, marking)("Try terminate after resurrection"); + } + } } -inline bool ZMarkTerminate::try_exit_stage1() { - return try_exit_stage(&_nworking_stage1); +inline bool ZMarkTerminate::resurrected() const { + return Atomic::load(&_resurrected); } #endif // SHARE_GC_Z_ZMARKTERMINATE_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zMemory.cpp b/src/hotspot/share/gc/z/zMemory.cpp index 7b1c0749f75c0..23d02892eb213 100644 --- a/src/hotspot/share/gc/z/zMemory.cpp +++ b/src/hotspot/share/gc/z/zMemory.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
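The reworked termination protocol replaces the two lock-free stages with a condition lock: an idle worker decrements _nworking and parks, publishers call wake_up() to unpark at most one worker per new batch of work, and termination is reached when the last worker parks. A simplified standalone sketch using std::mutex/std::condition_variable in place of ZConditionLock; it omits the suspendible-thread-set and resurrection handling of the real patch.

#include <condition_variable>
#include <cstdint>
#include <mutex>

// Work-stealing termination protocol in the spirit of the patch above:
// workers that run out of work park on a condition variable instead of
// spinning, and anyone who publishes new work wakes one parked worker.
class Terminator {
private:
  std::mutex              _lock;
  std::condition_variable _cv;
  uint32_t                _nworkers;
  uint32_t                _nworking;
  uint32_t                _nawakening;  // woken but not yet back to work

public:
  explicit Terminator(uint32_t nworkers)
    : _nworkers(nworkers),
      _nworking(nworkers),
      _nawakening(0) {}

  // Called by a worker whose local and shared work sources are empty.
  // Returns true if marking is done, false if the worker should resume.
  bool try_terminate() {
    std::unique_lock<std::mutex> guard(_lock);
    if (--_nworking == 0) {
      // Last worker out of work: everyone may terminate.
      _cv.notify_all();
      return true;
    }

    _cv.wait(guard);  // new work published, everyone finished, or spurious

    if (_nawakening > 0) {
      _nawakening--;
    }
    if (_nworking == 0) {
      return true;    // woken by the final notify_all
    }
    _nworking++;      // back to work
    return false;
  }

  // Called by anyone (worker or mutator) that publishes new shared work.
  void wake_up() {
    std::lock_guard<std::mutex> guard(_lock);
    if (_nworking + _nawakening == _nworkers) {
      return;         // everyone is already working or about to
    }
    if (_nworking == 0) {
      return;         // termination already reached
    }
    _nawakening++;
    _cv.notify_one();
  }
};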
* * This code is free software; you can redistribute it and/or modify it @@ -26,56 +26,56 @@ #include "gc/z/zLock.inline.hpp" #include "gc/z/zMemory.inline.hpp" -ZMemory* ZMemoryManager::create(uintptr_t start, size_t size) { +ZMemory* ZMemoryManager::create(zoffset start, size_t size) { ZMemory* const area = new ZMemory(start, size); - if (_callbacks._create != NULL) { + if (_callbacks._create != nullptr) { _callbacks._create(area); } return area; } void ZMemoryManager::destroy(ZMemory* area) { - if (_callbacks._destroy != NULL) { + if (_callbacks._destroy != nullptr) { _callbacks._destroy(area); } delete area; } void ZMemoryManager::shrink_from_front(ZMemory* area, size_t size) { - if (_callbacks._shrink_from_front != NULL) { + if (_callbacks._shrink_from_front != nullptr) { _callbacks._shrink_from_front(area, size); } area->shrink_from_front(size); } void ZMemoryManager::shrink_from_back(ZMemory* area, size_t size) { - if (_callbacks._shrink_from_back != NULL) { + if (_callbacks._shrink_from_back != nullptr) { _callbacks._shrink_from_back(area, size); } area->shrink_from_back(size); } void ZMemoryManager::grow_from_front(ZMemory* area, size_t size) { - if (_callbacks._grow_from_front != NULL) { + if (_callbacks._grow_from_front != nullptr) { _callbacks._grow_from_front(area, size); } area->grow_from_front(size); } void ZMemoryManager::grow_from_back(ZMemory* area, size_t size) { - if (_callbacks._grow_from_back != NULL) { + if (_callbacks._grow_from_back != nullptr) { _callbacks._grow_from_back(area, size); } area->grow_from_back(size); } ZMemoryManager::Callbacks::Callbacks() : - _create(NULL), - _destroy(NULL), - _shrink_from_front(NULL), - _shrink_from_back(NULL), - _grow_from_front(NULL), - _grow_from_back(NULL) {} + _create(nullptr), + _destroy(nullptr), + _shrink_from_front(nullptr), + _shrink_from_back(nullptr), + _grow_from_front(nullptr), + _grow_from_back(nullptr) {} ZMemoryManager::ZMemoryManager() : _freelist(), @@ -85,19 +85,19 @@ void ZMemoryManager::register_callbacks(const Callbacks& callbacks) { _callbacks = callbacks; } -uintptr_t ZMemoryManager::peek_low_address() const { +zoffset ZMemoryManager::peek_low_address() const { ZLocker locker(&_lock); const ZMemory* const area = _freelist.first(); - if (area != NULL) { + if (area != nullptr) { return area->start(); } // Out of memory - return UINTPTR_MAX; + return zoffset(UINTPTR_MAX); } -uintptr_t ZMemoryManager::alloc_low_address(size_t size) { +zoffset ZMemoryManager::alloc_low_address(size_t size) { ZLocker locker(&_lock); ZListIterator iter(&_freelist); @@ -105,13 +105,13 @@ uintptr_t ZMemoryManager::alloc_low_address(size_t size) { if (area->size() >= size) { if (area->size() == size) { // Exact match, remove area - const uintptr_t start = area->start(); + const zoffset start = area->start(); _freelist.remove(area); destroy(area); return start; } else { // Larger than requested, shrink area - const uintptr_t start = area->start(); + const zoffset start = area->start(); shrink_from_front(area, size); return start; } @@ -119,24 +119,24 @@ uintptr_t ZMemoryManager::alloc_low_address(size_t size) { } // Out of memory - return UINTPTR_MAX; + return zoffset(UINTPTR_MAX); } -uintptr_t ZMemoryManager::alloc_low_address_at_most(size_t size, size_t* allocated) { +zoffset ZMemoryManager::alloc_low_address_at_most(size_t size, size_t* allocated) { ZLocker locker(&_lock); - ZMemory* area = _freelist.first(); - if (area != NULL) { + ZMemory* const area = _freelist.first(); + if (area != nullptr) { if (area->size() <= size) { // 
Smaller than or equal to requested, remove area - const uintptr_t start = area->start(); + const zoffset start = area->start(); *allocated = area->size(); _freelist.remove(area); destroy(area); return start; } else { // Larger than requested, shrink area - const uintptr_t start = area->start(); + const zoffset start = area->start(); shrink_from_front(area, size); *allocated = size; return start; @@ -145,10 +145,10 @@ uintptr_t ZMemoryManager::alloc_low_address_at_most(size_t size, size_t* allocat // Out of memory *allocated = 0; - return UINTPTR_MAX; + return zoffset(UINTPTR_MAX); } -uintptr_t ZMemoryManager::alloc_high_address(size_t size) { +zoffset ZMemoryManager::alloc_high_address(size_t size) { ZLocker locker(&_lock); ZListReverseIterator iter(&_freelist); @@ -156,25 +156,25 @@ uintptr_t ZMemoryManager::alloc_high_address(size_t size) { if (area->size() >= size) { if (area->size() == size) { // Exact match, remove area - const uintptr_t start = area->start(); + const zoffset start = area->start(); _freelist.remove(area); destroy(area); return start; } else { // Larger than requested, shrink area shrink_from_back(area, size); - return area->end(); + return to_zoffset(area->end()); } } } // Out of memory - return UINTPTR_MAX; + return zoffset(UINTPTR_MAX); } -void ZMemoryManager::free(uintptr_t start, size_t size) { - assert(start != UINTPTR_MAX, "Invalid address"); - const uintptr_t end = start + size; +void ZMemoryManager::free(zoffset start, size_t size) { + assert(start != zoffset(UINTPTR_MAX), "Invalid address"); + const zoffset_end end = to_zoffset_end(start, size); ZLocker locker(&_lock); @@ -182,7 +182,7 @@ void ZMemoryManager::free(uintptr_t start, size_t size) { for (ZMemory* area; iter.next(&area);) { if (start < area->start()) { ZMemory* const prev = _freelist.prev(area); - if (prev != NULL && start == prev->end()) { + if (prev != nullptr && start == prev->end()) { if (end == area->start()) { // Merge with prev and current area grow_from_back(prev, size + area->size()); @@ -209,7 +209,7 @@ void ZMemoryManager::free(uintptr_t start, size_t size) { // Insert last ZMemory* const last = _freelist.last(); - if (last != NULL && start == last->end()) { + if (last != nullptr && start == last->end()) { // Merge with last area grow_from_back(last, size); } else { diff --git a/src/hotspot/share/gc/z/zMemory.hpp b/src/hotspot/share/gc/z/zMemory.hpp index 85c4f5d232ddd..e75ac071d1dea 100644 --- a/src/hotspot/share/gc/z/zMemory.hpp +++ b/src/hotspot/share/gc/z/zMemory.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
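ZMemoryManager keeps an address-ordered free list, serves allocations first-fit from the low (or high) end, and merges every freed range with both neighbours. A compact standalone sketch of that allocator shape, with a std::map standing in for the intrusive sorted ZList and only the low-address path shown (the high-address path is symmetric); RangeManager and InvalidOffset are illustrative names.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>

// Address-ordered free-list manager: allocate from the lowest fitting area,
// merge freed ranges with both neighbours to keep the list short.
class RangeManager {
private:
  std::map<uintptr_t, size_t> _free;  // start -> size, non-overlapping, coalesced

public:
  static const uintptr_t InvalidOffset = UINTPTR_MAX;

  void register_range(uintptr_t start, size_t size) {
    free_range(start, size);
  }

  // First fit from the low end, like alloc_low_address().
  uintptr_t alloc_low(size_t size) {
    for (auto it = _free.begin(); it != _free.end(); ++it) {
      if (it->second >= size) {
        const uintptr_t start = it->first;
        const size_t remaining = it->second - size;
        _free.erase(it);
        if (remaining > 0) {
          _free[start + size] = remaining;  // shrink from the front
        }
        return start;
      }
    }
    return InvalidOffset;  // out of memory
  }

  // Insert and coalesce with the previous and next areas, like free().
  void free_range(uintptr_t start, size_t size) {
    assert(start != InvalidOffset && size > 0);

    auto next = _free.lower_bound(start);

    // Merge with the previous area if it ends exactly at 'start'.
    if (next != _free.begin()) {
      auto prev = std::prev(next);
      if (prev->first + prev->second == start) {
        start = prev->first;
        size += prev->second;
        _free.erase(prev);
      }
    }

    // Merge with the next area if it begins exactly where this range ends.
    if (next != _free.end() && next->first == start + size) {
      size += next->second;
      _free.erase(next);
    }

    _free[start] = size;
  }
};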
* * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #ifndef SHARE_GC_Z_ZMEMORY_HPP #define SHARE_GC_Z_ZMEMORY_HPP +#include "gc/z/zAddress.hpp" #include "gc/z/zList.hpp" #include "gc/z/zLock.hpp" #include "memory/allocation.hpp" @@ -32,15 +33,15 @@ class ZMemory : public CHeapObj { friend class ZList; private: - uintptr_t _start; - uintptr_t _end; + zoffset _start; + zoffset_end _end; ZListNode _node; public: - ZMemory(uintptr_t start, size_t size); + ZMemory(zoffset start, size_t size); - uintptr_t start() const; - uintptr_t end() const; + zoffset start() const; + zoffset_end end() const; size_t size() const; void shrink_from_front(size_t size); @@ -70,7 +71,7 @@ class ZMemoryManager { ZList _freelist; Callbacks _callbacks; - ZMemory* create(uintptr_t start, size_t size); + ZMemory* create(zoffset start, size_t size); void destroy(ZMemory* area); void shrink_from_front(ZMemory* area, size_t size); void shrink_from_back(ZMemory* area, size_t size); @@ -82,12 +83,12 @@ class ZMemoryManager { void register_callbacks(const Callbacks& callbacks); - uintptr_t peek_low_address() const; - uintptr_t alloc_low_address(size_t size); - uintptr_t alloc_low_address_at_most(size_t size, size_t* allocated); - uintptr_t alloc_high_address(size_t size); + zoffset peek_low_address() const; + zoffset alloc_low_address(size_t size); + zoffset alloc_low_address_at_most(size_t size, size_t* allocated); + zoffset alloc_high_address(size_t size); - void free(uintptr_t start, size_t size); + void free(zoffset start, size_t size); }; #endif // SHARE_GC_Z_ZMEMORY_HPP diff --git a/src/hotspot/share/gc/z/zMemory.inline.hpp b/src/hotspot/share/gc/z/zMemory.inline.hpp index 895e38375ec50..19cccd3f6f5ce 100644 --- a/src/hotspot/share/gc/z/zMemory.inline.hpp +++ b/src/hotspot/share/gc/z/zMemory.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,18 +26,19 @@ #include "gc/z/zMemory.hpp" +#include "gc/z/zAddress.inline.hpp" #include "gc/z/zList.inline.hpp" #include "utilities/debug.hpp" -inline ZMemory::ZMemory(uintptr_t start, size_t size) : +inline ZMemory::ZMemory(zoffset start, size_t size) : _start(start), - _end(start + size) {} + _end(to_zoffset_end(start, size)) {} -inline uintptr_t ZMemory::start() const { +inline zoffset ZMemory::start() const { return _start; } -inline uintptr_t ZMemory::end() const { +inline zoffset_end ZMemory::end() const { return _end; } @@ -56,7 +57,7 @@ inline void ZMemory::shrink_from_back(size_t size) { } inline void ZMemory::grow_from_front(size_t size) { - assert(start() >= size, "Too big"); + assert(size_t(start()) >= size, "Too big"); _start -= size; } diff --git a/src/hotspot/share/gc/z/zNMethod.cpp b/src/hotspot/share/gc/z/zNMethod.cpp index db4d3775d23ec..a851daba84b77 100644 --- a/src/hotspot/share/gc/z/zNMethod.cpp +++ b/src/hotspot/share/gc/z/zNMethod.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,25 +22,32 @@ */ #include "precompiled.hpp" +#include "code/codeCache.hpp" #include "code/relocInfo.hpp" #include "code/nmethod.hpp" #include "code/icBuffer.hpp" #include "gc/shared/barrierSet.hpp" #include "gc/shared/barrierSetNMethod.hpp" #include "gc/shared/suspendibleThreadSet.hpp" +#include "gc/z/zAddress.hpp" +#include "gc/z/zArray.inline.hpp" #include "gc/z/zBarrier.inline.hpp" -#include "gc/z/zGlobals.hpp" +#include "gc/z/zBarrierSet.hpp" +#include "gc/z/zBarrierSetAssembler.hpp" +#include "gc/z/zBarrierSetNMethod.hpp" #include "gc/z/zLock.inline.hpp" #include "gc/z/zNMethod.hpp" #include "gc/z/zNMethodData.hpp" #include "gc/z/zNMethodTable.hpp" #include "gc/z/zTask.hpp" +#include "gc/z/zUncoloredRoot.inline.hpp" #include "gc/z/zWorkers.hpp" #include "logging/log.hpp" #include "memory/allocation.inline.hpp" #include "memory/iterator.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" +#include "oops/klass.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/continuation.hpp" @@ -55,44 +62,42 @@ static void set_gc_data(nmethod* nm, ZNMethodData* data) { } void ZNMethod::attach_gc_data(nmethod* nm) { - GrowableArray immediate_oops; - bool non_immediate_oops = false; + ZArray barriers; + ZArray immediate_oops; + bool has_non_immediate_oops = false; - // Find all oop relocations + // Find all barrier and oop relocations RelocIterator iter(nm); while (iter.next()) { - if (iter.type() != relocInfo::oop_type) { - // Not an oop - continue; - } - - oop_Relocation* r = iter.oop_reloc(); - - if (!r->oop_is_immediate()) { - // Non-immediate oop found - non_immediate_oops = true; - continue; - } - - if (r->oop_value() != NULL) { - // Non-NULL immediate oop found. NULL oops can safely be - // ignored since the method will be re-registered if they - // are later patched to be non-NULL. - immediate_oops.push(r->oop_addr()); + if (iter.type() == relocInfo::barrier_type) { + // Barrier relocation + barrier_Relocation* const reloc = iter.barrier_reloc(); + barriers.push({ reloc->addr(), reloc->format() }); + } else if (iter.type() == relocInfo::oop_type) { + // Oop relocation + oop_Relocation* const reloc = iter.oop_reloc(); + + if (!reloc->oop_is_immediate()) { + // Non-immediate oop found + has_non_immediate_oops = true; + } else if (reloc->oop_value() != nullptr) { + // Non-null immediate oop found. null oops can safely be + // ignored since the method will be re-registered if they + // are later patched to be non-null. 
+ immediate_oops.push(reloc->oop_addr()); + } } } // Attach GC data to nmethod ZNMethodData* data = gc_data(nm); - if (data == NULL) { + if (data == nullptr) { data = new ZNMethodData(); set_gc_data(nm, data); } - // Attach oops in GC data - ZNMethodDataOops* const new_oops = ZNMethodDataOops::create(immediate_oops, non_immediate_oops); - ZNMethodDataOops* const old_oops = data->swap_oops(new_oops); - ZNMethodDataOops::destroy(old_oops); + // Attach barriers and oops to GC data + data->swap(&barriers, &immediate_oops, has_non_immediate_oops); } ZReentrantLock* ZNMethod::lock_for_nmethod(nmethod* nm) { @@ -100,47 +105,55 @@ ZReentrantLock* ZNMethod::lock_for_nmethod(nmethod* nm) { } void ZNMethod::log_register(const nmethod* nm) { - LogTarget(Trace, gc, nmethod) log; + LogTarget(Debug, gc, nmethod) log; if (!log.is_enabled()) { return; } - const ZNMethodDataOops* const oops = gc_data(nm)->oops(); + ResourceMark rm; + + const ZNMethodData* const data = gc_data(nm); - log.print("Register NMethod: %s.%s (" PTR_FORMAT "), " - "Compiler: %s, Oops: %d, ImmediateOops: " SIZE_FORMAT ", NonImmediateOops: %s", + log.print("Register NMethod: %s.%s (" PTR_FORMAT ") [" PTR_FORMAT ", " PTR_FORMAT "] " + "Compiler: %s, Barriers: %d, Oops: %d, ImmediateOops: %d, NonImmediateOops: %s", nm->method()->method_holder()->external_name(), nm->method()->name()->as_C_string(), p2i(nm), + p2i(nm->code_begin()), + p2i(nm->code_end()), nm->compiler_name(), + data->barriers()->length(), nm->oops_count() - 1, - oops->immediates_count(), - oops->has_non_immediates() ? "Yes" : "No"); - - LogTarget(Trace, gc, nmethod, oops) log_oops; - if (!log_oops.is_enabled()) { - return; + data->immediate_oops()->length(), + data->has_non_immediate_oops() ? "Yes" : "No"); + + LogTarget(Trace, gc, nmethod, barrier) log_barriers; + if (log_barriers.is_enabled()) { + // Print nmethod barriers + ZArrayIterator iter(data->barriers()); + for (ZNMethodDataBarrier b; iter.next(&b);) { + log_barriers.print(" Barrier: %d @ " PTR_FORMAT, + b._reloc_format, p2i(b._reloc_addr)); + } } - // Print nmethod oops table - { + LogTarget(Trace, gc, nmethod, oops) log_oops; + if (log_oops.is_enabled()) { + // Print nmethod oops table oop* const begin = nm->oops_begin(); oop* const end = nm->oops_end(); for (oop* p = begin; p < end; p++) { const oop o = Atomic::load(p); // C1 PatchingStub may replace it concurrently. - const char* external_name = (o == nullptr) ? "N/A" : o->klass()->external_name(); - log_oops.print(" Oop[" SIZE_FORMAT "] " PTR_FORMAT " (%s)", - (p - begin), p2i(o), external_name); + const char* const external_name = (o == nullptr) ? 
"N/A" : o->klass()->external_name(); + log_oops.print(" Oop: " PTR_FORMAT " (%s)", + p2i(o), external_name); } - } - // Print nmethod immediate oops - { - oop** const begin = oops->immediates_begin(); - oop** const end = oops->immediates_end(); - for (oop** p = begin; p < end; p++) { - log_oops.print(" ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)", - (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name()); + // Print nmethod immediate oops + ZArrayIterator iter(data->immediate_oops()); + for (oop* p; iter.next(&p);) { + log_oops.print(" ImmediateOop: " PTR_FORMAT " @ " PTR_FORMAT " (%s)", + p2i(*p), p2i(p), (*p)->klass()->external_name()); } } } @@ -151,20 +164,44 @@ void ZNMethod::log_unregister(const nmethod* nm) { return; } - log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ")", + ResourceMark rm; + + log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ") [" PTR_FORMAT ", " PTR_FORMAT "] ", nm->method()->method_holder()->external_name(), nm->method()->name()->as_C_string(), - p2i(nm)); + p2i(nm), + p2i(nm->code_begin()), + p2i(nm->code_end())); } -void ZNMethod::register_nmethod(nmethod* nm) { +void ZNMethod::log_purge(const nmethod* nm) { + LogTarget(Debug, gc, nmethod) log; + if (!log.is_enabled()) { + return; + } + ResourceMark rm; + log.print("Purge NMethod: %s.%s (" PTR_FORMAT ") [" PTR_FORMAT ", " PTR_FORMAT "] ", + nm->method()->method_holder()->external_name(), + nm->method()->name()->as_C_string(), + p2i(nm), + p2i(nm->code_begin()), + p2i(nm->code_end())); +} + +void ZNMethod::register_nmethod(nmethod* nm) { // Create and attach gc data attach_gc_data(nm); + ZLocker locker(lock_for_nmethod(nm)); + log_register(nm); + // Patch nmethod barriers + nmethod_patch_barriers(nm); + + // Register nmethod ZNMethodTable::register_nmethod(nm); // Disarm nmethod entry barrier @@ -172,11 +209,13 @@ void ZNMethod::register_nmethod(nmethod* nm) { } void ZNMethod::unregister_nmethod(nmethod* nm) { - ResourceMark rm; - log_unregister(nm); ZNMethodTable::unregister_nmethod(nm); +} + +void ZNMethod::purge_nmethod(nmethod* nm) { + log_purge(nm); // Destroy GC data delete gc_data(nm); @@ -202,8 +241,16 @@ void ZNMethod::set_guard_value(nmethod* nm, int value) { bs->set_guard_value(nm, value); } +void ZNMethod::nmethod_patch_barriers(nmethod* nm) { + ZBarrierSetAssembler* const bs_asm = ZBarrierSet::assembler(); + ZArrayIterator iter(gc_data(nm)->barriers()); + for (ZNMethodDataBarrier barrier; iter.next(&barrier);) { + bs_asm->patch_barrier_relocation(barrier._reloc_addr, barrier._reloc_format); + } +} + void ZNMethod::nmethod_oops_do(nmethod* nm, OopClosure* cl) { - ZLocker locker(ZNMethod::lock_for_nmethod(nm)); + ZLocker locker(lock_for_nmethod(nm)); ZNMethod::nmethod_oops_do_inner(nm, cl); } @@ -219,55 +266,69 @@ void ZNMethod::nmethod_oops_do_inner(nmethod* nm, OopClosure* cl) { } } - ZNMethodDataOops* const oops = gc_data(nm)->oops(); + ZNMethodData* const data = gc_data(nm); // Process immediate oops { - oop** const begin = oops->immediates_begin(); - oop** const end = oops->immediates_end(); - for (oop** p = begin; p < end; p++) { - if (*p != Universe::non_oop_word()) { - cl->do_oop(*p); + ZArrayIterator iter(data->immediate_oops()); + for (oop* p; iter.next(&p);) { + if (!Universe::contains_non_oop_word(p)) { + cl->do_oop(p); } } } // Process non-immediate oops - if (oops->has_non_immediates()) { + if (data->has_non_immediate_oops()) { nm->fix_oop_relocations(); } } -class ZNMethodOopClosure : public OopClosure { -public: - virtual void do_oop(oop* p) { - if 
(ZResurrection::is_blocked()) { - ZBarrier::keep_alive_barrier_on_phantom_root_oop_field(p); - } else { - ZBarrier::load_barrier_on_root_oop_field(p); - } - } - - virtual void do_oop(narrowOop* p) { - ShouldNotReachHere(); - } -}; +void ZNMethod::nmethods_do_begin(bool secondary) { + ZNMethodTable::nmethods_do_begin(secondary); +} -void ZNMethod::nmethod_oops_barrier(nmethod* nm) { - ZNMethodOopClosure cl; - nmethod_oops_do_inner(nm, &cl); +void ZNMethod::nmethods_do_end(bool secondary) { + ZNMethodTable::nmethods_do_end(secondary); } -void ZNMethod::nmethods_do_begin() { - ZNMethodTable::nmethods_do_begin(); +void ZNMethod::nmethods_do(bool secondary, NMethodClosure* cl) { + ZNMethodTable::nmethods_do(secondary, cl); } -void ZNMethod::nmethods_do_end() { - ZNMethodTable::nmethods_do_end(); +uintptr_t ZNMethod::color(nmethod* nm) { + BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); + // color is stored at low order bits of int; implicit conversion to uintptr_t is fine + return bs_nm->guard_value(nm); } -void ZNMethod::nmethods_do(NMethodClosure* cl) { - ZNMethodTable::nmethods_do(cl); +oop ZNMethod::load_oop(oop* p, DecoratorSet decorators) { + assert((decorators & ON_WEAK_OOP_REF) == 0, + "nmethod oops have phantom strength, not weak"); + nmethod* const nm = CodeCache::find_nmethod((void*)p); + if (!is_armed(nm)) { + // If the nmethod entry barrier isn't armed, then it has been applied + // already. The implication is that the contents of the memory location + // is already a valid oop, and the barrier would have kept it alive if + // necessary. Therefore, no action is required, and we are allowed to + // simply read the oop. + return *p; + } + + const bool keep_alive = (decorators & ON_PHANTOM_OOP_REF) != 0 && + (decorators & AS_NO_KEEPALIVE) == 0; + ZLocker locker(ZNMethod::lock_for_nmethod(nm)); + + // Make a local root + zaddress_unsafe obj = *ZUncoloredRoot::cast(p); + + if (keep_alive) { + ZUncoloredRoot::process(&obj, ZNMethod::color(nm)); + } else { + ZUncoloredRoot::process_no_keepalive(&obj, ZNMethod::color(nm)); + } + + return to_oop(safe(obj)); } class ZNMethodUnlinkClosure : public NMethodClosure { @@ -290,6 +351,10 @@ class ZNMethodUnlinkClosure : public NMethodClosure { } if (nm->is_unloading()) { + // Unlink from the ZNMethodTable + ZNMethod::unregister_nmethod(nm); + + // Shared unlink ZLocker locker(ZNMethod::lock_for_nmethod(nm)); nm->unlink(); return; @@ -298,9 +363,25 @@ class ZNMethodUnlinkClosure : public NMethodClosure { ZLocker locker(ZNMethod::lock_for_nmethod(nm)); if (ZNMethod::is_armed(nm)) { - // Heal oops and arm phase invariantly - ZNMethod::nmethod_oops_barrier(nm); - ZNMethod::set_guard_value(nm, 0); + const uintptr_t prev_color = ZNMethod::color(nm); + assert(prev_color != ZPointerStoreGoodMask, "Potentially non-monotonic transition"); + + // Heal oops and potentially mark young objects if there is a concurrent young collection. + ZUncoloredRootProcessOopClosure cl(prev_color); + ZNMethod::nmethod_oops_do_inner(nm, &cl); + + // Disarm for marking and relocation, but leave the remset bits so this isn't store good. + // This makes sure the mutator still takes a slow path to fill in the nmethod epoch for + // the sweeper, to track continuations, if they exist in the system. + const zpointer new_disarm_value_ptr = ZAddress::color(zaddress::null, ZPointerMarkGoodMask | ZPointerRememberedMask); + + // The new disarm value is mark good, and hence never store good. Therefore, this operation + // never completely disarms the nmethod. 
Therefore, we don't need to patch barriers yet + // via ZNMethod::nmethod_patch_barriers. + ZNMethod::set_guard_value(nm, (int)untype(new_disarm_value_ptr)); + + log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by unlinking [" PTR_FORMAT " -> " PTR_FORMAT "]", p2i(nm), prev_color, untype(new_disarm_value_ptr)); + assert(ZNMethod::is_armed(nm), "Must be considered armed"); } // Clear compiled ICs and exception caches @@ -324,16 +405,16 @@ class ZNMethodUnlinkTask : public ZTask { ZTask("ZNMethodUnlinkTask"), _cl(unloading_occurred), _verifier(verifier) { - ZNMethodTable::nmethods_do_begin(); + ZNMethodTable::nmethods_do_begin(false /* secondary */); } ~ZNMethodUnlinkTask() { - ZNMethodTable::nmethods_do_end(); + ZNMethodTable::nmethods_do_end(false /* secondary */); } virtual void work() { ICRefillVerifierMark mark(_verifier); - ZNMethodTable::nmethods_do(&_cl); + ZNMethodTable::nmethods_do(false /* secondary */, &_cl); } bool success() const { @@ -356,7 +437,7 @@ void ZNMethod::unlink(ZWorkers* workers, bool unloading_occurred) { // Cleaning failed because we ran out of transitional IC stubs, // so we have to refill and try again. Refilling requires taking // a safepoint, so we temporarily leave the suspendible thread set. - SuspendibleThreadSetLeaver sts; + SuspendibleThreadSetLeaver sts_leaver; InlineCacheBuffer::refill_ic_stubs(); } } diff --git a/src/hotspot/share/gc/z/zNMethod.hpp b/src/hotspot/share/gc/z/zNMethod.hpp index 543a3a2173289..1c6ef82328cb1 100644 --- a/src/hotspot/share/gc/z/zNMethod.hpp +++ b/src/hotspot/share/gc/z/zNMethod.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,7 +24,10 @@ #ifndef SHARE_GC_Z_ZNMETHOD_HPP #define SHARE_GC_Z_ZNMETHOD_HPP -#include "memory/allStatic.hpp" +#include "memory/allocation.hpp" +#include "memory/iterator.hpp" +#include "oops/accessDecorators.hpp" +#include "oops/oopsHierarchy.hpp" class nmethod; class NMethodClosure; @@ -37,10 +40,12 @@ class ZNMethod : public AllStatic { static void log_register(const nmethod* nm); static void log_unregister(const nmethod* nm); + static void log_purge(const nmethod* nm); public: static void register_nmethod(nmethod* nm); static void unregister_nmethod(nmethod* nm); + static void purge_nmethod(nmethod* nm); static bool supports_entry_barrier(nmethod* nm); @@ -48,19 +53,22 @@ class ZNMethod : public AllStatic { static void disarm(nmethod* nm); static void set_guard_value(nmethod* nm, int value); + static void nmethod_patch_barriers(nmethod* nm); + static void nmethod_oops_do(nmethod* nm, OopClosure* cl); static void nmethod_oops_do_inner(nmethod* nm, OopClosure* cl); - static void nmethod_oops_barrier(nmethod* nm); - - static void nmethods_do_begin(); - static void nmethods_do_end(); - static void nmethods_do(NMethodClosure* cl); + static void nmethods_do_begin(bool secondary); + static void nmethods_do_end(bool secondary); + static void nmethods_do(bool secondary, NMethodClosure* cl); static ZReentrantLock* lock_for_nmethod(nmethod* nm); static void unlink(ZWorkers* workers, bool unloading_occurred); static void purge(); + + static uintptr_t color(nmethod* nm); + static oop load_oop(oop* p, DecoratorSet decorators); }; #endif // SHARE_GC_Z_ZNMETHOD_HPP diff --git a/src/hotspot/share/gc/z/zNMethodData.cpp b/src/hotspot/share/gc/z/zNMethodData.cpp index c6efbfe661719..d19a7af4dda89 100644 --- a/src/hotspot/share/gc/z/zNMethodData.cpp +++ b/src/hotspot/share/gc/z/zNMethodData.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,67 +22,40 @@ */ #include "precompiled.hpp" -#include "gc/z/zAttachedArray.inline.hpp" #include "gc/z/zLock.inline.hpp" #include "gc/z/zNMethodData.hpp" -#include "memory/allocation.hpp" -#include "runtime/atomic.hpp" -#include "utilities/align.hpp" #include "utilities/debug.hpp" -#include "utilities/growableArray.hpp" - -ZNMethodDataOops* ZNMethodDataOops::create(const GrowableArray& immediates, bool has_non_immediates) { - return ::new (AttachedArray::alloc(immediates.length())) ZNMethodDataOops(immediates, has_non_immediates); -} - -void ZNMethodDataOops::destroy(ZNMethodDataOops* oops) { - AttachedArray::free(oops); -} - -ZNMethodDataOops::ZNMethodDataOops(const GrowableArray& immediates, bool has_non_immediates) : - _immediates(immediates.length()), - _has_non_immediates(has_non_immediates) { - // Save all immediate oops - for (size_t i = 0; i < immediates_count(); i++) { - immediates_begin()[i] = immediates.at(int(i)); - } -} - -size_t ZNMethodDataOops::immediates_count() const { - return _immediates.length(); -} - -oop** ZNMethodDataOops::immediates_begin() const { - return _immediates(this); -} - -oop** ZNMethodDataOops::immediates_end() const { - return immediates_begin() + immediates_count(); -} - -bool ZNMethodDataOops::has_non_immediates() const { - return _has_non_immediates; -} ZNMethodData::ZNMethodData() : _lock(), - _oops(NULL) {} - -ZNMethodData::~ZNMethodData() { - ZNMethodDataOops::destroy(_oops); -} + _barriers(), + _immediate_oops(), + _has_non_immediate_oops(false) {} ZReentrantLock* ZNMethodData::lock() { return &_lock; } -ZNMethodDataOops* ZNMethodData::oops() const { - return Atomic::load_acquire(&_oops); +const ZArray* ZNMethodData::barriers() const { + assert(_lock.is_owned(), "Should be owned"); + return &_barriers; +} + +const ZArray* ZNMethodData::immediate_oops() const { + assert(_lock.is_owned(), "Should be owned"); + return &_immediate_oops; +} + +bool ZNMethodData::has_non_immediate_oops() const { + assert(_lock.is_owned(), "Should be owned"); + return _has_non_immediate_oops; } -ZNMethodDataOops* ZNMethodData::swap_oops(ZNMethodDataOops* new_oops) { +void ZNMethodData::swap(ZArray* barriers, + ZArray* immediate_oops, + bool has_non_immediate_oops) { ZLocker locker(&_lock); - ZNMethodDataOops* const old_oops = _oops; - _oops = new_oops; - return old_oops; + _barriers.swap(barriers); + _immediate_oops.swap(immediate_oops); + _has_non_immediate_oops = has_non_immediate_oops; } diff --git a/src/hotspot/share/gc/z/zNMethodData.hpp b/src/hotspot/share/gc/z/zNMethodData.hpp index 7afd6010554bf..1b1ce077efc65 100644 --- a/src/hotspot/share/gc/z/zNMethodData.hpp +++ b/src/hotspot/share/gc/z/zNMethodData.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
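ZNMethodData::swap() follows a collect-then-publish pattern: the relocation scan builds plain local arrays with no lock held, and only cheap O(1) swaps happen under the per-nmethod lock, so readers holding the lock always see the barriers, immediate oops and the non-immediate flag change together. A standalone sketch of the pattern with std::vector standing in for ZArray and illustrative field types:

#include <mutex>
#include <vector>

// Collect-then-publish: the expensive scan builds local vectors with no lock
// held; only O(1) swaps happen under the per-object lock, so lock holders
// always observe the three fields as one consistent snapshot.
struct MethodMetadata {
  std::mutex          lock;        // guards the fields below
  std::vector<void*>  barriers;    // stand-in for the barrier-relocation array
  std::vector<void**> immediates;  // stand-in for the immediate-oop array
  bool                has_non_immediates = false;

  void swap_in(std::vector<void*>*  new_barriers,
               std::vector<void**>* new_immediates,
               bool                 new_has_non_immediates) {
    std::lock_guard<std::mutex> guard(lock);
    barriers.swap(*new_barriers);  // no allocation or copying under the lock
    immediates.swap(*new_immediates);
    has_non_immediates = new_has_non_immediates;
  }
};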
* * This code is free software; you can redistribute it and/or modify it @@ -24,48 +24,36 @@ #ifndef SHARE_GC_Z_ZNMETHODDATA_HPP #define SHARE_GC_Z_ZNMETHODDATA_HPP -#include "gc/z/zAttachedArray.hpp" +#include "gc/z/zArray.hpp" #include "gc/z/zLock.hpp" #include "memory/allocation.hpp" #include "oops/oopsHierarchy.hpp" #include "utilities/globalDefinitions.hpp" -class nmethod; -template class GrowableArray; - -class ZNMethodDataOops { -private: - typedef ZAttachedArray AttachedArray; - - const AttachedArray _immediates; - const bool _has_non_immediates; - - ZNMethodDataOops(const GrowableArray& immediates, bool has_non_immediates); - -public: - static ZNMethodDataOops* create(const GrowableArray& immediates, bool has_non_immediates); - static void destroy(ZNMethodDataOops* oops); - - size_t immediates_count() const; - oop** immediates_begin() const; - oop** immediates_end() const; - - bool has_non_immediates() const; +struct ZNMethodDataBarrier { + address _reloc_addr; + int _reloc_format; }; class ZNMethodData : public CHeapObj { private: - ZReentrantLock _lock; - ZNMethodDataOops* volatile _oops; + ZReentrantLock _lock; + ZArray _barriers; + ZArray _immediate_oops; + bool _has_non_immediate_oops; public: ZNMethodData(); - ~ZNMethodData(); ZReentrantLock* lock(); - ZNMethodDataOops* oops() const; - ZNMethodDataOops* swap_oops(ZNMethodDataOops* oops); + const ZArray* barriers() const; + const ZArray* immediate_oops() const; + bool has_non_immediate_oops() const; + + void swap(ZArray* barriers, + ZArray* immediate_oops, + bool has_non_immediate_oops); }; #endif // SHARE_GC_Z_ZNMETHODDATA_HPP diff --git a/src/hotspot/share/gc/z/zNMethodTable.cpp b/src/hotspot/share/gc/z/zNMethodTable.cpp index 4dde10d15d3dc..f75af7af616c7 100644 --- a/src/hotspot/share/gc/z/zNMethodTable.cpp +++ b/src/hotspot/share/gc/z/zNMethodTable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,6 @@ #include "code/icBuffer.hpp" #include "gc/shared/barrierSet.hpp" #include "gc/shared/barrierSetNMethod.hpp" -#include "gc/z/zGlobals.hpp" #include "gc/z/zHash.inline.hpp" #include "gc/z/zLock.inline.hpp" #include "gc/z/zNMethodData.hpp" @@ -45,12 +44,13 @@ #include "utilities/debug.hpp" #include "utilities/powerOfTwo.hpp" -ZNMethodTableEntry* ZNMethodTable::_table = NULL; +ZNMethodTableEntry* ZNMethodTable::_table = nullptr; size_t ZNMethodTable::_size = 0; size_t ZNMethodTable::_nregistered = 0; size_t ZNMethodTable::_nunregistered = 0; ZNMethodTableIteration ZNMethodTable::_iteration; -ZSafeDeleteNoLock ZNMethodTable::_safe_delete; +ZNMethodTableIteration ZNMethodTable::_iteration_secondary; +ZSafeDelete ZNMethodTable::_safe_delete(false /* locked */); size_t ZNMethodTable::first_index(const nmethod* nm, size_t size) { assert(is_power_of_2(size), "Invalid size"); @@ -130,7 +130,7 @@ void ZNMethodTable::rebuild(size_t new_size) { } // Free old table - _safe_delete(_table); + _safe_delete.schedule_delete(_table); // Install new table _table = new_table; @@ -167,6 +167,12 @@ void ZNMethodTable::rebuild_if_needed() { } } +ZNMethodTableIteration* ZNMethodTable::iteration(bool secondary) { + return secondary + ? 
&_iteration_secondary + : &_iteration; +} + size_t ZNMethodTable::registered_nmethods() { return _nregistered; } @@ -193,13 +199,13 @@ void ZNMethodTable::register_nmethod(nmethod* nm) { void ZNMethodTable::wait_until_iteration_done() { assert(CodeCache_lock->owned_by_self(), "Lock must be held"); - while (_iteration.in_progress()) { + while (_iteration.in_progress() || _iteration_secondary.in_progress()) { CodeCache_lock->wait_without_safepoint_check(); } } void ZNMethodTable::unregister_nmethod(nmethod* nm) { - assert(CodeCache_lock->owned_by_self(), "Lock must be held"); + MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); // Remove entry unregister_entry(_table, _size, nm); @@ -207,21 +213,21 @@ void ZNMethodTable::unregister_nmethod(nmethod* nm) { _nregistered--; } -void ZNMethodTable::nmethods_do_begin() { +void ZNMethodTable::nmethods_do_begin(bool secondary) { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); // Do not allow the table to be deleted while iterating _safe_delete.enable_deferred_delete(); // Prepare iteration - _iteration.nmethods_do_begin(_table, _size); + iteration(secondary)->nmethods_do_begin(_table, _size); } -void ZNMethodTable::nmethods_do_end() { +void ZNMethodTable::nmethods_do_end(bool secondary) { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); // Finish iteration - _iteration.nmethods_do_end(); + iteration(secondary)->nmethods_do_end(); // Allow the table to be deleted _safe_delete.disable_deferred_delete(); @@ -230,6 +236,6 @@ void ZNMethodTable::nmethods_do_end() { CodeCache_lock->notify_all(); } -void ZNMethodTable::nmethods_do(NMethodClosure* cl) { - _iteration.nmethods_do(cl); +void ZNMethodTable::nmethods_do(bool secondary, NMethodClosure* cl) { + iteration(secondary)->nmethods_do(cl); } diff --git a/src/hotspot/share/gc/z/zNMethodTable.hpp b/src/hotspot/share/gc/z/zNMethodTable.hpp index a1af8512f698b..e160ac1b39a98 100644 --- a/src/hotspot/share/gc/z/zNMethodTable.hpp +++ b/src/hotspot/share/gc/z/zNMethodTable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
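The switch from ZSafeDeleteNoLock to ZSafeDelete with schedule_delete() exists because a replaced table array may still be read by an in-progress iteration; freeing it has to be deferred until the last iteration ends. A standalone sketch of that deferred-delete idea, as a simplified interpretation rather than the actual ZSafeDelete implementation:

#include <cstddef>
#include <mutex>
#include <vector>

// Deferred deletion: while any iteration over a shared table is in progress,
// a replaced table array must not be freed, because concurrent iterators may
// still be reading it. Deletion is deferred and performed by whoever disables
// the last deferral. Assumes the deferred objects were allocated with new[].
template <typename T>
class SafeDelete {
private:
  std::mutex      _lock;
  size_t          _enabled = 0;  // number of in-progress iterations
  std::vector<T*> _deferred;     // arrays waiting to be freed

public:
  void enable_deferred_delete() {
    std::lock_guard<std::mutex> guard(_lock);
    _enabled++;
  }

  void disable_deferred_delete() {
    std::vector<T*> to_delete;
    {
      std::lock_guard<std::mutex> guard(_lock);
      if (--_enabled == 0) {
        to_delete.swap(_deferred);
      }
    }
    for (T* item : to_delete) {
      delete[] item;
    }
  }

  // Either delete immediately (no iteration in progress) or defer.
  void schedule_delete(T* item) {
    {
      std::lock_guard<std::mutex> guard(_lock);
      if (_enabled > 0) {
        _deferred.push_back(item);
        return;
      }
    }
    delete[] item;
  }
};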
* * This code is free software; you can redistribute it and/or modify it @@ -35,12 +35,13 @@ class ZWorkers; class ZNMethodTable : public AllStatic { private: - static ZNMethodTableEntry* _table; - static size_t _size; - static size_t _nregistered; - static size_t _nunregistered; - static ZNMethodTableIteration _iteration; - static ZSafeDeleteNoLock _safe_delete; + static ZNMethodTableEntry* _table; + static size_t _size; + static size_t _nregistered; + static size_t _nunregistered; + static ZNMethodTableIteration _iteration; + static ZNMethodTableIteration _iteration_secondary; + static ZSafeDelete _safe_delete; static ZNMethodTableEntry* create(size_t size); static void destroy(ZNMethodTableEntry* table); @@ -54,6 +55,8 @@ class ZNMethodTable : public AllStatic { static void rebuild(size_t new_size); static void rebuild_if_needed(); + static ZNMethodTableIteration* iteration(bool secondary); + public: static size_t registered_nmethods(); static size_t unregistered_nmethods(); @@ -63,9 +66,9 @@ class ZNMethodTable : public AllStatic { static void wait_until_iteration_done(); - static void nmethods_do_begin(); - static void nmethods_do_end(); - static void nmethods_do(NMethodClosure* cl); + static void nmethods_do_begin(bool secondary); + static void nmethods_do_end(bool secondary); + static void nmethods_do(bool secondary, NMethodClosure* cl); static void unlink(ZWorkers* workers, bool unloading_occurred); static void purge(ZWorkers* workers); diff --git a/src/hotspot/share/gc/z/zNMethodTableEntry.hpp b/src/hotspot/share/gc/z/zNMethodTableEntry.hpp index 8b30542999d07..1afb634faeb65 100644 --- a/src/hotspot/share/gc/z/zNMethodTableEntry.hpp +++ b/src/hotspot/share/gc/z/zNMethodTableEntry.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,7 +58,7 @@ class ZNMethodTableEntry : public CHeapObj { explicit ZNMethodTableEntry(bool unregistered = false) : _entry(field_registered::encode(false) | field_unregistered::encode(unregistered) | - field_method::encode(NULL)) {} + field_method::encode(nullptr)) {} explicit ZNMethodTableEntry(nmethod* method) : _entry(field_registered::encode(true) | diff --git a/src/hotspot/share/gc/z/zNMethodTableIteration.cpp b/src/hotspot/share/gc/z/zNMethodTableIteration.cpp index b3f9471bad771..4b6ff99d9520c 100644 --- a/src/hotspot/share/gc/z/zNMethodTableIteration.cpp +++ b/src/hotspot/share/gc/z/zNMethodTableIteration.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,12 +30,12 @@ #include "utilities/globalDefinitions.hpp" ZNMethodTableIteration::ZNMethodTableIteration() : - _table(NULL), + _table(nullptr), _size(0), _claimed(0) {} bool ZNMethodTableIteration::in_progress() const { - return _table != NULL; + return _table != nullptr; } void ZNMethodTableIteration::nmethods_do_begin(ZNMethodTableEntry* table, size_t size) { @@ -50,7 +50,7 @@ void ZNMethodTableIteration::nmethods_do_end() { assert(_claimed >= _size, "Failed to claim all table entries"); // Finish iteration - _table = NULL; + _table = nullptr; } void ZNMethodTableIteration::nmethods_do(NMethodClosure* cl) { @@ -58,7 +58,7 @@ void ZNMethodTableIteration::nmethods_do(NMethodClosure* cl) { // Claim table partition. Each partition is currently sized to span // two cache lines. This number is just a guess, but seems to work well. const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry); - const size_t partition_start = MIN2(Atomic::fetch_and_add(&_claimed, partition_size), _size); + const size_t partition_start = MIN2(Atomic::fetch_then_add(&_claimed, partition_size), _size); const size_t partition_end = MIN2(partition_start + partition_size, _size); if (partition_start == partition_end) { // End of table diff --git a/src/hotspot/share/gc/z/zObjArrayAllocator.cpp b/src/hotspot/share/gc/z/zObjArrayAllocator.cpp index 391ba264a74cd..14b2045f36e26 100644 --- a/src/hotspot/share/gc/z/zObjArrayAllocator.cpp +++ b/src/hotspot/share/gc/z/zObjArrayAllocator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,31 +63,79 @@ oop ZObjArrayAllocator::initialize(HeapWord* mem) const { // The array is going to be exposed before it has been completely // cleared, therefore we can't expose the header at the end of this // function. Instead explicitly initialize it according to our needs. - arrayOopDesc::set_mark(mem, markWord::prototype()); + + // Signal to the ZIterator that this is an invisible root, by setting + // the mark word to "marked". Reset to prototype() after the clearing. + arrayOopDesc::set_mark(mem, markWord::prototype().set_marked()); arrayOopDesc::release_set_klass(mem, _klass); assert(_length >= 0, "length should be non-negative"); arrayOopDesc::set_length(mem, _length); // Keep the array alive across safepoints through an invisible - // root. Invisible roots are not visited by the heap itarator + // root. Invisible roots are not visited by the heap iterator // and the marking logic will not attempt to follow its elements. - // Relocation knows how to dodge iterating over such objects. - ZThreadLocalData::set_invisible_root(_thread, (oop*)&mem); + // Relocation and remembered set code know how to dodge iterating + // over such objects. 
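ZNMethodTableIteration hands out work by atomically claiming fixed-size partitions with fetch_then_add, so parallel workers never visit the same entries and need no lock on the hot path. A standalone sketch of that claiming loop; parallel_iterate and its parameters are illustrative.

#include <atomic>
#include <cstddef>

// Claim-by-counter parallel iteration: each worker atomically grabs the next
// fixed-size partition of the table with fetch_add. The shared 'claimed'
// counter must start at 0 and be shared by all workers of one iteration.
template <typename Entry, typename Visitor>
void parallel_iterate(Entry* table, size_t size,
                      std::atomic<size_t>& claimed,
                      size_t partition_size,
                      Visitor visit) {
  for (;;) {
    const size_t begin = claimed.fetch_add(partition_size);
    if (begin >= size) {
      return;  // table exhausted
    }
    const size_t end = (begin + partition_size < size) ? begin + partition_size : size;
    for (size_t i = begin; i < end; i++) {
      visit(table[i]);
    }
  }
}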
+ ZThreadLocalData::set_invisible_root(_thread, (zaddress_unsafe*)&mem); + + uint32_t old_seqnum_before = ZGeneration::old()->seqnum(); + uint32_t young_seqnum_before = ZGeneration::young()->seqnum(); + uintptr_t color_before = ZPointerStoreGoodMask; + auto gc_safepoint_happened = [&]() { + return old_seqnum_before != ZGeneration::old()->seqnum() || + young_seqnum_before != ZGeneration::young()->seqnum() || + color_before != ZPointerStoreGoodMask; + }; + + bool seen_gc_safepoint = false; - for (size_t processed = 0; processed < payload_size; processed += segment_max) { - // Calculate segment - HeapWord* const start = (HeapWord*)(mem + header + processed); - const size_t remaining = payload_size - processed; - const size_t segment_size = MIN2(remaining, segment_max); + auto initialize_memory = [&]() { + for (size_t processed = 0; processed < payload_size; processed += segment_max) { + // Clear segment + uintptr_t* const start = (uintptr_t*)(mem + header + processed); + const size_t remaining = payload_size - processed; + const size_t segment = MIN2(remaining, segment_max); + // Usually, the young marking code has the responsibility to color + // raw nulls, before they end up in the old generation. However, the + // invisible roots are hidden from the marking code, and therefore + // we must color the nulls already here in the initialization. The + // color we choose must be store bad for any subsequent stores, regardless + // of how many GC flips later it will arrive. That's why we OR in 11 + // (ZPointerRememberedMask) in the remembered bits, similar to how + // forgotten old oops also have 11, for the very same reason. + // However, we opportunistically try to color without the 11 remembered + // bits, hoping to not get interrupted in the middle of a GC safepoint. + // Most of the time, we manage to do that, and can the avoid having GC + // barriers trigger slow paths for this. + const uintptr_t colored_null = seen_gc_safepoint ? (ZPointerStoreGoodMask | ZPointerRememberedMask) + : ZPointerStoreGoodMask; + const uintptr_t fill_value = is_reference_type(element_type) ? colored_null : 0; + ZUtils::fill(start, segment, fill_value); - // Clear segment - Copy::zero_to_words(start, segment_size); + // Safepoint + yield_for_safepoint(); - // Safepoint - yield_for_safepoint(); + // Deal with safepoints + if (!seen_gc_safepoint && gc_safepoint_happened()) { + // The first time we observe a GC safepoint in the yield point, + // we have to restart processing with 11 remembered bits. + seen_gc_safepoint = true; + return false; + } + } + return true; + }; + + if (!initialize_memory()) { + // Re-color with 11 remset bits if we got intercepted by a GC safepoint + const bool result = initialize_memory(); + assert(result, "Array initialization should always succeed the second time"); } ZThreadLocalData::clear_invisible_root(_thread); + // Signal to the ZIterator that this is no longer an invisible root + oopDesc::release_set_mark(mem, markWord::prototype()); + return cast_to_oop(mem); } diff --git a/src/hotspot/share/gc/z/zObjectAllocator.cpp b/src/hotspot/share/gc/z/zObjectAllocator.cpp index bbed59c13013a..746906cfd9374 100644 --- a/src/hotspot/share/gc/z/zObjectAllocator.cpp +++ b/src/hotspot/share/gc/z/zObjectAllocator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
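The array-clearing loop above fills the payload in segments, yields for safepoints between segments, and restarts once with the conservative (remembered-bits) fill value if a GC safepoint was observed mid-way. A simplified standalone sketch of that chunked-fill-with-restart shape; the epoch and yield callbacks stand in for the real seqnum/colour checks and yield_for_safepoint(), and the fill values are opaque here.

#include <algorithm>
#include <cstddef>
#include <cstdint>

typedef uint64_t (*EpochFn)();  // placeholder for the GC seqnum/colour check
typedef void (*YieldFn)();      // placeholder for yield_for_safepoint()

// Fill a large range in segments, polling between segments. If the "world
// changed" during the first pass, redo everything once with the safe value.
inline void fill_in_segments(uint64_t* words, size_t nwords,
                             uint64_t fast_fill, uint64_t safe_fill,
                             size_t segment_words,
                             EpochFn current_epoch, YieldFn yield) {
  bool seen_epoch_change = false;

  for (int attempt = 0; attempt < 2; attempt++) {
    const uint64_t fill = seen_epoch_change ? safe_fill : fast_fill;
    const uint64_t epoch_before = current_epoch();
    bool restarted = false;

    for (size_t done = 0; done < nwords; done += segment_words) {
      const size_t n = std::min(segment_words, nwords - done);
      std::fill_n(words + done, n, fill);

      yield();  // allow a safepoint between segments

      if (!seen_epoch_change && current_epoch() != epoch_before) {
        // A GC safepoint happened mid-initialization; the optimistic fill
        // value may now be stale, so redo everything with the safe value.
        seen_epoch_change = true;
        restarted = true;
        break;
      }
    }

    if (!restarted) {
      return;  // completed with a consistent fill value
    }
  }
}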
* * This code is free software; you can redistribute it and/or modify it @@ -29,25 +29,24 @@ #include "gc/z/zPage.inline.hpp" #include "gc/z/zPageTable.inline.hpp" #include "gc/z/zStat.hpp" -#include "gc/z/zThread.inline.hpp" #include "gc/z/zValue.inline.hpp" #include "logging/log.hpp" #include "runtime/atomic.hpp" #include "runtime/safepoint.hpp" +#include "runtime/thread.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" static const ZStatCounter ZCounterUndoObjectAllocationSucceeded("Memory", "Undo Object Allocation Succeeded", ZStatUnitOpsPerSecond); static const ZStatCounter ZCounterUndoObjectAllocationFailed("Memory", "Undo Object Allocation Failed", ZStatUnitOpsPerSecond); -ZObjectAllocator::ZObjectAllocator() : +ZObjectAllocator::ZObjectAllocator(ZPageAge age) : + _age(age), _use_per_cpu_shared_small_pages(ZHeuristics::use_per_cpu_shared_small_pages()), _used(0), _undone(0), - _alloc_for_relocation(0), - _undo_alloc_for_relocation(0), - _shared_medium_page(NULL), - _shared_small_page(NULL) {} + _shared_medium_page(nullptr), + _shared_small_page(nullptr) {} ZPage** ZObjectAllocator::shared_small_page_addr() { return _use_per_cpu_shared_small_pages ? _shared_small_page.addr() : _shared_small_page.addr(0); @@ -57,20 +56,9 @@ ZPage* const* ZObjectAllocator::shared_small_page_addr() const { return _use_per_cpu_shared_small_pages ? _shared_small_page.addr() : _shared_small_page.addr(0); } -void ZObjectAllocator::register_alloc_for_relocation(const ZPageTable* page_table, uintptr_t addr, size_t size) { - const ZPage* const page = page_table->get(addr); - const size_t aligned_size = align_up(size, page->object_alignment()); - Atomic::add(_alloc_for_relocation.addr(), aligned_size); -} - -void ZObjectAllocator::register_undo_alloc_for_relocation(const ZPage* page, size_t size) { - const size_t aligned_size = align_up(size, page->object_alignment()); - Atomic::add(_undo_alloc_for_relocation.addr(), aligned_size); -} - -ZPage* ZObjectAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) { - ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags); - if (page != NULL) { +ZPage* ZObjectAllocator::alloc_page(ZPageType type, size_t size, ZAllocationFlags flags) { + ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags, _age); + if (page != nullptr) { // Increment used bytes Atomic::add(_used.addr(), size); } @@ -78,6 +66,10 @@ ZPage* ZObjectAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags return page; } +ZPage* ZObjectAllocator::alloc_page_for_relocation(ZPageType type, size_t size, ZAllocationFlags flags) { + return ZHeap::heap()->alloc_page(type, size, flags, _age); +} + void ZObjectAllocator::undo_alloc_page(ZPage* page) { // Increment undone bytes Atomic::add(_undone.addr(), page->size()); @@ -85,22 +77,22 @@ void ZObjectAllocator::undo_alloc_page(ZPage* page) { ZHeap::heap()->undo_alloc_page(page); } -uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page, - uint8_t page_type, - size_t page_size, - size_t size, - ZAllocationFlags flags) { - uintptr_t addr = 0; +zaddress ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page, + ZPageType page_type, + size_t page_size, + size_t size, + ZAllocationFlags flags) { + zaddress addr = zaddress::null; ZPage* page = Atomic::load_acquire(shared_page); - if (page != NULL) { + if (page != nullptr) { addr = page->alloc_object_atomic(size); } - if (addr == 0) { + if (is_null(addr)) { // Allocate new page ZPage* const new_page = alloc_page(page_type, 
page_size, flags); - if (new_page != NULL) { + if (new_page != nullptr) { // Allocate object before installing the new page addr = new_page->alloc_object(size); @@ -108,15 +100,15 @@ uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page, // Install new page ZPage* const prev_page = Atomic::cmpxchg(shared_page, page, new_page); if (prev_page != page) { - if (prev_page == NULL) { + if (prev_page == nullptr) { // Previous page was retired, retry installing the new page page = prev_page; goto retry; } // Another page already installed, try allocation there first - const uintptr_t prev_addr = prev_page->alloc_object_atomic(size); - if (prev_addr == 0) { + const zaddress prev_addr = prev_page->alloc_object_atomic(size); + if (is_null(prev_addr)) { // Allocation failed, retry installing the new page page = prev_page; goto retry; @@ -134,13 +126,13 @@ uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page, return addr; } -uintptr_t ZObjectAllocator::alloc_large_object(size_t size, ZAllocationFlags flags) { - uintptr_t addr = 0; +zaddress ZObjectAllocator::alloc_large_object(size_t size, ZAllocationFlags flags) { + zaddress addr = zaddress::null; // Allocate new large page const size_t page_size = align_up(size, ZGranuleSize); - ZPage* const page = alloc_page(ZPageTypeLarge, page_size, flags); - if (page != NULL) { + ZPage* const page = alloc_page(ZPageType::large, page_size, flags); + if (page != nullptr) { // Allocate the object addr = page->alloc_object(size); } @@ -148,15 +140,15 @@ uintptr_t ZObjectAllocator::alloc_large_object(size_t size, ZAllocationFlags fla return addr; } -uintptr_t ZObjectAllocator::alloc_medium_object(size_t size, ZAllocationFlags flags) { - return alloc_object_in_shared_page(_shared_medium_page.addr(), ZPageTypeMedium, ZPageSizeMedium, size, flags); +zaddress ZObjectAllocator::alloc_medium_object(size_t size, ZAllocationFlags flags) { + return alloc_object_in_shared_page(_shared_medium_page.addr(), ZPageType::medium, ZPageSizeMedium, size, flags); } -uintptr_t ZObjectAllocator::alloc_small_object(size_t size, ZAllocationFlags flags) { - return alloc_object_in_shared_page(shared_small_page_addr(), ZPageTypeSmall, ZPageSizeSmall, size, flags); +zaddress ZObjectAllocator::alloc_small_object(size_t size, ZAllocationFlags flags) { + return alloc_object_in_shared_page(shared_small_page_addr(), ZPageType::small, ZPageSizeSmall, size, flags); } -uintptr_t ZObjectAllocator::alloc_object(size_t size, ZAllocationFlags flags) { +zaddress ZObjectAllocator::alloc_object(size_t size, ZAllocationFlags flags) { if (size <= ZObjectSizeLimitSmall) { // Small return alloc_small_object(size, flags); @@ -169,33 +161,26 @@ uintptr_t ZObjectAllocator::alloc_object(size_t size, ZAllocationFlags flags) { } } -uintptr_t ZObjectAllocator::alloc_object(size_t size) { - ZAllocationFlags flags; +zaddress ZObjectAllocator::alloc_object(size_t size) { + const ZAllocationFlags flags; return alloc_object(size, flags); } -uintptr_t ZObjectAllocator::alloc_object_for_relocation(const ZPageTable* page_table, size_t size) { +zaddress ZObjectAllocator::alloc_object_for_relocation(size_t size) { ZAllocationFlags flags; flags.set_non_blocking(); - const uintptr_t addr = alloc_object(size, flags); - if (addr != 0) { - register_alloc_for_relocation(page_table, addr, size); - } - - return addr; + return alloc_object(size, flags); } -void ZObjectAllocator::undo_alloc_object_for_relocation(ZPage* page, uintptr_t addr, size_t size) { - const uint8_t type = page->type(); 
+void ZObjectAllocator::undo_alloc_object_for_relocation(zaddress addr, size_t size) { + ZPage* const page = ZHeap::heap()->page(addr); - if (type == ZPageTypeLarge) { - register_undo_alloc_for_relocation(page, size); + if (page->is_large()) { undo_alloc_page(page); ZStatInc(ZCounterUndoObjectAllocationSucceeded); } else { if (page->undo_alloc_object_atomic(addr, size)) { - register_undo_alloc_for_relocation(page, size); ZStatInc(ZCounterUndoObjectAllocationSucceeded); } else { ZStatInc(ZCounterUndoObjectAllocationFailed); @@ -203,6 +188,10 @@ void ZObjectAllocator::undo_alloc_object_for_relocation(ZPage* page, uintptr_t a } } +ZPageAge ZObjectAllocator::age() const { + return _age; +} + size_t ZObjectAllocator::used() const { size_t total_used = 0; size_t total_undone = 0; @@ -221,35 +210,16 @@ size_t ZObjectAllocator::used() const { } size_t ZObjectAllocator::remaining() const { - assert(ZThread::is_java(), "Should be a Java thread"); + assert(Thread::current()->is_Java_thread(), "Should be a Java thread"); const ZPage* const page = Atomic::load_acquire(shared_small_page_addr()); - if (page != NULL) { + if (page != nullptr) { return page->remaining(); } return 0; } -size_t ZObjectAllocator::relocated() const { - size_t total_alloc = 0; - size_t total_undo_alloc = 0; - - ZPerCPUConstIterator iter_alloc(&_alloc_for_relocation); - for (const size_t* alloc; iter_alloc.next(&alloc);) { - total_alloc += Atomic::load(alloc); - } - - ZPerCPUConstIterator iter_undo_alloc(&_undo_alloc_for_relocation); - for (const size_t* undo_alloc; iter_undo_alloc.next(&undo_alloc);) { - total_undo_alloc += Atomic::load(undo_alloc); - } - - assert(total_alloc >= total_undo_alloc, "Mismatch"); - - return total_alloc - total_undo_alloc; -} - void ZObjectAllocator::retire_pages() { assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); @@ -257,11 +227,7 @@ void ZObjectAllocator::retire_pages() { _used.set_all(0); _undone.set_all(0); - // Reset relocated bytes - _alloc_for_relocation.set_all(0); - _undo_alloc_for_relocation.set_all(0); - // Reset allocation pages - _shared_medium_page.set(NULL); - _shared_small_page.set_all(NULL); + _shared_medium_page.set(nullptr); + _shared_small_page.set_all(nullptr); } diff --git a/src/hotspot/share/gc/z/zObjectAllocator.hpp b/src/hotspot/share/gc/z/zObjectAllocator.hpp index 406782486dfba..8aa185646fad6 100644 --- a/src/hotspot/share/gc/z/zObjectAllocator.hpp +++ b/src/hotspot/share/gc/z/zObjectAllocator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,7 +24,10 @@ #ifndef SHARE_GC_Z_ZOBJECTALLOCATOR_HPP #define SHARE_GC_Z_ZOBJECTALLOCATOR_HPP +#include "gc/z/zAddress.hpp" #include "gc/z/zAllocationFlags.hpp" +#include "gc/z/zPageAge.hpp" +#include "gc/z/zPageType.hpp" #include "gc/z/zValue.hpp" class ZPage; @@ -32,46 +35,48 @@ class ZPageTable; class ZObjectAllocator { private: + ZPageAge _age; const bool _use_per_cpu_shared_small_pages; ZPerCPU _used; ZPerCPU _undone; - ZPerCPU _alloc_for_relocation; - ZPerCPU _undo_alloc_for_relocation; ZContended _shared_medium_page; ZPerCPU _shared_small_page; ZPage** shared_small_page_addr(); ZPage* const* shared_small_page_addr() const; - void register_alloc_for_relocation(const ZPageTable* page_table, uintptr_t addr, size_t size); - void register_undo_alloc_for_relocation(const ZPage* page, size_t size); - - ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags); + ZPage* alloc_page(ZPageType type, size_t size, ZAllocationFlags flags); void undo_alloc_page(ZPage* page); // Allocate an object in a shared page. Allocate and // atomically install a new page if necessary. - uintptr_t alloc_object_in_shared_page(ZPage** shared_page, - uint8_t page_type, - size_t page_size, - size_t size, - ZAllocationFlags flags); + zaddress alloc_object_in_shared_page(ZPage** shared_page, + ZPageType page_type, + size_t page_size, + size_t size, + ZAllocationFlags flags); - uintptr_t alloc_large_object(size_t size, ZAllocationFlags flags); - uintptr_t alloc_medium_object(size_t size, ZAllocationFlags flags); - uintptr_t alloc_small_object(size_t size, ZAllocationFlags flags); - uintptr_t alloc_object(size_t size, ZAllocationFlags flags); + zaddress alloc_large_object(size_t size, ZAllocationFlags flags); + zaddress alloc_medium_object(size_t size, ZAllocationFlags flags); + zaddress alloc_small_object(size_t size, ZAllocationFlags flags); + zaddress alloc_object(size_t size, ZAllocationFlags flags); public: - ZObjectAllocator(); + ZObjectAllocator(ZPageAge age); + + // Mutator allocation + zaddress alloc_object(size_t size); + + // Relocation + zaddress alloc_object_for_relocation(size_t size); + void undo_alloc_object_for_relocation(zaddress addr, size_t size); + + ZPage* alloc_page_for_relocation(ZPageType type, size_t size, ZAllocationFlags flags); - uintptr_t alloc_object(size_t size); - uintptr_t alloc_object_for_relocation(const ZPageTable* page_table, size_t size); - void undo_alloc_object_for_relocation(ZPage* page, uintptr_t addr, size_t size); + ZPageAge age() const; size_t used() const; size_t remaining() const; - size_t relocated() const; void retire_pages(); }; diff --git a/src/hotspot/share/gc/z/zPage.cpp b/src/hotspot/share/gc/z/zPage.cpp index 032834ff13b1f..52474e9541175 100644 --- a/src/hotspot/share/gc/z/zPage.cpp +++ b/src/hotspot/share/gc/z/zPage.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,79 +22,193 @@ */ #include "precompiled.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/z/zGeneration.inline.hpp" #include "gc/z/zList.inline.hpp" #include "gc/z/zPage.inline.hpp" #include "gc/z/zPhysicalMemory.inline.hpp" +#include "gc/z/zRememberedSet.inline.hpp" #include "gc/z/zVirtualMemory.inline.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" +#include "utilities/growableArray.hpp" -ZPage::ZPage(const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem) : - ZPage(type_from_size(vmem.size()), vmem, pmem) {} - -ZPage::ZPage(uint8_t type, const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem) : +ZPage::ZPage(ZPageType type, const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem) : _type(type), + _generation_id(ZGenerationId::young), + _age(ZPageAge::eden), _numa_id((uint8_t)-1), _seqnum(0), + _seqnum_other(0), _virtual(vmem), - _top(start()), + _top(to_zoffset_end(start())), _livemap(object_max_count()), + _remembered_set(), _last_used(0), _physical(pmem), _node() { - assert_initialized(); -} - -ZPage::~ZPage() {} - -void ZPage::assert_initialized() const { assert(!_virtual.is_null(), "Should not be null"); assert(!_physical.is_null(), "Should not be null"); assert(_virtual.size() == _physical.size(), "Virtual/Physical size mismatch"); - assert((_type == ZPageTypeSmall && size() == ZPageSizeSmall) || - (_type == ZPageTypeMedium && size() == ZPageSizeMedium) || - (_type == ZPageTypeLarge && is_aligned(size(), ZGranuleSize)), + assert((_type == ZPageType::small && size() == ZPageSizeSmall) || + (_type == ZPageType::medium && size() == ZPageSizeMedium) || + (_type == ZPageType::large && is_aligned(size(), ZGranuleSize)), "Page type/size mismatch"); } -void ZPage::reset() { - _seqnum = ZGlobalSeqNum; - _top = start(); - _livemap.reset(); +ZPage* ZPage::clone_limited() const { + // Only copy type and memory layouts. Let the rest be lazily reconstructed when needed. + return new ZPage(_type, _virtual, _physical); +} + +ZPage* ZPage::clone_limited_promote_flipped() const { + ZPage* const page = new ZPage(_type, _virtual, _physical); + + // The page is still filled with the same objects, need to retain the top pointer. + page->_top = _top; + + return page; +} + +ZGeneration* ZPage::generation() { + return ZGeneration::generation(_generation_id); +} + +const ZGeneration* ZPage::generation() const { + return ZGeneration::generation(_generation_id); +} + +void ZPage::reset_seqnum() { + Atomic::store(&_seqnum, generation()->seqnum()); + Atomic::store(&_seqnum_other, ZGeneration::generation(_generation_id == ZGenerationId::young ? ZGenerationId::old : ZGenerationId::young)->seqnum()); +} + +void ZPage::remset_clear() { + _remembered_set.clear_all(); +} + +void ZPage::verify_remset_after_reset(ZPageAge prev_age, ZPageResetType type) { + // Young-to-old reset + if (prev_age != ZPageAge::old) { + verify_remset_cleared_previous(); + verify_remset_cleared_current(); + return; + } + + // Old-to-old reset + switch (type) { + case ZPageResetType::Splitting: + // Page is on the way to be destroyed or reused, delay + // clearing until the page is reset for Allocation. + break; + + case ZPageResetType::InPlaceRelocation: + // Relocation failed and page is being compacted in-place. + // The remset bits are flipped each young mark start, so + // the verification code below needs to use the right remset. 
+ if (ZGeneration::old()->active_remset_is_current()) { + verify_remset_cleared_previous(); + } else { + verify_remset_cleared_current(); + } + break; + + case ZPageResetType::FlipAging: + fatal("Should not have called this for old-to-old flipping"); + break; + + case ZPageResetType::Allocation: + verify_remset_cleared_previous(); + verify_remset_cleared_current(); + break; + }; +} + +void ZPage::reset_remembered_set() { + if (is_young()) { + // Remset not needed + return; + } + + // Clearing of remsets is done when freeing a page, so this code only + // needs to ensure the remset is initialized the first time a page + // becomes old. + if (!_remembered_set.is_initialized()) { + _remembered_set.initialize(size()); + } +} + +void ZPage::reset(ZPageAge age, ZPageResetType type) { + const ZPageAge prev_age = _age; + _age = age; _last_used = 0; + + _generation_id = age == ZPageAge::old + ? ZGenerationId::old + : ZGenerationId::young; + + reset_seqnum(); + + // Flip aged pages are still filled with the same objects, need to retain the top pointer. + if (type != ZPageResetType::FlipAging) { + _top = to_zoffset_end(start()); + } + + reset_remembered_set(); + verify_remset_after_reset(prev_age, type); + + if (type != ZPageResetType::InPlaceRelocation || (prev_age != ZPageAge::old && age == ZPageAge::old)) { + // Promoted in-place relocations reset the live map, + // because they clone the page. + _livemap.reset(); + } } -void ZPage::reset_for_in_place_relocation() { - _seqnum = ZGlobalSeqNum; - _top = start(); +void ZPage::finalize_reset_for_in_place_relocation() { + // Now we're done iterating over the livemaps + _livemap.reset(); } -ZPage* ZPage::retype(uint8_t type) { - assert(_type != type, "Invalid retype"); +void ZPage::reset_type_and_size(ZPageType type) { _type = type; _livemap.resize(object_max_count()); + _remembered_set.resize(size()); +} + +ZPage* ZPage::retype(ZPageType type) { + assert(_type != type, "Invalid retype"); + reset_type_and_size(type); return this; } -ZPage* ZPage::split(size_t size) { - return split(type_from_size(size), size); +ZPage* ZPage::split(size_t split_of_size) { + return split(type_from_size(split_of_size), split_of_size); } -ZPage* ZPage::split(uint8_t type, size_t size) { - assert(_virtual.size() > size, "Invalid split"); +ZPage* ZPage::split_with_pmem(ZPageType type, const ZPhysicalMemory& pmem) { + // Resize this page + const ZVirtualMemory vmem = _virtual.split(pmem.size()); + + reset_type_and_size(type_from_size(_virtual.size())); + reset(_age, ZPageResetType::Splitting); - // Resize this page, keep _numa_id, _seqnum, and _last_used - const ZVirtualMemory vmem = _virtual.split(size); - const ZPhysicalMemory pmem = _physical.split(size); - _type = type_from_size(_virtual.size()); - _top = start(); - _livemap.resize(object_max_count()); + assert(vmem.end() == _virtual.start(), "Should be consecutive"); - // Create new page, inherit _seqnum and _last_used - ZPage* const page = new ZPage(type, vmem, pmem); - page->_seqnum = _seqnum; - page->_last_used = _last_used; - return page; + log_trace(gc, page)("Split page [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT "]", + untype(vmem.start()), + untype(vmem.end()), + untype(_virtual.end())); + + // Create new page + return new ZPage(type, vmem, pmem); +} + +ZPage* ZPage::split(ZPageType type, size_t split_of_size) { + assert(_virtual.size() > split_of_size, "Invalid split"); + + const ZPhysicalMemory pmem = _physical.split(split_of_size); + + return split_with_pmem(type, pmem); } ZPage* ZPage::split_committed() { @@ 
-103,33 +217,101 @@ ZPage* ZPage::split_committed() { const ZPhysicalMemory pmem = _physical.split_committed(); if (pmem.is_null()) { // Nothing committed - return NULL; + return nullptr; } assert(!_physical.is_null(), "Should not be null"); - // Resize this page - const ZVirtualMemory vmem = _virtual.split(pmem.size()); - _type = type_from_size(_virtual.size()); - _top = start(); - _livemap.resize(object_max_count()); + return split_with_pmem(type_from_size(pmem.size()), pmem); +} - // Create new page - return new ZPage(vmem, pmem); +class ZFindBaseOopClosure : public ObjectClosure { +private: + volatile zpointer* _p; + oop _result; + +public: + ZFindBaseOopClosure(volatile zpointer* p) : + _p(p), + _result(nullptr) {} + + virtual void do_object(oop obj) { + const uintptr_t p_int = reinterpret_cast(_p); + const uintptr_t base_int = cast_from_oop(obj); + const uintptr_t end_int = base_int + wordSize * obj->size(); + if (p_int >= base_int && p_int < end_int) { + _result = obj; + } + } + + oop result() const { return _result; } +}; + +bool ZPage::is_remset_cleared_current() const { + return _remembered_set.is_cleared_current(); +} + +bool ZPage::is_remset_cleared_previous() const { + return _remembered_set.is_cleared_previous(); +} + +void ZPage::verify_remset_cleared_current() const { + if (ZVerifyRemembered && !is_remset_cleared_current()) { + fatal_msg(" current remset bits should be cleared"); + } +} + +void ZPage::verify_remset_cleared_previous() const { + if (ZVerifyRemembered && !is_remset_cleared_previous()) { + fatal_msg(" previous remset bits should be cleared"); + } +} + +void ZPage::clear_remset_current() { + _remembered_set.clear_current(); +} + +void ZPage::clear_remset_previous() { + _remembered_set.clear_previous(); +} + +void ZPage::swap_remset_bitmaps() { + _remembered_set.swap_remset_bitmaps(); +} + +void* ZPage::remset_current() { + return _remembered_set.current(); +} + +void ZPage::print_on_msg(outputStream* out, const char* msg) const { + out->print_cr(" %-6s " PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " %s/%-4u %s%s%s", + type_to_string(), untype(start()), untype(top()), untype(end()), + is_young() ? "Y" : "O", + seqnum(), + is_allocating() ? " Allocating " : "", + is_relocatable() ? " Relocatable" : "", + msg == nullptr ? "" : msg); } void ZPage::print_on(outputStream* out) const { - out->print_cr(" %-6s " PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " %s%s", - type_to_string(), start(), top(), end(), - is_allocating() ? " Allocating" : "", - is_relocatable() ? " Relocatable" : ""); + print_on_msg(out, nullptr); } void ZPage::print() const { print_on(tty); } -void ZPage::verify_live(uint32_t live_objects, size_t live_bytes) const { +void ZPage::verify_live(uint32_t live_objects, size_t live_bytes, bool in_place) const { + if (!in_place) { + // In-place relocation has changed the page to allocating + assert_zpage_mark_state(); + } guarantee(live_objects == _livemap.live_objects(), "Invalid number of live objects"); guarantee(live_bytes == _livemap.live_bytes(), "Invalid number of live bytes"); } + +void ZPage::fatal_msg(const char* msg) const { + stringStream ss; + print_on_msg(&ss, msg); + fatal("%s", ss.base()); +} diff --git a/src/hotspot/share/gc/z/zPage.hpp b/src/hotspot/share/gc/z/zPage.hpp index 513773e3f8e79..e07b338c710e0 100644 --- a/src/hotspot/share/gc/z/zPage.hpp +++ b/src/hotspot/share/gc/z/zPage.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,95 +24,202 @@ #ifndef SHARE_GC_Z_ZPAGE_HPP #define SHARE_GC_Z_ZPAGE_HPP +#include "gc/z/zGenerationId.hpp" #include "gc/z/zList.hpp" #include "gc/z/zLiveMap.hpp" +#include "gc/z/zPageAge.hpp" +#include "gc/z/zPageType.hpp" #include "gc/z/zPhysicalMemory.hpp" +#include "gc/z/zRememberedSet.hpp" #include "gc/z/zVirtualMemory.hpp" #include "memory/allocation.hpp" +class ZGeneration; + +enum class ZPageResetType { + // Normal allocation path + Allocation, + // Relocation failed and started to relocate in-place + InPlaceRelocation, + // Page was not selected for relocation, all objects + // stayed, but the page aged. + FlipAging, + // The page was split and needs to be reset + Splitting, +}; + class ZPage : public CHeapObj { friend class VMStructs; friend class ZList; + friend class ZForwardingTest; private: - uint8_t _type; - uint8_t _numa_id; - uint32_t _seqnum; - ZVirtualMemory _virtual; - volatile uintptr_t _top; - ZLiveMap _livemap; - uint64_t _last_used; - ZPhysicalMemory _physical; - ZListNode _node; - - void assert_initialized() const; - - uint8_t type_from_size(size_t size) const; + ZPageType _type; + ZGenerationId _generation_id; + ZPageAge _age; + uint8_t _numa_id; + uint32_t _seqnum; + uint32_t _seqnum_other; + ZVirtualMemory _virtual; + volatile zoffset_end _top; + ZLiveMap _livemap; + ZRememberedSet _remembered_set; + uint64_t _last_used; + ZPhysicalMemory _physical; + ZListNode _node; + + ZPageType type_from_size(size_t size) const; const char* type_to_string() const; - bool is_object_marked(uintptr_t addr) const; - bool is_object_strongly_marked(uintptr_t addr) const; + BitMap::idx_t bit_index(zaddress addr) const; + zoffset offset_from_bit_index(BitMap::idx_t index) const; + oop object_from_bit_index(BitMap::idx_t index) const; + + bool is_live_bit_set(zaddress addr) const; + bool is_strong_bit_set(zaddress addr) const; + + ZGeneration* generation(); + const ZGeneration* generation() const; + + void reset_seqnum(); + void reset_remembered_set(); + + ZPage* split_with_pmem(ZPageType type, const ZPhysicalMemory& pmem); + + void verify_remset_after_reset(ZPageAge prev_age, ZPageResetType type); public: - ZPage(const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem); - ZPage(uint8_t type, const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem); - ~ZPage(); + ZPage(ZPageType type, const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem); + + ZPage* clone_limited() const; + ZPage* clone_limited_promote_flipped() const; uint32_t object_max_count() const; size_t object_alignment_shift() const; size_t object_alignment() const; - uint8_t type() const; - uintptr_t start() const; - uintptr_t end() const; + ZPageType type() const; + + bool is_small() const; + bool is_medium() const; + bool is_large() const; + + ZGenerationId generation_id() const; + bool is_young() const; + bool is_old() const; + zoffset start() const; + zoffset_end end() const; size_t size() const; - uintptr_t top() const; + zoffset_end top() const; size_t remaining() const; + size_t used() const; const ZVirtualMemory& virtual_memory() const; const ZPhysicalMemory& physical_memory() const; ZPhysicalMemory& physical_memory(); uint8_t numa_id(); + ZPageAge age() const; + uint32_t seqnum() const; bool is_allocating() const; bool is_relocatable() const; uint64_t last_used() const; void set_last_used(); - void reset(); - void reset_for_in_place_relocation(); + 
void reset(ZPageAge age, ZPageResetType type); + + void finalize_reset_for_in_place_relocation(); + + void reset_type_and_size(ZPageType type); - ZPage* retype(uint8_t type); - ZPage* split(size_t size); - ZPage* split(uint8_t type, size_t size); + ZPage* retype(ZPageType type); + ZPage* split(size_t split_of_size); + ZPage* split(ZPageType type, size_t split_of_size); ZPage* split_committed(); - bool is_in(uintptr_t addr) const; + bool is_in(zoffset offset) const; + bool is_in(zaddress addr) const; + + uintptr_t local_offset(zoffset offset) const; + uintptr_t local_offset(zoffset_end offset) const; + uintptr_t local_offset(zaddress addr) const; + uintptr_t local_offset(zaddress_unsafe addr) const; + + zoffset global_offset(uintptr_t local_offset) const; + + bool is_object_live(zaddress addr) const; + bool is_object_strongly_live(zaddress addr) const; bool is_marked() const; - template bool is_object_marked(uintptr_t addr) const; - bool is_object_live(uintptr_t addr) const; - bool is_object_strongly_live(uintptr_t addr) const; - bool mark_object(uintptr_t addr, bool finalizable, bool& inc_live); + bool is_object_marked_live(zaddress addr) const; + bool is_object_marked_strong(zaddress addr) const; + bool is_object_marked(zaddress addr, bool finalizable) const; + bool mark_object(zaddress addr, bool finalizable, bool& inc_live); void inc_live(uint32_t objects, size_t bytes); uint32_t live_objects() const; size_t live_bytes() const; - void object_iterate(ObjectClosure* cl); + template + void object_iterate(Function function); - uintptr_t alloc_object(size_t size); - uintptr_t alloc_object_atomic(size_t size); + void remember(volatile zpointer* p); - bool undo_alloc_object(uintptr_t addr, size_t size); - bool undo_alloc_object_atomic(uintptr_t addr, size_t size); + // In-place relocation support + void clear_remset_bit_non_par_current(uintptr_t l_offset); + void clear_remset_range_non_par_current(uintptr_t l_offset, size_t size); + void swap_remset_bitmaps(); + void remset_clear(); + + ZBitMap::ReverseIterator remset_reverse_iterator_previous(); + BitMap::Iterator remset_iterator_limited_current(uintptr_t l_offset, size_t size); + BitMap::Iterator remset_iterator_limited_previous(uintptr_t l_offset, size_t size); + + zaddress_unsafe find_base_unsafe(volatile zpointer* p); + zaddress_unsafe find_base(volatile zpointer* p); + + template + void oops_do_remembered(Function function); + + // Only visits remembered set entries for live objects + template + void oops_do_remembered_in_live(Function function); + + template + void oops_do_current_remembered(Function function); + + bool is_remset_cleared_current() const; + bool is_remset_cleared_previous() const; + + void verify_remset_cleared_current() const; + void verify_remset_cleared_previous() const; + + void clear_remset_current(); + void clear_remset_previous(); + + void* remset_current(); + + zaddress alloc_object(size_t size); + zaddress alloc_object_atomic(size_t size); + + bool undo_alloc_object(zaddress addr, size_t size); + bool undo_alloc_object_atomic(zaddress addr, size_t size); + + void log_msg(const char* msg_format, ...) 
const ATTRIBUTE_PRINTF(2, 3); + + void print_on_msg(outputStream* out, const char* msg) const; void print_on(outputStream* out) const; void print() const; - void verify_live(uint32_t live_objects, size_t live_bytes) const; + // Verification + bool was_remembered(volatile zpointer* p); + bool is_remembered(volatile zpointer* p); + void verify_live(uint32_t live_objects, size_t live_bytes, bool in_place) const; + + void fatal_msg(const char* msg) const; }; class ZPageClosure { diff --git a/src/hotspot/share/gc/z/zPage.inline.hpp b/src/hotspot/share/gc/z/zPage.inline.hpp index e7d6742b91ce2..6cacf699145d2 100644 --- a/src/hotspot/share/gc/z/zPage.inline.hpp +++ b/src/hotspot/share/gc/z/zPage.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,43 +27,50 @@ #include "gc/z/zPage.hpp" #include "gc/z/zAddress.inline.hpp" +#include "gc/z/zGeneration.inline.hpp" #include "gc/z/zGlobals.hpp" #include "gc/z/zLiveMap.inline.hpp" #include "gc/z/zNUMA.hpp" #include "gc/z/zPhysicalMemory.inline.hpp" +#include "gc/z/zRememberedSet.inline.hpp" +#include "gc/z/zUtils.inline.hpp" #include "gc/z/zVirtualMemory.inline.hpp" +#include "logging/logStream.hpp" #include "runtime/atomic.hpp" #include "runtime/os.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" -inline uint8_t ZPage::type_from_size(size_t size) const { +inline ZPageType ZPage::type_from_size(size_t size) const { if (size == ZPageSizeSmall) { - return ZPageTypeSmall; + return ZPageType::small; } else if (size == ZPageSizeMedium) { - return ZPageTypeMedium; + return ZPageType::medium; } else { - return ZPageTypeLarge; + return ZPageType::large; } } inline const char* ZPage::type_to_string() const { switch (type()) { - case ZPageTypeSmall: + case ZPageType::small: return "Small"; - case ZPageTypeMedium: + case ZPageType::medium: return "Medium"; - default: - assert(type() == ZPageTypeLarge, "Invalid page type"); + case ZPageType::large: return "Large"; + + default: + fatal("Unexpected page type"); + return 0; } } inline uint32_t ZPage::object_max_count() const { switch (type()) { - case ZPageTypeLarge: + case ZPageType::large: // A large page can only contain a single // object aligned to the start of the page. 
return 1; @@ -75,41 +82,71 @@ inline uint32_t ZPage::object_max_count() const { inline size_t ZPage::object_alignment_shift() const { switch (type()) { - case ZPageTypeSmall: + case ZPageType::small: return ZObjectAlignmentSmallShift; - case ZPageTypeMedium: + case ZPageType::medium: return ZObjectAlignmentMediumShift; - default: - assert(type() == ZPageTypeLarge, "Invalid page type"); + case ZPageType::large: return ZObjectAlignmentLargeShift; + + default: + fatal("Unexpected page type"); + return 0; } } inline size_t ZPage::object_alignment() const { switch (type()) { - case ZPageTypeSmall: + case ZPageType::small: return ZObjectAlignmentSmall; - case ZPageTypeMedium: + case ZPageType::medium: return ZObjectAlignmentMedium; - default: - assert(type() == ZPageTypeLarge, "Invalid page type"); + case ZPageType::large: return ZObjectAlignmentLarge; + + default: + fatal("Unexpected page type"); + return 0; } } -inline uint8_t ZPage::type() const { +inline ZPageType ZPage::type() const { return _type; } -inline uintptr_t ZPage::start() const { +inline bool ZPage::is_small() const { + return _type == ZPageType::small; +} + +inline bool ZPage::is_medium() const { + return _type == ZPageType::medium; +} + +inline bool ZPage::is_large() const { + return _type == ZPageType::large; +} + +inline ZGenerationId ZPage::generation_id() const { + return _generation_id; +} + +inline bool ZPage::is_young() const { + return _generation_id == ZGenerationId::young; +} + +inline bool ZPage::is_old() const { + return _generation_id == ZGenerationId::old; +} + +inline zoffset ZPage::start() const { return _virtual.start(); } -inline uintptr_t ZPage::end() const { +inline zoffset_end ZPage::end() const { return _virtual.end(); } @@ -117,7 +154,7 @@ inline size_t ZPage::size() const { return _virtual.size(); } -inline uintptr_t ZPage::top() const { +inline zoffset_end ZPage::top() const { return _top; } @@ -125,6 +162,10 @@ inline size_t ZPage::remaining() const { return end() - top(); } +inline size_t ZPage::used() const { + return top() - start(); +} + inline const ZVirtualMemory& ZPage::virtual_memory() const { return _virtual; } @@ -139,18 +180,26 @@ inline ZPhysicalMemory& ZPage::physical_memory() { inline uint8_t ZPage::numa_id() { if (_numa_id == (uint8_t)-1) { - _numa_id = ZNUMA::memory_id(ZAddress::good(start())); + _numa_id = ZNUMA::memory_id(untype(ZOffset::address(start()))); } return _numa_id; } +inline ZPageAge ZPage::age() const { + return _age; +} + +inline uint32_t ZPage::seqnum() const { + return _seqnum; +} + inline bool ZPage::is_allocating() const { - return _seqnum == ZGlobalSeqNum; + return _seqnum == generation()->seqnum(); } inline bool ZPage::is_relocatable() const { - return _seqnum < ZGlobalSeqNum; + return _seqnum < generation()->seqnum(); } inline uint64_t ZPage::last_used() const { @@ -161,103 +210,293 @@ inline void ZPage::set_last_used() { _last_used = ceil(os::elapsedTime()); } -inline bool ZPage::is_in(uintptr_t addr) const { - const uintptr_t offset = ZAddress::offset(addr); +inline bool ZPage::is_in(zoffset offset) const { return offset >= start() && offset < top(); } +inline bool ZPage::is_in(zaddress addr) const { + const zoffset offset = ZAddress::offset(addr); + return is_in(offset); +} + +inline uintptr_t ZPage::local_offset(zoffset offset) const { + assert(ZHeap::heap()->is_in_page_relaxed(this, ZOffset::address(offset)), + "Invalid offset " PTR_FORMAT " page [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")", + untype(offset), untype(start()), untype(top()), 
untype(end())); + return offset - start(); +} + +inline uintptr_t ZPage::local_offset(zoffset_end offset) const { + assert(offset <= end(), "Wrong offset"); + return offset - start(); +} + +inline uintptr_t ZPage::local_offset(zaddress addr) const { + const zoffset offset = ZAddress::offset(addr); + return local_offset(offset); +} + +inline uintptr_t ZPage::local_offset(zaddress_unsafe addr) const { + const zoffset offset = ZAddress::offset(addr); + return local_offset(offset); +} + +inline zoffset ZPage::global_offset(uintptr_t local_offset) const { + return start() + local_offset; +} + inline bool ZPage::is_marked() const { assert(is_relocatable(), "Invalid page state"); - return _livemap.is_marked(); + return _livemap.is_marked(_generation_id); +} + +inline BitMap::idx_t ZPage::bit_index(zaddress addr) const { + return (local_offset(addr) >> object_alignment_shift()) * 2; +} + +inline zoffset ZPage::offset_from_bit_index(BitMap::idx_t index) const { + const uintptr_t l_offset = ((index / 2) << object_alignment_shift()); + return start() + l_offset; +} + +inline oop ZPage::object_from_bit_index(BitMap::idx_t index) const { + const zoffset offset = offset_from_bit_index(index); + return to_oop(ZOffset::address(offset)); } -inline bool ZPage::is_object_marked(uintptr_t addr) const { +inline bool ZPage::is_live_bit_set(zaddress addr) const { assert(is_relocatable(), "Invalid page state"); - const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2; - return _livemap.get(index); + const BitMap::idx_t index = bit_index(addr); + return _livemap.get(_generation_id, index); } -inline bool ZPage::is_object_strongly_marked(uintptr_t addr) const { +inline bool ZPage::is_strong_bit_set(zaddress addr) const { assert(is_relocatable(), "Invalid page state"); - const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2; - return _livemap.get(index + 1); + const BitMap::idx_t index = bit_index(addr); + return _livemap.get(_generation_id, index + 1); } -template -inline bool ZPage::is_object_marked(uintptr_t addr) const { - return finalizable ? is_object_marked(addr) : is_object_strongly_marked(addr); +inline bool ZPage::is_object_live(zaddress addr) const { + return is_allocating() || is_live_bit_set(addr); } -inline bool ZPage::is_object_live(uintptr_t addr) const { - return is_allocating() || is_object_marked(addr); +inline bool ZPage::is_object_strongly_live(zaddress addr) const { + return is_allocating() || is_strong_bit_set(addr); +} + +inline bool ZPage::is_object_marked_live(zaddress addr) const { + // This function is only used by the marking code and therefore has stronger + // asserts that are not always valid to ask when checking for liveness. + assert(!is_old() || ZGeneration::old()->is_phase_mark(), "Location should match phase"); + assert(!is_young() || ZGeneration::young()->is_phase_mark(), "Location should match phase"); + + return is_object_live(addr); +} + +inline bool ZPage::is_object_marked_strong(zaddress addr) const { + // This function is only used by the marking code and therefore has stronger + // asserts that are not always valid to ask when checking for liveness. 
+ assert(!is_old() || ZGeneration::old()->is_phase_mark(), "Location should match phase"); + assert(!is_young() || ZGeneration::young()->is_phase_mark(), "Location should match phase"); + + return is_object_strongly_live(addr); } -inline bool ZPage::is_object_strongly_live(uintptr_t addr) const { - return is_allocating() || is_object_strongly_marked(addr); +inline bool ZPage::is_object_marked(zaddress addr, bool finalizable) const { + return finalizable ? is_object_marked_live(addr) : is_object_marked_strong(addr); } -inline bool ZPage::mark_object(uintptr_t addr, bool finalizable, bool& inc_live) { - assert(ZAddress::is_marked(addr), "Invalid address"); +inline bool ZPage::mark_object(zaddress addr, bool finalizable, bool& inc_live) { assert(is_relocatable(), "Invalid page state"); assert(is_in(addr), "Invalid address"); + // Verify oop + (void)to_oop(addr); + // Set mark bit - const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2; - return _livemap.set(index, finalizable, inc_live); + const BitMap::idx_t index = bit_index(addr); + return _livemap.set(_generation_id, index, finalizable, inc_live); } inline void ZPage::inc_live(uint32_t objects, size_t bytes) { _livemap.inc_live(objects, bytes); } +#define assert_zpage_mark_state() \ + do { \ + assert(is_marked(), "Should be marked"); \ + assert(!is_young() || !ZGeneration::young()->is_phase_mark(), "Wrong phase"); \ + assert(!is_old() || !ZGeneration::old()->is_phase_mark(), "Wrong phase"); \ + } while (0) + inline uint32_t ZPage::live_objects() const { - assert(is_marked(), "Should be marked"); + assert_zpage_mark_state(); + return _livemap.live_objects(); } inline size_t ZPage::live_bytes() const { - assert(is_marked(), "Should be marked"); + assert_zpage_mark_state(); + return _livemap.live_bytes(); } -inline void ZPage::object_iterate(ObjectClosure* cl) { - _livemap.iterate(cl, ZAddress::good(start()), object_alignment_shift()); +template +inline void ZPage::object_iterate(Function function) { + auto do_bit = [&](BitMap::idx_t index) -> bool { + const oop obj = object_from_bit_index(index); + + // Apply function + function(obj); + + return true; + }; + + _livemap.iterate(_generation_id, do_bit); +} + +inline void ZPage::remember(volatile zpointer* p) { + const zaddress addr = to_zaddress((uintptr_t)p); + const uintptr_t l_offset = local_offset(addr); + _remembered_set.set_current(l_offset); +} + +inline void ZPage::clear_remset_bit_non_par_current(uintptr_t l_offset) { + _remembered_set.unset_non_par_current(l_offset); +} + +inline void ZPage::clear_remset_range_non_par_current(uintptr_t l_offset, size_t size) { + _remembered_set.unset_range_non_par_current(l_offset, size); +} + +inline ZBitMap::ReverseIterator ZPage::remset_reverse_iterator_previous() { + return _remembered_set.iterator_reverse_previous(); +} + +inline BitMap::Iterator ZPage::remset_iterator_limited_current(uintptr_t l_offset, size_t size) { + return _remembered_set.iterator_limited_current(l_offset, size); +} + +inline BitMap::Iterator ZPage::remset_iterator_limited_previous(uintptr_t l_offset, size_t size) { + return _remembered_set.iterator_limited_previous(l_offset, size); +} + +inline bool ZPage::is_remembered(volatile zpointer* p) { + const zaddress addr = to_zaddress((uintptr_t)p); + const uintptr_t l_offset = local_offset(addr); + return _remembered_set.at_current(l_offset); +} + +inline bool ZPage::was_remembered(volatile zpointer* p) { + const zaddress addr = to_zaddress((uintptr_t)p); + const uintptr_t l_offset = 
local_offset(addr); + return _remembered_set.at_previous(l_offset); +} + + +inline zaddress_unsafe ZPage::find_base_unsafe(volatile zpointer* p) { + if (is_large()) { + return ZOffset::address_unsafe(start()); + } + + // Note: when thinking about excluding looking at the index corresponding to + // the field address p, it's important to note that for medium pages both p + // and it's associated base could map to the same index. + const BitMap::idx_t index = bit_index(zaddress(uintptr_t(p))); + const BitMap::idx_t base_index = _livemap.find_base_bit(index); + if (base_index == BitMap::idx_t(-1)) { + return zaddress_unsafe::null; + } else { + return ZOffset::address_unsafe(offset_from_bit_index(base_index)); + } +} + +inline zaddress_unsafe ZPage::find_base(volatile zpointer* p) { + assert_zpage_mark_state(); + + return find_base_unsafe(p); +} + +template +inline void ZPage::oops_do_remembered(Function function) { + _remembered_set.iterate_previous([&](uintptr_t local_offset) { + const zoffset offset = start() + local_offset; + const zaddress addr = ZOffset::address(offset); + + function((volatile zpointer*)addr); + }); +} + +template +inline void ZPage::oops_do_remembered_in_live(Function function) { + assert(!is_allocating(), "Must have liveness information"); + assert(!ZGeneration::old()->is_phase_mark(), "Must have liveness information"); + assert(is_marked(), "Must have liveness information"); + + ZRememberedSetContainingInLiveIterator iter(this); + for (ZRememberedSetContaining containing; iter.next(&containing);) { + function((volatile zpointer*)containing._field_addr); + } + + iter.print_statistics(); } -inline uintptr_t ZPage::alloc_object(size_t size) { +template +inline void ZPage::oops_do_current_remembered(Function function) { + _remembered_set.iterate_current([&](uintptr_t local_offset) { + const zoffset offset = start() + local_offset; + const zaddress addr = ZOffset::address(offset); + + function((volatile zpointer*)addr); + }); +} + +inline zaddress ZPage::alloc_object(size_t size) { assert(is_allocating(), "Invalid state"); const size_t aligned_size = align_up(size, object_alignment()); - const uintptr_t addr = top(); - const uintptr_t new_top = addr + aligned_size; + const zoffset_end addr = top(); + + zoffset_end new_top; + + if (!to_zoffset_end(&new_top, addr, aligned_size)) { + // Next top would be outside of the heap - bail + return zaddress::null; + } if (new_top > end()) { - // Not enough space left - return 0; + // Not enough space left in the page + return zaddress::null; } _top = new_top; - return ZAddress::good(addr); + return ZOffset::address(to_zoffset(addr)); } -inline uintptr_t ZPage::alloc_object_atomic(size_t size) { +inline zaddress ZPage::alloc_object_atomic(size_t size) { assert(is_allocating(), "Invalid state"); const size_t aligned_size = align_up(size, object_alignment()); - uintptr_t addr = top(); + zoffset_end addr = top(); for (;;) { - const uintptr_t new_top = addr + aligned_size; + zoffset_end new_top; + + if (!to_zoffset_end(&new_top, addr, aligned_size)) { + // Next top would be outside of the heap - bail + return zaddress::null; + } + if (new_top > end()) { // Not enough space left - return 0; + return zaddress::null; } - const uintptr_t prev_top = Atomic::cmpxchg(&_top, addr, new_top); + const zoffset_end prev_top = Atomic::cmpxchg(&_top, addr, new_top); if (prev_top == addr) { // Success - return ZAddress::good(addr); + return ZOffset::address(to_zoffset(addr)); } // Retry @@ -265,13 +504,13 @@ inline uintptr_t 
ZPage::alloc_object_atomic(size_t size) { } } -inline bool ZPage::undo_alloc_object(uintptr_t addr, size_t size) { +inline bool ZPage::undo_alloc_object(zaddress addr, size_t size) { assert(is_allocating(), "Invalid state"); - const uintptr_t offset = ZAddress::offset(addr); + const zoffset offset = ZAddress::offset(addr); const size_t aligned_size = align_up(size, object_alignment()); - const uintptr_t old_top = top(); - const uintptr_t new_top = old_top - aligned_size; + const zoffset_end old_top = top(); + const zoffset_end new_top = old_top - aligned_size; if (new_top != offset) { // Failed to undo allocation, not the last allocated object @@ -284,21 +523,21 @@ inline bool ZPage::undo_alloc_object(uintptr_t addr, size_t size) { return true; } -inline bool ZPage::undo_alloc_object_atomic(uintptr_t addr, size_t size) { +inline bool ZPage::undo_alloc_object_atomic(zaddress addr, size_t size) { assert(is_allocating(), "Invalid state"); - const uintptr_t offset = ZAddress::offset(addr); + const zoffset offset = ZAddress::offset(addr); const size_t aligned_size = align_up(size, object_alignment()); - uintptr_t old_top = top(); + zoffset_end old_top = top(); for (;;) { - const uintptr_t new_top = old_top - aligned_size; + const zoffset_end new_top = old_top - aligned_size; if (new_top != offset) { // Failed to undo allocation, not the last allocated object return false; } - const uintptr_t prev_top = Atomic::cmpxchg(&_top, old_top, new_top); + const zoffset_end prev_top = Atomic::cmpxchg(&_top, old_top, new_top); if (prev_top == old_top) { // Success return true; @@ -309,4 +548,15 @@ inline bool ZPage::undo_alloc_object_atomic(uintptr_t addr, size_t size) { } } +inline void ZPage::log_msg(const char* msg_format, ...) const { + LogTarget(Trace, gc, page) target; + if (target.is_enabled()) { + va_list argp; + va_start(argp, msg_format); + LogStream stream(target); + print_on_msg(&stream, err_msg(FormatBufferDummy(), msg_format, argp)); + va_end(argp); + } +} + #endif // SHARE_GC_Z_ZPAGE_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zPageAge.hpp b/src/hotspot/share/gc/z/zPageAge.hpp new file mode 100644 index 0000000000000..b7b1688b8308c --- /dev/null +++ b/src/hotspot/share/gc/z/zPageAge.hpp @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_Z_ZPAGEAGE_HPP +#define SHARE_GC_Z_ZPAGEAGE_HPP + +#include "utilities/globalDefinitions.hpp" + +enum class ZPageAge : uint8_t { + eden, + survivor1, + survivor2, + survivor3, + survivor4, + survivor5, + survivor6, + survivor7, + survivor8, + survivor9, + survivor10, + survivor11, + survivor12, + survivor13, + survivor14, + old +}; + +constexpr uint ZPageAgeMax = static_cast(ZPageAge::old); + +#endif // SHARE_GC_Z_ZPAGEAGE_HPP diff --git a/src/hotspot/share/gc/z/zPageAllocator.cpp b/src/hotspot/share/gc/z/zPageAllocator.cpp index e964aec5454eb..c439638f87c04 100644 --- a/src/hotspot/share/gc/z/zPageAllocator.cpp +++ b/src/hotspot/share/gc/z/zPageAllocator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,11 +25,14 @@ #include "gc/shared/gcLogPrecious.hpp" #include "gc/shared/suspendibleThreadSet.hpp" #include "gc/z/zArray.inline.hpp" -#include "gc/z/zCollectedHeap.hpp" +#include "gc/z/zDriver.hpp" #include "gc/z/zFuture.inline.hpp" +#include "gc/z/zGeneration.inline.hpp" +#include "gc/z/zGenerationId.hpp" #include "gc/z/zGlobals.hpp" #include "gc/z/zLock.inline.hpp" #include "gc/z/zPage.inline.hpp" +#include "gc/z/zPageAge.hpp" #include "gc/z/zPageAllocator.inline.hpp" #include "gc/z/zPageCache.hpp" #include "gc/z/zSafeDelete.inline.hpp" @@ -46,44 +49,82 @@ #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" -static const ZStatCounter ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond); +static const ZStatCounter ZCounterMutatorAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond); static const ZStatCounter ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond); static const ZStatCounter ZCounterDefragment("Memory", "Defragment", ZStatUnitOpsPerSecond); static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall"); -enum ZPageAllocationStall { - ZPageAllocationStallSuccess, - ZPageAllocationStallFailed, - ZPageAllocationStallStartGC -}; +ZSafePageRecycle::ZSafePageRecycle(ZPageAllocator* page_allocator) : + _page_allocator(page_allocator), + _unsafe_to_recycle() {} + +void ZSafePageRecycle::activate() { + _unsafe_to_recycle.activate(); +} + +void ZSafePageRecycle::deactivate() { + auto delete_function = [&](ZPage* page) { + _page_allocator->safe_destroy_page(page); + }; + + _unsafe_to_recycle.deactivate_and_apply(delete_function); +} + +ZPage* ZSafePageRecycle::register_and_clone_if_activated(ZPage* page) { + if (!_unsafe_to_recycle.is_activated()) { + // The page has no concurrent readers. + // Recycle original page. + return page; + } + + // The page could have concurrent readers. + // It would be unsafe to recycle this page at this point. + + // As soon as the page is added to _unsafe_to_recycle, it + // must not be used again. Hence, the extra double-checked + // locking to only clone the page if it is believed to be + // unsafe to recycle the page. + ZPage* const cloned_page = page->clone_limited(); + if (!_unsafe_to_recycle.add_if_activated(page)) { + // It became safe to recycle the page after the is_activated check + delete cloned_page; + return page; + } + + // The original page has been registered to be deleted by another thread. + // Recycle the cloned page. 
+ return cloned_page; +} class ZPageAllocation : public StackObj { friend class ZList; private: - const uint8_t _type; - const size_t _size; - const ZAllocationFlags _flags; - const uint32_t _seqnum; - size_t _flushed; - size_t _committed; - ZList _pages; - ZListNode _node; - ZFuture _stall_result; + const ZPageType _type; + const size_t _size; + const ZAllocationFlags _flags; + const uint32_t _young_seqnum; + const uint32_t _old_seqnum; + size_t _flushed; + size_t _committed; + ZList _pages; + ZListNode _node; + ZFuture _stall_result; public: - ZPageAllocation(uint8_t type, size_t size, ZAllocationFlags flags) : + ZPageAllocation(ZPageType type, size_t size, ZAllocationFlags flags) : _type(type), _size(size), _flags(flags), - _seqnum(ZGlobalSeqNum), + _young_seqnum(ZGeneration::young()->seqnum()), + _old_seqnum(ZGeneration::old()->seqnum()), _flushed(0), _committed(0), _pages(), _node(), _stall_result() {} - uint8_t type() const { + ZPageType type() const { return _type; } @@ -95,8 +136,12 @@ class ZPageAllocation : public StackObj { return _flags; } - uint32_t seqnum() const { - return _seqnum; + uint32_t young_seqnum() const { + return _young_seqnum; + } + + uint32_t old_seqnum() const { + return _old_seqnum; } size_t flushed() const { @@ -115,7 +160,7 @@ class ZPageAllocation : public StackObj { _committed = committed; } - ZPageAllocationStall wait() { + bool wait() { return _stall_result.get(); } @@ -123,34 +168,37 @@ class ZPageAllocation : public StackObj { return &_pages; } - void satisfy(ZPageAllocationStall result) { + void satisfy(bool result) { _stall_result.set(result); } + + bool gc_relocation() const { + return _flags.gc_relocation(); + } }; -ZPageAllocator::ZPageAllocator(ZWorkers* workers, - size_t min_capacity, +ZPageAllocator::ZPageAllocator(size_t min_capacity, size_t initial_capacity, + size_t soft_max_capacity, size_t max_capacity) : _lock(), _cache(), _virtual(max_capacity), _physical(max_capacity), _min_capacity(min_capacity), + _initial_capacity(initial_capacity), _max_capacity(max_capacity), _current_max_capacity(max_capacity), _capacity(0), _claimed(0), _used(0), - _used_high(0), - _used_low(0), - _reclaimed(0), + _used_generations{0, 0}, + _collection_stats{{0, 0}, {0, 0}}, _stalled(), - _nstalled(0), - _satisfied(), _unmapper(new ZUnmapper(this)), _uncommitter(new ZUncommitter(this)), - _safe_delete(), + _safe_destroy(), + _safe_recycle(this), _initialized(false) { if (!_virtual.is_initialized() || !_physical.is_initialized()) { @@ -160,6 +208,7 @@ ZPageAllocator::ZPageAllocator(ZWorkers* workers, log_info_p(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M); log_info_p(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M); log_info_p(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M); + log_info_p(gc, init)("Soft Max Capacity: " SIZE_FORMAT "M", soft_max_capacity / M); if (ZPageSizeMedium > 0) { log_info_p(gc, init)("Medium Page Size: " SIZE_FORMAT "M", ZPageSizeMedium / M); } else { @@ -173,24 +222,22 @@ ZPageAllocator::ZPageAllocator(ZWorkers* workers, // Check if uncommit should and can be enabled _physical.try_enable_uncommit(min_capacity, max_capacity); - // Pre-map initial capacity - if (!prime_cache(workers, initial_capacity)) { - log_error_p(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M); - return; - } - // Successfully initialized _initialized = true; } +bool ZPageAllocator::is_initialized() const { + return _initialized; +} + class ZPreTouchTask : public ZTask { private: 
const ZPhysicalMemoryManager* const _physical; - volatile uintptr_t _start; - const uintptr_t _end; + volatile zoffset _start; + const zoffset_end _end; public: - ZPreTouchTask(const ZPhysicalMemoryManager* physical, uintptr_t start, uintptr_t end) : + ZPreTouchTask(const ZPhysicalMemoryManager* physical, zoffset start, zoffset_end end) : ZTask("ZPreTouchTask"), _physical(physical), _start(start), @@ -200,7 +247,7 @@ class ZPreTouchTask : public ZTask { for (;;) { // Get granule offset const size_t size = ZGranuleSize; - const uintptr_t offset = Atomic::fetch_and_add(&_start, size); + const zoffset offset = to_zoffset(Atomic::fetch_then_add((uintptr_t*)&_start, size)); if (offset >= _end) { // Done break; @@ -214,12 +261,11 @@ class ZPreTouchTask : public ZTask { bool ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) { ZAllocationFlags flags; - flags.set_non_blocking(); flags.set_low_address(); - ZPage* const page = alloc_page(ZPageTypeLarge, size, flags); - if (page == NULL) { + ZPage* const page = alloc_page(ZPageType::large, size, flags, ZPageAge::eden); + if (page == nullptr) { return false; } @@ -229,13 +275,13 @@ bool ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) { workers->run_all(&task); } - free_page(page, false /* reclaimed */); + free_page(page); return true; } -bool ZPageAllocator::is_initialized() const { - return _initialized; +size_t ZPageAllocator::initial_capacity() const { + return _initial_capacity; } size_t ZPageAllocator::min_capacity() const { @@ -261,6 +307,10 @@ size_t ZPageAllocator::used() const { return Atomic::load(&_used); } +size_t ZPageAllocator::used_generation(ZGenerationId id) const { + return Atomic::load(&_used_generations[(int)id]); +} + size_t ZPageAllocator::unused() const { const ssize_t capacity = (ssize_t)Atomic::load(&_capacity); const ssize_t used = (ssize_t)Atomic::load(&_used); @@ -269,23 +319,26 @@ size_t ZPageAllocator::unused() const { return unused > 0 ? (size_t)unused : 0; } -ZPageAllocatorStats ZPageAllocator::stats() const { +ZPageAllocatorStats ZPageAllocator::stats(ZGeneration* generation) const { ZLocker locker(&_lock); return ZPageAllocatorStats(_min_capacity, _max_capacity, soft_max_capacity(), _capacity, _used, - _used_high, - _used_low, - _reclaimed); + _collection_stats[(int)generation->id()]._used_high, + _collection_stats[(int)generation->id()]._used_low, + used_generation(generation->id()), + generation->freed(), + generation->promoted(), + generation->compacted(), + _stalled.size()); } -void ZPageAllocator::reset_statistics() { +void ZPageAllocator::reset_statistics(ZGenerationId id) { assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - _reclaimed = 0; - _used_high = _used_low = _used; - _nstalled = 0; + _collection_stats[(int)id]._used_high = _used; + _collection_stats[(int)id]._used_low = _used; } size_t ZPageAllocator::increase_capacity(size_t size) { @@ -322,36 +375,53 @@ void ZPageAllocator::decrease_capacity(size_t size, bool set_max_capacity) { } } -void ZPageAllocator::increase_used(size_t size, bool worker_relocation) { - if (worker_relocation) { - // Allocating a page for the purpose of worker relocation has - // a negative contribution to the number of reclaimed bytes. - _reclaimed -= size; - } +void ZPageAllocator::increase_used(size_t size) { + // We don't track generation usage here because this page + // could be allocated by a thread that satisfies a stalling + // allocation. 
The stalled thread can wake up and potentially + // realize that the page alloc should be undone. If the alloc + // and the undo gets separated by a safepoint, the generation + // statistics could se a decreasing used value between mark + // start and mark end. // Update atomically since we have concurrent readers const size_t used = Atomic::add(&_used, size); - if (used > _used_high) { - _used_high = used; - } -} -void ZPageAllocator::decrease_used(size_t size, bool reclaimed) { - // Only pages explicitly released with the reclaimed flag set - // counts as reclaimed bytes. This flag is true when we release - // a page after relocation, and is false when we release a page - // to undo an allocation. - if (reclaimed) { - _reclaimed += size; + // Update used high + for (auto& stats : _collection_stats) { + if (used > stats._used_high) { + stats._used_high = used; + } } +} +void ZPageAllocator::decrease_used(size_t size) { // Update atomically since we have concurrent readers const size_t used = Atomic::sub(&_used, size); - if (used < _used_low) { - _used_low = used; + + // Update used low + for (auto& stats : _collection_stats) { + if (used < stats._used_low) { + stats._used_low = used; + } } } +void ZPageAllocator::increase_used_generation(ZGenerationId id, size_t size) { + // Update atomically since we have concurrent readers + Atomic::add(&_used_generations[(int)id], size, memory_order_relaxed); +} + +void ZPageAllocator::decrease_used_generation(ZGenerationId id, size_t size) { + // Update atomically since we have concurrent readers + Atomic::sub(&_used_generations[(int)id], size, memory_order_relaxed); +} + +void ZPageAllocator::promote_used(size_t size) { + decrease_used_generation(ZGenerationId::young, size); + increase_used_generation(ZGenerationId::old, size); +} + bool ZPageAllocator::commit_page(ZPage* page) { // Commit physical memory return _physical.commit(page->physical_memory()); @@ -376,6 +446,11 @@ void ZPageAllocator::unmap_page(const ZPage* page) const { _physical.unmap(page->start(), page->size()); } +void ZPageAllocator::safe_destroy_page(ZPage* page) { + // Destroy page safely + _safe_destroy.schedule_delete(page); +} + void ZPageAllocator::destroy_page(ZPage* page) { // Free virtual memory _virtual.free(page->virtual_memory()); @@ -383,8 +458,8 @@ void ZPageAllocator::destroy_page(ZPage* page) { // Free physical memory _physical.free(page->physical_memory()); - // Delete page safely - _safe_delete(page); + // Destroy page safely + safe_destroy_page(page); } bool ZPageAllocator::is_alloc_allowed(size_t size) const { @@ -392,7 +467,7 @@ bool ZPageAllocator::is_alloc_allowed(size_t size) const { return available >= size; } -bool ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, ZList* pages) { +bool ZPageAllocator::alloc_page_common_inner(ZPageType type, size_t size, ZList* pages) { if (!is_alloc_allowed(size)) { // Out of memory return false; @@ -400,7 +475,7 @@ bool ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, ZListinsert_last(page); return true; @@ -420,7 +495,7 @@ bool ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, ZListtype(); + const ZPageType type = allocation->type(); const size_t size = allocation->size(); const ZAllocationFlags flags = allocation->flags(); ZList* const pages = allocation->pages(); @@ -431,7 +506,7 @@ bool ZPageAllocator::alloc_page_common(ZPageAllocation* allocation) { } // Updated used statistics - increase_used(size, flags.worker_relocation()); + increase_used(size); // Success return 
true; @@ -446,44 +521,32 @@ static void check_out_of_memory_during_initialization() { bool ZPageAllocator::alloc_page_stall(ZPageAllocation* allocation) { ZStatTimer timer(ZCriticalPhaseAllocationStall); EventZAllocationStall event; - ZPageAllocationStall result; // We can only block if the VM is fully initialized check_out_of_memory_during_initialization(); - // Increment stalled counter - Atomic::inc(&_nstalled); + // Start asynchronous minor GC + const ZDriverRequest request(GCCause::_z_allocation_stall, ZYoungGCThreads, 0); + ZDriver::minor()->collect(request); - do { - // Start asynchronous GC - ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall); - - // Wait for allocation to complete, fail or request a GC - result = allocation->wait(); - } while (result == ZPageAllocationStallStartGC); + // Wait for allocation to complete or fail + const bool result = allocation->wait(); { - // - // We grab the lock here for two different reasons: - // - // 1) Guard deletion of underlying semaphore. This is a workaround for + // Guard deletion of underlying semaphore. This is a workaround for // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy // the semaphore immediately after returning from sem_wait(). The // reason is that sem_post() can touch the semaphore after a waiting // thread have returned from sem_wait(). To avoid this race we are // forcing the waiting thread to acquire/release the lock held by the // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674 - // - // 2) Guard the list of satisfied pages. - // ZLocker locker(&_lock); - _satisfied.remove(allocation); } // Send event - event.commit(allocation->type(), allocation->size()); + event.commit((u8)allocation->type(), allocation->size()); - return (result == ZPageAllocationStallSuccess); + return result; } bool ZPageAllocator::alloc_page_or_stall(ZPageAllocation* allocation) { @@ -519,7 +582,7 @@ ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) { const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address()); if (vmem.is_null()) { log_error(gc)("Out of address space"); - return NULL; + return nullptr; } ZPhysicalMemory pmem; @@ -565,8 +628,8 @@ bool ZPageAllocator::should_defragment(const ZPage* page) const { // if we've split a larger page or we have a constrained address space. To help // fight address space fragmentation we remap such pages to a lower address, if // a lower address is available. 
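The zPageAllocator.cpp hunks above split usage accounting in two: _used remains a single heap-wide counter, _used_generations[] adds one counter per generation updated with relaxed atomics, and promote_used() transfers promoted bytes from young to old. A minimal standalone sketch of that scheme, using std::atomic and made-up names rather than the HotSpot Atomic/ZPageAllocator API:

```cpp
#include <atomic>
#include <cstddef>

enum class GenerationId { young = 0, old = 1 };

class UsedAccounting {
private:
  // Heap-wide counter, updated whenever pages are handed out or freed.
  std::atomic<size_t> _used{0};
  // One counter per generation, charged when a page is handed to its owner.
  std::atomic<size_t> _used_generations[2] = {{0}, {0}};

public:
  void increase_used(size_t size) { _used.fetch_add(size, std::memory_order_relaxed); }
  void decrease_used(size_t size) { _used.fetch_sub(size, std::memory_order_relaxed); }

  void increase_used_generation(GenerationId id, size_t size) {
    _used_generations[static_cast<int>(id)].fetch_add(size, std::memory_order_relaxed);
  }

  void decrease_used_generation(GenerationId id, size_t size) {
    _used_generations[static_cast<int>(id)].fetch_sub(size, std::memory_order_relaxed);
  }

  // Promotion transfers bytes between the generation counters only;
  // the heap-wide counter is unchanged.
  void promote_used(size_t size) {
    decrease_used_generation(GenerationId::young, size);
    increase_used_generation(GenerationId::old, size);
  }
};
```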
- return page->type() == ZPageTypeSmall && - page->start() >= _virtual.reserved() / 2 && + return page->type() == ZPageType::small && + page->start() >= to_zoffset(_virtual.reserved() / 2) && page->start() > _virtual.lowest_available_address(); } @@ -607,9 +670,9 @@ ZPage* ZPageAllocator::alloc_page_finalize(ZPageAllocation* allocation) { // Slow path ZPage* const page = alloc_page_create(allocation); - if (page == NULL) { + if (page == nullptr) { // Out of address space - return NULL; + return nullptr; } // Commit page @@ -625,36 +688,15 @@ ZPage* ZPageAllocator::alloc_page_finalize(ZPageAllocation* allocation) { ZPage* const committed_page = page->split_committed(); destroy_page(page); - if (committed_page != NULL) { + if (committed_page != nullptr) { map_page(committed_page); allocation->pages()->insert_last(committed_page); } - return NULL; + return nullptr; } -void ZPageAllocator::alloc_page_failed(ZPageAllocation* allocation) { - ZLocker locker(&_lock); - - size_t freed = 0; - - // Free any allocated/flushed pages - ZListRemoveIterator iter(allocation->pages()); - for (ZPage* page; iter.next(&page);) { - freed += page->size(); - free_page_inner(page, false /* reclaimed */); - } - - // Adjust capacity and used to reflect the failed capacity increase - const size_t remaining = allocation->size() - freed; - decrease_used(remaining, false /* reclaimed */); - decrease_capacity(remaining, true /* set_max_capacity */); - - // Try satisfy stalled allocations - satisfy_stalled(); -} - -ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) { +ZPage* ZPageAllocator::alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age) { EventZPageAllocation event; retry: @@ -667,34 +709,39 @@ ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags fl // block in a safepoint if the non-blocking flag is not set. if (!alloc_page_or_stall(&allocation)) { // Out of memory - return NULL; + return nullptr; } ZPage* const page = alloc_page_finalize(&allocation); - if (page == NULL) { + if (page == nullptr) { // Failed to commit or map. Clean up and retry, in the hope that // we can still allocate by flushing the page cache (more aggressively). - alloc_page_failed(&allocation); + free_pages_alloc_failed(&allocation); goto retry; } + // The generation's used is tracked here when the page is handed out + // to the allocating thread. The overall heap "used" is tracked in + // the lower-level allocation code. + const ZGenerationId id = age == ZPageAge::old ? ZGenerationId::old : ZGenerationId::young; + increase_used_generation(id, size); + // Reset page. This updates the page's sequence number and must // be done after we potentially blocked in a safepoint (stalled) // where the global sequence number was updated. - page->reset(); + page->reset(age, ZPageResetType::Allocation); - // Update allocation statistics. Exclude worker relocations to avoid + // Update allocation statistics. Exclude gc relocations to avoid // artificial inflation of the allocation rate during relocation. - if (!flags.worker_relocation() && is_init_completed()) { + if (!flags.gc_relocation() && is_init_completed()) { // Note that there are two allocation rate counters, which have // different purposes and are sampled at different frequencies. 
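alloc_page_stall() above no longer loops restarting GCs itself: it starts one asynchronous minor GC and then blocks until the request is either satisfied or failed, with satisfy(bool) delivering the outcome to wait(). A rough model of that wait/satisfy handshake, sketched with standard C++ primitives rather than ZGC's own lock and semaphore types:

```cpp
#include <condition_variable>
#include <mutex>

// Models one stalled allocation request: the allocating thread calls wait(),
// the GC / OOM handling side calls satisfy(true) on success or satisfy(false)
// when the request must fail with out-of-memory.
class StallRequest {
private:
  std::mutex              _lock;
  std::condition_variable _cv;
  bool                    _done   = false;
  bool                    _result = false;

public:
  // Called by the stalled allocating thread; returns the outcome.
  bool wait() {
    std::unique_lock<std::mutex> locker(_lock);
    _cv.wait(locker, [this] { return _done; });
    return _result;
  }

  // Called exactly once per request by the satisfying thread.
  void satisfy(bool result) {
    {
      std::lock_guard<std::mutex> locker(_lock);
      _result = result;
      _done = true;
    }
    _cv.notify_one();
  }
};
```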
- const size_t bytes = page->size(); - ZStatInc(ZCounterAllocationRate, bytes); - ZStatInc(ZStatAllocRate::counter(), bytes); + ZStatInc(ZCounterMutatorAllocationRate, size); + ZStatMutatorAllocRate::sample_allocation(size); } // Send event - event.commit(type, size, allocation.flushed(), allocation.committed(), + event.commit((u8)type, size, allocation.flushed(), allocation.committed(), page->physical_memory().nsegments(), flags.non_blocking()); return page; @@ -703,7 +750,7 @@ ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags fl void ZPageAllocator::satisfy_stalled() { for (;;) { ZPageAllocation* const allocation = _stalled.first(); - if (allocation == NULL) { + if (allocation == nullptr) { // Allocation queue is empty return; } @@ -717,15 +764,11 @@ void ZPageAllocator::satisfy_stalled() { // Note that we must dequeue the allocation request first, since // it will immediately be deallocated once it has been satisfied. _stalled.remove(allocation); - _satisfied.insert_last(allocation); - allocation->satisfy(ZPageAllocationStallSuccess); + allocation->satisfy(true); } } -void ZPageAllocator::free_page_inner(ZPage* page, bool reclaimed) { - // Update used statistics - decrease_used(page->size(), reclaimed); - +void ZPageAllocator::recycle_page(ZPage* page) { // Set time when last used page->set_last_used(); @@ -733,41 +776,96 @@ void ZPageAllocator::free_page_inner(ZPage* page, bool reclaimed) { _cache.free_page(page); } -void ZPageAllocator::free_page(ZPage* page, bool reclaimed) { +void ZPageAllocator::free_page(ZPage* page) { + const ZGenerationId generation_id = page->generation_id(); + ZPage* const to_recycle = _safe_recycle.register_and_clone_if_activated(page); + ZLocker locker(&_lock); + // Update used statistics + const size_t size = to_recycle->size(); + decrease_used(size); + decrease_used_generation(generation_id, size); + // Free page - free_page_inner(page, reclaimed); + recycle_page(to_recycle); // Try satisfy stalled allocations satisfy_stalled(); } -void ZPageAllocator::free_pages(const ZArray* pages, bool reclaimed) { +void ZPageAllocator::free_pages(const ZArray* pages) { + ZArray to_recycle; + + size_t young_size = 0; + size_t old_size = 0; + + ZArrayIterator pages_iter(pages); + for (ZPage* page; pages_iter.next(&page);) { + if (page->is_young()) { + young_size += page->size(); + } else { + old_size += page->size(); + } + to_recycle.push(_safe_recycle.register_and_clone_if_activated(page)); + } + ZLocker locker(&_lock); + // Update used statistics + decrease_used(young_size + old_size); + decrease_used_generation(ZGenerationId::young, young_size); + decrease_used_generation(ZGenerationId::old, old_size); + // Free pages - ZArrayIterator iter(pages); + ZArrayIterator iter(&to_recycle); for (ZPage* page; iter.next(&page);) { - free_page_inner(page, reclaimed); + recycle_page(page); } // Try satisfy stalled allocations satisfy_stalled(); } +void ZPageAllocator::free_pages_alloc_failed(ZPageAllocation* allocation) { + ZArray to_recycle; + + ZListRemoveIterator allocation_pages_iter(allocation->pages()); + for (ZPage* page; allocation_pages_iter.next(&page);) { + to_recycle.push(_safe_recycle.register_and_clone_if_activated(page)); + } + + ZLocker locker(&_lock); + + // Only decrease the overall used and not the generation used, + // since the allocation failed and generation used wasn't bumped. 
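free_pages() above tallies the young and old byte totals and runs each page through the safe-recycle hook before taking the allocator lock, so the critical section only applies precomputed updates. A simplified sketch of that gather-then-apply shape, with placeholder types:

```cpp
#include <cstddef>
#include <mutex>
#include <vector>

struct Page {
  size_t size;
  bool   young;
};

class PagePool {
private:
  std::mutex _lock;
  size_t     _used       = 0;
  size_t     _used_young = 0;
  size_t     _used_old   = 0;

  // Stands in for returning the page to the page cache (elided).
  void recycle(Page* page) { (void)page; }

public:
  void free_pages(const std::vector<Page*>& pages) {
    // Phase 1: tally per-generation totals outside the lock.
    size_t young_size = 0;
    size_t old_size = 0;
    for (Page* page : pages) {
      if (page->young) {
        young_size += page->size;
      } else {
        old_size += page->size;
      }
    }

    // Phase 2: one short critical section applies all updates and recycles.
    std::lock_guard<std::mutex> locker(_lock);
    _used       -= young_size + old_size;
    _used_young -= young_size;
    _used_old   -= old_size;
    for (Page* page : pages) {
      recycle(page);
    }
  }
};
```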
+ decrease_used(allocation->size()); + + size_t freed = 0; + + // Free any allocated/flushed pages + ZArrayIterator iter(&to_recycle); + for (ZPage* page; iter.next(&page);) { + freed += page->size(); + recycle_page(page); + } + + // Adjust capacity and used to reflect the failed capacity increase + const size_t remaining = allocation->size() - freed; + decrease_capacity(remaining, true /* set_max_capacity */); + + // Try satisfy stalled allocations + satisfy_stalled(); +} + size_t ZPageAllocator::uncommit(uint64_t* timeout) { // We need to join the suspendible thread set while manipulating capacity and - // used, to make sure GC safepoints will have a consistent view. However, when - // ZVerifyViews is enabled we need to join at a broader scope to also make sure - // we don't change the address good mask after pages have been flushed, and - // thereby made invisible to pages_do(), but before they have been unmapped. - SuspendibleThreadSetJoiner joiner(ZVerifyViews); + // used, to make sure GC safepoints will have a consistent view. ZList pages; size_t flushed; { - SuspendibleThreadSetJoiner joiner(!ZVerifyViews); + SuspendibleThreadSetJoiner sts_joiner; ZLocker locker(&_lock); // Never uncommit below min capacity. We flush out and uncommit chunks at @@ -798,7 +896,7 @@ size_t ZPageAllocator::uncommit(uint64_t* timeout) { } { - SuspendibleThreadSetJoiner joiner(!ZVerifyViews); + SuspendibleThreadSetJoiner sts_joiner; ZLocker locker(&_lock); // Adjust claimed and capacity to reflect the uncommit @@ -809,59 +907,88 @@ size_t ZPageAllocator::uncommit(uint64_t* timeout) { return flushed; } -void ZPageAllocator::enable_deferred_delete() const { - _safe_delete.enable_deferred_delete(); +void ZPageAllocator::enable_safe_destroy() const { + _safe_destroy.enable_deferred_delete(); } -void ZPageAllocator::disable_deferred_delete() const { - _safe_delete.disable_deferred_delete(); +void ZPageAllocator::disable_safe_destroy() const { + _safe_destroy.disable_deferred_delete(); } -void ZPageAllocator::debug_map_page(const ZPage* page) const { - assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - _physical.debug_map(page->start(), page->physical_memory()); +void ZPageAllocator::enable_safe_recycle() const { + _safe_recycle.activate(); } -void ZPageAllocator::debug_unmap_page(const ZPage* page) const { - assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - _physical.debug_unmap(page->start(), page->size()); +void ZPageAllocator::disable_safe_recycle() const { + _safe_recycle.deactivate(); } -void ZPageAllocator::pages_do(ZPageClosure* cl) const { - assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - - ZListIterator iter_satisfied(&_satisfied); - for (ZPageAllocation* allocation; iter_satisfied.next(&allocation);) { - ZListIterator iter_pages(allocation->pages()); - for (ZPage* page; iter_pages.next(&page);) { - cl->do_page(page); - } - } +static bool has_alloc_seen_young(const ZPageAllocation* allocation) { + return allocation->young_seqnum() != ZGeneration::young()->seqnum(); +} - _cache.pages_do(cl); +static bool has_alloc_seen_old(const ZPageAllocation* allocation) { + return allocation->old_seqnum() != ZGeneration::old()->seqnum(); } -bool ZPageAllocator::has_alloc_stalled() const { - return Atomic::load(&_nstalled) != 0; +bool ZPageAllocator::is_alloc_stalling() const { + ZLocker locker(&_lock); + return _stalled.first() != nullptr; } -void ZPageAllocator::check_out_of_memory() { +bool 
ZPageAllocator::is_alloc_stalling_for_old() const { ZLocker locker(&_lock); - // Fail allocation requests that were enqueued before the - // last GC cycle started, otherwise start a new GC cycle. - for (ZPageAllocation* allocation = _stalled.first(); allocation != NULL; allocation = _stalled.first()) { - if (allocation->seqnum() == ZGlobalSeqNum) { - // Start a new GC cycle, keep allocation requests enqueued - allocation->satisfy(ZPageAllocationStallStartGC); + ZPageAllocation* const allocation = _stalled.first(); + if (allocation == nullptr) { + // No stalled allocations + return false; + } + + return has_alloc_seen_young(allocation) && !has_alloc_seen_old(allocation); +} + +void ZPageAllocator::notify_out_of_memory() { + // Fail allocation requests that were enqueued before the last major GC started + for (ZPageAllocation* allocation = _stalled.first(); allocation != nullptr; allocation = _stalled.first()) { + if (!has_alloc_seen_old(allocation)) { + // Not out of memory, keep remaining allocation requests enqueued return; } - // Out of memory, fail allocation request + // Out of memory, dequeue and fail allocation request _stalled.remove(allocation); - _satisfied.insert_last(allocation); - allocation->satisfy(ZPageAllocationStallFailed); + allocation->satisfy(false); + } +} + +void ZPageAllocator::restart_gc() const { + ZPageAllocation* const allocation = _stalled.first(); + if (allocation == nullptr) { + // No stalled allocations + return; } + + if (!has_alloc_seen_young(allocation)) { + // Start asynchronous minor GC, keep allocation requests enqueued + const ZDriverRequest request(GCCause::_z_allocation_stall, ZYoungGCThreads, 0); + ZDriver::minor()->collect(request); + } else { + // Start asynchronous major GC, keep allocation requests enqueued + const ZDriverRequest request(GCCause::_z_allocation_stall, ZYoungGCThreads, ZOldGCThreads); + ZDriver::major()->collect(request); + } +} + +void ZPageAllocator::handle_alloc_stalling_for_young() { + ZLocker locker(&_lock); + restart_gc(); +} + +void ZPageAllocator::handle_alloc_stalling_for_old() { + ZLocker locker(&_lock); + notify_out_of_memory(); + restart_gc(); } void ZPageAllocator::threads_do(ThreadClosure* tc) const { diff --git a/src/hotspot/share/gc/z/zPageAllocator.hpp b/src/hotspot/share/gc/z/zPageAllocator.hpp index 963259977af30..3aaaeb41f2c75 100644 --- a/src/hotspot/share/gc/z/zPageAllocator.hpp +++ b/src/hotspot/share/gc/z/zPageAllocator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,18 +28,36 @@ #include "gc/z/zArray.hpp" #include "gc/z/zList.hpp" #include "gc/z/zLock.hpp" +#include "gc/z/zPageAge.hpp" #include "gc/z/zPageCache.hpp" +#include "gc/z/zPageType.hpp" #include "gc/z/zPhysicalMemory.hpp" #include "gc/z/zSafeDelete.hpp" #include "gc/z/zVirtualMemory.hpp" class ThreadClosure; +class ZGeneration; class ZPageAllocation; +class ZPageAllocator; class ZPageAllocatorStats; class ZWorkers; class ZUncommitter; class ZUnmapper; +class ZSafePageRecycle { +private: + ZPageAllocator* _page_allocator; + ZActivatedArray _unsafe_to_recycle; + +public: + ZSafePageRecycle(ZPageAllocator* page_allocator); + + void activate(); + void deactivate(); + + ZPage* register_and_clone_if_activated(ZPage* page); +}; + class ZPageAllocator { friend class VMStructs; friend class ZUnmapper; @@ -51,29 +69,32 @@ class ZPageAllocator { ZVirtualMemoryManager _virtual; ZPhysicalMemoryManager _physical; const size_t _min_capacity; + const size_t _initial_capacity; const size_t _max_capacity; volatile size_t _current_max_capacity; volatile size_t _capacity; volatile size_t _claimed; volatile size_t _used; - size_t _used_high; - size_t _used_low; - ssize_t _reclaimed; + size_t _used_generations[2]; + struct { + size_t _used_high; + size_t _used_low; + } _collection_stats[2]; ZList _stalled; - volatile uint64_t _nstalled; - ZList _satisfied; ZUnmapper* _unmapper; ZUncommitter* _uncommitter; - mutable ZSafeDelete _safe_delete; + mutable ZSafeDelete _safe_destroy; + mutable ZSafePageRecycle _safe_recycle; bool _initialized; - bool prime_cache(ZWorkers* workers, size_t size); - size_t increase_capacity(size_t size); void decrease_capacity(size_t size, bool set_max_capacity); - void increase_used(size_t size, bool relocation); - void decrease_used(size_t size, bool reclaimed); + void increase_used(size_t size); + void decrease_used(size_t size); + + void increase_used_generation(ZGenerationId id, size_t size); + void decrease_used_generation(ZGenerationId id, size_t size); bool commit_page(ZPage* page); void uncommit_page(ZPage* page); @@ -85,7 +106,7 @@ class ZPageAllocator { bool is_alloc_allowed(size_t size) const; - bool alloc_page_common_inner(uint8_t type, size_t size, ZList* pages); + bool alloc_page_common_inner(ZPageType type, size_t size, ZList* pages); bool alloc_page_common(ZPageAllocation* allocation); bool alloc_page_stall(ZPageAllocation* allocation); bool alloc_page_or_stall(ZPageAllocation* allocation); @@ -93,47 +114,56 @@ class ZPageAllocator { bool is_alloc_satisfied(ZPageAllocation* allocation) const; ZPage* alloc_page_create(ZPageAllocation* allocation); ZPage* alloc_page_finalize(ZPageAllocation* allocation); - void alloc_page_failed(ZPageAllocation* allocation); + void free_pages_alloc_failed(ZPageAllocation* allocation); void satisfy_stalled(); - void free_page_inner(ZPage* page, bool reclaimed); - size_t uncommit(uint64_t* timeout); + void notify_out_of_memory(); + void restart_gc() const; + public: - ZPageAllocator(ZWorkers* workers, - size_t min_capacity, + ZPageAllocator(size_t min_capacity, size_t initial_capacity, + size_t soft_max_capacity, size_t max_capacity); bool is_initialized() const; + bool prime_cache(ZWorkers* workers, size_t size); + + size_t initial_capacity() const; size_t min_capacity() const; size_t max_capacity() const; size_t soft_max_capacity() const; size_t capacity() const; size_t used() const; + size_t used_generation(ZGenerationId id) const; size_t unused() const; - 
ZPageAllocatorStats stats() const; + void promote_used(size_t size); - void reset_statistics(); + ZPageAllocatorStats stats(ZGeneration* generation) const; - ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags); - void free_page(ZPage* page, bool reclaimed); - void free_pages(const ZArray* pages, bool reclaimed); + void reset_statistics(ZGenerationId id); - void enable_deferred_delete() const; - void disable_deferred_delete() const; + ZPage* alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age); + void recycle_page(ZPage* page); + void safe_destroy_page(ZPage* page); + void free_page(ZPage* page); + void free_pages(const ZArray* pages); - void debug_map_page(const ZPage* page) const; - void debug_unmap_page(const ZPage* page) const; + void enable_safe_destroy() const; + void disable_safe_destroy() const; - bool has_alloc_stalled() const; - void check_out_of_memory(); + void enable_safe_recycle() const; + void disable_safe_recycle() const; - void pages_do(ZPageClosure* cl) const; + bool is_alloc_stalling() const; + bool is_alloc_stalling_for_old() const; + void handle_alloc_stalling_for_young(); + void handle_alloc_stalling_for_old(); void threads_do(ThreadClosure* tc) const; }; @@ -143,12 +173,15 @@ class ZPageAllocatorStats { size_t _min_capacity; size_t _max_capacity; size_t _soft_max_capacity; - size_t _current_max_capacity; size_t _capacity; size_t _used; size_t _used_high; size_t _used_low; - size_t _reclaimed; + size_t _used_generation; + size_t _freed; + size_t _promoted; + size_t _compacted; + size_t _allocation_stalls; public: ZPageAllocatorStats(size_t min_capacity, @@ -158,7 +191,11 @@ class ZPageAllocatorStats { size_t used, size_t used_high, size_t used_low, - size_t reclaimed); + size_t used_generation, + size_t freed, + size_t promoted, + size_t compacted, + size_t allocation_stalls); size_t min_capacity() const; size_t max_capacity() const; @@ -167,7 +204,11 @@ class ZPageAllocatorStats { size_t used() const; size_t used_high() const; size_t used_low() const; - size_t reclaimed() const; + size_t used_generation() const; + size_t freed() const; + size_t promoted() const; + size_t compacted() const; + size_t allocation_stalls() const; }; #endif // SHARE_GC_Z_ZPAGEALLOCATOR_HPP diff --git a/src/hotspot/share/gc/z/zPageAllocator.inline.hpp b/src/hotspot/share/gc/z/zPageAllocator.inline.hpp index 881765ee2a489..539fca128b630 100644 --- a/src/hotspot/share/gc/z/zPageAllocator.inline.hpp +++ b/src/hotspot/share/gc/z/zPageAllocator.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,11 @@ inline ZPageAllocatorStats::ZPageAllocatorStats(size_t min_capacity, size_t used, size_t used_high, size_t used_low, - size_t reclaimed) : + size_t used_generation, + size_t freed, + size_t promoted, + size_t compacted, + size_t allocation_stalls) : _min_capacity(min_capacity), _max_capacity(max_capacity), _soft_max_capacity(soft_max_capacity), @@ -41,7 +45,11 @@ inline ZPageAllocatorStats::ZPageAllocatorStats(size_t min_capacity, _used(used), _used_high(used_high), _used_low(used_low), - _reclaimed(reclaimed) {} + _used_generation(used_generation), + _freed(freed), + _promoted(promoted), + _compacted(compacted), + _allocation_stalls(allocation_stalls) {} inline size_t ZPageAllocatorStats::min_capacity() const { return _min_capacity; @@ -71,8 +79,24 @@ inline size_t ZPageAllocatorStats::used_low() const { return _used_low; } -inline size_t ZPageAllocatorStats::reclaimed() const { - return _reclaimed; +inline size_t ZPageAllocatorStats::used_generation() const { + return _used_generation; +} + +inline size_t ZPageAllocatorStats::freed() const { + return _freed; +} + +inline size_t ZPageAllocatorStats::promoted() const { + return _promoted; +} + +inline size_t ZPageAllocatorStats::compacted() const { + return _compacted; +} + +inline size_t ZPageAllocatorStats::allocation_stalls() const { + return _allocation_stalls; } #endif // SHARE_GC_Z_ZPAGEALLOCATOR_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zPageCache.cpp b/src/hotspot/share/gc/z/zPageCache.cpp index ae05b257244cc..04fbb57472855 100644 --- a/src/hotspot/share/gc/z/zPageCache.cpp +++ b/src/hotspot/share/gc/z/zPageCache.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -66,7 +66,7 @@ ZPage* ZPageCache::alloc_small_page() { // Try NUMA local page cache ZPage* const l1_page = _small.get(numa_id).remove_first(); - if (l1_page != NULL) { + if (l1_page != nullptr) { ZStatInc(ZCounterPageCacheHitL1); return l1_page; } @@ -80,7 +80,7 @@ ZPage* ZPageCache::alloc_small_page() { } ZPage* const l2_page = _small.get(remote_numa_id).remove_first(); - if (l2_page != NULL) { + if (l2_page != nullptr) { ZStatInc(ZCounterPageCacheHitL2); return l2_page; } @@ -88,17 +88,17 @@ ZPage* ZPageCache::alloc_small_page() { remote_numa_id++; } - return NULL; + return nullptr; } ZPage* ZPageCache::alloc_medium_page() { ZPage* const page = _medium.remove_first(); - if (page != NULL) { + if (page != nullptr) { ZStatInc(ZCounterPageCacheHitL1); return page; } - return NULL; + return nullptr; } ZPage* ZPageCache::alloc_large_page(size_t size) { @@ -113,7 +113,7 @@ ZPage* ZPageCache::alloc_large_page(size_t size) { } } - return NULL; + return nullptr; } ZPage* ZPageCache::alloc_oversized_medium_page(size_t size) { @@ -121,7 +121,7 @@ ZPage* ZPageCache::alloc_oversized_medium_page(size_t size) { return _medium.remove_first(); } - return NULL; + return nullptr; } ZPage* ZPageCache::alloc_oversized_large_page(size_t size) { @@ -135,38 +135,38 @@ ZPage* ZPageCache::alloc_oversized_large_page(size_t size) { } } - return NULL; + return nullptr; } ZPage* ZPageCache::alloc_oversized_page(size_t size) { ZPage* page = alloc_oversized_large_page(size); - if (page == NULL) { + if (page == nullptr) { page = alloc_oversized_medium_page(size); } - if (page != NULL) { + if (page != nullptr) { ZStatInc(ZCounterPageCacheHitL3); } return page; } -ZPage* ZPageCache::alloc_page(uint8_t type, size_t size) { +ZPage* ZPageCache::alloc_page(ZPageType type, size_t size) { ZPage* page; // Try allocate exact page - if (type == ZPageTypeSmall) { + if (type == ZPageType::small) { page = alloc_small_page(); - } else if (type == ZPageTypeMedium) { + } else if (type == ZPageType::medium) { page = alloc_medium_page(); } else { page = alloc_large_page(size); } - if (page == NULL) { + if (page == nullptr) { // Try allocate potentially oversized page ZPage* const oversized = alloc_oversized_page(size); - if (oversized != NULL) { + if (oversized != nullptr) { if (size < oversized->size()) { // Split oversized page page = oversized->split(type, size); @@ -180,7 +180,7 @@ ZPage* ZPageCache::alloc_page(uint8_t type, size_t size) { } } - if (page == NULL) { + if (page == nullptr) { ZStatInc(ZCounterPageCacheMiss); } @@ -188,10 +188,10 @@ ZPage* ZPageCache::alloc_page(uint8_t type, size_t size) { } void ZPageCache::free_page(ZPage* page) { - const uint8_t type = page->type(); - if (type == ZPageTypeSmall) { + const ZPageType type = page->type(); + if (type == ZPageType::small) { _small.get(page->numa_id()).insert_first(page); - } else if (type == ZPageTypeMedium) { + } else if (type == ZPageType::medium) { _medium.insert_first(page); } else { _large.insert_first(page); @@ -200,7 +200,7 @@ void ZPageCache::free_page(ZPage* page) { bool ZPageCache::flush_list_inner(ZPageCacheFlushClosure* cl, ZList* from, ZList* to) { ZPage* const page = from->last(); - if (page == NULL || !cl->do_page(page)) { + if (page == nullptr || !cl->do_page(page)) { // Don't flush page return false; } @@ -222,7 +222,7 @@ void ZPageCache::flush_per_numa_lists(ZPageCacheFlushClosure* cl, ZPerNUMA* numa_list = from->addr(numa_next); + ZList* const numa_list = from->addr(numa_next); if 
(++numa_next == numa_count) { numa_next = 0; } @@ -331,26 +331,3 @@ size_t ZPageCache::flush_for_uncommit(size_t requested, ZList* to, uint64 void ZPageCache::set_last_commit() { _last_commit = ceil(os::elapsedTime()); } - -void ZPageCache::pages_do(ZPageClosure* cl) const { - // Small - ZPerNUMAConstIterator > iter_numa(&_small); - for (const ZList* list; iter_numa.next(&list);) { - ZListIterator iter_small(list); - for (ZPage* page; iter_small.next(&page);) { - cl->do_page(page); - } - } - - // Medium - ZListIterator iter_medium(&_medium); - for (ZPage* page; iter_medium.next(&page);) { - cl->do_page(page); - } - - // Large - ZListIterator iter_large(&_large); - for (ZPage* page; iter_large.next(&page);) { - cl->do_page(page); - } -} diff --git a/src/hotspot/share/gc/z/zPageCache.hpp b/src/hotspot/share/gc/z/zPageCache.hpp index b641e0e4be144..b28aaa6c10d40 100644 --- a/src/hotspot/share/gc/z/zPageCache.hpp +++ b/src/hotspot/share/gc/z/zPageCache.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "gc/z/zList.hpp" #include "gc/z/zPage.hpp" +#include "gc/z/zPageType.hpp" #include "gc/z/zValue.hpp" class ZPageCacheFlushClosure; @@ -53,15 +54,13 @@ class ZPageCache { public: ZPageCache(); - ZPage* alloc_page(uint8_t type, size_t size); + ZPage* alloc_page(ZPageType type, size_t size); void free_page(ZPage* page); void flush_for_allocation(size_t requested, ZList* to); size_t flush_for_uncommit(size_t requested, ZList* to, uint64_t* timeout); void set_last_commit(); - - void pages_do(ZPageClosure* cl) const; }; #endif // SHARE_GC_Z_ZPAGECACHE_HPP diff --git a/src/hotspot/share/gc/z/zPageTable.cpp b/src/hotspot/share/gc/z/zPageTable.cpp index d462421097b7b..299f70eaad621 100644 --- a/src/hotspot/share/gc/z/zPageTable.cpp +++ b/src/hotspot/share/gc/z/zPageTable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +22,7 @@ */ #include "precompiled.hpp" -#include "gc/z/zGlobals.hpp" +#include "gc/z/zAddress.hpp" #include "gc/z/zGranuleMap.inline.hpp" #include "gc/z/zPage.inline.hpp" #include "gc/z/zPageTable.inline.hpp" @@ -33,21 +33,63 @@ ZPageTable::ZPageTable() : _map(ZAddressOffsetMax) {} void ZPageTable::insert(ZPage* page) { - const uintptr_t offset = page->start(); + const zoffset offset = page->start(); const size_t size = page->size(); // Make sure a newly created page is // visible before updating the page table. 
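The ZPageTable::insert() hunk continuing below issues a store-store barrier so a page's fields are visible before its pointer is published in the table for concurrent readers. In portable C++ the same publication idiom is a release store paired with an acquire load; a small standalone sketch, not the ZGranuleMap code:

```cpp
#include <atomic>
#include <cstddef>

struct Page {
  size_t start = 0;
  size_t size  = 0;
};

// One table slot shared between the inserting thread and concurrent readers.
std::atomic<Page*> slot{nullptr};

void publish(Page* page) {
  // Release: all stores that initialized *page happen-before the pointer
  // becomes visible to an acquire load of the slot.
  slot.store(page, std::memory_order_release);
}

Page* lookup() {
  // Pairs with the release store; a non-null result sees an initialized page.
  return slot.load(std::memory_order_acquire);
}
```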
OrderAccess::storestore(); - assert(_map.get(offset) == NULL, "Invalid entry"); + assert(_map.get(offset) == nullptr, "Invalid entry"); _map.put(offset, size, page); + + if (page->is_old()) { + ZGeneration::young()->register_with_remset(page); + } } void ZPageTable::remove(ZPage* page) { - const uintptr_t offset = page->start(); + const zoffset offset = page->start(); const size_t size = page->size(); assert(_map.get(offset) == page, "Invalid entry"); - _map.put(offset, size, NULL); + _map.put(offset, size, nullptr); +} + +void ZPageTable::replace(ZPage* old_page, ZPage* new_page) { + const zoffset offset = old_page->start(); + const size_t size = old_page->size(); + + assert(_map.get(offset) == old_page, "Invalid entry"); + _map.release_put(offset, size, new_page); + + if (new_page->is_old()) { + ZGeneration::young()->register_with_remset(new_page); + } +} + +ZGenerationPagesParallelIterator::ZGenerationPagesParallelIterator(const ZPageTable* page_table, ZGenerationId id, ZPageAllocator* page_allocator) : + _iterator(page_table), + _generation_id(id), + _page_allocator(page_allocator) { + _page_allocator->enable_safe_destroy(); + _page_allocator->enable_safe_recycle(); +} + +ZGenerationPagesParallelIterator::~ZGenerationPagesParallelIterator() { + _page_allocator->disable_safe_recycle(); + _page_allocator->disable_safe_destroy(); +} + +ZGenerationPagesIterator::ZGenerationPagesIterator(const ZPageTable* page_table, ZGenerationId id, ZPageAllocator* page_allocator) : + _iterator(page_table), + _generation_id(id), + _page_allocator(page_allocator) { + _page_allocator->enable_safe_destroy(); + _page_allocator->enable_safe_recycle(); +} + +ZGenerationPagesIterator::~ZGenerationPagesIterator() { + _page_allocator->disable_safe_recycle(); + _page_allocator->disable_safe_destroy(); } diff --git a/src/hotspot/share/gc/z/zPageTable.hpp b/src/hotspot/share/gc/z/zPageTable.hpp index fccb61b584399..e809a0ac0ca85 100644 --- a/src/hotspot/share/gc/z/zPageTable.hpp +++ b/src/hotspot/share/gc/z/zPageTable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,14 +24,20 @@ #ifndef SHARE_GC_Z_ZPAGETABLE_HPP #define SHARE_GC_Z_ZPAGETABLE_HPP +#include "gc/z/zGenerationId.hpp" #include "gc/z/zGranuleMap.hpp" +#include "gc/z/zIndexDistributor.hpp" #include "memory/allocation.hpp" class ZPage; +class ZPageAllocator; +class ZPageTable; class ZPageTable { - friend class VMStructs; friend class ZPageTableIterator; + friend class ZPageTableParallelIterator; + friend class ZRemsetTableIterator; + friend class VMStructs; private: ZGranuleMap _map; @@ -39,21 +45,66 @@ class ZPageTable { public: ZPageTable(); - ZPage* get(uintptr_t addr) const; + ZPage* get(zaddress addr) const; + ZPage* get(volatile zpointer* p) const; + + ZPage* at(size_t index) const; void insert(ZPage* page); void remove(ZPage* page); + void replace(ZPage* old_page, ZPage* new_page); }; class ZPageTableIterator : public StackObj { private: - ZGranuleMapIterator _iter; - ZPage* _prev; + ZGranuleMapIterator _iter; + ZPage* _prev; + +public: + ZPageTableIterator(const ZPageTable* table); + + bool next(ZPage** page); +}; + +class ZPageTableParallelIterator : public StackObj { + const ZPageTable* _table; + ZIndexDistributor _index_distributor; + +public: + ZPageTableParallelIterator(const ZPageTable* table); + + template + void do_pages(Function function); +}; + +class ZGenerationPagesIterator : public StackObj { +private: + ZPageTableIterator _iterator; + ZGenerationId _generation_id; + ZPageAllocator* _page_allocator; public: - ZPageTableIterator(const ZPageTable* page_table); + ZGenerationPagesIterator(const ZPageTable* page_table, ZGenerationId id, ZPageAllocator* page_allocator); + ~ZGenerationPagesIterator(); bool next(ZPage** page); + + template + void yield(Function function); +}; + +class ZGenerationPagesParallelIterator : public StackObj { +private: + ZPageTableParallelIterator _iterator; + ZGenerationId _generation_id; + ZPageAllocator* _page_allocator; + +public: + ZGenerationPagesParallelIterator(const ZPageTable* page_table, ZGenerationId id, ZPageAllocator* page_allocator); + ~ZGenerationPagesParallelIterator(); + + template + void do_pages(Function function); }; #endif // SHARE_GC_Z_ZPAGETABLE_HPP diff --git a/src/hotspot/share/gc/z/zPageTable.inline.hpp b/src/hotspot/share/gc/z/zPageTable.inline.hpp index d068958615d78..16beeda89f200 100644 --- a/src/hotspot/share/gc/z/zPageTable.inline.hpp +++ b/src/hotspot/share/gc/z/zPageTable.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,19 +28,30 @@ #include "gc/z/zAddress.inline.hpp" #include "gc/z/zGranuleMap.inline.hpp" +#include "gc/z/zIndexDistributor.inline.hpp" +#include "gc/z/zPage.inline.hpp" +#include "gc/z/zPageAllocator.inline.hpp" -inline ZPage* ZPageTable::get(uintptr_t addr) const { - assert(!ZAddress::is_null(addr), "Invalid address"); +inline ZPage* ZPageTable::get(zaddress addr) const { + assert(!is_null(addr), "Invalid address"); return _map.get(ZAddress::offset(addr)); } -inline ZPageTableIterator::ZPageTableIterator(const ZPageTable* page_table) : - _iter(&page_table->_map), - _prev(NULL) {} +inline ZPage* ZPageTable::get(volatile zpointer* p) const { + return get(to_zaddress((uintptr_t)p)); +} + +inline ZPage* ZPageTable::at(size_t index) const { + return _map.at(index); +} + +inline ZPageTableIterator::ZPageTableIterator(const ZPageTable* table) : + _iter(&table->_map), + _prev(nullptr) {} inline bool ZPageTableIterator::next(ZPage** page) { for (ZPage* entry; _iter.next(&entry);) { - if (entry != NULL && entry != _prev) { + if (entry != nullptr && entry != _prev) { // Next page found *page = _prev = entry; return true; @@ -51,4 +62,54 @@ inline bool ZPageTableIterator::next(ZPage** page) { return false; } +inline ZPageTableParallelIterator::ZPageTableParallelIterator(const ZPageTable* table) : + _table(table), + _index_distributor(int(ZAddressOffsetMax >> ZGranuleSizeShift)) {} + +template +inline void ZPageTableParallelIterator::do_pages(Function function) { + _index_distributor.do_indices([&](int index) { + ZPage* const page = _table->at(index); + if (page != nullptr) { + const size_t start_index = untype(page->start()) >> ZGranuleSizeShift; + if (size_t(index) == start_index) { + // Next page found + return function(page); + } + } + return true; + }); +} + +inline bool ZGenerationPagesIterator::next(ZPage** page) { + while (_iterator.next(page)) { + if ((*page)->generation_id() == _generation_id) { + return true; + } + } + + return false; +} + +template +inline void ZGenerationPagesIterator::yield(Function function) { + _page_allocator->disable_safe_destroy(); + _page_allocator->disable_safe_recycle(); + + function(); + + _page_allocator->enable_safe_recycle(); + _page_allocator->enable_safe_destroy(); +} + +template +inline void ZGenerationPagesParallelIterator::do_pages(Function function) { + _iterator.do_pages([&](ZPage* page) { + if (page->generation_id() == _generation_id) { + return function(page); + } + return true; + }); +} + #endif // SHARE_GC_Z_ZPAGETABLE_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zPageType.hpp b/src/hotspot/share/gc/z/zPageType.hpp new file mode 100644 index 0000000000000..21b49305fae2b --- /dev/null +++ b/src/hotspot/share/gc/z/zPageType.hpp @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZPAGETYPE_HPP +#define SHARE_GC_Z_ZPAGETYPE_HPP + +#include "utilities/globalDefinitions.hpp" + +enum class ZPageType : uint8_t { + small, + medium, + large +}; + +#endif // SHARE_GC_Z_ZPAGETYPE_HPP diff --git a/src/hotspot/share/gc/z/zPhysicalMemory.cpp b/src/hotspot/share/gc/z/zPhysicalMemory.cpp index eb889f4e4f2c3..f5e0a9c0994e2 100644 --- a/src/hotspot/share/gc/z/zPhysicalMemory.cpp +++ b/src/hotspot/share/gc/z/zPhysicalMemory.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -73,11 +73,11 @@ size_t ZPhysicalMemory::size() const { return size; } -void ZPhysicalMemory::insert_segment(int index, uintptr_t start, size_t size, bool committed) { +void ZPhysicalMemory::insert_segment(int index, zoffset start, size_t size, bool committed) { _segments.insert_before(index, ZPhysicalMemorySegment(start, size, committed)); } -void ZPhysicalMemory::replace_segment(int index, uintptr_t start, size_t size, bool committed) { +void ZPhysicalMemory::replace_segment(int index, zoffset start, size_t size, bool committed) { _segments.at_put(index, ZPhysicalMemorySegment(start, size, committed)); } @@ -108,7 +108,7 @@ void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) { if (is_mergable(_segments.at(current), segment)) { if (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) { // Merge with end of current segment and start of next segment - const size_t start = _segments.at(current).start(); + const zoffset start = _segments.at(current).start(); const size_t size = _segments.at(current).size() + segment.size() + _segments.at(current + 1).size(); replace_segment(current, start, size, segment.is_committed()); remove_segment(current + 1); @@ -116,13 +116,13 @@ void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) { } // Merge with end of current segment - const size_t start = _segments.at(current).start(); + const zoffset start = _segments.at(current).start(); const size_t size = _segments.at(current).size() + segment.size(); replace_segment(current, start, size, segment.is_committed()); return; } else if (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) { // Merge with start of next segment - const size_t start = segment.start(); + const zoffset start = segment.start(); const size_t size = segment.size() + _segments.at(current + 1).size(); replace_segment(current + 1, start, size, segment.is_committed()); return; @@ -136,7 +136,7 @@ void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) { if (_segments.length() > 0 && is_mergable(segment, _segments.at(0))) { // Merge with start of first segment - const size_t start = segment.start(); + const zoffset start = segment.start(); const size_t size = segment.size() + _segments.at(0).size(); replace_segment(0, start, size, segment.is_committed()); return; @@ 
-234,7 +234,7 @@ ZPhysicalMemory ZPhysicalMemory::split_committed() { ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity) : _backing(max_capacity) { // Make the whole range free - _manager.free(0, max_capacity); + _manager.free(zoffset(0), max_capacity); } bool ZPhysicalMemoryManager::is_initialized() const { @@ -264,7 +264,7 @@ void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max // Test if uncommit is supported by the operating system by committing // and then uncommitting a granule. - ZPhysicalMemory pmem(ZPhysicalMemorySegment(0, ZGranuleSize, false /* committed */)); + ZPhysicalMemory pmem(ZPhysicalMemorySegment(zoffset(0), ZGranuleSize, false /* committed */)); if (!commit(pmem) || !uncommit(pmem)) { log_info_p(gc, init)("Uncommit: Implicitly Disabled (Not supported by operating system)"); FLAG_SET_ERGO(ZUncommit, false); @@ -275,17 +275,16 @@ void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max log_info_p(gc, init)("Uncommit Delay: " UINTX_FORMAT "s", ZUncommitDelay); } -void ZPhysicalMemoryManager::nmt_commit(uintptr_t offset, size_t size) const { - // From an NMT point of view we treat the first heap view (marked0) as committed - const uintptr_t addr = ZAddress::marked0(offset); - MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC); +void ZPhysicalMemoryManager::nmt_commit(zoffset offset, size_t size) const { + const zaddress addr = ZOffset::address(offset); + MemTracker::record_virtual_memory_commit((void*)untype(addr), size, CALLER_PC); } -void ZPhysicalMemoryManager::nmt_uncommit(uintptr_t offset, size_t size) const { +void ZPhysicalMemoryManager::nmt_uncommit(zoffset offset, size_t size) const { if (MemTracker::enabled()) { - const uintptr_t addr = ZAddress::marked0(offset); + const zaddress addr = ZOffset::address(offset); Tracker tracker(Tracker::uncommit); - tracker.record((address)addr, size); + tracker.record((address)untype(addr), size); } } @@ -295,8 +294,8 @@ void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) { // Allocate segments while (size > 0) { size_t allocated = 0; - const uintptr_t start = _manager.alloc_low_address_at_most(size, &allocated); - assert(start != UINTPTR_MAX, "Allocation should never fail"); + const zoffset start = _manager.alloc_low_address_at_most(size, &allocated); + assert(start != zoffset(UINTPTR_MAX), "Allocation should never fail"); pmem.add_segment(ZPhysicalMemorySegment(start, allocated, false /* committed */)); size -= allocated; } @@ -352,12 +351,12 @@ bool ZPhysicalMemoryManager::uncommit(ZPhysicalMemory& pmem) { return true; } -void ZPhysicalMemoryManager::pretouch_view(uintptr_t addr, size_t size) const { +void ZPhysicalMemoryManager::pretouch_view(zaddress addr, size_t size) const { const size_t page_size = ZLargePages::is_explicit() ? 
ZGranuleSize : os::vm_page_size(); - os::pretouch_memory((void*)addr, (void*)(addr + size), page_size); + os::pretouch_memory((void*)untype(addr), (void*)(untype(addr) + size), page_size); } -void ZPhysicalMemoryManager::map_view(uintptr_t addr, const ZPhysicalMemory& pmem) const { +void ZPhysicalMemoryManager::map_view(zaddress_unsafe addr, const ZPhysicalMemory& pmem) const { size_t size = 0; // Map segments @@ -376,60 +375,27 @@ void ZPhysicalMemoryManager::map_view(uintptr_t addr, const ZPhysicalMemory& pme } } -void ZPhysicalMemoryManager::unmap_view(uintptr_t addr, size_t size) const { +void ZPhysicalMemoryManager::unmap_view(zaddress_unsafe addr, size_t size) const { _backing.unmap(addr, size); } -void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const { - if (ZVerifyViews) { - // Pre-touch good view - pretouch_view(ZAddress::good(offset), size); - } else { - // Pre-touch all views - pretouch_view(ZAddress::marked0(offset), size); - pretouch_view(ZAddress::marked1(offset), size); - pretouch_view(ZAddress::remapped(offset), size); - } +void ZPhysicalMemoryManager::pretouch(zoffset offset, size_t size) const { + // Pre-touch all views + pretouch_view(ZOffset::address(offset), size); } -void ZPhysicalMemoryManager::map(uintptr_t offset, const ZPhysicalMemory& pmem) const { +void ZPhysicalMemoryManager::map(zoffset offset, const ZPhysicalMemory& pmem) const { const size_t size = pmem.size(); - if (ZVerifyViews) { - // Map good view - map_view(ZAddress::good(offset), pmem); - } else { - // Map all views - map_view(ZAddress::marked0(offset), pmem); - map_view(ZAddress::marked1(offset), pmem); - map_view(ZAddress::remapped(offset), pmem); - } + // Map all views + map_view(ZOffset::address_unsafe(offset), pmem); nmt_commit(offset, size); } -void ZPhysicalMemoryManager::unmap(uintptr_t offset, size_t size) const { +void ZPhysicalMemoryManager::unmap(zoffset offset, size_t size) const { nmt_uncommit(offset, size); - if (ZVerifyViews) { - // Unmap good view - unmap_view(ZAddress::good(offset), size); - } else { - // Unmap all views - unmap_view(ZAddress::marked0(offset), size); - unmap_view(ZAddress::marked1(offset), size); - unmap_view(ZAddress::remapped(offset), size); - } -} - -void ZPhysicalMemoryManager::debug_map(uintptr_t offset, const ZPhysicalMemory& pmem) const { - // Map good view - assert(ZVerifyViews, "Should be enabled"); - map_view(ZAddress::good(offset), pmem); -} - -void ZPhysicalMemoryManager::debug_unmap(uintptr_t offset, size_t size) const { - // Unmap good view - assert(ZVerifyViews, "Should be enabled"); - unmap_view(ZAddress::good(offset), size); + // Unmap all views + unmap_view(ZOffset::address_unsafe(offset), size); } diff --git a/src/hotspot/share/gc/z/zPhysicalMemory.hpp b/src/hotspot/share/gc/z/zPhysicalMemory.hpp index 8332eca32e122..0cf77dad7391d 100644 --- a/src/hotspot/share/gc/z/zPhysicalMemory.hpp +++ b/src/hotspot/share/gc/z/zPhysicalMemory.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #ifndef SHARE_GC_Z_ZPHYSICALMEMORY_HPP #define SHARE_GC_Z_ZPHYSICALMEMORY_HPP +#include "gc/z/zAddress.hpp" #include "gc/z/zArray.hpp" #include "gc/z/zMemory.hpp" #include "memory/allocation.hpp" @@ -31,16 +32,16 @@ class ZPhysicalMemorySegment : public CHeapObj { private: - uintptr_t _start; - uintptr_t _end; - bool _committed; + zoffset _start; + zoffset _end; + bool _committed; public: ZPhysicalMemorySegment(); - ZPhysicalMemorySegment(uintptr_t start, size_t size, bool committed); + ZPhysicalMemorySegment(zoffset start, size_t size, bool committed); - uintptr_t start() const; - uintptr_t end() const; + zoffset start() const; + zoffset end() const; size_t size() const; bool is_committed() const; @@ -51,8 +52,8 @@ class ZPhysicalMemory { private: ZArray _segments; - void insert_segment(int index, uintptr_t start, size_t size, bool committed); - void replace_segment(int index, uintptr_t start, size_t size, bool committed); + void insert_segment(int index, zoffset start, size_t size, bool committed); + void replace_segment(int index, zoffset start, size_t size, bool committed); void remove_segment(int index); public: @@ -83,12 +84,12 @@ class ZPhysicalMemoryManager { ZPhysicalMemoryBacking _backing; ZMemoryManager _manager; - void nmt_commit(uintptr_t offset, size_t size) const; - void nmt_uncommit(uintptr_t offset, size_t size) const; + void nmt_commit(zoffset offset, size_t size) const; + void nmt_uncommit(zoffset offset, size_t size) const; - void pretouch_view(uintptr_t addr, size_t size) const; - void map_view(uintptr_t addr, const ZPhysicalMemory& pmem) const; - void unmap_view(uintptr_t addr, size_t size) const; + void pretouch_view(zaddress addr, size_t size) const; + void map_view(zaddress_unsafe addr, const ZPhysicalMemory& pmem) const; + void unmap_view(zaddress_unsafe addr, size_t size) const; public: ZPhysicalMemoryManager(size_t max_capacity); @@ -104,13 +105,10 @@ class ZPhysicalMemoryManager { bool commit(ZPhysicalMemory& pmem); bool uncommit(ZPhysicalMemory& pmem); - void pretouch(uintptr_t offset, size_t size) const; + void pretouch(zoffset offset, size_t size) const; - void map(uintptr_t offset, const ZPhysicalMemory& pmem) const; - void unmap(uintptr_t offset, size_t size) const; - - void debug_map(uintptr_t offset, const ZPhysicalMemory& pmem) const; - void debug_unmap(uintptr_t offset, size_t size) const; + void map(zoffset offset, const ZPhysicalMemory& pmem) const; + void unmap(zoffset offset, size_t size) const; }; #endif // SHARE_GC_Z_ZPHYSICALMEMORY_HPP diff --git a/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp b/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp index b241137993501..c6c434f57b63b 100644 --- a/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp +++ b/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,20 +30,20 @@ #include "utilities/debug.hpp" inline ZPhysicalMemorySegment::ZPhysicalMemorySegment() : - _start(UINTPTR_MAX), - _end(UINTPTR_MAX), + _start(zoffset(UINTPTR_MAX)), + _end(zoffset(UINTPTR_MAX)), _committed(false) {} -inline ZPhysicalMemorySegment::ZPhysicalMemorySegment(uintptr_t start, size_t size, bool committed) : +inline ZPhysicalMemorySegment::ZPhysicalMemorySegment(zoffset start, size_t size, bool committed) : _start(start), _end(start + size), _committed(committed) {} -inline uintptr_t ZPhysicalMemorySegment::start() const { +inline zoffset ZPhysicalMemorySegment::start() const { return _start; } -inline uintptr_t ZPhysicalMemorySegment::end() const { +inline zoffset ZPhysicalMemorySegment::end() const { return _end; } diff --git a/src/hotspot/share/gc/z/zReferenceProcessor.cpp b/src/hotspot/share/gc/z/zReferenceProcessor.cpp index 042323c8777b6..6da4cbdf4da63 100644 --- a/src/hotspot/share/gc/z/zReferenceProcessor.cpp +++ b/src/hotspot/share/gc/z/zReferenceProcessor.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,9 @@ #include "classfile/javaClasses.inline.hpp" #include "gc/shared/referencePolicy.hpp" #include "gc/shared/referenceProcessorStats.hpp" +#include "gc/shared/suspendibleThreadSet.hpp" +#include "gc/z/zCollectedHeap.hpp" +#include "gc/z/zDriver.hpp" #include "gc/z/zHeap.inline.hpp" #include "gc/z/zReferenceProcessor.hpp" #include "gc/z/zStat.hpp" @@ -32,15 +35,16 @@ #include "gc/z/zTracer.inline.hpp" #include "gc/z/zValue.inline.hpp" #include "memory/universe.hpp" +#include "oops/access.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/os.hpp" -static const ZStatSubPhase ZSubPhaseConcurrentReferencesProcess("Concurrent References Process"); -static const ZStatSubPhase ZSubPhaseConcurrentReferencesEnqueue("Concurrent References Enqueue"); +static const ZStatSubPhase ZSubPhaseConcurrentReferencesProcess("Concurrent References Process", ZGenerationId::old); +static const ZStatSubPhase ZSubPhaseConcurrentReferencesEnqueue("Concurrent References Enqueue", ZGenerationId::old); -static ReferenceType reference_type(oop reference) { - return InstanceKlass::cast(reference->klass())->reference_type(); +static ReferenceType reference_type(zaddress reference) { + return InstanceKlass::cast(to_oop(reference)->klass())->reference_type(); } static const char* reference_type_name(ReferenceType type) { @@ -63,56 +67,58 @@ static const char* reference_type_name(ReferenceType type) { } } -static volatile oop* reference_referent_addr(oop reference) { - return (volatile oop*)java_lang_ref_Reference::referent_addr_raw(reference); +static volatile zpointer* reference_referent_addr(zaddress reference) { + return (volatile zpointer*)java_lang_ref_Reference::referent_addr_raw(to_oop(reference)); } -static oop reference_referent(oop reference) { - return Atomic::load(reference_referent_addr(reference)); +static zpointer reference_referent(zaddress reference) { + return ZBarrier::load_atomic(reference_referent_addr(reference)); } -static void reference_clear_referent(oop reference) { - java_lang_ref_Reference::clear_referent_raw(reference); +static zaddress 
reference_discovered(zaddress reference) { + return to_zaddress(java_lang_ref_Reference::discovered(to_oop(reference))); } -static oop* reference_discovered_addr(oop reference) { - return (oop*)java_lang_ref_Reference::discovered_addr_raw(reference); +static void reference_set_discovered(zaddress reference, zaddress discovered) { + java_lang_ref_Reference::set_discovered(to_oop(reference), to_oop(discovered)); } -static oop reference_discovered(oop reference) { - return *reference_discovered_addr(reference); +static zaddress reference_next(zaddress reference) { + return to_zaddress(java_lang_ref_Reference::next(to_oop(reference))); } -static void reference_set_discovered(oop reference, oop discovered) { - java_lang_ref_Reference::set_discovered_raw(reference, discovered); -} - -static oop* reference_next_addr(oop reference) { - return (oop*)java_lang_ref_Reference::next_addr_raw(reference); -} - -static oop reference_next(oop reference) { - return *reference_next_addr(reference); -} - -static void reference_set_next(oop reference, oop next) { - java_lang_ref_Reference::set_next_raw(reference, next); +static void reference_set_next(zaddress reference, zaddress next) { + java_lang_ref_Reference::set_next(to_oop(reference), to_oop(next)); } static void soft_reference_update_clock() { + SuspendibleThreadSetJoiner sts_joiner; const jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; java_lang_ref_SoftReference::set_clock(now); } +static void list_append(zaddress& head, zaddress& tail, zaddress reference) { + if (is_null(head)) { + // First append - set up the head + head = reference; + } else { + // Not first append, link tail + reference_set_discovered(tail, reference); + } + + // Always set tail + tail = reference; +} + ZReferenceProcessor::ZReferenceProcessor(ZWorkers* workers) : _workers(workers), - _soft_reference_policy(NULL), + _soft_reference_policy(nullptr), _encountered_count(), _discovered_count(), _enqueued_count(), - _discovered_list(NULL), - _pending_list(NULL), - _pending_list_tail(_pending_list.addr()) {} + _discovered_list(zaddress::null), + _pending_list(zaddress::null), + _pending_list_tail(zaddress::null) {} void ZReferenceProcessor::set_soft_reference_policy(bool clear) { static AlwaysClearPolicy always_clear_policy; @@ -128,23 +134,27 @@ void ZReferenceProcessor::set_soft_reference_policy(bool clear) { _soft_reference_policy->setup(); } -bool ZReferenceProcessor::is_inactive(oop reference, oop referent, ReferenceType type) const { +bool ZReferenceProcessor::is_inactive(zaddress reference, oop referent, ReferenceType type) const { if (type == REF_FINAL) { // A FinalReference is inactive if its next field is non-null. An application can't // call enqueue() or clear() on a FinalReference. - return reference_next(reference) != NULL; + return !is_null(reference_next(reference)); } else { + // Verification + (void)to_zaddress(referent); + // A non-FinalReference is inactive if the referent is null. The referent can only // be null if the application called Reference.enqueue() or Reference.clear(). 
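The new list_append() helper above chains discovered references intrusively through the discovered field, tracking both head and tail so appends are O(1). The same pattern on a plain intrusive list, with illustrative types rather than the java.lang.ref.Reference layout (the is_inactive() hunk resumes after this sketch):

```cpp
struct Node {
  Node* next = nullptr;  // stands in for the Reference.discovered field
};

struct IntrusiveList {
  Node* head = nullptr;
  Node* tail = nullptr;

  void append(Node* node) {
    if (head == nullptr) {
      // First append - set up the head
      head = node;
    } else {
      // Not the first append - link the previous tail to the new node
      tail->next = node;
    }
    // Always advance the tail
    tail = node;
  }
};
```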
- return referent == NULL; + return referent == nullptr; } } bool ZReferenceProcessor::is_strongly_live(oop referent) const { - return ZHeap::heap()->is_object_strongly_live(ZOop::to_address(referent)); + const zaddress addr = to_zaddress(referent); + return ZHeap::heap()->is_young(addr) || ZHeap::heap()->is_object_strongly_live(to_zaddress(referent)); } -bool ZReferenceProcessor::is_softly_live(oop reference, ReferenceType type) const { +bool ZReferenceProcessor::is_softly_live(zaddress reference, ReferenceType type) const { if (type != REF_SOFT) { // Not a SoftReference return false; @@ -153,18 +163,22 @@ bool ZReferenceProcessor::is_softly_live(oop reference, ReferenceType type) cons // Ask SoftReference policy const jlong clock = java_lang_ref_SoftReference::clock(); assert(clock != 0, "Clock not initialized"); - assert(_soft_reference_policy != NULL, "Policy not initialized"); - return !_soft_reference_policy->should_clear_reference(reference, clock); + assert(_soft_reference_policy != nullptr, "Policy not initialized"); + return !_soft_reference_policy->should_clear_reference(to_oop(reference), clock); } -bool ZReferenceProcessor::should_discover(oop reference, ReferenceType type) const { - volatile oop* const referent_addr = reference_referent_addr(reference); - const oop referent = ZBarrier::weak_load_barrier_on_oop_field(referent_addr); +bool ZReferenceProcessor::should_discover(zaddress reference, ReferenceType type) const { + volatile zpointer* const referent_addr = reference_referent_addr(reference); + const oop referent = to_oop(ZBarrier::load_barrier_on_oop_field(referent_addr)); if (is_inactive(reference, referent, type)) { return false; } + if (ZHeap::heap()->is_young(reference)) { + return false; + } + if (is_strongly_live(referent)) { return false; } @@ -182,49 +196,43 @@ bool ZReferenceProcessor::should_discover(oop reference, ReferenceType type) con return true; } -bool ZReferenceProcessor::should_drop(oop reference, ReferenceType type) const { - const oop referent = reference_referent(reference); - if (referent == NULL) { - // Reference has been cleared, by a call to Reference.enqueue() - // or Reference.clear() from the application, which means we - // should drop the reference. - return true; - } +bool ZReferenceProcessor::try_make_inactive(zaddress reference, ReferenceType type) const { + const zpointer referent = reference_referent(reference); - // Check if the referent is still alive, in which case we should - // drop the reference. - if (type == REF_PHANTOM) { - return ZBarrier::is_alive_barrier_on_phantom_oop(referent); - } else { - return ZBarrier::is_alive_barrier_on_weak_oop(referent); + if (is_null_any(referent)) { + // Reference has already been cleared, by a call to Reference.enqueue() + // or Reference.clear() from the application, which means it's already + // inactive and we should drop the reference. + return false; } -} -void ZReferenceProcessor::keep_alive(oop reference, ReferenceType type) const { - volatile oop* const p = reference_referent_addr(reference); - if (type == REF_PHANTOM) { - ZBarrier::keep_alive_barrier_on_phantom_oop_field(p); + volatile zpointer* const referent_addr = reference_referent_addr(reference); + + // Cleaning the referent will fail if the object it points to is + // still alive, in which case we should drop the reference. 
+ if (type == REF_SOFT || type == REF_WEAK) { + return ZBarrier::clean_barrier_on_weak_oop_field(referent_addr); + } else if (type == REF_PHANTOM) { + return ZBarrier::clean_barrier_on_phantom_oop_field(referent_addr); + } else if (type == REF_FINAL) { + if (ZBarrier::clean_barrier_on_final_oop_field(referent_addr)) { + // The referent in a FinalReference will not be cleared, instead it is + // made inactive by self-looping the next field. An application can't + // call FinalReference.enqueue(), so there is no race to worry about + // when setting the next field. + assert(is_null(reference_next(reference)), "Already inactive"); + reference_set_next(reference, reference); + return true; + } } else { - ZBarrier::keep_alive_barrier_on_weak_oop_field(p); + fatal("Invalid referent type %d", type); } -} -void ZReferenceProcessor::make_inactive(oop reference, ReferenceType type) const { - if (type == REF_FINAL) { - // Don't clear referent. It is needed by the Finalizer thread to make the call - // to finalize(). A FinalReference is instead made inactive by self-looping the - // next field. An application can't call FinalReference.enqueue(), so there is - // no race to worry about when setting the next field. - assert(reference_next(reference) == NULL, "Already inactive"); - reference_set_next(reference, reference); - } else { - // Clear referent - reference_clear_referent(reference); - } + return false; } -void ZReferenceProcessor::discover(oop reference, ReferenceType type) { - log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type)); +void ZReferenceProcessor::discover(zaddress reference, ReferenceType type) { + log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", untype(reference), reference_type_name(type)); // Update statistics _discovered_count.get()[type]++; @@ -233,24 +241,27 @@ void ZReferenceProcessor::discover(oop reference, ReferenceType type) { // Mark referent (and its reachable subgraph) finalizable. This avoids // the problem of later having to mark those objects if the referent is // still final reachable during processing. 
- volatile oop* const referent_addr = reference_referent_addr(reference); - ZBarrier::mark_barrier_on_oop_field(referent_addr, true /* finalizable */); + volatile zpointer* const referent_addr = reference_referent_addr(reference); + ZBarrier::mark_barrier_on_old_oop_field(referent_addr, true /* finalizable */); } // Add reference to discovered list - assert(reference_discovered(reference) == NULL, "Already discovered"); - oop* const list = _discovered_list.addr(); + assert(ZHeap::heap()->is_old(reference), "Must be old"); + assert(is_null(reference_discovered(reference)), "Already discovered"); + zaddress* const list = _discovered_list.addr(); reference_set_discovered(reference, *list); *list = reference; } -bool ZReferenceProcessor::discover_reference(oop reference, ReferenceType type) { +bool ZReferenceProcessor::discover_reference(oop reference_obj, ReferenceType type) { if (!RegisterReferences) { // Reference processing disabled return false; } - log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type)); + log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s)", p2i(reference_obj), reference_type_name(type)); + + const zaddress reference = to_zaddress(reference_obj); // Update statistics _encountered_count.get()[type]++; @@ -266,77 +277,81 @@ bool ZReferenceProcessor::discover_reference(oop reference, ReferenceType type) return true; } -oop ZReferenceProcessor::drop(oop reference, ReferenceType type) { - log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type)); +void ZReferenceProcessor::process_worker_discovered_list(zaddress discovered_list) { + zaddress keep_head = zaddress::null; + zaddress keep_tail = zaddress::null; - // Keep referent alive - keep_alive(reference, type); + // Iterate over the discovered list and unlink them as we go, potentially + // appending them to the keep list + for (zaddress reference = discovered_list; !is_null(reference); ) { + assert(ZHeap::heap()->is_old(reference), "Must be old"); - // Unlink and return next in list - const oop next = reference_discovered(reference); - reference_set_discovered(reference, NULL); - return next; -} + const ReferenceType type = reference_type(reference); + const zaddress next = reference_discovered(reference); + reference_set_discovered(reference, zaddress::null); -oop* ZReferenceProcessor::keep(oop reference, ReferenceType type) { - log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type)); + if (try_make_inactive(reference, type)) { + // Keep reference + log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", untype(reference), reference_type_name(type)); - // Update statistics - _enqueued_count.get()[type]++; + // Update statistics + _enqueued_count.get()[type]++; - // Make reference inactive - make_inactive(reference, type); + list_append(keep_head, keep_tail, reference); + } else { + // Drop reference + log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", untype(reference), reference_type_name(type)); + } - // Return next in list - return reference_discovered_addr(reference); -} + reference = next; + SuspendibleThreadSet::yield(); + } -void ZReferenceProcessor::work() { - // Process discovered references - oop* const list = _discovered_list.addr(); - oop* p = list; + // Prepend discovered references to internal pending list - while (*p != NULL) { - const oop reference = *p; - const ReferenceType type = reference_type(reference); + // Anything kept on the 
list? + if (!is_null(keep_head)) { + const zaddress old_pending_list = Atomic::xchg(_pending_list.addr(), keep_head); + + // Concatenate the old list + reference_set_discovered(keep_tail, old_pending_list); - if (should_drop(reference, type)) { - *p = drop(reference, type); + if (is_null(old_pending_list)) { + // Old list was empty. First to prepend to list, record tail + _pending_list_tail = keep_tail; } else { - p = keep(reference, type); + assert(ZHeap::heap()->is_old(old_pending_list), "Must be old"); } } +} - // Prepend discovered references to internal pending list - if (*list != NULL) { - *p = Atomic::xchg(_pending_list.addr(), *list); - if (*p == NULL) { - // First to prepend to list, record tail - _pending_list_tail = p; - } +void ZReferenceProcessor::work() { + SuspendibleThreadSetJoiner sts_joiner; - // Clear discovered list - *list = NULL; - } -} + ZPerWorkerIterator iter(&_discovered_list); + for (zaddress* start; iter.next(&start);) { + const zaddress discovered_list = Atomic::xchg(start, zaddress::null); -bool ZReferenceProcessor::is_empty() const { - ZPerWorkerConstIterator iter(&_discovered_list); - for (const oop* list; iter.next(&list);) { - if (*list != NULL) { - return false; + if (discovered_list != zaddress::null) { + // Process discovered references + process_worker_discovered_list(discovered_list); } } +} - if (_pending_list.get() != NULL) { - return false; +void ZReferenceProcessor::verify_empty() const { +#ifdef ASSERT + ZPerWorkerConstIterator iter(&_discovered_list); + for (const zaddress* list; iter.next(&list);) { + assert(is_null(*list), "Discovered list not empty"); } - return true; + assert(is_null(_pending_list.get()), "Pending list not empty"); +#endif } void ZReferenceProcessor::reset_statistics() { - assert(is_empty(), "Should be empty"); + verify_empty(); // Reset encountered ZPerWorkerIterator iter_encountered(&_encountered_count); @@ -403,7 +418,7 @@ void ZReferenceProcessor::collect_statistics() { discovered[REF_WEAK], discovered[REF_FINAL], discovered[REF_PHANTOM]); - ZTracer::tracer()->report_gc_reference_stats(stats); + ZDriver::major()->jfr_tracer()->report_gc_reference_stats(stats); } class ZReferenceProcessorTask : public ZTask { @@ -421,7 +436,7 @@ class ZReferenceProcessorTask : public ZTask { }; void ZReferenceProcessor::process_references() { - ZStatTimer timer(ZSubPhaseConcurrentReferencesProcess); + ZStatTimerOld timer(ZSubPhaseConcurrentReferencesProcess); // Process discovered lists ZReferenceProcessorTask task(this); @@ -434,26 +449,61 @@ void ZReferenceProcessor::process_references() { collect_statistics(); } +void ZReferenceProcessor::verify_pending_references() { +#ifdef ASSERT + SuspendibleThreadSetJoiner sts_joiner; + + assert(!is_null(_pending_list.get()), "Should not contain colored null"); + + for (zaddress current = _pending_list.get(); + !is_null(current); + current = reference_discovered(current)) + { + volatile zpointer* const referent_addr = reference_referent_addr(current); + const oop referent = to_oop(ZBarrier::load_barrier_on_oop_field(referent_addr)); + const ReferenceType type = reference_type(current); + assert(ZReferenceProcessor::is_inactive(current, referent, type), "invariant"); + if (type == REF_FINAL) { + assert(ZPointer::is_marked_any_old(ZBarrier::load_atomic(referent_addr)), "invariant"); + } + + SuspendibleThreadSet::yield(); + } +#endif +} + +zaddress ZReferenceProcessor::swap_pending_list(zaddress pending_list) { + const oop pending_list_oop = to_oop(pending_list); + const oop prev = 
Universe::swap_reference_pending_list(pending_list_oop); + return to_zaddress(prev); +} + void ZReferenceProcessor::enqueue_references() { - ZStatTimer timer(ZSubPhaseConcurrentReferencesEnqueue); + ZStatTimerOld timer(ZSubPhaseConcurrentReferencesEnqueue); - if (_pending_list.get() == NULL) { + if (is_null(_pending_list.get())) { // Nothing to enqueue return; } + // Verify references on internal pending list + verify_pending_references(); + { // Heap_lock protects external pending list MonitorLocker ml(Heap_lock); + SuspendibleThreadSetJoiner sts_joiner; + + const zaddress prev_list = swap_pending_list(_pending_list.get()); - // Prepend internal pending list to external pending list - *_pending_list_tail = Universe::swap_reference_pending_list(_pending_list.get()); + // Link together new and old list + reference_set_discovered(_pending_list_tail, prev_list); // Notify ReferenceHandler thread ml.notify_all(); } // Reset internal pending list - _pending_list.set(NULL); - _pending_list_tail = _pending_list.addr(); + _pending_list.set(zaddress::null); + _pending_list_tail = zaddress::null; } diff --git a/src/hotspot/share/gc/z/zReferenceProcessor.hpp b/src/hotspot/share/gc/z/zReferenceProcessor.hpp index aae9bfeb8a106..d39cc8634cd22 100644 --- a/src/hotspot/share/gc/z/zReferenceProcessor.hpp +++ b/src/hotspot/share/gc/z/zReferenceProcessor.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,8 +25,10 @@ #define SHARE_GC_Z_ZREFERENCEPROCESSOR_HPP #include "gc/shared/referenceDiscoverer.hpp" +#include "gc/z/zAddress.hpp" #include "gc/z/zValue.hpp" +class ConcurrentGCTimer; class ReferencePolicy; class ZWorkers; @@ -42,29 +44,27 @@ class ZReferenceProcessor : public ReferenceDiscoverer { ZPerWorker _encountered_count; ZPerWorker _discovered_count; ZPerWorker _enqueued_count; - ZPerWorker _discovered_list; - ZContended _pending_list; - oop* _pending_list_tail; + ZPerWorker _discovered_list; + ZContended _pending_list; + zaddress _pending_list_tail; - bool is_inactive(oop reference, oop referent, ReferenceType type) const; + bool is_inactive(zaddress reference, oop referent, ReferenceType type) const; bool is_strongly_live(oop referent) const; - bool is_softly_live(oop reference, ReferenceType type) const; + bool is_softly_live(zaddress reference, ReferenceType type) const; - bool should_discover(oop reference, ReferenceType type) const; - bool should_drop(oop reference, ReferenceType type) const; - void keep_alive(oop reference, ReferenceType type) const; - void make_inactive(oop reference, ReferenceType type) const; + bool should_discover(zaddress reference, ReferenceType type) const; + bool try_make_inactive(zaddress reference, ReferenceType type) const; - void discover(oop reference, ReferenceType type); + void discover(zaddress reference, ReferenceType type); - oop drop(oop reference, ReferenceType type); - oop* keep(oop reference, ReferenceType type); - - bool is_empty() const; + void verify_empty() const; + void process_worker_discovered_list(zaddress discovered_list); void work(); void collect_statistics(); + zaddress swap_pending_list(zaddress pending_list); + public: ZReferenceProcessor(ZWorkers* workers); @@ -74,6 +74,8 @@ class ZReferenceProcessor : public ReferenceDiscoverer { virtual bool discover_reference(oop 
reference, ReferenceType type); void process_references(); void enqueue_references(); + + void verify_pending_references(); }; #endif // SHARE_GC_Z_ZREFERENCEPROCESSOR_HPP diff --git a/src/hotspot/share/gc/z/zRelocate.cpp b/src/hotspot/share/gc/z/zRelocate.cpp index bc5147544a964..0ec474f087310 100644 --- a/src/hotspot/share/gc/z/zRelocate.cpp +++ b/src/hotspot/share/gc/z/zRelocate.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,147 +23,435 @@ #include "precompiled.hpp" #include "gc/shared/gc_globals.hpp" +#include "gc/shared/suspendibleThreadSet.hpp" #include "gc/z/zAbort.inline.hpp" #include "gc/z/zAddress.inline.hpp" +#include "gc/z/zAllocator.inline.hpp" #include "gc/z/zBarrier.inline.hpp" +#include "gc/z/zCollectedHeap.hpp" #include "gc/z/zForwarding.inline.hpp" +#include "gc/z/zGeneration.inline.hpp" #include "gc/z/zHeap.inline.hpp" +#include "gc/z/zIndexDistributor.inline.hpp" +#include "gc/z/zIterator.inline.hpp" #include "gc/z/zPage.inline.hpp" +#include "gc/z/zPageAge.hpp" #include "gc/z/zRelocate.hpp" #include "gc/z/zRelocationSet.inline.hpp" +#include "gc/z/zRootsIterator.hpp" +#include "gc/z/zStackWatermark.hpp" #include "gc/z/zStat.hpp" #include "gc/z/zTask.hpp" -#include "gc/z/zThread.inline.hpp" +#include "gc/z/zUncoloredRoot.inline.hpp" +#include "gc/z/zVerify.hpp" #include "gc/z/zWorkers.hpp" #include "prims/jvmtiTagMap.hpp" #include "runtime/atomic.hpp" #include "utilities/debug.hpp" -ZRelocate::ZRelocate(ZWorkers* workers) : - _workers(workers) {} +static const ZStatCriticalPhase ZCriticalPhaseRelocationStall("Relocation Stall"); +static const ZStatSubPhase ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung("Concurrent Relocate Remset FP", ZGenerationId::young); -static uintptr_t forwarding_index(ZForwarding* forwarding, uintptr_t from_addr) { - const uintptr_t from_offset = ZAddress::offset(from_addr); +static uintptr_t forwarding_index(ZForwarding* forwarding, zoffset from_offset) { return (from_offset - forwarding->start()) >> forwarding->object_alignment_shift(); } -static uintptr_t forwarding_find(ZForwarding* forwarding, uintptr_t from_addr, ZForwardingCursor* cursor) { - const uintptr_t from_index = forwarding_index(forwarding, from_addr); +static zaddress forwarding_find(ZForwarding* forwarding, zoffset from_offset, ZForwardingCursor* cursor) { + const uintptr_t from_index = forwarding_index(forwarding, from_offset); const ZForwardingEntry entry = forwarding->find(from_index, cursor); - return entry.populated() ? ZAddress::good(entry.to_offset()) : 0; + return entry.populated() ? 
ZOffset::address(to_zoffset(entry.to_offset())) : zaddress::null; } -static uintptr_t forwarding_insert(ZForwarding* forwarding, uintptr_t from_addr, uintptr_t to_addr, ZForwardingCursor* cursor) { - const uintptr_t from_index = forwarding_index(forwarding, from_addr); - const uintptr_t to_offset = ZAddress::offset(to_addr); - const uintptr_t to_offset_final = forwarding->insert(from_index, to_offset, cursor); - return ZAddress::good(to_offset_final); +static zaddress forwarding_find(ZForwarding* forwarding, zaddress_unsafe from_addr, ZForwardingCursor* cursor) { + return forwarding_find(forwarding, ZAddress::offset(from_addr), cursor); } -static uintptr_t relocate_object_inner(ZForwarding* forwarding, uintptr_t from_addr, ZForwardingCursor* cursor) { +static zaddress forwarding_find(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) { + return forwarding_find(forwarding, ZAddress::offset(from_addr), cursor); +} + +static zaddress forwarding_insert(ZForwarding* forwarding, zoffset from_offset, zaddress to_addr, ZForwardingCursor* cursor) { + const uintptr_t from_index = forwarding_index(forwarding, from_offset); + const zoffset to_offset = ZAddress::offset(to_addr); + const zoffset to_offset_final = forwarding->insert(from_index, to_offset, cursor); + return ZOffset::address(to_offset_final); +} + +static zaddress forwarding_insert(ZForwarding* forwarding, zaddress from_addr, zaddress to_addr, ZForwardingCursor* cursor) { + return forwarding_insert(forwarding, ZAddress::offset(from_addr), to_addr, cursor); +} + +ZRelocateQueue::ZRelocateQueue() : + _lock(), + _queue(), + _nworkers(0), + _nsynchronized(0), + _synchronize(false), + _needs_attention(0) {} + +bool ZRelocateQueue::needs_attention() const { + return Atomic::load(&_needs_attention) != 0; +} + +void ZRelocateQueue::inc_needs_attention() { + const int needs_attention = Atomic::add(&_needs_attention, 1); + assert(needs_attention == 1 || needs_attention == 2, "Invalid state"); +} + +void ZRelocateQueue::dec_needs_attention() { + const int needs_attention = Atomic::sub(&_needs_attention, 1); + assert(needs_attention == 0 || needs_attention == 1, "Invalid state"); +} + +void ZRelocateQueue::join(uint nworkers) { + assert(nworkers != 0, "Must request at least one worker"); + assert(_nworkers == 0, "Invalid state"); + assert(_nsynchronized == 0, "Invalid state"); + + log_debug(gc, reloc)("Joining workers: %u", nworkers); + + _nworkers = nworkers; +} + +void ZRelocateQueue::resize_workers(uint nworkers) { + assert(nworkers != 0, "Must request at least one worker"); + assert(_nworkers == 0, "Invalid state"); + assert(_nsynchronized == 0, "Invalid state"); + + log_debug(gc, reloc)("Resize workers: %u", nworkers); + + ZLocker locker(&_lock); + _nworkers = nworkers; +} + +void ZRelocateQueue::leave() { + ZLocker locker(&_lock); + _nworkers--; + + assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers); + + log_debug(gc, reloc)("Leaving workers: left: %u _synchronize: %d _nsynchronized: %u", _nworkers, _synchronize, _nsynchronized); + + // Prune done forwardings + const bool forwardings_done = prune(); + + // Check if all workers synchronized + const bool last_synchronized = _synchronize && _nworkers == _nsynchronized; + + if (forwardings_done || last_synchronized) { + _lock.notify_all(); + } +} + +void ZRelocateQueue::add_and_wait(ZForwarding* forwarding) { + ZStatTimer timer(ZCriticalPhaseRelocationStall); + ZLocker locker(&_lock); + + if (forwarding->is_done()) { + return; + } 
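[Editor's note] For context on how this queue is used: a thread that fails to relocate an object itself calls add_and_wait() to park the forwarding here until a GC worker reports the page as done, while workers check the _needs_attention counter as a lock-free fast path before taking _lock (see synchronize_poll() further down). The following is a minimal standalone sketch of that shape using standard C++ primitives; the names StallQueue and Item are illustrative only and are not part of this patch.

// Minimal sketch (not HotSpot code): waiters block until their item is done,
// workers use an atomic counter as a fast path before taking the lock.
#include <atomic>
#include <condition_variable>
#include <deque>
#include <mutex>

struct Item {
  std::atomic<bool> done{false};
};

class StallQueue {
private:
  std::mutex              _lock;
  std::condition_variable _cv;
  std::deque<Item*>       _queue;
  std::atomic<int>        _needs_attention{0};

public:
  // A "mutator" that could not make progress on its own parks here until
  // a worker has completed its item.
  void add_and_wait(Item* item) {
    std::unique_lock<std::mutex> lock(_lock);
    if (item->done.load()) {
      return;                        // Already completed by a worker
    }
    _queue.push_back(item);
    if (_queue.size() == 1) {
      _needs_attention.fetch_add(1); // Queue just became non-empty
    }
    _cv.wait(lock, [&] { return item->done.load(); });
  }

  // A "worker" polls this from its hot loop; the atomic load is the
  // lock-free fast path taken when nothing is queued.
  Item* poll() {
    if (_needs_attention.load() == 0) {
      return nullptr;
    }
    std::lock_guard<std::mutex> lock(_lock);
    if (_queue.empty()) {
      return nullptr;
    }
    Item* const item = _queue.front();
    _queue.pop_front();
    if (_queue.empty()) {
      _needs_attention.fetch_sub(1);
    }
    return item;
  }

  // The worker marks the item done and wakes all waiters.
  void complete(Item* item) {
    std::lock_guard<std::mutex> lock(_lock);
    item->done.store(true);
    _cv.notify_all();
  }
};

Keeping the fast path down to a single atomic load is what lets worker threads poll the queue from their hot relocation loop without contending on the lock.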
+ + _queue.append(forwarding); + if (_queue.length() == 1) { + // Queue became non-empty + inc_needs_attention(); + _lock.notify_all(); + } + + while (!forwarding->is_done()) { + _lock.wait(); + } +} + +bool ZRelocateQueue::prune() { + if (_queue.is_empty()) { + return false; + } + + bool done = false; + + for (int i = 0; i < _queue.length();) { + const ZForwarding* const forwarding = _queue.at(i); + if (forwarding->is_done()) { + done = true; + + _queue.delete_at(i); + } else { + i++; + } + } + + if (_queue.is_empty()) { + dec_needs_attention(); + } + + return done; +} + +ZForwarding* ZRelocateQueue::prune_and_claim() { + if (prune()) { + _lock.notify_all(); + } + + for (int i = 0; i < _queue.length(); i++) { + ZForwarding* const forwarding = _queue.at(i); + if (forwarding->claim()) { + return forwarding; + } + } + + return nullptr; +} + +class ZRelocateQueueSynchronizeThread { +private: + ZRelocateQueue* const _queue; + +public: + ZRelocateQueueSynchronizeThread(ZRelocateQueue* queue) : + _queue(queue) { + _queue->synchronize_thread(); + } + + ~ZRelocateQueueSynchronizeThread() { + _queue->desynchronize_thread(); + } +}; + +void ZRelocateQueue::synchronize_thread() { + _nsynchronized++; + + log_debug(gc, reloc)("Synchronize worker _nsynchronized %u", _nsynchronized); + + assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers); + if (_nsynchronized == _nworkers) { + // All workers synchronized + _lock.notify_all(); + } +} + +void ZRelocateQueue::desynchronize_thread() { + _nsynchronized--; + + log_debug(gc, reloc)("Desynchronize worker _nsynchronized %u", _nsynchronized); + + assert(_nsynchronized < _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers); +} + +ZForwarding* ZRelocateQueue::synchronize_poll() { + // Fast path avoids locking + if (!needs_attention()) { + return nullptr; + } + + // Slow path to get the next forwarding and/or synchronize + ZLocker locker(&_lock); + + { + ZForwarding* const forwarding = prune_and_claim(); + if (forwarding != nullptr) { + // Don't become synchronized while there are elements in the queue + return forwarding; + } + } + + if (!_synchronize) { + return nullptr; + } + + ZRelocateQueueSynchronizeThread rqst(this); + + do { + _lock.wait(); + + ZForwarding* const forwarding = prune_and_claim(); + if (forwarding != nullptr) { + return forwarding; + } + } while (_synchronize); + + return nullptr; +} + +void ZRelocateQueue::clear() { + assert(_nworkers == 0, "Invalid state"); + + if (_queue.is_empty()) { + return; + } + + ZArrayIterator iter(&_queue); + for (ZForwarding* forwarding; iter.next(&forwarding);) { + assert(forwarding->is_done(), "All should be done"); + } + + assert(false, "Clear was not empty"); + + _queue.clear(); + dec_needs_attention(); +} + +void ZRelocateQueue::synchronize() { + ZLocker locker(&_lock); + _synchronize = true; + + inc_needs_attention(); + + log_debug(gc, reloc)("Synchronize all workers 1 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized); + + while (_nworkers != _nsynchronized) { + _lock.wait(); + log_debug(gc, reloc)("Synchronize all workers 2 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized); + } +} + +void ZRelocateQueue::desynchronize() { + ZLocker locker(&_lock); + _synchronize = false; + + log_debug(gc, reloc)("Desynchronize all workers _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized); + + assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers); + + dec_needs_attention(); + + 
_lock.notify_all(); +} + +ZRelocate::ZRelocate(ZGeneration* generation) : + _generation(generation), + _queue() {} + +ZWorkers* ZRelocate::workers() const { + return _generation->workers(); +} + +void ZRelocate::start() { + _queue.join(workers()->active_workers()); +} + +void ZRelocate::add_remset(volatile zpointer* p) { + ZGeneration::young()->remember(p); +} + +static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) { assert(ZHeap::heap()->is_object_live(from_addr), "Should be live"); // Allocate object const size_t size = ZUtils::object_size(from_addr); - const uintptr_t to_addr = ZHeap::heap()->alloc_object_for_relocation(size); - if (to_addr == 0) { + + ZAllocatorForRelocation* allocator = ZAllocator::relocation(forwarding->to_age()); + + const zaddress to_addr = allocator->alloc_object(size); + + if (is_null(to_addr)) { // Allocation failed - return 0; + return zaddress::null; } // Copy object ZUtils::object_copy_disjoint(from_addr, to_addr, size); // Insert forwarding - const uintptr_t to_addr_final = forwarding_insert(forwarding, from_addr, to_addr, cursor); + const zaddress to_addr_final = forwarding_insert(forwarding, from_addr, to_addr, cursor); + if (to_addr_final != to_addr) { // Already relocated, try undo allocation - ZHeap::heap()->undo_alloc_object_for_relocation(to_addr, size); + allocator->undo_alloc_object(to_addr, size); } return to_addr_final; } -uintptr_t ZRelocate::relocate_object(ZForwarding* forwarding, uintptr_t from_addr) const { +zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) { ZForwardingCursor cursor; // Lookup forwarding - uintptr_t to_addr = forwarding_find(forwarding, from_addr, &cursor); - if (to_addr != 0) { + zaddress to_addr = forwarding_find(forwarding, from_addr, &cursor); + if (!is_null(to_addr)) { // Already relocated return to_addr; } // Relocate object - if (forwarding->retain_page()) { - to_addr = relocate_object_inner(forwarding, from_addr, &cursor); + if (forwarding->retain_page(&_queue)) { + assert(_generation->is_phase_relocate(), "Must be"); + to_addr = relocate_object_inner(forwarding, safe(from_addr), &cursor); forwarding->release_page(); - if (to_addr != 0) { + if (!is_null(to_addr)) { // Success return to_addr; } - // Failed to relocate object. Wait for a worker thread to complete - // relocation of this page, and then forward the object. If the GC - // aborts the relocation phase before the page has been relocated, - // then wait return false and we just forward the object in-place. - if (!forwarding->wait_page_released()) { - // Forward object in-place - return forwarding_insert(forwarding, from_addr, from_addr, &cursor); - } + // Failed to relocate object. Signal and wait for a worker thread to + // complete relocation of this page, and then forward the object. 
+ _queue.add_and_wait(forwarding); } // Forward object return forward_object(forwarding, from_addr); } -uintptr_t ZRelocate::forward_object(ZForwarding* forwarding, uintptr_t from_addr) const { +zaddress ZRelocate::forward_object(ZForwarding* forwarding, zaddress_unsafe from_addr) { ZForwardingCursor cursor; - const uintptr_t to_addr = forwarding_find(forwarding, from_addr, &cursor); - assert(to_addr != 0, "Should be forwarded"); + const zaddress to_addr = forwarding_find(forwarding, from_addr, &cursor); + assert(!is_null(to_addr), "Should be forwarded: " PTR_FORMAT, untype(from_addr)); return to_addr; } -static ZPage* alloc_page(const ZForwarding* forwarding) { +static ZPage* alloc_page(ZAllocatorForRelocation* allocator, ZPageType type, size_t size) { if (ZStressRelocateInPlace) { // Simulate failure to allocate a new page. This will // cause the page being relocated to be relocated in-place. - return NULL; + return nullptr; } ZAllocationFlags flags; flags.set_non_blocking(); - flags.set_worker_relocation(); - return ZHeap::heap()->alloc_page(forwarding->type(), forwarding->size(), flags); -} + flags.set_gc_relocation(); -static void free_page(ZPage* page) { - ZHeap::heap()->free_page(page, true /* reclaimed */); + return allocator->alloc_page_for_relocation(type, size, flags); } -static bool should_free_target_page(ZPage* page) { +static void retire_target_page(ZGeneration* generation, ZPage* page) { + if (generation->is_young() && page->is_old()) { + generation->increase_promoted(page->used()); + } else { + generation->increase_compacted(page->used()); + } + // Free target page if it is empty. We can end up with an empty target // page if we allocated a new target page, and then lost the race to // relocate the remaining objects, leaving the target page empty when // relocation completed. - return page != NULL && page->top() == page->start(); + if (page->used() == 0) { + ZHeap::heap()->free_page(page); + } } class ZRelocateSmallAllocator { private: - volatile size_t _in_place_count; + ZGeneration* const _generation; + volatile size_t _in_place_count; public: - ZRelocateSmallAllocator() : + ZRelocateSmallAllocator(ZGeneration* generation) : + _generation(generation), _in_place_count(0) {} - ZPage* alloc_target_page(ZForwarding* forwarding, ZPage* target) { - ZPage* const page = alloc_page(forwarding); - if (page == NULL) { + ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) { + ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age()); + ZPage* const page = alloc_page(allocator, forwarding->type(), forwarding->size()); + if (page == nullptr) { Atomic::inc(&_in_place_count); } + if (target != nullptr) { + // Retire the old target page + retire_target_page(_generation, target); + } + return page; } @@ -172,20 +460,16 @@ class ZRelocateSmallAllocator { } void free_target_page(ZPage* page) { - if (should_free_target_page(page)) { - free_page(page); + if (page != nullptr) { + retire_target_page(_generation, page); } } - void free_relocated_page(ZPage* page) { - free_page(page); - } - - uintptr_t alloc_object(ZPage* page, size_t size) const { - return (page != NULL) ? page->alloc_object(size) : 0; + zaddress alloc_object(ZPage* page, size_t size) const { + return (page != nullptr) ? 
page->alloc_object(size) : zaddress::null; } - void undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) const { + void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const { page->undo_alloc_object(addr, size); } @@ -196,25 +480,37 @@ class ZRelocateSmallAllocator { class ZRelocateMediumAllocator { private: - ZConditionLock _lock; - ZPage* _shared; - bool _in_place; - volatile size_t _in_place_count; + ZGeneration* const _generation; + ZConditionLock _lock; + ZPage* _shared[ZAllocator::_relocation_allocators]; + bool _in_place; + volatile size_t _in_place_count; public: - ZRelocateMediumAllocator() : + ZRelocateMediumAllocator(ZGeneration* generation) : + _generation(generation), _lock(), - _shared(NULL), + _shared(), _in_place(false), _in_place_count(0) {} ~ZRelocateMediumAllocator() { - if (should_free_target_page(_shared)) { - free_page(_shared); + for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) { + if (_shared[i] != nullptr) { + retire_target_page(_generation, _shared[i]); + } } } - ZPage* alloc_target_page(ZForwarding* forwarding, ZPage* target) { + ZPage* shared(ZPageAge age) { + return _shared[static_cast(age) - 1]; + } + + void set_shared(ZPageAge age, ZPage* page) { + _shared[static_cast(age) - 1] = page; + } + + ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) { ZLocker locker(&_lock); // Wait for any ongoing in-place relocation to complete @@ -226,25 +522,34 @@ class ZRelocateMediumAllocator { // current target page. The shared page will be different from the // current target page if another thread shared a page, or allocated // a new page. - if (_shared == target) { - _shared = alloc_page(forwarding); - if (_shared == NULL) { + const ZPageAge to_age = forwarding->to_age(); + if (shared(to_age) == target) { + ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age()); + ZPage* const to_page = alloc_page(allocator, forwarding->type(), forwarding->size()); + set_shared(to_age, to_page); + if (to_page == nullptr) { Atomic::inc(&_in_place_count); _in_place = true; } + + // This thread is responsible for retiring the shared target page + if (target != nullptr) { + retire_target_page(_generation, target); + } } - return _shared; + return shared(to_age); } void share_target_page(ZPage* page) { - ZLocker locker(&_lock); + const ZPageAge age = page->age(); + ZLocker locker(&_lock); assert(_in_place, "Invalid state"); - assert(_shared == NULL, "Invalid state"); - assert(page != NULL, "Invalid page"); + assert(shared(age) == nullptr, "Invalid state"); + assert(page != nullptr, "Invalid page"); - _shared = page; + set_shared(age, page); _in_place = false; _lock.notify_all(); @@ -254,15 +559,11 @@ class ZRelocateMediumAllocator { // Does nothing } - void free_relocated_page(ZPage* page) { - free_page(page); - } - - uintptr_t alloc_object(ZPage* page, size_t size) const { - return (page != NULL) ? page->alloc_object_atomic(size) : 0; + zaddress alloc_object(ZPage* page, size_t size) const { + return (page != nullptr) ? 
page->alloc_object_atomic(size) : zaddress::null; } - void undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) const { + void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const { page->undo_alloc_object_atomic(addr, size); } @@ -272,148 +573,746 @@ class ZRelocateMediumAllocator { }; template -class ZRelocateClosure : public ObjectClosure { +class ZRelocateWork : public StackObj { private: - Allocator* const _allocator; - ZForwarding* _forwarding; - ZPage* _target; + Allocator* const _allocator; + ZForwarding* _forwarding; + ZPage* _target[ZAllocator::_relocation_allocators]; + ZGeneration* const _generation; + size_t _other_promoted; + size_t _other_compacted; + + ZPage* target(ZPageAge age) { + return _target[static_cast(age) - 1]; + } + + void set_target(ZPageAge age, ZPage* page) { + _target[static_cast(age) - 1] = page; + } + + size_t object_alignment() const { + return (size_t)1 << _forwarding->object_alignment_shift(); + } - bool relocate_object(uintptr_t from_addr) const { + void increase_other_forwarded(size_t unaligned_object_size) { + const size_t aligned_size = align_up(unaligned_object_size, object_alignment()); + if (_forwarding->is_promotion()) { + _other_promoted += aligned_size; + } else { + _other_compacted += aligned_size; + } + } + + zaddress try_relocate_object_inner(zaddress from_addr) { ZForwardingCursor cursor; + const size_t size = ZUtils::object_size(from_addr); + ZPage* const to_page = target(_forwarding->to_age()); + // Lookup forwarding - if (forwarding_find(_forwarding, from_addr, &cursor) != 0) { - // Already relocated - return true; + { + const zaddress to_addr = forwarding_find(_forwarding, from_addr, &cursor); + if (!is_null(to_addr)) { + // Already relocated + increase_other_forwarded(size); + return to_addr; + } } // Allocate object - const size_t size = ZUtils::object_size(from_addr); - const uintptr_t to_addr = _allocator->alloc_object(_target, size); - if (to_addr == 0) { + const zaddress allocated_addr = _allocator->alloc_object(to_page, size); + if (is_null(allocated_addr)) { // Allocation failed - return false; + return zaddress::null; } // Copy object. Use conjoint copying if we are relocating - // in-place and the new object overlapps with the old object. - if (_forwarding->in_place() && to_addr + size > from_addr) { - ZUtils::object_copy_conjoint(from_addr, to_addr, size); + // in-place and the new object overlaps with the old object. + if (_forwarding->in_place_relocation() && allocated_addr + size > from_addr) { + ZUtils::object_copy_conjoint(from_addr, allocated_addr, size); } else { - ZUtils::object_copy_disjoint(from_addr, to_addr, size); + ZUtils::object_copy_disjoint(from_addr, allocated_addr, size); } // Insert forwarding - if (forwarding_insert(_forwarding, from_addr, to_addr, &cursor) != to_addr) { + const zaddress to_addr = forwarding_insert(_forwarding, from_addr, allocated_addr, &cursor); + if (to_addr != allocated_addr) { // Already relocated, undo allocation - _allocator->undo_alloc_object(_target, to_addr, size); + _allocator->undo_alloc_object(to_page, to_addr, size); + increase_other_forwarded(size); + } + + return to_addr; + } + + void update_remset_old_to_old(zaddress from_addr, zaddress to_addr) const { + // Old-to-old relocation - move existing remset bits + + // If this is called for an in-place relocated page, then this code has the + // responsibility to clear the old remset bits. 
+    // Extra care is needed because:
+    //
+    // 1) The to-object copy can overlap with the from-object copy
+    // 2) Remset bits of old objects need to be cleared
+    //
+    // A watermark is used to keep track of how far the old remset bits have been removed.
+
+    const bool in_place = _forwarding->in_place_relocation();
+    ZPage* const from_page = _forwarding->page();
+    const uintptr_t from_local_offset = from_page->local_offset(from_addr);
+
+    // Note: even with in-place relocation, the to_page could be another page
+    ZPage* const to_page = ZHeap::heap()->page(to_addr);
+
+    // Uses _relaxed version to handle that in-place relocation resets _top
+    assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
+    assert(to_page->is_in(to_addr), "Must be");
+
+    // Read the size from the to-object, since the from-object
+    // could have been overwritten during in-place relocation.
+    const size_t size = ZUtils::object_size(to_addr);
+
+    // If a young generation collection started while the old generation
+    // relocated objects, the remembered set bits were flipped from "current"
+    // to "previous".
+    //
+    // We need to select the correct remembered set bitmap to ensure that the
+    // old remset bits are found.
+    //
+    // Note that if the young generation marking (remset scanning) finishes
+    // before the old generation relocation has relocated this page, then the
+    // young generation will visit this page's previous remembered set bits and
+    // move them over to the current bitmap.
+    //
+    // If the young generation runs multiple cycles while the old generation is
+    // relocating, then the first cycle will have consumed the old remset bits
+    // and moved the associated objects to a new old page. The old relocation
+    // could find either of the two bitmaps. So, either it will find the original
+    // remset bits for the page, or it will find an empty bitmap for the page. It
+    // doesn't matter for correctness, because the young generation marking has
+    // already taken care of the bits.
+
+    const bool active_remset_is_current = ZGeneration::old()->active_remset_is_current();
+
+    // When in-place relocation is done and the old remset bits are located in
+    // the bitmap that is going to be used for the new remset bits, then we
+    // need to clear the old bits before the new bits are inserted.
+    const bool iterate_current_remset = active_remset_is_current && !in_place;
+
+    BitMap::Iterator iter = iterate_current_remset
+        ? from_page->remset_iterator_limited_current(from_local_offset, size)
+        : from_page->remset_iterator_limited_previous(from_local_offset, size);
+
+    for (BitMap::idx_t field_bit : iter) {
+      const uintptr_t field_local_offset = ZRememberedSet::to_offset(field_bit);
+
+      // Add remset entry in the to-page
+      const uintptr_t offset = field_local_offset - from_local_offset;
+      const zaddress to_field = to_addr + offset;
+      log_trace(gc, reloc)("Remember: from: " PTR_FORMAT " to: " PTR_FORMAT " current: %d marking: %d page: " PTR_FORMAT " remset: " PTR_FORMAT,
+          untype(from_page->start() + field_local_offset), untype(to_field), active_remset_is_current, ZGeneration::young()->is_phase_mark(), p2i(to_page), p2i(to_page->remset_current()));
+
+      volatile zpointer* const p = (volatile zpointer*)to_field;
+
+      if (ZGeneration::young()->is_phase_mark()) {
+        // Young generation remembered set scanning needs to know about this
+        // field. It will take responsibility to add a new remembered set entry if needed.
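[Editor's note] Summarizing the bitmap-selection rule spelled out in the comments above: the copy loop reads the from-page's remembered-set bits from either the "current" or the "previous" bitmap, depending on which bitmap is active and on whether the page is relocated in place (the in-place preparation step described further down swaps the bitmaps so the old bits end up in "previous"). A compact restatement for illustration only; the RemsetBitmap enum and function name are invented for this note and are not code from this patch.

// Illustrative restatement only; not HotSpot code.
enum class RemsetBitmap { Current, Previous };

// active_is_current: result of ZGeneration::old()->active_remset_is_current()
// in_place:          the from-page is also the to-page
//
//   active_is_current | in_place | bits are read from
//   ------------------+----------+-------------------
//   true              | false    | Current
//   true              | true     | Previous  (swapped by the in-place prepare step)
//   false             | any      | Previous
inline RemsetBitmap remset_bitmap_to_read(bool active_is_current, bool in_place) {
  return (active_is_current && !in_place) ? RemsetBitmap::Current
                                          : RemsetBitmap::Previous;
}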
+ _forwarding->relocated_remembered_fields_register(p); + } else { + to_page->remember(p); + if (in_place) { + assert(to_page->is_remembered(p), "p: " PTR_FORMAT, p2i(p)); + } + } + } + } + + static bool add_remset_if_young(volatile zpointer* p, zaddress addr) { + if (ZHeap::heap()->is_young(addr)) { + ZRelocate::add_remset(p); + return true; + } + + return false; + } + + static void update_remset_promoted_filter_and_remap_per_field(volatile zpointer* p) { + const zpointer ptr = Atomic::load(p); + + assert(ZPointer::is_old_load_good(ptr), "Should be at least old load good: " PTR_FORMAT, untype(ptr)); + + if (ZPointer::is_store_good(ptr)) { + // Already has a remset entry + return; + } + + if (ZPointer::is_load_good(ptr)) { + if (!is_null_any(ptr)) { + const zaddress addr = ZPointer::uncolor(ptr); + add_remset_if_young(p, addr); + } + // No need to remap it is already load good + return; + } + + if (is_null_any(ptr)) { + // Eagerly remap to skip adding a remset entry just to get deferred remapping + ZBarrier::remap_young_relocated(p, ptr); + return; + } + + const zaddress_unsafe addr_unsafe = ZPointer::uncolor_unsafe(ptr); + ZForwarding* const forwarding = ZGeneration::young()->forwarding(addr_unsafe); + + if (forwarding == nullptr) { + // Object isn't being relocated + const zaddress addr = safe(addr_unsafe); + if (!add_remset_if_young(p, addr)) { + // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping + ZBarrier::remap_young_relocated(p, ptr); + } + return; + } + + const zaddress addr = forwarding->find(addr_unsafe); + + if (!is_null(addr)) { + // Object has already been relocated + if (!add_remset_if_young(p, addr)) { + // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping + ZBarrier::remap_young_relocated(p, ptr); + } + return; + } + + // Object has not been relocated yet + // Don't want to eagerly relocate objects, so just add a remset + ZRelocate::add_remset(p); + return; + } + + void update_remset_promoted(zaddress to_addr) const { + ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field); + } + + void update_remset_for_fields(zaddress from_addr, zaddress to_addr) const { + if (_forwarding->to_age() != ZPageAge::old) { + // No remembered set in young pages + return; + } + + // Need to deal with remset when moving objects to the old generation + if (_forwarding->from_age() == ZPageAge::old) { + update_remset_old_to_old(from_addr, to_addr); + return; + } + + // Normal promotion + update_remset_promoted(to_addr); + } + + bool try_relocate_object(zaddress from_addr) { + const zaddress to_addr = try_relocate_object_inner(from_addr); + + if (is_null(to_addr)) { + return false; } + update_remset_for_fields(from_addr, to_addr); + return true; } - virtual void do_object(oop obj) { - const uintptr_t addr = ZOop::to_address(obj); + void start_in_place_relocation_prepare_remset(ZPage* from_page) { + if (_forwarding->from_age() != ZPageAge::old) { + // Only old pages have use remset bits + return; + } + + if (ZGeneration::old()->active_remset_is_current()) { + // We want to iterate over and clear the remset bits of the from-space page, + // and insert current bits in the to-space page. However, with in-place + // relocation, the from-space and to-space pages are the same. Clearing + // is destructive, and is difficult to perform before or during the iteration. + // However, clearing of the current bits has to be done before exposing the + // to-space objects in the forwarding table. 
+ // + // To solve this tricky dependency problem, we start by stashing away the + // current bits in the previous bits, and clearing the current bits + // (implemented by swapping the bits). This way, the current bits are + // cleared before copying the objects (like a normal to-space page), + // and the previous bits are representing a copy of the current bits + // of the from-space page, and are used for iteration. + from_page->swap_remset_bitmaps(); + } + } + + ZPage* start_in_place_relocation(zoffset relocated_watermark) { + _forwarding->in_place_relocation_claim_page(); + _forwarding->in_place_relocation_start(relocated_watermark); + + ZPage* const from_page = _forwarding->page(); + + const ZPageAge to_age = _forwarding->to_age(); + const bool promotion = _forwarding->is_promotion(); + + // Promotions happen through a new cloned page + ZPage* const to_page = promotion ? from_page->clone_limited() : from_page; + to_page->reset(to_age, ZPageResetType::InPlaceRelocation); + + // Clear remset bits for all objects that were relocated + // before this page became an in-place relocated page. + start_in_place_relocation_prepare_remset(from_page); + + if (promotion) { + // Register the the promotion + ZGeneration::young()->in_place_relocate_promote(from_page, to_page); + ZGeneration::young()->register_in_place_relocate_promoted(from_page); + } + + return to_page; + } + + void relocate_object(oop obj) { + const zaddress addr = to_zaddress(obj); assert(ZHeap::heap()->is_object_live(addr), "Should be live"); - while (!relocate_object(addr)) { + while (!try_relocate_object(addr)) { // Allocate a new target page, or if that fails, use the page being // relocated as the new target, which will cause it to be relocated // in-place. - _target = _allocator->alloc_target_page(_forwarding, _target); - if (_target != NULL) { + const ZPageAge to_age = _forwarding->to_age(); + ZPage* to_page = _allocator->alloc_and_retire_target_page(_forwarding, target(to_age)); + set_target(to_age, to_page); + if (to_page != nullptr) { continue; } - // Claim the page being relocated to block other threads from accessing - // it, or its forwarding table, until it has been released (relocation - // completed). - _target = _forwarding->claim_page(); - _target->reset_for_in_place_relocation(); - _forwarding->set_in_place(); + // Start in-place relocation to block other threads from accessing + // the page, or its forwarding table, until it has been released + // (relocation completed). + to_page = start_in_place_relocation(ZAddress::offset(addr)); + set_target(to_age, to_page); } } public: - ZRelocateClosure(Allocator* allocator) : + ZRelocateWork(Allocator* allocator, ZGeneration* generation) : _allocator(allocator), - _forwarding(NULL), - _target(NULL) {} + _forwarding(nullptr), + _target(), + _generation(generation), + _other_promoted(0), + _other_compacted(0) {} + + ~ZRelocateWork() { + for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) { + _allocator->free_target_page(_target[i]); + } + // Report statistics on-behalf of non-worker threads + _generation->increase_promoted(_other_promoted); + _generation->increase_compacted(_other_compacted); + } - ~ZRelocateClosure() { - _allocator->free_target_page(_target); + bool active_remset_is_current() const { + // Normal old-to-old relocation can treat the from-page remset as a + // read-only copy, and then copy over the appropriate remset bits to the + // cleared to-page's 'current' remset bitmap. + // + // In-place relocation is more complicated. 
Since, the same page is both + // a from-page and a to-page, we need to remove the old remset bits, and + // add remset bits that corresponds to the new locations of the relocated + // objects. + // + // Depending on how long ago (in terms of number of young GC's and the + // current young GC's phase), the page was allocated, the active + // remembered set will be in either the 'current' or 'previous' bitmap. + // + // If the active bits are in the 'previous' bitmap, we know that the + // 'current' bitmap was cleared at some earlier point in time, and we can + // simply set new bits in 'current' bitmap, and later when relocation has + // read all the old remset bits, we could just clear the 'previous' remset + // bitmap. + // + // If, on the other hand, the active bits are in the 'current' bitmap, then + // that bitmap will be used to both read the old remset bits, and the + // destination for the remset bits that we copy when an object is copied + // to it's new location within the page. We need to *carefully* remove all + // all old remset bits, without clearing out the newly set bits. + return ZGeneration::old()->active_remset_is_current(); } - void do_forwarding(ZForwarding* forwarding) { - _forwarding = forwarding; + void clear_remset_before_reuse(ZPage* page, bool in_place) { + if (_forwarding->from_age() != ZPageAge::old) { + // No remset bits + return; + } + + if (in_place) { + // Clear 'previous' remset bits. For in-place relocated pages, the previous + // remset bits are always used, even when active_remset_is_current(). + page->clear_remset_previous(); - // Check if we should abort - if (ZAbort::should_abort()) { - _forwarding->abort_page(); return; } + // Normal relocate + + // Clear active remset bits + if (active_remset_is_current()) { + page->clear_remset_current(); + } else { + page->clear_remset_previous(); + } + + // Verify that inactive remset bits are all cleared + if (active_remset_is_current()) { + page->verify_remset_cleared_previous(); + } else { + page->verify_remset_cleared_current(); + } + } + + void finish_in_place_relocation() { + // We are done with the from_space copy of the page + _forwarding->in_place_relocation_finish(); + } + + void do_forwarding(ZForwarding* forwarding) { + _forwarding = forwarding; + + _forwarding->page()->log_msg(" (relocate page)"); + + ZVerify::before_relocation(_forwarding); + // Relocate objects - _forwarding->object_iterate(this); + _forwarding->object_iterate([&](oop obj) { relocate_object(obj); }); + + ZVerify::after_relocation(_forwarding); // Verify if (ZVerifyForwarding) { _forwarding->verify(); } + _generation->increase_freed(_forwarding->page()->size()); + + // Deal with in-place relocation + const bool in_place = _forwarding->in_place_relocation(); + if (in_place) { + finish_in_place_relocation(); + } + + // Old from-space pages need to deal with remset bits + if (_forwarding->from_age() == ZPageAge::old) { + _forwarding->relocated_remembered_fields_after_relocate(); + } + // Release relocated page _forwarding->release_page(); - if (_forwarding->in_place()) { - // The relocated page has been relocated in-place and should not - // be freed. Keep it as target page until it is full, and offer to - // share it with other worker threads. 
- _allocator->share_target_page(_target); + if (in_place) { + // Wait for all other threads to call release_page + ZPage* const page = _forwarding->detach_page(); + + // Ensure that previous remset bits are cleared + clear_remset_before_reuse(page, true /* in_place */); + + page->log_msg(" (relocate page done in-place)"); + + // Different pages when promoting + ZPage* const target_page = target(_forwarding->to_age()); + _allocator->share_target_page(target_page); + } else { - // Detach and free relocated page + // Wait for all other threads to call release_page ZPage* const page = _forwarding->detach_page(); - _allocator->free_relocated_page(page); + + // Ensure that all remset bits are cleared + // Note: cleared after detach_page, when we know that + // the young generation isn't scanning the remset. + clear_remset_before_reuse(page, false /* in_place */); + + page->log_msg(" (relocate page done normal)"); + + // Free page + ZHeap::heap()->free_page(page); } } }; -class ZRelocateTask : public ZTask { +class ZRelocateStoreBufferInstallBasePointersThreadClosure : public ThreadClosure { +public: + virtual void do_thread(Thread* thread) { + JavaThread* const jt = JavaThread::cast(thread); + ZStoreBarrierBuffer* buffer = ZThreadLocalData::store_barrier_buffer(jt); + buffer->install_base_pointers(); + } +}; + +// Installs the object base pointers (object starts), for the fields written +// in the store buffer. The code that searches for the object start uses that +// liveness information stored in the pages. That information is lost when the +// pages have been relocated and then destroyed. +class ZRelocateStoreBufferInstallBasePointersTask : public ZTask { +private: + ZJavaThreadsIterator _threads_iter; + +public: + ZRelocateStoreBufferInstallBasePointersTask(ZGeneration* generation) : + ZTask("ZRelocateStoreBufferInstallBasePointersTask"), + _threads_iter(generation->id_optional()) {} + + virtual void work() { + ZRelocateStoreBufferInstallBasePointersThreadClosure fix_store_buffer_cl; + _threads_iter.apply(&fix_store_buffer_cl); + } +}; + +class ZRelocateTask : public ZRestartableTask { private: ZRelocationSetParallelIterator _iter; + ZGeneration* const _generation; + ZRelocateQueue* const _queue; ZRelocateSmallAllocator _small_allocator; ZRelocateMediumAllocator _medium_allocator; - static bool is_small(ZForwarding* forwarding) { - return forwarding->type() == ZPageTypeSmall; - } - public: - ZRelocateTask(ZRelocationSet* relocation_set) : - ZTask("ZRelocateTask"), + ZRelocateTask(ZRelocationSet* relocation_set, ZRelocateQueue* queue) : + ZRestartableTask("ZRelocateTask"), _iter(relocation_set), - _small_allocator(), - _medium_allocator() {} + _generation(relocation_set->generation()), + _queue(queue), + _small_allocator(_generation), + _medium_allocator(_generation) {} ~ZRelocateTask() { - ZStatRelocation::set_at_relocate_end(_small_allocator.in_place_count(), - _medium_allocator.in_place_count()); + _generation->stat_relocation()->at_relocate_end(_small_allocator.in_place_count(), _medium_allocator.in_place_count()); } virtual void work() { - ZRelocateClosure small(&_small_allocator); - ZRelocateClosure medium(&_medium_allocator); + ZRelocateWork small(&_small_allocator, _generation); + ZRelocateWork medium(&_medium_allocator, _generation); - for (ZForwarding* forwarding; _iter.next(&forwarding);) { - if (is_small(forwarding)) { + const auto do_forwarding = [&](ZForwarding* forwarding) { + ZPage* const page = forwarding->page(); + if (page->is_small()) { small.do_forwarding(forwarding); } 
else { medium.do_forwarding(forwarding); } + + // Absolute last thing done while relocating a page. + // + // We don't use the SuspendibleThreadSet when relocating pages. + // Instead the ZRelocateQueue is used as a pseudo STS joiner/leaver. + // + // After the mark_done call a safepointing could be completed and a + // new GC phase could be entered. + forwarding->mark_done(); + }; + + const auto claim_and_do_forwarding = [&](ZForwarding* forwarding) { + if (forwarding->claim()) { + do_forwarding(forwarding); + } + }; + + const auto do_forwarding_one_from_iter = [&]() { + ZForwarding* forwarding; + + if (_iter.next(&forwarding)) { + claim_and_do_forwarding(forwarding); + return true; + } + + return false; + }; + + for (;;) { + // As long as there are requests in the relocate queue, there are threads + // waiting in a VM state that does not allow them to be blocked. The + // worker thread needs to finish relocate these pages, and allow the + // other threads to continue and proceed to a blocking state. After that, + // the worker threads are allowed to safepoint synchronize. + for (ZForwarding* forwarding; (forwarding = _queue->synchronize_poll()) != nullptr;) { + do_forwarding(forwarding); + } + + if (!do_forwarding_one_from_iter()) { + // No more work + break; + } + + if (_generation->should_worker_resize()) { + break; + } + } + + _queue->leave(); + } + + virtual void resize_workers(uint nworkers) { + _queue->resize_workers(nworkers); + } +}; + +static void remap_and_maybe_add_remset(volatile zpointer* p) { + const zpointer ptr = Atomic::load(p); + + if (ZPointer::is_store_good(ptr)) { + // Already has a remset entry + return; + } + + // Remset entries are used for two reasons: + // 1) Young marking old-to-young pointer roots + // 2) Deferred remapping of stale old-to-young pointers + // + // This load barrier will up-front perform the remapping of (2), + // and the code below only has to make sure we register up-to-date + // old-to-young pointers for (1). + const zaddress addr = ZBarrier::load_barrier_on_oop_field_preloaded(p, ptr); + + if (is_null(addr)) { + // No need for remset entries for null pointers + return; + } + + if (ZHeap::heap()->is_old(addr)) { + // No need for remset entries for pointers to old gen + return; + } + + ZRelocate::add_remset(p); +} + +class ZRelocateAddRemsetForFlipPromoted : public ZRestartableTask { +private: + ZStatTimerYoung _timer; + ZArrayParallelIterator _iter; + +public: + ZRelocateAddRemsetForFlipPromoted(ZArray* pages) : + ZRestartableTask("ZRelocateAddRemsetForFlipPromoted"), + _timer(ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung), + _iter(pages) {} + + virtual void work() { + SuspendibleThreadSetJoiner sts_joiner; + + for (ZPage* page; _iter.next(&page);) { + page->object_iterate([&](oop obj) { + ZIterator::basic_oop_iterate_safe(obj, remap_and_maybe_add_remset); + }); + + SuspendibleThreadSet::yield(); + if (ZGeneration::young()->should_worker_resize()) { + return; + } } } }; void ZRelocate::relocate(ZRelocationSet* relocation_set) { - ZRelocateTask task(relocation_set); - _workers->run(&task); + { + // Install the store buffer's base pointers before the + // relocate task destroys the liveness information in + // the relocated pages. 
+ ZRelocateStoreBufferInstallBasePointersTask buffer_task(_generation); + workers()->run(&buffer_task); + } + + { + ZRelocateTask relocate_task(relocation_set, &_queue); + workers()->run(&relocate_task); + } + + if (relocation_set->generation()->is_young()) { + ZRelocateAddRemsetForFlipPromoted task(relocation_set->flip_promoted_pages()); + workers()->run(&task); + } + + _queue.clear(); +} + +ZPageAge ZRelocate::compute_to_age(ZPageAge from_age) { + if (from_age == ZPageAge::old) { + return ZPageAge::old; + } + + const uint age = static_cast(from_age); + if (age >= ZGeneration::young()->tenuring_threshold()) { + return ZPageAge::old; + } + + return static_cast(age + 1); +} + +class ZFlipAgePagesTask : public ZTask { +private: + ZArrayParallelIterator _iter; + +public: + ZFlipAgePagesTask(const ZArray* pages) : + ZTask("ZPromotePagesTask"), + _iter(pages) {} + + virtual void work() { + SuspendibleThreadSetJoiner sts_joiner; + ZArray promoted_pages; + + for (ZPage* prev_page; _iter.next(&prev_page);) { + const ZPageAge from_age = prev_page->age(); + const ZPageAge to_age = ZRelocate::compute_to_age(from_age); + assert(from_age != ZPageAge::old, "invalid age for a young collection"); + + // Figure out if this is proper promotion + const bool promotion = to_age == ZPageAge::old; + + if (promotion) { + // Before promoting an object (and before relocate start), we must ensure that all + // contained zpointers are store good. The marking code ensures that for non-null + // pointers, but null pointers are ignored. This code ensures that even null pointers + // are made store good, for the promoted objects. + prev_page->object_iterate([&](oop obj) { + ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field); + }); + } + + // Logging + prev_page->log_msg(promotion ? " (flip promoted)" : " (flip survived)"); + + // Setup to-space page + ZPage* const new_page = promotion ? prev_page->clone_limited_promote_flipped() : prev_page; + new_page->reset(to_age, ZPageResetType::FlipAging); + + if (promotion) { + ZGeneration::young()->flip_promote(prev_page, new_page); + // Defer promoted page registration times the lock is taken + promoted_pages.push(prev_page); + } + + SuspendibleThreadSet::yield(); + } + + ZGeneration::young()->register_flip_promoted(promoted_pages); + } +}; + +void ZRelocate::flip_age_pages(const ZArray* pages) { + ZFlipAgePagesTask flip_age_task(pages); + workers()->run(&flip_age_task); +} + +void ZRelocate::synchronize() { + _queue.synchronize(); +} + +void ZRelocate::desynchronize() { + _queue.desynchronize(); +} + +ZRelocateQueue* ZRelocate::queue() { + return &_queue; } diff --git a/src/hotspot/share/gc/z/zRelocate.hpp b/src/hotspot/share/gc/z/zRelocate.hpp index 647a640970a67..ed54103d53c18 100644 --- a/src/hotspot/share/gc/z/zRelocate.hpp +++ b/src/hotspot/share/gc/z/zRelocate.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,26 +24,81 @@ #ifndef SHARE_GC_Z_ZRELOCATE_HPP #define SHARE_GC_Z_ZRELOCATE_HPP +#include "gc/z/zAddress.hpp" +#include "gc/z/zPageAge.hpp" #include "gc/z/zRelocationSet.hpp" class ZForwarding; +class ZGeneration; class ZWorkers; +typedef size_t ZForwardingCursor; + +class ZRelocateQueue { +private: + ZConditionLock _lock; + ZArray _queue; + uint _nworkers; + uint _nsynchronized; + bool _synchronize; + volatile int _needs_attention; + + bool needs_attention() const; + void inc_needs_attention(); + void dec_needs_attention(); + + bool prune(); + ZForwarding* prune_and_claim(); + +public: + ZRelocateQueue(); + + void join(uint nworkers); + void resize_workers(uint nworkers); + void leave(); + + void add_and_wait(ZForwarding* forwarding); + + ZForwarding* synchronize_poll(); + void synchronize_thread(); + void desynchronize_thread(); + + void clear(); + + void synchronize(); + void desynchronize(); +}; + class ZRelocate { friend class ZRelocateTask; private: - ZWorkers* const _workers; + ZGeneration* const _generation; + ZRelocateQueue _queue; + ZWorkers* workers() const; void work(ZRelocationSetParallelIterator* iter); public: - ZRelocate(ZWorkers* workers); + ZRelocate(ZGeneration* generation); + + void start(); + + static void add_remset(volatile zpointer* p); - uintptr_t relocate_object(ZForwarding* forwarding, uintptr_t from_addr) const; - uintptr_t forward_object(ZForwarding* forwarding, uintptr_t from_addr) const; + static ZPageAge compute_to_age(ZPageAge from_age); + + zaddress relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr); + zaddress forward_object(ZForwarding* forwarding, zaddress_unsafe from_addr); void relocate(ZRelocationSet* relocation_set); + + void flip_age_pages(const ZArray* pages); + + void synchronize(); + void desynchronize(); + + ZRelocateQueue* queue(); }; #endif // SHARE_GC_Z_ZRELOCATE_HPP diff --git a/src/hotspot/share/gc/z/zRelocationSet.cpp b/src/hotspot/share/gc/z/zRelocationSet.cpp index 6577e11009c27..7fab802a8777e 100644 --- a/src/hotspot/share/gc/z/zRelocationSet.cpp +++ b/src/hotspot/share/gc/z/zRelocationSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,8 +23,12 @@ #include "precompiled.hpp" #include "gc/z/zArray.inline.hpp" +#include "gc/z/zCollectedHeap.hpp" #include "gc/z/zForwarding.inline.hpp" #include "gc/z/zForwardingAllocator.inline.hpp" +#include "gc/z/zGeneration.inline.hpp" +#include "gc/z/zPage.inline.hpp" +#include "gc/z/zPageAllocator.hpp" #include "gc/z/zRelocationSet.inline.hpp" #include "gc/z/zRelocationSetSelector.inline.hpp" #include "gc/z/zStat.hpp" @@ -38,35 +42,53 @@ class ZRelocationSetInstallTask : public ZTask { ZForwardingAllocator* const _allocator; ZForwarding** _forwardings; const size_t _nforwardings; + const ZArray* _small; + const ZArray* _medium; ZArrayParallelIterator _small_iter; ZArrayParallelIterator _medium_iter; - volatile size_t _small_next; - volatile size_t _medium_next; - void install(ZForwarding* forwarding, volatile size_t* next) { - const size_t index = Atomic::fetch_and_add(next, 1u); + void install(ZForwarding* forwarding, size_t index) { assert(index < _nforwardings, "Invalid index"); + + ZPage* const page = forwarding->page(); + + page->log_msg(" (relocation selected)"); + _forwardings[index] = forwarding; + + if (forwarding->is_promotion()) { + // Before promoting an object (and before relocate start), we must ensure that all + // contained zpointers are store good. The marking code ensures that for non-null + // pointers, but null pointers are ignored. This code ensures that even null pointers + // are made store good, for the promoted objects. + page->object_iterate([&](oop obj) { + ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field); + }); + } } - void install_small(ZForwarding* forwarding) { - install(forwarding, &_small_next); + void install_small(ZForwarding* forwarding, size_t index) { + install(forwarding, index); } - void install_medium(ZForwarding* forwarding) { - install(forwarding, &_medium_next); + void install_medium(ZForwarding* forwarding, size_t index) { + install(forwarding, index); + } + + ZPageAge to_age(ZPage* page) { + return ZRelocate::compute_to_age(page->age()); } public: ZRelocationSetInstallTask(ZForwardingAllocator* allocator, const ZRelocationSetSelector* selector) : ZTask("ZRelocationSetInstallTask"), _allocator(allocator), - _forwardings(NULL), - _nforwardings(selector->small()->length() + selector->medium()->length()), - _small_iter(selector->small()), - _medium_iter(selector->medium()), - _small_next(selector->medium()->length()), - _medium_next(0) { + _forwardings(nullptr), + _nforwardings(selector->selected_small()->length() + selector->selected_medium()->length()), + _small(selector->selected_small()), + _medium(selector->selected_medium()), + _small_iter(selector->selected_small()), + _medium_iter(selector->selected_medium()) { // Reset the allocator to have room for the relocation // set, all forwardings, and all forwarding entries. 
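The next hunk derives each forwarding's slot directly from the iterator index: medium pages occupy indices `[0, nmedium)` and small pages follow at `[nmedium, nmedium + nsmall)`. This preserves the layout the removed `_small_next`/`_medium_next` counters produced (small started at `selector->medium()->length()`), while making the placement deterministic instead of dependent on atomic fetch-and-add ordering. A tiny illustration of that layout; the helper names are mine, not the patch's:

```cpp
#include <cassert>
#include <cstddef>

// Illustration (not HotSpot code) of the forwarding-array layout used by
// ZRelocationSetInstallTask: medium pages occupy the low indices, small
// pages follow immediately after them.
inline size_t medium_slot(size_t medium_index)               { return medium_index; }
inline size_t small_slot(size_t nmedium, size_t small_index) { return nmedium + small_index; }

int main() {
  const size_t nmedium = 3, nsmall = 5;
  assert(small_slot(nmedium, 0) == 3);                              // first small page lands after the mediums
  assert(small_slot(nmedium, nsmall - 1) == nmedium + nsmall - 1);  // last slot in the array
  assert(medium_slot(nmedium - 1) + 1 == small_slot(nmedium, 0));   // no gap, no overlap
  return 0;
}
```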
@@ -85,15 +107,17 @@ class ZRelocationSetInstallTask : public ZTask { virtual void work() { // Allocate and install forwardings for small pages - for (ZPage* page; _small_iter.next(&page);) { - ZForwarding* const forwarding = ZForwarding::alloc(_allocator, page); - install_small(forwarding); + for (size_t page_index; _small_iter.next_index(&page_index);) { + ZPage* page = _small->at(int(page_index)); + ZForwarding* const forwarding = ZForwarding::alloc(_allocator, page, to_age(page)); + install_small(forwarding, _medium->length() + page_index); } // Allocate and install forwardings for medium pages - for (ZPage* page; _medium_iter.next(&page);) { - ZForwarding* const forwarding = ZForwarding::alloc(_allocator, page); - install_medium(forwarding); + for (size_t page_index; _medium_iter.next_index(&page_index);) { + ZPage* page = _medium->at(int(page_index)); + ZForwarding* const forwarding = ZForwarding::alloc(_allocator, page, to_age(page)); + install_medium(forwarding, page_index); } } @@ -106,25 +130,49 @@ class ZRelocationSetInstallTask : public ZTask { } }; -ZRelocationSet::ZRelocationSet(ZWorkers* workers) : - _workers(workers), +ZRelocationSet::ZRelocationSet(ZGeneration* generation) : + _generation(generation), _allocator(), - _forwardings(NULL), - _nforwardings(0) {} + _forwardings(nullptr), + _nforwardings(0), + _promotion_lock(), + _flip_promoted_pages(), + _in_place_relocate_promoted_pages() {} + +ZWorkers* ZRelocationSet::workers() const { + return _generation->workers(); +} + +ZGeneration* ZRelocationSet::generation() const { + return _generation; +} + +ZArray* ZRelocationSet::flip_promoted_pages() { + return &_flip_promoted_pages; +} void ZRelocationSet::install(const ZRelocationSetSelector* selector) { // Install relocation set ZRelocationSetInstallTask task(&_allocator, selector); - _workers->run(&task); + workers()->run(&task); _forwardings = task.forwardings(); _nforwardings = task.nforwardings(); // Update statistics - ZStatRelocation::set_at_install_relocation_set(_allocator.size()); + _generation->stat_relocation()->at_install_relocation_set(_allocator.size()); } -void ZRelocationSet::reset() { +static void destroy_and_clear(ZPageAllocator* page_allocator, ZArray* array) { + for (int i = 0; i < array->length(); i++) { + // Delete non-relocating promoted pages from last cycle + ZPage* const page = array->at(i); + page_allocator->safe_destroy_page(page); + } + + array->clear(); +} +void ZRelocationSet::reset(ZPageAllocator* page_allocator) { // Destroy forwardings ZRelocationSetIterator iter(this); for (ZForwarding* forwarding; iter.next(&forwarding);) { @@ -132,4 +180,21 @@ void ZRelocationSet::reset() { } _nforwardings = 0; + + destroy_and_clear(page_allocator, &_in_place_relocate_promoted_pages); + destroy_and_clear(page_allocator, &_flip_promoted_pages); +} + +void ZRelocationSet::register_flip_promoted(const ZArray& pages) { + ZLocker locker(&_promotion_lock); + for (ZPage* const page : pages) { + assert(!_flip_promoted_pages.contains(page), "no duplicates allowed"); + _flip_promoted_pages.append(page); + } +} + +void ZRelocationSet::register_in_place_relocate_promoted(ZPage* page) { + ZLocker locker(&_promotion_lock); + assert(!_in_place_relocate_promoted_pages.contains(page), "no duplicates allowed"); + _in_place_relocate_promoted_pages.append(page); } diff --git a/src/hotspot/share/gc/z/zRelocationSet.hpp b/src/hotspot/share/gc/z/zRelocationSet.hpp index 5881055db99df..2052f3c7bf1f5 100644 --- a/src/hotspot/share/gc/z/zRelocationSet.hpp +++ 
b/src/hotspot/share/gc/z/zRelocationSet.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,8 +26,12 @@ #include "gc/z/zArray.hpp" #include "gc/z/zForwardingAllocator.hpp" +#include "gc/z/zLock.hpp" class ZForwarding; +class ZGeneration; +class ZPage; +class ZPageAllocator; class ZRelocationSetSelector; class ZWorkers; @@ -35,16 +39,26 @@ class ZRelocationSet { template <bool Parallel> friend class ZRelocationSetIteratorImpl; private: - ZWorkers* _workers; + ZGeneration* _generation; ZForwardingAllocator _allocator; ZForwarding** _forwardings; size_t _nforwardings; + ZLock _promotion_lock; + ZArray<ZPage*> _flip_promoted_pages; + ZArray<ZPage*> _in_place_relocate_promoted_pages; + + ZWorkers* workers() const; public: - ZRelocationSet(ZWorkers* workers); + ZRelocationSet(ZGeneration* generation); void install(const ZRelocationSetSelector* selector); - void reset(); + void reset(ZPageAllocator* page_allocator); + ZGeneration* generation() const; + ZArray<ZPage*>* flip_promoted_pages(); + + void register_flip_promoted(const ZArray<ZPage*>& pages); + void register_in_place_relocate_promoted(ZPage* page); }; template <bool Parallel> diff --git a/src/hotspot/share/gc/z/zRelocationSetSelector.cpp b/src/hotspot/share/gc/z/zRelocationSetSelector.cpp index 04160480fe8d8..d5618e9207c1a 100644 --- a/src/hotspot/share/gc/z/zRelocationSetSelector.cpp +++ b/src/hotspot/share/gc/z/zRelocationSetSelector.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -34,33 +34,37 @@ #include "utilities/powerOfTwo.hpp" ZRelocationSetSelectorGroupStats::ZRelocationSetSelectorGroupStats() : - _npages(0), + _npages_candidates(0), _total(0), _live(0), _empty(0), + _npages_selected(0), _relocate(0) {} ZRelocationSetSelectorGroup::ZRelocationSetSelectorGroup(const char* name, - uint8_t page_type, + ZPageType page_type, size_t page_size, - size_t object_size_limit) : + size_t object_size_limit, + double fragmentation_limit) : _name(name), _page_type(page_type), _page_size(page_size), _object_size_limit(object_size_limit), - _fragmentation_limit(page_size * (ZFragmentationLimit / 100)), + _fragmentation_limit(fragmentation_limit), + _page_fragmentation_limit(page_size * (fragmentation_limit / 100)), _live_pages(), + _not_selected_pages(), _forwarding_entries(0), _stats() {} bool ZRelocationSetSelectorGroup::is_disabled() { // Medium pages are disabled when their page size is zero - return _page_type == ZPageTypeMedium && _page_size == 0; + return _page_type == ZPageType::medium && _page_size == 0; } bool ZRelocationSetSelectorGroup::is_selectable() { // Large pages are not selectable - return _page_type != ZPageTypeLarge; + return _page_type != ZPageType::large; } void ZRelocationSetSelectorGroup::semi_sort() { @@ -90,14 +94,14 @@ void ZRelocationSetSelectorGroup::semi_sort() { // Allocate destination array const int npages = _live_pages.length(); - ZArray sorted_live_pages(npages, npages, NULL); + ZArray sorted_live_pages(npages, npages, nullptr); // Sort pages into partitions ZArrayIterator iter2(&_live_pages); for (ZPage* page; iter2.next(&page);) { const size_t index = page->live_bytes() >> partition_size_shift; const int finger = partitions[index]++; - assert(sorted_live_pages.at(finger) == NULL, "Invalid finger"); + assert(sorted_live_pages.at(finger) == nullptr, "Invalid finger"); sorted_live_pages.at_put(finger, page); } @@ -111,8 +115,10 @@ void ZRelocationSetSelectorGroup::select_inner() { const int npages = _live_pages.length(); int selected_from = 0; int selected_to = 0; - size_t selected_live_bytes = 0; + size_t npages_selected[ZPageAgeMax + 1] = { 0 }; + size_t selected_live_bytes[ZPageAgeMax + 1] = { 0 }; size_t selected_forwarding_entries = 0; + size_t from_live_bytes = 0; size_t from_forwarding_entries = 0; @@ -121,7 +127,8 @@ void ZRelocationSetSelectorGroup::select_inner() { for (int from = 1; from <= npages; from++) { // Add page to the candidate relocation set ZPage* const page = _live_pages.at(from - 1); - from_live_bytes += page->live_bytes(); + const size_t page_live_bytes = page->live_bytes(); + from_live_bytes += page_live_bytes; from_forwarding_entries += ZForwarding::nentries(page); // Calculate the maximum number of pages needed by the candidate relocation set. 
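The hunk that follows switches the acceptance test from the global `ZFragmentationLimit` to the per-group `_fragmentation_limit` passed in at construction. A minimal sketch of that greedy criterion, where a candidate prefix is accepted only if the relative defragmentation it buys exceeds the limit; `extend_selection` is an illustrative helper, not part of the patch:

```cpp
// Sketch of the acceptance test in select_inner(); the real code also tracks
// forwarding entries and per-age statistics for the selected prefix.
static bool extend_selection(int from, int to,                 // candidate prefix: pages freed vs. pages needed
                             int selected_from, int selected_to,
                             double fragmentation_limit) {
  const int diff_from = from - selected_from;                  // additional pages that would be emptied
  const int diff_to   = to - selected_to;                      // additional to-space pages for their live data
  const double copied = (diff_from == 0) ? 100.0 : 100.0 * diff_to / diff_from;
  const double diff_reclaimable = 100.0 - copied;              // "relative defragmentation"
  return diff_reclaimable > fragmentation_limit;
}
```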
@@ -137,27 +144,38 @@ void ZRelocationSetSelectorGroup::select_inner() { const int diff_from = from - selected_from; const int diff_to = to - selected_to; const double diff_reclaimable = 100 - percent_of(diff_to, diff_from); - if (diff_reclaimable > ZFragmentationLimit) { + if (diff_reclaimable > _fragmentation_limit) { selected_from = from; selected_to = to; - selected_live_bytes = from_live_bytes; + selected_live_bytes[static_cast(page->age())] += page_live_bytes; + npages_selected[static_cast(page->age())] += 1; selected_forwarding_entries = from_forwarding_entries; } log_trace(gc, reloc)("Candidate Relocation Set (%s Pages): %d->%d, " - "%.1f%% relative defragmentation, " SIZE_FORMAT " forwarding entries, %s", + "%.1f%% relative defragmentation, " SIZE_FORMAT " forwarding entries, %s, live %d", _name, from, to, diff_reclaimable, from_forwarding_entries, - (selected_from == from) ? "Selected" : "Rejected"); + (selected_from == from) ? "Selected" : "Rejected", + int(page_live_bytes * 100 / page->size())); } // Finalize selection + for (int i = selected_from; i < _live_pages.length(); i++) { + ZPage* const page = _live_pages.at(i); + if (page->is_young()) { + _not_selected_pages.append(page); + } + } _live_pages.trunc_to(selected_from); _forwarding_entries = selected_forwarding_entries; // Update statistics - _stats._relocate = selected_live_bytes; + for (uint i = 0; i <= ZPageAgeMax; ++i) { + _stats[i]._relocate = selected_live_bytes[i]; + _stats[i]._npages_selected = npages_selected[i]; + } - log_trace(gc, reloc)("Relocation Set (%s Pages): %d->%d, %d skipped, " SIZE_FORMAT " forwarding entries", + log_debug(gc, reloc)("Relocation Set (%s Pages): %d->%d, %d skipped, " SIZE_FORMAT " forwarding entries", _name, selected_from, selected_to, npages - selected_from, selected_forwarding_entries); } @@ -170,16 +188,32 @@ void ZRelocationSetSelectorGroup::select() { if (is_selectable()) { select_inner(); + } else { + // Mark pages as not selected + const int npages = _live_pages.length(); + for (int from = 1; from <= npages; from++) { + ZPage* const page = _live_pages.at(from - 1); + _not_selected_pages.append(page); + } + } + + ZRelocationSetSelectorGroupStats s{}; + for (uint i = 0; i <= ZPageAgeMax; ++i) { + s._npages_candidates += _stats[i].npages_candidates(); + s._total += _stats[i].total(); + s._empty += _stats[i].empty(); + s._npages_selected += _stats[i].npages_selected(); + s._relocate += _stats[i].relocate(); } // Send event - event.commit(_page_type, _stats.npages(), _stats.total(), _stats.empty(), _stats.relocate()); + event.commit((u8)_page_type, s._npages_candidates, s._total, s._empty, s._npages_selected, s._relocate); } -ZRelocationSetSelector::ZRelocationSetSelector() : - _small("Small", ZPageTypeSmall, ZPageSizeSmall, ZObjectSizeLimitSmall), - _medium("Medium", ZPageTypeMedium, ZPageSizeMedium, ZObjectSizeLimitMedium), - _large("Large", ZPageTypeLarge, 0 /* page_size */, 0 /* object_size_limit */), +ZRelocationSetSelector::ZRelocationSetSelector(double fragmentation_limit) : + _small("Small", ZPageType::small, ZPageSizeSmall, ZObjectSizeLimitSmall, fragmentation_limit), + _medium("Medium", ZPageType::medium, ZPageSizeMedium, ZObjectSizeLimitMedium, fragmentation_limit), + _large("Large", ZPageType::large, 0 /* page_size */, 0 /* object_size_limit */, fragmentation_limit), _empty_pages() {} void ZRelocationSetSelector::select() { @@ -202,8 +236,15 @@ void ZRelocationSetSelector::select() { ZRelocationSetSelectorStats ZRelocationSetSelector::stats() const { 
ZRelocationSetSelectorStats stats; - stats._small = _small.stats(); - stats._medium = _medium.stats(); - stats._large = _large.stats(); + + for (uint i = 0; i <= ZPageAgeMax; ++i) { + const ZPageAge age = static_cast(i); + stats._small[i] = _small.stats(age); + stats._medium[i] = _medium.stats(age); + stats._large[i] = _large.stats(age); + } + + stats._has_relocatable_pages = total() > 0; + return stats; } diff --git a/src/hotspot/share/gc/z/zRelocationSetSelector.hpp b/src/hotspot/share/gc/z/zRelocationSetSelector.hpp index 7a60234db90a6..7bf2942e843f0 100644 --- a/src/hotspot/share/gc/z/zRelocationSetSelector.hpp +++ b/src/hotspot/share/gc/z/zRelocationSetSelector.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,9 @@ #define SHARE_GC_Z_ZRELOCATIONSETSELECTOR_HPP #include "gc/z/zArray.hpp" +#include "gc/z/zGenerationId.hpp" +#include "gc/z/zPageAge.hpp" +#include "gc/z/zPageType.hpp" #include "memory/allocation.hpp" class ZPage; @@ -33,19 +36,25 @@ class ZRelocationSetSelectorGroupStats { friend class ZRelocationSetSelectorGroup; private: - size_t _npages; + // Candidate set + size_t _npages_candidates; size_t _total; size_t _live; size_t _empty; + + // Selected set + size_t _npages_selected; size_t _relocate; public: ZRelocationSetSelectorGroupStats(); - size_t npages() const; + size_t npages_candidates() const; size_t total() const; size_t live() const; size_t empty() const; + + size_t npages_selected() const; size_t relocate() const; }; @@ -53,26 +62,32 @@ class ZRelocationSetSelectorStats { friend class ZRelocationSetSelector; private: - ZRelocationSetSelectorGroupStats _small; - ZRelocationSetSelectorGroupStats _medium; - ZRelocationSetSelectorGroupStats _large; + ZRelocationSetSelectorGroupStats _small[ZPageAgeMax + 1]; + ZRelocationSetSelectorGroupStats _medium[ZPageAgeMax + 1]; + ZRelocationSetSelectorGroupStats _large[ZPageAgeMax + 1]; + + size_t _has_relocatable_pages; public: - const ZRelocationSetSelectorGroupStats& small() const; - const ZRelocationSetSelectorGroupStats& medium() const; - const ZRelocationSetSelectorGroupStats& large() const; + const ZRelocationSetSelectorGroupStats& small(ZPageAge age) const; + const ZRelocationSetSelectorGroupStats& medium(ZPageAge age) const; + const ZRelocationSetSelectorGroupStats& large(ZPageAge age) const; + + bool has_relocatable_pages() const; }; class ZRelocationSetSelectorGroup { private: const char* const _name; - const uint8_t _page_type; + const ZPageType _page_type; const size_t _page_size; const size_t _object_size_limit; - const size_t _fragmentation_limit; + const double _fragmentation_limit; + const size_t _page_fragmentation_limit; ZArray _live_pages; + ZArray _not_selected_pages; size_t _forwarding_entries; - ZRelocationSetSelectorGroupStats _stats; + ZRelocationSetSelectorGroupStats _stats[ZPageAgeMax + 1]; bool is_disabled(); bool is_selectable(); @@ -81,18 +96,21 @@ class ZRelocationSetSelectorGroup { public: ZRelocationSetSelectorGroup(const char* name, - uint8_t page_type, + ZPageType page_type, size_t page_size, - size_t object_size_limit); + size_t object_size_limit, + double fragmentation_limit); void register_live_page(ZPage* page); void register_empty_page(ZPage* page); void select(); - const ZArray* selected() const; + const 
ZArray* live_pages() const; + const ZArray* selected_pages() const; + const ZArray* not_selected_pages() const; size_t forwarding_entries() const; - const ZRelocationSetSelectorGroupStats& stats() const; + const ZRelocationSetSelectorGroupStats& stats(ZPageAge age) const; }; class ZRelocationSetSelector : public StackObj { @@ -107,7 +125,7 @@ class ZRelocationSetSelector : public StackObj { size_t relocate() const; public: - ZRelocationSetSelector(); + ZRelocationSetSelector(double fragmentation_limit); void register_live_page(ZPage* page); void register_empty_page(ZPage* page); @@ -118,8 +136,12 @@ class ZRelocationSetSelector : public StackObj { void select(); - const ZArray* small() const; - const ZArray* medium() const; + const ZArray* selected_small() const; + const ZArray* selected_medium() const; + + const ZArray* not_selected_small() const; + const ZArray* not_selected_medium() const; + const ZArray* not_selected_large() const; size_t forwarding_entries() const; ZRelocationSetSelectorStats stats() const; diff --git a/src/hotspot/share/gc/z/zRelocationSetSelector.inline.hpp b/src/hotspot/share/gc/z/zRelocationSetSelector.inline.hpp index 2f31e10885644..316409ac7d8e4 100644 --- a/src/hotspot/share/gc/z/zRelocationSetSelector.inline.hpp +++ b/src/hotspot/share/gc/z/zRelocationSetSelector.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,8 +29,8 @@ #include "gc/z/zArray.inline.hpp" #include "gc/z/zPage.inline.hpp" -inline size_t ZRelocationSetSelectorGroupStats::npages() const { - return _npages; +inline size_t ZRelocationSetSelectorGroupStats::npages_candidates() const { + return _npages_candidates; } inline size_t ZRelocationSetSelectorGroupStats::total() const { @@ -45,63 +45,81 @@ inline size_t ZRelocationSetSelectorGroupStats::empty() const { return _empty; } +inline size_t ZRelocationSetSelectorGroupStats::npages_selected() const { + return _npages_selected; +} + inline size_t ZRelocationSetSelectorGroupStats::relocate() const { return _relocate; } -inline const ZRelocationSetSelectorGroupStats& ZRelocationSetSelectorStats::small() const { - return _small; +inline bool ZRelocationSetSelectorStats::has_relocatable_pages() const { + return _has_relocatable_pages; } -inline const ZRelocationSetSelectorGroupStats& ZRelocationSetSelectorStats::medium() const { - return _medium; +inline const ZRelocationSetSelectorGroupStats& ZRelocationSetSelectorStats::small(ZPageAge age) const { + return _small[static_cast(age)]; } -inline const ZRelocationSetSelectorGroupStats& ZRelocationSetSelectorStats::large() const { - return _large; +inline const ZRelocationSetSelectorGroupStats& ZRelocationSetSelectorStats::medium(ZPageAge age) const { + return _medium[static_cast(age)]; +} + +inline const ZRelocationSetSelectorGroupStats& ZRelocationSetSelectorStats::large(ZPageAge age) const { + return _large[static_cast(age)]; } inline void ZRelocationSetSelectorGroup::register_live_page(ZPage* page) { - const uint8_t type = page->type(); const size_t size = page->size(); const size_t live = page->live_bytes(); const size_t garbage = size - live; - if (garbage > _fragmentation_limit) { + // Pre-filter out pages that are guaranteed to not be selected + if (!page->is_large() && garbage > _page_fragmentation_limit) { 
_live_pages.append(page); + } else if (page->is_young()) { + _not_selected_pages.append(page); } - _stats._npages++; - _stats._total += size; - _stats._live += live; + const uint age = static_cast(page->age()); + _stats[age]._npages_candidates++; + _stats[age]._total += size; + _stats[age]._live += live; } inline void ZRelocationSetSelectorGroup::register_empty_page(ZPage* page) { const size_t size = page->size(); - _stats._npages++; - _stats._total += size; - _stats._empty += size; + const uint age = static_cast(page->age()); + _stats[age]._npages_candidates++; + _stats[age]._total += size; + _stats[age]._empty += size; } -inline const ZArray* ZRelocationSetSelectorGroup::selected() const { +inline const ZArray* ZRelocationSetSelectorGroup::selected_pages() const { return &_live_pages; } +inline const ZArray* ZRelocationSetSelectorGroup::not_selected_pages() const { + return &_not_selected_pages; +} + inline size_t ZRelocationSetSelectorGroup::forwarding_entries() const { return _forwarding_entries; } -inline const ZRelocationSetSelectorGroupStats& ZRelocationSetSelectorGroup::stats() const { - return _stats; +inline const ZRelocationSetSelectorGroupStats& ZRelocationSetSelectorGroup::stats(ZPageAge age) const { + return _stats[static_cast(age)]; } inline void ZRelocationSetSelector::register_live_page(ZPage* page) { - const uint8_t type = page->type(); + page->log_msg(" (relocation candidate)"); + + const ZPageType type = page->type(); - if (type == ZPageTypeSmall) { + if (type == ZPageType::small) { _small.register_live_page(page); - } else if (type == ZPageTypeMedium) { + } else if (type == ZPageType::medium) { _medium.register_live_page(page); } else { _large.register_live_page(page); @@ -109,11 +127,13 @@ inline void ZRelocationSetSelector::register_live_page(ZPage* page) { } inline void ZRelocationSetSelector::register_empty_page(ZPage* page) { - const uint8_t type = page->type(); + page->log_msg(" (relocation empty)"); - if (type == ZPageTypeSmall) { + const ZPageType type = page->type(); + + if (type == ZPageType::small) { _small.register_empty_page(page); - } else if (type == ZPageTypeMedium) { + } else if (type == ZPageType::medium) { _medium.register_empty_page(page); } else { _large.register_empty_page(page); @@ -135,23 +155,50 @@ inline void ZRelocationSetSelector::clear_empty_pages() { } inline size_t ZRelocationSetSelector::total() const { - return _small.stats().total() + _medium.stats().total() + _large.stats().total(); + size_t sum = 0; + for (uint i = 0; i <= ZPageAgeMax; ++i) { + const ZPageAge age = static_cast(i); + sum += _small.stats(age).total() + _medium.stats(age).total() + _large.stats(age).total(); + } + return sum; } inline size_t ZRelocationSetSelector::empty() const { - return _small.stats().empty() + _medium.stats().empty() + _large.stats().empty(); + size_t sum = 0; + for (uint i = 0; i <= ZPageAgeMax; ++i) { + const ZPageAge age = static_cast(i); + sum += _small.stats(age).empty() + _medium.stats(age).empty() + _large.stats(age).empty(); + } + return sum; } inline size_t ZRelocationSetSelector::relocate() const { - return _small.stats().relocate() + _medium.stats().relocate() + _large.stats().relocate(); + size_t sum = 0; + for (uint i = 0; i <= ZPageAgeMax; ++i) { + const ZPageAge age = static_cast(i); + sum += _small.stats(age).relocate() + _medium.stats(age).relocate() + _large.stats(age).relocate(); + } + return sum; +} + +inline const ZArray* ZRelocationSetSelector::selected_small() const { + return _small.selected_pages(); +} + +inline const 
ZArray* ZRelocationSetSelector::selected_medium() const { + return _medium.selected_pages(); +} + +inline const ZArray* ZRelocationSetSelector::not_selected_small() const { + return _small.not_selected_pages(); } -inline const ZArray* ZRelocationSetSelector::small() const { - return _small.selected(); +inline const ZArray* ZRelocationSetSelector::not_selected_medium() const { + return _medium.not_selected_pages(); } -inline const ZArray* ZRelocationSetSelector::medium() const { - return _medium.selected(); +inline const ZArray* ZRelocationSetSelector::not_selected_large() const { + return _large.not_selected_pages(); } inline size_t ZRelocationSetSelector::forwarding_entries() const { diff --git a/src/hotspot/share/gc/z/zRemembered.cpp b/src/hotspot/share/gc/z/zRemembered.cpp new file mode 100644 index 0000000000000..95865a5203f11 --- /dev/null +++ b/src/hotspot/share/gc/z/zRemembered.cpp @@ -0,0 +1,591 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zForwarding.inline.hpp" +#include "gc/z/zGeneration.inline.hpp" +#include "gc/z/zHeap.inline.hpp" +#include "gc/z/zIterator.inline.hpp" +#include "gc/z/zMark.hpp" +#include "gc/z/zPage.inline.hpp" +#include "gc/z/zPageTable.hpp" +#include "gc/z/zRemembered.inline.hpp" +#include "gc/z/zRememberedSet.hpp" +#include "gc/z/zTask.hpp" +#include "gc/z/zVerify.hpp" +#include "memory/iterator.hpp" +#include "oops/oop.inline.hpp" +#include "utilities/bitMap.inline.hpp" +#include "utilities/debug.hpp" + +ZRemembered::ZRemembered(ZPageTable* page_table, + const ZForwardingTable* old_forwarding_table, + ZPageAllocator* page_allocator) : + _page_table(page_table), + _old_forwarding_table(old_forwarding_table), + _page_allocator(page_allocator), + _found_old() { +} + +template +void ZRemembered::oops_do_forwarded_via_containing(GrowableArrayView* array, Function function) const { + // The array contains duplicated from_addr values. Cache expensive operations. 
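The comment above is the key to `oops_do_forwarded_via_containing()`: the containing array is grouped by object start, so the relocation lookup and object-size query are performed once per object and reused for all of its fields, and entries whose field offset falls outside the live object are dropped. A distilled version of that pattern with plain standard types; `Containing`, `relocate`, `size_of`, and `visit` are stand-ins, not ZGC APIs:

```cpp
#include <cstdint>
#include <vector>

struct Containing {            // stand-in for ZRememberedSetContaining
  uintptr_t addr;              // object start (repeats for every field of the object)
  uintptr_t field_addr;        // field that had a remembered-set bit
};

// Process fields grouped by owning object, recomputing the expensive
// per-object data (here: new location and size) only when 'addr' changes.
template <typename RelocateFn, typename SizeFn, typename Visit>
void for_each_valid_field(const std::vector<Containing>& array,
                          RelocateFn relocate, SizeFn size_of, Visit visit) {
  uintptr_t cached_addr = 0;
  uintptr_t cached_to   = 0;
  std::size_t cached_size = 0;

  for (const Containing& c : array) {
    if (c.addr != cached_addr) {         // new object: refresh the cached values
      cached_addr = c.addr;
      cached_to   = relocate(c.addr);
      cached_size = size_of(cached_to);
    }
    const uintptr_t offset = c.field_addr - c.addr;
    if (offset < cached_size) {          // drop stale entries outside the reported object
      visit(cached_to + offset);
    }
  }
}
```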
+ zaddress_unsafe from_addr = zaddress_unsafe::null; + zaddress to_addr = zaddress::null; + size_t object_size = 0; + + for (const ZRememberedSetContaining containing: *array) { + if (from_addr != containing._addr) { + from_addr = containing._addr; + + // Relocate object to new location + to_addr = ZGeneration::old()->relocate_or_remap_object(from_addr); + + // Figure out size + object_size = ZUtils::object_size(to_addr); + } + + // Calculate how far into the from-object the remset entry is + const uintptr_t field_offset = containing._field_addr - from_addr; + + // The 'containing' could contain mismatched (addr, addr_field). + // Need to check if the field was within the reported object. + if (field_offset < object_size) { + // Calculate the corresponding address in the to-object + const zaddress to_addr_field = to_addr + field_offset; + + function((volatile zpointer*)untype(to_addr_field)); + } + } +} + +bool ZRemembered::should_scan_page(ZPage* page) const { + if (!ZGeneration::old()->is_phase_relocate()) { + // If the old generation collection is not in the relocation phase, then it + // will not need any synchronization on its forwardings. + return true; + } + + ZForwarding* const forwarding = ZGeneration::old()->forwarding(ZOffset::address_unsafe(page->start())); + + if (forwarding == nullptr) { + // This page was provably not part of the old relocation set + return true; + } + + if (!forwarding->relocated_remembered_fields_is_concurrently_scanned()) { + // Safe to scan + return true; + } + + // If we get here, we know that the old collection is concurrently relocating + // objects. We need to be extremely careful not to scan a page that is + // concurrently being in-place relocated because it's objects and previous + // bits could be concurrently be moving around. + // + // Before calling this function ZRemembered::scan_forwarding ensures + // that all forwardings that have not already been fully relocated, + // will have had their "previous" remembered set bits scanned. + // + // The current page we're currently scanning could either be the same page + // that was found during scan_forwarding, or it could have been replaced + // by a new "allocating" page. There are two situations we have to consider: + // + // 1) If it is a proper new allocating page, then all objects where copied + // after scan_forwarding ran, and we are guaranteed that no "previous" + // remembered set bits are set. So, there's no need to scan this page. + // + // 2) If this is an in-place relocated page, then the entire page could + // be concurrently relocated. Meaning that both objects and previous + // remembered set bits could be moving around. However, if the in-place + // relocation is ongoing, we've already scanned all relevant "previous" + // bits when calling scan_forwarding. So, this page *must* not be scanned. + // + // Don't scan the page. 
+ return false; +} + +bool ZRemembered::scan_page(ZPage* page) const { + const bool can_trust_live_bits = + page->is_relocatable() && !ZGeneration::old()->is_phase_mark(); + + bool result = false; + + if (!can_trust_live_bits) { + // We don't have full liveness info - scan all remset entries + page->log_msg(" (scan_page_remembered)"); + int count = 0; + page->oops_do_remembered([&](volatile zpointer* p) { + result |= scan_field(p); + count++; + }); + page->log_msg(" (scan_page_remembered done: %d ignoring: " PTR_FORMAT " )", count, p2i(page->remset_current())); + } else if (page->is_marked()) { + // We have full liveness info - Only scan remset entries in live objects + page->log_msg(" (scan_page_remembered_in_live)"); + page->oops_do_remembered_in_live([&](volatile zpointer* p) { + result |= scan_field(p); + }); + } else { + page->log_msg(" (scan_page_remembered_dead)"); + // All objects are dead - do nothing + } + + return result; +} + +static void fill_containing(GrowableArrayCHeap* array, ZPage* page) { + page->log_msg(" (fill_remembered_containing)"); + + ZRememberedSetContainingIterator iter(page); + + for (ZRememberedSetContaining containing; iter.next(&containing);) { + array->push(containing); + } +} + +struct ZRememberedScanForwardingContext { + GrowableArrayCHeap _containing_array; + + struct Where { + static const int NumRecords = 10; + + Tickspan _duration; + int _count; + Tickspan _max_durations[NumRecords]; + int _max_count; + + Where() : + _duration(), + _count(), + _max_durations(), + _max_count() {} + + void report(const Tickspan& duration) { + _duration += duration; + _count++; + + // Install into max array + for (int i = 0; i < NumRecords; i++) { + if (duration > _max_durations[i]) { + // Slid to the side + for (int j = _max_count - 1; i < j; j--) { + _max_durations[j] = _max_durations[j - 1]; + } + + // Install + _max_durations[i] = duration; + if (_max_count < NumRecords) { + _max_count++; + } + break; + } + } + } + + void print(const char* name) { + log_debug(gc, remset)("Remset forwarding %s: %.3fms count: %d %s", + name, TimeHelper::counter_to_millis(_duration.value()), _count, Thread::current()->name()); + for (int i = 0; i < _max_count; i++) { + log_debug(gc, remset)(" %.3fms", TimeHelper::counter_to_millis(_max_durations[i].value())); + } + } + }; + + Where _where[2]; + + ZRememberedScanForwardingContext() : + _containing_array(), + _where() {} + + ~ZRememberedScanForwardingContext() { + print(); + } + + void report_retained(const Tickspan& duration) { + _where[0].report(duration); + } + + void report_released(const Tickspan& duration) { + _where[1].report(duration); + } + + void print() { + _where[0].print("retained"); + _where[1].print("released"); + } +}; + +struct ZRememberedScanForwardingMeasureRetained { + ZRememberedScanForwardingContext* _context; + Ticks _start; + + ZRememberedScanForwardingMeasureRetained(ZRememberedScanForwardingContext* context) : + _context(context), + _start(Ticks::now()) { + } + + ~ZRememberedScanForwardingMeasureRetained() { + const Ticks end = Ticks::now(); + const Tickspan duration = end - _start; + _context->report_retained(duration); + } +}; + +struct ZRememberedScanForwardingMeasureReleased { + ZRememberedScanForwardingContext* _context; + Ticks _start; + + ZRememberedScanForwardingMeasureReleased(ZRememberedScanForwardingContext* context) : + _context(context), + _start(Ticks::now()) { + } + + ~ZRememberedScanForwardingMeasureReleased() { + const Ticks end = Ticks::now(); + const Tickspan duration = end - _start; + 
_context->report_released(duration); + } +}; + +bool ZRemembered::scan_forwarding(ZForwarding* forwarding, void* context_void) const { + ZRememberedScanForwardingContext* const context = (ZRememberedScanForwardingContext*)context_void; + bool result = false; + + if (forwarding->retain_page(ZGeneration::old()->relocate_queue())) { + ZRememberedScanForwardingMeasureRetained measure(context); + forwarding->page()->log_msg(" (scan_forwarding)"); + + // We don't want to wait for the old relocation to finish and publish all + // relocated remembered fields. Reject its fields and collect enough data + // up-front. + forwarding->relocated_remembered_fields_notify_concurrent_scan_of(); + + // Collect all remset info while the page is retained + GrowableArrayCHeap* array = &context->_containing_array; + array->clear(); + fill_containing(array, forwarding->page()); + forwarding->release_page(); + + // Relocate (and mark) while page is released, to prevent + // retain deadlock when relocation threads in-place relocate. + oops_do_forwarded_via_containing(array, [&](volatile zpointer* p) { + result |= scan_field(p); + }); + + } else { + ZRememberedScanForwardingMeasureReleased measure(context); + + // The page has been released. If the page was relocated while this young + // generation collection was running, the old generation relocation will + // have published all addresses of fields that had a remembered set entry. + forwarding->relocated_remembered_fields_apply_to_published([&](volatile zpointer* p) { + result |= scan_field(p); + }); + } + + return result; +} + +// When scanning the remembered set during the young generation marking, we +// want to visit all old pages. And we want that to be done in parallel and +// fast. +// +// Walking over the entire page table and letting the workers claim indices +// have been shown to have scalability issues. +// +// So, we have the "found old" optimization, which allows us to perform much +// fewer claims (order of old pages, instead of order of slots in the page +// table), and it allows us to read fewer pages. +// +// The set of "found old pages" isn't precise, and can contain stale entries +// referring to slots of freed pages, or even slots where young pages have +// been installed. However, it will not lack any of the old pages. +// +// The data is maintained very similar to when and how we maintain the +// remembered set bits: We keep two separates sets, one for read-only access +// by the young marking, and a currently active set where we register new +// pages. When pages get relocated, or die, the page table slot for that page +// must be cleared. This clearing is done just like we do with the remset +// scanning: The old entries are not copied to the current active set, only +// slots that were found to actually contain old pages are registered in the +// active set. + +ZRemembered::FoundOld::FoundOld() : + // Array initialization requires copy constructors, which CHeapBitMap + // doesn't provide. Instantiate two instances, and populate an array + // with pointers to the two instances. 
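The two-set bookkeeping described above can be condensed into a small model: registrations always go into the current set, young mark start flips the roles, and the set consumed by the previous scan is cleared afterwards. Everything here (`FoundOldModel`, the `std::vector<bool>` bitmaps, the granule indexing) is an illustrative simplification of the `CHeapBitMap`-based implementation that follows:

```cpp
#include <cstddef>
#include <vector>

// Minimal model of the two-set "found old" scheme: one read-only set feeding
// the current young mark, one active set collecting (re-)discovered old pages.
class FoundOldModel {
  std::vector<bool> _sets[2];   // stand-in for the two CHeapBitMaps
  int _current = 0;

public:
  explicit FoundOldModel(size_t ngranules) {
    _sets[0].assign(ngranules, false);
    _sets[1].assign(ngranules, false);
  }

  // Called when an old page is found or installed; may leave stale slots
  // behind later, but never misses a live old page.
  void register_old(size_t granule_index) { _sets[_current][granule_index] = true; }

  // Young mark start: the freshly populated set becomes the read-only input...
  void flip() { _current ^= 1; }

  // ...and the set consumed by the previous young collection is recycled.
  void clear_previous() { _sets[_current ^ 1].assign(_sets[_current ^ 1].size(), false); }

  const std::vector<bool>& previous() const { return _sets[_current ^ 1]; }
};
```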
+ _allocated_bitmap_0{ZAddressOffsetMax >> ZGranuleSizeShift, mtGC, true /* clear */}, + _allocated_bitmap_1{ZAddressOffsetMax >> ZGranuleSizeShift, mtGC, true /* clear */}, + _bitmaps{&_allocated_bitmap_0, &_allocated_bitmap_1}, + _current{0} {} + +BitMap* ZRemembered::FoundOld::current_bitmap() { + return _bitmaps[_current]; +} + +BitMap* ZRemembered::FoundOld::previous_bitmap() { + return _bitmaps[_current ^ 1]; +} + +void ZRemembered::FoundOld::flip() { + _current ^= 1; +} + +void ZRemembered::FoundOld::clear_previous() { + previous_bitmap()->clear_large(); +} + +void ZRemembered::FoundOld::register_page(ZPage* page) { + assert(page->is_old(), "Only register old pages"); + current_bitmap()->par_set_bit(untype(page->start()) >> ZGranuleSizeShift, memory_order_relaxed); +} + +void ZRemembered::flip_found_old_sets() { + _found_old.flip(); +} + +void ZRemembered::clear_found_old_previous_set() { + _found_old.clear_previous(); +} + +void ZRemembered::register_found_old(ZPage* page) { + assert(page->is_old(), "Should only register old pages"); + _found_old.register_page(page); +} + +struct ZRemsetTableEntry { + ZPage* _page; + ZForwarding* _forwarding; +}; + +class ZRemsetTableIterator { +private: + ZRemembered* const _remembered; + ZPageTable* const _page_table; + const ZForwardingTable* const _old_forwarding_table; + volatile BitMap::idx_t _claimed; + +public: + ZRemsetTableIterator(ZRemembered* remembered) : + _remembered(remembered), + _page_table(remembered->_page_table), + _old_forwarding_table(remembered->_old_forwarding_table), + _claimed(0) {} + + // This iterator uses the "found old" optimization. + bool next(ZRemsetTableEntry* entry_addr) { + BitMap* const bm = _remembered->_found_old.previous_bitmap(); + + BitMap::idx_t prev = Atomic::load(&_claimed); + + for (;;) { + if (prev == bm->size()) { + return false; + } + + const BitMap::idx_t page_index = bm->find_first_set_bit(_claimed); + if (page_index == bm->size()) { + Atomic::cmpxchg(&_claimed, prev, page_index, memory_order_relaxed); + return false; + } + + const BitMap::idx_t res = Atomic::cmpxchg(&_claimed, prev, page_index + 1, memory_order_relaxed); + if (res != prev) { + // Someone else claimed + prev = res; + continue; + } + + // Found bit - look around for page or forwarding to scan + + ZForwarding* forwarding = nullptr; + if (ZGeneration::old()->is_phase_relocate()) { + forwarding = _old_forwarding_table->at(page_index); + } + + ZPage* page = _page_table->at(page_index); + if (page != nullptr && !page->is_old()) { + page = nullptr; + } + + if (page == nullptr && forwarding == nullptr) { + // Nothing to scan + continue; + } + + // Found old page or old forwarding + entry_addr->_forwarding = forwarding; + entry_addr->_page = page; + + return true; + } + } +}; + +// This task scans the remembered set and follows pointers when possible. +// Interleaving remembered set scanning with marking makes the marking times +// lower and more predictable. 
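Before the follow task declared below, note how `ZRemsetTableIterator::next()` above parallelizes over the found-old bitmap: workers share one cursor and claim one set bit at a time with a compare-and-swap, retrying from the winner's position on failure. A stand-alone model of that claiming protocol with standard atomics; `ClaimedBitIterator` and the `std::vector<bool>` bitmap are illustrative stand-ins, not the HotSpot `BitMap`:

```cpp
#include <atomic>
#include <cstddef>
#include <optional>
#include <vector>

// Model of the claim loop: the bitmap is read-only during iteration, and the
// only shared mutable state is the cursor that workers advance with a CAS.
class ClaimedBitIterator {
  const std::vector<bool>& _bits;       // stand-in for the "found old" bitmap
  std::atomic<size_t> _claimed{0};

  size_t find_first_set(size_t from) const {
    while (from < _bits.size() && !_bits[from]) {
      from++;
    }
    return from;                        // == size() when exhausted
  }

public:
  explicit ClaimedBitIterator(const std::vector<bool>& bits) : _bits(bits) {}

  std::optional<size_t> next() {
    size_t prev = _claimed.load();
    for (;;) {
      const size_t index = find_first_set(prev);
      if (index == _bits.size()) {
        return std::nullopt;            // no unclaimed set bits remain
      }
      // Try to advance the shared cursor past the bit we want to claim.
      if (_claimed.compare_exchange_strong(prev, index + 1)) {
        return index;                   // we own this slot
      }
      // CAS failed: prev now holds the winner's cursor; retry from there.
    }
  }
};
```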
+class ZRememberedScanMarkFollowTask : public ZRestartableTask { +private: + ZRemembered* const _remembered; + ZMark* const _mark; + ZRemsetTableIterator _remset_table_iterator; + +public: + ZRememberedScanMarkFollowTask(ZRemembered* remembered, ZMark* mark) : + ZRestartableTask("ZRememberedScanMarkFollowTask"), + _remembered(remembered), + _mark(mark), + _remset_table_iterator(remembered) { + _mark->prepare_work(); + _remembered->_page_allocator->enable_safe_destroy(); + _remembered->_page_allocator->enable_safe_recycle(); + } + + ~ZRememberedScanMarkFollowTask() { + _remembered->_page_allocator->disable_safe_recycle(); + _remembered->_page_allocator->disable_safe_destroy(); + _mark->finish_work(); + // We are done scanning the set of old pages. + // Clear the set for the next young collection. + _remembered->clear_found_old_previous_set(); + } + + virtual void work_inner() { + ZRememberedScanForwardingContext context; + + // Follow initial roots + if (!_mark->follow_work_partial()) { + // Bail + return; + } + + for (ZRemsetTableEntry entry; _remset_table_iterator.next(&entry);) { + bool left_marking = false; + ZForwarding* forwarding = entry._forwarding; + ZPage* page = entry._page; + + // Scan forwarding + if (forwarding != nullptr) { + bool found_roots = _remembered->scan_forwarding(forwarding, &context); + ZVerify::after_scan(forwarding); + if (found_roots) { + // Follow remembered set when possible + left_marking = !_mark->follow_work_partial(); + } + } + + // Scan page + if (page != nullptr) { + if (_remembered->should_scan_page(page)) { + // Visit all entries pointing into young gen + bool found_roots = _remembered->scan_page(page); + + // ... and as a side-effect clear the previous entries + if (ZVerifyRemembered) { + // Make sure self healing of pointers is ordered before clearing of + // the previous bits so that ZVerify::after_scan can detect missing + // remset entries accurately. + OrderAccess::storestore(); + } + page->clear_remset_previous(); + + if (found_roots && !left_marking) { + // Follow remembered set when possible + left_marking = !_mark->follow_work_partial(); + } + } + + // The remset scanning maintains the "maybe old" pages optimization. + // + // We maintain two sets of old pages: The first is the currently active + // set, where old pages are registered into. The second is the old + // read-only copy. The two sets flip during young mark start. This + // analogous to how we set and clean remembered set bits. + // + // The iterator reads from the read-only copy, and then here, we install + // entries in the current active set. + _remembered->register_found_old(page); + } + + SuspendibleThreadSet::yield(); + if (left_marking) { + // Bail + return; + } + } + + _mark->follow_work_complete(); + } + + virtual void work() { + SuspendibleThreadSetJoiner sts_joiner; + work_inner(); + // We might have found pointers into the other generation, and then we want to + // publish such marking stacks to prevent that generation from getting a mark continue. + // We also flush in case of a resize where a new worker thread continues the marking + // work, causing a mark continue for the collected generation. 
+ ZHeap::heap()->mark_flush_and_free(Thread::current()); + } + + virtual void resize_workers(uint nworkers) { + _mark->resize_workers(nworkers); + } +}; + +void ZRemembered::scan_and_follow(ZMark* mark) { + { + // Follow the object graph and lazily scan the remembered set + ZRememberedScanMarkFollowTask task(this, mark); + ZGeneration::young()->workers()->run(&task); + + // Try to terminate after following the graph + if (ZAbort::should_abort() || !mark->try_terminate_flush()) { + return; + } + } + + // If flushing failed, we have to restart marking again, but this time we don't need to + // scan the remembered set. + mark->mark_follow(); +} + +bool ZRemembered::scan_field(volatile zpointer* p) const { + assert(ZGeneration::young()->is_phase_mark(), "Wrong phase"); + + const zaddress addr = ZBarrier::remset_barrier_on_oop_field(p); + + if (!is_null(addr) && ZHeap::heap()->is_young(addr)) { + remember(p); + return true; + } + + return false; +} + +void ZRemembered::flip() { + ZRememberedSet::flip(); + flip_found_old_sets(); +} diff --git a/src/hotspot/share/gc/z/zRemembered.hpp b/src/hotspot/share/gc/z/zRemembered.hpp new file mode 100644 index 0000000000000..f7c74e8c53157 --- /dev/null +++ b/src/hotspot/share/gc/z/zRemembered.hpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_Z_ZREMEMBERED_HPP +#define SHARE_GC_Z_ZREMEMBERED_HPP + +#include "gc/z/zAddress.hpp" +#include "utilities/bitMap.hpp" + +template class GrowableArrayView; +class OopClosure; +class ZForwarding; +class ZForwardingTable; +class ZMark; +class ZPage; +class ZPageAllocator; +class ZPageTable; +struct ZRememberedSetContaining; + +class ZRemembered { + friend class ZRememberedScanMarkFollowTask; + friend class ZRemsetTableIterator; + +private: + ZPageTable* const _page_table; + const ZForwardingTable* const _old_forwarding_table; + ZPageAllocator* const _page_allocator; + + // Optimization aid for faster old pages iteration + struct FoundOld { + CHeapBitMap _allocated_bitmap_0; + CHeapBitMap _allocated_bitmap_1; + BitMap* const _bitmaps[2]; + int _current; + + FoundOld(); + + void flip(); + void clear_previous(); + + void register_page(ZPage* page); + + BitMap* current_bitmap(); + BitMap* previous_bitmap(); + } _found_old; + + // Old pages iteration optimization aid + void flip_found_old_sets(); + void clear_found_old_previous_set(); + + template + void oops_do_forwarded_via_containing(GrowableArrayView* array, Function function) const; + + bool should_scan_page(ZPage* page) const; + + bool scan_page(ZPage* page) const; + bool scan_forwarding(ZForwarding* forwarding, void* context) const; + +public: + ZRemembered(ZPageTable* page_table, + const ZForwardingTable* old_forwarding_table, + ZPageAllocator* page_allocator); + + // Add to remembered set + void remember(volatile zpointer* p) const; + + // Scan all remembered sets and follow + void scan_and_follow(ZMark* mark); + + // Save the current remembered sets, + // and switch over to empty remembered sets. + void flip(); + + // Scan a remembered set entry + bool scan_field(volatile zpointer* p) const; + + // Verification + bool is_remembered(volatile zpointer* p) const; + + // Register pages with the remembered set + void register_found_old(ZPage* page); +}; + +#endif // SHARE_GC_Z_ZREMEMBERED_HPP diff --git a/src/hotspot/share/gc/z/zRemembered.inline.hpp b/src/hotspot/share/gc/z/zRemembered.inline.hpp new file mode 100644 index 0000000000000..8b6cb24de0364 --- /dev/null +++ b/src/hotspot/share/gc/z/zRemembered.inline.hpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_Z_ZREMEMBERED_INLINE_HPP +#define SHARE_GC_Z_ZREMEMBERED_INLINE_HPP + +#include "gc/z/zRemembered.hpp" + +#include "gc/z/zBarrier.inline.hpp" +#include "gc/z/zHeap.inline.hpp" +#include "gc/z/zPage.inline.hpp" +#include "gc/z/zPageTable.inline.hpp" + +inline void ZRemembered::remember(volatile zpointer* p) const { + ZPage* page = _page_table->get(p); + assert(page != nullptr, "Page missing in page table"); + page->remember(p); +} + +inline bool ZRemembered::is_remembered(volatile zpointer* p) const { + ZPage* page = _page_table->get(p); + assert(page != nullptr, "Page missing in page table"); + return page->is_remembered(p); +} + +#endif // SHARE_GC_Z_ZREMEMBERED_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zRememberedSet.cpp b/src/hotspot/share/gc/z/zRememberedSet.cpp new file mode 100644 index 0000000000000..7c0ab9ad6cf89 --- /dev/null +++ b/src/hotspot/share/gc/z/zRememberedSet.cpp @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zBitMap.inline.hpp" +#include "gc/z/zHeap.inline.hpp" +#include "gc/z/zPage.inline.hpp" +#include "gc/z/zRememberedSet.hpp" +#include "logging/log.hpp" +#include "memory/allocation.hpp" +#include "memory/iterator.hpp" +#include "utilities/globalDefinitions.hpp" + +int ZRememberedSet::_current = 0; + +void ZRememberedSet::flip() { + _current ^= 1; +} + +ZRememberedSet::ZRememberedSet() : + _bitmap{ZMovableBitMap(), ZMovableBitMap()} { + // Defer initialization of the bitmaps until the owning + // page becomes old and its remembered set is initialized. +} + +bool ZRememberedSet::is_initialized() const { + return _bitmap[0].size() > 0; +} + +void ZRememberedSet::initialize(size_t page_size) { + assert(!is_initialized(), "precondition"); + const BitMap::idx_t size_in_bits = to_bit_size(page_size); + _bitmap[0].initialize(size_in_bits, true /* clear */); + _bitmap[1].initialize(size_in_bits, true /* clear */); +} + +void ZRememberedSet::resize(size_t page_size) { + // The bitmaps only need to be resized if remset has been + // initialized, and hence the bitmaps have been initialized. + if (is_initialized()) { + const BitMap::idx_t size_in_bits = to_bit_size(page_size); + + // The bitmaps need to be cleared when free, but since this function is + // only used for shrinking the clear argument is correct but not crucial. 
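As a rough sense of why the bitmaps handled by `initialize()` and `resize()` above are allocated lazily, only once a page becomes old: assuming one remembered-set bit per 8-byte heap word and a 2 MB small page, both assumptions made for illustration rather than taken from this patch, the pair of bitmaps costs about 64 KB per old page, roughly 3% of the page:

```cpp
#include <cstddef>
#include <cstdio>

// Back-of-the-envelope footprint of the two remembered-set bitmaps per old
// page. The slot granularity and page size below are assumptions for
// illustration only.
int main() {
  const size_t page_size      = 2 * 1024 * 1024;            // assumed small-page size
  const size_t bytes_per_slot = 8;                          // assumed: one bit per potential pointer slot
  const size_t bits           = page_size / bytes_per_slot; // 262,144 bits
  const size_t bytes_per_map  = bits / 8;                   // 32 KB
  const size_t total          = 2 * bytes_per_map;          // current + previous

  std::printf("bits per map: %zu, bytes per map: %zu, both maps: %zu (%.2f%% of page)\n",
              bits, bytes_per_map, total, 100.0 * total / page_size);
  return 0;
}
```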
+ assert(size_in_bits <= _bitmap[0].size(), "Only used for shrinking"); + _bitmap[0].resize(size_in_bits, true /* clear */); + _bitmap[1].resize(size_in_bits, true /* clear */); + } +} + +bool ZRememberedSet::is_cleared_current() const { + return current()->is_empty(); +} + +bool ZRememberedSet::is_cleared_previous() const { + return previous()->is_empty(); +} + +void ZRememberedSet::clear_all() { + clear_current(); + clear_previous(); +} + +void ZRememberedSet::clear_current() { + current()->clear_large(); +} + +void ZRememberedSet::clear_previous() { + previous()->clear_large(); +} + +void ZRememberedSet::swap_remset_bitmaps() { + assert(previous()->is_empty(), "Previous remset bits should be empty when swapping"); + current()->iterate([&](BitMap::idx_t index) { + previous()->set_bit(index); + return true; + }); + current()->clear_large(); +} + +ZBitMap::ReverseIterator ZRememberedSet::iterator_reverse_previous() { + return ZBitMap::ReverseIterator(previous()); +} + +BitMap::Iterator ZRememberedSet::iterator_limited_current(uintptr_t offset, size_t size) { + const size_t index = to_index(offset);; + const size_t bit_size = to_bit_size(size); + + return BitMap::Iterator(*current(), index, index + bit_size); +} + +ZBitMap::Iterator ZRememberedSet::iterator_limited_previous(uintptr_t offset, size_t size) { + const size_t index = to_index(offset);; + const size_t bit_size = to_bit_size(size); + + return BitMap::Iterator(*previous(), index, index + bit_size); +} + +size_t ZRememberedSetContainingIterator::to_index(zaddress_unsafe addr) { + const uintptr_t local_offset = _page->local_offset(addr); + return ZRememberedSet::to_index(local_offset); +} + +zaddress_unsafe ZRememberedSetContainingIterator::to_addr(BitMap::idx_t index) { + const uintptr_t local_offset = ZRememberedSet::to_offset(index); + return ZOffset::address_unsafe(_page->global_offset(local_offset)); +} + +ZRememberedSetContainingIterator::ZRememberedSetContainingIterator(ZPage* page) : + _page(page), + _remset_iter(page->remset_reverse_iterator_previous()), + _obj(zaddress_unsafe::null), + _obj_remset_iter(page->remset_reverse_iterator_previous()) {} + +bool ZRememberedSetContainingIterator::next(ZRememberedSetContaining* containing) { + // Note: to skip having to read the contents of the heap, when collecting the + // containing information, this code doesn't read the size of the objects and + // therefore doesn't filter out remset bits that belong to dead objects. + // The (addr, addr_field) pair will contain the nearest live object, of a + // given remset bit. Users of 'containing' need to do the filtering. + + BitMap::idx_t index; + + if (!is_null(_obj)) { + // We've already found a remset bit and likely owning object in the main + // iterator. Now use that information to skip having to search for the + // same object multiple times. + + if (_obj_remset_iter.next(&index)) { + containing->_field_addr = to_addr(index); + containing->_addr = _obj; + + log_develop_trace(gc, remset)("Remset Containing Obj index: " PTR_FORMAT " base: " PTR_FORMAT " field: " PTR_FORMAT, index, untype(containing->_addr), untype(containing->_field_addr)); + + return true; + } else { + // No more remset bits in the scanned object + _obj = zaddress_unsafe::null; + } + } + + // At this point, we don't know where the nearest earlier object starts. + // Search for the next earlier remset bit, and then search for the likely + // owning object. 
+ if (_remset_iter.next(&index)) { + containing->_field_addr = to_addr(index); + containing->_addr = _page->find_base((volatile zpointer*)untype(containing->_field_addr)); + + if (is_null(containing->_addr)) { + // Found no live object + return false; + } + + // Found live object. Not necessarily the one that originally owned the remset bit. + const BitMap::idx_t obj_index = to_index(containing->_addr); + + log_develop_trace(gc, remset)("Remset Containing Main index: " PTR_FORMAT " base: " PTR_FORMAT " field: " PTR_FORMAT, index, untype(containing->_addr), untype(containing->_field_addr)); + + // Don't scan inside the object in the main iterator + _remset_iter.reset(obj_index); + + // Scan inside the object iterator + _obj = containing->_addr; + _obj_remset_iter.reset(obj_index, index); + + return true; + } + + return false; +} + +ZRememberedSetContainingInLiveIterator::ZRememberedSetContainingInLiveIterator(ZPage* page) : + _iter(page), + _addr(zaddress::null), + _addr_size(0), + _count(0), + _count_skipped(0), + _page(page) {} + +bool ZRememberedSetContainingInLiveIterator::next(ZRememberedSetContaining* containing) { + ZRememberedSetContaining local; + while (_iter.next(&local)) { + const zaddress local_addr = safe(local._addr); + if (local_addr != _addr) { + _addr = local_addr; + _addr_size = ZUtils::object_size(_addr); + } + + const size_t field_offset = safe(local._field_addr) - _addr; + if (field_offset < _addr_size) { + *containing = local; + _count++; + return true; + } + + // Skip field outside object + _count_skipped++; + } + + // No more entries found + return false; +} + +void ZRememberedSetContainingInLiveIterator::print_statistics() const { + _page->log_msg(" (remembered iter count: " SIZE_FORMAT " skipped: " SIZE_FORMAT ")", _count, _count_skipped); +} diff --git a/src/hotspot/share/gc/z/zRememberedSet.hpp b/src/hotspot/share/gc/z/zRememberedSet.hpp new file mode 100644 index 0000000000000..d18c357f0ddbe --- /dev/null +++ b/src/hotspot/share/gc/z/zRememberedSet.hpp @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_Z_ZREMEMBEREDSET_HPP +#define SHARE_GC_Z_ZREMEMBEREDSET_HPP + +#include "gc/z/zAddress.hpp" +#include "gc/z/zBitMap.hpp" + +class OopClosure; +class ZPage; + +struct ZRememberedSetContaining { + zaddress_unsafe _field_addr; + zaddress_unsafe _addr; +}; + +// Iterates over all (object, oop fields) pairs where the field address has +// been marked as remembered, and fill in that information in a +// ZRememberedSetContaining +// +// Note that it's not guaranteed that _field_addr belongs to the recorded +// _addr. The entry could denote a stale remembered set field and _addr could +// just be the nearest object. The users are responsible for filtering that +// out. +class ZRememberedSetContainingIterator { +private: + ZPage* const _page; + ZBitMap::ReverseIterator _remset_iter; + + zaddress_unsafe _obj; + ZBitMap::ReverseIterator _obj_remset_iter; + + size_t to_index(zaddress_unsafe addr); + zaddress_unsafe to_addr(BitMap::idx_t index); + +public: + ZRememberedSetContainingIterator(ZPage* page); + + bool next(ZRememberedSetContaining* containing); +}; + +// Like ZRememberedSetContainingIterator, but with stale remembered set fields +// filtered out. +class ZRememberedSetContainingInLiveIterator { +private: + ZRememberedSetContainingIterator _iter; + zaddress _addr; + size_t _addr_size; + size_t _count; + size_t _count_skipped; + ZPage* const _page; + +public: + ZRememberedSetContainingInLiveIterator(ZPage* page); + + bool next(ZRememberedSetContaining* containing); + + void print_statistics() const; +}; + +// The remembered set of a ZPage. +// +// There's one bit per potential object field address within the ZPage. +// +// New entries are added to the "current" active bitmap, while the +// "previous" bitmap is used by the GC to find pointers from old +// gen to young gen. +class ZRememberedSet { + friend class ZRememberedSetContainingIterator; + +public: + static int _current; + + ZMovableBitMap _bitmap[2]; + + CHeapBitMap* current(); + const CHeapBitMap* current() const; + + CHeapBitMap* previous(); + const CHeapBitMap* previous() const; + + template + void iterate_bitmap(Function function, CHeapBitMap* bitmap); + + static uintptr_t to_offset(BitMap::idx_t index); + static BitMap::idx_t to_index(uintptr_t offset); + static BitMap::idx_t to_bit_size(size_t size); + +public: + static void flip(); + + ZRememberedSet(); + + bool is_initialized() const; + void initialize(size_t page_size); + + void resize(size_t page_size); + + bool at_current(uintptr_t offset) const; + bool at_previous(uintptr_t offset) const; + bool set_current(uintptr_t offset); + void unset_non_par_current(uintptr_t offset); + void unset_range_non_par_current(uintptr_t offset, size_t size); + + // Visit all set offsets. 
+ template + void iterate_previous(Function function); + + template + void iterate_current(Function function); + + bool is_cleared_current() const; + bool is_cleared_previous() const; + + void clear_all(); + void clear_current(); + void clear_previous(); + void swap_remset_bitmaps(); + + ZBitMap::ReverseIterator iterator_reverse_previous(); + BitMap::Iterator iterator_limited_current(uintptr_t offset, size_t size); + BitMap::Iterator iterator_limited_previous(uintptr_t offset, size_t size); +}; + +#endif // SHARE_GC_Z_ZREMEMBEREDSET_HPP diff --git a/src/hotspot/share/gc/z/zRememberedSet.inline.hpp b/src/hotspot/share/gc/z/zRememberedSet.inline.hpp new file mode 100644 index 0000000000000..a4f5ac72075ba --- /dev/null +++ b/src/hotspot/share/gc/z/zRememberedSet.inline.hpp @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_Z_ZREMEMBEREDSET_INLINE_HPP +#define SHARE_GC_Z_ZREMEMBEREDSET_INLINE_HPP + +#include "gc/z/zRememberedSet.hpp" + +#include "utilities/bitMap.inline.hpp" + +inline CHeapBitMap* ZRememberedSet::current() { + return &_bitmap[_current]; +} + +inline const CHeapBitMap* ZRememberedSet::current() const { + return &_bitmap[_current]; +} + +inline CHeapBitMap* ZRememberedSet::previous() { + return &_bitmap[_current ^ 1]; +} + +inline const CHeapBitMap* ZRememberedSet::previous() const { + return &_bitmap[_current ^ 1]; +} + +inline uintptr_t ZRememberedSet::to_offset(BitMap::idx_t index) { + // One bit per possible oop* address + return index * oopSize; +} + +inline BitMap::idx_t ZRememberedSet::to_index(uintptr_t offset) { + // One bit per possible oop* address + return offset / oopSize; +} + +inline BitMap::idx_t ZRememberedSet::to_bit_size(size_t size) { + return size / oopSize; +} + +inline bool ZRememberedSet::at_current(uintptr_t offset) const { + const BitMap::idx_t index = to_index(offset); + return current()->at(index); +} + +inline bool ZRememberedSet::at_previous(uintptr_t offset) const { + const BitMap::idx_t index = to_index(offset); + return previous()->at(index); +} + +inline bool ZRememberedSet::set_current(uintptr_t offset) { + const BitMap::idx_t index = to_index(offset); + return current()->par_set_bit(index, memory_order_relaxed); +} + +inline void ZRememberedSet::unset_non_par_current(uintptr_t offset) { + const BitMap::idx_t index = to_index(offset); + current()->clear_bit(index); +} + +inline void ZRememberedSet::unset_range_non_par_current(uintptr_t offset, size_t size) { + const BitMap::idx_t start_index = to_index(offset); + const BitMap::idx_t end_index = to_index(offset + size); + current()->clear_range(start_index, end_index); +} + +template +void ZRememberedSet::iterate_bitmap(Function function, CHeapBitMap* bitmap) { + bitmap->iterate([&](BitMap::idx_t index) { + const uintptr_t offset = to_offset(index); + + function(offset); + + return true; + }); +} + +template +void ZRememberedSet::iterate_previous(Function function) { + iterate_bitmap(function, previous()); +} + +template +void ZRememberedSet::iterate_current(Function function) { + iterate_bitmap(function, current()); +} + +#endif // SHARE_GC_Z_ZREMEMBEREDSET_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zRootsIterator.cpp b/src/hotspot/share/gc/z/zRootsIterator.cpp index 08f10fdf0efee..6f9f37fb5dba5 100644 --- a/src/hotspot/share/gc/z/zRootsIterator.cpp +++ b/src/hotspot/share/gc/z/zRootsIterator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
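Aside: the remembered set introduced above keeps two bitmaps per page, with one bit per oopSize-aligned field offset, and flips which bitmap is "current" at the start of a young collection. The standalone sketch below (not HotSpot code) models the to_index()/to_offset() arithmetic and the _current ^ 1 selection shown in zRememberedSet.inline.hpp, assuming a 64-bit oop slot of 8 bytes.

// Standalone sketch, not HotSpot code. Models the double-bitmap remembered
// set: one bit per 8-byte-aligned field offset, XOR flip between the
// "current" and "previous" bitmaps.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

namespace sketch {

constexpr std::size_t kOopSize = 8;   // assumed 64-bit oop slot size

struct RememberedSet {
  std::vector<bool> bitmap[2];        // stands in for the two ZMovableBitMaps
  int current_index = 0;              // stands in for ZRememberedSet::_current

  void initialize(std::size_t page_size) {
    bitmap[0].assign(page_size / kOopSize, false);
    bitmap[1].assign(page_size / kOopSize, false);
  }

  std::vector<bool>& current()  { return bitmap[current_index]; }
  std::vector<bool>& previous() { return bitmap[current_index ^ 1]; }

  static std::size_t to_index(std::uintptr_t offset) { return offset / kOopSize; }
  static std::uintptr_t to_offset(std::size_t index) { return index * kOopSize; }

  void set_current(std::uintptr_t offset) { current()[to_index(offset)] = true; }
  bool at_previous(std::uintptr_t offset) { return previous()[to_index(offset)]; }

  // flip(): what was "current" becomes "previous" for the GC to scan,
  // while the other (cleared) bitmap starts collecting new entries.
  void flip() { current_index ^= 1; }
};

} // namespace sketch

int main() {
  sketch::RememberedSet rs;
  rs.initialize(2 * 1024 * 1024);   // e.g. a 2M page
  rs.set_current(64);               // remember the field at offset 64
  rs.flip();                        // collection starts: swap bitmap roles
  assert(rs.at_previous(64));       // the GC now finds it in "previous"
  return 0;
}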
* * This code is free software; you can redistribute it and/or modify it @@ -35,11 +35,59 @@ #include "runtime/safepoint.hpp" #include "utilities/debug.hpp" -static const ZStatSubPhase ZSubPhaseConcurrentRootsOopStorageSet("Concurrent Roots OopStorageSet"); -static const ZStatSubPhase ZSubPhaseConcurrentRootsClassLoaderDataGraph("Concurrent Roots ClassLoaderDataGraph"); -static const ZStatSubPhase ZSubPhaseConcurrentRootsJavaThreads("Concurrent Roots JavaThreads"); -static const ZStatSubPhase ZSubPhaseConcurrentRootsCodeCache("Concurrent Roots CodeCache"); -static const ZStatSubPhase ZSubPhaseConcurrentWeakRootsOopStorageSet("Concurrent Weak Roots OopStorageSet"); +class ZRootStatSubPhase { +private: + ZStatSubPhase _young; + ZStatSubPhase _old; + +public: + ZRootStatSubPhase(const char* name) : + _young(name, ZGenerationId::young), + _old(name, ZGenerationId::old) {} + + const ZStatSubPhase& young() const { return _young; } + const ZStatSubPhase& old() const { return _old; } +}; + +static const ZRootStatSubPhase ZSubPhaseConcurrentRootsOopStorageSet("Concurrent Roots OopStorageSet"); +static const ZRootStatSubPhase ZSubPhaseConcurrentRootsClassLoaderDataGraph("Concurrent Roots ClassLoaderDataGraph"); +static const ZRootStatSubPhase ZSubPhaseConcurrentRootsJavaThreads("Concurrent Roots JavaThreads"); +static const ZRootStatSubPhase ZSubPhaseConcurrentRootsCodeCache("Concurrent Roots CodeCache"); +static const ZRootStatSubPhase ZSubPhaseConcurrentWeakRootsOopStorageSet("Concurrent Weak Roots OopStorageSet"); + +class ZRootStatTimer { +private: + const ZStatPhase* _phase; + const Ticks _start; + + ZRootStatTimer(const ZStatPhase* phase) : + _phase(phase), + _start(Ticks::now()) { + if (phase != nullptr) { + _phase->register_start(nullptr /* timer */, _start); + } + } + +public: + ~ZRootStatTimer() { + if (_phase != nullptr) { + const Ticks end = Ticks::now(); + _phase->register_end(nullptr /* timer */, _start, end); + } + } + + static const ZStatSubPhase* calculate_subphase(const ZGenerationIdOptional generation, const ZRootStatSubPhase& subphase) { + switch (generation) { + case ZGenerationIdOptional::young: return &subphase.young(); + case ZGenerationIdOptional::old: return &subphase.old(); + default: return nullptr; + } + } + +public: + ZRootStatTimer(const ZRootStatSubPhase& subphase, const ZGenerationIdOptional generation) : + ZRootStatTimer(calculate_subphase(generation, subphase)) {} +}; template template @@ -52,91 +100,107 @@ void ZParallelApply::apply(ClosureType* cl) { } } -ZStrongOopStorageSetIterator::ZStrongOopStorageSetIterator() : - _iter() {} - -void ZStrongOopStorageSetIterator::apply(OopClosure* cl) { - ZStatTimer timer(ZSubPhaseConcurrentRootsOopStorageSet); +void ZOopStorageSetIteratorStrong::apply(OopClosure* cl) { + ZRootStatTimer timer(ZSubPhaseConcurrentWeakRootsOopStorageSet, _generation); _iter.oops_do(cl); } -void ZStrongCLDsIterator::apply(CLDClosure* cl) { - ZStatTimer timer(ZSubPhaseConcurrentRootsClassLoaderDataGraph); +void ZCLDsIteratorStrong::apply(CLDClosure* cl) { + ZRootStatTimer timer(ZSubPhaseConcurrentRootsClassLoaderDataGraph, _generation); ClassLoaderDataGraph::always_strong_cld_do(cl); } -ZJavaThreadsIterator::ZJavaThreadsIterator() : - _threads(), - _claimed(0) {} +void ZCLDsIteratorWeak::apply(CLDClosure* cl) { + ZRootStatTimer timer(ZSubPhaseConcurrentRootsClassLoaderDataGraph, _generation); + ClassLoaderDataGraph::roots_cld_do(nullptr /* strong */, cl /* weak */); +} + +void ZCLDsIteratorAll::apply(CLDClosure* cl) { + ZRootStatTimer 
timer(ZSubPhaseConcurrentRootsClassLoaderDataGraph, _generation); + ClassLoaderDataGraph::cld_do(cl); +} uint ZJavaThreadsIterator::claim() { - return Atomic::fetch_and_add(&_claimed, 1u); + return Atomic::fetch_then_add(&_claimed, 1u); } void ZJavaThreadsIterator::apply(ThreadClosure* cl) { - ZStatTimer timer(ZSubPhaseConcurrentRootsJavaThreads); + ZRootStatTimer timer(ZSubPhaseConcurrentRootsJavaThreads, _generation); // The resource mark is needed because interpreter oop maps are // not reused in concurrent mode. Instead, they are temporary and // resource allocated. - ResourceMark _rm; + ResourceMark rm; for (uint i = claim(); i < _threads.length(); i = claim()) { cl->do_thread(_threads.thread_at(i)); } } -ZNMethodsIterator::ZNMethodsIterator() { - if (!ClassUnloading) { - ZNMethod::nmethods_do_begin(); +ZNMethodsIteratorImpl::ZNMethodsIteratorImpl(ZGenerationIdOptional generation, bool enabled, bool secondary) : + _enabled(enabled), + _secondary(secondary), + _generation(generation) { + if (_enabled) { + ZNMethod::nmethods_do_begin(secondary); } } -ZNMethodsIterator::~ZNMethodsIterator() { - if (!ClassUnloading) { - ZNMethod::nmethods_do_end(); +ZNMethodsIteratorImpl::~ZNMethodsIteratorImpl() { + if (_enabled) { + ZNMethod::nmethods_do_end(_secondary); } } -void ZNMethodsIterator::apply(NMethodClosure* cl) { - ZStatTimer timer(ZSubPhaseConcurrentRootsCodeCache); - ZNMethod::nmethods_do(cl); +void ZNMethodsIteratorImpl::apply(NMethodClosure* cl) { + ZRootStatTimer timer(ZSubPhaseConcurrentRootsCodeCache, _generation); + ZNMethod::nmethods_do(_secondary, cl); } -ZRootsIterator::ZRootsIterator(int cld_claim) { - if (cld_claim != ClassLoaderData::_claim_none) { - ClassLoaderDataGraph::verify_claimed_marks_cleared(cld_claim); - } +void ZRootsIteratorStrongColored::apply(OopClosure* cl, + CLDClosure* cld_cl) { + _oop_storage_set_strong.apply(cl); + _clds_strong.apply(cld_cl); } -void ZRootsIterator::apply(OopClosure* cl, - CLDClosure* cld_cl, - ThreadClosure* thread_cl, - NMethodClosure* nm_cl) { - _oop_storage_set.apply(cl); - _class_loader_data_graph.apply(cld_cl); +void ZRootsIteratorStrongUncolored::apply(ThreadClosure* thread_cl, + NMethodClosure* nm_cl) { _java_threads.apply(thread_cl); if (!ClassUnloading) { - _nmethods.apply(nm_cl); + _nmethods_strong.apply(nm_cl); } } -ZWeakOopStorageSetIterator::ZWeakOopStorageSetIterator() : - _iter() {} +void ZRootsIteratorWeakUncolored::apply(NMethodClosure* nm_cl) { + _nmethods_weak.apply(nm_cl); +} -void ZWeakOopStorageSetIterator::apply(OopClosure* cl) { - ZStatTimer timer(ZSubPhaseConcurrentWeakRootsOopStorageSet); +void ZOopStorageSetIteratorWeak::apply(OopClosure* cl) { + ZRootStatTimer timer(ZSubPhaseConcurrentWeakRootsOopStorageSet, _generation); _iter.oops_do(cl); } -void ZWeakOopStorageSetIterator::report_num_dead() { +void ZOopStorageSetIteratorWeak::report_num_dead() { _iter.report_num_dead(); } -void ZWeakRootsIterator::report_num_dead() { - _oop_storage_set.iter().report_num_dead(); +void ZRootsIteratorWeakColored::report_num_dead() { + _oop_storage_set_weak.iter().report_num_dead(); } -void ZWeakRootsIterator::apply(OopClosure* cl) { - _oop_storage_set.apply(cl); +void ZRootsIteratorWeakColored::apply(OopClosure* cl) { + _oop_storage_set_weak.apply(cl); +} + +void ZRootsIteratorAllColored::apply(OopClosure* cl, + CLDClosure* cld_cl) { + _oop_storage_set_strong.apply(cl); + _oop_storage_set_weak.apply(cl); + _clds_all.apply(cld_cl); +} + +void ZRootsIteratorAllUncolored::apply(ThreadClosure* thread_cl, + NMethodClosure* nm_cl) { + 
_java_threads.apply(thread_cl); + _nmethods_all.apply(nm_cl); } diff --git a/src/hotspot/share/gc/z/zRootsIterator.hpp b/src/hotspot/share/gc/z/zRootsIterator.hpp index 100089056ad96..ca30cf8080cc8 100644 --- a/src/hotspot/share/gc/z/zRootsIterator.hpp +++ b/src/hotspot/share/gc/z/zRootsIterator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #define SHARE_GC_Z_ZROOTSITERATOR_HPP #include "gc/shared/oopStorageSetParState.hpp" +#include "gc/z/zGenerationId.hpp" #include "logging/log.hpp" #include "memory/iterator.hpp" #include "runtime/threadSMR.hpp" @@ -36,8 +37,8 @@ class ZParallelApply { volatile bool _completed; public: - ZParallelApply() : - _iter(), + ZParallelApply(ZGenerationIdOptional generation) : + _iter(generation), _completed(false) {} template @@ -48,77 +49,196 @@ class ZParallelApply { } }; -class ZStrongOopStorageSetIterator { +class ZOopStorageSetIteratorStrong { +private: OopStorageSetStrongParState _iter; + const ZGenerationIdOptional _generation; + +public: + ZOopStorageSetIteratorStrong(ZGenerationIdOptional generation) : + _iter(), + _generation(generation) {} + + void apply(OopClosure* cl); +}; + +class ZOopStorageSetIteratorWeak { +private: + OopStorageSetWeakParState _iter; + const ZGenerationIdOptional _generation; public: - ZStrongOopStorageSetIterator(); + ZOopStorageSetIteratorWeak(ZGenerationIdOptional generation) : + _iter(), + _generation(generation) {} void apply(OopClosure* cl); + + void report_num_dead(); }; -class ZStrongCLDsIterator { +class ZCLDsIteratorStrong { +private: + const ZGenerationIdOptional _generation; + public: + ZCLDsIteratorStrong(ZGenerationIdOptional generation) : + _generation(generation) {} + + void apply(CLDClosure* cl); +}; + +class ZCLDsIteratorWeak { +private: + const ZGenerationIdOptional _generation; + +public: + ZCLDsIteratorWeak(ZGenerationIdOptional generation) : + _generation(generation) {} + + void apply(CLDClosure* cl); +}; + +class ZCLDsIteratorAll { +private: + const ZGenerationIdOptional _generation; + +public: + ZCLDsIteratorAll(ZGenerationIdOptional generation) : + _generation(generation) {} + void apply(CLDClosure* cl); }; class ZJavaThreadsIterator { private: - ThreadsListHandle _threads; - volatile uint _claimed; + ThreadsListHandle _threads; + volatile uint _claimed; + const ZGenerationIdOptional _generation; uint claim(); public: - ZJavaThreadsIterator(); + ZJavaThreadsIterator(ZGenerationIdOptional generation) : + _threads(), + _claimed(0), + _generation(generation) {} void apply(ThreadClosure* cl); }; -class ZNMethodsIterator { -public: - ZNMethodsIterator(); - ~ZNMethodsIterator(); +class ZNMethodsIteratorImpl { +private: + const bool _enabled; + const bool _secondary; + const ZGenerationIdOptional _generation; +protected: + ZNMethodsIteratorImpl(ZGenerationIdOptional generation, bool enabled, bool secondary); + ~ZNMethodsIteratorImpl(); + +public: void apply(NMethodClosure* cl); }; -class ZRootsIterator { +class ZNMethodsIteratorStrong : public ZNMethodsIteratorImpl { +public: + ZNMethodsIteratorStrong(ZGenerationIdOptional generation) : + ZNMethodsIteratorImpl(generation, !ClassUnloading /* enabled */, false /* secondary */) {} +}; + +class ZNMethodsIteratorWeak : public ZNMethodsIteratorImpl { +public: + 
ZNMethodsIteratorWeak(ZGenerationIdOptional generation) : + ZNMethodsIteratorImpl(generation, true /* enabled */, true /* secondary */) {} +}; + +class ZNMethodsIteratorAll : public ZNMethodsIteratorImpl { +public: + ZNMethodsIteratorAll(ZGenerationIdOptional generation) : + ZNMethodsIteratorImpl(generation, true /* enabled */, true /* secondary */) {} +}; + +class ZRootsIteratorStrongUncolored { private: - ZParallelApply _oop_storage_set; - ZParallelApply _class_loader_data_graph; - ZParallelApply _java_threads; - ZParallelApply _nmethods; + ZParallelApply _java_threads; + ZParallelApply _nmethods_strong; public: - ZRootsIterator(int cld_claim); + ZRootsIteratorStrongUncolored(ZGenerationIdOptional generation) : + _java_threads(generation), + _nmethods_strong(generation) {} - void apply(OopClosure* cl, - CLDClosure* cld_cl, - ThreadClosure* thread_cl, + void apply(ThreadClosure* thread_cl, NMethodClosure* nm_cl); }; -class ZWeakOopStorageSetIterator { +class ZRootsIteratorWeakUncolored { private: - OopStorageSetWeakParState _iter; + ZParallelApply _nmethods_weak; public: - ZWeakOopStorageSetIterator(); + ZRootsIteratorWeakUncolored(ZGenerationIdOptional generation) : + _nmethods_weak(generation) {} - void apply(OopClosure* cl); + void apply(NMethodClosure* nm_cl); +}; - void report_num_dead(); +class ZRootsIteratorAllUncolored { +private: + ZParallelApply _java_threads; + ZParallelApply _nmethods_all; + +public: + ZRootsIteratorAllUncolored(ZGenerationIdOptional generation) : + _java_threads(generation), + _nmethods_all(generation) {} + + void apply(ThreadClosure* thread_cl, + NMethodClosure* nm_cl); +}; + +class ZRootsIteratorStrongColored { +private: + ZParallelApply _oop_storage_set_strong; + ZParallelApply _clds_strong; + +public: + ZRootsIteratorStrongColored(ZGenerationIdOptional generation) : + _oop_storage_set_strong(generation), + _clds_strong(generation) {} + + void apply(OopClosure* cl, + CLDClosure* cld_cl); }; -class ZWeakRootsIterator { +class ZRootsIteratorWeakColored { private: - ZParallelApply _oop_storage_set; + ZParallelApply _oop_storage_set_weak; public: + ZRootsIteratorWeakColored(ZGenerationIdOptional generation) : + _oop_storage_set_weak(generation) {} + void apply(OopClosure* cl); void report_num_dead(); }; +class ZRootsIteratorAllColored { +private: + ZParallelApply _oop_storage_set_strong; + ZParallelApply _oop_storage_set_weak; + ZParallelApply _clds_all; + +public: + ZRootsIteratorAllColored(ZGenerationIdOptional generation) : + _oop_storage_set_strong(generation), + _oop_storage_set_weak(generation), + _clds_all(generation) {} + + void apply(OopClosure* cl, + CLDClosure* cld_cl); +}; + #endif // SHARE_GC_Z_ZROOTSITERATOR_HPP diff --git a/src/hotspot/share/gc/z/zRuntimeWorkers.cpp b/src/hotspot/share/gc/z/zRuntimeWorkers.cpp index 568b9b778ce90..d11adb5a04461 100644 --- a/src/hotspot/share/gc/z/zRuntimeWorkers.cpp +++ b/src/hotspot/share/gc/z/zRuntimeWorkers.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
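Aside: ZJavaThreadsIterator above hands out threads to GC workers by claiming indices with Atomic::fetch_then_add, so each Java thread is visited exactly once no matter how many workers call apply() concurrently. A minimal standalone model of that claim loop (not HotSpot code), using std::atomic in place of HotSpot's Atomic class:

// Standalone sketch of the claim()-based work distribution.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  const unsigned num_items = 16;          // stands in for _threads.length()
  std::atomic<unsigned> claimed{0};       // stands in for _claimed

  auto claim = [&]() { return claimed.fetch_add(1u); };  // ~ Atomic::fetch_then_add

  std::atomic<unsigned> visited{0};
  auto worker = [&]() {
    for (unsigned i = claim(); i < num_items; i = claim()) {
      visited.fetch_add(1u);              // ~ cl->do_thread(_threads.thread_at(i))
    }
  };

  std::vector<std::thread> workers;
  for (int t = 0; t < 4; t++) {
    workers.emplace_back(worker);
  }
  for (auto& w : workers) {
    w.join();
  }

  std::printf("visited %u of %u items\n", visited.load(), num_items);
  return 0;
}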
* * This code is free software; you can redistribute it and/or modify it @@ -24,39 +24,9 @@ #include "precompiled.hpp" #include "gc/shared/gcLogPrecious.hpp" #include "gc/shared/gc_globals.hpp" -#include "gc/z/zLock.inline.hpp" #include "gc/z/zRuntimeWorkers.hpp" -#include "gc/z/zTask.hpp" -#include "gc/z/zThread.hpp" #include "runtime/java.hpp" -class ZRuntimeWorkersInitializeTask : public WorkerTask { -private: - const uint _nworkers; - uint _started; - ZConditionLock _lock; - -public: - ZRuntimeWorkersInitializeTask(uint nworkers) : - WorkerTask("ZRuntimeWorkersInitializeTask"), - _nworkers(nworkers), - _started(0), - _lock() {} - - virtual void work(uint worker_id) { - // Wait for all threads to start - ZLocker locker(&_lock); - if (++_started == _nworkers) { - // All threads started - _lock.notify_all(); - } else { - while (_started != _nworkers) { - _lock.wait(); - } - } - } -}; - ZRuntimeWorkers::ZRuntimeWorkers() : _workers("RuntimeWorker", ParallelGCThreads) { @@ -69,11 +39,6 @@ ZRuntimeWorkers::ZRuntimeWorkers() : if (_workers.active_workers() != _workers.max_workers()) { vm_exit_during_initialization("Failed to create ZRuntimeWorkers"); } - - // Execute task to reduce latency in early safepoints, - // which otherwise would have to take on any warmup costs. - ZRuntimeWorkersInitializeTask task(_workers.max_workers()); - _workers.run_task(&task); } WorkerThreads* ZRuntimeWorkers::workers() { diff --git a/src/hotspot/share/gc/z/zSafeDelete.hpp b/src/hotspot/share/gc/z/zSafeDelete.hpp index 62c3495c57b2c..b0e5f99e57b20 100644 --- a/src/hotspot/share/gc/z/zSafeDelete.hpp +++ b/src/hotspot/share/gc/z/zSafeDelete.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,44 +25,25 @@ #define SHARE_GC_Z_ZSAFEDELETE_HPP #include "gc/z/zArray.hpp" -#include "gc/z/zLock.hpp" #include template -class ZSafeDeleteImpl { +class ZSafeDelete { private: using ItemT = std::remove_extent_t; - ZLock* _lock; - uint64_t _enabled; - ZArray _deferred; + ZActivatedArray _deferred; - bool deferred_delete(ItemT* item); - void immediate_delete(ItemT* item); + static void immediate_delete(ItemT* item); public: - ZSafeDeleteImpl(ZLock* lock); + explicit ZSafeDelete(bool locked = true); void enable_deferred_delete(); void disable_deferred_delete(); - void operator()(ItemT* item); -}; - -template -class ZSafeDelete : public ZSafeDeleteImpl { -private: - ZLock _lock; - -public: - ZSafeDelete(); -}; - -template -class ZSafeDeleteNoLock : public ZSafeDeleteImpl { -public: - ZSafeDeleteNoLock(); + void schedule_delete(ItemT* item); }; #endif // SHARE_GC_Z_ZSAFEDELETE_HPP diff --git a/src/hotspot/share/gc/z/zSafeDelete.inline.hpp b/src/hotspot/share/gc/z/zSafeDelete.inline.hpp index 460193827e055..e894d555c0a4c 100644 --- a/src/hotspot/share/gc/z/zSafeDelete.inline.hpp +++ b/src/hotspot/share/gc/z/zSafeDelete.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
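Aside: the reworked ZSafeDelete above delegates to ZActivatedArray: while deferred deletion is enabled, scheduled items are parked instead of freed, and they are reclaimed when the matching disable call closes the last activation. A rough standalone approximation of that behavior (mutex-based, not the actual ZActivatedArray implementation):

// Standalone sketch of the deferred-delete pattern; assumes a simple
// lock-protected list rather than HotSpot's ZActivatedArray.
#include <mutex>
#include <vector>

template <typename T>
class SafeDelete {
private:
  std::mutex      _lock;
  unsigned        _activations = 0;
  std::vector<T*> _deferred;

public:
  void enable_deferred_delete() {          // ~ _deferred.activate()
    std::lock_guard<std::mutex> guard(_lock);
    _activations++;
  }

  void disable_deferred_delete() {         // ~ _deferred.deactivate_and_apply(...)
    std::vector<T*> to_delete;
    {
      std::lock_guard<std::mutex> guard(_lock);
      if (--_activations == 0) {
        to_delete.swap(_deferred);
      }
    }
    for (T* item : to_delete) {
      delete item;                         // immediate_delete() outside the lock
    }
  }

  void schedule_delete(T* item) {          // ~ add_if_activated(), else delete now
    {
      std::lock_guard<std::mutex> guard(_lock);
      if (_activations > 0) {
        _deferred.push_back(item);
        return;
      }
    }
    delete item;
  }
};

// Usage: enable_deferred_delete() before walking a structure that may unlink
// nodes concurrently, schedule_delete() for each unlinked node, and
// disable_deferred_delete() afterwards to reclaim them safely.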
* * This code is free software; you can redistribute it and/or modify it @@ -27,29 +27,15 @@ #include "gc/z/zSafeDelete.hpp" #include "gc/z/zArray.inline.hpp" -#include "utilities/debug.hpp" #include template -ZSafeDeleteImpl::ZSafeDeleteImpl(ZLock* lock) : - _lock(lock), - _enabled(0), - _deferred() {} +ZSafeDelete::ZSafeDelete(bool locked) : + _deferred(locked) {} template -bool ZSafeDeleteImpl::deferred_delete(ItemT* item) { - ZLocker locker(_lock); - if (_enabled > 0) { - _deferred.append(item); - return true; - } - - return false; -} - -template -void ZSafeDeleteImpl::immediate_delete(ItemT* item) { +void ZSafeDelete::immediate_delete(ItemT* item) { if (std::is_array::value) { delete [] item; } else { @@ -58,43 +44,20 @@ void ZSafeDeleteImpl::immediate_delete(ItemT* item) { } template -void ZSafeDeleteImpl::enable_deferred_delete() { - ZLocker locker(_lock); - _enabled++; +void ZSafeDelete::enable_deferred_delete() { + _deferred.activate(); } template -void ZSafeDeleteImpl::disable_deferred_delete() { - ZArray deferred; - - { - ZLocker locker(_lock); - assert(_enabled > 0, "Invalid state"); - if (--_enabled == 0) { - deferred.swap(&_deferred); - } - } - - ZArrayIterator iter(&deferred); - for (ItemT* item; iter.next(&item);) { - immediate_delete(item); - } +void ZSafeDelete::disable_deferred_delete() { + _deferred.deactivate_and_apply(immediate_delete); } template -void ZSafeDeleteImpl::operator()(ItemT* item) { - if (!deferred_delete(item)) { +void ZSafeDelete::schedule_delete(ItemT* item) { + if (!_deferred.add_if_activated(item)) { immediate_delete(item); } } -template -ZSafeDelete::ZSafeDelete() : - ZSafeDeleteImpl(&_lock), - _lock() {} - -template -ZSafeDeleteNoLock::ZSafeDeleteNoLock() : - ZSafeDeleteImpl(NULL) {} - #endif // SHARE_GC_Z_ZSAFEDELETE_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zServiceability.cpp b/src/hotspot/share/gc/z/zServiceability.cpp index c708d0d0c479d..b20da7ff5bf80 100644 --- a/src/hotspot/share/gc/z/zServiceability.cpp +++ b/src/hotspot/share/gc/z/zServiceability.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,11 +25,32 @@ #include "gc/shared/generationCounters.hpp" #include "gc/shared/hSpaceCounters.hpp" #include "gc/z/zCollectedHeap.hpp" +#include "gc/z/zDriver.hpp" #include "gc/z/zHeap.inline.hpp" #include "gc/z/zServiceability.hpp" #include "memory/metaspaceCounters.hpp" #include "runtime/perfData.hpp" +struct ZMemoryUsageInfo { + size_t _young_used; + size_t _young_capacity; + size_t _old_used; + size_t _old_capacity; +}; + +static ZMemoryUsageInfo compute_memory_usage_info() { + const size_t capacity = ZHeap::heap()->capacity(); + const size_t old_used = ZHeap::heap()->used_old(); + const size_t young_used = ZHeap::heap()->used_young(); + + ZMemoryUsageInfo info; + info._old_used = MIN2(old_used, capacity); + info._old_capacity = info._old_used; + info._young_capacity = capacity - info._old_capacity; + info._young_used = MIN2(young_used, info._young_capacity); + return info; +} + class ZGenerationCounters : public GenerationCounters { public: ZGenerationCounters(const char* name, int ordinal, int spaces, @@ -45,122 +66,189 @@ class ZGenerationCounters : public GenerationCounters { // Class to expose perf counters used by jstat. 
class ZServiceabilityCounters : public CHeapObj { private: - ZGenerationCounters _generation_counters; - HSpaceCounters _space_counters; - CollectorCounters _collector_counters; + ZGenerationCounters _generation_young_counters; + ZGenerationCounters _generation_old_counters; + HSpaceCounters _space_young_counters; + HSpaceCounters _space_old_counters; + CollectorCounters _minor_collection_counters; + CollectorCounters _major_collection_counters; public: - ZServiceabilityCounters(size_t min_capacity, size_t max_capacity); + ZServiceabilityCounters(size_t initial_capacity, size_t min_capacity, size_t max_capacity); - CollectorCounters* collector_counters(); + CollectorCounters* collector_counters(bool minor); void update_sizes(); }; -ZServiceabilityCounters::ZServiceabilityCounters(size_t min_capacity, size_t max_capacity) : +ZServiceabilityCounters::ZServiceabilityCounters(size_t initial_capacity, size_t min_capacity, size_t max_capacity) : + // generation.0 + _generation_young_counters( + "young" /* name */, + 0 /* ordinal */, + 1 /* spaces */, + min_capacity /* min_capacity */, + max_capacity /* max_capacity */, + initial_capacity /* curr_capacity */), // generation.1 - _generation_counters("old" /* name */, - 1 /* ordinal */, - 1 /* spaces */, - min_capacity /* min_capacity */, - max_capacity /* max_capacity */, - min_capacity /* curr_capacity */), + _generation_old_counters( + "old" /* name */, + 1 /* ordinal */, + 1 /* spaces */, + 0 /* min_capacity */, + max_capacity /* max_capacity */, + 0 /* curr_capacity */), + // generation.0.space.0 + _space_young_counters( + _generation_young_counters.name_space(), + "space" /* name */, + 0 /* ordinal */, + max_capacity /* max_capacity */, + initial_capacity /* init_capacity */), // generation.1.space.0 - _space_counters(_generation_counters.name_space(), - "space" /* name */, - 0 /* ordinal */, - max_capacity /* max_capacity */, - min_capacity /* init_capacity */), + _space_old_counters( + _generation_old_counters.name_space(), + "space" /* name */, + 0 /* ordinal */, + max_capacity /* max_capacity */, + 0 /* init_capacity */), + // gc.collector.0 + _minor_collection_counters( + "ZGC minor collection pauses" /* name */, + 0 /* ordinal */), // gc.collector.2 - _collector_counters("Z concurrent cycle pauses" /* name */, - 2 /* ordinal */) {} + _major_collection_counters( + "ZGC major collection pauses" /* name */, + 2 /* ordinal */) {} -CollectorCounters* ZServiceabilityCounters::collector_counters() { - return &_collector_counters; +CollectorCounters* ZServiceabilityCounters::collector_counters(bool minor) { + return minor + ? 
&_minor_collection_counters + : &_major_collection_counters; } void ZServiceabilityCounters::update_sizes() { if (UsePerfData) { - const size_t capacity = ZHeap::heap()->capacity(); - const size_t used = MIN2(ZHeap::heap()->used(), capacity); - - _generation_counters.update_capacity(capacity); - _space_counters.update_capacity(capacity); - _space_counters.update_used(used); + const ZMemoryUsageInfo info = compute_memory_usage_info(); + _generation_young_counters.update_capacity(info._young_capacity); + _generation_old_counters.update_capacity(info._old_capacity); + _space_young_counters.update_capacity(info._young_capacity); + _space_young_counters.update_used(info._young_used); + _space_old_counters.update_capacity(info._old_capacity); + _space_old_counters.update_used(info._old_used); MetaspaceCounters::update_performance_counters(); } } -ZServiceabilityMemoryPool::ZServiceabilityMemoryPool(size_t min_capacity, size_t max_capacity) : - CollectedMemoryPool("ZHeap", +ZServiceabilityMemoryPool::ZServiceabilityMemoryPool(const char* name, ZGenerationId id, size_t min_capacity, size_t max_capacity) : + CollectedMemoryPool(name, min_capacity, max_capacity, - true /* support_usage_threshold */) {} + id == ZGenerationId::old /* support_usage_threshold */), + _generation_id(id) {} size_t ZServiceabilityMemoryPool::used_in_bytes() { - return ZHeap::heap()->used(); + return ZHeap::heap()->used_generation(_generation_id); } MemoryUsage ZServiceabilityMemoryPool::get_memory_usage() { - const size_t committed = ZHeap::heap()->capacity(); - const size_t used = MIN2(ZHeap::heap()->used(), committed); + const ZMemoryUsageInfo info = compute_memory_usage_info(); - return MemoryUsage(initial_size(), used, committed, max_size()); + if (_generation_id == ZGenerationId::young) { + return MemoryUsage(initial_size(), info._young_used, info._young_capacity, max_size()); + } else { + return MemoryUsage(initial_size(), info._old_used, info._old_capacity, max_size()); + } } ZServiceabilityMemoryManager::ZServiceabilityMemoryManager(const char* name, - ZServiceabilityMemoryPool* pool) : + MemoryPool* young_memory_pool, + MemoryPool* old_memory_pool) : GCMemoryManager(name) { - add_pool(pool); + add_pool(young_memory_pool); + add_pool(old_memory_pool); } -ZServiceability::ZServiceability(size_t min_capacity, size_t max_capacity) : +ZServiceability::ZServiceability(size_t initial_capacity, + size_t min_capacity, + size_t max_capacity) : + _initial_capacity(initial_capacity), _min_capacity(min_capacity), _max_capacity(max_capacity), - _memory_pool(_min_capacity, _max_capacity), - _cycle_memory_manager("ZGC Cycles", &_memory_pool), - _pause_memory_manager("ZGC Pauses", &_memory_pool), - _counters(NULL) {} + _young_memory_pool("ZGC Young Generation", ZGenerationId::young, _min_capacity, _max_capacity), + _old_memory_pool("ZGC Old Generation", ZGenerationId::old, 0, _max_capacity), + _minor_cycle_memory_manager("ZGC Minor Cycles", &_young_memory_pool, &_old_memory_pool), + _major_cycle_memory_manager("ZGC Major Cycles", &_young_memory_pool, &_old_memory_pool), + _minor_pause_memory_manager("ZGC Minor Pauses", &_young_memory_pool, &_old_memory_pool), + _major_pause_memory_manager("ZGC Major Pauses", &_young_memory_pool, &_old_memory_pool), + _counters(nullptr) { +} void ZServiceability::initialize() { - _counters = new ZServiceabilityCounters(_min_capacity, _max_capacity); + _counters = new ZServiceabilityCounters(_initial_capacity, _min_capacity, _max_capacity); } -MemoryPool* ZServiceability::memory_pool() { - return 
&_memory_pool; +MemoryPool* ZServiceability::memory_pool(ZGenerationId id) { + return id == ZGenerationId::young + ? &_young_memory_pool + : &_old_memory_pool; } -GCMemoryManager* ZServiceability::cycle_memory_manager() { - return &_cycle_memory_manager; +GCMemoryManager* ZServiceability::cycle_memory_manager(bool minor) { + return minor + ? &_minor_cycle_memory_manager + : &_major_cycle_memory_manager; } -GCMemoryManager* ZServiceability::pause_memory_manager() { - return &_pause_memory_manager; +GCMemoryManager* ZServiceability::pause_memory_manager(bool minor) { + return minor + ? &_minor_pause_memory_manager + : &_major_pause_memory_manager; } ZServiceabilityCounters* ZServiceability::counters() { return _counters; } -ZServiceabilityCycleTracer::ZServiceabilityCycleTracer() : - _memory_manager_stats(ZHeap::heap()->serviceability_cycle_memory_manager(), - ZCollectedHeap::heap()->gc_cause(), +bool ZServiceabilityCycleTracer::_minor_is_active; + +ZServiceabilityCycleTracer::ZServiceabilityCycleTracer(bool minor) : + _memory_manager_stats(ZHeap::heap()->serviceability_cycle_memory_manager(minor), + minor ? ZDriver::minor()->gc_cause() : ZDriver::major()->gc_cause(), "end of GC cycle", - true /* allMemoryPoolsAffected */, - true /* recordGCBeginTime */, - true /* recordPreGCUsage */, - true /* recordPeakUsage */, - true /* recordPostGCUsage */, - true /* recordAccumulatedGCTime */, - true /* recordGCEndTime */, - true /* countCollection */) {} + true /* allMemoryPoolsAffected */, + true /* recordGCBeginTime */, + true /* recordPreGCUsage */, + true /* recordPeakUsage */, + true /* recordPostGCUsage */, + true /* recordAccumulatedGCTime */, + true /* recordGCEndTime */, + true /* countCollection */) { + _minor_is_active = minor; +} + +ZServiceabilityCycleTracer::~ZServiceabilityCycleTracer() { + _minor_is_active = false; +} + +bool ZServiceabilityCycleTracer::minor_is_active() { + return _minor_is_active; +} + +bool ZServiceabilityPauseTracer::minor_is_active() const { + // We report pauses at the minor/major collection level instead + // of the young/old level. At the call-site where ZServiceabilityPauseTracer + // is used, we don't have that information readily available, so + // we let ZServiceabilityCycleTracer keep track of that. + return ZServiceabilityCycleTracer::minor_is_active(); +} ZServiceabilityPauseTracer::ZServiceabilityPauseTracer() : _svc_gc_marker(SvcGCMarker::CONCURRENT), - _counters_stats(ZHeap::heap()->serviceability_counters()->collector_counters()), - _memory_manager_stats(ZHeap::heap()->serviceability_pause_memory_manager(), - ZCollectedHeap::heap()->gc_cause(), + _counters_stats(ZHeap::heap()->serviceability_counters()->collector_counters(minor_is_active())), + _memory_manager_stats(ZHeap::heap()->serviceability_pause_memory_manager(minor_is_active()), + minor_is_active() ? ZDriver::minor()->gc_cause() : ZDriver::major()->gc_cause(), "end of GC pause", true /* allMemoryPoolsAffected */, true /* recordGCBeginTime */, diff --git a/src/hotspot/share/gc/z/zServiceability.hpp b/src/hotspot/share/gc/z/zServiceability.hpp index 5d03184d244f9..2a3b56248df8c 100644 --- a/src/hotspot/share/gc/z/zServiceability.hpp +++ b/src/hotspot/share/gc/z/zServiceability.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
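Aside: the serviceability code above derives per-generation memory figures from a single heap capacity. compute_memory_usage_info() pins the old generation's reported capacity to its current usage and hands the remainder of the heap capacity to the young generation. A standalone sketch of that split (not HotSpot code), with illustrative numbers:

// Standalone sketch of the young/old usage split.
#include <algorithm>
#include <cstddef>
#include <cstdio>

struct MemoryUsageInfo {
  std::size_t young_used;
  std::size_t young_capacity;
  std::size_t old_used;
  std::size_t old_capacity;
};

static MemoryUsageInfo compute_memory_usage_info(std::size_t capacity,
                                                 std::size_t young_used,
                                                 std::size_t old_used) {
  MemoryUsageInfo info;
  info.old_used       = std::min(old_used, capacity);
  info.old_capacity   = info.old_used;                  // old capacity == old used
  info.young_capacity = capacity - info.old_capacity;   // the rest is "young"
  info.young_used     = std::min(young_used, info.young_capacity);
  return info;
}

int main() {
  // e.g. 1024M total capacity, 300M used by young, 200M used by old
  const MemoryUsageInfo info = compute_memory_usage_info(1024, 300, 200);
  std::printf("young %zu/%zu, old %zu/%zu\n",
              info.young_used, info.young_capacity,
              info.old_used, info.old_capacity);   // young 300/824, old 200/200
  return 0;
}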
* * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "gc/shared/collectorCounters.hpp" #include "gc/shared/gcVMOperations.hpp" +#include "gc/z/zGenerationId.hpp" #include "memory/allocation.hpp" #include "services/memoryManager.hpp" #include "services/memoryPool.hpp" @@ -34,8 +35,11 @@ class ZServiceabilityCounters; class ZServiceabilityMemoryPool : public CollectedMemoryPool { +private: + const ZGenerationId _generation_id; + public: - ZServiceabilityMemoryPool(size_t min_capacity, size_t max_capacity); + ZServiceabilityMemoryPool(const char* name, ZGenerationId id, size_t min_capacity, size_t max_capacity); virtual size_t used_in_bytes(); virtual MemoryUsage get_memory_usage(); @@ -44,35 +48,47 @@ class ZServiceabilityMemoryPool : public CollectedMemoryPool { class ZServiceabilityMemoryManager : public GCMemoryManager { public: ZServiceabilityMemoryManager(const char* name, - ZServiceabilityMemoryPool* pool); + MemoryPool* young_memory_pool, + MemoryPool* old_memory_pool); }; class ZServiceability { private: + const size_t _initial_capacity; const size_t _min_capacity; const size_t _max_capacity; - ZServiceabilityMemoryPool _memory_pool; - ZServiceabilityMemoryManager _cycle_memory_manager; - ZServiceabilityMemoryManager _pause_memory_manager; + ZServiceabilityMemoryPool _young_memory_pool; + ZServiceabilityMemoryPool _old_memory_pool; + ZServiceabilityMemoryManager _minor_cycle_memory_manager; + ZServiceabilityMemoryManager _major_cycle_memory_manager; + ZServiceabilityMemoryManager _minor_pause_memory_manager; + ZServiceabilityMemoryManager _major_pause_memory_manager; ZServiceabilityCounters* _counters; public: - ZServiceability(size_t min_capacity, size_t max_capacity); + ZServiceability(size_t initial_capacity, + size_t min_capacity, + size_t max_capacity); void initialize(); - MemoryPool* memory_pool(); - GCMemoryManager* cycle_memory_manager(); - GCMemoryManager* pause_memory_manager(); + MemoryPool* memory_pool(ZGenerationId id); + GCMemoryManager* cycle_memory_manager(bool minor); + GCMemoryManager* pause_memory_manager(bool minor); ZServiceabilityCounters* counters(); }; class ZServiceabilityCycleTracer : public StackObj { private: + static bool _minor_is_active; + TraceMemoryManagerStats _memory_manager_stats; public: - ZServiceabilityCycleTracer(); + ZServiceabilityCycleTracer(bool minor); + ~ZServiceabilityCycleTracer(); + + static bool minor_is_active(); }; class ZServiceabilityPauseTracer : public StackObj { @@ -81,6 +97,8 @@ class ZServiceabilityPauseTracer : public StackObj { TraceCollectorStats _counters_stats; TraceMemoryManagerStats _memory_manager_stats; + bool minor_is_active() const; + public: ZServiceabilityPauseTracer(); ~ZServiceabilityPauseTracer(); diff --git a/src/hotspot/share/gc/z/zStackChunkGCData.hpp b/src/hotspot/share/gc/z/zStackChunkGCData.hpp new file mode 100644 index 0000000000000..fefdc55919377 --- /dev/null +++ b/src/hotspot/share/gc/z/zStackChunkGCData.hpp @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZSTACKCHUNKGCDATA_HPP +#define SHARE_GC_Z_ZSTACKCHUNKGCDATA_HPP + +#include "oops/oopsHierarchy.hpp" + +class ZStackChunkGCData { +private: + // The implicit color of all oops when the chunk was recently allocated + uintptr_t _color; + + static ZStackChunkGCData* data(stackChunkOop chunk); + +public: + static void initialize(stackChunkOop chunk); + static uintptr_t color(stackChunkOop chunk); +}; + +#endif // SHARE_GC_Z_ZSTACKCHUNKGCDATA_HPP diff --git a/src/hotspot/share/gc/z/zStackChunkGCData.inline.hpp b/src/hotspot/share/gc/z/zStackChunkGCData.inline.hpp new file mode 100644 index 0000000000000..0a5ca86fa68da --- /dev/null +++ b/src/hotspot/share/gc/z/zStackChunkGCData.inline.hpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZSTACKCHUNKGCDATA_INLINE_HPP +#define SHARE_GC_Z_ZSTACKCHUNKGCDATA_INLINE_HPP + +#include "gc/z/zStackChunkGCData.hpp" + +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zGlobals.hpp" +#include "oops/stackChunkOop.inline.hpp" + +inline ZStackChunkGCData* ZStackChunkGCData::data(stackChunkOop chunk) { + return reinterpret_cast(chunk->gc_data()); +} + +inline void ZStackChunkGCData::initialize(stackChunkOop chunk) { + data(chunk)->_color = ZPointerStoreGoodMask; +} + +inline uintptr_t ZStackChunkGCData::color(stackChunkOop chunk) { + return data(chunk)->_color; +} + +#endif // SHARE_GC_Z_ZSTACKCHUNKGCDATA_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zStackWatermark.cpp b/src/hotspot/share/gc/z/zStackWatermark.cpp index 7280f350e8000..ed0c054858d98 100644 --- a/src/hotspot/share/gc/z/zStackWatermark.cpp +++ b/src/hotspot/share/gc/z/zStackWatermark.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,13 +24,17 @@ #include "precompiled.hpp" #include "gc/z/zAddress.hpp" #include "gc/z/zBarrier.inline.hpp" +#include "gc/z/zGeneration.inline.hpp" #include "gc/z/zStackWatermark.hpp" -#include "gc/z/zThread.inline.hpp" +#include "gc/z/zStoreBarrierBuffer.hpp" #include "gc/z/zThreadLocalAllocBuffer.hpp" #include "gc/z/zThreadLocalData.hpp" +#include "gc/z/zUncoloredRoot.inline.hpp" #include "gc/z/zVerify.hpp" #include "memory/resourceArea.inline.hpp" #include "runtime/frame.inline.hpp" +#include "runtime/stackWatermark.hpp" +#include "runtime/thread.hpp" #include "utilities/preserveException.hpp" ZOnStackCodeBlobClosure::ZOnStackCodeBlobClosure() : @@ -38,7 +42,7 @@ ZOnStackCodeBlobClosure::ZOnStackCodeBlobClosure() : void ZOnStackCodeBlobClosure::do_code_blob(CodeBlob* cb) { nmethod* const nm = cb->as_nmethod_or_null(); - if (nm != NULL) { + if (nm != nullptr) { const bool result = _bs_nm->nmethod_entry_barrier(nm); assert(result, "NMethod on-stack must be alive"); } @@ -49,51 +53,163 @@ ThreadLocalAllocStats& ZStackWatermark::stats() { } uint32_t ZStackWatermark::epoch_id() const { - return *ZAddressBadMaskHighOrderBitsAddr; + return *ZPointerStoreGoodMaskLowOrderBitsAddr; } ZStackWatermark::ZStackWatermark(JavaThread* jt) : - StackWatermark(jt, StackWatermarkKind::gc, *ZAddressBadMaskHighOrderBitsAddr), - _jt_cl(), - _cb_cl(), + StackWatermark(jt, StackWatermarkKind::gc, *ZPointerStoreGoodMaskLowOrderBitsAddr), + // First watermark is fake and setup to be replaced at next phase shift + _old_watermarks{{ZPointerStoreBadMask, 1}, {}, {}}, + _old_watermarks_newest(0), _stats() {} -OopClosure* ZStackWatermark::closure_from_context(void* context) { - if (context != NULL) { - assert(ZThread::is_worker(), "Unexpected thread passing in context: " PTR_FORMAT, p2i(context)); - return reinterpret_cast(context); +bool ZColorWatermark::covers(const ZColorWatermark& other) const { + if (_watermark == 0) { + // This watermark was completed + return true; + } + + if (other._watermark == 0) { + // The other watermark was completed + return false; + } + + // Compare the two + return _watermark >= other._watermark; +} + +uintptr_t ZStackWatermark::prev_head_color() const { + return _old_watermarks[_old_watermarks_newest]._color; +} + +uintptr_t ZStackWatermark::prev_frame_color(const frame& fr) const { + for (int i = _old_watermarks_newest; i >= 0; i--) { + const ZColorWatermark ow = _old_watermarks[i]; + if (ow._watermark == 0 || uintptr_t(fr.sp()) <= ow._watermark) { + return ow._color; + } + } + + fatal("Found no matching previous color for the frame"); + return 0; +} + +void ZStackWatermark::save_old_watermark() { + assert(StackWatermarkState::epoch(_state) != ZStackWatermark::epoch_id(), "Shouldn't be here otherwise"); + + // Previous color + const uintptr_t prev_color = StackWatermarkState::epoch(_state); + + // If the prev_color is still the last saved color watermark, then processing has not started. + const bool prev_processing_started = prev_color != prev_head_color(); + + if (!prev_processing_started) { + // Nothing was processed in the previous phase, so there's no need to save a watermark for it. + // Must have been a remapped phase, the other phases are explicitly completed by the GC. 
+ assert((prev_color & ZPointerRemapped) != 0, "Unexpected color: " PTR_FORMAT, prev_color); + return; + } + + // Previous watermark + const uintptr_t prev_watermark = StackWatermarkState::is_done(_state) ? 0 : last_processed_raw(); + + // Create a new color watermark to describe the old watermark + const ZColorWatermark cw = { prev_color, prev_watermark }; + + // Find the location of the oldest watermark that it covers, and thus can replace + int replace = -1; + for (int i = 0; i <= _old_watermarks_newest; i++) { + if (cw.covers(_old_watermarks[i])) { + replace = i; + break; + } + } + + // Update top + if (replace != -1) { + // Found one to replace + _old_watermarks_newest = replace; } else { - return &_jt_cl; + // Found none too replace - push it to the top + _old_watermarks_newest++; + assert(_old_watermarks_newest < _old_watermarks_max, "Unexpected amount of old watermarks"); + } + + // Install old watermark + _old_watermarks[_old_watermarks_newest] = cw; +} + +class ZStackWatermarkProcessOopClosure : public ZUncoloredRootClosure { +private: + const ZUncoloredRoot::RootFunction _function; + const uintptr_t _color; + + static ZUncoloredRoot::RootFunction select_function(void* context) { + if (context == nullptr) { + return ZUncoloredRoot::process; + } + + assert(Thread::current()->is_Worker_thread(), "Unexpected thread passing in context: " PTR_FORMAT, p2i(context)); + return reinterpret_cast(context); + } + +public: + ZStackWatermarkProcessOopClosure(void* context, uintptr_t color) : + _function(select_function(context)), _color(color) {} + + virtual void do_root(zaddress_unsafe* p) { + _function(p, _color); + } +}; + +void ZStackWatermark::process_head(void* context) { + const uintptr_t color = prev_head_color(); + + ZStackWatermarkProcessOopClosure cl(context, color); + ZOnStackCodeBlobClosure cb_cl; + + _jt->oops_do_no_frames(&cl, &cb_cl); + + zaddress_unsafe* const invisible_root = ZThreadLocalData::invisible_root(_jt); + if (invisible_root != nullptr) { + ZUncoloredRoot::process_invisible(invisible_root, color); } } void ZStackWatermark::start_processing_impl(void* context) { - // Verify the head (no_frames) of the thread is bad before fixing it. - ZVerify::verify_thread_head_bad(_jt); + save_old_watermark(); // Process the non-frame part of the thread - _jt->oops_do_no_frames(closure_from_context(context), &_cb_cl); - ZThreadLocalData::do_invisible_root(_jt, ZBarrier::load_barrier_on_invisible_root_oop_field); + process_head(context); // Verification of frames is done after processing of the "head" (no_frames). // The reason is that the exception oop is fiddled with during frame processing. 
- ZVerify::verify_thread_frames_bad(_jt); + // ZVerify::verify_thread_frames_bad(_jt); - // Update thread local address bad mask - ZThreadLocalData::set_address_bad_mask(_jt, ZAddressBadMask); + // Update thread-local masks + ZThreadLocalData::set_load_bad_mask(_jt, ZPointerLoadBadMask); + ZThreadLocalData::set_load_good_mask(_jt, ZPointerLoadGoodMask); + ZThreadLocalData::set_mark_bad_mask(_jt, ZPointerMarkBadMask); + ZThreadLocalData::set_store_bad_mask(_jt, ZPointerStoreBadMask); + ZThreadLocalData::set_store_good_mask(_jt, ZPointerStoreGoodMask); + ZThreadLocalData::set_nmethod_disarmed(_jt, ZPointerStoreGoodMask); // Retire TLAB - if (ZGlobalPhase == ZPhaseMark) { + if (ZGeneration::young()->is_phase_mark() || ZGeneration::old()->is_phase_mark()) { ZThreadLocalAllocBuffer::retire(_jt, &_stats); - } else { - ZThreadLocalAllocBuffer::remap(_jt); } + // Prepare store barrier buffer for new GC phase + ZThreadLocalData::store_barrier_buffer(_jt)->on_new_phase(); + // Publishes the processing start to concurrent threads StackWatermark::start_processing_impl(context); } void ZStackWatermark::process(const frame& fr, RegisterMap& register_map, void* context) { - ZVerify::verify_frame_bad(fr, register_map); - fr.oops_do(closure_from_context(context), &_cb_cl, ®ister_map, DerivedPointerIterationMode::_directly); + const uintptr_t color = prev_frame_color(fr); + ZStackWatermarkProcessOopClosure cl(context, color); + ZOnStackCodeBlobClosure cb_cl; + + fr.oops_do(&cl, &cb_cl, ®ister_map, DerivedPointerIterationMode::_directly); } diff --git a/src/hotspot/share/gc/z/zStackWatermark.hpp b/src/hotspot/share/gc/z/zStackWatermark.hpp index e40b24f4623a4..042ba4dd3f8a6 100644 --- a/src/hotspot/share/gc/z/zStackWatermark.hpp +++ b/src/hotspot/share/gc/z/zStackWatermark.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "gc/shared/barrierSetNMethod.hpp" #include "gc/shared/threadLocalAllocBuffer.hpp" #include "gc/z/zBarrier.hpp" +#include "gc/z/zUncoloredRoot.hpp" #include "memory/allocation.hpp" #include "memory/iterator.hpp" #include "oops/oopsHierarchy.hpp" @@ -47,13 +48,28 @@ class ZOnStackCodeBlobClosure : public CodeBlobClosure { ZOnStackCodeBlobClosure(); }; +struct ZColorWatermark { + uintptr_t _color; + uintptr_t _watermark; + + bool covers(const ZColorWatermark& other) const; +}; + class ZStackWatermark : public StackWatermark { private: - ZLoadBarrierOopClosure _jt_cl; - ZOnStackCodeBlobClosure _cb_cl; - ThreadLocalAllocStats _stats; + // Stores old watermarks, which describes the + // colors of the non-processed part of the stack. 
+ const static int _old_watermarks_max = 3; + ZColorWatermark _old_watermarks[_old_watermarks_max]; + int _old_watermarks_newest; + + ThreadLocalAllocStats _stats; + + uintptr_t prev_head_color() const; + uintptr_t prev_frame_color(const frame& fr) const; + void save_old_watermark(); - OopClosure* closure_from_context(void* context); + void process_head(void* context); virtual uint32_t epoch_id() const; virtual void start_processing_impl(void* context); diff --git a/src/hotspot/share/gc/z/zStat.cpp b/src/hotspot/share/gc/z/zStat.cpp index 540c171ae9f7f..5c2a87938afb3 100644 --- a/src/hotspot/share/gc/z/zStat.cpp +++ b/src/hotspot/share/gc/z/zStat.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,19 +25,22 @@ #include "gc/shared/gc_globals.hpp" #include "gc/z/zAbort.inline.hpp" #include "gc/z/zCollectedHeap.hpp" +#include "gc/z/zDirector.hpp" +#include "gc/z/zDriver.hpp" #include "gc/z/zCPU.inline.hpp" +#include "gc/z/zGeneration.inline.hpp" #include "gc/z/zGlobals.hpp" #include "gc/z/zNMethodTable.hpp" #include "gc/z/zPageAllocator.inline.hpp" #include "gc/z/zRelocationSetSelector.inline.hpp" #include "gc/z/zStat.hpp" -#include "gc/z/zThread.inline.hpp" #include "gc/z/zTracer.inline.hpp" #include "gc/z/zUtils.hpp" #include "memory/metaspaceUtils.hpp" #include "memory/resourceArea.hpp" #include "runtime/atomic.hpp" #include "runtime/os.hpp" +#include "runtime/thread.hpp" #include "runtime/timer.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" @@ -240,7 +243,7 @@ class ZStatSamplerHistory : public CHeapObj { // Stat unit printers // void ZStatUnitTime(LogTargetHandle log, const ZStatSampler& sampler, const ZStatSamplerHistory& history) { - log.print(" %10s: %-41s " + log.print(" %16s: %-41s " "%9.3f / %-9.3f " "%9.3f / %-9.3f " "%9.3f / %-9.3f " @@ -258,7 +261,7 @@ void ZStatUnitTime(LogTargetHandle log, const ZStatSampler& sampler, const ZStat } void ZStatUnitBytes(LogTargetHandle log, const ZStatSampler& sampler, const ZStatSamplerHistory& history) { - log.print(" %10s: %-41s " + log.print(" %16s: %-41s " UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " @@ -276,7 +279,7 @@ void ZStatUnitBytes(LogTargetHandle log, const ZStatSampler& sampler, const ZSta } void ZStatUnitThreads(LogTargetHandle log, const ZStatSampler& sampler, const ZStatSamplerHistory& history) { - log.print(" %10s: %-41s " + log.print(" %16s: %-41s " UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " @@ -294,7 +297,7 @@ void ZStatUnitThreads(LogTargetHandle log, const ZStatSampler& sampler, const ZS } void ZStatUnitBytesPerSecond(LogTargetHandle log, const ZStatSampler& sampler, const ZStatSamplerHistory& history) { - log.print(" %10s: %-41s " + log.print(" %16s: %-41s " UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " @@ -312,7 +315,7 @@ void ZStatUnitBytesPerSecond(LogTargetHandle log, const ZStatSampler& sampler, c } void ZStatUnitOpsPerSecond(LogTargetHandle log, const ZStatSampler& sampler, const ZStatSamplerHistory& history) { - 
log.print(" %10s: %-41s " + log.print(" %16s: %-41s " UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " @@ -380,7 +383,7 @@ uint32_t ZStatValue::id() const { // Stat iterable value // template uint32_t ZStatIterableValue::_count = 0; -template T* ZStatIterableValue::_first = NULL; +template T* ZStatIterableValue::_first = nullptr; template ZStatIterableValue::ZStatIterableValue(const char* group, @@ -399,16 +402,16 @@ T* ZStatIterableValue::insert() const { template void ZStatIterableValue::sort() { T* first_unsorted = _first; - _first = NULL; + _first = nullptr; - while (first_unsorted != NULL) { + while (first_unsorted != nullptr) { T* const value = first_unsorted; first_unsorted = value->_next; - value->_next = NULL; + value->_next = nullptr; T** current = &_first; - while (*current != NULL) { + while (*current != nullptr) { // First sort by group, then by name const int group_cmp = strcmp((*current)->group(), value->group()); if ((group_cmp > 0) || (group_cmp == 0 && strcmp((*current)->name(), value->name()) > 0)) { @@ -589,7 +592,6 @@ void ZStatMMU::print() { // // Stat phases // -ConcurrentGCTimer ZStatPhase::_timer; ZStatPhase::ZStatPhase(const char* group, const char* name) : _sampler(group, name, ZStatUnitTime) {} @@ -620,79 +622,148 @@ void ZStatPhase::log_end(LogTargetHandle log, const Tickspan& duration, bool thr } } -ConcurrentGCTimer* ZStatPhase::timer() { - return &_timer; -} - const char* ZStatPhase::name() const { return _sampler.name(); } -ZStatPhaseCycle::ZStatPhaseCycle(const char* name) : - ZStatPhase("Collector", name) {} +ZStatPhaseCollection::ZStatPhaseCollection(const char* name, bool minor) : + ZStatPhase(minor ? "Minor Collection" : "Major Collection", name), + _minor(minor) {} + +GCTracer* ZStatPhaseCollection::jfr_tracer() const { + return _minor + ? ZDriver::minor()->jfr_tracer() + : ZDriver::major()->jfr_tracer(); +} + +void ZStatPhaseCollection::set_used_at_start(size_t used) const { + if (_minor) { + ZDriver::minor()->set_used_at_start(used); + } else { + ZDriver::major()->set_used_at_start(used); + } +} + +size_t ZStatPhaseCollection::used_at_start() const { + return _minor + ? ZDriver::minor()->used_at_start() + : ZDriver::major()->used_at_start(); +} + +void ZStatPhaseCollection::register_start(ConcurrentGCTimer* timer, const Ticks& start) const { + const GCCause::Cause cause = _minor ? ZDriver::minor()->gc_cause() : ZDriver::major()->gc_cause(); + + timer->register_gc_start(start); + + jfr_tracer()->report_gc_start(cause, start); + ZCollectedHeap::heap()->trace_heap_before_gc(jfr_tracer()); + + set_used_at_start(ZHeap::heap()->used()); + + log_info(gc)("%s (%s)", name(), GCCause::to_string(cause)); +} + +void ZStatPhaseCollection::register_end(ConcurrentGCTimer* timer, const Ticks& start, const Ticks& end) const { + const GCCause::Cause cause = _minor ? 
ZDriver::minor()->gc_cause() : ZDriver::major()->gc_cause(); + + if (ZAbort::should_abort()) { + log_info(gc)("%s (%s) Aborted", name(), GCCause::to_string(cause)); + return; + } + + timer->register_gc_end(end); + + jfr_tracer()->report_gc_end(end, timer->time_partitions()); + ZCollectedHeap::heap()->trace_heap_after_gc(jfr_tracer()); + + const Tickspan duration = end - start; + ZStatSample(_sampler, duration.value()); + + const size_t used_at_end = ZHeap::heap()->used(); -void ZStatPhaseCycle::register_start(const Ticks& start) const { - timer()->register_gc_start(start); + log_info(gc)("%s (%s) " ZSIZE_FMT "->" ZSIZE_FMT " %.3fs", + name(), + GCCause::to_string(cause), + ZSIZE_ARGS(used_at_start()), + ZSIZE_ARGS(used_at_end), + duration.seconds()); +} + +ZStatPhaseGeneration::ZStatPhaseGeneration(const char* name, ZGenerationId id) : + ZStatPhase(id == ZGenerationId::old ? "Old Generation" : "Young Generation", name), + _id(id) {} - ZTracer::tracer()->report_gc_start(ZCollectedHeap::heap()->gc_cause(), start); +ZGenerationTracer* ZStatPhaseGeneration::jfr_tracer() const { + return _id == ZGenerationId::young + ? ZGeneration::young()->jfr_tracer() + : ZGeneration::old()->jfr_tracer(); +} +void ZStatPhaseGeneration::register_start(ConcurrentGCTimer* timer, const Ticks& start) const { ZCollectedHeap::heap()->print_heap_before_gc(); - ZCollectedHeap::heap()->trace_heap_before_gc(ZTracer::tracer()); - log_info(gc, start)("Garbage Collection (%s)", - GCCause::to_string(ZCollectedHeap::heap()->gc_cause())); + jfr_tracer()->report_start(start); + + log_info(gc, phases)("%s", name()); } -void ZStatPhaseCycle::register_end(const Ticks& start, const Ticks& end) const { +void ZStatPhaseGeneration::register_end(ConcurrentGCTimer* timer, const Ticks& start, const Ticks& end) const { if (ZAbort::should_abort()) { - log_info(gc)("Garbage Collection (%s) Aborted", - GCCause::to_string(ZCollectedHeap::heap()->gc_cause())); + log_info(gc, phases)("%s Aborted", name()); return; } - timer()->register_gc_end(end); + jfr_tracer()->report_end(end); ZCollectedHeap::heap()->print_heap_after_gc(); - ZCollectedHeap::heap()->trace_heap_after_gc(ZTracer::tracer()); - - ZTracer::tracer()->report_gc_end(end, timer()->time_partitions()); const Tickspan duration = end - start; ZStatSample(_sampler, duration.value()); + ZGeneration* const generation = ZGeneration::generation(_id); + + generation->stat_heap()->print_stalls(); ZStatLoad::print(); ZStatMMU::print(); - ZStatMark::print(); + generation->stat_mark()->print(); ZStatNMethods::print(); ZStatMetaspace::print(); - ZStatReferences::print(); - ZStatRelocation::print(); - ZStatHeap::print(); + if (generation->is_old()) { + ZStatReferences::print(); + } - log_info(gc)("Garbage Collection (%s) " ZSIZE_FMT "->" ZSIZE_FMT, - GCCause::to_string(ZCollectedHeap::heap()->gc_cause()), - ZSIZE_ARGS(ZStatHeap::used_at_mark_start()), - ZSIZE_ARGS(ZStatHeap::used_at_relocate_end())); + generation->stat_relocation()->print_page_summary(); + if (generation->is_young()) { + generation->stat_relocation()->print_age_table(); + } + + generation->stat_heap()->print(generation); + + log_info(gc, phases)("%s " ZSIZE_FMT "->" ZSIZE_FMT " %.3fs", + name(), + ZSIZE_ARGS(generation->stat_heap()->used_at_collection_start()), + ZSIZE_ARGS(generation->stat_heap()->used_at_collection_end()), + duration.seconds()); } Tickspan ZStatPhasePause::_max; -ZStatPhasePause::ZStatPhasePause(const char* name) : - ZStatPhase("Phase", name) {} +ZStatPhasePause::ZStatPhasePause(const char* name, ZGenerationId 
id) : + ZStatPhase(id == ZGenerationId::young ? "Young Pause" : "Old Pause", name) {} const Tickspan& ZStatPhasePause::max() { return _max; } -void ZStatPhasePause::register_start(const Ticks& start) const { - timer()->register_gc_pause_start(name(), start); +void ZStatPhasePause::register_start(ConcurrentGCTimer* timer, const Ticks& start) const { + timer->register_gc_pause_start(name(), start); LogTarget(Debug, gc, phases, start) log; log_start(log); } -void ZStatPhasePause::register_end(const Ticks& start, const Ticks& end) const { - timer()->register_gc_pause_end(end); +void ZStatPhasePause::register_end(ConcurrentGCTimer* timer, const Ticks& start, const Ticks& end) const { + timer->register_gc_pause_end(end); const Tickspan duration = end - start; ZStatSample(_sampler, duration.value()); @@ -709,22 +780,22 @@ void ZStatPhasePause::register_end(const Ticks& start, const Ticks& end) const { log_end(log, duration); } -ZStatPhaseConcurrent::ZStatPhaseConcurrent(const char* name) : - ZStatPhase("Phase", name) {} +ZStatPhaseConcurrent::ZStatPhaseConcurrent(const char* name, ZGenerationId id) : + ZStatPhase(id == ZGenerationId::young ? "Young Phase" : "Old Phase", name) {} -void ZStatPhaseConcurrent::register_start(const Ticks& start) const { - timer()->register_gc_concurrent_start(name(), start); +void ZStatPhaseConcurrent::register_start(ConcurrentGCTimer* timer, const Ticks& start) const { + timer->register_gc_concurrent_start(name(), start); LogTarget(Debug, gc, phases, start) log; log_start(log); } -void ZStatPhaseConcurrent::register_end(const Ticks& start, const Ticks& end) const { +void ZStatPhaseConcurrent::register_end(ConcurrentGCTimer* timer, const Ticks& start, const Ticks& end) const { if (ZAbort::should_abort()) { return; } - timer()->register_gc_concurrent_end(end); + timer->register_gc_concurrent_end(end); const Tickspan duration = end - start; ZStatSample(_sampler, duration.value()); @@ -733,11 +804,16 @@ void ZStatPhaseConcurrent::register_end(const Ticks& start, const Ticks& end) co log_end(log, duration); } -ZStatSubPhase::ZStatSubPhase(const char* name) : - ZStatPhase("Subphase", name) {} +ZStatSubPhase::ZStatSubPhase(const char* name, ZGenerationId id) : + ZStatPhase(id == ZGenerationId::young ? 
"Young Subphase" : "Old Subphase", name) {} -void ZStatSubPhase::register_start(const Ticks& start) const { - if (ZThread::is_worker()) { +void ZStatSubPhase::register_start(ConcurrentGCTimer* timer, const Ticks& start) const { + if (timer != nullptr) { + assert(!Thread::current()->is_Worker_thread(), "Unexpected timer value"); + timer->register_gc_phase_start(name(), start); + } + + if (Thread::current()->is_Worker_thread()) { LogTarget(Trace, gc, phases, start) log; log_start(log, true /* thread */); } else { @@ -746,17 +822,22 @@ void ZStatSubPhase::register_start(const Ticks& start) const { } } -void ZStatSubPhase::register_end(const Ticks& start, const Ticks& end) const { +void ZStatSubPhase::register_end(ConcurrentGCTimer* timer, const Ticks& start, const Ticks& end) const { if (ZAbort::should_abort()) { return; } - ZTracer::tracer()->report_thread_phase(name(), start, end); + if (timer != nullptr) { + assert(!Thread::current()->is_Worker_thread(), "Unexpected timer value"); + timer->register_gc_phase_end(end); + } + + ZTracer::report_thread_phase(name(), start, end); const Tickspan duration = end - start; ZStatSample(_sampler, duration.value()); - if (ZThread::is_worker()) { + if (Thread::current()->is_Worker_thread()) { LogTarget(Trace, gc, phases) log; log_end(log, duration, true /* thread */); } else { @@ -770,15 +851,15 @@ ZStatCriticalPhase::ZStatCriticalPhase(const char* name, bool verbose) : _counter("Critical", name, ZStatUnitOpsPerSecond), _verbose(verbose) {} -void ZStatCriticalPhase::register_start(const Ticks& start) const { +void ZStatCriticalPhase::register_start(ConcurrentGCTimer* timer, const Ticks& start) const { // This is called from sensitive contexts, for example before an allocation stall // has been resolved. This means we must not access any oops in here since that // could lead to infinite recursion. Without access to the thread name we can't // really log anything useful here. 
} -void ZStatCriticalPhase::register_end(const Ticks& start, const Ticks& end) const { - ZTracer::tracer()->report_thread_phase(name(), start, end); +void ZStatCriticalPhase::register_end(ConcurrentGCTimer* timer, const Ticks& start, const Ticks& end) const { + ZTracer::report_thread_phase(name(), start, end); const Tickspan duration = end - start; ZStatSample(_sampler, duration.value()); @@ -793,10 +874,16 @@ void ZStatCriticalPhase::register_end(const Ticks& start, const Ticks& end) cons } } -// -// Stat timer -// -THREAD_LOCAL uint32_t ZStatTimerDisable::_active = 0; +ZStatTimerYoung::ZStatTimerYoung(const ZStatPhase& phase) : + ZStatTimer(phase, ZGeneration::young()->gc_timer()) {} + +ZStatTimerOld::ZStatTimerOld(const ZStatPhase& phase) : + ZStatTimer(phase, ZGeneration::old()->gc_timer()) {} + +ZStatTimerWorker::ZStatTimerWorker(const ZStatPhase& phase) : + ZStatTimer(phase, nullptr /* gc_timer */) { + assert(Thread::current()->is_Worker_thread(), "Should only be called by worker thread"); +} // // Stat sample/inc @@ -824,14 +911,14 @@ void ZStatSample(const ZStatSampler& sampler, uint64_t value) { max = prev_max; } - ZTracer::tracer()->report_stat_sampler(sampler, value); + ZTracer::report_stat_sampler(sampler, value); } void ZStatInc(const ZStatCounter& counter, uint64_t increment) { ZStatCounterData* const cpu_data = counter.get(); const uint64_t value = Atomic::add(&cpu_data->_counter, increment); - ZTracer::tracer()->report_stat_counter(counter, increment, value); + ZTracer::report_stat_counter(counter, increment, value); } void ZStatInc(const ZStatUnsampledCounter& counter, uint64_t increment) { @@ -840,36 +927,88 @@ void ZStatInc(const ZStatUnsampledCounter& counter, uint64_t increment) { } // -// Stat allocation rate +// Stat mutator allocation rate // -const ZStatUnsampledCounter ZStatAllocRate::_counter("Allocation Rate"); -TruncatedSeq ZStatAllocRate::_samples(ZStatAllocRate::sample_hz); -TruncatedSeq ZStatAllocRate::_rate(ZStatAllocRate::sample_hz); +ZLock* ZStatMutatorAllocRate::_stat_lock; +jlong ZStatMutatorAllocRate::_last_sample_time; +volatile size_t ZStatMutatorAllocRate::_sampling_granule; +volatile size_t ZStatMutatorAllocRate::_allocated_since_sample; +TruncatedSeq ZStatMutatorAllocRate::_samples_time(100); +TruncatedSeq ZStatMutatorAllocRate::_samples_bytes(100); +TruncatedSeq ZStatMutatorAllocRate::_rate(100); + +void ZStatMutatorAllocRate::initialize() { + _last_sample_time = os::elapsed_counter(); + _stat_lock = new ZLock(); + update_sampling_granule(); +} -const ZStatUnsampledCounter& ZStatAllocRate::counter() { - return _counter; +void ZStatMutatorAllocRate::update_sampling_granule() { + const size_t sampling_heap_granules = 128; + const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity(); + _sampling_granule = align_up(soft_max_capacity / sampling_heap_granules, ZGranuleSize); } -uint64_t ZStatAllocRate::sample_and_reset() { - const ZStatCounterData bytes_per_sample = _counter.collect_and_reset(); - _samples.add(bytes_per_sample._counter); +void ZStatMutatorAllocRate::sample_allocation(size_t allocation_bytes) { + const size_t allocated = Atomic::add(&_allocated_since_sample, allocation_bytes); + + if (allocated < Atomic::load(&_sampling_granule)) { + // No need for sampling yet + return; + } + + if (!_stat_lock->try_lock()) { + // Someone beat us to it + return; + } - const uint64_t bytes_per_second = _samples.sum(); + const size_t allocated_sample = Atomic::load(&_allocated_since_sample); + + if (allocated_sample < _sampling_granule) { + // 
Someone beat us to it + _stat_lock->unlock(); + return; + } + + const jlong now = os::elapsed_counter(); + const jlong elapsed = now - _last_sample_time; + + if (elapsed <= 0) { + // Avoid sampling nonsense allocation rates + _stat_lock->unlock(); + return; + } + + Atomic::sub(&_allocated_since_sample, allocated_sample); + + _samples_time.add(elapsed); + _samples_bytes.add(allocated_sample); + + const double last_sample_bytes = _samples_bytes.sum(); + const double elapsed_time = _samples_time.sum(); + + const double elapsed_seconds = elapsed_time / os::elapsed_frequency(); + const double bytes_per_second = double(last_sample_bytes) / elapsed_seconds; _rate.add(bytes_per_second); - return bytes_per_second; -} + update_sampling_granule(); -double ZStatAllocRate::predict() { - return _rate.predict_next(); -} + _last_sample_time = now; + + log_debug(gc, alloc)("Mutator Allocation Rate: %.1fMB/s Predicted: %.1fMB/s, Avg: %.1f(+/-%.1f)MB/s", + bytes_per_second / M, + _rate.predict_next() / M, + _rate.avg() / M, + _rate.sd() / M); -double ZStatAllocRate::avg() { - return _rate.avg(); + _stat_lock->unlock(); + + ZDirector::evaluate_rules(); } -double ZStatAllocRate::sd() { - return _rate.sd(); +ZStatMutatorAllocRateStats ZStatMutatorAllocRate::stats() { + ZLocker locker(_stat_lock); + return {_rate.avg(), _rate.predict_next(), _rate.sd()}; } // @@ -879,16 +1018,17 @@ ZStat::ZStat() : _metronome(sample_hz) { set_name("ZStat"); create_and_start(); + ZStatMutatorAllocRate::initialize(); } void ZStat::sample_and_collect(ZStatSamplerHistory* history) const { // Sample counters - for (const ZStatCounter* counter = ZStatCounter::first(); counter != NULL; counter = counter->next()) { + for (const ZStatCounter* counter = ZStatCounter::first(); counter != nullptr; counter = counter->next()) { counter->sample_and_reset(); } // Collect samples - for (const ZStatSampler* sampler = ZStatSampler::first(); sampler != NULL; sampler = sampler->next()) { + for (const ZStatSampler* sampler = ZStatSampler::first(); sampler != nullptr; sampler = sampler->next()) { ZStatSamplerHistory& sampler_history = history[sampler->id()]; sampler_history.add(sampler->collect_and_reset()); } @@ -913,7 +1053,7 @@ void ZStat::print(LogTargetHandle log, const ZStatSamplerHistory* history) const log.print(" Last 10s Last 10m Last 10h Total"); log.print(" Avg / Max Avg / Max Avg / Max Avg / Max"); - for (const ZStatSampler* sampler = ZStatSampler::first(); sampler != NULL; sampler = sampler->next()) { + for (const ZStatSampler* sampler = ZStatSampler::first(); sampler != nullptr; sampler = sampler->next()) { const ZStatSamplerHistory& sampler_history = history[sampler->id()]; const ZStatUnitPrinter printer = sampler->printer(); printer(log, *sampler, sampler_history); @@ -922,9 +1062,9 @@ void ZStat::print(LogTargetHandle log, const ZStatSamplerHistory* history) const log.print("========================================================================================================================================================="); } -void ZStat::run_service() { +void ZStat::run_thread() { ZStatSamplerHistory* const history = new ZStatSamplerHistory[ZStatSampler::count()]; - LogTarget(Info, gc, stats) log; + LogTarget(Debug, gc, stats) log; ZStatSampler::sort(); @@ -936,10 +1076,16 @@ void ZStat::run_service() { } } + // At exit print the final stats + LogTarget(Info, gc, stats) exit_log; + if (exit_log.is_enabled()) { + print(exit_log, history); + } + delete [] history; } -void ZStat::stop_service() { +void ZStat::terminate() { 
_metronome.stop(); } @@ -1072,59 +1218,75 @@ class ZStatTablePrinter { // // Stat cycle // -uint64_t ZStatCycle::_nwarmup_cycles = 0; -Ticks ZStatCycle::_start_of_last; -Ticks ZStatCycle::_end_of_last; -NumberSeq ZStatCycle::_serial_time(0.7 /* alpha */); -NumberSeq ZStatCycle::_parallelizable_time(0.7 /* alpha */); -uint ZStatCycle::_last_active_workers = 0; +ZStatCycle::ZStatCycle() : + _stat_lock(), + _nwarmup_cycles(0), + _start_of_last(), + _end_of_last(), + _cycle_intervals(0.7 /* alpha */), + _serial_time(0.7 /* alpha */), + _parallelizable_time(0.7 /* alpha */), + _parallelizable_duration(0.7 /* alpha */), + _last_active_workers(0.0) { +} void ZStatCycle::at_start() { + ZLocker locker(&_stat_lock); _start_of_last = Ticks::now(); } -void ZStatCycle::at_end(GCCause::Cause cause, uint active_workers) { +void ZStatCycle::at_end(ZStatWorkers* stat_workers, bool record_stats) { + ZLocker locker(&_stat_lock); + const Ticks end_of_last = _end_of_last; _end_of_last = Ticks::now(); - if (cause == GCCause::_z_warmup) { + if (ZDriver::major()->gc_cause() == GCCause::_z_warmup && _nwarmup_cycles < 3) { _nwarmup_cycles++; } - _last_active_workers = active_workers; - // Calculate serial and parallelizable GC cycle times const double duration = (_end_of_last - _start_of_last).seconds(); - const double workers_duration = ZStatWorkers::get_and_reset_duration(); + const double workers_duration = stat_workers->get_and_reset_duration(); + const double workers_time = stat_workers->get_and_reset_time(); const double serial_time = duration - workers_duration; - const double parallelizable_time = workers_duration * active_workers; - _serial_time.add(serial_time); - _parallelizable_time.add(parallelizable_time); + + _last_active_workers = workers_time / workers_duration; + + if (record_stats) { + _serial_time.add(serial_time); + _parallelizable_time.add(workers_time); + _parallelizable_duration.add(workers_duration); + if (end_of_last.value() != 0) { + const double cycle_interval = (_end_of_last - end_of_last).seconds(); + _cycle_intervals.add(cycle_interval); + } + } } bool ZStatCycle::is_warm() { return _nwarmup_cycles >= 3; } -uint64_t ZStatCycle::nwarmup_cycles() { - return _nwarmup_cycles; -} - bool ZStatCycle::is_time_trustable() { // The times are considered trustable if we // have completed at least one warmup cycle. 
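// Note: the serial/parallelizable split in ZStatCycle::at_end() above is plain arithmetic over
// two clocks — the wall-clock cycle duration versus the wall-clock duration and accumulated CPU
// time of the workers. A small worked example with made-up numbers (not taken from any real log):
#include <cassert>
#include <cmath>

int main() {
  // Hypothetical cycle: 1.0s wall clock, workers active for 0.8s of it,
  // accumulating 3.2s of worker CPU time over that window.
  const double cycle_duration   = 1.0;  // (_end_of_last - _start_of_last).seconds()
  const double workers_duration = 0.8;  // stat_workers->get_and_reset_duration()
  const double workers_time     = 3.2;  // stat_workers->get_and_reset_time()

  const double serial_time         = cycle_duration - workers_duration;  // 0.2s outside worker scopes
  const double last_active_workers = workers_time / workers_duration;    // 4 workers on average

  assert(std::fabs(serial_time - 0.2) < 1e-9);
  assert(std::fabs(last_active_workers - 4.0) < 1e-9);
  return 0;
}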
return _nwarmup_cycles > 0; } -const AbsSeq& ZStatCycle::serial_time() { - return _serial_time; +double ZStatCycle::last_active_workers() { + return _last_active_workers; } -const AbsSeq& ZStatCycle::parallelizable_time() { - return _parallelizable_time; -} +double ZStatCycle::duration_since_start() { + const Ticks start = _start_of_last; + if (start.value() == 0) { + // No end recorded yet, return time since VM start + return 0.0; + } -uint ZStatCycle::last_active_workers() { - return _last_active_workers; + const Ticks now = Ticks::now(); + const Tickspan duration_since_start = now - start; + return duration_since_start.seconds(); } double ZStatCycle::time_since_last() { @@ -1138,63 +1300,145 @@ double ZStatCycle::time_since_last() { return time_since_last.seconds(); } +ZStatCycleStats ZStatCycle::stats() { + ZLocker locker(&_stat_lock); + + return { + is_warm(), + _nwarmup_cycles, + is_time_trustable(), + time_since_last(), + last_active_workers(), + duration_since_start(), + _cycle_intervals.davg(), + _serial_time.davg(), + _serial_time.dsd(), + _parallelizable_time.davg(), + _parallelizable_time.dsd(), + _parallelizable_duration.davg(), + _parallelizable_duration.dsd() + }; +} + // // Stat workers // -Ticks ZStatWorkers::_start_of_last; -Tickspan ZStatWorkers::_accumulated_duration; - -void ZStatWorkers::at_start() { +ZStatWorkers::ZStatWorkers() : + _stat_lock(), + _active_workers(0), + _start_of_last(), + _accumulated_duration(), + _accumulated_time() {} + +void ZStatWorkers::at_start(uint active_workers) { + ZLocker locker(&_stat_lock); _start_of_last = Ticks::now(); + _active_workers = active_workers; } void ZStatWorkers::at_end() { + ZLocker locker(&_stat_lock); const Ticks now = Ticks::now(); const Tickspan duration = now - _start_of_last; + Tickspan time = duration; + for (uint i = 1; i < _active_workers; ++i) { + time += duration; + } + _accumulated_time += time; _accumulated_duration += duration; + _active_workers = 0; +} + +double ZStatWorkers::accumulated_time() { + const uint nworkers = _active_workers; + const Ticks now = Ticks::now(); + const Ticks start = _start_of_last; + Tickspan time = _accumulated_time; + if (nworkers != 0) { + for (uint i = 0; i < nworkers; ++i) { + time += now - start; + } + } + return time.seconds(); +} + +double ZStatWorkers::accumulated_duration() { + const Ticks now = Ticks::now(); + const Ticks start = _start_of_last; + Tickspan duration = _accumulated_duration; + if (_active_workers != 0) { + duration += now - start; + } + return duration.seconds(); +} + +uint ZStatWorkers::active_workers() { + return _active_workers; } double ZStatWorkers::get_and_reset_duration() { + ZLocker locker(&_stat_lock); const double duration = _accumulated_duration.seconds(); const Ticks now = Ticks::now(); _accumulated_duration = now - now; return duration; } +double ZStatWorkers::get_and_reset_time() { + ZLocker locker(&_stat_lock); + const double time = _accumulated_time.seconds(); + const Ticks now = Ticks::now(); + _accumulated_time = now - now; + return time; +} + +ZStatWorkersStats ZStatWorkers::stats() { + ZLocker locker(&_stat_lock); + return { + accumulated_time(), + accumulated_duration() + }; +} + // // Stat load // void ZStatLoad::print() { double loadavg[3] = {}; os::loadavg(loadavg, ARRAY_SIZE(loadavg)); - log_info(gc, load)("Load: %.2f/%.2f/%.2f", loadavg[0], loadavg[1], loadavg[2]); + log_info(gc, load)("Load: %.2f (%.0f%%) / %.2f (%.0f%%) / %.2f (%.0f%%)", + loadavg[0], percent_of(loadavg[0], (double) ZCPU::count()), + loadavg[1], 
percent_of(loadavg[1], (double) ZCPU::count()), + loadavg[2], percent_of(loadavg[2], (double) ZCPU::count())); } // // Stat mark // -size_t ZStatMark::_nstripes; -size_t ZStatMark::_nproactiveflush; -size_t ZStatMark::_nterminateflush; -size_t ZStatMark::_ntrycomplete; -size_t ZStatMark::_ncontinue; -size_t ZStatMark::_mark_stack_usage; - -void ZStatMark::set_at_mark_start(size_t nstripes) { +ZStatMark::ZStatMark() : + _nstripes(), + _nproactiveflush(), + _nterminateflush(), + _ntrycomplete(), + _ncontinue(), + _mark_stack_usage() { +} + +void ZStatMark::at_mark_start(size_t nstripes) { _nstripes = nstripes; } -void ZStatMark::set_at_mark_end(size_t nproactiveflush, - size_t nterminateflush, - size_t ntrycomplete, - size_t ncontinue) { +void ZStatMark::at_mark_end(size_t nproactiveflush, + size_t nterminateflush, + size_t ntrycomplete, + size_t ncontinue) { _nproactiveflush = nproactiveflush; _nterminateflush = nterminateflush; _ntrycomplete = ntrycomplete; _ncontinue = ncontinue; } -void ZStatMark::set_at_mark_free(size_t mark_stack_usage) { +void ZStatMark::at_mark_free(size_t mark_stack_usage) { _mark_stack_usage = mark_stack_usage; } @@ -1217,45 +1461,163 @@ void ZStatMark::print() { // // Stat relocation // -ZRelocationSetSelectorStats ZStatRelocation::_selector_stats; -size_t ZStatRelocation::_forwarding_usage; -size_t ZStatRelocation::_small_in_place_count; -size_t ZStatRelocation::_medium_in_place_count; +ZStatRelocation::ZStatRelocation() : + _selector_stats(), + _forwarding_usage(), + _small_selected(), + _small_in_place_count(), + _medium_selected(), + _medium_in_place_count() { +} -void ZStatRelocation::set_at_select_relocation_set(const ZRelocationSetSelectorStats& selector_stats) { +void ZStatRelocation::at_select_relocation_set(const ZRelocationSetSelectorStats& selector_stats) { _selector_stats = selector_stats; } -void ZStatRelocation::set_at_install_relocation_set(size_t forwarding_usage) { +void ZStatRelocation::at_install_relocation_set(size_t forwarding_usage) { _forwarding_usage = forwarding_usage; } -void ZStatRelocation::set_at_relocate_end(size_t small_in_place_count, size_t medium_in_place_count) { +void ZStatRelocation::at_relocate_end(size_t small_in_place_count, size_t medium_in_place_count) { _small_in_place_count = small_in_place_count; _medium_in_place_count = medium_in_place_count; } -void ZStatRelocation::print(const char* name, - const ZRelocationSetSelectorGroupStats& selector_group, - size_t in_place_count) { - log_info(gc, reloc)("%s Pages: " SIZE_FORMAT " / " SIZE_FORMAT "M, Empty: " SIZE_FORMAT "M, " - "Relocated: " SIZE_FORMAT "M, In-Place: " SIZE_FORMAT, - name, - selector_group.npages(), - selector_group.total() / M, - selector_group.empty() / M, - selector_group.relocate() / M, - in_place_count); -} +void ZStatRelocation::print_page_summary() { + LogTarget(Info, gc, reloc) lt; + + if (!_selector_stats.has_relocatable_pages() || !lt.is_enabled()) { + // Nothing to log or logging not enabled. 
+ return; + } + + // Zero initialize + ZStatRelocationSummary small_summary{}; + ZStatRelocationSummary medium_summary{}; + ZStatRelocationSummary large_summary{}; + + auto account_page_size = [&](ZStatRelocationSummary& summary, const ZRelocationSetSelectorGroupStats& stats) { + summary.npages_candidates += stats.npages_candidates(); + summary.total += stats.total(); + summary.empty += stats.empty(); + summary.npages_selected += stats.npages_selected(); + summary.relocate += stats.relocate(); + }; + + for (uint i = 0; i <= ZPageAgeMax; ++i) { + const ZPageAge age = static_cast(i); + + account_page_size(small_summary, _selector_stats.small(age)); + account_page_size(medium_summary, _selector_stats.medium(age)); + account_page_size(large_summary, _selector_stats.large(age)); + } + + ZStatTablePrinter pages(20, 12); + lt.print("%s", pages() + .fill() + .right("Candidates") + .right("Selected") + .right("In-Place") + .right("Size") + .right("Empty") + .right("Relocated") + .end()); + + auto print_summary = [&](const char* name, ZStatRelocationSummary& summary, size_t in_place_count) { + lt.print("%s", pages() + .left("%s Pages:", name) + .right("%zu", summary.npages_candidates) + .right("%zu", summary.npages_selected) + .right("%zu", in_place_count) + .right("%zuM", summary.total / M) + .right("%zuM", summary.empty / M) + .right("%zuM", summary.relocate /M) + .end()); + }; -void ZStatRelocation::print() { - print("Small", _selector_stats.small(), _small_in_place_count); + print_summary("Small", small_summary, _small_in_place_count); if (ZPageSizeMedium != 0) { - print("Medium", _selector_stats.medium(), _medium_in_place_count); + print_summary("Medium", medium_summary, _medium_in_place_count); } - print("Large", _selector_stats.large(), 0 /* in_place_count */); + print_summary("Large", large_summary, 0 /* in_place_count */); - log_info(gc, reloc)("Forwarding Usage: " SIZE_FORMAT "M", _forwarding_usage / M); + lt.print("Forwarding Usage: " SIZE_FORMAT "M", _forwarding_usage / M); +} + +void ZStatRelocation::print_age_table() { + LogTarget(Info, gc, reloc) lt; + if (!_selector_stats.has_relocatable_pages() || !lt.is_enabled()) { + // Nothing to log or logging not enabled. 
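// Note: the summary rows above are produced by folding the per-age group statistics into one
// accumulator per page size. A reduced sketch of that aggregation step, using a simplified
// stats struct and hypothetical values:
#include <cassert>
#include <cstddef>

// Simplified stand-in for ZRelocationSetSelectorGroupStats.
struct GroupStatsSketch {
  size_t npages_candidates;
  size_t npages_selected;
  size_t total;
  size_t empty;
  size_t relocate;
};

struct SummarySketch {
  size_t npages_candidates = 0;
  size_t npages_selected   = 0;
  size_t total             = 0;
  size_t empty             = 0;
  size_t relocate          = 0;
};

int main() {
  // One entry per age (eden, survivor, old) for a single page size.
  const GroupStatsSketch per_age[] = {
    {10, 4, 320, 16, 64},
    { 6, 2, 192,  0, 32},
    { 2, 0,  64,  0,  0}
  };

  SummarySketch summary;
  auto account = [&](const GroupStatsSketch& stats) {
    summary.npages_candidates += stats.npages_candidates;
    summary.npages_selected   += stats.npages_selected;
    summary.total             += stats.total;
    summary.empty             += stats.empty;
    summary.relocate          += stats.relocate;
  };

  for (const GroupStatsSketch& stats : per_age) {
    account(stats);
  }

  assert(summary.npages_candidates == 18);
  assert(summary.npages_selected   == 6);
  assert(summary.total             == 576);
  assert(summary.relocate          == 96);
  return 0;
}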
+ return; + } + + ZStatTablePrinter age_table(11, 18); + lt.print("Age Table:"); + lt.print("%s", age_table() + .fill() + .center("Live") + .center("Garbage") + .center("Small") + .center("Medium") + .center("Large") + .end()); + + size_t live[ZPageAgeMax + 1] = {}; + size_t total[ZPageAgeMax + 1] = {}; + + uint oldest_none_empty_age = 0; + + for (uint i = 0; i <= ZPageAgeMax; ++i) { + ZPageAge age = static_cast(i); + auto summarize_pages = [&](const ZRelocationSetSelectorGroupStats& stats) { + live[i] += stats.live(); + total[i] += stats.total(); + }; + + summarize_pages(_selector_stats.small(age)); + summarize_pages(_selector_stats.medium(age)); + summarize_pages(_selector_stats.large(age)); + + if (total[i] != 0) { + oldest_none_empty_age = i; + } + } + + for (uint i = 0; i <= oldest_none_empty_age; ++i) { + ZPageAge age = static_cast(i); + + FormatBuffer<> age_str(""); + if (age == ZPageAge::eden) { + age_str.append("Eden"); + } else if (age != ZPageAge::old) { + age_str.append("Survivor %d", i); + } + + auto create_age_table = [&]() { + if (live[i] == 0) { + return age_table() + .left("%s", age_str.buffer()) + .left(ZTABLE_ARGS_NA); + } else { + return age_table() + .left("%s", age_str.buffer()) + .left(ZTABLE_ARGS(live[i])); + } + }; + + lt.print("%s", create_age_table() + .left(ZTABLE_ARGS(total[i] - live[i])) + .left(SIZE_FORMAT_W(7) " / " SIZE_FORMAT, + _selector_stats.small(age).npages_candidates(), + _selector_stats.small(age).npages_selected()) + .left(SIZE_FORMAT_W(7) " / " SIZE_FORMAT, + _selector_stats.medium(age).npages_candidates(), + _selector_stats.medium(age).npages_selected()) + .left(SIZE_FORMAT_W(7) " / " SIZE_FORMAT, + _selector_stats.large(age).npages_candidates(), + _selector_stats.large(age).npages_selected()) + .end()); + } } // @@ -1271,7 +1633,7 @@ void ZStatNMethods::print() { // Stat metaspace // void ZStatMetaspace::print() { - MetaspaceCombinedStats stats = MetaspaceUtils::get_combined_statistics(); + const MetaspaceCombinedStats stats = MetaspaceUtils::get_combined_statistics(); log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "M used, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M reserved", @@ -1310,100 +1672,159 @@ void ZStatReferences::set_phantom(size_t encountered, size_t discovered, size_t set(&_phantom, encountered, discovered, enqueued); } -void ZStatReferences::print(const char* name, const ZStatReferences::ZCount& ref) { - log_info(gc, ref)("%s: " - SIZE_FORMAT " encountered, " - SIZE_FORMAT " discovered, " - SIZE_FORMAT " enqueued", - name, - ref.encountered, - ref.discovered, - ref.enqueued); -} - void ZStatReferences::print() { - print("Soft", _soft); - print("Weak", _weak); - print("Final", _final); - print("Phantom", _phantom); + LogTarget(Info, gc, ref) lt; + if (!lt.is_enabled()) { + // Nothing to log + return; + } + + ZStatTablePrinter refs(20, 12); + lt.print("%s", refs() + .fill() + .right("Encountered") + .right("Discovered") + .right("Enqueued") + .end()); + + auto ref_print = [&] (const char* name, const ZStatReferences::ZCount& ref) { + lt.print("%s", refs() + .left("%s References:", name) + .right("%zu", ref.encountered) + .right("%zu", ref.discovered) + .right("%zu", ref.enqueued) + .end()); + }; + + ref_print("Soft", _soft); + ref_print("Weak", _weak); + ref_print("Final", _final); + ref_print("Phantom", _phantom); } // // Stat heap // + +ZStatHeap::ZStatHeap() : + _stat_lock(), + _at_collection_start(), + _at_mark_start(), + _at_mark_end(), + _at_relocate_start(), + _at_relocate_end(), + _reclaimed_bytes(0.7 /* alpha */) { +} + 
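// Note: the reference table above is emitted through a small fluent row builder — fill the label
// column, append aligned cells, then end the row. A minimal hypothetical RowSketch illustrating
// that call style only; it is not the real ZStatTablePrinter implementation:
#include <cstdio>
#include <string>

// Hypothetical fluent row builder: each call appends one fixed-width cell.
class RowSketch {
  std::string _row;
  const int _width;
public:
  explicit RowSketch(int width) : _width(width) {}

  RowSketch& fill() {
    _row.append(static_cast<size_t>(_width), ' ');
    return *this;
  }

  RowSketch& left(const std::string& cell) {
    char buf[64];
    std::snprintf(buf, sizeof(buf), "%-*s", _width, cell.c_str());
    _row += buf;
    return *this;
  }

  RowSketch& right(const std::string& cell) {
    char buf[64];
    std::snprintf(buf, sizeof(buf), "%*s", _width, cell.c_str());
    _row += buf;
    return *this;
  }

  const std::string& end() const {
    return _row;
  }
};

int main() {
  // Header row followed by one data row, mirroring the call pattern above.
  std::printf("%s\n", RowSketch(14).fill().right("Encountered").right("Discovered").right("Enqueued").end().c_str());
  std::printf("%s\n", RowSketch(14).left("Soft:").right("1024").right("96").right("32").end().c_str());
  return 0;
}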
ZStatHeap::ZAtInitialize ZStatHeap::_at_initialize; -ZStatHeap::ZAtMarkStart ZStatHeap::_at_mark_start; -ZStatHeap::ZAtMarkEnd ZStatHeap::_at_mark_end; -ZStatHeap::ZAtRelocateStart ZStatHeap::_at_relocate_start; -ZStatHeap::ZAtRelocateEnd ZStatHeap::_at_relocate_end; -size_t ZStatHeap::capacity_high() { +size_t ZStatHeap::capacity_high() const { return MAX4(_at_mark_start.capacity, _at_mark_end.capacity, _at_relocate_start.capacity, _at_relocate_end.capacity); } -size_t ZStatHeap::capacity_low() { +size_t ZStatHeap::capacity_low() const { return MIN4(_at_mark_start.capacity, _at_mark_end.capacity, _at_relocate_start.capacity, _at_relocate_end.capacity); } -size_t ZStatHeap::free(size_t used) { +size_t ZStatHeap::free(size_t used) const { return _at_initialize.max_capacity - used; } -size_t ZStatHeap::allocated(size_t used, size_t reclaimed) { +size_t ZStatHeap::mutator_allocated(size_t used_generation, size_t freed, size_t relocated) const { // The amount of allocated memory between point A and B is used(B) - used(A). // However, we might also have reclaimed memory between point A and B. This // means the current amount of used memory must be incremented by the amount // reclaimed, so that used(B) represents the amount of used memory we would // have had if we had not reclaimed anything. - return (used + reclaimed) - _at_mark_start.used; + const size_t used_generation_delta = used_generation - _at_mark_start.used_generation; + return used_generation_delta + freed - relocated; } -size_t ZStatHeap::garbage(size_t reclaimed) { - return _at_mark_end.garbage - reclaimed; +size_t ZStatHeap::garbage(size_t freed, size_t relocated, size_t promoted) const { + return _at_mark_end.garbage - (freed - promoted - relocated); } -void ZStatHeap::set_at_initialize(const ZPageAllocatorStats& stats) { - _at_initialize.min_capacity = stats.min_capacity(); - _at_initialize.max_capacity = stats.max_capacity(); +size_t ZStatHeap::reclaimed(size_t freed, size_t relocated, size_t promoted) const { + return freed - relocated - promoted; } -void ZStatHeap::set_at_mark_start(const ZPageAllocatorStats& stats) { +void ZStatHeap::at_initialize(size_t min_capacity, size_t max_capacity) { + ZLocker locker(&_stat_lock); + + _at_initialize.min_capacity = min_capacity; + _at_initialize.max_capacity = max_capacity; +} + +void ZStatHeap::at_collection_start(const ZPageAllocatorStats& stats) { + ZLocker locker(&_stat_lock); + + _at_collection_start.soft_max_capacity = stats.soft_max_capacity(); + _at_collection_start.capacity = stats.capacity(); + _at_collection_start.free = free(stats.used()); + _at_collection_start.used = stats.used(); + _at_collection_start.used_generation = stats.used_generation(); +} + +void ZStatHeap::at_mark_start(const ZPageAllocatorStats& stats) { + ZLocker locker(&_stat_lock); + _at_mark_start.soft_max_capacity = stats.soft_max_capacity(); _at_mark_start.capacity = stats.capacity(); _at_mark_start.free = free(stats.used()); _at_mark_start.used = stats.used(); + _at_mark_start.used_generation = stats.used_generation(); + _at_mark_start.allocation_stalls = stats.allocation_stalls(); } -void ZStatHeap::set_at_mark_end(const ZPageAllocatorStats& stats) { +void ZStatHeap::at_mark_end(const ZPageAllocatorStats& stats) { + ZLocker locker(&_stat_lock); + _at_mark_end.capacity = stats.capacity(); _at_mark_end.free = free(stats.used()); _at_mark_end.used = stats.used(); - _at_mark_end.allocated = allocated(stats.used(), 0 /* reclaimed */); + _at_mark_end.used_generation = stats.used_generation(); + 
_at_mark_end.mutator_allocated = mutator_allocated(stats.used_generation(), 0 /* reclaimed */, 0 /* relocated */); + _at_mark_end.allocation_stalls = stats.allocation_stalls(); } -void ZStatHeap::set_at_select_relocation_set(const ZRelocationSetSelectorStats& stats) { - const size_t live = stats.small().live() + stats.medium().live() + stats.large().live(); +void ZStatHeap::at_select_relocation_set(const ZRelocationSetSelectorStats& stats) { + ZLocker locker(&_stat_lock); + + size_t live = 0; + for (uint i = 0; i <= ZPageAgeMax; ++i) { + const ZPageAge age = static_cast(i); + live += stats.small(age).live() + stats.medium(age).live() + stats.large(age).live(); + } _at_mark_end.live = live; - _at_mark_end.garbage = _at_mark_start.used - live; + _at_mark_end.garbage = _at_mark_start.used_generation - live; } -void ZStatHeap::set_at_relocate_start(const ZPageAllocatorStats& stats) { +void ZStatHeap::at_relocate_start(const ZPageAllocatorStats& stats) { + ZLocker locker(&_stat_lock); + + assert(stats.compacted() == 0, "Nothing should have been compacted"); + _at_relocate_start.capacity = stats.capacity(); _at_relocate_start.free = free(stats.used()); _at_relocate_start.used = stats.used(); - _at_relocate_start.allocated = allocated(stats.used(), stats.reclaimed()); - _at_relocate_start.garbage = garbage(stats.reclaimed()); - _at_relocate_start.reclaimed = stats.reclaimed(); + _at_relocate_start.used_generation = stats.used_generation(); + _at_relocate_start.live = _at_mark_end.live - stats.promoted(); + _at_relocate_start.garbage = garbage(stats.freed(), stats.compacted(), stats.promoted()); + _at_relocate_start.mutator_allocated = mutator_allocated(stats.used_generation(), stats.freed(), stats.compacted()); + _at_relocate_start.reclaimed = reclaimed(stats.freed(), stats.compacted(), stats.promoted()); + _at_relocate_start.promoted = stats.promoted(); + _at_relocate_start.compacted = stats.compacted(); + _at_relocate_start.allocation_stalls = stats.allocation_stalls(); } -void ZStatHeap::set_at_relocate_end(const ZPageAllocatorStats& stats, size_t non_worker_relocated) { - const size_t reclaimed = stats.reclaimed() - MIN2(non_worker_relocated, stats.reclaimed()); +void ZStatHeap::at_relocate_end(const ZPageAllocatorStats& stats, bool record_stats) { + ZLocker locker(&_stat_lock); _at_relocate_end.capacity = stats.capacity(); _at_relocate_end.capacity_high = capacity_high(); @@ -1414,24 +1835,87 @@ void ZStatHeap::set_at_relocate_end(const ZPageAllocatorStats& stats, size_t non _at_relocate_end.used = stats.used(); _at_relocate_end.used_high = stats.used_high(); _at_relocate_end.used_low = stats.used_low(); - _at_relocate_end.allocated = allocated(stats.used(), reclaimed); - _at_relocate_end.garbage = garbage(reclaimed); - _at_relocate_end.reclaimed = reclaimed; + _at_relocate_end.used_generation = stats.used_generation(); + _at_relocate_end.live = _at_mark_end.live - stats.promoted(); + _at_relocate_end.garbage = garbage(stats.freed(), stats.compacted(), stats.promoted()); + _at_relocate_end.mutator_allocated = mutator_allocated(stats.used_generation(), stats.freed(), stats.compacted()); + _at_relocate_end.reclaimed = reclaimed(stats.freed(), stats.compacted(), stats.promoted()); + _at_relocate_end.promoted = stats.promoted(); + _at_relocate_end.compacted = stats.compacted(); + _at_relocate_end.allocation_stalls = stats.allocation_stalls(); + + if (record_stats) { + _reclaimed_bytes.add(_at_relocate_end.reclaimed); + } +} + +size_t ZStatHeap::reclaimed_avg() { + return _reclaimed_bytes.davg(); 
} size_t ZStatHeap::max_capacity() { return _at_initialize.max_capacity; } -size_t ZStatHeap::used_at_mark_start() { +size_t ZStatHeap::used_at_collection_start() const { + return _at_collection_start.used; +} + +size_t ZStatHeap::used_at_mark_start() const { return _at_mark_start.used; } -size_t ZStatHeap::used_at_relocate_end() { +size_t ZStatHeap::used_generation_at_mark_start() const { + return _at_mark_start.used_generation; +} + +size_t ZStatHeap::live_at_mark_end() const { + return _at_mark_end.live; +} + +size_t ZStatHeap::allocated_at_mark_end() const { + return _at_mark_end.mutator_allocated; +} + +size_t ZStatHeap::garbage_at_mark_end() const { + return _at_mark_end.garbage; +} + +size_t ZStatHeap::used_at_relocate_end() const { return _at_relocate_end.used; } -void ZStatHeap::print() { +size_t ZStatHeap::used_at_collection_end() const { + return used_at_relocate_end(); +} + +size_t ZStatHeap::stalls_at_mark_start() const { + return _at_mark_start.allocation_stalls; +} + +size_t ZStatHeap::stalls_at_mark_end() const { + return _at_mark_end.allocation_stalls; +} + +size_t ZStatHeap::stalls_at_relocate_start() const { + return _at_relocate_start.allocation_stalls; +} + +size_t ZStatHeap::stalls_at_relocate_end() const { + return _at_relocate_end.allocation_stalls; +} + +ZStatHeapStats ZStatHeap::stats() { + ZLocker locker(&_stat_lock); + + return { + live_at_mark_end(), + used_at_relocate_end(), + reclaimed_avg() + }; +} + +void ZStatHeap::print(const ZGeneration* generation) const { log_info(gc, heap)("Min Capacity: " ZSIZE_FMT, ZSIZE_ARGS(_at_initialize.min_capacity)); log_info(gc, heap)("Max Capacity: " @@ -1439,8 +1923,9 @@ void ZStatHeap::print() { log_info(gc, heap)("Soft Max Capacity: " ZSIZE_FMT, ZSIZE_ARGS(_at_mark_start.soft_max_capacity)); - ZStatTablePrinter table(10, 18); - log_info(gc, heap)("%s", table() + log_info(gc, heap)("Heap Statistics:"); + ZStatTablePrinter heap_table(10, 18); + log_info(gc, heap)("%s", heap_table() .fill() .center("Mark Start") .center("Mark End") @@ -1449,7 +1934,7 @@ void ZStatHeap::print() { .center("High") .center("Low") .end()); - log_info(gc, heap)("%s", table() + log_info(gc, heap)("%s", heap_table() .right("Capacity:") .left(ZTABLE_ARGS(_at_mark_start.capacity)) .left(ZTABLE_ARGS(_at_mark_end.capacity)) @@ -1458,7 +1943,7 @@ void ZStatHeap::print() { .left(ZTABLE_ARGS(_at_relocate_end.capacity_high)) .left(ZTABLE_ARGS(_at_relocate_end.capacity_low)) .end()); - log_info(gc, heap)("%s", table() + log_info(gc, heap)("%s", heap_table() .right("Free:") .left(ZTABLE_ARGS(_at_mark_start.free)) .left(ZTABLE_ARGS(_at_mark_end.free)) @@ -1467,7 +1952,7 @@ void ZStatHeap::print() { .left(ZTABLE_ARGS(_at_relocate_end.free_high)) .left(ZTABLE_ARGS(_at_relocate_end.free_low)) .end()); - log_info(gc, heap)("%s", table() + log_info(gc, heap)("%s", heap_table() .right("Used:") .left(ZTABLE_ARGS(_at_mark_start.used)) .left(ZTABLE_ARGS(_at_mark_end.used)) @@ -1476,40 +1961,83 @@ void ZStatHeap::print() { .left(ZTABLE_ARGS(_at_relocate_end.used_high)) .left(ZTABLE_ARGS(_at_relocate_end.used_low)) .end()); - log_info(gc, heap)("%s", table() + + log_info(gc, heap)("%s Generation Statistics:", generation->is_young() ? 
"Young" : "Old"); + ZStatTablePrinter gen_table(10, 18); + log_info(gc, heap)("%s", gen_table() + .fill() + .center("Mark Start") + .center("Mark End") + .center("Relocate Start") + .center("Relocate End") + .end()); + log_info(gc, heap)("%s", gen_table() + .right("Used:") + .left(ZTABLE_ARGS(_at_mark_start.used_generation)) + .left(ZTABLE_ARGS(_at_mark_end.used_generation)) + .left(ZTABLE_ARGS(_at_relocate_start.used_generation)) + .left(ZTABLE_ARGS(_at_relocate_end.used_generation)) + .end()); + log_info(gc, heap)("%s", gen_table() .right("Live:") .left(ZTABLE_ARGS_NA) .left(ZTABLE_ARGS(_at_mark_end.live)) - .left(ZTABLE_ARGS(_at_mark_end.live /* Same as at mark end */)) - .left(ZTABLE_ARGS(_at_mark_end.live /* Same as at mark end */)) - .left(ZTABLE_ARGS_NA) - .left(ZTABLE_ARGS_NA) + .left(ZTABLE_ARGS(_at_relocate_start.live)) + .left(ZTABLE_ARGS(_at_relocate_end.live)) .end()); - log_info(gc, heap)("%s", table() - .right("Allocated:") - .left(ZTABLE_ARGS_NA) - .left(ZTABLE_ARGS(_at_mark_end.allocated)) - .left(ZTABLE_ARGS(_at_relocate_start.allocated)) - .left(ZTABLE_ARGS(_at_relocate_end.allocated)) - .left(ZTABLE_ARGS_NA) - .left(ZTABLE_ARGS_NA) - .end()); - log_info(gc, heap)("%s", table() + log_info(gc, heap)("%s", gen_table() .right("Garbage:") .left(ZTABLE_ARGS_NA) .left(ZTABLE_ARGS(_at_mark_end.garbage)) .left(ZTABLE_ARGS(_at_relocate_start.garbage)) .left(ZTABLE_ARGS(_at_relocate_end.garbage)) + .end()); + log_info(gc, heap)("%s", gen_table() + .right("Allocated:") .left(ZTABLE_ARGS_NA) - .left(ZTABLE_ARGS_NA) + .left(ZTABLE_ARGS(_at_mark_end.mutator_allocated)) + .left(ZTABLE_ARGS(_at_relocate_start.mutator_allocated)) + .left(ZTABLE_ARGS(_at_relocate_end.mutator_allocated)) .end()); - log_info(gc, heap)("%s", table() + log_info(gc, heap)("%s", gen_table() .right("Reclaimed:") .left(ZTABLE_ARGS_NA) .left(ZTABLE_ARGS_NA) .left(ZTABLE_ARGS(_at_relocate_start.reclaimed)) .left(ZTABLE_ARGS(_at_relocate_end.reclaimed)) + .end()); + if (generation->is_young()) { + log_info(gc, heap)("%s", gen_table() + .right("Promoted:") + .left(ZTABLE_ARGS_NA) + .left(ZTABLE_ARGS_NA) + .left(ZTABLE_ARGS(_at_relocate_start.promoted)) + .left(ZTABLE_ARGS(_at_relocate_end.promoted)) + .end()); + } + log_info(gc, heap)("%s", gen_table() + .right("Compacted:") + .left(ZTABLE_ARGS_NA) .left(ZTABLE_ARGS_NA) .left(ZTABLE_ARGS_NA) + .left(ZTABLE_ARGS(_at_relocate_end.compacted)) + .end()); +} + +void ZStatHeap::print_stalls() const { + ZStatTablePrinter stall_table(20, 16); + log_info(gc, alloc)("%s", stall_table() + .fill() + .center("Mark Start") + .center("Mark End") + .center("Relocate Start") + .center("Relocate End") + .end()); + log_info(gc, alloc)("%s", stall_table() + .left("%s", "Allocation Stalls:") + .center("%zu", _at_mark_start.allocation_stalls) + .center("%zu", _at_mark_end.allocation_stalls) + .center("%zu", _at_relocate_start.allocation_stalls) + .center("%zu", _at_relocate_end.allocation_stalls) .end()); } diff --git a/src/hotspot/share/gc/z/zStat.hpp b/src/hotspot/share/gc/z/zStat.hpp index 65b993811adb8..5b129c26fc98a 100644 --- a/src/hotspot/share/gc/z/zStat.hpp +++ b/src/hotspot/share/gc/z/zStat.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,22 +24,28 @@ #ifndef SHARE_GC_Z_ZSTAT_HPP #define SHARE_GC_Z_ZSTAT_HPP -#include "gc/shared/concurrentGCThread.hpp" #include "gc/shared/gcCause.hpp" #include "gc/shared/gcTimer.hpp" +#include "gc/z/zGenerationId.hpp" +#include "gc/z/zLock.hpp" #include "gc/z/zMetronome.hpp" +#include "gc/z/zRelocationSetSelector.hpp" +#include "gc/z/zThread.hpp" +#include "gc/z/zTracer.hpp" #include "logging/logHandle.hpp" #include "memory/allocation.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/numberSeq.hpp" #include "utilities/ticks.hpp" +class GCTracer; +class ZGeneration; class ZPage; class ZPageAllocatorStats; class ZRelocationSetSelectorGroupStats; -class ZRelocationSetSelectorStats; class ZStatSampler; class ZStatSamplerHistory; +class ZStatWorkers; struct ZStatCounterData; struct ZStatSamplerData; @@ -204,9 +210,6 @@ class ZStatMMU { // Stat phases // class ZStatPhase { -private: - static ConcurrentGCTimer _timer; - protected: const ZStatSampler _sampler; @@ -216,20 +219,39 @@ class ZStatPhase { void log_end(LogTargetHandle log, const Tickspan& duration, bool thread = false) const; public: - static ConcurrentGCTimer* timer(); - const char* name() const; - virtual void register_start(const Ticks& start) const = 0; - virtual void register_end(const Ticks& start, const Ticks& end) const = 0; + virtual void register_start(ConcurrentGCTimer* timer, const Ticks& start) const = 0; + virtual void register_end(ConcurrentGCTimer* timer, const Ticks& start, const Ticks& end) const = 0; +}; + +class ZStatPhaseCollection : public ZStatPhase { +private: + const bool _minor; + + GCTracer* jfr_tracer() const; + + void set_used_at_start(size_t used) const; + size_t used_at_start() const; + +public: + ZStatPhaseCollection(const char* name, bool minor); + + virtual void register_start(ConcurrentGCTimer* timer, const Ticks& start) const; + virtual void register_end(ConcurrentGCTimer* timer, const Ticks& start, const Ticks& end) const; }; -class ZStatPhaseCycle : public ZStatPhase { +class ZStatPhaseGeneration : public ZStatPhase { +private: + const ZGenerationId _id; + + ZGenerationTracer* jfr_tracer() const; + public: - ZStatPhaseCycle(const char* name); + ZStatPhaseGeneration(const char* name, ZGenerationId id); - virtual void register_start(const Ticks& start) const; - virtual void register_end(const Ticks& start, const Ticks& end) const; + virtual void register_start(ConcurrentGCTimer* timer, const Ticks& start) const; + virtual void register_end(ConcurrentGCTimer* timer, const Ticks& start, const Ticks& end) const; }; class ZStatPhasePause : public ZStatPhase { @@ -237,28 +259,28 @@ class ZStatPhasePause : public ZStatPhase { static Tickspan _max; // Max pause time public: - ZStatPhasePause(const char* name); + ZStatPhasePause(const char* name, ZGenerationId id); static const Tickspan& max(); - virtual void register_start(const Ticks& start) const; - virtual void register_end(const Ticks& start, const Ticks& end) const; + virtual void register_start(ConcurrentGCTimer* timer, const Ticks& start) const; + virtual void register_end(ConcurrentGCTimer* timer, const Ticks& start, const Ticks& end) const; }; class ZStatPhaseConcurrent : public ZStatPhase { public: - ZStatPhaseConcurrent(const char* name); + ZStatPhaseConcurrent(const char* name, ZGenerationId id); - virtual void register_start(const Ticks& start) const; - virtual void register_end(const Ticks& start, const Ticks& end) const; + virtual void 
register_start(ConcurrentGCTimer* timer, const Ticks& start) const; + virtual void register_end(ConcurrentGCTimer* timer, const Ticks& start, const Ticks& end) const; }; class ZStatSubPhase : public ZStatPhase { public: - ZStatSubPhase(const char* name); + ZStatSubPhase(const char* name, ZGenerationId id); - virtual void register_start(const Ticks& start) const; - virtual void register_end(const Ticks& start, const Ticks& end) const; + virtual void register_start(ConcurrentGCTimer* timer, const Ticks& start) const; + virtual void register_end(ConcurrentGCTimer* timer, const Ticks& start, const Ticks& end) const; }; class ZStatCriticalPhase : public ZStatPhase { @@ -269,53 +291,54 @@ class ZStatCriticalPhase : public ZStatPhase { public: ZStatCriticalPhase(const char* name, bool verbose = true); - virtual void register_start(const Ticks& start) const; - virtual void register_end(const Ticks& start, const Ticks& end) const; + virtual void register_start(ConcurrentGCTimer* timer, const Ticks& start) const; + virtual void register_end(ConcurrentGCTimer* timer, const Ticks& start, const Ticks& end) const; }; // // Stat timer // -class ZStatTimerDisable : public StackObj { +class ZStatTimer : public StackObj { private: - static THREAD_LOCAL uint32_t _active; + ConcurrentGCTimer* const _gc_timer; + const ZStatPhase& _phase; + const Ticks _start; public: - ZStatTimerDisable() { - _active++; + ZStatTimer(const ZStatPhase& phase, ConcurrentGCTimer* gc_timer) : + _gc_timer(gc_timer), + _phase(phase), + _start(Ticks::now()) { + _phase.register_start(_gc_timer, _start); + } + + ZStatTimer(const ZStatSubPhase& phase) : + ZStatTimer(phase, nullptr /* timer */) { } - ~ZStatTimerDisable() { - _active--; + ZStatTimer(const ZStatCriticalPhase& phase) : + ZStatTimer(phase, nullptr /* timer */) { } - static bool is_active() { - return _active > 0; + ~ZStatTimer() { + const Ticks end = Ticks::now(); + _phase.register_end(_gc_timer, _start, end); } }; -class ZStatTimer : public StackObj { -private: - const bool _enabled; - const ZStatPhase& _phase; - const Ticks _start; +class ZStatTimerYoung : public ZStatTimer { +public: + ZStatTimerYoung(const ZStatPhase& phase); +}; +class ZStatTimerOld : public ZStatTimer { public: - ZStatTimer(const ZStatPhase& phase) : - _enabled(!ZStatTimerDisable::is_active()), - _phase(phase), - _start(Ticks::now()) { - if (_enabled) { - _phase.register_start(_start); - } - } + ZStatTimerOld(const ZStatPhase& phase); +}; - ~ZStatTimer() { - if (_enabled) { - const Ticks end = Ticks::now(); - _phase.register_end(_start, end); - } - } +class ZStatTimerWorker : public ZStatTimer { +public: + ZStatTimerWorker(const ZStatPhase& phase); }; // @@ -325,30 +348,40 @@ void ZStatSample(const ZStatSampler& sampler, uint64_t value); void ZStatInc(const ZStatCounter& counter, uint64_t increment = 1); void ZStatInc(const ZStatUnsampledCounter& counter, uint64_t increment = 1); +struct ZStatMutatorAllocRateStats { + double _avg; + double _predict; + double _sd; +}; + // -// Stat allocation rate +// Stat mutator allocation rate // -class ZStatAllocRate : public AllStatic { +class ZStatMutatorAllocRate : public AllStatic { private: - static const ZStatUnsampledCounter _counter; - static TruncatedSeq _samples; - static TruncatedSeq _rate; + static ZLock* _stat_lock; + static jlong _last_sample_time; + static volatile size_t _sampling_granule; + static volatile size_t _allocated_since_sample; + static TruncatedSeq _samples_time; + static TruncatedSeq _samples_bytes; + static TruncatedSeq _rate; -public: - 
static const uint64_t sample_hz = 10; + static void update_sampling_granule(); +public: static const ZStatUnsampledCounter& counter(); - static uint64_t sample_and_reset(); + static void sample_allocation(size_t allocation_bytes); + + static void initialize(); - static double predict(); - static double avg(); - static double sd(); + static ZStatMutatorAllocRateStats stats(); }; // // Stat thread // -class ZStat : public ConcurrentGCThread { +class ZStat : public ZThread { private: static const uint64_t sample_hz = 1; @@ -359,54 +392,89 @@ class ZStat : public ConcurrentGCThread { void print(LogTargetHandle log, const ZStatSamplerHistory* history) const; protected: - virtual void run_service(); - virtual void stop_service(); + virtual void run_thread(); + virtual void terminate(); public: ZStat(); }; +struct ZStatCycleStats { + bool _is_warm; + uint64_t _nwarmup_cycles; + bool _is_time_trustable; + double _time_since_last; + double _last_active_workers; + double _duration_since_start; + double _avg_cycle_interval; + double _avg_serial_time; + double _sd_serial_time; + double _avg_parallelizable_time; + double _sd_parallelizable_time; + double _avg_parallelizable_duration; + double _sd_parallelizable_duration; +}; + // // Stat cycle // -class ZStatCycle : public AllStatic { +class ZStatCycle { private: - static uint64_t _nwarmup_cycles; - static Ticks _start_of_last; - static Ticks _end_of_last; - static NumberSeq _serial_time; - static NumberSeq _parallelizable_time; - static uint _last_active_workers; + ZLock _stat_lock; + uint64_t _nwarmup_cycles; + Ticks _start_of_last; + Ticks _end_of_last; + NumberSeq _cycle_intervals; + NumberSeq _serial_time; + NumberSeq _parallelizable_time; + NumberSeq _parallelizable_duration; + double _last_active_workers; + + bool is_warm(); + bool is_time_trustable(); + double last_active_workers(); + double duration_since_start(); + double time_since_last(); public: - static void at_start(); - static void at_end(GCCause::Cause cause, uint active_workers); - - static bool is_warm(); - static uint64_t nwarmup_cycles(); + ZStatCycle(); - static bool is_time_trustable(); - static const AbsSeq& serial_time(); - static const AbsSeq& parallelizable_time(); + void at_start(); + void at_end(ZStatWorkers* stats_workers, bool record_stats); - static uint last_active_workers(); + ZStatCycleStats stats(); +}; - static double time_since_last(); +struct ZStatWorkersStats { + double _accumulated_time; + double _accumulated_duration; }; // // Stat workers // -class ZStatWorkers : public AllStatic { +class ZStatWorkers { private: - static Ticks _start_of_last; - static Tickspan _accumulated_duration; + ZLock _stat_lock; + uint _active_workers; + Ticks _start_of_last; + Tickspan _accumulated_duration; + Tickspan _accumulated_time; + + double accumulated_duration(); + double accumulated_time(); + uint active_workers(); public: - static void at_start(); - static void at_end(); + ZStatWorkers(); + + void at_start(uint active_workers); + void at_end(); + + double get_and_reset_duration(); + double get_and_reset_time(); - static double get_and_reset_duration(); + ZStatWorkersStats stats(); }; // @@ -420,46 +488,62 @@ class ZStatLoad : public AllStatic { // // Stat mark // -class ZStatMark : public AllStatic { +class ZStatMark { private: - static size_t _nstripes; - static size_t _nproactiveflush; - static size_t _nterminateflush; - static size_t _ntrycomplete; - static size_t _ncontinue; - static size_t _mark_stack_usage; + size_t _nstripes; + size_t _nproactiveflush; + size_t 
_nterminateflush; + size_t _ntrycomplete; + size_t _ncontinue; + size_t _mark_stack_usage; public: - static void set_at_mark_start(size_t nstripes); - static void set_at_mark_end(size_t nproactiveflush, - size_t nterminateflush, - size_t ntrycomplete, - size_t ncontinue); - static void set_at_mark_free(size_t mark_stack_usage); + ZStatMark(); - static void print(); + void at_mark_start(size_t nstripes); + void at_mark_end(size_t nproactiveflush, + size_t nterminateflush, + size_t ntrycomplete, + size_t ncontinue); + void at_mark_free(size_t mark_stack_usage); + + void print(); +}; + +struct ZStatRelocationSummary { + size_t npages_candidates; + size_t total; + size_t live; + size_t empty; + size_t npages_selected; + size_t relocate; }; // // Stat relocation // -class ZStatRelocation : public AllStatic { +class ZStatRelocation { private: - static ZRelocationSetSelectorStats _selector_stats; - static size_t _forwarding_usage; - static size_t _small_in_place_count; - static size_t _medium_in_place_count; + ZRelocationSetSelectorStats _selector_stats; + size_t _forwarding_usage; + size_t _small_selected; + size_t _small_in_place_count; + size_t _medium_selected; + size_t _medium_in_place_count; - static void print(const char* name, - const ZRelocationSetSelectorGroupStats& selector_group, - size_t in_place_count); + void print(const char* name, + ZStatRelocationSummary selector_group, + size_t in_place_count); public: - static void set_at_select_relocation_set(const ZRelocationSetSelectorStats& selector_stats); - static void set_at_install_relocation_set(size_t forwarding_usage); - static void set_at_relocate_end(size_t small_in_place_count, size_t medium_in_place_count); + ZStatRelocation(); - static void print(); + void at_select_relocation_set(const ZRelocationSetSelectorStats& selector_stats); + void at_install_relocation_set(size_t forwarding_usage); + void at_relocate_end(size_t small_in_place_count, size_t medium_in_place_count); + + void print_page_summary(); + void print_age_table(); }; // @@ -490,7 +574,6 @@ class ZStatReferences : public AllStatic { } _soft, _weak, _final, _phantom; static void set(ZCount* count, size_t encountered, size_t discovered, size_t enqueued); - static void print(const char* name, const ZCount& ref); public: static void set_soft(size_t encountered, size_t discovered, size_t enqueued); @@ -501,42 +584,67 @@ class ZStatReferences : public AllStatic { static void print(); }; +struct ZStatHeapStats { + size_t _live_at_mark_end; + size_t _used_at_relocate_end; + size_t _reclaimed_avg; +}; + // // Stat heap // -class ZStatHeap : public AllStatic { +class ZStatHeap { private: + ZLock _stat_lock; + static struct ZAtInitialize { size_t min_capacity; size_t max_capacity; } _at_initialize; - static struct ZAtMarkStart { + struct ZAtGenerationCollectionStart { + size_t soft_max_capacity; + size_t capacity; + size_t free; + size_t used; + size_t used_generation; + } _at_collection_start; + + struct ZAtMarkStart { size_t soft_max_capacity; size_t capacity; size_t free; size_t used; + size_t used_generation; + size_t allocation_stalls; } _at_mark_start; - static struct ZAtMarkEnd { + struct ZAtMarkEnd { size_t capacity; size_t free; size_t used; + size_t used_generation; size_t live; - size_t allocated; size_t garbage; + size_t mutator_allocated; + size_t allocation_stalls; } _at_mark_end; - static struct ZAtRelocateStart { + struct ZAtRelocateStart { size_t capacity; size_t free; size_t used; - size_t allocated; + size_t used_generation; + size_t live; size_t garbage; + 
size_t mutator_allocated; size_t reclaimed; + size_t promoted; + size_t compacted; + size_t allocation_stalls; } _at_relocate_start; - static struct ZAtRelocateEnd { + struct ZAtRelocateEnd { size_t capacity; size_t capacity_high; size_t capacity_low; @@ -546,30 +654,56 @@ class ZStatHeap : public AllStatic { size_t used; size_t used_high; size_t used_low; - size_t allocated; + size_t used_generation; + size_t live; size_t garbage; + size_t mutator_allocated; size_t reclaimed; + size_t promoted; + size_t compacted; + size_t allocation_stalls; } _at_relocate_end; - static size_t capacity_high(); - static size_t capacity_low(); - static size_t free(size_t used); - static size_t allocated(size_t used, size_t reclaimed); - static size_t garbage(size_t reclaimed); + NumberSeq _reclaimed_bytes; + + size_t capacity_high() const; + size_t capacity_low() const; + size_t free(size_t used) const; + size_t mutator_allocated(size_t used, size_t freed, size_t relocated) const; + size_t garbage(size_t freed, size_t relocated, size_t promoted) const; + size_t reclaimed(size_t freed, size_t relocated, size_t promoted) const; public: - static void set_at_initialize(const ZPageAllocatorStats& stats); - static void set_at_mark_start(const ZPageAllocatorStats& stats); - static void set_at_mark_end(const ZPageAllocatorStats& stats); - static void set_at_select_relocation_set(const ZRelocationSetSelectorStats& stats); - static void set_at_relocate_start(const ZPageAllocatorStats& stats); - static void set_at_relocate_end(const ZPageAllocatorStats& stats, size_t non_worker_relocated); + ZStatHeap(); - static size_t max_capacity(); - static size_t used_at_mark_start(); - static size_t used_at_relocate_end(); + void at_initialize(size_t min_capacity, size_t max_capacity); + void at_collection_start(const ZPageAllocatorStats& stats); + void at_mark_start(const ZPageAllocatorStats& stats); + void at_mark_end(const ZPageAllocatorStats& stats); + void at_select_relocation_set(const ZRelocationSetSelectorStats& stats); + void at_relocate_start(const ZPageAllocatorStats& stats); + void at_relocate_end(const ZPageAllocatorStats& stats, bool record_stats); - static void print(); + static size_t max_capacity(); + size_t used_at_collection_start() const; + size_t used_at_mark_start() const; + size_t used_generation_at_mark_start() const; + size_t live_at_mark_end() const; + size_t allocated_at_mark_end() const; + size_t garbage_at_mark_end() const; + size_t used_at_relocate_end() const; + size_t used_at_collection_end() const; + size_t stalls_at_mark_start() const; + size_t stalls_at_mark_end() const; + size_t stalls_at_relocate_start() const; + size_t stalls_at_relocate_end() const; + + size_t reclaimed_avg(); + + ZStatHeapStats stats(); + + void print(const ZGeneration* generation) const; + void print_stalls() const; }; #endif // SHARE_GC_Z_ZSTAT_HPP diff --git a/src/hotspot/share/gc/z/zStoreBarrierBuffer.cpp b/src/hotspot/share/gc/z/zStoreBarrierBuffer.cpp new file mode 100644 index 0000000000000..015b22cadaf4a --- /dev/null +++ b/src/hotspot/share/gc/z/zStoreBarrierBuffer.cpp @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zBarrier.inline.hpp" +#include "gc/z/zGeneration.inline.hpp" +#include "gc/z/zStoreBarrierBuffer.inline.hpp" +#include "gc/z/zUncoloredRoot.inline.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/threadSMR.hpp" +#include "utilities/ostream.hpp" +#include "utilities/vmError.hpp" + +ByteSize ZStoreBarrierEntry::p_offset() { + return byte_offset_of(ZStoreBarrierEntry, _p); +} + +ByteSize ZStoreBarrierEntry::prev_offset() { + return byte_offset_of(ZStoreBarrierEntry, _prev); +} + +ByteSize ZStoreBarrierBuffer::buffer_offset() { + return byte_offset_of(ZStoreBarrierBuffer, _buffer); +} + +ByteSize ZStoreBarrierBuffer::current_offset() { + return byte_offset_of(ZStoreBarrierBuffer, _current); +} + +ZStoreBarrierBuffer::ZStoreBarrierBuffer() : + _buffer(), + _last_processed_color(), + _last_installed_color(), + _base_pointer_lock(), + _base_pointers(), + _current(ZBufferStoreBarriers ? _buffer_size_bytes : 0) { +} + +void ZStoreBarrierBuffer::initialize() { + _last_processed_color = ZPointerStoreGoodMask; + _last_installed_color = ZPointerStoreGoodMask; +} + +void ZStoreBarrierBuffer::clear() { + _current = _buffer_size_bytes; +} + +bool ZStoreBarrierBuffer::is_empty() const { + return _current == _buffer_size_bytes; +} + +void ZStoreBarrierBuffer::install_base_pointers_inner() { + assert(ZPointer::remap_bits(_last_installed_color) == + ZPointer::remap_bits(_last_processed_color), + "Can't deal with two pending base pointer installations"); + + assert((ZPointer::remap_bits(_last_processed_color) & ZPointerRemappedYoungMask) == 0 || + (ZPointer::remap_bits(_last_processed_color) & ZPointerRemappedOldMask) == 0, + "Should not have double bit errors"); + + for (int i = current(); i < (int)_buffer_length; ++i) { + const ZStoreBarrierEntry& entry = _buffer[i]; + volatile zpointer* const p = entry._p; + const zaddress_unsafe p_unsafe = to_zaddress_unsafe((uintptr_t)p); + + // Color with the last processed color + const zpointer ptr = ZAddress::color(p_unsafe, _last_processed_color); + + // Look up the generation that thinks that this pointer is not + // load good and check if the page is being relocated. 
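+      // If it is, record the object's base address so that the interior field
+      // address held in this entry can be remapped later, after the object has
+      // moved (see make_load_good() below).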
+ ZGeneration* const remap_generation = ZBarrier::remap_generation(ptr); + ZForwarding* const forwarding = remap_generation->forwarding(p_unsafe); + if (forwarding != nullptr) { + // Page is being relocated + ZPage* const page = forwarding->page(); + _base_pointers[i] = page->find_base(p); + } else { + // Page is not being relocated + _base_pointers[i] = zaddress_unsafe::null; + } + } +} + +void ZStoreBarrierBuffer::install_base_pointers() { + if (!ZBufferStoreBarriers) { + return; + } + + // Use a lock since both the GC and the Java thread race to install the base pointers + ZLocker locker(&_base_pointer_lock); + + const bool should_install_base_pointers = ZPointer::remap_bits(_last_installed_color) != ZPointerRemapped; + + if (should_install_base_pointers) { + install_base_pointers_inner(); + } + + // This is used as a claim mechanism to make sure that we only install the base pointers once + _last_installed_color = ZPointerStoreGoodMask; +} + +static volatile zpointer* make_load_good(volatile zpointer* p, zaddress_unsafe p_base, uintptr_t color) { + assert(!is_null(p_base), "need base pointer"); + + // Calculate field offset before p_base is remapped + const uintptr_t offset = (uintptr_t)p - untype(p_base); + + // Remap local-copy of base pointer + ZUncoloredRoot::process_no_keepalive(&p_base, color); + + // Retype now that the address is known to point to the correct address + const zaddress p_base_remapped = safe(p_base); + + assert(offset < ZUtils::object_size(p_base_remapped), + "wrong base object; live bits are invalid"); + + // Calculate remapped field address + const zaddress p_remapped = to_zaddress(untype(p_base_remapped) + offset); + + return (volatile zpointer*)p_remapped; +} + +void ZStoreBarrierBuffer::on_new_phase_relocate(int i) { + const uintptr_t last_remap_bits = ZPointer::remap_bits(_last_processed_color); + if (last_remap_bits == ZPointerRemapped) { + // All pointers are already remapped + return; + } + + const zaddress_unsafe p_base = _base_pointers[i]; + if (is_null(p_base)) { + // Page is not part of the relocation set + return; + } + + ZStoreBarrierEntry& entry = _buffer[i]; + + // Relocate the base object and calculate the remapped p + entry._p = make_load_good(entry._p, p_base, _last_processed_color); +} + +void ZStoreBarrierBuffer::on_new_phase_remember(int i) { + volatile zpointer* const p = _buffer[i]._p; + + if (ZHeap::heap()->is_young(p)) { + // Only need remset entries for old objects + return; + } + + const uintptr_t last_mark_young_bits = _last_processed_color & (ZPointerMarkedYoung0 | ZPointerMarkedYoung1); + const bool woke_up_in_young_mark = last_mark_young_bits != ZPointerMarkedYoung; + + if (woke_up_in_young_mark) { + // When young mark starts we "flip" the remembered sets. The remembered + // sets used before the young mark start becomes read-only and used by + // the GC to scan for old-to-young pointers to use as marking roots. + // + // Entries in the store buffer that were added before the mark young start, + // were supposed to be part of the remembered sets that the GC scans. + // However, it is too late to add those entries at this point, so instead + // we perform the GC remembered set scanning up-front here. + ZGeneration::young()->scan_remembered_field(p); + } else { + // The remembered set wasn't flipped in this phase shift, + // so just add the remembered set entry. 
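+    // remember() records this old-generation field in the current remembered
+    // set, so that the next young mark scans it as a potential old-to-young pointer.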
+ ZGeneration::young()->remember(p); + } +} + +bool ZStoreBarrierBuffer::is_old_mark() const { + return ZGeneration::old()->is_phase_mark(); +} + +bool ZStoreBarrierBuffer::stored_during_old_mark() const { + const uintptr_t last_mark_old_bits = _last_processed_color & (ZPointerMarkedOld0 | ZPointerMarkedOld1); + return last_mark_old_bits == ZPointerMarkedOld; +} + +void ZStoreBarrierBuffer::on_new_phase_mark(int i) { + const ZStoreBarrierEntry& entry = _buffer[i]; + const zpointer prev = entry._prev; + + if (is_null_any(prev)) { + return; + } + + volatile zpointer* const p = entry._p; + + // Young collections can start during old collections, but not the other + // way around. Therefore, only old marking can see a collection phase + // shift (resulting in a call to this function). + // + // Stores before the marking phase started is not a part of the SATB snapshot, + // and therefore shouldn't be used for marking. + // + // Locations in the young generation are not part of the old marking. + if (is_old_mark() && stored_during_old_mark() && ZHeap::heap()->is_old(p)) { + const zaddress addr = ZBarrier::make_load_good(prev); + ZUncoloredRoot::mark_object(addr); + } +} + +void ZStoreBarrierBuffer::on_new_phase() { + if (!ZBufferStoreBarriers) { + return; + } + + // Install all base pointers for relocation + install_base_pointers(); + + for (int i = current(); i < (int)_buffer_length; ++i) { + on_new_phase_relocate(i); + on_new_phase_remember(i); + on_new_phase_mark(i); + } + + clear(); + + _last_processed_color = ZPointerStoreGoodMask; + assert(_last_installed_color == _last_processed_color, "invariant"); +} + +class ZStoreBarrierBuffer::OnError : public VMErrorCallback { +private: + ZStoreBarrierBuffer* _buffer; + +public: + OnError(ZStoreBarrierBuffer* buffer) : + _buffer(buffer) {} + + virtual void call(outputStream* st) { + _buffer->on_error(st); + } +}; + +void ZStoreBarrierBuffer::on_error(outputStream* st) { + st->print_cr("ZStoreBarrierBuffer: error when flushing"); + st->print_cr(" _last_processed_color: " PTR_FORMAT, _last_processed_color); + st->print_cr(" _last_installed_color: " PTR_FORMAT, _last_installed_color); + + for (int i = current(); i < (int)_buffer_length; ++i) { + st->print_cr(" [%2d]: base: " PTR_FORMAT " p: " PTR_FORMAT " prev: " PTR_FORMAT, + i, + untype(_base_pointers[i]), + p2i(_buffer[i]._p), + untype(_buffer[i]._prev)); + } +} + +void ZStoreBarrierBuffer::flush() { + if (!ZBufferStoreBarriers) { + return; + } + + OnError on_error(this); + VMErrorCallbackMark mark(&on_error); + + for (int i = current(); i < (int)_buffer_length; ++i) { + const ZStoreBarrierEntry& entry = _buffer[i]; + const zaddress addr = ZBarrier::make_load_good(entry._prev); + ZBarrier::mark_and_remember(entry._p, addr); + } + + clear(); +} + +bool ZStoreBarrierBuffer::is_in(volatile zpointer* p) { + if (!ZBufferStoreBarriers) { + return false; + } + + for (JavaThreadIteratorWithHandle jtiwh; JavaThread * const jt = jtiwh.next(); ) { + ZStoreBarrierBuffer* const buffer = ZThreadLocalData::store_barrier_buffer(jt); + + const uintptr_t last_remap_bits = ZPointer::remap_bits(buffer->_last_processed_color) & ZPointerRemappedMask; + const bool needs_remap = last_remap_bits != ZPointerRemapped; + + for (int i = buffer->current(); i < (int)_buffer_length; ++i) { + const ZStoreBarrierEntry& entry = buffer->_buffer[i]; + volatile zpointer* entry_p = entry._p; + + // Potentially remap p + if (needs_remap) { + const zaddress_unsafe entry_p_base = buffer->_base_pointers[i]; + if (!is_null(entry_p_base)) { + 
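+          // Remap this entry's field address via its recorded base pointer so
+          // that the comparison below is done against the current address of the field.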
entry_p = make_load_good(entry_p, entry_p_base, buffer->_last_processed_color); + } + } + + // Check if p matches + if (entry_p == p) { + return true; + } + } + } + + return false; +} diff --git a/src/hotspot/share/gc/z/zStoreBarrierBuffer.hpp b/src/hotspot/share/gc/z/zStoreBarrierBuffer.hpp new file mode 100644 index 0000000000000..f917a6c3e7b58 --- /dev/null +++ b/src/hotspot/share/gc/z/zStoreBarrierBuffer.hpp @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZSTOREBARRIERBUFFER_HPP +#define SHARE_GC_Z_ZSTOREBARRIERBUFFER_HPP + +#include "gc/z/zAddress.hpp" +#include "gc/z/zGenerationId.hpp" +#include "gc/z/zLock.hpp" +#include "memory/allocation.hpp" +#include "utilities/sizes.hpp" + +struct ZStoreBarrierEntry { + volatile zpointer* _p; + zpointer _prev; + + static ByteSize p_offset(); + static ByteSize prev_offset(); +}; + +class ZStoreBarrierBuffer : public CHeapObj { + friend class ZVerify; + +private: + static const size_t _buffer_length = 32; + static const size_t _buffer_size_bytes = _buffer_length * sizeof(ZStoreBarrierEntry); + + ZStoreBarrierEntry _buffer[_buffer_length]; + + // Color from previous phase this buffer was processed + uintptr_t _last_processed_color; + + // Use as a claim mechanism for installing base pointers + uintptr_t _last_installed_color; + + ZLock _base_pointer_lock; + zaddress_unsafe _base_pointers[_buffer_length]; + + // sizeof(ZStoreBarrierEntry) scaled index growing downwards + size_t _current; + + void on_new_phase_relocate(int i); + void on_new_phase_remember(int i); + void on_new_phase_mark(int i); + + void clear(); + + bool is_old_mark() const; + bool stored_during_old_mark() const; + bool is_empty() const; + intptr_t current() const; + + void install_base_pointers_inner(); + + void on_error(outputStream* st); + class OnError; + +public: + ZStoreBarrierBuffer(); + + static ByteSize buffer_offset(); + static ByteSize current_offset(); + + static ZStoreBarrierBuffer* buffer_for_store(bool heal); + + void initialize(); + void on_new_phase(); + + void install_base_pointers(); + + void flush(); + void add(volatile zpointer* p, zpointer prev); + + // Check if p is contained in any store barrier buffer entry in the system + static bool is_in(volatile zpointer* p); +}; + +#endif // SHARE_GC_Z_ZSTOREBARRIERBUFFER_HPP diff --git a/src/hotspot/share/gc/z/zStoreBarrierBuffer.inline.hpp b/src/hotspot/share/gc/z/zStoreBarrierBuffer.inline.hpp new file mode 100644 index 0000000000000..762aac3ccd5bf --- /dev/null +++ 
b/src/hotspot/share/gc/z/zStoreBarrierBuffer.inline.hpp @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZSTOREBARRIERBUFFER_INLINE_HPP +#define SHARE_GC_Z_ZSTOREBARRIERBUFFER_INLINE_HPP + +#include "gc/z/zStoreBarrierBuffer.hpp" + +#include "gc/shared/gc_globals.hpp" +#include "gc/z/zThreadLocalData.hpp" +#include "runtime/thread.hpp" + +inline intptr_t ZStoreBarrierBuffer::current() const { + return _current / sizeof(ZStoreBarrierEntry); +} + +inline void ZStoreBarrierBuffer::add(volatile zpointer* p, zpointer prev) { + assert(ZBufferStoreBarriers, "Only buffer stores when it is enabled"); + if (_current == 0) { + flush(); + } + _current -= sizeof(ZStoreBarrierEntry); + _buffer[current()] = {p, prev}; +} + +inline ZStoreBarrierBuffer* ZStoreBarrierBuffer::buffer_for_store(bool heal) { + if (heal) { + return nullptr; + } + + Thread* const thread = Thread::current(); + if (!thread->is_Java_thread()) { + return nullptr; + } + + ZStoreBarrierBuffer* const buffer = ZThreadLocalData::store_barrier_buffer(JavaThread::cast(thread)); + return ZBufferStoreBarriers ? buffer : nullptr; +} + +#endif // SHARE_GC_Z_ZSTOREBARRIERBUFFER_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zTask.cpp b/src/hotspot/share/gc/z/zTask.cpp index 7a0503a4fb8a4..4e6d70c6c53f3 100644 --- a/src/hotspot/share/gc/z/zTask.cpp +++ b/src/hotspot/share/gc/z/zTask.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,16 +23,13 @@ #include "precompiled.hpp" #include "gc/z/zTask.hpp" -#include "gc/z/zThread.hpp" ZTask::Task::Task(ZTask* task, const char* name) : WorkerTask(name), _task(task) {} void ZTask::Task::work(uint worker_id) { - ZThread::set_worker_id(worker_id); _task->work(); - ZThread::clear_worker_id(); } ZTask::ZTask(const char* name) : @@ -45,3 +42,8 @@ const char* ZTask::name() const { WorkerTask* ZTask::worker_task() { return &_worker_task; } + +ZRestartableTask::ZRestartableTask(const char* name) : + ZTask(name) {} + +void ZRestartableTask::resize_workers(uint nworkers) {} diff --git a/src/hotspot/share/gc/z/zTask.hpp b/src/hotspot/share/gc/z/zTask.hpp index 80836aedcac53..37733f8873a45 100644 --- a/src/hotspot/share/gc/z/zTask.hpp +++ b/src/hotspot/share/gc/z/zTask.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,4 +50,10 @@ class ZTask : public StackObj { virtual void work() = 0; }; +class ZRestartableTask : public ZTask { +public: + ZRestartableTask(const char* name); + virtual void resize_workers(uint nworkers); +}; + #endif // SHARE_GC_Z_ZTASK_HPP diff --git a/src/hotspot/share/gc/z/zThread.cpp b/src/hotspot/share/gc/z/zThread.cpp index c0a9d5046c447..3203da6430f35 100644 --- a/src/hotspot/share/gc/z/zThread.cpp +++ b/src/hotspot/share/gc/z/zThread.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,59 +22,29 @@ */ #include "precompiled.hpp" -#include "gc/z/zThread.inline.hpp" -#include "runtime/javaThread.hpp" -#include "runtime/nonJavaThread.hpp" -#include "utilities/debug.hpp" +#include "gc/z/zThread.hpp" +#include "runtime/mutexLocker.hpp" -THREAD_LOCAL bool ZThread::_initialized; -THREAD_LOCAL uintptr_t ZThread::_id; -THREAD_LOCAL bool ZThread::_is_vm; -THREAD_LOCAL bool ZThread::_is_java; -THREAD_LOCAL bool ZThread::_is_worker; -THREAD_LOCAL uint ZThread::_worker_id; +void ZThread::run_service() { + run_thread(); -void ZThread::initialize() { - assert(!_initialized, "Already initialized"); - const Thread* const thread = Thread::current(); - _initialized = true; - _id = (uintptr_t)thread; - _is_vm = thread->is_VM_thread(); - _is_java = thread->is_Java_thread(); - _is_worker = false; - _worker_id = (uint)-1; -} + MonitorLocker ml(Terminator_lock, Monitor::_no_safepoint_check_flag); -const char* ZThread::name() { - const Thread* const thread = Thread::current(); - if (thread->is_Named_thread()) { - const NamedThread* const named = (const NamedThread*)thread; - return named->name(); - } else if (thread->is_Java_thread()) { - return "Java"; + // Wait for signal to terminate + while (!should_terminate()) { + ml.wait(); } - - return "Unknown"; -} - -void ZThread::set_worker() { - ensure_initialized(); - _is_worker = true; } -bool ZThread::has_worker_id() { - return _initialized && - _is_worker && - _worker_id != (uint)-1; -} - -void ZThread::set_worker_id(uint worker_id) { - ensure_initialized(); - assert(!has_worker_id(), "Worker id already initialized"); - _worker_id = worker_id; -} +void ZThread::stop_service() { + { + // Signal thread to terminate + // The should_terminate() flag should be true, and this notifies waiters + // to wake up. + MonitorLocker ml(Terminator_lock); + assert(should_terminate(), "This should be called when should_terminate has been set"); + ml.notify_all(); + } -void ZThread::clear_worker_id() { - assert(has_worker_id(), "Worker id not initialized"); - _worker_id = (uint)-1; + terminate(); } diff --git a/src/hotspot/share/gc/z/zThread.hpp b/src/hotspot/share/gc/z/zThread.hpp index c67807ff96ecd..c0b09ace465df 100644 --- a/src/hotspot/share/gc/z/zThread.hpp +++ b/src/hotspot/share/gc/z/zThread.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,38 +24,19 @@ #ifndef SHARE_GC_Z_ZTHREAD_HPP #define SHARE_GC_Z_ZTHREAD_HPP -#include "memory/allStatic.hpp" -#include "utilities/globalDefinitions.hpp" +#include "gc/shared/concurrentGCThread.hpp" -class ZThread : public AllStatic { - friend class ZTask; - friend class ZWorkersInitializeTask; - friend class ZRuntimeWorkersInitializeTask; +// A ZThread is a ConcurrentGCThread with some ZGC-specific handling of GC shutdown +class ZThread : public ConcurrentGCThread { private: - static THREAD_LOCAL bool _initialized; - static THREAD_LOCAL uintptr_t _id; - static THREAD_LOCAL bool _is_vm; - static THREAD_LOCAL bool _is_java; - static THREAD_LOCAL bool _is_worker; - static THREAD_LOCAL uint _worker_id; - - static void initialize(); - static void ensure_initialized(); - - static void set_worker(); - - static bool has_worker_id(); - static void set_worker_id(uint worker_id); - static void clear_worker_id(); + virtual void run_service(); + virtual void stop_service(); public: - static const char* name(); - static uintptr_t id(); - static bool is_vm(); - static bool is_java(); - static bool is_worker(); - static uint worker_id(); + + virtual void run_thread() = 0; + virtual void terminate() = 0; }; #endif // SHARE_GC_Z_ZTHREAD_HPP diff --git a/src/hotspot/share/gc/z/zThreadLocalAllocBuffer.cpp b/src/hotspot/share/gc/z/zThreadLocalAllocBuffer.cpp index fcad13ab6c07b..530db62f258df 100644 --- a/src/hotspot/share/gc/z/zThreadLocalAllocBuffer.cpp +++ b/src/hotspot/share/gc/z/zThreadLocalAllocBuffer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,11 +31,11 @@ #include "runtime/javaThread.hpp" #include "runtime/stackWatermarkSet.inline.hpp" -ZPerWorker* ZThreadLocalAllocBuffer::_stats = NULL; +ZPerWorker* ZThreadLocalAllocBuffer::_stats = nullptr; void ZThreadLocalAllocBuffer::initialize() { if (UseTLAB) { - assert(_stats == NULL, "Already initialized"); + assert(_stats == nullptr, "Already initialized"); _stats = new ZPerWorker(); reset_statistics(); } @@ -63,14 +63,9 @@ void ZThreadLocalAllocBuffer::publish_statistics() { } } -static void fixup_address(HeapWord** p) { - *p = (HeapWord*)ZAddress::good_or_null((uintptr_t)*p); -} - void ZThreadLocalAllocBuffer::retire(JavaThread* thread, ThreadLocalAllocStats* stats) { if (UseTLAB) { stats->reset(); - thread->tlab().addresses_do(fixup_address); thread->tlab().retire(stats); if (ResizeTLAB) { thread->tlab().resize(); @@ -78,12 +73,6 @@ void ZThreadLocalAllocBuffer::retire(JavaThread* thread, ThreadLocalAllocStats* } } -void ZThreadLocalAllocBuffer::remap(JavaThread* thread) { - if (UseTLAB) { - thread->tlab().addresses_do(fixup_address); - } -} - void ZThreadLocalAllocBuffer::update_stats(JavaThread* thread) { if (UseTLAB) { ZStackWatermark* const watermark = StackWatermarkSet::get(thread, StackWatermarkKind::gc); diff --git a/src/hotspot/share/gc/z/zThreadLocalAllocBuffer.hpp b/src/hotspot/share/gc/z/zThreadLocalAllocBuffer.hpp index 086c8a5c351ce..0a4cc3b54dfbd 100644 --- a/src/hotspot/share/gc/z/zThreadLocalAllocBuffer.hpp +++ b/src/hotspot/share/gc/z/zThreadLocalAllocBuffer.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,7 +41,6 @@ class ZThreadLocalAllocBuffer : public AllStatic { static void publish_statistics(); static void retire(JavaThread* thread, ThreadLocalAllocStats* stats); - static void remap(JavaThread* thread); static void update_stats(JavaThread* thread); }; diff --git a/src/hotspot/share/gc/z/zThreadLocalData.hpp b/src/hotspot/share/gc/z/zThreadLocalData.hpp index f8c362c74ed15..17f7ee9b099c4 100644 --- a/src/hotspot/share/gc/z/zThreadLocalData.hpp +++ b/src/hotspot/share/gc/z/zThreadLocalData.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,22 +24,42 @@ #ifndef SHARE_GC_Z_ZTHREADLOCALDATA_HPP #define SHARE_GC_Z_ZTHREADLOCALDATA_HPP +#include "gc/z/zAddress.hpp" +#include "gc/z/zGenerationId.hpp" #include "gc/z/zMarkStack.hpp" -#include "gc/z/zGlobals.hpp" +#include "gc/z/zStoreBarrierBuffer.hpp" #include "runtime/javaThread.hpp" #include "utilities/debug.hpp" #include "utilities/sizes.hpp" class ZThreadLocalData { private: - uintptr_t _address_bad_mask; - ZMarkThreadLocalStacks _stacks; - oop* _invisible_root; + uintptr_t _load_good_mask; + uintptr_t _load_bad_mask; + uintptr_t _mark_bad_mask; + uintptr_t _store_good_mask; + uintptr_t _store_bad_mask; + uintptr_t _uncolor_mask; + uintptr_t _nmethod_disarmed; + ZStoreBarrierBuffer* _store_barrier_buffer; + ZMarkThreadLocalStacks _mark_stacks[2]; + zaddress_unsafe* _invisible_root; ZThreadLocalData() : - _address_bad_mask(0), - _stacks(), - _invisible_root(NULL) {} + _load_good_mask(0), + _load_bad_mask(0), + _mark_bad_mask(0), + _store_good_mask(0), + _store_bad_mask(0), + _uncolor_mask(0), + _nmethod_disarmed(0), + _store_barrier_buffer(new ZStoreBarrierBuffer()), + _mark_stacks(), + _invisible_root(nullptr) {} + + ~ZThreadLocalData() { + delete _store_barrier_buffer; + } static ZThreadLocalData* data(Thread* thread) { return thread->gc_data(); @@ -54,37 +74,74 @@ class ZThreadLocalData { data(thread)->~ZThreadLocalData(); } - static void set_address_bad_mask(Thread* thread, uintptr_t mask) { - data(thread)->_address_bad_mask = mask; + static void set_load_bad_mask(Thread* thread, uintptr_t mask) { + data(thread)->_load_bad_mask = mask; + } + + static void set_mark_bad_mask(Thread* thread, uintptr_t mask) { + data(thread)->_mark_bad_mask = mask; + } + + static void set_store_bad_mask(Thread* thread, uintptr_t mask) { + data(thread)->_store_bad_mask = mask; + } + + static void set_load_good_mask(Thread* thread, uintptr_t mask) { + data(thread)->_load_good_mask = mask; + } + + static void set_store_good_mask(Thread* thread, uintptr_t mask) { + data(thread)->_store_good_mask = mask; + } + + static void set_nmethod_disarmed(Thread* thread, uintptr_t value) { + data(thread)->_nmethod_disarmed = value; + } + + static ZMarkThreadLocalStacks* mark_stacks(Thread* thread, ZGenerationId id) { + return &data(thread)->_mark_stacks[(int)id]; } - static ZMarkThreadLocalStacks* stacks(Thread* thread) { - return &data(thread)->_stacks; + static ZStoreBarrierBuffer* store_barrier_buffer(Thread* thread) { + return data(thread)->_store_barrier_buffer; } - static 
void set_invisible_root(Thread* thread, oop* root) { - assert(data(thread)->_invisible_root == NULL, "Already set"); + static void set_invisible_root(Thread* thread, zaddress_unsafe* root) { + assert(data(thread)->_invisible_root == nullptr, "Already set"); data(thread)->_invisible_root = root; } static void clear_invisible_root(Thread* thread) { - assert(data(thread)->_invisible_root != NULL, "Should be set"); - data(thread)->_invisible_root = NULL; + assert(data(thread)->_invisible_root != nullptr, "Should be set"); + data(thread)->_invisible_root = nullptr; } - template - static void do_invisible_root(Thread* thread, T f) { - if (data(thread)->_invisible_root != NULL) { - f(data(thread)->_invisible_root); - } + static zaddress_unsafe* invisible_root(Thread* thread) { + return data(thread)->_invisible_root; } - static ByteSize address_bad_mask_offset() { - return Thread::gc_data_offset() + byte_offset_of(ZThreadLocalData, _address_bad_mask); + static ByteSize load_bad_mask_offset() { + return Thread::gc_data_offset() + byte_offset_of(ZThreadLocalData, _load_bad_mask); + } + + static ByteSize mark_bad_mask_offset() { + return Thread::gc_data_offset() + byte_offset_of(ZThreadLocalData, _mark_bad_mask); + } + + static ByteSize store_bad_mask_offset() { + return Thread::gc_data_offset() + byte_offset_of(ZThreadLocalData, _store_bad_mask); + } + + static ByteSize store_good_mask_offset() { + return Thread::gc_data_offset() + byte_offset_of(ZThreadLocalData, _store_good_mask); } static ByteSize nmethod_disarmed_offset() { - return address_bad_mask_offset() + in_ByteSize(ZAddressBadMaskHighOrderBitsOffset); + return Thread::gc_data_offset() + byte_offset_of(ZThreadLocalData, _nmethod_disarmed); + } + + static ByteSize store_barrier_buffer_offset() { + return Thread::gc_data_offset() + byte_offset_of(ZThreadLocalData, _store_barrier_buffer); } }; diff --git a/src/hotspot/share/gc/z/zTracer.cpp b/src/hotspot/share/gc/z/zTracer.cpp index 5593462921199..bcd1ee8e350b0 100644 --- a/src/hotspot/share/gc/z/zTracer.cpp +++ b/src/hotspot/share/gc/z/zTracer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,7 +23,9 @@ #include "precompiled.hpp" #include "gc/shared/gcId.hpp" +#include "gc/z/zGeneration.inline.hpp" #include "gc/z/zGlobals.hpp" +#include "gc/z/zPageType.hpp" #include "gc/z/zStat.hpp" #include "gc/z/zTracer.hpp" #include "jfr/jfrEvents.hpp" @@ -40,11 +42,11 @@ class ZPageTypeConstant : public JfrSerializer { public: virtual void serialize(JfrCheckpointWriter& writer) { writer.write_count(3); - writer.write_key(ZPageTypeSmall); + writer.write_key((u8)ZPageType::small); writer.write("Small"); - writer.write_key(ZPageTypeMedium); + writer.write_key((u8)ZPageType::medium); writer.write("Medium"); - writer.write_key(ZPageTypeLarge); + writer.write_key((u8)ZPageType::large); writer.write("Large"); } }; @@ -53,7 +55,7 @@ class ZStatisticsCounterTypeConstant : public JfrSerializer { public: virtual void serialize(JfrCheckpointWriter& writer) { writer.write_count(ZStatCounter::count()); - for (ZStatCounter* counter = ZStatCounter::first(); counter != NULL; counter = counter->next()) { + for (ZStatCounter* counter = ZStatCounter::first(); counter != nullptr; counter = counter->next()) { writer.write_key(counter->id()); writer.write(counter->name()); } @@ -64,7 +66,7 @@ class ZStatisticsSamplerTypeConstant : public JfrSerializer { public: virtual void serialize(JfrCheckpointWriter& writer) { writer.write_count(ZStatSampler::count()); - for (ZStatSampler* sampler = ZStatSampler::first(); sampler != NULL; sampler = sampler->next()) { + for (ZStatSampler* sampler = ZStatSampler::first(); sampler != nullptr; sampler = sampler->next()) { writer.write_key(sampler->id()); writer.write(sampler->name()); } @@ -85,14 +87,39 @@ static void register_jfr_type_serializers() { #endif // INCLUDE_JFR -ZTracer* ZTracer::_tracer = NULL; +ZMinorTracer::ZMinorTracer() : + GCTracer(ZMinor) { +} + +ZMajorTracer::ZMajorTracer() : + GCTracer(ZMajor) {} + +void ZGenerationTracer::report_start(const Ticks& timestamp) { + _start = timestamp; +} + +void ZYoungTracer::report_end(const Ticks& timestamp) { + NoSafepointVerifier nsv; -ZTracer::ZTracer() : - GCTracer(Z) {} + EventZYoungGarbageCollection e(UNTIMED); + e.set_gcId(GCId::current()); + e.set_tenuringThreshold(ZGeneration::young()->tenuring_threshold()); + e.set_starttime(_start); + e.set_endtime(timestamp); + e.commit(); +} + +void ZOldTracer::report_end(const Ticks& timestamp) { + NoSafepointVerifier nsv; + + EventZOldGarbageCollection e(UNTIMED); + e.set_gcId(GCId::current()); + e.set_starttime(_start); + e.set_endtime(timestamp); + e.commit(); +} void ZTracer::initialize() { - assert(_tracer == NULL, "Already initialized"); - _tracer = new ZTracer(); JFR_ONLY(register_jfr_type_serializers()); } diff --git a/src/hotspot/share/gc/z/zTracer.hpp b/src/hotspot/share/gc/z/zTracer.hpp index fbf1a9346fc8e..703257324162b 100644 --- a/src/hotspot/share/gc/z/zTracer.hpp +++ b/src/hotspot/share/gc/z/zTracer.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,30 +25,58 @@ #define SHARE_GC_Z_ZTRACER_HPP #include "gc/shared/gcTrace.hpp" +#include "gc/z/zGenerationId.hpp" class ZStatCounter; class ZStatPhase; class ZStatSampler; -class ZTracer : public GCTracer, public CHeapObj { +class ZTracer : AllStatic { private: - static ZTracer* _tracer; + static void send_stat_counter(const ZStatCounter& counter, uint64_t increment, uint64_t value); + static void send_stat_sampler(const ZStatSampler& sampler, uint64_t value); + static void send_thread_phase(const char* name, const Ticks& start, const Ticks& end); + static void send_thread_debug(const char* name, const Ticks& start, const Ticks& end); - ZTracer(); +public: + static void initialize(); - void send_stat_counter(const ZStatCounter& counter, uint64_t increment, uint64_t value); - void send_stat_sampler(const ZStatSampler& sampler, uint64_t value); - void send_thread_phase(const char* name, const Ticks& start, const Ticks& end); - void send_thread_debug(const char* name, const Ticks& start, const Ticks& end); + static void report_stat_counter(const ZStatCounter& counter, uint64_t increment, uint64_t value); + static void report_stat_sampler(const ZStatSampler& sampler, uint64_t value); + static void report_thread_phase(const char* name, const Ticks& start, const Ticks& end); + static void report_thread_debug(const char* name, const Ticks& start, const Ticks& end); +}; +class ZMinorTracer : public GCTracer { public: - static ZTracer* tracer(); - static void initialize(); + ZMinorTracer(); +}; + +class ZMajorTracer : public GCTracer { +public: + ZMajorTracer(); +}; + +class ZGenerationTracer { +protected: + Ticks _start; - void report_stat_counter(const ZStatCounter& counter, uint64_t increment, uint64_t value); - void report_stat_sampler(const ZStatSampler& sampler, uint64_t value); - void report_thread_phase(const char* name, const Ticks& start, const Ticks& end); - void report_thread_debug(const char* name, const Ticks& start, const Ticks& end); +public: + ZGenerationTracer() : + _start() {} + + void report_start(const Ticks& timestamp); + virtual void report_end(const Ticks& timestamp) = 0; +}; + +class ZYoungTracer : public ZGenerationTracer { +public: + void report_end(const Ticks& timestamp) override; +}; + +class ZOldTracer : public ZGenerationTracer { +public: + void report_end(const Ticks& timestamp) override; }; // For temporary latency measurements during development and debugging diff --git a/src/hotspot/share/gc/z/zTracer.inline.hpp b/src/hotspot/share/gc/z/zTracer.inline.hpp index cfaf7b43f0c46..83a0fe471944c 100644 --- a/src/hotspot/share/gc/z/zTracer.inline.hpp +++ b/src/hotspot/share/gc/z/zTracer.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,10 +28,6 @@ #include "jfr/jfrEvents.hpp" -inline ZTracer* ZTracer::tracer() { - return _tracer; -} - inline void ZTracer::report_stat_counter(const ZStatCounter& counter, uint64_t increment, uint64_t value) { if (EventZStatisticsCounter::is_enabled()) { send_stat_counter(counter, increment, value); @@ -61,7 +57,7 @@ inline ZTraceThreadDebug::ZTraceThreadDebug(const char* name) : _name(name) {} inline ZTraceThreadDebug::~ZTraceThreadDebug() { - ZTracer::tracer()->report_thread_debug(_name, _start, Ticks::now()); + ZTracer::report_thread_debug(_name, _start, Ticks::now()); } #endif // SHARE_GC_Z_ZTRACER_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zUncoloredRoot.cpp b/src/hotspot/share/gc/z/zUncoloredRoot.cpp new file mode 100644 index 0000000000000..505e10628d7f8 --- /dev/null +++ b/src/hotspot/share/gc/z/zUncoloredRoot.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/z/zUncoloredRoot.inline.hpp" + +void ZUncoloredRootClosure::do_oop(oop* p) { + do_root(ZUncoloredRoot::cast(p)); +} + +void ZUncoloredRootClosure::do_oop(narrowOop* p) { + ShouldNotReachHere(); +} diff --git a/src/hotspot/share/gc/z/zUncoloredRoot.hpp b/src/hotspot/share/gc/z/zUncoloredRoot.hpp new file mode 100644 index 0000000000000..e980bd70eb2db --- /dev/null +++ b/src/hotspot/share/gc/z/zUncoloredRoot.hpp @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZUNCOLOREDROOT_HPP
+#define SHARE_GC_Z_ZUNCOLOREDROOT_HPP
+
+#include "gc/z/zAddress.hpp"
+#include "memory/allStatic.hpp"
+#include "memory/iterator.hpp"
+#include "oops/oopsHierarchy.hpp"
+
+// ZGC has two types of oops:
+//
+// Colored oops (zpointer)
+//   Metadata explicitly encoded in the pointer bits.
+//   Requires normal GC barriers to use.
+//   - OopStorage oops.
+//
+// Uncolored oops (zaddress, zaddress_unsafe)
+//   Metadata is either implicit or stored elsewhere
+//   Requires specialized GC barriers
+//   - nmethod oops - nmethod entry barriers
+//   - Thread oops - stack watermark barriers
+//
+// Even though the uncolored roots lack the color/metadata, ZGC still needs
+// that information when processing the roots. Therefore, we store the color
+// in the "container" object where the oop is located, and use specialized
+// GC barriers, which accept the external color as an extra argument. These
+// roots are handled in this file.
+//
+// The zaddress_unsafe type is used to hold uncolored oops that the GC needs
+// to process before it is safe to use. E.g. the original object might have
+// been relocated and the address needs to be updated. The zaddress type
+// denotes that this pointer refers to the correct address of the object.
+
+class ZUncoloredRoot : public AllStatic {
+private:
+ template <typename ObjectFunctionT>
+ static void barrier(ObjectFunctionT function, zaddress_unsafe* p, uintptr_t color);
+
+ static zaddress make_load_good(zaddress_unsafe addr, uintptr_t color);
+
+public:
+ // Operations to be used on oops that are known to be load good
+ static void mark_object(zaddress addr);
+ static void mark_invisible_object(zaddress addr);
+ static void keep_alive_object(zaddress addr);
+ static void mark_young_object(zaddress addr);
+
+ // Operations on roots, with an externally provided color
+ static void mark(zaddress_unsafe* p, uintptr_t color);
+ static void mark_young(zaddress_unsafe* p, uintptr_t color);
+ static void process(zaddress_unsafe* p, uintptr_t color);
+ static void process_invisible(zaddress_unsafe* p, uintptr_t color);
+ static void process_weak(zaddress_unsafe* p, uintptr_t color);
+ static void process_no_keepalive(zaddress_unsafe* p, uintptr_t color);
+
+ // Cast needed when ZGC interfaces with the rest of the JVM,
+ // which is agnostic to ZGC's oop type system.
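+ // The cast is a plain pointer reinterpretation; debug builds additionally
+ // assert that the loaded value is a valid uncolored address.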
+ static zaddress_unsafe* cast(oop* p); + + typedef void (*RootFunction)(zaddress_unsafe*, uintptr_t); + typedef void (*ObjectFunction)(zaddress); +}; + +class ZUncoloredRootClosure : public OopClosure { +private: + void do_oop(oop* p) final; + void do_oop(narrowOop* p) final; + +public: + virtual void do_root(zaddress_unsafe* p) = 0; +}; + +class ZUncoloredRootMarkOopClosure : public ZUncoloredRootClosure { +private: + const uintptr_t _color; + +public: + ZUncoloredRootMarkOopClosure(uintptr_t color); + + virtual void do_root(zaddress_unsafe* p); +}; + +class ZUncoloredRootMarkYoungOopClosure : public ZUncoloredRootClosure { +private: + const uintptr_t _color; + +public: + ZUncoloredRootMarkYoungOopClosure(uintptr_t color); + + virtual void do_root(zaddress_unsafe* p); +}; + +class ZUncoloredRootProcessOopClosure : public ZUncoloredRootClosure { +private: + const uintptr_t _color; + +public: + ZUncoloredRootProcessOopClosure(uintptr_t color); + + virtual void do_root(zaddress_unsafe* p); +}; + +class ZUncoloredRootProcessWeakOopClosure : public ZUncoloredRootClosure { +private: + const uintptr_t _color; + +public: + ZUncoloredRootProcessWeakOopClosure(uintptr_t color); + + virtual void do_root(zaddress_unsafe* p); +}; + +class ZUncoloredRootProcessNoKeepaliveOopClosure : public ZUncoloredRootClosure { +private: + const uintptr_t _color; + +public: + ZUncoloredRootProcessNoKeepaliveOopClosure(uintptr_t color); + + virtual void do_root(zaddress_unsafe* p); +}; + +#endif // SHARE_GC_Z_ZUNCOLOREDROOT_HPP diff --git a/src/hotspot/share/gc/z/zUncoloredRoot.inline.hpp b/src/hotspot/share/gc/z/zUncoloredRoot.inline.hpp new file mode 100644 index 0000000000000..a9885f94f19c9 --- /dev/null +++ b/src/hotspot/share/gc/z/zUncoloredRoot.inline.hpp @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_Z_ZUNCOLOREDROOT_INLINE_HPP +#define SHARE_GC_Z_ZUNCOLOREDROOT_INLINE_HPP + +#include "gc/z/zUncoloredRoot.hpp" + +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zBarrier.inline.hpp" +#include "gc/z/zHeap.inline.hpp" +#include "oops/oop.hpp" + +template +inline void ZUncoloredRoot::barrier(ObjectFunctionT function, zaddress_unsafe* p, uintptr_t color) { + z_assert_is_barrier_safe(); + + const zaddress_unsafe addr = Atomic::load(p); + assert_is_valid(addr); + + // Nothing to do for nulls + if (is_null(addr)) { + return; + } + + // Make load good + const zaddress load_good_addr = make_load_good(addr, color); + + // Apply function + function(load_good_addr); + + // Non-atomic healing helps speed up root scanning. This is safe to do + // since we are always healing roots in a safepoint, or under a lock, + // which ensures we are never racing with mutators modifying roots while + // we are healing them. It's also safe in case multiple GC threads try + // to heal the same root if it is aligned, since they would always heal + // the root in the same way and it does not matter in which order it + // happens. For misaligned oops, there needs to be mutual exclusion. + *(zaddress*)p = load_good_addr; +} + +inline zaddress ZUncoloredRoot::make_load_good(zaddress_unsafe addr, uintptr_t color) { + const zpointer color_ptr = ZAddress::color(zaddress::null, color); + if (!ZPointer::is_load_good(color_ptr)) { + return ZBarrier::relocate_or_remap(addr, ZBarrier::remap_generation(color_ptr)); + } else { + return safe(addr); + } +} + +inline void ZUncoloredRoot::mark_object(zaddress addr) { + ZBarrier::mark(addr); +} + +inline void ZUncoloredRoot::mark_young_object(zaddress addr) { + ZBarrier::mark_if_young(addr); +} + +inline void ZUncoloredRoot::mark_invisible_object(zaddress addr) { + ZBarrier::mark(addr); +} + +inline void ZUncoloredRoot::keep_alive_object(zaddress addr) { + ZBarrier::mark(addr); +} + +inline void ZUncoloredRoot::mark(zaddress_unsafe* p, uintptr_t color) { + barrier(mark_object, p, color); +} + +inline void ZUncoloredRoot::mark_young(zaddress_unsafe* p, uintptr_t color) { + barrier(mark_young_object, p, color); +} + +inline void ZUncoloredRoot::process(zaddress_unsafe* p, uintptr_t color) { + barrier(mark_object, p, color); +} + +inline void ZUncoloredRoot::process_invisible(zaddress_unsafe* p, uintptr_t color) { + barrier(mark_invisible_object, p, color); +} + +inline void ZUncoloredRoot::process_weak(zaddress_unsafe* p, uintptr_t color) { + barrier(keep_alive_object, p, color); +} + +inline void ZUncoloredRoot::process_no_keepalive(zaddress_unsafe* p, uintptr_t color) { + auto do_nothing = [](zaddress) -> void {}; + barrier(do_nothing, p, color); +} + +inline zaddress_unsafe* ZUncoloredRoot::cast(oop* p) { + zaddress_unsafe* const root = (zaddress_unsafe*)p; + DEBUG_ONLY(assert_is_valid(*root);) + return root; +} + +inline ZUncoloredRootMarkOopClosure::ZUncoloredRootMarkOopClosure(uintptr_t color) : + _color(color) {} + +inline void ZUncoloredRootMarkOopClosure::do_root(zaddress_unsafe* p) { + ZUncoloredRoot::mark(p, _color); +} + +inline ZUncoloredRootMarkYoungOopClosure::ZUncoloredRootMarkYoungOopClosure(uintptr_t color) : + _color(color) {} + +inline void ZUncoloredRootMarkYoungOopClosure::do_root(zaddress_unsafe* p) { + ZUncoloredRoot::mark_young(p, _color); +} + +inline ZUncoloredRootProcessOopClosure::ZUncoloredRootProcessOopClosure(uintptr_t color) : + _color(color) {} + +inline void ZUncoloredRootProcessOopClosure::do_root(zaddress_unsafe* p) { + 
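+  // Remap and mark the root using the color captured when the closure was created.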
ZUncoloredRoot::process(p, _color); +} + +inline ZUncoloredRootProcessWeakOopClosure::ZUncoloredRootProcessWeakOopClosure(uintptr_t color) : + _color(color) {} + +inline void ZUncoloredRootProcessWeakOopClosure::do_root(zaddress_unsafe* p) { + ZUncoloredRoot::process_weak(p, _color); +} + +inline ZUncoloredRootProcessNoKeepaliveOopClosure::ZUncoloredRootProcessNoKeepaliveOopClosure(uintptr_t color) : + _color(color) {} + +inline void ZUncoloredRootProcessNoKeepaliveOopClosure::do_root(zaddress_unsafe* p) { + ZUncoloredRoot::process_no_keepalive(p, _color); +} + +#endif // SHARE_GC_Z_ZUNCOLOREDROOT_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zUncommitter.cpp b/src/hotspot/share/gc/z/zUncommitter.cpp index 27ca4f4c19313..fb8cf29194f11 100644 --- a/src/hotspot/share/gc/z/zUncommitter.cpp +++ b/src/hotspot/share/gc/z/zUncommitter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -59,7 +59,7 @@ bool ZUncommitter::should_continue() const { return !_stop; } -void ZUncommitter::run_service() { +void ZUncommitter::run_thread() { uint64_t timeout = 0; while (wait(timeout)) { @@ -89,7 +89,7 @@ void ZUncommitter::run_service() { } } -void ZUncommitter::stop_service() { +void ZUncommitter::terminate() { ZLocker locker(&_lock); _stop = true; _lock.notify_all(); diff --git a/src/hotspot/share/gc/z/zUncommitter.hpp b/src/hotspot/share/gc/z/zUncommitter.hpp index 6cb38d9db4c10..b626df8dddfe9 100644 --- a/src/hotspot/share/gc/z/zUncommitter.hpp +++ b/src/hotspot/share/gc/z/zUncommitter.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,12 +24,12 @@ #ifndef SHARE_GC_Z_ZUNCOMMITTER_HPP #define SHARE_GC_Z_ZUNCOMMITTER_HPP -#include "gc/shared/concurrentGCThread.hpp" #include "gc/z/zLock.hpp" +#include "gc/z/zThread.hpp" -class ZPageAllocation; +class ZPageAllocator; -class ZUncommitter : public ConcurrentGCThread { +class ZUncommitter : public ZThread { private: ZPageAllocator* const _page_allocator; mutable ZConditionLock _lock; @@ -39,8 +39,8 @@ class ZUncommitter : public ConcurrentGCThread { bool should_continue() const; protected: - virtual void run_service(); - virtual void stop_service(); + virtual void run_thread(); + virtual void terminate(); public: ZUncommitter(ZPageAllocator* page_allocator); diff --git a/src/hotspot/share/gc/z/zUnload.cpp b/src/hotspot/share/gc/z/zUnload.cpp index 0378466324e8a..d601916ed6dde 100644 --- a/src/hotspot/share/gc/z/zUnload.cpp +++ b/src/hotspot/share/gc/z/zUnload.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
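The ZUncommitter change above is largely a rename from the ConcurrentGCThread protocol (run_service/stop_service) to the new ZThread protocol (run_thread/terminate): a periodic worker sleeps on a condition variable and is woken early when asked to terminate. A minimal standalone model of that protocol follows; PeriodicWorker is an illustrative stand-in, not HotSpot code.

#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

// Minimal stand-in for the run_thread()/terminate() protocol: a periodic
// worker that sleeps on a condition variable and wakes early on terminate().
class PeriodicWorker {
  std::mutex              _lock;
  std::condition_variable _cv;
  bool                    _stop = false;
  std::thread             _thread;

  void run_thread() {
    std::unique_lock<std::mutex> ml(_lock);
    while (!_stop) {
      // A real worker would do one unit of work here (e.g. uncommit memory).
      _cv.wait_for(ml, std::chrono::seconds(1), [&] { return _stop; });
    }
  }

public:
  PeriodicWorker() : _thread([this] { run_thread(); }) {}

  void terminate() {
    {
      std::lock_guard<std::mutex> ml(_lock);
      _stop = true;
    }
    _cv.notify_all();
    _thread.join();
  }
};
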
* * This code is free software; you can redistribute it and/or modify it @@ -30,36 +30,35 @@ #include "gc/shared/gcBehaviours.hpp" #include "gc/shared/suspendibleThreadSet.hpp" #include "gc/z/zBarrier.inline.hpp" +#include "gc/z/zBarrierSetNMethod.hpp" +#include "gc/z/zGeneration.inline.hpp" #include "gc/z/zLock.inline.hpp" #include "gc/z/zNMethod.hpp" #include "gc/z/zStat.hpp" +#include "gc/z/zUncoloredRoot.inline.hpp" #include "gc/z/zUnload.hpp" #include "memory/metaspaceUtils.hpp" #include "oops/access.inline.hpp" -static const ZStatSubPhase ZSubPhaseConcurrentClassesUnlink("Concurrent Classes Unlink"); -static const ZStatSubPhase ZSubPhaseConcurrentClassesPurge("Concurrent Classes Purge"); - -class ZPhantomIsAliveObjectClosure : public BoolObjectClosure { -public: - virtual bool do_object_b(oop o) { - return ZBarrier::is_alive_barrier_on_phantom_oop(o); - } -}; +static const ZStatSubPhase ZSubPhaseConcurrentClassesUnlink("Concurrent Classes Unlink", ZGenerationId::old); +static const ZStatSubPhase ZSubPhaseConcurrentClassesPurge("Concurrent Classes Purge", ZGenerationId::old); class ZIsUnloadingOopClosure : public OopClosure { private: - ZPhantomIsAliveObjectClosure _is_alive; - bool _is_unloading; + const uintptr_t _color; + bool _is_unloading; public: - ZIsUnloadingOopClosure() : - _is_alive(), + ZIsUnloadingOopClosure(nmethod* nm) : + _color(ZNMethod::color(nm)), _is_unloading(false) {} virtual void do_oop(oop* p) { - const oop o = RawAccess<>::oop_load(p); - if (o != NULL && !_is_alive.do_object_b(o)) { + // Create local, aligned root + zaddress_unsafe addr = Atomic::load(ZUncoloredRoot::cast(p)); + ZUncoloredRoot::process_no_keepalive(&addr, _color); + + if (!is_null(addr) && ZHeap::heap()->is_old(safe(addr)) && !ZHeap::heap()->is_object_live(safe(addr))) { _is_unloading = true; } } @@ -79,7 +78,11 @@ class ZIsUnloadingBehaviour : public IsUnloadingBehaviour { nmethod* const nm = method->as_nmethod(); ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm); ZLocker locker(lock); - ZIsUnloadingOopClosure cl; + if (!ZNMethod::is_armed(nm)) { + // Disarmed nmethods are alive + return false; + } + ZIsUnloadingOopClosure cl(nm); ZNMethod::nmethod_oops_do_inner(nm, &cl); return cl.is_unloading(); } @@ -139,13 +142,13 @@ void ZUnload::unlink() { return; } - ZStatTimer timer(ZSubPhaseConcurrentClassesUnlink); - SuspendibleThreadSetJoiner sts; + ZStatTimerOld timer(ZSubPhaseConcurrentClassesUnlink); + SuspendibleThreadSetJoiner sts_joiner; bool unloading_occurred; { MutexLocker ml(ClassLoaderDataGraph_lock); - unloading_occurred = SystemDictionary::do_unloading(ZStatPhase::timer()); + unloading_occurred = SystemDictionary::do_unloading(ZGeneration::old()->gc_timer()); } Klass::clean_weak_klass_links(unloading_occurred); @@ -158,10 +161,10 @@ void ZUnload::purge() { return; } - ZStatTimer timer(ZSubPhaseConcurrentClassesPurge); + ZStatTimerOld timer(ZSubPhaseConcurrentClassesPurge); { - SuspendibleThreadSetJoiner sts; + SuspendibleThreadSetJoiner sts_joiner; ZNMethod::purge(); } diff --git a/src/hotspot/share/gc/z/zUnmapper.cpp b/src/hotspot/share/gc/z/zUnmapper.cpp index 1997449fd5d28..95ea87f01f91d 100644 --- a/src/hotspot/share/gc/z/zUnmapper.cpp +++ b/src/hotspot/share/gc/z/zUnmapper.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
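The reworked ZIsUnloadingBehaviour above adds a fast path: an nmethod whose entry barrier is already disarmed has been processed in this cycle and is trivially alive, so only armed nmethods need their oops checked against the color captured from ZNMethod::color(nm). Below is a simplified standalone model of that decision, assuming a caller-supplied liveness predicate; CompiledMethodModel and is_dead are illustrative, not HotSpot types.

#include <cstdint>
#include <functional>
#include <vector>

// Simplified model of the "disarmed nmethods are alive" fast path.
struct CompiledMethodModel {
  bool armed;                        // armed == not yet processed this GC cycle
  uint64_t color;                    // color captured when the method was last patched
  std::vector<uintptr_t> oop_slots;  // embedded oops
};

static bool is_unloading(const CompiledMethodModel& nm,
                         const std::function<bool(uintptr_t, uint64_t)>& is_dead) {
  if (!nm.armed) {
    // A disarmed method already passed its entry barrier this cycle,
    // so all of its oops are known to be alive.
    return false;
  }
  for (uintptr_t slot : nm.oop_slots) {
    if (slot != 0 && is_dead(slot, nm.color)) {
      return true;                   // one dead strong oop makes the method unloading
    }
  }
  return false;
}
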
* * This code is free software; you can redistribute it and/or modify it @@ -45,11 +45,11 @@ ZPage* ZUnmapper::dequeue() { for (;;) { if (_stop) { - return NULL; + return nullptr; } ZPage* const page = _queue.remove_first(); - if (page != NULL) { + if (page != nullptr) { return page; } @@ -70,22 +70,16 @@ void ZUnmapper::do_unmap_and_destroy_page(ZPage* page) const { } void ZUnmapper::unmap_and_destroy_page(ZPage* page) { - // Asynchronous unmap and destroy is not supported with ZVerifyViews - if (ZVerifyViews) { - // Immediately unmap and destroy - do_unmap_and_destroy_page(page); - } else { - // Enqueue for asynchronous unmap and destroy - ZLocker locker(&_lock); - _queue.insert_last(page); - _lock.notify_all(); - } + // Enqueue for asynchronous unmap and destroy + ZLocker locker(&_lock); + _queue.insert_last(page); + _lock.notify_all(); } -void ZUnmapper::run_service() { +void ZUnmapper::run_thread() { for (;;) { ZPage* const page = dequeue(); - if (page == NULL) { + if (page == nullptr) { // Stop return; } @@ -94,7 +88,7 @@ void ZUnmapper::run_service() { } } -void ZUnmapper::stop_service() { +void ZUnmapper::terminate() { ZLocker locker(&_lock); _stop = true; _lock.notify_all(); diff --git a/src/hotspot/share/gc/z/zUnmapper.hpp b/src/hotspot/share/gc/z/zUnmapper.hpp index fd26394875958..23d384e797073 100644 --- a/src/hotspot/share/gc/z/zUnmapper.hpp +++ b/src/hotspot/share/gc/z/zUnmapper.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,14 +24,14 @@ #ifndef SHARE_GC_Z_ZUNMAPPER_HPP #define SHARE_GC_Z_ZUNMAPPER_HPP -#include "gc/shared/concurrentGCThread.hpp" #include "gc/z/zList.hpp" #include "gc/z/zLock.hpp" +#include "gc/z/zThread.hpp" class ZPage; class ZPageAllocator; -class ZUnmapper : public ConcurrentGCThread { +class ZUnmapper : public ZThread { private: ZPageAllocator* const _page_allocator; ZConditionLock _lock; @@ -42,8 +42,8 @@ class ZUnmapper : public ConcurrentGCThread { void do_unmap_and_destroy_page(ZPage* page) const; protected: - virtual void run_service(); - virtual void stop_service(); + virtual void run_thread(); + virtual void terminate(); public: ZUnmapper(ZPageAllocator* page_allocator); diff --git a/src/hotspot/share/gc/z/zUtils.cpp b/src/hotspot/share/gc/z/zUtils.cpp new file mode 100644 index 0000000000000..3804baad595d5 --- /dev/null +++ b/src/hotspot/share/gc/z/zUtils.cpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
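With the ZVerifyViews special case gone, ZUnmapper above always enqueues pages and lets a dedicated thread unmap them asynchronously; dequeue() blocks until a page is available and returns nullptr once terminate() has been called. A standalone sketch of that queue protocol using standard library primitives; Page and UnmapQueue are illustrative stand-ins.

#include <condition_variable>
#include <deque>
#include <mutex>

struct Page;  // opaque payload, stands in for ZPage

// Consumers block until a page is available or the queue is stopped;
// a nullptr return means "stop".
class UnmapQueue {
  std::mutex              _lock;
  std::condition_variable _cv;
  std::deque<Page*>       _queue;
  bool                    _stop = false;

public:
  void enqueue(Page* page) {
    std::lock_guard<std::mutex> ml(_lock);
    _queue.push_back(page);
    _cv.notify_all();
  }

  Page* dequeue() {
    std::unique_lock<std::mutex> ml(_lock);
    _cv.wait(ml, [&] { return _stop || !_queue.empty(); });
    if (_stop) {
      return nullptr;           // caller interprets nullptr as "terminate"
    }
    Page* page = _queue.front();
    _queue.pop_front();
    return page;
  }

  void terminate() {
    std::lock_guard<std::mutex> ml(_lock);
    _stop = true;
    _cv.notify_all();
  }
};
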
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/z/zUtils.hpp" +#include "runtime/nonJavaThread.hpp" + +#include + +const char* ZUtils::thread_name() { + const Thread* const thread = Thread::current(); + if (thread->is_Named_thread()) { + const NamedThread* const named = (const NamedThread*)thread; + return named->name(); + } + + return thread->type_name(); +} + +void ZUtils::fill(uintptr_t* addr, size_t count, uintptr_t value) { + std::fill_n(addr, count, value); +} diff --git a/src/hotspot/share/gc/z/zUtils.hpp b/src/hotspot/share/gc/z/zUtils.hpp index 470329daf0d39..f82ef06235c82 100644 --- a/src/hotspot/share/gc/z/zUtils.hpp +++ b/src/hotspot/share/gc/z/zUtils.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,11 +24,15 @@ #ifndef SHARE_GC_Z_ZUTILS_HPP #define SHARE_GC_Z_ZUTILS_HPP +#include "gc/z/zAddress.hpp" #include "memory/allStatic.hpp" #include "utilities/globalDefinitions.hpp" class ZUtils : public AllStatic { public: + // Thread + static const char* thread_name(); + // Allocation static uintptr_t alloc_aligned(size_t alignment, size_t size); @@ -37,9 +41,12 @@ class ZUtils : public AllStatic { static size_t words_to_bytes(size_t size_in_words); // Object - static size_t object_size(uintptr_t addr); - static void object_copy_disjoint(uintptr_t from, uintptr_t to, size_t size); - static void object_copy_conjoint(uintptr_t from, uintptr_t to, size_t size); + static size_t object_size(zaddress addr); + static void object_copy_disjoint(zaddress from, zaddress to, size_t size); + static void object_copy_conjoint(zaddress from, zaddress to, size_t size); + + // Memory + static void fill(uintptr_t* addr, size_t count, uintptr_t value); }; #endif // SHARE_GC_Z_ZUTILS_HPP diff --git a/src/hotspot/share/gc/z/zUtils.inline.hpp b/src/hotspot/share/gc/z/zUtils.inline.hpp index 17fc8a69bc77b..eda9fca9398a7 100644 --- a/src/hotspot/share/gc/z/zUtils.inline.hpp +++ b/src/hotspot/share/gc/z/zUtils.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
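The new ZUtils::fill() above simply forwards to std::fill_n, which is declared in <algorithm>. A minimal usage sketch follows; the zap pattern value is illustrative, not a value used by HotSpot.

#include <algorithm>   // std::fill_n
#include <cstdint>
#include <vector>

int main() {
  // Fill a word-sized buffer with a recognizable pattern, the way a debug
  // build might zap freed memory.
  std::vector<uintptr_t> words(16);
  const uintptr_t zap_value = static_cast<uintptr_t>(0xABABABABABABABABull);
  std::fill_n(words.data(), words.size(), zap_value);
  return words[0] == zap_value ? 0 : 1;
}
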
* * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #include "gc/z/zUtils.hpp" -#include "gc/z/zOop.inline.hpp" +#include "gc/z/zAddress.inline.hpp" #include "oops/oop.inline.hpp" #include "utilities/align.hpp" #include "utilities/copy.hpp" @@ -42,17 +42,17 @@ inline size_t ZUtils::words_to_bytes(size_t size_in_words) { return size_in_words << LogBytesPerWord; } -inline size_t ZUtils::object_size(uintptr_t addr) { - return words_to_bytes(ZOop::from_address(addr)->size()); +inline size_t ZUtils::object_size(zaddress addr) { + return words_to_bytes(to_oop(addr)->size()); } -inline void ZUtils::object_copy_disjoint(uintptr_t from, uintptr_t to, size_t size) { - Copy::aligned_disjoint_words((HeapWord*)from, (HeapWord*)to, bytes_to_words(size)); +inline void ZUtils::object_copy_disjoint(zaddress from, zaddress to, size_t size) { + Copy::aligned_disjoint_words((HeapWord*)untype(from), (HeapWord*)untype(to), bytes_to_words(size)); } -inline void ZUtils::object_copy_conjoint(uintptr_t from, uintptr_t to, size_t size) { +inline void ZUtils::object_copy_conjoint(zaddress from, zaddress to, size_t size) { if (from != to) { - Copy::aligned_conjoint_words((HeapWord*)from, (HeapWord*)to, bytes_to_words(size)); + Copy::aligned_conjoint_words((HeapWord*)untype(from), (HeapWord*)untype(to), bytes_to_words(size)); } } diff --git a/src/hotspot/share/gc/z/zValue.hpp b/src/hotspot/share/gc/z/zValue.hpp index e2c67e8c48d0f..29f1b707e7c6b 100644 --- a/src/hotspot/share/gc/z/zValue.hpp +++ b/src/hotspot/share/gc/z/zValue.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #ifndef SHARE_GC_Z_ZVALUE_HPP #define SHARE_GC_Z_ZVALUE_HPP +#include "memory/allocation.hpp" #include "memory/allStatic.hpp" #include "utilities/globalDefinitions.hpp" diff --git a/src/hotspot/share/gc/z/zValue.inline.hpp b/src/hotspot/share/gc/z/zValue.inline.hpp index ec574b8a09f2b..a5c905484e7e2 100644 --- a/src/hotspot/share/gc/z/zValue.inline.hpp +++ b/src/hotspot/share/gc/z/zValue.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,10 +27,10 @@ #include "gc/z/zValue.hpp" #include "gc/shared/gc_globals.hpp" +#include "gc/shared/workerThread.hpp" #include "gc/z/zCPU.inline.hpp" #include "gc/z/zGlobals.hpp" #include "gc/z/zNUMA.hpp" -#include "gc/z/zThread.inline.hpp" #include "gc/z/zUtils.hpp" #include "runtime/globals.hpp" #include "utilities/align.hpp" @@ -106,11 +106,11 @@ inline size_t ZPerWorkerStorage::alignment() { } inline uint32_t ZPerWorkerStorage::count() { - return UseDynamicNumberOfGCThreads ? 
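The zUtils.inline.hpp changes above show the main consequence of the new strongly typed zaddress: raw-memory operations now need an explicit untype() before casting to HeapWord*. The sketch below models the same idiom with an enum class; zaddress, untype() and copy_bytes() here are simplified stand-ins, not the HotSpot definitions.

#include <cstddef>
#include <cstdint>
#include <cstring>

// An enum class does not convert implicitly to an integer or pointer, so
// every escape to raw memory must go through an explicit untype() call.
enum class zaddress : uintptr_t { null = 0 };

constexpr uintptr_t untype(zaddress addr) {
  return static_cast<uintptr_t>(addr);
}

constexpr zaddress to_zaddress(uintptr_t value) {
  return static_cast<zaddress>(value);
}

static void copy_bytes(zaddress from, zaddress to, std::size_t size_in_bytes) {
  // Without untype() these casts would not compile, which is the point:
  // accidental arithmetic on typed address values is rejected by the compiler.
  std::memcpy(reinterpret_cast<void*>(untype(to)),
              reinterpret_cast<const void*>(untype(from)),
              size_in_bytes);
}
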
ConcGCThreads : MAX2(ConcGCThreads, ParallelGCThreads); + return ConcGCThreads; } inline uint32_t ZPerWorkerStorage::id() { - return ZThread::worker_id(); + return WorkerThread::worker_id(); } // diff --git a/src/hotspot/share/gc/z/zVerify.cpp b/src/hotspot/share/gc/z/zVerify.cpp index 689b0ded69167..5742d46cb591e 100644 --- a/src/hotspot/share/gc/z/zVerify.cpp +++ b/src/hotspot/share/gc/z/zVerify.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -21,17 +21,20 @@ * questions. */ +#include "memory/allocation.hpp" #include "precompiled.hpp" #include "classfile/classLoaderData.hpp" #include "gc/shared/gc_globals.hpp" +#include "gc/shared/isGCActiveMark.hpp" #include "gc/z/zAddress.inline.hpp" +#include "gc/z/zGenerationId.hpp" #include "gc/z/zHeap.inline.hpp" #include "gc/z/zNMethod.hpp" -#include "gc/z/zOop.hpp" #include "gc/z/zPageAllocator.hpp" #include "gc/z/zResurrection.hpp" #include "gc/z/zRootsIterator.hpp" #include "gc/z/zStackWatermark.hpp" +#include "gc/z/zStoreBarrierBuffer.inline.hpp" #include "gc/z/zStat.hpp" #include "gc/z/zVerify.hpp" #include "memory/iterator.inline.hpp" @@ -48,151 +51,199 @@ #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/preserveException.hpp" +#include "utilities/resourceHash.hpp" -#define BAD_OOP_ARG(o, p) "Bad oop " PTR_FORMAT " found at " PTR_FORMAT, p2i(o), p2i(p) +#define BAD_OOP_ARG(o, p) "Bad oop " PTR_FORMAT " found at " PTR_FORMAT, untype(o), p2i(p) -static void z_verify_oop(oop* p) { - const oop o = RawAccess<>::oop_load(p); - if (o != NULL) { - const uintptr_t addr = ZOop::to_address(o); - guarantee(ZAddress::is_good(addr), BAD_OOP_ARG(o, p)); - guarantee(oopDesc::is_oop(ZOop::from_address(addr)), BAD_OOP_ARG(o, p)); +static bool z_is_null_relaxed(zpointer o) { + const uintptr_t color_mask = ZPointerAllMetadataMask | ZPointerReservedMask; + return (untype(o) & ~color_mask) == 0; +} + +static void z_verify_old_oop(zpointer* p) { + const zpointer o = *p; + assert(o != zpointer::null, "Old should not contain raw null"); + if (!z_is_null_relaxed(o)) { + if (ZPointer::is_mark_good(o)) { + // Even though the pointer is mark good, we can't verify that it should + // be in the remembered set in old mark end. We have to wait to the verify + // safepoint after reference processing, where we hold the driver lock and + // know there is no concurrent remembered set processing in the young generation. + const zaddress addr = ZPointer::uncolor(o); + guarantee(oopDesc::is_oop(to_oop(addr)), BAD_OOP_ARG(o, p)); + } else { + const zaddress addr = ZBarrier::load_barrier_on_oop_field_preloaded(nullptr, o); + // Old to young pointers might not be mark good if the young + // marking has not finished, which is responsible for coloring + // these pointers. 
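z_is_null_relaxed() above treats a pointer as null when everything outside the color/metadata mask is zero, so colored nulls and raw nulls are handled uniformly. The sketch below demonstrates the idea with an invented bit layout; the masks are illustrative and do not match the real zpointer layout.

#include <cstdint>

// Illustrative bit layout only (NOT the real zpointer layout): low bits hold
// color/metadata, the remaining bits hold the address. A pointer whose
// address bits are all zero is "null" regardless of its color bits.
constexpr uintptr_t kMetadataMask = 0xFFFFu;
constexpr uintptr_t kAddressMask  = ~kMetadataMask;

constexpr bool is_null_relaxed(uintptr_t colored_ptr) {
  return (colored_ptr & kAddressMask) == 0;
}

static_assert(is_null_relaxed(0x0000), "a raw null is null");
static_assert(is_null_relaxed(0x00A5), "a colored null is still null");
static_assert(!is_null_relaxed(0x10000), "a real address is not null");
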
+ if (ZHeap::heap()->is_old(addr) || !ZGeneration::young()->is_phase_mark()) { + // Old to old pointers are allowed to have bad young bits + guarantee(ZPointer::is_marked_old(o), BAD_OOP_ARG(o, p)); + guarantee(ZHeap::heap()->is_old(p), BAD_OOP_ARG(o, p)); + } + } + } +} + +static void z_verify_young_oop(zpointer* p) { + const zpointer o = *p; + if (!z_is_null_relaxed(o)) { + guarantee(ZHeap::heap()->is_young(p), BAD_OOP_ARG(o, p)); + guarantee(ZPointer::is_marked_young(o), BAD_OOP_ARG(o, p)); + + if (ZPointer::is_load_good(o)) { + guarantee(oopDesc::is_oop(to_oop(ZPointer::uncolor(o))), BAD_OOP_ARG(o, p)); + } } } -static void z_verify_possibly_weak_oop(oop* p) { - const oop o = RawAccess<>::oop_load(p); - if (o != NULL) { - const uintptr_t addr = ZOop::to_address(o); - guarantee(ZAddress::is_good(addr) || ZAddress::is_finalizable_good(addr), BAD_OOP_ARG(o, p)); - guarantee(oopDesc::is_oop(ZOop::from_address(ZAddress::good(addr))), BAD_OOP_ARG(o, p)); +static void z_verify_root_oop_object(zaddress o, void* p) { + guarantee(oopDesc::is_oop(to_oop(o)), BAD_OOP_ARG(o, p)); +} + +static void z_verify_uncolored_root_oop(zaddress* p) { + assert(!ZHeap::heap()->is_in((uintptr_t)p), "Roots shouldn't be in heap"); + const zaddress o = *p; + if (!is_null(o)) { + z_verify_root_oop_object(o, p); } } -class ZVerifyRootClosure : public OopClosure { +static void z_verify_possibly_weak_oop(zpointer* p) { + const zpointer o = *p; + if (!z_is_null_relaxed(o)) { + guarantee(ZPointer::is_marked_old(o) || ZPointer::is_marked_finalizable(o), BAD_OOP_ARG(o, p)); + + const zaddress addr = ZBarrier::load_barrier_on_oop_field_preloaded(nullptr, o); + guarantee(ZHeap::heap()->is_old(addr) || ZPointer::is_marked_young(o), BAD_OOP_ARG(o, p)); + guarantee(ZHeap::heap()->is_young(addr) || ZHeap::heap()->is_object_live(addr), BAD_OOP_ARG(o, p)); + guarantee(oopDesc::is_oop(to_oop(addr)), BAD_OOP_ARG(o, p)); + + // Verify no missing remset entries. We are holding the driver lock here and that + // allows us to more precisely verify the remembered set, as there is no concurrent + // young generation collection going on at this point. + const uintptr_t remset_bits = untype(o) & ZPointerRememberedMask; + const uintptr_t prev_remembered = ZPointerRemembered ^ ZPointerRememberedMask; + guarantee(remset_bits != prev_remembered, BAD_OOP_ARG(o, p)); + guarantee(remset_bits == ZPointerRememberedMask || + ZGeneration::young()->is_remembered(p) || + ZStoreBarrierBuffer::is_in(p), BAD_OOP_ARG(o, p)); + } +} + +class ZVerifyColoredRootClosure : public OopClosure { private: - const bool _verify_fixed; + const bool _verify_marked_old; public: - ZVerifyRootClosure(bool verify_fixed) : - _verify_fixed(verify_fixed) {} + ZVerifyColoredRootClosure(bool verify_marked_old) : + OopClosure(), + _verify_marked_old(verify_marked_old) {} + + virtual void do_oop(oop* p_) { + zpointer* const p = (zpointer*)p_; + + assert(!ZHeap::heap()->is_in((uintptr_t)p), "Roots shouldn't be in heap"); + + const zpointer o = *p; + + if (z_is_null_relaxed(o)) { + // Skip verifying nulls + return; + } + + assert(is_valid(o), "Catch me!"); + + if (_verify_marked_old) { + guarantee(ZPointer::is_marked_old(o), BAD_OOP_ARG(o, p)); - virtual void do_oop(oop* p) { - if (_verify_fixed) { - z_verify_oop(p); + // Minor collections could have relocated the object; + // use load barrier to find correct object. + const zaddress addr = ZBarrier::load_barrier_on_oop_field_preloaded(nullptr, o); + z_verify_root_oop_object(addr, p); } else { - // Don't know the state of the oop. 
- oop obj = *p; - obj = NativeAccess::oop_load(&obj); - z_verify_oop(&obj); + // Don't know the state of the oop + if (is_valid(o)) { + // it looks like a valid colored oop; + // use load barrier to find correct object. + const zaddress addr = ZBarrier::load_barrier_on_oop_field_preloaded(nullptr, o); + z_verify_root_oop_object(addr, p); + } } } virtual void do_oop(narrowOop*) { ShouldNotReachHere(); } +}; - bool verify_fixed() const { - return _verify_fixed; +class ZVerifyUncoloredRootClosure : public OopClosure { +public: + virtual void do_oop(oop* p_) { + zaddress* const p = (zaddress*)p_; + z_verify_uncolored_root_oop(p); + } + + virtual void do_oop(narrowOop*) { + ShouldNotReachHere(); } }; class ZVerifyCodeBlobClosure : public CodeBlobToOopClosure { public: - ZVerifyCodeBlobClosure(ZVerifyRootClosure* _cl) : - CodeBlobToOopClosure(_cl, false /* fix_relocations */) {} + ZVerifyCodeBlobClosure(OopClosure* cl) : + CodeBlobToOopClosure(cl, false /* fix_relocations */) {} virtual void do_code_blob(CodeBlob* cb) { CodeBlobToOopClosure::do_code_blob(cb); } }; -class ZVerifyStack : public OopClosure { +class ZVerifyOldOopClosure : public BasicOopIterateClosure { private: - ZVerifyRootClosure* const _cl; - JavaThread* const _jt; - uint64_t _last_good; - bool _verifying_bad_frames; + const bool _verify_weaks; public: - ZVerifyStack(ZVerifyRootClosure* cl, JavaThread* jt) : - _cl(cl), - _jt(jt), - _last_good(0), - _verifying_bad_frames(false) { - ZStackWatermark* const stack_watermark = StackWatermarkSet::get(jt, StackWatermarkKind::gc); - - if (_cl->verify_fixed()) { - assert(stack_watermark->processing_started(), "Should already have been fixed"); - assert(stack_watermark->processing_completed(), "Should already have been fixed"); - } else { - // We don't really know the state of the stack, verify watermark. - if (!stack_watermark->processing_started()) { - _verifying_bad_frames = true; - } else { - // Not time yet to verify bad frames - _last_good = stack_watermark->last_processed(); - } - } - } + ZVerifyOldOopClosure(bool verify_weaks) : + _verify_weaks(verify_weaks) {} - void do_oop(oop* p) { - if (_verifying_bad_frames) { - const oop obj = *p; - guarantee(!ZAddress::is_good(ZOop::to_address(obj)), BAD_OOP_ARG(obj, p)); + virtual void do_oop(oop* p_) { + zpointer* const p = (zpointer*)p_; + if (_verify_weaks) { + z_verify_possibly_weak_oop(p); + } else { + // We should never encounter finalizable oops through strong + // paths. This assumes we have only visited strong roots. + z_verify_old_oop(p); } - _cl->do_oop(p); } - void do_oop(narrowOop* p) { + virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } - void prepare_next_frame(frame& frame) { - if (_cl->verify_fixed()) { - // All frames need to be good - return; - } - - // The verification has two modes, depending on whether we have reached the - // last processed frame or not. Before it is reached, we expect everything to - // be good. After reaching it, we expect everything to be bad. 
- const uintptr_t sp = reinterpret_cast(frame.sp()); - - if (!_verifying_bad_frames && sp == _last_good) { - // Found the last good frame, now verify the bad ones - _verifying_bad_frames = true; - } - } - - void verify_frames() { - ZVerifyCodeBlobClosure cb_cl(_cl); - for (StackFrameStream frames(_jt, true /* update */, false /* process_frames */); - !frames.is_done(); - frames.next()) { - frame& frame = *frames.current(); - frame.oops_do(this, &cb_cl, frames.register_map(), DerivedPointerIterationMode::_ignore); - prepare_next_frame(frame); - } + virtual ReferenceIterationMode reference_iteration_mode() { + return _verify_weaks ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT; } }; -class ZVerifyOopClosure : public ClaimMetadataVisitingOopIterateClosure { +class ZVerifyYoungOopClosure : public BasicOopIterateClosure { private: const bool _verify_weaks; public: - ZVerifyOopClosure(bool verify_weaks) : - ClaimMetadataVisitingOopIterateClosure(ClassLoaderData::_claim_other), + ZVerifyYoungOopClosure(bool verify_weaks) : _verify_weaks(verify_weaks) {} - virtual void do_oop(oop* p) { + virtual void do_oop(oop* p_) { + zpointer* const p = (zpointer*)p_; if (_verify_weaks) { - z_verify_possibly_weak_oop(p); + //z_verify_possibly_weak_oop(p); + z_verify_young_oop(p); } else { // We should never encounter finalizable oops through strong // paths. This assumes we have only visited strong roots. - z_verify_oop(p); + z_verify_young_oop(p); } } @@ -213,22 +264,22 @@ typedef ClaimingCLDToOopClosure ZVerifyCLDClosure; class ZVerifyThreadClosure : public ThreadClosure { private: - ZVerifyRootClosure* const _cl; + OopClosure* const _verify_cl; public: - ZVerifyThreadClosure(ZVerifyRootClosure* cl) : - _cl(cl) {} + ZVerifyThreadClosure(OopClosure* verify_cl) : + _verify_cl(verify_cl) {} virtual void do_thread(Thread* thread) { - thread->oops_do_no_frames(_cl, NULL); - JavaThread* const jt = JavaThread::cast(thread); - if (!jt->has_last_Java_frame()) { - return; - } + const ZStackWatermark* const watermark = StackWatermarkSet::get(jt, StackWatermarkKind::gc); + if (watermark->processing_started_acquire()) { + thread->oops_do_no_frames(_verify_cl, nullptr); - ZVerifyStack verify_stack(_cl, jt); - verify_stack.verify_frames(); + if (watermark->processing_completed_acquire()) { + thread->oops_do_frames(_verify_cl, nullptr); + } + } } }; @@ -236,86 +287,173 @@ class ZVerifyNMethodClosure : public NMethodClosure { private: OopClosure* const _cl; BarrierSetNMethod* const _bs_nm; - const bool _verify_fixed; - - bool trust_nmethod_state() const { - // The root iterator will visit non-processed - // nmethods class unloading is turned off. 
- return ClassUnloading || _verify_fixed; - } public: - ZVerifyNMethodClosure(OopClosure* cl, bool verify_fixed) : + ZVerifyNMethodClosure(OopClosure* cl) : _cl(cl), - _bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()), - _verify_fixed(verify_fixed) {} + _bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {} virtual void do_nmethod(nmethod* nm) { - assert(!trust_nmethod_state() || !_bs_nm->is_armed(nm), "Should not encounter any armed nmethods"); + if (_bs_nm->is_armed(nm)) { + // Can't verify + return; + } ZNMethod::nmethod_oops_do(nm, _cl); } }; -void ZVerify::roots_strong(bool verify_fixed) { +void ZVerify::roots_strong(bool verify_after_old_mark) { assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); - assert(!ZResurrection::is_blocked(), "Invalid phase"); - ZVerifyRootClosure cl(verify_fixed); - ZVerifyCLDClosure cld_cl(&cl); - ZVerifyThreadClosure thread_cl(&cl); - ZVerifyNMethodClosure nm_cl(&cl, verify_fixed); + { + ZVerifyColoredRootClosure cl(verify_after_old_mark); + ZVerifyCLDClosure cld_cl(&cl); + + ZRootsIteratorStrongColored roots_strong_colored(ZGenerationIdOptional::none); + roots_strong_colored.apply(&cl, + &cld_cl); + } - ZRootsIterator iter(ClassLoaderData::_claim_none); - iter.apply(&cl, - &cld_cl, - &thread_cl, - &nm_cl); + { + ZVerifyUncoloredRootClosure cl; + ZVerifyThreadClosure thread_cl(&cl); + ZVerifyNMethodClosure nm_cl(&cl); + + ZRootsIteratorStrongUncolored roots_strong_uncolored(ZGenerationIdOptional::none); + roots_strong_uncolored.apply(&thread_cl, + &nm_cl); + } } void ZVerify::roots_weak() { assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); assert(!ZResurrection::is_blocked(), "Invalid phase"); - ZVerifyRootClosure cl(true /* verify_fixed */); - ZWeakRootsIterator iter; - iter.apply(&cl); + ZVerifyColoredRootClosure cl(true /* verify_after_old_mark*/); + ZRootsIteratorWeakColored roots_weak_colored(ZGenerationIdOptional::none); + roots_weak_colored.apply(&cl); +} + +zaddress zverify_broken_object = zaddress::null; + +class ZVerifyObjectClosure : public ObjectClosure, public OopFieldClosure { +private: + const bool _verify_weaks; + + zaddress _visited_base; + volatile zpointer* _visited_p; + zpointer _visited_ptr_pre_loaded; + +public: + ZVerifyObjectClosure(bool verify_weaks) : + _verify_weaks(verify_weaks), + _visited_base(), + _visited_p(), + _visited_ptr_pre_loaded() {} + + void log_dead_object(zaddress addr) { + tty->print_cr("ZVerify found dead object: " PTR_FORMAT " at p: " PTR_FORMAT " ptr: " PTR_FORMAT, untype(addr), p2i((void*)_visited_p), untype(_visited_ptr_pre_loaded)); + to_oop(addr)->print(); + tty->print_cr("--- From --- "); + if (_visited_base != zaddress::null) { + to_oop(_visited_base)->print(); + } + tty->cr(); + + if (zverify_broken_object == zaddress::null) { + zverify_broken_object = addr; + } + } + + void verify_live_object(oop obj) { + // Verify that its pointers are sane + ZVerifyOldOopClosure cl(_verify_weaks); + ZIterator::oop_iterate_safe(obj, &cl); + } + + virtual void do_object(oop obj) { + guarantee(oopDesc::is_oop_or_null(obj), "Must be"); + + const zaddress addr = to_zaddress(obj); + if (ZHeap::heap()->is_old(addr)) { + if (ZHeap::heap()->is_object_live(addr)) { + verify_live_object(obj); + } else { + log_dead_object(addr); + } + } else { + // Young object - no verification + } + } + + virtual void do_field(oop base, oop* p) { + _visited_base = to_zaddress(base); + _visited_p = (volatile zpointer*)p; + _visited_ptr_pre_loaded = Atomic::load(_visited_p); + } +}; + 
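ZVerify::roots_strong() above now verifies two distinct root sets: colored roots (for example class loader data), whose slots carry color bits and must be decoded through a load barrier, and uncolored roots (thread stacks, nmethods), which hold plain addresses. A standalone sketch of that split follows; all types and names here are illustrative.

#include <cstdint>
#include <vector>

struct ColoredRoot   { uintptr_t colored_value; };   // e.g. a CLD slot
struct UncoloredRoot { uintptr_t address; };         // e.g. a stack or nmethod slot

struct RootSets {
  std::vector<ColoredRoot>   colored;
  std::vector<UncoloredRoot> uncolored;
};

template <typename VerifyColored, typename VerifyUncolored>
void verify_strong_roots(const RootSets& roots,
                         VerifyColored verify_colored,
                         VerifyUncolored verify_uncolored) {
  for (const ColoredRoot& r : roots.colored) {
    verify_colored(r.colored_value);      // must decode color bits first
  }
  for (const UncoloredRoot& r : roots.uncolored) {
    verify_uncolored(r.address);          // plain address, no decoding
  }
}
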
+void ZVerify::threads_start_processing() { + class StartProcessingClosure : public ThreadClosure { + public: + void do_thread(Thread* thread) { + StackWatermarkSet::start_processing(JavaThread::cast(thread), StackWatermarkKind::gc); + } + }; + + ZJavaThreadsIterator threads_iterator(ZGenerationIdOptional::none); + StartProcessingClosure cl; + threads_iterator.apply(&cl); } void ZVerify::objects(bool verify_weaks) { + if (ZAbort::should_abort()) { + // Invariants might be a bit mushy if the young generation + // collection was forced to shut down. So let's be a bit forgiving here. + return; + } assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); - assert(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase"); + assert(ZGeneration::young()->is_phase_mark_complete() || + ZGeneration::old()->is_phase_mark_complete(), "Invalid phase"); assert(!ZResurrection::is_blocked(), "Invalid phase"); - ZVerifyOopClosure cl(verify_weaks); - ObjectToOopClosure object_cl(&cl); - ZHeap::heap()->object_iterate(&object_cl, verify_weaks); + // Note that object verification will fix the pointers and + // only verify that the resulting objects are sane. + + // The verification VM_Operation doesn't start the thread processing. + // Do it here, after the roots have been verified. + threads_start_processing(); + + ZVerifyObjectClosure object_cl(verify_weaks); + ZHeap::heap()->object_and_field_iterate(&object_cl, &object_cl, verify_weaks); } void ZVerify::before_zoperation() { // Verify strong roots - ZStatTimerDisable disable; if (ZVerifyRoots) { - roots_strong(false /* verify_fixed */); + roots_strong(false /* verify_after_old_mark */); } } void ZVerify::after_mark() { // Verify all strong roots and strong references - ZStatTimerDisable disable; if (ZVerifyRoots) { - roots_strong(true /* verify_fixed */); + roots_strong(true /* verify_after_old_mark */); } if (ZVerifyObjects) { + // Workaround OopMapCacheAlloc_lock reordering with the StackWatermark_lock + DisableIsGCActiveMark mark; + objects(false /* verify_weaks */); + guarantee(zverify_broken_object == zaddress::null, "Verification failed"); } } void ZVerify::after_weak_processing() { // Verify all roots and all references - ZStatTimerDisable disable; if (ZVerifyRoots) { - roots_strong(true /* verify_fixed */); + roots_strong(true /* verify_after_old_mark */); roots_weak(); } if (ZVerifyObjects) { @@ -323,93 +461,268 @@ void ZVerify::after_weak_processing() { } } -template -class ZPageDebugMapOrUnmapClosure : public ZPageClosure { +// +// Remembered set verification +// + +typedef ResourceHashtable ZStoreBarrierBufferTable; + +static ZStoreBarrierBufferTable* z_verify_store_barrier_buffer_table = nullptr; + +#define BAD_REMSET_ARG(p, ptr, addr) \ + "Missing remembered set at " PTR_FORMAT " pointing at " PTR_FORMAT \ + " (" PTR_FORMAT " + " INTX_FORMAT ")" \ + , p2i(p), untype(ptr), untype(addr), p2i(p) - untype(addr) + +class ZVerifyRemsetBeforeOopClosure : public BasicOopIterateClosure { private: - const ZPageAllocator* const _allocator; + ZForwarding* _forwarding; + zaddress_unsafe _from_addr; public: - ZPageDebugMapOrUnmapClosure(const ZPageAllocator* allocator) : - _allocator(allocator) {} + ZVerifyRemsetBeforeOopClosure(ZForwarding* forwarding) : + _forwarding(forwarding), + _from_addr(zaddress_unsafe::null) {} - void do_page(const ZPage* page) { - if (Map) { - _allocator->debug_map_page(page); + void set_from_addr(zaddress_unsafe addr) { + _from_addr = addr; + } + + virtual void do_oop(oop* p_) { + volatile zpointer* const p = 
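The remembered-set verification above keeps a table of oop locations that were still sitting in thread-local store barrier buffers when the color flipped, and skips those locations because their remembered-set entries may not have been flushed yet. A simplified standalone model of that bookkeeping; StoreBarrierBufferSnapshot is an illustrative stand-in.

#include <unordered_set>

// Before verifying remembered sets, snapshot every location still sitting in
// a store barrier buffer and skip those locations during verification.
using OopLocation = const void*;

struct StoreBarrierBufferSnapshot {
  std::unordered_set<OopLocation> pending;

  void record(OopLocation p) {
    pending.insert(p);
  }

  bool should_skip(OopLocation p) const {
    return pending.count(p) != 0;
  }
};
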
(volatile zpointer*)p_; + const zpointer ptr = *p; + + if (ZPointer::is_remembered_exact(ptr)) { + // When the remembered bits are 11, it means that it is intentionally + // not part of the remembered set + return; + } + + if (ZBufferStoreBarriers && z_verify_store_barrier_buffer_table->get(p) != nullptr) { + // If this oop location is in the store barrier buffer, we can't assume + // that it should have a remset entry + return; + } + + if (_forwarding->find(_from_addr) != zaddress::null) { + // If the mutator has already relocated the object to to-space, we defer + // and do to-space verification afterwards instead, because store barrier + // buffers could have installed the remembered set entry in to-space and + // then flushed the store barrier buffer, and then start young marking + return; + } + + ZPage* page = _forwarding->page(); + + if (ZGeneration::old()->active_remset_is_current()) { + guarantee(page->is_remembered(p), BAD_REMSET_ARG(p, ptr, _from_addr)); } else { - _allocator->debug_unmap_page(page); + guarantee(page->was_remembered(p), BAD_REMSET_ARG(p, ptr, _from_addr)); } } + + virtual void do_oop(narrowOop* p) { + ShouldNotReachHere(); + } + + virtual ReferenceIterationMode reference_iteration_mode() { + return DO_FIELDS; + } }; -ZVerifyViewsFlip::ZVerifyViewsFlip(const ZPageAllocator* allocator) : - _allocator(allocator) { - if (ZVerifyViews) { - // Unmap all pages - ZPageDebugMapOrUnmapClosure cl(_allocator); - ZHeap::heap()->pages_do(&cl); +void ZVerify::on_color_flip() { + if (!ZVerifyRemembered || !ZBufferStoreBarriers) { + return; + } + + // Reset the table tracking the stale stores of the store barrier buffer + delete z_verify_store_barrier_buffer_table; + z_verify_store_barrier_buffer_table = new (mtGC) ZStoreBarrierBufferTable(); + + // Gather information from store barrier buffers as we currently can't verify + // remset entries for oop locations touched by the store barrier buffer + + for (JavaThreadIteratorWithHandle jtiwh; JavaThread* const jt = jtiwh.next(); ) { + const ZStoreBarrierBuffer* const buffer = ZThreadLocalData::store_barrier_buffer(jt); + + for (int i = buffer->current(); i < (int)ZStoreBarrierBuffer::_buffer_length; ++i) { + volatile zpointer* const p = buffer->_buffer[i]._p; + bool created = false; + z_verify_store_barrier_buffer_table->put_if_absent(p, true, &created); + } } } -ZVerifyViewsFlip::~ZVerifyViewsFlip() { - if (ZVerifyViews) { - // Map all pages - ZPageDebugMapOrUnmapClosure cl(_allocator); - ZHeap::heap()->pages_do(&cl); +void ZVerify::before_relocation(ZForwarding* forwarding) { + if (!ZVerifyRemembered) { + return; + } + + if (forwarding->from_age() != ZPageAge::old) { + // Only supports verification of old-to-old relocations now + return; } + + // Verify that the inactive remset is cleared + if (ZGeneration::old()->active_remset_is_current()) { + forwarding->page()->verify_remset_cleared_previous(); + } else { + forwarding->page()->verify_remset_cleared_current(); + } + + ZVerifyRemsetBeforeOopClosure cl(forwarding); + + forwarding->object_iterate([&](oop obj) { + const zaddress_unsafe addr = to_zaddress_unsafe(cast_from_oop(obj)); + cl.set_from_addr(addr); + obj->oop_iterate(&cl); + }); } -#ifdef ASSERT +class ZVerifyRemsetAfterOopClosure : public BasicOopIterateClosure { +private: + ZForwarding* const _forwarding; + zaddress_unsafe _from_addr; + zaddress _to_addr; -class ZVerifyBadOopClosure : public OopClosure { public: - virtual void do_oop(oop* p) { - const oop o = *p; - assert(!ZAddress::is_good(ZOop::to_address(o)), "Should 
not be good: " PTR_FORMAT, p2i(o)); + ZVerifyRemsetAfterOopClosure(ZForwarding* forwarding) : + _forwarding(forwarding), + _from_addr(zaddress_unsafe::null), + _to_addr(zaddress::null) {} + + void set_from_addr(zaddress_unsafe addr) { + _from_addr = addr; + } + + void set_to_addr(zaddress addr) { + _to_addr = addr; + } + + virtual void do_oop(oop* p_) { + volatile zpointer* const p = (volatile zpointer*)p_; + const zpointer ptr = Atomic::load(p); + + // Order this load w.r.t. the was_remembered load which can race when + // the remset scanning of the to-space object is concurrently forgetting + // an entry. + OrderAccess::loadload(); + + if (ZPointer::is_remembered_exact(ptr)) { + // When the remembered bits are 11, it means that it is intentionally + // not part of the remembered set + return; + } + + if (ZPointer::is_store_good(ptr)) { + // In to-space, there could be stores racing with the verification. + // Such stores may not have reliably manifested in the remembered + // sets yet. + return; + } + + if (ZBufferStoreBarriers && z_verify_store_barrier_buffer_table->get(p) != nullptr) { + // If this to-space oop location is in the store barrier buffer, we + // can't assume that it should have a remset entry + return; + } + + const uintptr_t p_offset = uintptr_t(p) - untype(_to_addr); + volatile zpointer* const fromspace_p = (volatile zpointer*)(untype(_from_addr) + p_offset); + + if (ZBufferStoreBarriers && z_verify_store_barrier_buffer_table->get(fromspace_p) != nullptr) { + // If this from-space oop location is in the store barrier buffer, we + // can't assume that it should have a remset entry + return; + } + + ZPage* page = ZHeap::heap()->page(p); + + if (page->is_remembered(p) || page->was_remembered(p)) { + // No missing remembered set entry + return; + } + + OrderAccess::loadload(); + if (Atomic::load(p) != ptr) { + // Order the was_remembered bitmap load w.r.t. the reload of the zpointer. + // Sometimes the was_remembered() call above races with clearing of the + // previous bits, when the to-space object is concurrently forgetting + // remset entries because they were not so useful. When that happens, + // we have already self healed the pointers to have 11 in the remset + // bits. + return; + } + + guarantee(ZGeneration::young()->is_phase_mark(), "Should be in the mark phase " BAD_REMSET_ARG(p, ptr, _to_addr)); + guarantee(_forwarding->relocated_remembered_fields_published_contains(p), BAD_REMSET_ARG(p, ptr, _to_addr)); } virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } -}; - -// This class encapsulates various marks we need to deal with calling the -// frame iteration code from arbitrary points in the runtime. It is mostly -// due to problems that we might want to eventually clean up inside of the -// frame iteration code, such as creating random handles even though there -// is no safepoint to protect against, and fiddling around with exceptions. 
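ZVerifyRemsetAfterOopClosure above has to tolerate races with concurrent updates: it loads the field, checks the remembered-set bitmaps, and then re-loads the field, skipping the check if the value changed in between. A standalone sketch of that load / check / re-load pattern, assuming a caller-supplied is_remembered predicate; the names are illustrative.

#include <atomic>
#include <cstdint>
#include <functional>

// Returns true if the field is either covered by the remembered set or the
// check raced with a concurrent update; returns false only for a field that
// is genuinely missing its remembered-set entry.
static bool verify_field_remembered(const std::atomic<uintptr_t>* field,
                                    const std::function<bool(const void*)>& is_remembered) {
  const uintptr_t first = field->load(std::memory_order_acquire);

  if (is_remembered(field)) {
    return true;                               // entry present, nothing to report
  }

  const uintptr_t second = field->load(std::memory_order_acquire);
  if (second != first) {
    return true;                               // raced with a concurrent update; skip
  }

  return false;                                // missing remembered-set entry
}
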
-class StackWatermarkProcessingMark { - ResetNoHandleMark _rnhm; - HandleMark _hm; - PreserveExceptionMark _pem; - ResourceMark _rm; -public: - StackWatermarkProcessingMark(Thread* thread) : - _rnhm(), - _hm(thread), - _pem(thread), - _rm(thread) {} + virtual ReferenceIterationMode reference_iteration_mode() { + return DO_FIELDS; + } }; -void ZVerify::verify_frame_bad(const frame& fr, RegisterMap& register_map) { - ZVerifyBadOopClosure verify_cl; - fr.oops_do(&verify_cl, NULL, ®ister_map, DerivedPointerIterationMode::_ignore); +void ZVerify::after_relocation_internal(ZForwarding* forwarding) { + ZVerifyRemsetAfterOopClosure cl(forwarding); + + forwarding->address_unsafe_iterate_via_table([&](zaddress_unsafe from_addr) { + // If no field in this object was in the store barrier buffer + // when relocation started, we should be able to verify trivially + ZGeneration* const from_generation = forwarding->from_age() == ZPageAge::old ? (ZGeneration*)ZGeneration::old() + : (ZGeneration*)ZGeneration::young(); + const zaddress to_addr = from_generation->remap_object(from_addr); + + cl.set_from_addr(from_addr); + cl.set_to_addr(to_addr); + const oop to_obj = to_oop(to_addr); + to_obj->oop_iterate(&cl); + }); } -void ZVerify::verify_thread_head_bad(JavaThread* jt) { - ZVerifyBadOopClosure verify_cl; - jt->oops_do_no_frames(&verify_cl, NULL); -} +void ZVerify::after_relocation(ZForwarding* forwarding) { + if (!ZVerifyRemembered) { + return; + } -void ZVerify::verify_thread_frames_bad(JavaThread* jt) { - if (jt->has_last_Java_frame()) { - ZVerifyBadOopClosure verify_cl; - StackWatermarkProcessingMark swpm(Thread::current()); - // Traverse the execution stack - for (StackFrameStream fst(jt, true /* update */, false /* process_frames */); !fst.is_done(); fst.next()) { - fst.current()->oops_do(&verify_cl, NULL /* code_cl */, fst.register_map(), DerivedPointerIterationMode::_ignore); - } + if (forwarding->to_age() != ZPageAge::old) { + // No remsets to verify in the young gen + return; + } + + if (ZGeneration::young()->is_phase_mark() && + forwarding->relocated_remembered_fields_is_concurrently_scanned()) { + // Can't verify to-space objects if concurrent YC rejected published + // remset information, because that data is incomplete. The YC might + // not have finished scanning the forwarding, and might be about to + // insert required remembered set entries. + return; } + + after_relocation_internal(forwarding); } -#endif // ASSERT +void ZVerify::after_scan(ZForwarding* forwarding) { + if (!ZVerifyRemembered) { + return; + } + + if (ZAbort::should_abort()) { + // We can't verify remembered set accurately when shutting down the VM + return; + } + + if (!ZGeneration::old()->is_phase_relocate() || + !forwarding->relocated_remembered_fields_is_concurrently_scanned()) { + // Only verify remembered set from remembered set scanning, when the + // remembered set scanning rejected the publishing information of concurrent + // old generation relocation + return; + } + + after_relocation_internal(forwarding); +} diff --git a/src/hotspot/share/gc/z/zVerify.hpp b/src/hotspot/share/gc/z/zVerify.hpp index 8d7abd4a8d531..e9ada2cefa9ca 100644 --- a/src/hotspot/share/gc/z/zVerify.hpp +++ b/src/hotspot/share/gc/z/zVerify.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,32 +27,29 @@ #include "memory/allStatic.hpp" class frame; +class ZForwarding; class ZPageAllocator; class ZVerify : public AllStatic { private: - static void roots_strong(bool verify_fixed); + static void roots_strong(bool verify_after_old_mark); static void roots_weak(); static void objects(bool verify_weaks); + static void threads_start_processing(); + + static void after_relocation_internal(ZForwarding* forwarding); public: static void before_zoperation(); static void after_mark(); static void after_weak_processing(); - static void verify_thread_head_bad(JavaThread* thread) NOT_DEBUG_RETURN; - static void verify_thread_frames_bad(JavaThread* thread) NOT_DEBUG_RETURN; - static void verify_frame_bad(const frame& fr, RegisterMap& register_map) NOT_DEBUG_RETURN; -}; + static void before_relocation(ZForwarding* forwarding); + static void after_relocation(ZForwarding* forwarding); + static void after_scan(ZForwarding* forwarding); -class ZVerifyViewsFlip { -private: - const ZPageAllocator* const _allocator; - -public: - ZVerifyViewsFlip(const ZPageAllocator* allocator); - ~ZVerifyViewsFlip(); + static void on_color_flip(); }; #endif // SHARE_GC_Z_ZVERIFY_HPP diff --git a/src/hotspot/share/gc/z/zVirtualMemory.cpp b/src/hotspot/share/gc/z/zVirtualMemory.cpp index 9366412cea22a..7187c1cac94ed 100644 --- a/src/hotspot/share/gc/z/zVirtualMemory.cpp +++ b/src/hotspot/share/gc/z/zVirtualMemory.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,12 +36,7 @@ ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity) : _reserved(0), _initialized(false) { - // Check max supported heap size - if (max_capacity > ZAddressOffsetMax) { - log_error_p(gc)("Java heap too large (max supported heap size is " SIZE_FORMAT "G)", - ZAddressOffsetMax / G); - return; - } + assert(max_capacity <= ZAddressOffsetMax, "Too large max_capacity"); // Initialize platform specific parts before reserving address space pd_initialize_before_reserve(); @@ -59,7 +54,7 @@ ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity) : _initialized = true; } -size_t ZVirtualMemoryManager::reserve_discontiguous(uintptr_t start, size_t size, size_t min_range) { +size_t ZVirtualMemoryManager::reserve_discontiguous(zoffset start, size_t size, size_t min_range) { if (size < min_range) { // Too small return 0; @@ -89,47 +84,32 @@ size_t ZVirtualMemoryManager::reserve_discontiguous(size_t size) { // This avoids an explosion of reservation attempts in case large parts of the // address space is already occupied. 
const size_t min_range = align_up(size / 100, ZGranuleSize); - size_t start = 0; + uintptr_t start = 0; size_t reserved = 0; // Reserve size somewhere between [0, ZAddressOffsetMax) while (reserved < size && start < ZAddressOffsetMax) { const size_t remaining = MIN2(size - reserved, ZAddressOffsetMax - start); - reserved += reserve_discontiguous(start, remaining, min_range); + reserved += reserve_discontiguous(to_zoffset(start), remaining, min_range); start += remaining; } return reserved; } -bool ZVirtualMemoryManager::reserve_contiguous(uintptr_t start, size_t size) { +bool ZVirtualMemoryManager::reserve_contiguous(zoffset start, size_t size) { assert(is_aligned(size, ZGranuleSize), "Must be granule aligned"); // Reserve address views - const uintptr_t marked0 = ZAddress::marked0(start); - const uintptr_t marked1 = ZAddress::marked1(start); - const uintptr_t remapped = ZAddress::remapped(start); + const zaddress_unsafe addr = ZOffset::address_unsafe(start); // Reserve address space - if (!pd_reserve(marked0, size)) { - return false; - } - - if (!pd_reserve(marked1, size)) { - pd_unreserve(marked0, size); - return false; - } - - if (!pd_reserve(remapped, size)) { - pd_unreserve(marked0, size); - pd_unreserve(marked1, size); + if (!pd_reserve(addr, size)) { return false; } // Register address views with native memory tracker - nmt_reserve(marked0, size); - nmt_reserve(marked1, size); - nmt_reserve(remapped, size); + nmt_reserve(addr, size); // Make the address range free _manager.free(start, size); @@ -142,8 +122,8 @@ bool ZVirtualMemoryManager::reserve_contiguous(size_t size) { const size_t unused = ZAddressOffsetMax - size; const size_t increment = MAX2(align_up(unused / 8192, ZGranuleSize), ZGranuleSize); - for (size_t start = 0; start + size <= ZAddressOffsetMax; start += increment) { - if (reserve_contiguous(start, size)) { + for (uintptr_t start = 0; start + size <= ZAddressOffsetMax; start += increment) { + if (reserve_contiguous(to_zoffset(start), size)) { // Success return true; } @@ -154,7 +134,7 @@ bool ZVirtualMemoryManager::reserve_contiguous(size_t size) { } bool ZVirtualMemoryManager::reserve(size_t max_capacity) { - const size_t limit = MIN2(ZAddressOffsetMax, ZAddressSpaceLimit::heap_view()); + const size_t limit = MIN2(ZAddressOffsetMax, ZAddressSpaceLimit::heap()); const size_t size = MIN2(max_capacity * ZVirtualToPhysicalRatio, limit); size_t reserved = size; @@ -171,8 +151,7 @@ bool ZVirtualMemoryManager::reserve(size_t max_capacity) { (contiguous ? "Contiguous" : "Discontiguous"), (limit == ZAddressOffsetMax ? "Unrestricted" : "Restricted"), (reserved == size ? 
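reserve_contiguous(size) above probes the address space for a contiguous range by stepping candidate start offsets by a granule-aligned increment derived from the unused portion of the address space. The sketch below models just that probing loop; the constants and the pd_reserve stand-in are illustrative, not the HotSpot values.

#include <algorithm>
#include <cstdint>

constexpr uint64_t kGranule   = 2 * 1024 * 1024;                  // stands in for ZGranuleSize
constexpr uint64_t kOffsetMax = uint64_t(16) * 1024 * 1024 * 1024; // stands in for ZAddressOffsetMax

constexpr uint64_t align_up(uint64_t value, uint64_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

template <typename ReserveFn>   // ReserveFn: bool(uint64_t start, uint64_t size)
bool reserve_contiguous(uint64_t size, ReserveFn pd_reserve) {
  // Spread candidate starts evenly over the unused part of the address space,
  // but never step by less than one granule.
  const uint64_t unused    = kOffsetMax - size;
  const uint64_t increment = std::max(align_up(unused / 8192, kGranule), kGranule);

  for (uint64_t start = 0; start + size <= kOffsetMax; start += increment) {
    if (pd_reserve(start, size)) {
      return true;        // first successful reservation wins
    }
  }
  return false;
}
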
"Complete" : "Degraded")); - log_info_p(gc, init)("Address Space Size: " SIZE_FORMAT "M x " SIZE_FORMAT " = " SIZE_FORMAT "M", - reserved / M, ZHeapViews, (reserved * ZHeapViews) / M); + log_info_p(gc, init)("Address Space Size: " SIZE_FORMAT "M", reserved / M); // Record reserved _reserved = reserved; @@ -180,9 +159,9 @@ bool ZVirtualMemoryManager::reserve(size_t max_capacity) { return reserved >= max_capacity; } -void ZVirtualMemoryManager::nmt_reserve(uintptr_t start, size_t size) { - MemTracker::record_virtual_memory_reserve((void*)start, size, CALLER_PC); - MemTracker::record_virtual_memory_type((void*)start, mtJavaHeap); +void ZVirtualMemoryManager::nmt_reserve(zaddress_unsafe start, size_t size) { + MemTracker::record_virtual_memory_reserve((void*)untype(start), size, CALLER_PC); + MemTracker::record_virtual_memory_type((void*)untype(start), mtJavaHeap); } bool ZVirtualMemoryManager::is_initialized() const { @@ -190,7 +169,7 @@ bool ZVirtualMemoryManager::is_initialized() const { } ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool force_low_address) { - uintptr_t start; + zoffset start; // Small pages are allocated at low addresses, while medium/large pages // are allocated at high addresses (unless forced to be at a low address). diff --git a/src/hotspot/share/gc/z/zVirtualMemory.hpp b/src/hotspot/share/gc/z/zVirtualMemory.hpp index a7cde86b0d1e6..153cad81be440 100644 --- a/src/hotspot/share/gc/z/zVirtualMemory.hpp +++ b/src/hotspot/share/gc/z/zVirtualMemory.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,16 +30,16 @@ class ZVirtualMemory { friend class VMStructs; private: - uintptr_t _start; - uintptr_t _end; + zoffset _start; + zoffset_end _end; public: ZVirtualMemory(); - ZVirtualMemory(uintptr_t start, size_t size); + ZVirtualMemory(zoffset start, size_t size); bool is_null() const; - uintptr_t start() const; - uintptr_t end() const; + zoffset start() const; + zoffset_end end() const; size_t size() const; ZVirtualMemory split(size_t size); @@ -48,22 +48,22 @@ class ZVirtualMemory { class ZVirtualMemoryManager { private: ZMemoryManager _manager; - uintptr_t _reserved; + size_t _reserved; bool _initialized; // Platform specific implementation void pd_initialize_before_reserve(); void pd_initialize_after_reserve(); - bool pd_reserve(uintptr_t addr, size_t size); - void pd_unreserve(uintptr_t addr, size_t size); + bool pd_reserve(zaddress_unsafe addr, size_t size); + void pd_unreserve(zaddress_unsafe addr, size_t size); - bool reserve_contiguous(uintptr_t start, size_t size); + bool reserve_contiguous(zoffset start, size_t size); bool reserve_contiguous(size_t size); - size_t reserve_discontiguous(uintptr_t start, size_t size, size_t min_range); + size_t reserve_discontiguous(zoffset start, size_t size, size_t min_range); size_t reserve_discontiguous(size_t size); bool reserve(size_t max_capacity); - void nmt_reserve(uintptr_t start, size_t size); + void nmt_reserve(zaddress_unsafe start, size_t size); public: ZVirtualMemoryManager(size_t max_capacity); @@ -71,7 +71,7 @@ class ZVirtualMemoryManager { bool is_initialized() const; size_t reserved() const; - uintptr_t lowest_available_address() const; + zoffset lowest_available_address() const; ZVirtualMemory alloc(size_t size, bool 
force_low_address); void free(const ZVirtualMemory& vmem); diff --git a/src/hotspot/share/gc/z/zVirtualMemory.inline.hpp b/src/hotspot/share/gc/z/zVirtualMemory.inline.hpp index 20071fa4490a5..83aba75f675cc 100644 --- a/src/hotspot/share/gc/z/zVirtualMemory.inline.hpp +++ b/src/hotspot/share/gc/z/zVirtualMemory.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,22 +29,22 @@ #include "gc/z/zMemory.inline.hpp" inline ZVirtualMemory::ZVirtualMemory() : - _start(UINTPTR_MAX), - _end(UINTPTR_MAX) {} + _start(zoffset(UINTPTR_MAX)), + _end(zoffset_end(UINTPTR_MAX)) {} -inline ZVirtualMemory::ZVirtualMemory(uintptr_t start, size_t size) : +inline ZVirtualMemory::ZVirtualMemory(zoffset start, size_t size) : _start(start), - _end(start + size) {} + _end(to_zoffset_end(start, size)) {} inline bool ZVirtualMemory::is_null() const { - return _start == UINTPTR_MAX; + return _start == zoffset(UINTPTR_MAX); } -inline uintptr_t ZVirtualMemory::start() const { +inline zoffset ZVirtualMemory::start() const { return _start; } -inline uintptr_t ZVirtualMemory::end() const { +inline zoffset_end ZVirtualMemory::end() const { return _end; } @@ -61,7 +61,7 @@ inline size_t ZVirtualMemoryManager::reserved() const { return _reserved; } -inline uintptr_t ZVirtualMemoryManager::lowest_available_address() const { +inline zoffset ZVirtualMemoryManager::lowest_available_address() const { return _manager.peek_low_address(); } diff --git a/src/hotspot/share/gc/z/zWeakRootsProcessor.cpp b/src/hotspot/share/gc/z/zWeakRootsProcessor.cpp index 89163c7687f4e..5b01ad30ba67d 100644 --- a/src/hotspot/share/gc/z/zWeakRootsProcessor.cpp +++ b/src/hotspot/share/gc/z/zWeakRootsProcessor.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,28 +22,23 @@ */ #include "precompiled.hpp" +#include "gc/shared/suspendibleThreadSet.hpp" +#include "gc/z/zAddress.inline.hpp" #include "gc/z/zBarrier.inline.hpp" +#include "gc/z/zHeap.inline.hpp" #include "gc/z/zRootsIterator.hpp" #include "gc/z/zTask.hpp" #include "gc/z/zWeakRootsProcessor.hpp" #include "gc/z/zWorkers.hpp" +#include "memory/iterator.hpp" +#include "runtime/atomic.hpp" +#include "utilities/debug.hpp" class ZPhantomCleanOopClosure : public OopClosure { public: virtual void do_oop(oop* p) { - // Read the oop once, to make sure the liveness check - // and the later clearing uses the same value. - const oop obj = Atomic::load(p); - if (ZBarrier::is_alive_barrier_on_phantom_oop(obj)) { - ZBarrier::keep_alive_barrier_on_phantom_oop_field(p); - } else { - // The destination could have been modified/reused, in which case - // we don't want to clear it. However, no one could write the same - // oop here again (the object would be strongly live and we would - // not consider clearing such oops), so therefore we don't have an - // ABA problem here. 
- Atomic::cmpxchg(p, obj, oop(NULL)); - } + ZBarrier::clean_barrier_on_phantom_oop_field((zpointer*)p); + SuspendibleThreadSet::yield(); } virtual void do_oop(narrowOop* p) { @@ -56,20 +51,21 @@ ZWeakRootsProcessor::ZWeakRootsProcessor(ZWorkers* workers) : class ZProcessWeakRootsTask : public ZTask { private: - ZWeakRootsIterator _weak_roots; + ZRootsIteratorWeakColored _roots_weak_colored; public: ZProcessWeakRootsTask() : ZTask("ZProcessWeakRootsTask"), - _weak_roots() {} + _roots_weak_colored(ZGenerationIdOptional::old) {} ~ZProcessWeakRootsTask() { - _weak_roots.report_num_dead(); + _roots_weak_colored.report_num_dead(); } virtual void work() { + SuspendibleThreadSetJoiner sts_joiner; ZPhantomCleanOopClosure cl; - _weak_roots.apply(&cl); + _roots_weak_colored.apply(&cl); } }; diff --git a/src/hotspot/share/gc/z/zWorkers.cpp b/src/hotspot/share/gc/z/zWorkers.cpp index 84f17f299fbb7..0e1b969848e53 100644 --- a/src/hotspot/share/gc/z/zWorkers.cpp +++ b/src/hotspot/share/gc/z/zWorkers.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,52 +24,38 @@ #include "precompiled.hpp" #include "gc/shared/gc_globals.hpp" #include "gc/shared/gcLogPrecious.hpp" +#include "gc/z/zHeap.inline.hpp" #include "gc/z/zLock.inline.hpp" #include "gc/z/zStat.hpp" #include "gc/z/zTask.hpp" -#include "gc/z/zThread.hpp" #include "gc/z/zWorkers.hpp" #include "runtime/java.hpp" -class ZWorkersInitializeTask : public WorkerTask { -private: - const uint _nworkers; - uint _started; - ZConditionLock _lock; - -public: - ZWorkersInitializeTask(uint nworkers) : - WorkerTask("ZWorkersInitializeTask"), - _nworkers(nworkers), - _started(0), - _lock() {} - - virtual void work(uint worker_id) { - // Register as worker - ZThread::set_worker(); - - // Wait for all threads to start - ZLocker locker(&_lock); - if (++_started == _nworkers) { - // All threads started - _lock.notify_all(); - } else { - while (_started != _nworkers) { - _lock.wait(); - } - } - } -}; +static const char* workers_name(ZGenerationId id) { + return (id == ZGenerationId::young) ? "ZWorkerYoung" : "ZWorkerOld"; +} -ZWorkers::ZWorkers() : - _workers("ZWorker", - UseDynamicNumberOfGCThreads ? ConcGCThreads : MAX2(ConcGCThreads, ParallelGCThreads)) { +static const char* generation_name(ZGenerationId id) { + return (id == ZGenerationId::young) ? "Young" : "Old"; +} - if (UseDynamicNumberOfGCThreads) { - log_info_p(gc, init)("GC Workers: %u (dynamic)", _workers.max_workers()); - } else { - log_info_p(gc, init)("GC Workers: %u/%u (static)", ConcGCThreads, _workers.max_workers()); - } +static uint max_nworkers(ZGenerationId id) { + return id == ZGenerationId::young ? ZYoungGCThreads : ZOldGCThreads; +} + +ZWorkers::ZWorkers(ZGenerationId id, ZStatWorkers* stats) : + _workers(workers_name(id), + max_nworkers(id)), + _generation_name(generation_name(id)), + _resize_lock(), + _requested_nworkers(0), + _is_active(false), + _stats(stats) { + + log_info_p(gc, init)("GC Workers for %s Generation: %u (%s)", + _generation_name, + _workers.max_workers(), + UseDynamicNumberOfGCThreads ? 
"dynamic" : "static"); // Initialize worker threads _workers.initialize_workers(); @@ -77,10 +63,10 @@ ZWorkers::ZWorkers() : if (_workers.active_workers() != _workers.max_workers()) { vm_exit_during_initialization("Failed to create ZWorkers"); } +} - // Execute task to register threads as workers - ZWorkersInitializeTask task(_workers.max_workers()); - _workers.run_task(&task); +bool ZWorkers::is_active() const { + return _is_active; } uint ZWorkers::active_workers() const { @@ -88,24 +74,63 @@ uint ZWorkers::active_workers() const { } void ZWorkers::set_active_workers(uint nworkers) { - log_info(gc, task)("Using %u workers", nworkers); + log_info(gc, task)("Using %u Workers for %s Generation", nworkers, _generation_name); + ZLocker locker(&_resize_lock); _workers.set_active_workers(nworkers); } +void ZWorkers::set_active() { + ZLocker locker(&_resize_lock); + _is_active = true; + _requested_nworkers = 0; +} + +void ZWorkers::set_inactive() { + ZLocker locker(&_resize_lock); + _is_active = false; +} + void ZWorkers::run(ZTask* task) { - log_debug(gc, task)("Executing Task: %s, Active Workers: %u", task->name(), active_workers()); - ZStatWorkers::at_start(); + log_debug(gc, task)("Executing %s using %s with %u workers", task->name(), _workers.name(), active_workers()); + + { + ZLocker locker(&_resize_lock); + _stats->at_start(active_workers()); + } + _workers.run_task(task->worker_task()); - ZStatWorkers::at_end(); + + { + ZLocker locker(&_resize_lock); + _stats->at_end(); + } +} + +void ZWorkers::run(ZRestartableTask* task) { + for (;;) { + // Run task + run(static_cast(task)); + + ZLocker locker(&_resize_lock); + if (_requested_nworkers == 0) { + // Task completed + return; + } + + // Restart task with requested number of active workers + _workers.set_active_workers(_requested_nworkers); + task->resize_workers(active_workers()); + _requested_nworkers = 0; + } } void ZWorkers::run_all(ZTask* task) { - // Save number of active workers + // Get and set number of active workers const uint prev_active_workers = _workers.active_workers(); + _workers.set_active_workers(_workers.max_workers()); // Execute task using all workers - _workers.set_active_workers(_workers.max_workers()); - log_debug(gc, task)("Executing Task: %s, Active Workers: %u", task->name(), active_workers()); + log_debug(gc, task)("Executing %s using %s with %u workers", task->name(), _workers.name(), active_workers()); _workers.run_task(task->worker_task()); // Restore number of active workers @@ -115,3 +140,28 @@ void ZWorkers::run_all(ZTask* task) { void ZWorkers::threads_do(ThreadClosure* tc) const { _workers.threads_do(tc); } + +ZLock* ZWorkers::resizing_lock() { + return &_resize_lock; +} + +void ZWorkers::request_resize_workers(uint nworkers) { + assert(nworkers != 0, "Never ask for zero workers"); + + ZLocker locker(&_resize_lock); + + if (_requested_nworkers == nworkers) { + // Already requested + return; + } + + if (_workers.active_workers() == nworkers) { + // Already the right amount of threads + return; + } + + log_info(gc, task)("Adjusting Workers for %s Generation: %u -> %u", + _generation_name, _workers.active_workers(), nworkers); + + _requested_nworkers = nworkers; +} diff --git a/src/hotspot/share/gc/z/zWorkers.hpp b/src/hotspot/share/gc/z/zWorkers.hpp index 3ee14ece6bea0..8d1f415256260 100644 --- a/src/hotspot/share/gc/z/zWorkers.hpp +++ b/src/hotspot/share/gc/z/zWorkers.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,24 +25,45 @@ #define SHARE_GC_Z_ZWORKERS_HPP #include "gc/shared/workerThread.hpp" +#include "gc/z/zGenerationId.hpp" +#include "gc/z/zLock.hpp" +#include "gc/z/zStat.hpp" class ThreadClosure; +class ZRestartableTask; +class ZStatCycle; +class ZStatWorkers; class ZTask; class ZWorkers { private: - WorkerThreads _workers; + WorkerThreads _workers; + const char* const _generation_name; + ZLock _resize_lock; + volatile uint _requested_nworkers; + bool _is_active; + ZStatWorkers* const _stats; public: - ZWorkers(); + ZWorkers(ZGenerationId id, ZStatWorkers* stats); + bool is_active() const; uint active_workers() const; void set_active_workers(uint nworkers); + void set_active(); + void set_inactive(); void run(ZTask* task); + void run(ZRestartableTask* task); void run_all(ZTask* task); void threads_do(ThreadClosure* tc) const; + + // Worker resizing + ZLock* resizing_lock(); + void request_resize_workers(uint nworkers); + + bool should_worker_resize(); }; #endif // SHARE_GC_Z_ZWORKERS_HPP diff --git a/src/hotspot/share/gc/z/zWorkers.inline.hpp b/src/hotspot/share/gc/z/zWorkers.inline.hpp new file mode 100644 index 0000000000000..ee1c5f476e31f --- /dev/null +++ b/src/hotspot/share/gc/z/zWorkers.inline.hpp @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZWORKERS_INLINE_HPP +#define SHARE_GC_Z_ZWORKERS_INLINE_HPP + +#include "gc/z/zWorkers.hpp" + +#include "runtime/atomic.hpp" + +inline bool ZWorkers::should_worker_resize() { + return Atomic::load(&_requested_nworkers) != 0; +} + +#endif // SHARE_GC_Z_ZWORKERS_INLINE_HPP diff --git a/src/hotspot/share/gc/z/z_globals.hpp b/src/hotspot/share/gc/z/z_globals.hpp index 4a49d775c8cb3..1ff63ca9c6ce7 100644 --- a/src/hotspot/share/gc/z/z_globals.hpp +++ b/src/hotspot/share/gc/z/z_globals.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,8 @@ #ifndef SHARE_GC_Z_Z_GLOBALS_HPP #define SHARE_GC_Z_Z_GLOBALS_HPP +#include "zPageAge.hpp" + #define GC_Z_FLAGS(develop, \ develop_pd, \ product, \ @@ -32,50 +34,41 @@ range, \ constraint) \ \ - product(double, ZAllocationSpikeTolerance, 2.0, \ - "Allocation spike tolerance factor") \ - \ - product(double, ZFragmentationLimit, 25.0, \ - "Maximum allowed heap fragmentation") \ + product(double, ZYoungCompactionLimit, 25.0, \ + "Maximum allowed garbage in young pages") \ \ - product(size_t, ZMarkStackSpaceLimit, 8*G, \ - "Maximum number of bytes allocated for mark stacks") \ - range(32*M, 1024*G) \ + product(double, ZCollectionIntervalMinor, -1, \ + "Force Minor GC at a fixed time interval (in seconds)") \ \ - product(double, ZCollectionInterval, 0, \ + product(double, ZCollectionIntervalMajor, -1, \ "Force GC at a fixed time interval (in seconds)") \ \ - product(bool, ZProactive, true, \ - "Enable proactive GC cycles") \ - \ - product(bool, ZUncommit, true, \ - "Uncommit unused memory") \ - \ - product(uintx, ZUncommitDelay, 5 * 60, \ - "Uncommit memory if it has been unused for the specified " \ - "amount of time (in seconds)") \ + product(bool, ZCollectionIntervalOnly, false, \ + "Only use timers for GC heuristics") \ \ - product(uint, ZStatisticsInterval, 10, DIAGNOSTIC, \ - "Time between statistics print outs (in seconds)") \ - range(1, (uint)-1) \ + product(bool, ZBufferStoreBarriers, true, DIAGNOSTIC, \ + "Buffer store barriers") \ \ - product(bool, ZStressRelocateInPlace, false, DIAGNOSTIC, \ - "Always relocate pages in-place") \ + product(uint, ZYoungGCThreads, 0, DIAGNOSTIC, \ + "Number of GC threads for the young generation") \ \ - product(bool, ZVerifyViews, false, DIAGNOSTIC, \ - "Verify heap view accesses") \ + product(uint, ZOldGCThreads, 0, DIAGNOSTIC, \ + "Number of GC threads for the old generation") \ \ - product(bool, ZVerifyRoots, trueInDebug, DIAGNOSTIC, \ - "Verify roots") \ + product(uintx, ZIndexDistributorStrategy, 0, DIAGNOSTIC, \ + "Strategy used to distribute indices to parallel workers " \ + "0: Claim tree " \ + "1: Simple Striped ") \ \ - product(bool, ZVerifyObjects, false, DIAGNOSTIC, \ - "Verify objects") \ + product(bool, ZVerifyRemembered, trueInDebug, DIAGNOSTIC, \ + "Verify remembered sets") \ \ - product(bool, ZVerifyMarking, trueInDebug, DIAGNOSTIC, \ - "Verify marking stacks") \ + develop(bool, ZVerifyOops, false, \ + "Verify accessed oops") \ \ - product(bool, ZVerifyForwarding, false, DIAGNOSTIC, \ - "Verify forwarding tables") + product(int, ZTenuringThreshold, -1, DIAGNOSTIC, \ + "Young generation tenuring threshold, -1 for dynamic computation")\ + range(-1, static_cast(ZPageAgeMax)) // end of GC_Z_FLAGS diff --git a/src/hotspot/share/jfr/dcmd/jfrDcmds.cpp b/src/hotspot/share/jfr/dcmd/jfrDcmds.cpp index b9bdbe2480a9a..8482583da4097 100644 --- a/src/hotspot/share/jfr/dcmd/jfrDcmds.cpp +++ b/src/hotspot/share/jfr/dcmd/jfrDcmds.cpp @@ -58,7 +58,7 @@ bool register_jfr_dcmds() { static bool is_disabled(outputStream* output) { if (Jfr::is_disabled()) { - if (output != NULL) { + if (output != nullptr) { output->print_cr("Flight Recorder is disabled.\n"); } return true; @@ -93,14 +93,14 @@ static bool invalid_state(outputStream* out, TRAPS) { } static void handle_pending_exception(outputStream* output, bool startup, oop throwable) { - assert(throwable != NULL, "invariant"); + assert(throwable != nullptr, "invariant"); oop msg = 
java_lang_Throwable::message(throwable); - if (msg == NULL) { + if (msg == nullptr) { return; } char* text = java_lang_String::as_utf8_string(msg); - if (text != NULL) { + if (text != nullptr) { if (startup) { log_error(jfr,startup)("%s", text); } else { @@ -111,12 +111,12 @@ static void handle_pending_exception(outputStream* output, bool startup, oop thr static void print_message(outputStream* output, oop content, TRAPS) { objArrayOop lines = objArrayOop(content); - assert(lines != NULL, "invariant"); + assert(lines != nullptr, "invariant"); assert(lines->is_array(), "must be array"); const int length = lines->length(); for (int i = 0; i < length; ++i) { const char* text = JfrJavaSupport::c_str(lines->obj_at(i), THREAD); - if (text == NULL) { + if (text == nullptr) { // An oome has been thrown and is pending. break; } @@ -127,12 +127,12 @@ static void print_message(outputStream* output, oop content, TRAPS) { static void log(oop content, TRAPS) { LogMessage(jfr,startup) msg; objArrayOop lines = objArrayOop(content); - assert(lines != NULL, "invariant"); + assert(lines != nullptr, "invariant"); assert(lines->is_array(), "must be array"); const int length = lines->length(); for (int i = 0; i < length; ++i) { const char* text = JfrJavaSupport::c_str(lines->obj_at(i), THREAD); - if (text == NULL) { + if (text == nullptr) { // An oome has been thrown and is pending. break; } @@ -145,7 +145,7 @@ static void handle_dcmd_result(outputStream* output, const DCmdSource source, TRAPS) { DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); - assert(output != NULL, "invariant"); + assert(output != nullptr, "invariant"); ResourceMark rm(THREAD); const bool startup = DCmd_Source_Internal == source; if (HAS_PENDING_EXCEPTION) { @@ -177,16 +177,16 @@ static void handle_dcmd_result(outputStream* output, } static oop construct_dcmd_instance(JfrJavaArguments* args, TRAPS) { - assert(args != NULL, "invariant"); + assert(args != nullptr, "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); - assert(args->klass() != NULL, "invariant"); + assert(args->klass() != nullptr, "invariant"); args->set_name(""); args->set_signature("()V"); JfrJavaSupport::new_object(args, CHECK_NULL); return args->result()->get_oop(); } -JfrDCmd::JfrDCmd(outputStream* output, bool heap, int num_arguments) : DCmd(output, heap), _args(NULL), _num_arguments(num_arguments), _delimiter('\0') {} +JfrDCmd::JfrDCmd(outputStream* output, bool heap, int num_arguments) : DCmd(output, heap), _args(nullptr), _num_arguments(num_arguments), _delimiter('\0') {} void JfrDCmd::invoke(JfrJavaArguments& method, TRAPS) const { JavaValue constructor_result(T_OBJECT); @@ -221,7 +221,7 @@ void JfrDCmd::execute(DCmdSource source, TRAPS) { JavaValue result(T_OBJECT); JfrJavaArguments execute(&result, javaClass(), "execute", signature, CHECK); jstring argument = JfrJavaSupport::new_string(_args, CHECK); - jstring s = NULL; + jstring s = nullptr; if (source == DCmd_Source_Internal) { s = JfrJavaSupport::new_string("internal", CHECK); } @@ -248,11 +248,11 @@ void JfrDCmd::print_help(const char* name) const { } static void initialize_dummy_descriptors(GrowableArray* array) { - assert(array != NULL, "invariant"); - DCmdArgumentInfo * const dummy = new DCmdArgumentInfo(NULL, - NULL, - NULL, - NULL, + assert(array != nullptr, "invariant"); + DCmdArgumentInfo * const dummy = new DCmdArgumentInfo(nullptr, + nullptr, + nullptr, + nullptr, false, true, // a DcmdFramework "option" false); @@ -263,7 +263,7 @@ static void 
initialize_dummy_descriptors(GrowableArray* array // Since the DcmdFramework does not support dynamically allocated strings, // we keep them in a thread local arena. The arena is reset between invocations. -static THREAD_LOCAL Arena* dcmd_arena = NULL; +static THREAD_LOCAL Arena* dcmd_arena = nullptr; static void prepare_dcmd_string_arena(JavaThread* jt) { dcmd_arena = JfrThreadLocal::dcmd_arena(jt); @@ -272,17 +272,17 @@ static void prepare_dcmd_string_arena(JavaThread* jt) { } static char* dcmd_arena_allocate(size_t size) { - assert(dcmd_arena != NULL, "invariant"); + assert(dcmd_arena != nullptr, "invariant"); return (char*)dcmd_arena->Amalloc(size); } static const char* get_as_dcmd_arena_string(oop string) { - char* str = NULL; + char* str = nullptr; const typeArrayOop value = java_lang_String::value(string); - if (value != NULL) { + if (value != nullptr) { const size_t length = static_cast(java_lang_String::utf8_length(string, value)) + 1; str = dcmd_arena_allocate(length); - assert(str != NULL, "invariant"); + assert(str != nullptr, "invariant"); java_lang_String::as_utf8_string(string, value, str, static_cast(length)); } return str; @@ -297,7 +297,7 @@ static const char* read_string_field(oop argument, const char* field_name, TRAPS args.set_receiver(argument); JfrJavaSupport::get_field(&args, THREAD); const oop string_oop = result.get_oop(); - return string_oop != NULL ? get_as_dcmd_arena_string(string_oop) : NULL; + return string_oop != nullptr ? get_as_dcmd_arena_string(string_oop) : nullptr; } static bool read_boolean_field(oop argument, const char* field_name, TRAPS) { @@ -342,14 +342,14 @@ GrowableArray* JfrDCmd::argument_info_array() const { return array; } objArrayOop arguments = objArrayOop(result.get_oop()); - assert(arguments != NULL, "invariant"); + assert(arguments != nullptr, "invariant"); assert(arguments->is_array(), "must be array"); const int num_arguments = arguments->length(); assert(num_arguments == _num_arguments, "invariant"); prepare_dcmd_string_arena(thread); for (int i = 0; i < num_arguments; ++i) { DCmdArgumentInfo* const dai = create_info(arguments->obj_at(i), thread); - assert(dai != NULL, "invariant"); + assert(dai != nullptr, "invariant"); array->append(dai); } return array; @@ -366,8 +366,8 @@ GrowableArray* JfrDCmd::argument_name_array() const { JfrConfigureFlightRecorderDCmd::JfrConfigureFlightRecorderDCmd(outputStream* output, bool heap) : DCmdWithParser(output, heap), - _repository_path("repositorypath", "Path to repository,.e.g \\\"My Repository\\\"", "STRING", false, NULL), - _dump_path("dumppath", "Path to dump, e.g. \\\"My Dump path\\\"", "STRING", false, NULL), + _repository_path("repositorypath", "Path to repository,.e.g \\\"My Repository\\\"", "STRING", false, nullptr), + _dump_path("dumppath", "Path to dump, e.g. 
\\\"My Dump path\\\"", "STRING", false, nullptr), _stack_depth("stackdepth", "Stack depth", "JULONG", false, "64"), _global_buffer_count("globalbuffercount", "Number of global buffers,", "JULONG", false, "20"), _global_buffer_size("globalbuffersize", "Size of a global buffers,", "MEMORY SIZE", false, "512k"), @@ -469,22 +469,22 @@ void JfrConfigureFlightRecorderDCmd::execute(DCmdSource source, TRAPS) { Handle h_dcmd_instance(THREAD, dcmd); assert(h_dcmd_instance.not_null(), "invariant"); - jstring repository_path = NULL; - if (_repository_path.is_set() && _repository_path.value() != NULL) { + jstring repository_path = nullptr; + if (_repository_path.is_set() && _repository_path.value() != nullptr) { repository_path = JfrJavaSupport::new_string(_repository_path.value(), CHECK); } - jstring dump_path = NULL; - if (_dump_path.is_set() && _dump_path.value() != NULL) { + jstring dump_path = nullptr; + if (_dump_path.is_set() && _dump_path.value() != nullptr) { dump_path = JfrJavaSupport::new_string(_dump_path.value(), CHECK); } - jobject stack_depth = NULL; - jobject global_buffer_count = NULL; - jobject global_buffer_size = NULL; - jobject thread_buffer_size = NULL; - jobject max_chunk_size = NULL; - jobject memory_size = NULL; + jobject stack_depth = nullptr; + jobject global_buffer_count = nullptr; + jobject global_buffer_size = nullptr; + jobject thread_buffer_size = nullptr; + jobject max_chunk_size = nullptr; + jobject memory_size = nullptr; jobject preserve_repository = nullptr; if (!JfrRecorder::is_created()) { diff --git a/src/hotspot/share/jfr/dcmd/jfrDcmds.hpp b/src/hotspot/share/jfr/dcmd/jfrDcmds.hpp index 7bd27bc110445..5374c0537a410 100644 --- a/src/hotspot/share/jfr/dcmd/jfrDcmds.hpp +++ b/src/hotspot/share/jfr/dcmd/jfrDcmds.hpp @@ -59,7 +59,7 @@ class JfrStartFlightRecordingDCmd : public JfrDCmd { return "Medium: Depending on the settings for a recording, the impact can range from low to high."; } static const JavaPermission permission() { - JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL}; + JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr}; return p; } virtual const char* javaClass() const { @@ -84,7 +84,7 @@ class JfrDumpFlightRecordingDCmd : public JfrDCmd { return "Low"; } static const JavaPermission permission() { - JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL}; + JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr}; return p; } virtual const char* javaClass() const { @@ -109,7 +109,7 @@ class JfrCheckFlightRecordingDCmd : public JfrDCmd { return "Low"; } static const JavaPermission permission() { - JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL}; + JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr}; return p; } virtual const char* javaClass() const { @@ -134,7 +134,7 @@ class JfrStopFlightRecordingDCmd : public JfrDCmd { return "Low"; } static const JavaPermission permission() { - JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL}; + JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr}; return p; } virtual const char* javaClass() const { @@ -175,7 +175,7 @@ class JfrConfigureFlightRecorderDCmd : public DCmdWithParser { return "Low"; } static const JavaPermission permission() { - JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL}; + JavaPermission p = 
{"java.lang.management.ManagementPermission", "monitor", nullptr}; return p; } static int num_arguments() { return 10; } diff --git a/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp b/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp index 4e4856b84d56e..5e091192dafcc 100644 --- a/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp +++ b/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -191,7 +191,7 @@ static int skip_annotation_value(const address, int, int); // fwd decl // Skip an annotation. Return >=limit if there is any problem. static int next_annotation_index(const address buffer, int limit, int index) { - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); index += 2; // skip atype if ((index += 2) >= limit) { return limit; @@ -206,7 +206,7 @@ static int next_annotation_index(const address buffer, int limit, int index) { // Skip an annotation value. Return >=limit if there is any problem. static int skip_annotation_value(const address buffer, int limit, int index) { - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); // value := switch (tag:u1) { // case B, C, I, S, Z, D, F, J, c: con:u2; // case e: e_class:u2 e_name:u2; @@ -280,7 +280,7 @@ class AnnotationElementIterator : public StackObj { _limit(limit), _current(element_name_offset), _next(element_name_offset) { - assert(_buffer != NULL, "invariant"); + assert(_buffer != nullptr, "invariant"); assert(_next == element_name_offset, "invariant"); assert(_current == element_name_offset, "invariant"); } @@ -332,11 +332,11 @@ class AnnotationIterator : public StackObj { public: AnnotationIterator(const InstanceKlass* ik, AnnotationArray* ar) : _ik(ik), - _limit(ar != NULL ? ar->length() : 0), - _buffer(_limit > 2 ? ar->adr_at(2) : NULL), + _limit(ar != nullptr ? ar->length() : 0), + _buffer(_limit > 2 ? 
ar->adr_at(2) : nullptr), _current(0), _next(0) { - if (_buffer != NULL) { + if (_buffer != nullptr) { _limit -= 2; // subtract sizeof(u2) number of annotations field } } @@ -358,7 +358,7 @@ class AnnotationIterator : public StackObj { return AnnotationElementIterator(_ik, _buffer + _current, _next - _current); } const Symbol* type() const { - assert(_buffer != NULL, "invariant"); + assert(_buffer != nullptr, "invariant"); assert(_current < _limit, "invariant"); return _ik->constants()->symbol_at(JfrBigEndian::read(_buffer + _current)); } @@ -366,9 +366,9 @@ class AnnotationIterator : public StackObj { static const char value_name[] = "value"; static bool has_annotation(const InstanceKlass* ik, const Symbol* annotation_type, bool& value) { - assert(annotation_type != NULL, "invariant"); + assert(annotation_type != nullptr, "invariant"); AnnotationArray* class_annotations = ik->class_annotations(); - if (class_annotations == NULL) { + if (class_annotations == nullptr) { return false; } @@ -379,7 +379,7 @@ static bool has_annotation(const InstanceKlass* ik, const Symbol* annotation_typ // target annotation found static const Symbol* value_symbol = SymbolTable::probe(value_name, sizeof value_name - 1); - assert(value_symbol != NULL, "invariant"); + assert(value_symbol != nullptr, "invariant"); const AnnotationElementIterator element_iterator = annotation_iterator.elements(); while (element_iterator.has_next()) { element_iterator.move_to_next(); @@ -399,14 +399,14 @@ static bool has_annotation(const InstanceKlass* ik, const Symbol* annotation_typ // Searching moves upwards in the klass hierarchy in order to support // inherited annotations in addition to the ability to override. static bool annotation_value(const InstanceKlass* ik, const Symbol* annotation_type, bool& value) { - assert(ik != NULL, "invariant"); - assert(annotation_type != NULL, "invariant"); + assert(ik != nullptr, "invariant"); + assert(annotation_type != nullptr, "invariant"); assert(JdkJfrEvent::is_a(ik), "invariant"); if (has_annotation(ik, annotation_type, value)) { return true; } InstanceKlass* const super = InstanceKlass::cast(ik->super()); - return super != NULL && JdkJfrEvent::is_a(super) ? annotation_value(super, annotation_type, value) : false; + return super != nullptr && JdkJfrEvent::is_a(super) ? 
annotation_value(super, annotation_type, value) : false; } static const char jdk_jfr_module_name[] = "jdk.jfr"; @@ -416,30 +416,30 @@ static bool java_base_can_read_jdk_jfr() { if (can_read) { return true; } - static Symbol* jdk_jfr_module_symbol = NULL; - if (jdk_jfr_module_symbol == NULL) { + static Symbol* jdk_jfr_module_symbol = nullptr; + if (jdk_jfr_module_symbol == nullptr) { jdk_jfr_module_symbol = SymbolTable::probe(jdk_jfr_module_name, sizeof jdk_jfr_module_name - 1); - if (jdk_jfr_module_symbol == NULL) { + if (jdk_jfr_module_symbol == nullptr) { return false; } } - assert(jdk_jfr_module_symbol != NULL, "invariant"); + assert(jdk_jfr_module_symbol != nullptr, "invariant"); ModuleEntryTable* const table = Modules::get_module_entry_table(Handle()); - assert(table != NULL, "invariant"); + assert(table != nullptr, "invariant"); const ModuleEntry* const java_base_module = table->javabase_moduleEntry(); - if (java_base_module == NULL) { + if (java_base_module == nullptr) { return false; } - assert(java_base_module != NULL, "invariant"); + assert(java_base_module != nullptr, "invariant"); ModuleEntry* jdk_jfr_module; { MutexLocker ml(Module_lock); jdk_jfr_module = table->lookup_only(jdk_jfr_module_symbol); - if (jdk_jfr_module == NULL) { + if (jdk_jfr_module == nullptr) { return false; } } - assert(jdk_jfr_module != NULL, "invariant"); + assert(jdk_jfr_module != nullptr, "invariant"); if (java_base_module->can_read(jdk_jfr_module)) { can_read = true; } @@ -452,18 +452,18 @@ static const char registered_constant[] = "Ljdk/jfr/Registered;"; // Searching moves upwards in the klass hierarchy in order to support // inherited annotations in addition to the ability to override. static bool should_register_klass(const InstanceKlass* ik, bool& untypedEventHandler) { - assert(ik != NULL, "invariant"); + assert(ik != nullptr, "invariant"); assert(JdkJfrEvent::is_a(ik), "invariant"); assert(!untypedEventHandler, "invariant"); - static const Symbol* registered_symbol = NULL; - if (registered_symbol == NULL) { + static const Symbol* registered_symbol = nullptr; + if (registered_symbol == nullptr) { registered_symbol = SymbolTable::probe(registered_constant, sizeof registered_constant - 1); - if (registered_symbol == NULL) { + if (registered_symbol == nullptr) { untypedEventHandler = true; return false; } } - assert(registered_symbol != NULL, "invariant"); + assert(registered_symbol != nullptr, "invariant"); bool value = false; // to be set by annotation_value untypedEventHandler = !(annotation_value(ik, registered_symbol, value) || java_base_can_read_jdk_jfr()); return value; @@ -473,14 +473,14 @@ static bool should_register_klass(const InstanceKlass* ik, bool& untypedEventHan * Map an utf8 constant back to its CONSTANT_UTF8_INFO */ static u2 utf8_info_index(const InstanceKlass* ik, const Symbol* const target, TRAPS) { - assert(target != NULL, "invariant"); + assert(target != nullptr, "invariant"); const ConstantPool* cp = ik->constants(); const int cp_len = cp->length(); for (u2 index = 1; index < cp_len; ++index) { const constantTag tag = cp->tag_at(index); if (tag.is_utf8()) { const Symbol* const utf8_sym = cp->symbol_at(index); - assert(utf8_sym != NULL, "invariant"); + assert(utf8_sym != nullptr, "invariant"); if (utf8_sym == target) { return index; } @@ -497,7 +497,7 @@ static bool is_index_within_range(u2 index, u2 orig_cp_len, u2 new_cp_entries_le #endif static u2 add_utf8_info(JfrBigEndianWriter& writer, const char* utf8_constant, u2 orig_cp_len, u2& new_cp_entries_len) { - 
assert(utf8_constant != NULL, "invariant"); + assert(utf8_constant != nullptr, "invariant"); writer.write(JVM_CONSTANT_Utf8); writer.write_utf8_u2_len(utf8_constant); assert(writer.is_valid(), "invariant"); @@ -540,7 +540,7 @@ static u2 add_flr_register_method_constants(JfrBigEndianWriter& writer, u2 orig_cp_len, u2& number_of_new_constants, TRAPS) { - assert(utf8_indexes != NULL, "invariant"); + assert(utf8_indexes != nullptr, "invariant"); return add_method_ref_info(writer, utf8_indexes[UTF8_OPT_FlightRecorder], utf8_indexes[UTF8_OPT_register], @@ -573,7 +573,7 @@ static jlong add_field_info(JfrBigEndianWriter& writer, u2 name_index, u2 desc_i } static u2 add_field_infos(JfrBigEndianWriter& writer, const u2* utf8_indexes, bool untypedEventConfiguration) { - assert(utf8_indexes != NULL, "invariant"); + assert(utf8_indexes != nullptr, "invariant"); add_field_info(writer, utf8_indexes[UTF8_REQ_eventConfiguration], untypedEventConfiguration ? utf8_indexes[UTF8_OPT_LjavaLangObject] : utf8_indexes[UTF8_OPT_eventConfiguration_FIELD_DESC], @@ -648,7 +648,7 @@ static jlong add_method_info(JfrBigEndianWriter& writer, * Stream should come in at the start position. */ static u2 position_stream_after_cp(const ClassFileStream* stream) { - assert(stream != NULL, "invariant"); + assert(stream != nullptr, "invariant"); assert(stream->current_offset() == 0, "invariant"); stream->skip_u4_fast(2); // 8 bytes skipped const u2 cp_len = stream->get_u2_fast(); @@ -715,7 +715,7 @@ static u2 position_stream_after_cp(const ClassFileStream* stream) { * Stream should come in positioned just before fields_count */ static u2 position_stream_after_fields(const ClassFileStream* stream) { - assert(stream != NULL, "invariant"); + assert(stream != nullptr, "invariant"); assert(stream->current_offset() > 0, "invariant"); // fields len const u2 orig_fields_len = stream->get_u2_fast(); @@ -745,9 +745,9 @@ static u2 position_stream_after_methods(JfrBigEndianWriter& writer, bool register_klass, const Method* clinit_method, u4& orig_method_len_offset) { - assert(stream != NULL, "invariant"); + assert(stream != nullptr, "invariant"); assert(stream->current_offset() > 0, "invariant"); - assert(utf8_indexes != NULL, "invariant"); + assert(utf8_indexes != nullptr, "invariant"); // We will come back to this location when we // know how many methods there will be. writer.reserve(sizeof(u2)); @@ -766,7 +766,7 @@ static u2 position_stream_after_methods(JfrBigEndianWriter& writer, const u4 attrib_len = stream->get_u4_fast(); stream->skip_u1_fast(attrib_len); } - if (clinit_method != NULL && name_index == clinit_method->name_index()) { + if (clinit_method != nullptr && name_index == clinit_method->name_index()) { // The method just parsed is an existing method. // If the class has the @Registered(false) annotation, i.e. marking a class // for opting out from automatic registration, then we do not need to do anything. 
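The class-file surgery in these hunks is driven by JfrBigEndianWriter, which appends raw constant-pool entries and field_info/method_info records in the big-endian byte order the class-file format requires. As a rough, self-contained illustration of the byte layout that add_utf8_info ends up producing (a CONSTANT_Utf8_info: u1 tag, u2 length, then the bytes), here is a minimal sketch against a plain byte buffer; put_u1, put_u2, append_constant_utf8 and the std::vector buffer are illustrative stand-ins, not the HotSpot writer API, and the string is only sample data.

    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <vector>

    // Helpers that append values in class-file (big-endian) byte order.
    static void put_u1(std::vector<uint8_t>& buf, uint8_t v) { buf.push_back(v); }
    static void put_u2(std::vector<uint8_t>& buf, uint16_t v) {
      buf.push_back(static_cast<uint8_t>(v >> 8));   // high byte first
      buf.push_back(static_cast<uint8_t>(v & 0xff)); // then the low byte
    }

    // CONSTANT_Utf8_info on disk: u1 tag (= 1), u2 length, u1 bytes[length].
    static void append_constant_utf8(std::vector<uint8_t>& cp_bytes, const std::string& s) {
      put_u1(cp_bytes, 1);                                 // JVM_CONSTANT_Utf8 tag
      put_u2(cp_bytes, static_cast<uint16_t>(s.size()));   // length prefix
      cp_bytes.insert(cp_bytes.end(), s.begin(), s.end()); // UTF-8 payload
    }

    int main() {
      std::vector<uint8_t> cp_bytes;
      append_constant_utf8(cp_bytes, "jdk/jfr/FlightRecorder");
      return cp_bytes.size() == 1 + 2 + std::strlen("jdk/jfr/FlightRecorder") ? 0 : 1;
    }

The real writer additionally has to keep the rewritten class file's constant_pool_count in step with every entry it appends, which is what the orig_cp_len / new_cp_entries_len bookkeeping in these hunks is for.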
@@ -790,7 +790,7 @@ static u2 position_stream_after_methods(JfrBigEndianWriter& writer, } static u2 add_method_infos(JfrBigEndianWriter& writer, const u2* utf8_indexes) { - assert(utf8_indexes != NULL, "invariant"); + assert(utf8_indexes != nullptr, "invariant"); add_method_info(writer, utf8_indexes[UTF8_REQ_begin], utf8_indexes[UTF8_REQ_EMPTY_VOID_METHOD_DESC], @@ -838,13 +838,13 @@ static u2 add_method_infos(JfrBigEndianWriter& writer, const u2* utf8_indexes) { } static void adjust_exception_table(JfrBigEndianWriter& writer, u2 bci_adjustment_offset, const Method* method, TRAPS) { - const u2 ex_table_length = method != NULL ? (u2)method->exception_table_length() : 0; + const u2 ex_table_length = method != nullptr ? (u2)method->exception_table_length() : 0; writer.write(ex_table_length); // Exception table length if (ex_table_length > 0) { - assert(method != NULL, "invariant"); + assert(method != nullptr, "invariant"); const ExceptionTableElement* const ex_elements = method->exception_table_start(); for (int i = 0; i < ex_table_length; ++i) { - assert(ex_elements != NULL, "invariant"); + assert(ex_elements != nullptr, "invariant"); writer.write(ex_elements[i].start_pc + bci_adjustment_offset); writer.write(ex_elements[i].end_pc + bci_adjustment_offset); writer.write(ex_elements[i].handler_pc + bci_adjustment_offset); @@ -872,8 +872,8 @@ static void adjust_stack_map(JfrBigEndianWriter& writer, const u2* utf8_indexes, u2 bci_adjustment_offset, TRAPS) { - assert(stack_map != NULL, "invariant"); - assert(utf8_indexes != NULL, "invariant"); + assert(stack_map != nullptr, "invariant"); + assert(utf8_indexes != nullptr, "invariant"); writer.write(utf8_indexes[UTF8_OPT_StackMapTable]); const jlong stack_map_attrib_len_offset = writer.current_offset(); writer.reserve(sizeof(u4)); @@ -920,8 +920,8 @@ static void adjust_line_number_table(JfrBigEndianWriter& writer, u4 bci_adjustement_offset, const Method* method, TRAPS) { - assert(utf8_indexes != NULL, "invariant"); - assert(method != NULL, "invariant"); + assert(utf8_indexes != nullptr, "invariant"); + assert(method != nullptr, "invariant"); assert(method->has_linenumber_table(), "invariant"); writer.write(utf8_indexes[UTF8_OPT_LineNumberTable]); const jlong lnt_attributes_length_offset = writer.current_offset(); @@ -950,8 +950,8 @@ static u2 adjust_local_variable_table(JfrBigEndianWriter& writer, u2 bci_adjustment_offset, const Method* method, TRAPS) { - assert(utf8_indexes != NULL, "invariant"); - assert(method != NULL, "invariant"); + assert(utf8_indexes != nullptr, "invariant"); + assert(method != nullptr, "invariant"); assert(method->has_localvariable_table(), "invariant"); writer.write(utf8_indexes[UTF8_OPT_LocalVariableTable]); const jlong lvt_attributes_length_offset = writer.current_offset(); @@ -959,7 +959,7 @@ static u2 adjust_local_variable_table(JfrBigEndianWriter& writer, const int lvt_len = method->localvariable_table_length(); writer.write((u2)lvt_len); const LocalVariableTableElement* table = method->localvariable_table_start(); - assert(table != NULL, "invariant"); + assert(table != nullptr, "invariant"); u2 num_lvtt_entries = 0; for (int i = 0; i < lvt_len; ++i) { writer.write(table[i].start_bci + bci_adjustment_offset); @@ -990,7 +990,7 @@ static void adjust_local_variable_type_table(JfrBigEndianWriter& writer, writer.reserve(sizeof(u4)); writer.write(num_lvtt_entries); const LocalVariableTableElement* table = method->localvariable_table_start(); - assert(table != NULL, "invariant"); + assert(table != nullptr, "invariant"); 
const int lvt_len = method->localvariable_table_length(); for (int i = 0; i < lvt_len; ++i) { if (table[i].signature_cp_index > 0) { @@ -1013,23 +1013,23 @@ static void adjust_code_attributes(JfrBigEndianWriter& writer, const Method* clinit_method, TRAPS) { // "Code" attributes - assert(utf8_indexes != NULL, "invariant"); + assert(utf8_indexes != nullptr, "invariant"); const jlong code_attributes_offset = writer.current_offset(); writer.reserve(sizeof(u2)); u2 number_of_code_attributes = 0; - if (clinit_method != NULL) { + if (clinit_method != nullptr) { Array* stack_map = clinit_method->stackmap_data(); - if (stack_map != NULL) { + if (stack_map != nullptr) { ++number_of_code_attributes; adjust_stack_map(writer, stack_map, utf8_indexes, bci_adjustment_offset, THREAD); assert(writer.is_valid(), "invariant"); } - if (clinit_method != NULL && clinit_method->has_linenumber_table()) { + if (clinit_method != nullptr && clinit_method->has_linenumber_table()) { ++number_of_code_attributes; adjust_line_number_table(writer, utf8_indexes, bci_adjustment_offset, clinit_method, THREAD); assert(writer.is_valid(), "invariant"); } - if (clinit_method != NULL && clinit_method->has_localvariable_table()) { + if (clinit_method != nullptr && clinit_method->has_localvariable_table()) { ++number_of_code_attributes; const u2 num_of_lvtt_entries = adjust_local_variable_table(writer, utf8_indexes, bci_adjustment_offset, clinit_method, THREAD); assert(writer.is_valid(), "invariant"); @@ -1053,7 +1053,7 @@ static jlong insert_clinit_method(const InstanceKlass* ik, const u2 register_method_ref_index, const Method* clinit_method, TRAPS) { - assert(utf8_indexes != NULL, "invariant"); + assert(utf8_indexes != nullptr, "invariant"); // The injected code length is always this value. // This is to ensure that padding can be done // where needed and to simplify size calculations. @@ -1061,10 +1061,10 @@ static jlong insert_clinit_method(const InstanceKlass* ik, const u2 name_index = utf8_indexes[UTF8_OPT_clinit]; assert(name_index != invalid_cp_index, "invariant"); const u2 desc_index = utf8_indexes[UTF8_REQ_EMPTY_VOID_METHOD_DESC]; - const u2 max_stack = MAX2(clinit_method != NULL ? clinit_method->verifier_max_stack() : 1, 1); - const u2 max_locals = MAX2(clinit_method != NULL ? clinit_method->max_locals() : 0, 0); - const u2 orig_bytecodes_length = clinit_method != NULL ? (u2)clinit_method->code_size() : 0; - const address orig_bytecodes = clinit_method != NULL ? clinit_method->code_base() : NULL; + const u2 max_stack = MAX2(clinit_method != nullptr ? clinit_method->verifier_max_stack() : 1, 1); + const u2 max_locals = MAX2(clinit_method != nullptr ? clinit_method->max_locals() : 0, 0); + const u2 orig_bytecodes_length = clinit_method != nullptr ? (u2)clinit_method->code_size() : 0; + const address orig_bytecodes = clinit_method != nullptr ? 
clinit_method->code_base() : nullptr; const u2 new_code_length = injected_code_length + orig_bytecodes_length; DEBUG_ONLY(const jlong start_offset = writer.current_offset();) writer.write(JVM_ACC_STATIC); // flags @@ -1090,7 +1090,7 @@ static jlong insert_clinit_method(const InstanceKlass* ik, writer.write((u1)Bytecodes::_invokestatic); // invoke "FlightRecorder.register(Ljava/lang/Class;") writer.write(register_method_ref_index); - if (clinit_method == NULL) { + if (clinit_method == nullptr) { writer.write((u1)Bytecodes::_nop); writer.write((u1)Bytecodes::_return); } else { @@ -1118,30 +1118,30 @@ static jlong insert_clinit_method(const InstanceKlass* ik, return writer.current_offset(); } -static Symbol* begin = NULL; -static Symbol* end = NULL; -static Symbol* commit = NULL; -static Symbol* isEnabled = NULL; -static Symbol* shouldCommit = NULL; -static Symbol* void_method_sig = NULL; -static Symbol* boolean_method_sig = NULL; +static Symbol* begin = nullptr; +static Symbol* end = nullptr; +static Symbol* commit = nullptr; +static Symbol* isEnabled = nullptr; +static Symbol* shouldCommit = nullptr; +static Symbol* void_method_sig = nullptr; +static Symbol* boolean_method_sig = nullptr; static void initialize_symbols() { - if (begin == NULL) { + if (begin == nullptr) { begin = SymbolTable::probe("begin", 5); - assert(begin != NULL, "invariant"); + assert(begin != nullptr, "invariant"); end = SymbolTable::probe("end", 3); - assert(end != NULL, "invariant"); + assert(end != nullptr, "invariant"); commit = SymbolTable::probe("commit", 6); - assert(commit != NULL, "invariant"); + assert(commit != nullptr, "invariant"); isEnabled = SymbolTable::probe("isEnabled", 9); - assert(isEnabled != NULL, "invariant"); + assert(isEnabled != nullptr, "invariant"); shouldCommit = SymbolTable::probe("shouldCommit", 12); - assert(shouldCommit != NULL, "invariant"); + assert(shouldCommit != nullptr, "invariant"); void_method_sig = SymbolTable::probe("()V", 3); - assert(void_method_sig != NULL, "invariant"); + assert(void_method_sig != nullptr, "invariant"); boolean_method_sig = SymbolTable::probe("()Z", 3); - assert(boolean_method_sig != NULL, "invariant"); + assert(boolean_method_sig != nullptr, "invariant"); } } @@ -1151,14 +1151,14 @@ static ClassFileStream* schema_extend_event_klass_bytes(const InstanceKlass* ik, initialize_symbols(); static const u2 public_final_flag_mask = JVM_ACC_PUBLIC | JVM_ACC_FINAL; const ClassFileStream* const orig_stream = parser.clone_stream(); - assert(orig_stream != NULL, "invariant"); + assert(orig_stream != nullptr, "invariant"); const int orig_stream_length = orig_stream->length(); // allocate an identically sized buffer u1* const new_buffer = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, u1, orig_stream_length); - if (new_buffer == NULL) { - return NULL; + if (new_buffer == nullptr) { + return nullptr; } - assert(new_buffer != NULL, "invariant"); + assert(new_buffer != nullptr, "invariant"); // memcpy the entire [B memcpy(new_buffer, orig_stream->buffer(), orig_stream_length); const u2 orig_cp_len = position_stream_after_cp(orig_stream); @@ -1198,7 +1198,7 @@ static ClassFileStream* schema_extend_event_klass_bytes(const InstanceKlass* ik, orig_stream->skip_u1_fast(attrib_len); } } - return new ClassFileStream(new_buffer, orig_stream_length, NULL, ClassFileStream::verify); + return new ClassFileStream(new_buffer, orig_stream_length, nullptr, ClassFileStream::verify); } // Attempt to locate an existing UTF8_INFO mapping the utf8_constant. 
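The comment closing the previous hunk ("Attempt to locate an existing UTF8_INFO mapping the utf8_constant") describes the find-or-add discipline used throughout this transformer: probe the original constant pool first, and only append (and count) a new entry when the symbol is not already there, so the rewritten pool does not carry duplicate UTF8 entries. Below is a minimal sketch of that shape, using a std::vector<std::string> as a stand-in for the pool (real constant pools are 1-indexed, which the placeholder slot mimics); Utf8Pool and find_or_add are illustrative names, not HotSpot code.

    #include <cstdint>
    #include <string>
    #include <vector>

    // Stand-in "constant pool": slot 0 is unused so indices start at 1,
    // matching real class-file constant-pool numbering.
    struct Utf8Pool {
      std::vector<std::string> entries{""};
      uint16_t added = 0;  // entries appended by this rewrite pass

      uint16_t find_or_add(const std::string& utf8) {
        for (uint16_t i = 1; i < entries.size(); ++i) {
          if (entries[i] == utf8) {
            return i;            // reuse the existing CONSTANT_Utf8
          }
        }
        entries.push_back(utf8); // not found: append a new entry ...
        ++added;                 // ... and remember that the pool grew
        return static_cast<uint16_t>(entries.size() - 1);
      }
    };

    int main() {
      Utf8Pool pool;
      const uint16_t a = pool.find_or_add("LineNumberTable");
      const uint16_t b = pool.find_or_add("LineNumberTable");  // second call reuses the index
      return (a == b && pool.added == 1) ? 0 : 1;
    }

In the patch itself the appended entry is also emitted through the big-endian writer, and the running count of added entries is later folded into the rewritten constant_pool_count.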
@@ -1209,7 +1209,7 @@ static u2 find_or_add_utf8_info(JfrBigEndianWriter& writer, u2 orig_cp_len, u2& added_cp_entries, TRAPS) { - assert(utf8_constant != NULL, "invariant"); + assert(utf8_constant != nullptr, "invariant"); TempNewSymbol utf8_sym = SymbolTable::new_symbol(utf8_constant); // lookup existing const int utf8_orig_idx = utf8_info_index(ik, utf8_sym, THREAD); @@ -1240,7 +1240,7 @@ static u2 resolve_utf8_indexes(JfrBigEndianWriter& writer, bool register_klass, bool untypedEventConfiguration, TRAPS) { - assert(utf8_indexes != NULL, "invariant"); + assert(utf8_indexes != nullptr, "invariant"); u2 added_cp_entries = 0; // resolve all required symbols for (u2 index = 0; index < NOF_UTF8_REQ_SYMBOLS; ++index) { @@ -1270,21 +1270,21 @@ static u2 resolve_utf8_indexes(JfrBigEndianWriter& writer, utf8_indexes[UTF8_OPT_CLASS_VOID_METHOD_DESC] = invalid_cp_index; } - if (clinit_method != NULL && clinit_method->has_stackmap_table()) { + if (clinit_method != nullptr && clinit_method->has_stackmap_table()) { utf8_indexes[UTF8_OPT_StackMapTable] = find_or_add_utf8_info(writer, ik, utf8_constants[UTF8_OPT_StackMapTable], orig_cp_len, added_cp_entries, THREAD); } else { utf8_indexes[UTF8_OPT_StackMapTable] = invalid_cp_index; } - if (clinit_method != NULL && clinit_method->has_linenumber_table()) { + if (clinit_method != nullptr && clinit_method->has_linenumber_table()) { utf8_indexes[UTF8_OPT_LineNumberTable] = find_or_add_utf8_info(writer, ik, utf8_constants[UTF8_OPT_LineNumberTable], orig_cp_len, added_cp_entries, THREAD); } else { utf8_indexes[UTF8_OPT_LineNumberTable] = invalid_cp_index; } - if (clinit_method != NULL && clinit_method->has_localvariable_table()) { + if (clinit_method != nullptr && clinit_method->has_localvariable_table()) { utf8_indexes[UTF8_OPT_LocalVariableTable] = find_or_add_utf8_info(writer, ik, utf8_constants[UTF8_OPT_LocalVariableTable], orig_cp_len, added_cp_entries, THREAD); utf8_indexes[UTF8_OPT_LocalVariableTypeTable] = @@ -1301,7 +1301,7 @@ static u1* schema_extend_event_subklass_bytes(const InstanceKlass* ik, const ClassFileParser& parser, jint& size_of_new_bytes, TRAPS) { - assert(ik != NULL, "invariant"); + assert(ik != nullptr, "invariant"); // If the class already has a clinit method // we need to take that into account const Method* clinit_method = ik->class_initializer(); @@ -1317,12 +1317,12 @@ static u1* schema_extend_event_subklass_bytes(const InstanceKlass* ik, // to be used in building up a modified class [B. const jint new_buffer_size = extra_stream_bytes + orig_stream_size; u1* const new_buffer = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, u1, new_buffer_size); - if (new_buffer == NULL) { + if (new_buffer == nullptr) { log_error(jfr, system) ("Thread local allocation (native) for " SIZE_FORMAT " bytes failed in JfrEventClassTransformer::on_klass_creation", static_cast(new_buffer_size)); - return NULL; + return nullptr; } - assert(new_buffer != NULL, "invariant"); + assert(new_buffer != nullptr, "invariant"); // [B wrapped in a big endian writer JfrBigEndianWriter writer(new_buffer, new_buffer_size); assert(writer.current_offset() == 0, "invariant"); @@ -1406,7 +1406,7 @@ static u1* schema_extend_event_subklass_bytes(const InstanceKlass* ik, if (register_klass) { insert_clinit_method(ik, parser, writer, orig_cp_len, utf8_indexes, flr_register_method_ref_index, clinit_method, THREAD); } - number_of_new_methods += clinit_method != NULL ? 0 : register_klass ? 1 : 0; + number_of_new_methods += clinit_method != nullptr ? 0 : register_klass ? 
1 : 0; // Update classfile methods_count writer.write_at_offset(orig_methods_len + number_of_new_methods, new_method_len_offset); assert(writer.is_valid(), "invariant"); @@ -1423,18 +1423,18 @@ static bool should_force_instrumentation() { } static void log_pending_exception(oop throwable) { - assert(throwable != NULL, "invariant"); + assert(throwable != nullptr, "invariant"); oop msg = java_lang_Throwable::message(throwable); - if (msg != NULL) { + if (msg != nullptr) { char* text = java_lang_String::as_utf8_string(msg); - if (text != NULL) { + if (text != nullptr) { log_error(jfr, system) ("%s", text); } } } static bool has_pending_exception(TRAPS) { - assert(THREAD != NULL, "invariant"); + assert(THREAD != nullptr, "invariant"); if (HAS_PENDING_EXCEPTION) { log_pending_exception(PENDING_EXCEPTION); CLEAR_PENDING_EXCEPTION; @@ -1444,17 +1444,17 @@ static bool has_pending_exception(TRAPS) { } static bool has_local_method_implementation(const InstanceKlass* ik, const Symbol* name, const Symbol* signature) { - assert(ik != NULL, "invariant"); - assert(name != NULL, "invariant"); - assert(signature != NULL, "invariant"); - return NULL != ik->find_local_method(name, signature, Klass::OverpassLookupMode::skip, Klass::StaticLookupMode::find, + assert(ik != nullptr, "invariant"); + assert(name != nullptr, "invariant"); + assert(signature != nullptr, "invariant"); + return nullptr != ik->find_local_method(name, signature, Klass::OverpassLookupMode::skip, Klass::StaticLookupMode::find, Klass::PrivateLookupMode::find); } // If for a subklass, on initial class load, an implementation exist for any of the final methods declared in Event, // then constraints are considered breached. static bool invalid_preconditions_for_subklass_on_initial_load(const InstanceKlass* ik) { - assert(ik != NULL, "invariant"); + assert(ik != nullptr, "invariant"); return has_local_method_implementation(ik, begin, void_method_sig) || has_local_method_implementation(ik, end, void_method_sig) || has_local_method_implementation(ik, commit, void_method_sig) || @@ -1470,19 +1470,19 @@ static ClassFileStream* schema_extend_event_subklass_bytes(const InstanceKlass* // Remove the tag denoting this as a jdk.jfr.Event subklass. No instrumentation, hence no events can be written. // The class is allowed to load as-is, but it is classified as outside of the jfr system. 
JdkJfrEvent::remove(ik); - return NULL; + return nullptr; } jint size_of_new_bytes = 0; const u1* new_bytes = schema_extend_event_subklass_bytes(ik, parser, size_of_new_bytes, THREAD); - if (new_bytes == NULL) { - return NULL; + if (new_bytes == nullptr) { + return nullptr; } - assert(new_bytes != NULL, "invariant"); + assert(new_bytes != nullptr, "invariant"); assert(size_of_new_bytes > 0, "invariant"); const bool force_instrumentation = should_force_instrumentation(); if (Jfr::is_recording() || force_instrumentation) { jint size_of_instrumented_bytes = 0; - unsigned char* instrumented_bytes = NULL; + unsigned char* instrumented_bytes = nullptr; const jclass super = static_cast(JfrJavaSupport::local_jni_handle(ik->super()->java_mirror(), THREAD)); const jboolean boot_class_loader = ik->class_loader_data()->is_boot_class_loader_data(); JfrUpcalls::new_bytes_eager_instrumentation(JfrTraceId::load_raw(ik), @@ -1496,15 +1496,15 @@ static ClassFileStream* schema_extend_event_subklass_bytes(const InstanceKlass* THREAD); JfrJavaSupport::destroy_local_jni_handle(super); if (has_pending_exception(THREAD)) { - return NULL; + return nullptr; } - assert(instrumented_bytes != NULL, "invariant"); + assert(instrumented_bytes != nullptr, "invariant"); assert(size_of_instrumented_bytes > 0, "invariant"); new_bytes = instrumented_bytes; size_of_new_bytes = size_of_instrumented_bytes; is_instrumented = true; } - return new ClassFileStream(new_bytes, size_of_new_bytes, NULL, ClassFileStream::verify); + return new ClassFileStream(new_bytes, size_of_new_bytes, nullptr, ClassFileStream::verify); } static bool _force_instrumentation = false; @@ -1518,14 +1518,14 @@ bool JfrEventClassTransformer::is_force_instrumentation() { } static ClassFileStream* retransform_bytes(const Klass* existing_klass, const ClassFileParser& parser, bool& is_instrumented, TRAPS) { - assert(existing_klass != NULL, "invariant"); + assert(existing_klass != nullptr, "invariant"); assert(!is_instrumented, "invariant"); assert(JdkJfrEvent::is_a(existing_klass) || JdkJfrEvent::is_host(existing_klass), "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); jint size_of_new_bytes = 0; - unsigned char* new_bytes = NULL; + unsigned char* new_bytes = nullptr; const ClassFileStream* const stream = parser.clone_stream(); - assert(stream != NULL, "invariant"); + assert(stream != nullptr, "invariant"); const jclass clazz = static_cast(JfrJavaSupport::local_jni_handle(existing_klass->java_mirror(), THREAD)); JfrUpcalls::on_retransform(JfrTraceId::load_raw(existing_klass), clazz, @@ -1536,19 +1536,19 @@ static ClassFileStream* retransform_bytes(const Klass* existing_klass, const Cla THREAD); JfrJavaSupport::destroy_local_jni_handle(clazz); if (has_pending_exception(THREAD)) { - return NULL; + return nullptr; } - assert(new_bytes != NULL, "invariant"); + assert(new_bytes != nullptr, "invariant"); assert(size_of_new_bytes > 0, "invariant"); is_instrumented = true; - return new ClassFileStream(new_bytes, size_of_new_bytes, NULL, ClassFileStream::verify); + return new ClassFileStream(new_bytes, size_of_new_bytes, nullptr, ClassFileStream::verify); } // On initial class load. 
static void cache_class_file_data(InstanceKlass* new_ik, const ClassFileStream* new_stream, const JavaThread* thread) { - assert(new_ik != NULL, "invariant"); - assert(new_stream != NULL, "invariant"); - assert(thread != NULL, "invariant"); + assert(new_ik != nullptr, "invariant"); + assert(new_stream != nullptr, "invariant"); + assert(thread != nullptr, "invariant"); assert(!thread->has_pending_exception(), "invariant"); if (!JfrOptionSet::allow_retransforms()) { return; @@ -1556,7 +1556,7 @@ static void cache_class_file_data(InstanceKlass* new_ik, const ClassFileStream* const jint stream_len = new_stream->length(); JvmtiCachedClassFileData* p = (JvmtiCachedClassFileData*)NEW_C_HEAP_ARRAY_RETURN_NULL(u1, offset_of(JvmtiCachedClassFileData, data) + stream_len, mtInternal); - if (p == NULL) { + if (p == nullptr) { log_error(jfr, system)("Allocation using C_HEAP_ARRAY for " SIZE_FORMAT " bytes failed in JfrEventClassTransformer::cache_class_file_data", static_cast(offset_of(JvmtiCachedClassFileData, data) + stream_len)); return; @@ -1568,12 +1568,12 @@ static void cache_class_file_data(InstanceKlass* new_ik, const ClassFileStream* // On redefine / retransform, in case an agent modified the class, the original bytes are cached onto the scratch klass. static void transfer_cached_class_file_data(InstanceKlass* ik, InstanceKlass* new_ik, const ClassFileParser& parser, JavaThread* thread) { - assert(ik != NULL, "invariant"); - assert(new_ik != NULL, "invariant"); + assert(ik != nullptr, "invariant"); + assert(new_ik != nullptr, "invariant"); JvmtiCachedClassFileData* const p = ik->get_cached_class_file(); - if (p != NULL) { + if (p != nullptr) { new_ik->set_cached_class_file(p); - ik->set_cached_class_file(NULL); + ik->set_cached_class_file(nullptr); return; } // No cached classfile indicates that no agent modified the klass. @@ -1583,9 +1583,9 @@ static void transfer_cached_class_file_data(InstanceKlass* ik, InstanceKlass* ne } static void rewrite_klass_pointer(InstanceKlass*& ik, InstanceKlass* new_ik, ClassFileParser& parser, const JavaThread* thread) { - assert(ik != NULL, "invariant"); - assert(new_ik != NULL, "invariant"); - assert(thread != NULL, "invariant"); + assert(ik != nullptr, "invariant"); + assert(new_ik != nullptr, "invariant"); + assert(thread != nullptr, "invariant"); assert(IS_EVENT_OR_HOST_KLASS(new_ik), "invariant"); assert(TRACE_ID(ik) == TRACE_ID(new_ik), "invariant"); assert(!thread->has_pending_exception(), "invariant"); @@ -1597,14 +1597,14 @@ static void rewrite_klass_pointer(InstanceKlass*& ik, InstanceKlass* new_ik, Cla // If code size is 1, it is 0xb1, i.e. the return instruction. 
static inline bool is_commit_method_instrumented(const Method* m) { - assert(m != NULL, "invariant"); + assert(m != nullptr, "invariant"); assert(m->name() == commit, "invariant"); assert(m->constMethod()->code_size() > 0, "invariant"); return m->constMethod()->code_size() > 1; } static bool bless_static_commit_method(const Array* methods) { - assert(methods != NULL, "invariant"); + assert(methods != nullptr, "invariant"); for (int i = 0; i < methods->length(); ++i) { const Method* const m = methods->at(i); // Method is of the form "static void UserEvent::commit(...)" and instrumented @@ -1617,7 +1617,7 @@ static bool bless_static_commit_method(const Array* methods) { } static void bless_instance_commit_method(const Array* methods) { - assert(methods != NULL, "invariant"); + assert(methods != nullptr, "invariant"); for (int i = 0; i < methods->length(); ++i) { const Method* const m = methods->at(i); // Method is of the form "void UserEvent:commit()" and instrumented @@ -1634,10 +1634,10 @@ static void bless_instance_commit_method(const Array* methods) { // It is primarily the class file schema extended instance 'commit()V' method. // Jdk events can also define a static commit method with an arbitrary signature. static void bless_commit_method(const InstanceKlass* new_ik) { - assert(new_ik != NULL, "invariant"); + assert(new_ik != nullptr, "invariant"); assert(JdkJfrEvent::is_subklass(new_ik), "invariant"); const Array* const methods = new_ik->methods(); - if (new_ik->class_loader() == NULL) { + if (new_ik->class_loader() == nullptr) { // JDK events are allowed an additional commit method that is static. // Search precedence must therefore inspect static methods first. if (bless_static_commit_method(methods)) { @@ -1648,45 +1648,45 @@ static void bless_commit_method(const InstanceKlass* new_ik) { } static void copy_traceid(const InstanceKlass* ik, const InstanceKlass* new_ik) { - assert(ik != NULL, "invariant"); - assert(new_ik != NULL, "invariant"); + assert(ik != nullptr, "invariant"); + assert(new_ik != nullptr, "invariant"); new_ik->set_trace_id(ik->trace_id()); assert(TRACE_ID(ik) == TRACE_ID(new_ik), "invariant"); } static const Klass* klass_being_redefined(const InstanceKlass* ik, JvmtiThreadState* state) { - assert(ik != NULL, "invariant"); - assert(state != NULL, "invariant"); + assert(ik != nullptr, "invariant"); + assert(state != nullptr, "invariant"); const GrowableArray* const redef_klasses = state->get_classes_being_redefined(); - if (redef_klasses == NULL || redef_klasses->is_empty()) { - return NULL; + if (redef_klasses == nullptr || redef_klasses->is_empty()) { + return nullptr; } for (int i = 0; i < redef_klasses->length(); ++i) { const Klass* const existing_klass = redef_klasses->at(i); - assert(existing_klass != NULL, "invariant"); + assert(existing_klass != nullptr, "invariant"); if (ik->name() == existing_klass->name() && ik->class_loader_data() == existing_klass->class_loader_data()) { // 'ik' is a scratch klass. Return the klass being redefined. return existing_klass; } } - return NULL; + return nullptr; } // Redefining / retransforming? static const Klass* find_existing_klass(const InstanceKlass* ik, JavaThread* thread) { - assert(ik != NULL, "invariant"); - assert(thread != NULL, "invariant"); + assert(ik != nullptr, "invariant"); + assert(thread != nullptr, "invariant"); JvmtiThreadState* const state = thread->jvmti_thread_state(); - return state != NULL ? klass_being_redefined(ik, state) : NULL; + return state != nullptr ? 
klass_being_redefined(ik, state) : nullptr; } static InstanceKlass* create_new_instance_klass(InstanceKlass* ik, ClassFileStream* stream, TRAPS) { - assert(stream != NULL, "invariant"); + assert(stream != nullptr, "invariant"); ResourceMark rm(THREAD); ClassLoaderData* const cld = ik->class_loader_data(); Handle pd(THREAD, ik->protection_domain()); Symbol* const class_name = ik->name(); - const char* const klass_name = class_name != NULL ? class_name->as_C_string() : ""; + const char* const klass_name = class_name != nullptr ? class_name->as_C_string() : ""; ClassLoadInfo cl_info(pd); ClassFileParser new_parser(stream, class_name, @@ -1697,30 +1697,30 @@ static InstanceKlass* create_new_instance_klass(InstanceKlass* ik, ClassFileStre if (HAS_PENDING_EXCEPTION) { log_pending_exception(PENDING_EXCEPTION); CLEAR_PENDING_EXCEPTION; - return NULL; + return nullptr; } const ClassInstanceInfo* cl_inst_info = cl_info.class_hidden_info_ptr(); InstanceKlass* const new_ik = new_parser.create_instance_klass(false, *cl_inst_info, THREAD); if (HAS_PENDING_EXCEPTION) { log_pending_exception(PENDING_EXCEPTION); CLEAR_PENDING_EXCEPTION; - return NULL; + return nullptr; } - assert(new_ik != NULL, "invariant"); - assert(new_ik->name() != NULL, "invariant"); + assert(new_ik != nullptr, "invariant"); + assert(new_ik->name() != nullptr, "invariant"); assert(strncmp(ik->name()->as_C_string(), new_ik->name()->as_C_string(), strlen(ik->name()->as_C_string())) == 0, "invariant"); return new_ik; } static InstanceKlass* create_instance_klass(InstanceKlass*& ik, ClassFileStream* stream, bool is_initial_load, JavaThread* thread) { - if (stream == NULL) { + if (stream == nullptr) { if (is_initial_load) { log_error(jfr, system)("JfrEventClassTransformer: unable to create ClassFileStream for %s", ik->external_name()); } - return NULL; + return nullptr; } InstanceKlass* const new_ik = create_new_instance_klass(ik, stream, thread); - if (new_ik == NULL) { + if (new_ik == nullptr) { if (is_initial_load) { log_error(jfr, system)("JfrEventClassTransformer: unable to create InstanceKlass for %s", ik->external_name()); } @@ -1731,20 +1731,20 @@ static InstanceKlass* create_instance_klass(InstanceKlass*& ik, ClassFileStream* static void transform(InstanceKlass*& ik, ClassFileParser& parser, JavaThread* thread) { assert(IS_EVENT_OR_HOST_KLASS(ik), "invariant"); bool is_instrumented = false; - ClassFileStream* stream = NULL; + ClassFileStream* stream = nullptr; const Klass* const existing_klass = find_existing_klass(ik, thread); - if (existing_klass != NULL) { + if (existing_klass != nullptr) { // There is already a klass defined, implying we are redefining / retransforming. stream = retransform_bytes(existing_klass, parser, is_instrumented, thread); } else { // No existing klass, implying this is the initial load. stream = JdkJfrEvent::is(ik) ? 
schema_extend_event_klass_bytes(ik, parser, thread) : schema_extend_event_subklass_bytes(ik, parser, is_instrumented, thread); } - InstanceKlass* const new_ik = create_instance_klass(ik, stream, existing_klass == NULL, thread); - if (new_ik == NULL) { + InstanceKlass* const new_ik = create_instance_klass(ik, stream, existing_klass == nullptr, thread); + if (new_ik == nullptr) { return; } - if (existing_klass != NULL) { + if (existing_klass != nullptr) { transfer_cached_class_file_data(ik, new_ik, parser, thread); } else { cache_class_file_data(new_ik, stream, thread); @@ -1762,7 +1762,7 @@ static void transform(InstanceKlass*& ik, ClassFileParser& parser, JavaThread* t // instance of the passed in InstanceKlass. The original 'ik' will be set onto the passed parser, // for destruction when the parser goes out of scope. void JfrEventClassTransformer::on_klass_creation(InstanceKlass*& ik, ClassFileParser& parser, TRAPS) { - assert(ik != NULL, "invariant"); + assert(ik != nullptr, "invariant"); assert(IS_EVENT_OR_HOST_KLASS(ik), "invariant"); if (ik->is_abstract() && !JdkJfrEvent::is(ik)) { assert(JdkJfrEvent::is_subklass(ik), "invariant"); @@ -1775,7 +1775,7 @@ void JfrEventClassTransformer::on_klass_creation(InstanceKlass*& ik, ClassFilePa } static bool is_static_commit_method_blessed(const Array* methods) { - assert(methods != NULL, "invariant"); + assert(methods != nullptr, "invariant"); for (int i = 0; i < methods->length(); ++i) { const Method* const m = methods->at(i); // Must be of form: static void UserEvent::commit(...) @@ -1787,7 +1787,7 @@ static bool is_static_commit_method_blessed(const Array* methods) { } static bool is_instance_commit_method_blessed(const Array* methods) { - assert(methods != NULL, "invariant"); + assert(methods != nullptr, "invariant"); for (int i = 0; i < methods->length(); ++i) { const Method* const m = methods->at(i); // Must be of form: void UserEvent::commit() @@ -1799,10 +1799,10 @@ static bool is_instance_commit_method_blessed(const Array* methods) { } bool JfrEventClassTransformer::is_instrumented(const InstanceKlass* ik) { - assert(ik != NULL, "invariant"); + assert(ik != nullptr, "invariant"); assert(JdkJfrEvent::is_subklass(ik), "invariant"); const Array* const methods = ik->methods(); - if (ik->class_loader() == NULL) { + if (ik->class_loader() == nullptr) { // JDK events are allowed an additional commit method that is static. // Search precedence must therefore inspect static methods first. if (is_static_commit_method_blessed(methods)) { diff --git a/src/hotspot/share/jfr/instrumentation/jfrJvmtiAgent.cpp b/src/hotspot/share/jfr/instrumentation/jfrJvmtiAgent.cpp index 665dc8d7e676e..f2007231c2240 100644 --- a/src/hotspot/share/jfr/instrumentation/jfrJvmtiAgent.cpp +++ b/src/hotspot/share/jfr/instrumentation/jfrJvmtiAgent.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
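
The jfrJvmtiAgent.cpp hunks that begin here are again mostly NULL-to-nullptr conversions around the agent's JVMTI plumbing: creating a jvmtiEnv, adding capabilities, registering callbacks and enabling the ClassFileLoadHook event. As a rough orientation for readers less familiar with that API, the same pattern in an ordinary standalone JVMTI agent looks roughly like the sketch below; the function and variable names are illustrative, only the jvmti.h calls are real, and this is not how HotSpot wires JFR internally (JFR obtains its environment from the main VM rather than through Agent_OnLoad):

#include <cstring>
#include <jvmti.h>

// Hypothetical standalone agent: observe (or rewrite) class bytes as they load.
static void JNICALL on_class_file_load_hook(jvmtiEnv* jvmti, JNIEnv* jni,
                                            jclass class_being_redefined, jobject loader,
                                            const char* name, jobject protection_domain,
                                            jint class_data_len, const unsigned char* class_data,
                                            jint* new_class_data_len, unsigned char** new_class_data) {
  // Leaving the out-parameters untouched keeps the original class bytes.
}

extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* reserved) {
  jvmtiEnv* jvmti = nullptr;
  if (vm->GetEnv(reinterpret_cast<void**>(&jvmti), JVMTI_VERSION_1_2) != JNI_OK) {
    return JNI_ERR;
  }
  jvmtiCapabilities caps;
  memset(&caps, 0, sizeof(caps));
  caps.can_retransform_classes = 1;   // required to retransform already-loaded classes
  if (jvmti->AddCapabilities(&caps) != JVMTI_ERROR_NONE) {
    return JNI_ERR;
  }
  jvmtiEventCallbacks callbacks;
  memset(&callbacks, 0, sizeof(callbacks));
  callbacks.ClassFileLoadHook = &on_class_file_load_hook;
  if (jvmti->SetEventCallbacks(&callbacks, (jint)sizeof(callbacks)) != JVMTI_ERROR_NONE ||
      jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_CLASS_FILE_LOAD_HOOK, nullptr) != JVMTI_ERROR_NONE) {
    return JNI_ERR;
  }
  return JNI_OK;
}
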
* * This code is free software; you can redistribute it and/or modify it @@ -40,17 +40,17 @@ #include "utilities/exceptions.hpp" static const size_t ERROR_MSG_BUFFER_SIZE = 256; -static JfrJvmtiAgent* agent = NULL; -static jvmtiEnv* jfr_jvmti_env = NULL; +static JfrJvmtiAgent* agent = nullptr; +static jvmtiEnv* jfr_jvmti_env = nullptr; static void check_jvmti_error(jvmtiEnv* jvmti, jvmtiError errnum, const char* str) { if (errnum != JVMTI_ERROR_NONE) { - char* errnum_str = NULL; + char* errnum_str = nullptr; jvmti->GetErrorName(errnum, &errnum_str); log_error(jfr, system)("ERROR: JfrJvmtiAgent: " INT32_FORMAT " (%s): %s\n", errnum, - NULL == errnum_str ? "Unknown" : errnum_str, - NULL == str ? "" : str); + nullptr == errnum_str ? "Unknown" : errnum_str, + nullptr == str ? "" : str); } } @@ -58,14 +58,14 @@ static bool set_event_notification_mode(jvmtiEventMode mode, jvmtiEvent event, jthread event_thread, ...) { - assert(jfr_jvmti_env != NULL, "invariant"); + assert(jfr_jvmti_env != nullptr, "invariant"); const jvmtiError jvmti_ret_code = jfr_jvmti_env->SetEventNotificationMode(mode, event, event_thread); check_jvmti_error(jfr_jvmti_env, jvmti_ret_code, "SetEventNotificationMode"); return jvmti_ret_code == JVMTI_ERROR_NONE; } static bool update_class_file_load_hook_event(jvmtiEventMode mode) { - return set_event_notification_mode(mode, JVMTI_EVENT_CLASS_FILE_LOAD_HOOK, NULL); + return set_event_notification_mode(mode, JVMTI_EVENT_CLASS_FILE_LOAD_HOOK, nullptr); } // jvmti event callbacks require C linkage @@ -79,7 +79,7 @@ extern "C" void JNICALL jfr_on_class_file_load_hook(jvmtiEnv *jvmti_env, const unsigned char* class_data, jint* new_class_data_len, unsigned char** new_class_data) { - if (class_being_redefined == NULL) { + if (class_being_redefined == nullptr) { return; } JavaThread* jt = JavaThread::thread_from_jni_environment(jni_env); @@ -100,7 +100,7 @@ static jclass* create_classes_array(jint classes_count, TRAPS) { DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(THREAD)); ThreadInVMfromNative tvmfn(THREAD); jclass* const classes = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, jclass, classes_count); - if (NULL == classes) { + if (nullptr == classes) { char error_buffer[ERROR_MSG_BUFFER_SIZE]; jio_snprintf(error_buffer, ERROR_MSG_BUFFER_SIZE, "Thread local allocation (native) of " SIZE_FORMAT " bytes failed " @@ -119,7 +119,7 @@ static void log_and_throw(jvmtiError error, TRAPS) { const char base_error_msg[] = "JfrJvmtiAgent::retransformClasses failed: "; size_t length = sizeof base_error_msg; // includes terminating null const char* const jvmti_error_name = JvmtiUtil::error_name(error); - assert(jvmti_error_name != NULL, "invariant"); + assert(jvmti_error_name != nullptr, "invariant"); length += strlen(jvmti_error_name); char* error_msg = NEW_RESOURCE_ARRAY(char, length); jio_snprintf(error_msg, length, "%s%s", base_error_msg, jvmti_error_name); @@ -132,7 +132,7 @@ static void log_and_throw(jvmtiError error, TRAPS) { } static void check_exception_and_log(JNIEnv* env, TRAPS) { - assert(env != NULL, "invariant"); + assert(env != nullptr, "invariant"); if (env->ExceptionOccurred()) { // array index out of bound DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(THREAD)); @@ -147,8 +147,8 @@ static bool is_valid_jvmti_phase() { } void JfrJvmtiAgent::retransform_classes(JNIEnv* env, jobjectArray classes_array, TRAPS) { - assert(env != NULL, "invariant"); - assert(classes_array != NULL, "invariant"); + assert(env != nullptr, "invariant"); + assert(classes_array != nullptr, 
"invariant"); assert(is_valid_jvmti_phase(), "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(THREAD)); const jint classes_count = env->GetArrayLength(classes_array); @@ -157,7 +157,7 @@ void JfrJvmtiAgent::retransform_classes(JNIEnv* env, jobjectArray classes_array, } ResourceMark rm(THREAD); jclass* const classes = create_classes_array(classes_count, CHECK); - assert(classes != NULL, "invariant"); + assert(classes != nullptr, "invariant"); for (jint i = 0; i < classes_count; i++) { jclass clz = (jclass)env->GetObjectArrayElement(classes_array, i); check_exception_and_log(env, THREAD); @@ -182,7 +182,7 @@ void JfrJvmtiAgent::retransform_classes(JNIEnv* env, jobjectArray classes_array, } static bool register_callbacks(JavaThread* jt) { - assert(jfr_jvmti_env != NULL, "invariant"); + assert(jfr_jvmti_env != nullptr, "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt)); jvmtiEventCallbacks callbacks; /* Set callbacks */ @@ -193,7 +193,7 @@ static bool register_callbacks(JavaThread* jt) { } static bool register_capabilities(JavaThread* jt) { - assert(jfr_jvmti_env != NULL, "invariant"); + assert(jfr_jvmti_env != nullptr, "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt)); jvmtiCapabilities capabilities; /* Add JVMTI capabilities */ @@ -206,7 +206,7 @@ static bool register_capabilities(JavaThread* jt) { } static jint create_jvmti_env(JavaThread* jt) { - assert(jfr_jvmti_env == NULL, "invariant"); + assert(jfr_jvmti_env == nullptr, "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt)); extern struct JavaVM_ main_vm; JavaVM* vm = &main_vm; @@ -214,7 +214,7 @@ static jint create_jvmti_env(JavaThread* jt) { } static bool unregister_callbacks(JavaThread* jt) { - assert(jfr_jvmti_env != NULL, "invariant"); + assert(jfr_jvmti_env != nullptr, "invariant"); jvmtiEventCallbacks callbacks; /* Set empty callbacks */ memset(&callbacks, 0, sizeof(callbacks)); @@ -228,24 +228,24 @@ JfrJvmtiAgent::JfrJvmtiAgent() {} JfrJvmtiAgent::~JfrJvmtiAgent() { JavaThread* jt = JavaThread::current(); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt)); - if (jfr_jvmti_env != NULL) { + if (jfr_jvmti_env != nullptr) { ThreadToNativeFromVM transition(jt); update_class_file_load_hook_event(JVMTI_DISABLE); unregister_callbacks(jt); jfr_jvmti_env->DisposeEnvironment(); - jfr_jvmti_env = NULL; + jfr_jvmti_env = nullptr; } } static bool initialize(JavaThread* jt) { - assert(jt != NULL, "invariant"); + assert(jt != nullptr, "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt)); ThreadToNativeFromVM transition(jt); if (create_jvmti_env(jt) != JNI_OK) { - assert(jfr_jvmti_env == NULL, "invariant"); + assert(jfr_jvmti_env == nullptr, "invariant"); return false; } - assert(jfr_jvmti_env != NULL, "invariant"); + assert(jfr_jvmti_env != nullptr, "invariant"); if (!register_capabilities(jt)) { return false; } @@ -265,27 +265,27 @@ static void log_and_throw_illegal_state_exception(TRAPS) { } bool JfrJvmtiAgent::create() { - assert(agent == NULL, "invariant"); + assert(agent == nullptr, "invariant"); JavaThread* const jt = JavaThread::current(); if (!is_valid_jvmti_phase()) { log_and_throw_illegal_state_exception(jt); return false; } agent = new JfrJvmtiAgent(); - if (agent == NULL) { + if (agent == nullptr) { return false; } if (!initialize(jt)) { delete agent; - agent = NULL; + agent = nullptr; return false; } return true; } void JfrJvmtiAgent::destroy() { - if (agent != NULL) { + if (agent != nullptr) { delete agent; - agent = NULL; 
+ agent = nullptr; } } diff --git a/src/hotspot/share/jfr/jni/jfrJavaCall.cpp b/src/hotspot/share/jfr/jni/jfrJavaCall.cpp index 6da6eab26da21..9eaf506db7309 100644 --- a/src/hotspot/share/jfr/jni/jfrJavaCall.cpp +++ b/src/hotspot/share/jfr/jni/jfrJavaCall.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,12 +39,12 @@ static bool is_large_value(const JavaValue& value) { #endif // ASSERT static Symbol* resolve(const char* str) { - assert(str != NULL, "invariant"); + assert(str != nullptr, "invariant"); return SymbolTable::new_symbol(str); } static Klass* resolve(Symbol* k_sym, TRAPS) { - assert(k_sym != NULL, "invariant"); + assert(k_sym != nullptr, "invariant"); return SystemDictionary::resolve_or_fail(k_sym, true, THREAD); } @@ -54,7 +54,6 @@ JfrJavaArguments::Parameters::Parameters() : _storage_index(0), _java_stack_slot } void JfrJavaArguments::Parameters::push(const JavaValue& value) { - assert(_storage != NULL, "invariant"); assert(!is_large_value(value), "invariant"); assert(_storage_index < SIZE, "invariant"); _storage[_storage_index++] = value; @@ -62,7 +61,6 @@ void JfrJavaArguments::Parameters::push(const JavaValue& value) { } void JfrJavaArguments::Parameters::push_large(const JavaValue& value) { - assert(_storage != NULL, "invariant"); assert(is_large_value(value), "invariant"); assert(_storage_index < SIZE, "invariant"); _storage[_storage_index++] = value; @@ -70,8 +68,7 @@ void JfrJavaArguments::Parameters::push_large(const JavaValue& value) { } void JfrJavaArguments::Parameters::set_receiver(const oop receiver) { - assert(_storage != NULL, "invariant"); - assert(receiver != NULL, "invariant"); + assert(receiver != nullptr, "invariant"); JavaValue value(T_OBJECT); value.set_oop(receiver); _storage[0] = value; @@ -88,7 +85,6 @@ oop JfrJavaArguments::Parameters::receiver() const { } bool JfrJavaArguments::Parameters::has_receiver() const { - assert(_storage != NULL, "invariant"); assert(_storage_index >= 1, "invariant"); assert(_java_stack_slots >= 1, "invariant"); return _storage[0].get_type() == T_OBJECT; @@ -184,92 +180,92 @@ void JfrJavaArguments::Parameters::copy(JavaCallArguments& args, TRAPS) const { } } -JfrJavaArguments::JfrJavaArguments(JavaValue* result) : _result(result), _klass(NULL), _name(NULL), _signature(NULL), _array_length(-1) { - assert(result != NULL, "invariant"); +JfrJavaArguments::JfrJavaArguments(JavaValue* result) : _result(result), _klass(nullptr), _name(nullptr), _signature(nullptr), _array_length(-1) { + assert(result != nullptr, "invariant"); } JfrJavaArguments::JfrJavaArguments(JavaValue* result, const char* klass_name, const char* name, const char* signature, TRAPS) : _result(result), - _klass(NULL), - _name(NULL), - _signature(NULL), + _klass(nullptr), + _name(nullptr), + _signature(nullptr), _array_length(-1) { - assert(result != NULL, "invariant"); - if (klass_name != NULL) { + assert(result != nullptr, "invariant"); + if (klass_name != nullptr) { set_klass(klass_name, CHECK); } - if (name != NULL) { + if (name != nullptr) { set_name(name); } - if (signature != NULL) { + if (signature != nullptr) { set_signature(signature); } } JfrJavaArguments::JfrJavaArguments(JavaValue* result, const Klass* klass, const Symbol* name, const Symbol* signature) : _result(result), - 
_klass(NULL), - _name(NULL), - _signature(NULL), + _klass(nullptr), + _name(nullptr), + _signature(nullptr), _array_length(-1) { - assert(result != NULL, "invariant"); - if (klass != NULL) { + assert(result != nullptr, "invariant"); + if (klass != nullptr) { set_klass(klass); } - if (name != NULL) { + if (name != nullptr) { set_name(name); } - if (signature != NULL) { + if (signature != nullptr) { set_signature(signature); } } Klass* JfrJavaArguments::klass() const { - assert(_klass != NULL, "invariant"); + assert(_klass != nullptr, "invariant"); return const_cast(_klass); } void JfrJavaArguments::set_klass(const char* klass_name, TRAPS) { - assert(klass_name != NULL, "invariant"); + assert(klass_name != nullptr, "invariant"); Symbol* const k_sym = resolve(klass_name); - assert(k_sym != NULL, "invariant"); + assert(k_sym != nullptr, "invariant"); const Klass* const klass = resolve(k_sym, CHECK); set_klass(klass); } void JfrJavaArguments::set_klass(const Klass* klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); _klass = klass; } Symbol* JfrJavaArguments::name() const { - assert(_name != NULL, "invariant"); + assert(_name != nullptr, "invariant"); return const_cast(_name); } void JfrJavaArguments::set_name(const char* name) { - assert(name != NULL, "invariant"); + assert(name != nullptr, "invariant"); const Symbol* const sym = resolve(name); set_name(sym); } void JfrJavaArguments::set_name(const Symbol* name) { - assert(name != NULL, "invariant"); + assert(name != nullptr, "invariant"); _name = name; } Symbol* JfrJavaArguments::signature() const { - assert(_signature != NULL, "invariant"); + assert(_signature != nullptr, "invariant"); return const_cast(_signature); } void JfrJavaArguments::set_signature(const char* signature) { - assert(signature != NULL, "invariant"); + assert(signature != nullptr, "invariant"); const Symbol* const sym = resolve(signature); set_signature(sym); } void JfrJavaArguments::set_signature(const Symbol* signature) { - assert(signature != NULL, "invariant"); + assert(signature != nullptr, "invariant"); _signature = signature; } @@ -283,7 +279,7 @@ void JfrJavaArguments::set_array_length(int length) { } JavaValue* JfrJavaArguments::result() const { - assert(_result != NULL, "invariant"); + assert(_result != nullptr, "invariant"); return const_cast(_result); } @@ -348,7 +344,7 @@ void JfrJavaArguments::copy(JavaCallArguments& args, TRAPS) { } void JfrJavaCall::call_static(JfrJavaArguments* args, TRAPS) { - assert(args != NULL, "invariant"); + assert(args != nullptr, "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); ResourceMark rm(THREAD); HandleMark hm(THREAD); @@ -358,7 +354,7 @@ void JfrJavaCall::call_static(JfrJavaArguments* args, TRAPS) { } void JfrJavaCall::call_special(JfrJavaArguments* args, TRAPS) { - assert(args != NULL, "invariant"); + assert(args != nullptr, "invariant"); assert(args->has_receiver(), "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); ResourceMark rm(THREAD); @@ -369,7 +365,7 @@ void JfrJavaCall::call_special(JfrJavaArguments* args, TRAPS) { } void JfrJavaCall::call_virtual(JfrJavaArguments* args, TRAPS) { - assert(args != NULL, "invariant"); + assert(args != nullptr, "invariant"); assert(args->has_receiver(), "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); ResourceMark rm(THREAD); diff --git a/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp b/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp index 4595484222355..0f147f137dc40 100644 --- 
a/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp +++ b/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp @@ -56,7 +56,7 @@ #ifdef ASSERT static void check_java_thread_state(JavaThread* t, JavaThreadState state) { - assert(t != NULL, "invariant"); + assert(t != nullptr, "invariant"); assert(t->is_Java_thread(), "invariant"); assert(t->thread_state() == state, "invariant"); } @@ -86,7 +86,7 @@ jobject JfrJavaSupport::local_jni_handle(const oop obj, JavaThread* t) { jobject JfrJavaSupport::local_jni_handle(const jobject handle, JavaThread* t) { DEBUG_ONLY(check_java_thread_in_vm(t)); const oop obj = JNIHandles::resolve(handle); - return obj == NULL ? NULL : local_jni_handle(obj, t); + return obj == nullptr ? nullptr : local_jni_handle(obj, t); } void JfrJavaSupport::destroy_local_jni_handle(jobject handle) { @@ -101,7 +101,7 @@ jobject JfrJavaSupport::global_jni_handle(const oop obj, JavaThread* t) { jobject JfrJavaSupport::global_jni_handle(const jobject handle, JavaThread* t) { const oop obj = JNIHandles::resolve(handle); - return obj == NULL ? NULL : global_jni_handle(obj, t); + return obj == nullptr ? nullptr : global_jni_handle(obj, t); } void JfrJavaSupport::destroy_global_jni_handle(jobject handle) { @@ -116,7 +116,7 @@ jweak JfrJavaSupport::global_weak_jni_handle(const oop obj, JavaThread* t) { jweak JfrJavaSupport::global_weak_jni_handle(const jobject handle, JavaThread* t) { const oop obj = JNIHandles::resolve(handle); - return obj == NULL ? NULL : global_weak_jni_handle(obj, t); + return obj == nullptr ? nullptr : global_weak_jni_handle(obj, t); } void JfrJavaSupport::destroy_global_weak_jni_handle(jweak handle) { @@ -147,7 +147,7 @@ void JfrJavaSupport::call_virtual(JfrJavaArguments* args, TRAPS) { } void JfrJavaSupport::notify_all(jobject object, TRAPS) { - assert(object != NULL, "invariant"); + assert(object != nullptr, "invariant"); DEBUG_ONLY(check_java_thread_in_vm(THREAD)); HandleMark hm(THREAD); Handle h_obj(THREAD, resolve_non_null(object)); @@ -162,9 +162,9 @@ void JfrJavaSupport::notify_all(jobject object, TRAPS) { * Object construction */ static void object_construction(JfrJavaArguments* args, JavaValue* result, InstanceKlass* klass, TRAPS) { - assert(args != NULL, "invariant"); - assert(result != NULL, "invariant"); - assert(klass != NULL, "invariant"); + assert(args != nullptr, "invariant"); + assert(result != nullptr, "invariant"); + assert(klass != nullptr, "invariant"); assert(klass->is_initialized(), "invariant"); HandleMark hm(THREAD); @@ -179,9 +179,9 @@ static void object_construction(JfrJavaArguments* args, JavaValue* result, Insta } static void array_construction(JfrJavaArguments* args, JavaValue* result, InstanceKlass* klass, int array_length, TRAPS) { - assert(args != NULL, "invariant"); - assert(result != NULL, "invariant"); - assert(klass != NULL, "invariant"); + assert(args != nullptr, "invariant"); + assert(result != nullptr, "invariant"); + assert(klass != nullptr, "invariant"); assert(klass->is_initialized(), "invariant"); Klass* const ak = klass->array_klass(THREAD); @@ -192,8 +192,8 @@ static void array_construction(JfrJavaArguments* args, JavaValue* result, Instan } static void create_object(JfrJavaArguments* args, JavaValue* result, TRAPS) { - assert(args != NULL, "invariant"); - assert(result != NULL, "invariant"); + assert(args != nullptr, "invariant"); + assert(result != nullptr, "invariant"); assert(result->get_type() == T_OBJECT, "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); @@ -210,10 +210,10 @@ static void 
create_object(JfrJavaArguments* args, JavaValue* result, TRAPS) { } static void handle_result(JavaValue* result, bool global_ref, JavaThread* t) { - assert(result != NULL, "invariant"); + assert(result != nullptr, "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(t)); const oop result_oop = result->get_oop(); - if (result_oop == NULL) { + if (result_oop == nullptr) { return; } result->set_jobject(global_ref ? @@ -222,31 +222,31 @@ static void handle_result(JavaValue* result, bool global_ref, JavaThread* t) { } void JfrJavaSupport::new_object(JfrJavaArguments* args, TRAPS) { - assert(args != NULL, "invariant"); + assert(args != nullptr, "invariant"); DEBUG_ONLY(check_java_thread_in_vm(THREAD)); create_object(args, args->result(), THREAD); } void JfrJavaSupport::new_object_local_ref(JfrJavaArguments* args, TRAPS) { - assert(args != NULL, "invariant"); + assert(args != nullptr, "invariant"); DEBUG_ONLY(check_java_thread_in_vm(THREAD)); JavaValue* const result = args->result(); - assert(result != NULL, "invariant"); + assert(result != nullptr, "invariant"); create_object(args, result, CHECK); handle_result(result, false, THREAD); } void JfrJavaSupport::new_object_global_ref(JfrJavaArguments* args, TRAPS) { - assert(args != NULL, "invariant"); + assert(args != nullptr, "invariant"); DEBUG_ONLY(check_java_thread_in_vm(THREAD)); JavaValue* const result = args->result(); - assert(result != NULL, "invariant"); + assert(result != nullptr, "invariant"); create_object(args, result, CHECK); handle_result(result, true, THREAD); } jstring JfrJavaSupport::new_string(const char* c_str, TRAPS) { - assert(c_str != NULL, "invariant"); + assert(c_str != nullptr, "invariant"); DEBUG_ONLY(check_java_thread_in_vm(THREAD)); const oop result = java_lang_String::create_oop_from_str(c_str, THREAD); return (jstring)local_jni_handle(result, THREAD); @@ -289,7 +289,7 @@ jobject JfrJavaSupport::new_java_lang_Long(jlong value, TRAPS) { } void JfrJavaSupport::set_array_element(jobjectArray arr, jobject element, int index, JavaThread* t) { - assert(arr != NULL, "invariant"); + assert(arr != nullptr, "invariant"); DEBUG_ONLY(check_java_thread_in_vm(t)); HandleMark hm(t); objArrayHandle a(t, (objArrayOop)resolve_non_null(arr)); @@ -301,38 +301,38 @@ void JfrJavaSupport::set_array_element(jobjectArray arr, jobject element, int in */ static void write_int_field(const Handle& h_oop, fieldDescriptor* fd, jint value) { assert(h_oop.not_null(), "invariant"); - assert(fd != NULL, "invariant"); + assert(fd != nullptr, "invariant"); h_oop->int_field_put(fd->offset(), value); } static void write_float_field(const Handle& h_oop, fieldDescriptor* fd, jfloat value) { assert(h_oop.not_null(), "invariant"); - assert(fd != NULL, "invariant"); + assert(fd != nullptr, "invariant"); h_oop->float_field_put(fd->offset(), value); } static void write_double_field(const Handle& h_oop, fieldDescriptor* fd, jdouble value) { assert(h_oop.not_null(), "invariant"); - assert(fd != NULL, "invariant"); + assert(fd != nullptr, "invariant"); h_oop->double_field_put(fd->offset(), value); } static void write_long_field(const Handle& h_oop, fieldDescriptor* fd, jlong value) { assert(h_oop.not_null(), "invariant"); - assert(fd != NULL, "invariant"); + assert(fd != nullptr, "invariant"); h_oop->long_field_put(fd->offset(), value); } static void write_oop_field(const Handle& h_oop, fieldDescriptor* fd, const oop value) { assert(h_oop.not_null(), "invariant"); - assert(fd != NULL, "invariant"); + assert(fd != nullptr, "invariant"); 
h_oop->obj_field_put(fd->offset(), value); } static void write_specialized_field(JfrJavaArguments* args, const Handle& h_oop, fieldDescriptor* fd, bool static_field) { - assert(args != NULL, "invariant"); + assert(args != nullptr, "invariant"); assert(h_oop.not_null(), "invariant"); - assert(fd != NULL, "invariant"); + assert(fd != nullptr, "invariant"); assert(fd->offset() > 0, "invariant"); assert(args->length() >= 1, "invariant"); @@ -367,9 +367,9 @@ static void write_specialized_field(JfrJavaArguments* args, const Handle& h_oop, } static void read_specialized_field(JavaValue* result, const Handle& h_oop, fieldDescriptor* fd) { - assert(result != NULL, "invariant"); + assert(result != nullptr, "invariant"); assert(h_oop.not_null(), "invariant"); - assert(fd != NULL, "invariant"); + assert(fd != nullptr, "invariant"); assert(fd->offset() > 0, "invariant"); switch(fd->field_type()) { @@ -402,18 +402,18 @@ static bool find_field(const InstanceKlass* ik, fieldDescriptor* fd, bool is_static = false, bool allow_super = false) { - assert(ik != NULL, "invariant"); + assert(ik != nullptr, "invariant"); if (allow_super || is_static) { - return ik->find_field(name_symbol, signature_symbol, is_static, fd) != NULL; + return ik->find_field(name_symbol, signature_symbol, is_static, fd) != nullptr; } return ik->find_local_field(name_symbol, signature_symbol, fd); } static void lookup_field(JfrJavaArguments* args, const InstanceKlass* ik, fieldDescriptor* fd, bool static_field) { - assert(args != NULL, "invariant"); - assert(ik != NULL, "invariant"); + assert(args != nullptr, "invariant"); + assert(ik != nullptr, "invariant"); assert(ik->is_initialized(), "invariant"); - assert(fd != NULL, "invariant"); + assert(fd != nullptr, "invariant"); find_field(ik, args->name(), args->signature(), fd, static_field, true); } @@ -431,8 +431,8 @@ static void read_field(JfrJavaArguments* args, JavaValue* result, Thread* thread } static void read_field(JfrJavaArguments* args, JavaValue* result, TRAPS) { - assert(args != NULL, "invariant"); - assert(result != NULL, "invariant"); + assert(args != nullptr, "invariant"); + assert(result != nullptr, "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); result->set_oop(nullptr); // Initialize result in case klass initialize throws. 
InstanceKlass* const klass = static_cast(args->klass()); @@ -441,7 +441,7 @@ static void read_field(JfrJavaArguments* args, JavaValue* result, TRAPS) { } static void write_field(JfrJavaArguments* args, TRAPS) { - assert(args != NULL, "invariant"); + assert(args != nullptr, "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); InstanceKlass* const klass = static_cast(args->klass()); @@ -458,20 +458,20 @@ static void write_field(JfrJavaArguments* args, TRAPS) { } void JfrJavaSupport::set_field(JfrJavaArguments* args, TRAPS) { - assert(args != NULL, "invariant"); + assert(args != nullptr, "invariant"); write_field(args, THREAD); } void JfrJavaSupport::get_field(JfrJavaArguments* args, TRAPS) { - assert(args != NULL, "invariant"); + assert(args != nullptr, "invariant"); read_field(args, args->result(), THREAD); } static void get_field_ref(JfrJavaArguments* args, bool local_ref, TRAPS) { - assert(args != NULL, "invariant"); + assert(args != nullptr, "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); JavaValue* const result = args->result(); - assert(result != NULL, "invariant"); + assert(result != nullptr, "invariant"); assert(result->get_type() == T_OBJECT, "invariant"); read_field(args, result, CHECK); const oop obj = result->get_oop(); @@ -498,7 +498,7 @@ void JfrJavaSupport::get_field_global_ref(JfrJavaArguments* args, TRAPS) { */ Klass* JfrJavaSupport::klass(const jobject handle) { const oop obj = resolve_non_null(handle); - assert(obj != NULL, "invariant"); + assert(obj != nullptr, "invariant"); return obj->klass(); } @@ -508,13 +508,13 @@ static char* allocate_string(bool c_heap, int length, Thread* thread) { } const char* JfrJavaSupport::c_str(oop string, Thread* thread, bool c_heap /* false */) { - char* str = NULL; + char* str = nullptr; const typeArrayOop value = java_lang_String::value(string); - if (value != NULL) { + if (value != nullptr) { const int length = java_lang_String::utf8_length(string, value); str = allocate_string(c_heap, length + 1, thread); - if (str == NULL) { - return NULL; + if (str == nullptr) { + return nullptr; } java_lang_String::as_utf8_string(string, value, str, length + 1); } @@ -522,14 +522,14 @@ const char* JfrJavaSupport::c_str(oop string, Thread* thread, bool c_heap /* fal } const char* JfrJavaSupport::c_str(jstring string, Thread* thread, bool c_heap /* false */) { - return string != NULL ? c_str(resolve_non_null(string), thread, c_heap) : NULL; + return string != nullptr ? 
c_str(resolve_non_null(string), thread, c_heap) : nullptr; } /* * Exceptions and errors */ static void create_and_throw(Symbol* name, const char* message, TRAPS) { - assert(name != NULL, "invariant"); + assert(name != nullptr, "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); assert(!HAS_PENDING_EXCEPTION, "invariant"); THROW_MSG(name, message); @@ -608,7 +608,7 @@ void JfrJavaSupport::set_cause(jthrowable throwable, JavaThread* t) { void JfrJavaSupport::uncaught_exception(jthrowable throwable, JavaThread* t) { DEBUG_ONLY(check_java_thread_in_vm(t)); - assert(throwable != NULL, "invariant"); + assert(throwable != nullptr, "invariant"); set_cause(throwable, t); } @@ -643,7 +643,7 @@ static bool is_jdk_jfr_module_in_readability_graph() { } static void print_module_resolution_error(outputStream* stream) { - assert(stream != NULL, "invariant"); + assert(stream != nullptr, "invariant"); stream->print_cr("Module %s not found.", JDK_JFR_MODULE_NAME); stream->print_cr("Flight Recorder can not be enabled."); } @@ -654,7 +654,7 @@ bool JfrJavaSupport::is_jdk_jfr_module_available() { bool JfrJavaSupport::is_jdk_jfr_module_available(outputStream* stream, TRAPS) { if (!JfrJavaSupport::is_jdk_jfr_module_available()) { - if (stream != NULL) { + if (stream != nullptr) { print_module_resolution_error(stream); } return false; @@ -664,9 +664,10 @@ bool JfrJavaSupport::is_jdk_jfr_module_available(outputStream* stream, TRAPS) { typedef JfrOopTraceId AccessThreadTraceId; + static JavaThread* get_native(ThreadsListHandle& tlh, jobject thread) { - JavaThread* native_thread = NULL; - (void)tlh.cv_internal_thread_to_JavaThread(thread, &native_thread, NULL); + JavaThread* native_thread = nullptr; + (void)tlh.cv_internal_thread_to_JavaThread(thread, &native_thread, nullptr); return native_thread; } @@ -777,7 +778,7 @@ bool JfrJavaSupport::is_excluded(Thread* thread) { static const Klass* get_configuration_field_descriptor(const Handle& h_mirror, fieldDescriptor* descriptor, TRAPS) { assert(h_mirror.not_null(), "invariant"); - assert(descriptor != NULL, "invariant"); + assert(descriptor != nullptr, "invariant"); Klass* const k = java_lang_Class::as_Klass(h_mirror()); assert(k->is_instance_klass(), "invariant"); InstanceKlass* const ik = InstanceKlass::cast(k); @@ -789,7 +790,7 @@ static const Klass* get_configuration_field_descriptor(const Handle& h_mirror, f vmSymbols::jdk_jfr_internal_event_EventConfiguration_signature(), true, descriptor); - return typed_field_holder != NULL ? typed_field_holder : ik->find_field(vmSymbols::eventConfiguration_name(), + return typed_field_holder != nullptr ? typed_field_holder : ik->find_field(vmSymbols::eventConfiguration_name(), vmSymbols::object_signature(), // untyped true, descriptor); @@ -802,13 +803,13 @@ jobject JfrJavaSupport::get_configuration(jobject clazz, TRAPS) { assert(h_mirror.not_null(), "invariant"); fieldDescriptor configuration_field_descriptor; const Klass* const field_holder = get_configuration_field_descriptor(h_mirror, &configuration_field_descriptor, THREAD); - if (field_holder == NULL) { + if (field_holder == nullptr) { // The only reason should be that klass initialization failed. - return NULL; + return nullptr; } assert(java_lang_Class::as_Klass(h_mirror()) == field_holder, "invariant"); oop configuration_oop = h_mirror->obj_field(configuration_field_descriptor.offset()); - return configuration_oop != NULL ? JfrJavaSupport::local_jni_handle(configuration_oop, THREAD) : NULL; + return configuration_oop != nullptr ? 
JfrJavaSupport::local_jni_handle(configuration_oop, THREAD) : nullptr; } bool JfrJavaSupport::set_configuration(jobject clazz, jobject configuration, TRAPS) { @@ -818,13 +819,13 @@ bool JfrJavaSupport::set_configuration(jobject clazz, jobject configuration, TRA assert(h_mirror.not_null(), "invariant"); fieldDescriptor configuration_field_descriptor; const Klass* const field_holder = get_configuration_field_descriptor(h_mirror, &configuration_field_descriptor, THREAD); - if (field_holder == NULL) { + if (field_holder == nullptr) { // The only reason should be that klass initialization failed. return false; } assert(java_lang_Class::as_Klass(h_mirror()) == field_holder, "invariant"); const oop configuration_oop = JNIHandles::resolve(configuration); - assert(configuration_oop != NULL, "invariant"); + assert(configuration_oop != nullptr, "invariant"); h_mirror->obj_field_put(configuration_field_descriptor.offset(), configuration_oop); return true; } @@ -837,7 +838,7 @@ bool JfrJavaSupport::is_instrumented(jobject clazz, TRAPS) { } bool JfrJavaSupport::on_thread_start(Thread* t) { - assert(t != NULL, "invariant"); + assert(t != nullptr, "invariant"); assert(Thread::current() == t, "invariant"); if (!t->is_Java_thread()) { return true; diff --git a/src/hotspot/share/jfr/jni/jfrJniMethod.cpp b/src/hotspot/share/jfr/jni/jfrJniMethod.cpp index cf4aae2add535..75da90615b69e 100644 --- a/src/hotspot/share/jfr/jni/jfrJniMethod.cpp +++ b/src/hotspot/share/jfr/jni/jfrJniMethod.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -94,7 +94,7 @@ NO_TRANSITION(jstring, jfr_get_pid(JNIEnv* env, jobject jvm)) char pid_buf[32] = { 0 }; jio_snprintf(pid_buf, sizeof(pid_buf), "%d", os::current_process_id()); jstring pid_string = env->NewStringUTF(pid_buf); - return pid_string; // exception pending if NULL + return pid_string; // exception pending if null NO_TRANSITION_END NO_TRANSITION(jlong, jfr_elapsed_frequency(JNIEnv* env, jobject jvm)) @@ -183,7 +183,7 @@ NO_TRANSITION(jboolean, jfr_should_rotate_disk(JNIEnv* env, jobject jvm)) NO_TRANSITION_END NO_TRANSITION(jlong, jfr_get_type_id_from_string(JNIEnv * env, jobject jvm, jstring type)) - const char* type_name = env->GetStringUTFChars(type, NULL); + const char* type_name = env->GetStringUTFChars(type, nullptr); jlong id = JfrType::name_to_id(type_name); env->ReleaseStringUTFChars(type, type_name); return id; @@ -315,10 +315,10 @@ JVM_ENTRY_NO_ENV(void, jfr_set_repository_location(JNIEnv* env, jobject repo, js JVM_END NO_TRANSITION(void, jfr_set_dump_path(JNIEnv* env, jobject jvm, jstring dumppath)) - if (dumppath == NULL) { - JfrEmergencyDump::set_dump_path(NULL); + if (dumppath == nullptr) { + JfrEmergencyDump::set_dump_path(nullptr); } else { - const char* dump_path = env->GetStringUTFChars(dumppath, NULL); + const char* dump_path = env->GetStringUTFChars(dumppath, nullptr); JfrEmergencyDump::set_dump_path(dump_path); env->ReleaseStringUTFChars(dumppath, dump_path); } diff --git a/src/hotspot/share/jfr/jni/jfrJniMethodRegistration.cpp b/src/hotspot/share/jfr/jni/jfrJniMethodRegistration.cpp index 6621a2e9446da..4cbe85f49b819 100644 --- a/src/hotspot/share/jfr/jni/jfrJniMethodRegistration.cpp +++ b/src/hotspot/share/jfr/jni/jfrJniMethodRegistration.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 
2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,9 +31,9 @@ #include "utilities/exceptions.hpp" JfrJniMethodRegistration::JfrJniMethodRegistration(JNIEnv* env) { - assert(env != NULL, "invariant"); + assert(env != nullptr, "invariant"); jclass jfr_clz = env->FindClass("jdk/jfr/internal/JVM"); - if (jfr_clz != NULL) { + if (jfr_clz != nullptr) { JNINativeMethod method[] = { (char*)"beginRecording", (char*)"()V", (void*)jfr_begin_recording, (char*)"isRecording", (char*)"()Z", (void*)jfr_is_recording, @@ -101,7 +101,7 @@ JfrJniMethodRegistration::JfrJniMethodRegistration(JNIEnv* env) { const size_t method_array_length = sizeof(method) / sizeof(JNINativeMethod); if (env->RegisterNatives(jfr_clz, method, (jint)method_array_length) != JNI_OK) { JavaThread* jt = JavaThread::thread_from_jni_environment(env); - assert(jt != NULL, "invariant"); + assert(jt != nullptr, "invariant"); assert(jt->thread_state() == _thread_in_native, "invariant"); ThreadInVMfromNative transition(jt); log_error(jfr, system)("RegisterNatives for JVM class failed!"); diff --git a/src/hotspot/share/jfr/jni/jfrUpcalls.cpp b/src/hotspot/share/jfr/jni/jfrUpcalls.cpp index 480c01ed2fe96..eaf11380895ee 100644 --- a/src/hotspot/share/jfr/jni/jfrUpcalls.cpp +++ b/src/hotspot/share/jfr/jni/jfrUpcalls.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,13 +41,13 @@ #include "runtime/os.hpp" #include "utilities/exceptions.hpp" -static Symbol* jvm_upcalls_class_sym = NULL; -static Symbol* on_retransform_method_sym = NULL; -static Symbol* on_retransform_signature_sym = NULL; -static Symbol* bytes_for_eager_instrumentation_sym = NULL; -static Symbol* bytes_for_eager_instrumentation_sig_sym = NULL; -static Symbol* unhide_internal_types_sym = NULL; -static Symbol* unhide_internal_types_sig_sym = NULL; +static Symbol* jvm_upcalls_class_sym = nullptr; +static Symbol* on_retransform_method_sym = nullptr; +static Symbol* on_retransform_signature_sym = nullptr; +static Symbol* bytes_for_eager_instrumentation_sym = nullptr; +static Symbol* bytes_for_eager_instrumentation_sig_sym = nullptr; +static Symbol* unhide_internal_types_sym = nullptr; +static Symbol* unhide_internal_types_sig_sym = nullptr; static bool initialize(TRAPS) { static bool initialized = false; @@ -60,7 +60,7 @@ static bool initialize(TRAPS) { bytes_for_eager_instrumentation_sig_sym = SymbolTable::new_permanent_symbol("(JZZLjava/lang/Class;[B)[B"); unhide_internal_types_sym = SymbolTable::new_permanent_symbol("unhideInternalTypes"); unhide_internal_types_sig_sym = SymbolTable::new_permanent_symbol("()V"); - initialized = unhide_internal_types_sig_sym != NULL; + initialized = unhide_internal_types_sig_sym != nullptr; } return initialized; } @@ -77,7 +77,7 @@ static const typeArrayOop invoke(jlong trace_id, TRAPS) { DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); const Klass* klass = SystemDictionary::resolve_or_fail(jvm_upcalls_class_sym, true, CHECK_NULL); - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); typeArrayOop 
old_byte_array = oopFactory::new_byteArray(class_data_len, CHECK_NULL); memcpy(old_byte_array->byte_at_addr(0), class_data, class_data_len); JavaValue result(T_OBJECT); @@ -91,11 +91,11 @@ static const typeArrayOop invoke(jlong trace_id, if (HAS_PENDING_EXCEPTION) { ResourceMark rm(THREAD); log_error(jfr, system)("JfrUpcall failed for %s", method_sym->as_C_string()); - return NULL; + return nullptr; } // The result should be a [B const oop res = result.get_oop(); - assert(res != NULL, "invariant"); + assert(res != nullptr, "invariant"); assert(res->is_typeArray(), "invariant"); assert(TypeArrayKlass::cast(res->klass())->element_type() == T_BYTE, "invariant"); const typeArrayOop new_byte_array = typeArrayOop(res); @@ -120,10 +120,10 @@ void JfrUpcalls::on_retransform(jlong trace_id, unsigned char** new_class_data, TRAPS) { DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); - assert(class_being_redefined != NULL, "invariant"); - assert(class_data != NULL, "invariant"); - assert(new_class_data_len != NULL, "invariant"); - assert(new_class_data != NULL, "invariant"); + assert(class_being_redefined != nullptr, "invariant"); + assert(class_data != nullptr, "invariant"); + assert(new_class_data_len != nullptr, "invariant"); + assert(new_class_data != nullptr, "invariant"); if (!JdkJfrEvent::is_visible(class_being_redefined)) { return; } @@ -139,13 +139,13 @@ void JfrUpcalls::on_retransform(jlong trace_id, on_retransform_signature_sym, new_bytes_length, CHECK); - assert(new_byte_array != NULL, "invariant"); + assert(new_byte_array != nullptr, "invariant"); assert(new_bytes_length > 0, "invariant"); unsigned char* const new_bytes = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, unsigned char, new_bytes_length); - if (new_bytes == NULL) { + if (new_bytes == nullptr) { log_error_and_throw_oom(new_bytes_length, THREAD); // unwinds } - assert(new_bytes != NULL, "invariant"); + assert(new_bytes != nullptr, "invariant"); memcpy(new_bytes, new_byte_array->byte_at_addr(0), (size_t)new_bytes_length); *new_class_data_len = new_bytes_length; *new_class_data = new_bytes; @@ -161,10 +161,10 @@ void JfrUpcalls::new_bytes_eager_instrumentation(jlong trace_id, unsigned char** new_class_data, TRAPS) { DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); - assert(super != NULL, "invariant"); - assert(class_data != NULL, "invariant"); - assert(new_class_data_len != NULL, "invariant"); - assert(new_class_data != NULL, "invariant"); + assert(super != nullptr, "invariant"); + assert(class_data != nullptr, "invariant"); + assert(new_class_data_len != nullptr, "invariant"); + assert(new_class_data != nullptr, "invariant"); jint new_bytes_length = 0; initialize(THREAD); const typeArrayOop new_byte_array = invoke(trace_id, @@ -177,13 +177,13 @@ void JfrUpcalls::new_bytes_eager_instrumentation(jlong trace_id, bytes_for_eager_instrumentation_sig_sym, new_bytes_length, CHECK); - assert(new_byte_array != NULL, "invariant"); + assert(new_byte_array != nullptr, "invariant"); assert(new_bytes_length > 0, "invariant"); unsigned char* const new_bytes = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, unsigned char, new_bytes_length); - if (new_bytes == NULL) { + if (new_bytes == nullptr) { log_error_and_throw_oom(new_bytes_length, THREAD); // this unwinds } - assert(new_bytes != NULL, "invariant"); + assert(new_bytes != nullptr, "invariant"); memcpy(new_bytes, new_byte_array->byte_at_addr(0), (size_t)new_bytes_length); *new_class_data_len = new_bytes_length; *new_class_data = new_bytes; @@ -193,7 +193,7 @@ bool 
JfrUpcalls::unhide_internal_types(TRAPS) { DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); JavaValue result(T_VOID); const Klass* klass = SystemDictionary::resolve_or_fail(jvm_upcalls_class_sym, true, CHECK_false); - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); JfrJavaArguments args(&result, klass, unhide_internal_types_sym, unhide_internal_types_sig_sym); JfrJavaSupport::call_static(&args, THREAD); if (HAS_PENDING_EXCEPTION) { diff --git a/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp b/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp index 5bedc8c033927..ea96a4ccd7aea 100644 --- a/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp +++ b/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,7 +41,7 @@ BFSClosure::BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, JFRBitSet* _edge_queue(edge_queue), _edge_store(edge_store), _mark_bits(mark_bits), - _current_parent(NULL), + _current_parent(nullptr), _current_frontier_level(0), _next_frontier_idx(0), _prev_frontier_idx(0), @@ -106,7 +106,7 @@ void BFSClosure::process() { void BFSClosure::process_root_set() { for (size_t idx = _edge_queue->bottom(); idx < _edge_queue->top(); ++idx) { const Edge* edge = _edge_queue->element_at(idx); - assert(edge->parent() == NULL, "invariant"); + assert(edge->parent() == nullptr, "invariant"); process(edge->reference(), edge->pointee()); } } @@ -123,7 +123,7 @@ void BFSClosure::closure_impl(UnifiedOopRef reference, const oop pointee) { } if (_use_dfs) { - assert(_current_parent != NULL, "invariant"); + assert(_current_parent != nullptr, "invariant"); DFSClosure::find_leaks_from_edge(_edge_store, _mark_bits, _current_parent); return; } @@ -136,7 +136,7 @@ void BFSClosure::closure_impl(UnifiedOopRef reference, const oop pointee) { } // if we are processinig initial root set, don't add to queue - if (_current_parent != NULL) { + if (_current_parent != nullptr) { _edge_queue->add(_current_parent, reference); } @@ -147,10 +147,10 @@ void BFSClosure::closure_impl(UnifiedOopRef reference, const oop pointee) { } void BFSClosure::add_chain(UnifiedOopRef reference, const oop pointee) { - assert(pointee != NULL, "invariant"); + assert(pointee != nullptr, "invariant"); assert(pointee->mark().is_marked(), "invariant"); Edge leak_edge(_current_parent, reference); - _edge_store->put_chain(&leak_edge, _current_parent == NULL ? 1 : _current_frontier_level + 2); + _edge_store->put_chain(&leak_edge, _current_parent == nullptr ? 
1 : _current_frontier_level + 2); } void BFSClosure::dfs_fallback() { @@ -159,7 +159,7 @@ void BFSClosure::dfs_fallback() { _dfs_fallback_idx = _edge_queue->bottom(); while (!_edge_queue->is_empty()) { const Edge* edge = _edge_queue->remove(); - if (edge->pointee() != NULL) { + if (edge->pointee() != nullptr) { DFSClosure::find_leaks_from_edge(_edge_store, _mark_bits, edge); } } @@ -203,34 +203,34 @@ bool BFSClosure::is_complete() const { } void BFSClosure::iterate(const Edge* parent) { - assert(parent != NULL, "invariant"); + assert(parent != nullptr, "invariant"); const oop pointee = parent->pointee(); - assert(pointee != NULL, "invariant"); + assert(pointee != nullptr, "invariant"); _current_parent = parent; pointee->oop_iterate(this); } void BFSClosure::do_oop(oop* ref) { - assert(ref != NULL, "invariant"); + assert(ref != nullptr, "invariant"); assert(is_aligned(ref, HeapWordSize), "invariant"); const oop pointee = HeapAccess<AS_NO_KEEPALIVE>::oop_load(ref); - if (pointee != NULL) { + if (pointee != nullptr) { closure_impl(UnifiedOopRef::encode_in_heap(ref), pointee); } } void BFSClosure::do_oop(narrowOop* ref) { - assert(ref != NULL, "invariant"); + assert(ref != nullptr, "invariant"); assert(is_aligned(ref, sizeof(narrowOop)), "invariant"); const oop pointee = HeapAccess<AS_NO_KEEPALIVE>::oop_load(ref); - if (pointee != NULL) { + if (pointee != nullptr) { closure_impl(UnifiedOopRef::encode_in_heap(ref), pointee); } } void BFSClosure::do_root(UnifiedOopRef ref) { - assert(ref.dereference() != NULL, "pointee must not be null"); + assert(ref.dereference() != nullptr, "pointee must not be null"); if (!_edge_queue->is_full()) { - _edge_queue->add(NULL, ref); + _edge_queue->add(nullptr, ref); } } diff --git a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp index 00e7ff126f8db..4ba2e3329f818 100644 --- a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp +++ b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
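
The bfsClosure.cpp hunks above, and the dfsClosure.cpp hunks that follow, are null-handling changes only, but the surrounding algorithm deserves a short gloss: the leak profiler walks the object graph breadth-first through a fixed-size edge queue, and when that queue fills up (dfs_fallback) it finishes the unexplored subgraphs depth-first instead of dropping them. A toy, self-contained illustration of that shape, using made-up node types rather than HotSpot's Edge/EdgeQueue machinery:

#include <cstddef>
#include <queue>
#include <vector>

struct ToyNode {
  std::vector<ToyNode*> out;   // outgoing references
  bool marked = false;
};

// Depth-first marking, used once the breadth-first queue has no room left.
static void dfs_mark(ToyNode* n) {
  if (n == nullptr || n->marked) return;
  n->marked = true;
  for (ToyNode* s : n->out) dfs_mark(s);
}

// Breadth-first marking with a bounded frontier and a depth-first fallback.
static void bfs_mark_with_fallback(ToyNode* root, std::size_t queue_capacity) {
  std::queue<ToyNode*> frontier;
  if (root != nullptr) frontier.push(root);
  while (!frontier.empty()) {
    ToyNode* n = frontier.front();
    frontier.pop();
    if (n->marked) continue;
    n->marked = true;
    for (ToyNode* s : n->out) {
      if (frontier.size() < queue_capacity) {
        frontier.push(s);   // normal BFS step
      } else {
        dfs_mark(s);        // frontier exhausted: finish this branch depth-first
      }
    }
  }
}
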
* * This code is free software; you can redistribute it and/or modify it @@ -42,9 +42,9 @@ UnifiedOopRef DFSClosure::_reference_stack[max_dfs_depth]; void DFSClosure::find_leaks_from_edge(EdgeStore* edge_store, JFRBitSet* mark_bits, const Edge* start_edge) { - assert(edge_store != NULL, "invariant"); - assert(mark_bits != NULL," invariant"); - assert(start_edge != NULL, "invariant"); + assert(edge_store != nullptr, "invariant"); + assert(mark_bits != nullptr," invariant"); + assert(start_edge != nullptr, "invariant"); // Depth-first search, starting from a BFS edge DFSClosure dfs(edge_store, mark_bits, start_edge); @@ -53,11 +53,11 @@ void DFSClosure::find_leaks_from_edge(EdgeStore* edge_store, void DFSClosure::find_leaks_from_root_set(EdgeStore* edge_store, JFRBitSet* mark_bits) { - assert(edge_store != NULL, "invariant"); - assert(mark_bits != NULL, "invariant"); + assert(edge_store != nullptr, "invariant"); + assert(mark_bits != nullptr, "invariant"); // Mark root set, to avoid going sideways - DFSClosure dfs(edge_store, mark_bits, NULL); + DFSClosure dfs(edge_store, mark_bits, nullptr); dfs._max_depth = 1; RootSetClosure rs(&dfs); rs.process(); @@ -74,7 +74,7 @@ DFSClosure::DFSClosure(EdgeStore* edge_store, JFRBitSet* mark_bits, const Edge* } void DFSClosure::closure_impl(UnifiedOopRef reference, const oop pointee) { - assert(pointee != NULL, "invariant"); + assert(pointee != nullptr, "invariant"); assert(!reference.is_null(), "invariant"); if (GranularTimer::is_finished()) { @@ -123,28 +123,28 @@ void DFSClosure::add_chain() { assert(array_length == idx + 1, "invariant"); // aggregate from breadth-first search - if (_start_edge != NULL) { + if (_start_edge != nullptr) { chain[idx++] = *_start_edge; } else { - chain[idx - 1] = Edge(NULL, chain[idx - 1].reference()); + chain[idx - 1] = Edge(nullptr, chain[idx - 1].reference()); } - _edge_store->put_chain(chain, idx + (_start_edge != NULL ? _start_edge->distance_to_root() : 0)); + _edge_store->put_chain(chain, idx + (_start_edge != nullptr ? _start_edge->distance_to_root() : 0)); } void DFSClosure::do_oop(oop* ref) { - assert(ref != NULL, "invariant"); + assert(ref != nullptr, "invariant"); assert(is_aligned(ref, HeapWordSize), "invariant"); const oop pointee = HeapAccess::oop_load(ref); - if (pointee != NULL) { + if (pointee != nullptr) { closure_impl(UnifiedOopRef::encode_in_heap(ref), pointee); } } void DFSClosure::do_oop(narrowOop* ref) { - assert(ref != NULL, "invariant"); + assert(ref != nullptr, "invariant"); assert(is_aligned(ref, sizeof(narrowOop)), "invariant"); const oop pointee = HeapAccess::oop_load(ref); - if (pointee != NULL) { + if (pointee != nullptr) { closure_impl(UnifiedOopRef::encode_in_heap(ref), pointee); } } @@ -152,6 +152,6 @@ void DFSClosure::do_oop(narrowOop* ref) { void DFSClosure::do_root(UnifiedOopRef ref) { assert(!ref.is_null(), "invariant"); const oop pointee = ref.dereference(); - assert(pointee != NULL, "invariant"); + assert(pointee != nullptr, "invariant"); closure_impl(ref, pointee); } diff --git a/src/hotspot/share/jfr/leakprofiler/chains/edge.cpp b/src/hotspot/share/jfr/leakprofiler/chains/edge.cpp index fccf7cd7008f9..54c9ef3cb6b75 100644 --- a/src/hotspot/share/jfr/leakprofiler/chains/edge.cpp +++ b/src/hotspot/share/jfr/leakprofiler/chains/edge.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
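
The edge.cpp and edge.hpp hunks that follow operate on a deliberately small data structure: an edge records the reference it was reached through plus a pointer to its parent edge, a null parent marks an edge reached directly from a GC root, and properties such as the distance to the root are recovered by walking that parent chain. A minimal sketch with illustrative names rather than HotSpot's Edge type:

#include <cstddef>

struct ToyEdge {
  const ToyEdge* parent;   // nullptr for an edge coming straight from a GC root
};

static bool is_root_edge(const ToyEdge& e) {
  return e.parent == nullptr;
}

static std::size_t distance_to_root(const ToyEdge& e) {
  std::size_t depth = 0;
  for (const ToyEdge* cur = e.parent; cur != nullptr; cur = cur->parent) {
    ++depth;
  }
  return depth;
}
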
* * This code is free software; you can redistribute it and/or modify it @@ -34,13 +34,13 @@ const oop Edge::pointee() const { } const oop Edge::reference_owner() const { - return is_root() ? (oop)NULL : _parent->pointee(); + return is_root() ? (oop)nullptr : _parent->pointee(); } size_t Edge::distance_to_root() const { size_t depth = 0; const Edge* current = _parent; - while (current != NULL) { + while (current != nullptr) { depth++; current = current->parent(); } diff --git a/src/hotspot/share/jfr/leakprofiler/chains/edge.hpp b/src/hotspot/share/jfr/leakprofiler/chains/edge.hpp index 6c93b163dcdae..47aef3d3fee3a 100644 --- a/src/hotspot/share/jfr/leakprofiler/chains/edge.hpp +++ b/src/hotspot/share/jfr/leakprofiler/chains/edge.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,7 +43,7 @@ class Edge { return _parent; } bool is_root() const { - return _parent == NULL; + return _parent == nullptr; } const oop pointee() const; const oop reference_owner() const; diff --git a/src/hotspot/share/jfr/leakprofiler/chains/edgeQueue.cpp b/src/hotspot/share/jfr/leakprofiler/chains/edgeQueue.cpp index 9afe7cec1b5d2..75ca304b84789 100644 --- a/src/hotspot/share/jfr/leakprofiler/chains/edgeQueue.cpp +++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeQueue.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ #include "jfr/recorder/storage/jfrVirtualMemory.hpp" EdgeQueue::EdgeQueue(size_t reservation_size_bytes, size_t commit_block_size_bytes) : - _vmm(NULL), + _vmm(nullptr), _reservation_size_bytes(reservation_size_bytes), _commit_block_size_bytes(commit_block_size_bytes), _top_index(0), @@ -37,9 +37,9 @@ EdgeQueue::EdgeQueue(size_t reservation_size_bytes, size_t commit_block_size_byt bool EdgeQueue::initialize() { assert(_reservation_size_bytes >= _commit_block_size_bytes, "invariant"); - assert(_vmm == NULL, "invariant"); + assert(_vmm == nullptr, "invariant"); _vmm = new JfrVirtualMemory(); - return _vmm != NULL && _vmm->initialize(_reservation_size_bytes, _commit_block_size_bytes, sizeof(Edge)); + return _vmm != nullptr && _vmm->initialize(_reservation_size_bytes, _commit_block_size_bytes, sizeof(Edge)); } EdgeQueue::~EdgeQueue() { @@ -51,7 +51,7 @@ void EdgeQueue::add(const Edge* parent, UnifiedOopRef ref) { assert(!is_full(), "EdgeQueue is full. 
Check is_full before adding another Edge"); assert(!_vmm->is_full(), "invariant"); void* const allocation = _vmm->new_datum(); - assert(allocation != NULL, "invariant"); + assert(allocation != nullptr, "invariant"); new (allocation)Edge(parent, ref); _top_index++; assert(_vmm->count() == _top_index, "invariant"); @@ -86,16 +86,16 @@ const Edge* EdgeQueue::element_at(size_t index) const { } size_t EdgeQueue::reserved_size() const { - assert(_vmm != NULL, "invariant"); + assert(_vmm != nullptr, "invariant"); return _vmm->reserved_size(); } size_t EdgeQueue::live_set() const { - assert(_vmm != NULL, "invariant"); + assert(_vmm != nullptr, "invariant"); return _vmm->live_set(); } size_t EdgeQueue::sizeof_edge() const { - assert(_vmm != NULL, "invariant"); + assert(_vmm != nullptr, "invariant"); return _vmm->aligned_datum_size_bytes(); } diff --git a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp index 145b123f4b2ce..c08cc543f2f96 100644 --- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp +++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,60 +43,60 @@ bool EdgeStore::is_empty() const { } void EdgeStore::on_link(EdgeEntry* entry) { - assert(entry != NULL, "invariant"); + assert(entry != nullptr, "invariant"); assert(entry->id() == 0, "invariant"); entry->set_id(++_edge_id_counter); } bool EdgeStore::on_equals(uintptr_t hash, const EdgeEntry* entry) { - assert(entry != NULL, "invariant"); + assert(entry != nullptr, "invariant"); assert(entry->hash() == hash, "invariant"); return true; } void EdgeStore::on_unlink(EdgeEntry* entry) { - assert(entry != NULL, "invariant"); + assert(entry != nullptr, "invariant"); // nothing } #ifdef ASSERT bool EdgeStore::contains(UnifiedOopRef reference) const { - return get(reference) != NULL; + return get(reference) != nullptr; } #endif StoredEdge* EdgeStore::get(UnifiedOopRef reference) const { assert(!reference.is_null(), "invariant"); EdgeEntry* const entry = _edges->lookup_only(reference.addr()); - return entry != NULL ? entry->literal_addr() : NULL; + return entry != nullptr ? 
entry->literal_addr() : nullptr; } StoredEdge* EdgeStore::put(UnifiedOopRef reference) { assert(!reference.is_null(), "invariant"); - const StoredEdge e(NULL, reference); - assert(NULL == _edges->lookup_only(reference.addr()), "invariant"); + const StoredEdge e(nullptr, reference); + assert(nullptr == _edges->lookup_only(reference.addr()), "invariant"); EdgeEntry& entry = _edges->put(reference.addr(), e); return entry.literal_addr(); } traceid EdgeStore::get_id(const Edge* edge) const { - assert(edge != NULL, "invariant"); + assert(edge != nullptr, "invariant"); EdgeEntry* const entry = _edges->lookup_only(edge->reference().addr()); - assert(entry != NULL, "invariant"); + assert(entry != nullptr, "invariant"); return entry->id(); } traceid EdgeStore::gc_root_id(const Edge* edge) const { - assert(edge != NULL, "invariant"); + assert(edge != nullptr, "invariant"); const traceid gc_root_id = static_cast(edge)->gc_root_id(); if (gc_root_id != 0) { return gc_root_id; } // not cached - assert(edge != NULL, "invariant"); + assert(edge != nullptr, "invariant"); const Edge* const root = EdgeUtils::root(*edge); - assert(root != NULL, "invariant"); - assert(root->parent() == NULL, "invariant"); + assert(root != nullptr, "invariant"); + assert(root->parent() == nullptr, "invariant"); return get_id(root); } @@ -105,15 +105,15 @@ static const Edge* get_skip_ancestor(const Edge** current, size_t distance_to_ro assert(*skip_length == 0, "invariant"); *skip_length = distance_to_root - (EdgeUtils::root_context - 1); const Edge* const target = EdgeUtils::ancestor(**current, *skip_length); - assert(target != NULL, "invariant"); + assert(target != nullptr, "invariant"); assert(target->distance_to_root() + 1 == EdgeUtils::root_context, "invariant"); return target; } bool EdgeStore::put_skip_edge(StoredEdge** previous, const Edge** current, size_t distance_to_root) { - assert(*previous != NULL, "invariant"); - assert((*previous)->parent() == NULL, "invariant"); - assert(*current != NULL, "invariant"); + assert(*previous != nullptr, "invariant"); + assert((*previous)->parent() == nullptr, "invariant"); + assert(*current != nullptr, "invariant"); assert((*current)->distance_to_root() == distance_to_root, "invariant"); if (distance_to_root < EdgeUtils::root_context) { @@ -123,20 +123,20 @@ bool EdgeStore::put_skip_edge(StoredEdge** previous, const Edge** current, size_ size_t skip_length = 0; const Edge* const skip_ancestor = get_skip_ancestor(current, distance_to_root, &skip_length); - assert(skip_ancestor != NULL, "invariant"); + assert(skip_ancestor != nullptr, "invariant"); (*previous)->set_skip_length(skip_length); // lookup target StoredEdge* stored_target = get(skip_ancestor->reference()); - if (stored_target != NULL) { + if (stored_target != nullptr) { (*previous)->set_parent(stored_target); // linked to existing, complete return true; } - assert(stored_target == NULL, "invariant"); + assert(stored_target == nullptr, "invariant"); stored_target = put(skip_ancestor->reference()); - assert(stored_target != NULL, "invariant"); + assert(stored_target != nullptr, "invariant"); (*previous)->set_parent(stored_target); *previous = stored_target; *current = skip_ancestor->parent(); @@ -144,18 +144,18 @@ bool EdgeStore::put_skip_edge(StoredEdge** previous, const Edge** current, size_ } static void link_edge(const StoredEdge* current_stored, StoredEdge** previous) { - assert(current_stored != NULL, "invariant"); - assert(*previous != NULL, "invariant"); - assert((*previous)->parent() == NULL, "invariant"); + 
assert(current_stored != nullptr, "invariant"); + assert(*previous != nullptr, "invariant"); + assert((*previous)->parent() == nullptr, "invariant"); (*previous)->set_parent(current_stored); } static const StoredEdge* find_closest_skip_edge(const StoredEdge* edge, size_t* distance) { - assert(edge != NULL, "invariant"); - assert(distance != NULL, "invariant"); + assert(edge != nullptr, "invariant"); + assert(distance != nullptr, "invariant"); const StoredEdge* current = edge; *distance = 1; - while (current != NULL && !current->is_skip_edge()) { + while (current != nullptr && !current->is_skip_edge()) { ++(*distance); current = current->parent(); } @@ -163,11 +163,11 @@ static const StoredEdge* find_closest_skip_edge(const StoredEdge* edge, size_t* } void EdgeStore::link_with_existing_chain(const StoredEdge* current_stored, StoredEdge** previous, size_t previous_length) { - assert(current_stored != NULL, "invariant"); - assert((*previous)->parent() == NULL, "invariant"); + assert(current_stored != nullptr, "invariant"); + assert((*previous)->parent() == nullptr, "invariant"); size_t distance_to_skip_edge; // including the skip edge itself const StoredEdge* const closest_skip_edge = find_closest_skip_edge(current_stored, &distance_to_skip_edge); - if (closest_skip_edge == NULL) { + if (closest_skip_edge == nullptr) { // no found skip edge implies root if (distance_to_skip_edge + previous_length <= EdgeUtils::max_ref_chain_depth) { link_edge(current_stored, previous); @@ -188,33 +188,33 @@ void EdgeStore::link_with_existing_chain(const StoredEdge* current_stored, Store } StoredEdge* EdgeStore::link_new_edge(StoredEdge** previous, const Edge** current) { - assert(*previous != NULL, "invariant"); - assert((*previous)->parent() == NULL, "invariant"); - assert(*current != NULL, "invariant"); + assert(*previous != nullptr, "invariant"); + assert((*previous)->parent() == nullptr, "invariant"); + assert(*current != nullptr, "invariant"); assert(!contains((*current)->reference()), "invariant"); StoredEdge* const stored_edge = put((*current)->reference()); - assert(stored_edge != NULL, "invariant"); + assert(stored_edge != nullptr, "invariant"); link_edge(stored_edge, previous); return stored_edge; } bool EdgeStore::put_edges(StoredEdge** previous, const Edge** current, size_t limit) { - assert(*previous != NULL, "invariant"); - assert(*current != NULL, "invariant"); + assert(*previous != nullptr, "invariant"); + assert(*current != nullptr, "invariant"); size_t depth = 1; - while (*current != NULL && depth < limit) { + while (*current != nullptr && depth < limit) { StoredEdge* stored_edge = get((*current)->reference()); - if (stored_edge != NULL) { + if (stored_edge != nullptr) { link_with_existing_chain(stored_edge, previous, depth); return true; } stored_edge = link_new_edge(previous, current); - assert((*previous)->parent() != NULL, "invariant"); + assert((*previous)->parent() != nullptr, "invariant"); *previous = stored_edge; *current = (*current)->parent(); ++depth; } - return NULL == *current; + return nullptr == *current; } static GrowableArray* _leak_context_edges = nullptr; @@ -222,7 +222,7 @@ static GrowableArray* _leak_context_edges = nullptr; EdgeStore::EdgeStore() : _edges(new EdgeHashTable(this)) {} EdgeStore::~EdgeStore() { - assert(_edges != NULL, "invariant"); + assert(_edges != nullptr, "invariant"); delete _edges; delete _leak_context_edges; _leak_context_edges = nullptr; @@ -265,7 +265,7 @@ const StoredEdge* EdgeStore::get(const ObjectSample* sample) const { static constexpr const 
int max_idx = right_n_bits(32 - markWord::lock_bits); static void store_idx_precondition(oop sample_object, int idx) { - assert(sample_object != NULL, "invariant"); + assert(sample_object != nullptr, "invariant"); assert(sample_object->mark().is_marked(), "invariant"); assert(idx > 0, "invariant"); assert(idx <= max_idx, "invariant"); @@ -298,7 +298,7 @@ static void associate_with_candidate(const StoredEdge* leak_context_edge) { } StoredEdge* EdgeStore::associate_leak_context_with_candidate(const Edge* edge) { - assert(edge != NULL, "invariant"); + assert(edge != nullptr, "invariant"); assert(!contains(edge->reference()), "invariant"); StoredEdge* const leak_context_edge = put(edge->reference()); associate_with_candidate(leak_context_edge); @@ -315,11 +315,11 @@ StoredEdge* EdgeStore::associate_leak_context_with_candidate(const Edge* edge) { * The leak context edge is the edge adjacent to the leak candidate object, always an edge in the edge store. */ void EdgeStore::put_chain(const Edge* chain, size_t length) { - assert(chain != NULL, "invariant"); + assert(chain != nullptr, "invariant"); assert(chain->distance_to_root() + 1 == length, "invariant"); StoredEdge* const leak_context_edge = associate_leak_context_with_candidate(chain); - assert(leak_context_edge != NULL, "invariant"); - assert(leak_context_edge->parent() == NULL, "invariant"); + assert(leak_context_edge != nullptr, "invariant"); + assert(leak_context_edge->parent() == nullptr, "invariant"); if (1 == length) { store_gc_root_id_in_leak_context_edge(leak_context_edge, leak_context_edge); @@ -327,13 +327,13 @@ void EdgeStore::put_chain(const Edge* chain, size_t length) { } const Edge* current = chain->parent(); - assert(current != NULL, "invariant"); + assert(current != nullptr, "invariant"); StoredEdge* previous = leak_context_edge; // a leak context is the sequence of (limited) edges reachable from the leak candidate if (put_edges(&previous, ¤t, EdgeUtils::leak_context)) { // complete - assert(previous != NULL, "invariant"); + assert(previous != nullptr, "invariant"); put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous)); return; } @@ -345,9 +345,9 @@ void EdgeStore::put_chain(const Edge* chain, size_t length) { // connecting the leak context sequence with the root context sequence if (put_skip_edge(&previous, ¤t, distance_to_root)) { // complete - assert(previous != NULL, "invariant"); + assert(previous != nullptr, "invariant"); assert(previous->is_skip_edge(), "invariant"); - assert(previous->parent() != NULL, "invariant"); + assert(previous->parent() != nullptr, "invariant"); put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous->parent())); return; } @@ -356,13 +356,13 @@ void EdgeStore::put_chain(const Edge* chain, size_t length) { // a root context is the sequence of (limited) edges reachable from the root put_edges(&previous, ¤t, EdgeUtils::root_context); - assert(previous != NULL, "invariant"); + assert(previous != nullptr, "invariant"); put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous)); } void EdgeStore::put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* root) const { - assert(leak_context_edge != NULL, "invariant"); - assert(root != NULL, "invariant"); + assert(leak_context_edge != nullptr, "invariant"); + assert(root != nullptr, "invariant"); store_gc_root_id_in_leak_context_edge(leak_context_edge, root); assert(leak_context_edge->distance_to_root() + 1 <= EdgeUtils::max_ref_chain_depth, "invariant"); } @@ -370,10 +370,10 @@ void 
EdgeStore::put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* ro // To avoid another traversal to resolve the root edge id later, // cache it in the immediate leak context edge for fast retrieval. void EdgeStore::store_gc_root_id_in_leak_context_edge(StoredEdge* leak_context_edge, const Edge* root) const { - assert(leak_context_edge != NULL, "invariant"); + assert(leak_context_edge != nullptr, "invariant"); assert(leak_context_edge->gc_root_id() == 0, "invariant"); - assert(root != NULL, "invariant"); - assert(root->parent() == NULL, "invariant"); + assert(root != nullptr, "invariant"); + assert(root->parent() == nullptr, "invariant"); assert(root->distance_to_root() == 0, "invariant"); const StoredEdge* const stored_root = static_cast(root); traceid root_id = stored_root->gc_root_id(); diff --git a/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp b/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp index ad71690535167..69c88cba8ecad 100644 --- a/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp +++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,14 +36,14 @@ #include "runtime/handles.inline.hpp" static bool is_static_field(const oop ref_owner, const InstanceKlass* ik, int offset) { - assert(ref_owner != NULL, "invariant"); - assert(ik != NULL, "invariant"); + assert(ref_owner != nullptr, "invariant"); + assert(ik != nullptr, "invariant"); assert(ref_owner->klass() == ik, "invariant"); return ik->is_mirror_instance_klass() && offset >= InstanceMirrorKlass::cast(ik)->offset_of_static_fields(); } static int field_offset(const Edge& edge, const oop ref_owner) { - assert(ref_owner != NULL, "invariant"); + assert(ref_owner != nullptr, "invariant"); assert(!ref_owner->is_array(), "invariant"); assert(ref_owner->is_instance(), "invariant"); UnifiedOopRef reference = edge.reference(); @@ -57,9 +57,9 @@ static int field_offset(const Edge& edge, const oop ref_owner) { const Symbol* EdgeUtils::field_name(const Edge& edge, jshort* modifiers) { assert(!edge.is_root(), "invariant"); assert(!EdgeUtils::is_array_element(edge), "invariant"); - assert(modifiers != NULL, "invariant"); + assert(modifiers != nullptr, "invariant"); const oop ref_owner = edge.reference_owner(); - assert(ref_owner != NULL, "invariant"); + assert(ref_owner != nullptr, "invariant"); assert(ref_owner->klass()->is_instance_klass(), "invariant"); const InstanceKlass* ik = InstanceKlass::cast(ref_owner->klass()); const int offset = field_offset(edge, ref_owner); @@ -68,7 +68,7 @@ const Symbol* EdgeUtils::field_name(const Edge& edge, jshort* modifiers) { assert(java_lang_Class::as_Klass(ref_owner)->is_instance_klass(), "invariant"); ik = InstanceKlass::cast(java_lang_Class::as_Klass(ref_owner)); } - while (ik != NULL) { + while (ik != nullptr) { JavaFieldStream jfs(ik); while (!jfs.done()) { if (offset == jfs.offset()) { @@ -80,20 +80,20 @@ const Symbol* EdgeUtils::field_name(const Edge& edge, jshort* modifiers) { ik = (const InstanceKlass*)ik->super(); } *modifiers = 0; - return NULL; + return nullptr; } bool EdgeUtils::is_array_element(const Edge& edge) { assert(!edge.is_root(), "invariant"); const oop ref_owner = edge.reference_owner(); - assert(ref_owner != NULL, 
"invariant"); + assert(ref_owner != nullptr, "invariant"); return ref_owner->is_objArray(); } static int array_offset(const Edge& edge) { assert(EdgeUtils::is_array_element(edge), "invariant"); const oop ref_owner = edge.reference_owner(); - assert(ref_owner != NULL, "invariant"); + assert(ref_owner != nullptr, "invariant"); UnifiedOopRef reference = edge.reference(); assert(!reference.is_null(), "invariant"); assert(ref_owner->is_array(), "invariant"); @@ -110,7 +110,7 @@ int EdgeUtils::array_index(const Edge& edge) { int EdgeUtils::array_size(const Edge& edge) { assert(is_array_element(edge), "invariant"); const oop ref_owner = edge.reference_owner(); - assert(ref_owner != NULL, "invariant"); + assert(ref_owner != nullptr, "invariant"); assert(ref_owner->is_objArray(), "invariant"); return ((objArrayOop)ref_owner)->length(); } @@ -118,11 +118,11 @@ int EdgeUtils::array_size(const Edge& edge) { const Edge* EdgeUtils::root(const Edge& edge) { const Edge* current = &edge; const Edge* parent = current->parent(); - while (parent != NULL) { + while (parent != nullptr) { current = parent; parent = current->parent(); } - assert(current != NULL, "invariant"); + assert(current != nullptr, "invariant"); return current; } @@ -130,7 +130,7 @@ const Edge* EdgeUtils::ancestor(const Edge& edge, size_t distance) { const Edge* current = &edge; const Edge* parent = current->parent(); size_t seek = 0; - while (parent != NULL && seek != distance) { + while (parent != nullptr && seek != distance) { seek++; current = parent; parent = parent->parent(); diff --git a/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp b/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp index 2d496ef9dc7e7..13b55c34e238a 100644 --- a/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp +++ b/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -45,7 +45,7 @@ class ObjectSampleMarker : public StackObj { const markWord mark_word) : _obj(obj), _mark_word(mark_word) {} public: - ObjectSampleMarkWord() : _obj(NULL), _mark_word(markWord::zero()) {} + ObjectSampleMarkWord() : _obj(nullptr), _mark_word(markWord::zero()) {} }; GrowableArray* _store; @@ -54,7 +54,7 @@ class ObjectSampleMarker : public StackObj { ObjectSampleMarker() : _store(new GrowableArray(16)) {} ~ObjectSampleMarker() { - assert(_store != NULL, "invariant"); + assert(_store != nullptr, "invariant"); // restore the saved, original, markWord for sample objects while (_store->is_nonempty()) { ObjectSampleMarkWord sample_oop = _store->pop(); @@ -64,7 +64,7 @@ class ObjectSampleMarker : public StackObj { } void mark(oop obj) { - assert(obj != NULL, "invariant"); + assert(obj != nullptr, "invariant"); // save the original markWord _store->push(ObjectSampleMarkWord(obj, obj->mark())); // now we will set the mark word to "marked" in order to quickly diff --git a/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp b/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp index 3de6c662d900b..b14bbb77d2a72 100644 --- a/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp +++ b/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp @@ -46,7 +46,7 @@ RootSetClosure::RootSetClosure(Delegate* delegate) : _delegate(delegat template void RootSetClosure::do_oop(oop* ref) { - assert(ref != NULL, "invariant"); + assert(ref != nullptr, "invariant"); assert(is_aligned(ref, HeapWordSize), "invariant"); if (NativeAccess<>::oop_load(ref) != nullptr) { _delegate->do_root(UnifiedOopRef::encode_in_native(ref)); @@ -55,7 +55,7 @@ void RootSetClosure::do_oop(oop* ref) { template void RootSetClosure::do_oop(narrowOop* ref) { - assert(ref != NULL, "invariant"); + assert(ref != nullptr, "invariant"); assert(is_aligned(ref, sizeof(narrowOop)), "invariant"); if (!CompressedOops::is_null(*ref)) { _delegate->do_root(UnifiedOopRef::encode_in_native(ref)); @@ -72,7 +72,7 @@ class RawRootClosure : public OopClosure { RawRootClosure(Delegate* delegate) : _delegate(delegate) {} void do_oop(oop* ref) { - assert(ref != NULL, "invariant"); + assert(ref != nullptr, "invariant"); assert(is_aligned(ref, HeapWordSize), "invariant"); if (*ref != nullptr) { _delegate->do_root(UnifiedOopRef::encode_as_raw(ref)); @@ -80,7 +80,7 @@ class RawRootClosure : public OopClosure { } void do_oop(narrowOop* ref) { - assert(ref != NULL, "invariant"); + assert(ref != nullptr, "invariant"); assert(is_aligned(ref, sizeof(narrowOop)), "invariant"); if (!CompressedOops::is_null(*ref)) { _delegate->do_root(UnifiedOopRef::encode_as_raw(ref)); @@ -99,7 +99,7 @@ void RootSetClosure::process() { // We don't follow code blob oops, because they have misaligned oops. RawRootClosure rrc(_delegate); - Threads::oops_do(&rrc, NULL); + Threads::oops_do(&rrc, nullptr); } template class RootSetClosure; diff --git a/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp b/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp index 2fee9671d5ee1..8b28c4f797404 100644 --- a/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp +++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, Datadog, Inc. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -53,7 +53,7 @@ EventEmitter::~EventEmitter() { } void EventEmitter::emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all, bool skip_bfs) { - assert(sampler != NULL, "invariant"); + assert(sampler != nullptr, "invariant"); ResourceMark rm; EdgeStore edge_store; if (cutoff_ticks <= 0) { @@ -71,8 +71,8 @@ void EventEmitter::emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_ size_t EventEmitter::write_events(ObjectSampler* object_sampler, EdgeStore* edge_store, bool emit_all) { assert(_thread == Thread::current(), "invariant"); assert(_thread->jfr_thread_local() == _jfr_thread_local, "invariant"); - assert(object_sampler != NULL, "invariant"); - assert(edge_store != NULL, "invariant"); + assert(object_sampler != nullptr, "invariant"); + assert(edge_store != nullptr, "invariant"); const jlong last_sweep = emit_all ? max_jlong : ObjectSampler::last_sweep(); size_t count = 0; @@ -80,7 +80,7 @@ size_t EventEmitter::write_events(ObjectSampler* object_sampler, EdgeStore* edge // First pass associates a live sample with its immediate edge // in preparation for writing checkpoint information. const ObjectSample* current = object_sampler->first(); - while (current != NULL) { + while (current != nullptr) { ObjectSample* prev = current->prev(); if (current->is_alive_and_older_than(last_sweep)) { link_sample_with_edge(current, edge_store); @@ -96,7 +96,7 @@ size_t EventEmitter::write_events(ObjectSampler* object_sampler, EdgeStore* edge // Now we are ready to write the events const ObjectSample* current = object_sampler->first(); - while (current != NULL) { + while (current != nullptr) { ObjectSample* prev = current->prev(); if (current->is_alive_and_older_than(last_sweep)) { write_event(current, edge_store); @@ -108,7 +108,7 @@ size_t EventEmitter::write_events(ObjectSampler* object_sampler, EdgeStore* edge } static int array_size(const oop object) { - assert(object != NULL, "invariant"); + assert(object != nullptr, "invariant"); if (object->is_array()) { return arrayOop(object)->length(); } @@ -116,9 +116,9 @@ static int array_size(const oop object) { } void EventEmitter::link_sample_with_edge(const ObjectSample* sample, EdgeStore* edge_store) { - assert(sample != NULL, "invariant"); + assert(sample != nullptr, "invariant"); assert(!sample->is_dead(), "invariant"); - assert(edge_store != NULL, "invariant"); + assert(edge_store != nullptr, "invariant"); if (SafepointSynchronize::is_at_safepoint()) { if (edge_store->has_leak_context(sample)) { // Associated with an edge (chain) already during heap traversal. 
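EventEmitter::write_events above walks the sampler's list twice: a first pass links each live, old-enough sample with its immediate edge, and a second pass writes the events. A minimal sketch of that two-pass shape follows, using plain containers and made-up Sample/Edge types rather than the JFR ones.

#include <cstdint>
#include <iostream>
#include <vector>

struct Edge { std::uint64_t id; };

struct Sample {
  bool alive;
  std::int64_t allocation_time;
  const Edge* edge = nullptr;      // filled in by the first pass
};

// First pass: attach an edge to every sample that will be emitted.
static void link_pass(std::vector<Sample>& samples, std::int64_t last_sweep) {
  static Edge dummy_edge{42};      // stand-in for an EdgeStore lookup
  for (Sample& s : samples) {
    if (s.alive && s.allocation_time < last_sweep) {
      s.edge = &dummy_edge;
    }
  }
}

// Second pass: emit only the samples that were linked.
static std::size_t write_pass(const std::vector<Sample>& samples,
                              std::int64_t last_sweep) {
  std::size_t count = 0;
  for (const Sample& s : samples) {
    if (s.alive && s.allocation_time < last_sweep && s.edge != nullptr) {
      std::cout << "emit sample referenced by edge " << s.edge->id << '\n';
      ++count;
    }
  }
  return count;
}

int main() {
  std::vector<Sample> samples = {{true, 10}, {false, 20}, {true, 30}};
  const std::int64_t last_sweep = 25;
  link_pass(samples, last_sweep);
  std::cout << write_pass(samples, last_sweep) << " events written\n";
}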
@@ -132,13 +132,13 @@ void EventEmitter::link_sample_with_edge(const ObjectSample* sample, EdgeStore* } void EventEmitter::write_event(const ObjectSample* sample, EdgeStore* edge_store) { - assert(sample != NULL, "invariant"); + assert(sample != nullptr, "invariant"); assert(!sample->is_dead(), "invariant"); - assert(edge_store != NULL, "invariant"); - assert(_jfr_thread_local != NULL, "invariant"); + assert(edge_store != nullptr, "invariant"); + assert(_jfr_thread_local != nullptr, "invariant"); const StoredEdge* const edge = edge_store->get(sample); - assert(edge != NULL, "invariant"); + assert(edge != nullptr, "invariant"); assert(edge->pointee() == sample->object(), "invariant"); const traceid object_id = edge_store->get_id(edge); assert(object_id != 0, "invariant"); diff --git a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp index 7effec6132759..ffd973696f150 100644 --- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp +++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,7 +55,7 @@ static GrowableArray* c_heap_allocate_array(int size = initial_array_size) { return new (mtTracing) GrowableArray(size, mtTracing); } -static GrowableArray* unloaded_thread_id_set = NULL; +static GrowableArray* unloaded_thread_id_set = nullptr; class ThreadIdExclusiveAccess : public StackObj { private: @@ -69,7 +69,7 @@ Semaphore ThreadIdExclusiveAccess::_mutex_semaphore(1); static bool has_thread_exited(traceid tid) { assert(tid != 0, "invariant"); - if (unloaded_thread_id_set == NULL) { + if (unloaded_thread_id_set == nullptr) { return false; } ThreadIdExclusiveAccess lock; @@ -78,7 +78,7 @@ static bool has_thread_exited(traceid tid) { static void add_to_unloaded_thread_set(traceid tid) { ThreadIdExclusiveAccess lock; - if (unloaded_thread_id_set == NULL) { + if (unloaded_thread_id_set == nullptr) { unloaded_thread_id_set = c_heap_allocate_array(); } JfrMutablePredicate::test(unloaded_thread_id_set, tid); @@ -93,16 +93,16 @@ void ObjectSampleCheckpoint::on_thread_exit(traceid tid) { void ObjectSampleCheckpoint::clear() { assert(SafepointSynchronize::is_at_safepoint(), "invariant"); - if (unloaded_thread_id_set != NULL) { + if (unloaded_thread_id_set != nullptr) { delete unloaded_thread_id_set; - unloaded_thread_id_set = NULL; + unloaded_thread_id_set = nullptr; } - assert(unloaded_thread_id_set == NULL, "invariant"); + assert(unloaded_thread_id_set == nullptr, "invariant"); } template static void do_samples(ObjectSample* sample, const ObjectSample* end, Processor& processor) { - assert(sample != NULL, "invariant"); + assert(sample != nullptr, "invariant"); while (sample != end) { processor.sample_do(sample); sample = sample->next(); @@ -112,10 +112,10 @@ static void do_samples(ObjectSample* sample, const ObjectSample* end, Processor& template static void iterate_samples(Processor& processor, bool all = false) { ObjectSampler* const sampler = ObjectSampler::sampler(); - assert(sampler != NULL, "invariant"); + assert(sampler != nullptr, "invariant"); ObjectSample* const last = sampler->last(); - assert(last != NULL, "invariant"); - 
do_samples(last, all ? NULL : sampler->last_resolved(), processor); + assert(last != nullptr, "invariant"); + do_samples(last, all ? nullptr : sampler->last_resolved(), processor); } class SampleMarker { @@ -137,8 +137,8 @@ class SampleMarker { }; int ObjectSampleCheckpoint::save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all) { - assert(sampler != NULL, "invariant"); - if (sampler->last() == NULL) { + assert(sampler != nullptr, "invariant"); + if (sampler->last() == nullptr) { return 0; } SampleMarker sample_marker(marker, emit_all ? max_jlong : ObjectSampler::last_sweep()); @@ -163,45 +163,45 @@ class BlobCache { }; JfrBlobHandle BlobCache::get(const ObjectSample* sample) { - assert(sample != NULL, "invariant"); + assert(sample != nullptr, "invariant"); _lookup_id = sample->stack_trace_id(); assert(_lookup_id != 0, "invariant"); BlobEntry* const entry = _table.lookup_only(sample->stack_trace_hash()); - return entry != NULL ? entry->literal() : JfrBlobHandle(); + return entry != nullptr ? entry->literal() : JfrBlobHandle(); } void BlobCache::put(const ObjectSample* sample, const JfrBlobHandle& blob) { - assert(sample != NULL, "invariant"); - assert(_table.lookup_only(sample->stack_trace_hash()) == NULL, "invariant"); + assert(sample != nullptr, "invariant"); + assert(_table.lookup_only(sample->stack_trace_hash()) == nullptr, "invariant"); _lookup_id = sample->stack_trace_id(); assert(_lookup_id != 0, "invariant"); _table.put(sample->stack_trace_hash(), blob); } inline void BlobCache::on_link(const BlobEntry* entry) const { - assert(entry != NULL, "invariant"); + assert(entry != nullptr, "invariant"); assert(entry->id() == 0, "invariant"); entry->set_id(_lookup_id); } inline bool BlobCache::on_equals(uintptr_t hash, const BlobEntry* entry) const { - assert(entry != NULL, "invariant"); + assert(entry != nullptr, "invariant"); assert(entry->hash() == hash, "invariant"); return entry->id() == _lookup_id; } inline void BlobCache::on_unlink(BlobEntry* entry) const { - assert(entry != NULL, "invariant"); + assert(entry != nullptr, "invariant"); } -static GrowableArray* id_set = NULL; +static GrowableArray* id_set = nullptr; static void prepare_for_resolution() { id_set = new GrowableArray(JfrOptionSet::old_object_queue_size()); } static bool stack_trace_precondition(const ObjectSample* sample) { - assert(sample != NULL, "invariant"); + assert(sample != nullptr, "invariant"); return sample->has_stack_trace_id() && !sample->is_dead(); } @@ -227,7 +227,7 @@ class StackTraceBlobInstaller { #ifdef ASSERT static void validate_stack_trace(const ObjectSample* sample, const JfrStackTrace* stack_trace) { assert(!sample->has_stacktrace(), "invariant"); - assert(stack_trace != NULL, "invariant"); + assert(stack_trace != nullptr, "invariant"); assert(stack_trace->hash() == sample->stack_trace_hash(), "invariant"); assert(stack_trace->id() == sample->stack_trace_id(), "invariant"); } @@ -255,7 +255,7 @@ void StackTraceBlobInstaller::install(ObjectSample* sample) { } static void install_stack_traces(const ObjectSampler* sampler) { - assert(sampler != NULL, "invariant"); + assert(sampler != nullptr, "invariant"); const ObjectSample* const last = sampler->last(); if (last != sampler->last_resolved()) { ResourceMark rm; @@ -266,7 +266,7 @@ static void install_stack_traces(const ObjectSampler* sampler) { } void ObjectSampleCheckpoint::on_rotation(const ObjectSampler* sampler) { - assert(sampler != NULL, "invariant"); + assert(sampler != nullptr, "invariant"); 
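The do_samples/iterate_samples helpers above apply an arbitrary processor functor to the samples between last() and last_resolved(); that is how SampleMarker and the stack trace blob installer are driven. A compact illustration of the same pattern over a simple singly linked list, with placeholder types rather than the JFR ones:

#include <iostream>

struct Sample {
  long allocation_time;
  Sample* next;
};

// Walks [sample, end) and hands each element to the processor,
// mirroring the do_samples() template above.
template <typename Processor>
static void do_samples(Sample* sample, const Sample* end, Processor& processor) {
  while (sample != end) {
    processor.sample_do(sample);
    sample = sample->next;
  }
}

// Counts samples older than a cutoff, similar in spirit to SampleMarker.
struct CountOlderThan {
  long cutoff;
  int count = 0;
  void sample_do(Sample* s) {
    if (s->allocation_time < cutoff) {
      ++count;
    }
  }
};

int main() {
  Sample s3{30, nullptr}, s2{20, &s3}, s1{10, &s2};
  CountOlderThan processor{25};
  do_samples(&s1, nullptr, processor);   // nullptr end == process all samples
  std::cout << processor.count << " samples older than cutoff\n";
}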
assert(LeakProfiler::is_running(), "invariant"); JavaThread* const thread = JavaThread::current(); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(thread);) @@ -284,23 +284,23 @@ static bool is_klass_unloaded(traceid klass_id) { static bool is_processed(traceid method_id) { assert(method_id != 0, "invariant"); - assert(id_set != NULL, "invariant"); + assert(id_set != nullptr, "invariant"); return JfrMutablePredicate::test(id_set, method_id); } void ObjectSampleCheckpoint::add_to_leakp_set(const InstanceKlass* ik, traceid method_id) { - assert(ik != NULL, "invariant"); + assert(ik != nullptr, "invariant"); if (is_processed(method_id) || is_klass_unloaded(JfrMethodLookup::klass_id(method_id))) { return; } const Method* const method = JfrMethodLookup::lookup(ik, method_id); - assert(method != NULL, "invariant"); + assert(method != nullptr, "invariant"); assert(method->method_holder() == ik, "invariant"); JfrTraceId::load_leakp(ik, method); } void ObjectSampleCheckpoint::write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer) { - assert(trace != NULL, "invariant"); + assert(trace != nullptr, "invariant"); // JfrStackTrace writer.write(trace->id()); writer.write((u1)!trace->_reached_root); @@ -341,7 +341,7 @@ static void write_stacktrace_blob(const ObjectSample* sample, JfrCheckpointWrite } static void write_blobs(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) { - assert(sample != NULL, "invariant"); + assert(sample != nullptr, "invariant"); write_stacktrace_blob(sample, writer, reset); write_thread_blob(sample, writer, reset); write_type_set_blob(sample, writer, reset); @@ -378,9 +378,9 @@ static void write_sample_blobs(const ObjectSampler* sampler, bool emit_all, Thre } void ObjectSampleCheckpoint::write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) { - assert(sampler != NULL, "invariant"); - assert(edge_store != NULL, "invariant"); - assert(thread != NULL, "invariant"); + assert(sampler != nullptr, "invariant"); + assert(edge_store != nullptr, "invariant"); + assert(thread != nullptr, "invariant"); write_sample_blobs(sampler, emit_all, thread); // write reference chains if (!edge_store->is_empty()) { @@ -430,7 +430,7 @@ void ObjectSampleCheckpoint::on_type_set(JfrCheckpointWriter& writer) { assert(LeakProfiler::is_running(), "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(JavaThread::current());) const ObjectSample* last = ObjectSampler::sampler()->last(); - if (writer.has_data() && last != NULL) { + if (writer.has_data() && last != nullptr) { save_type_set_blob(writer); install_type_set_blobs(); ObjectSampler::sampler()->set_last_resolved(last); @@ -440,7 +440,7 @@ void ObjectSampleCheckpoint::on_type_set(JfrCheckpointWriter& writer) { void ObjectSampleCheckpoint::on_type_set_unload(JfrCheckpointWriter& writer) { assert_locked_or_safepoint(ClassLoaderDataGraph_lock); assert(LeakProfiler::is_running(), "invariant"); - if (writer.has_data() && ObjectSampler::sampler()->last() != NULL) { + if (writer.has_data() && ObjectSampler::sampler()->last() != nullptr) { save_type_set_blob(writer, true); } } diff --git a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleDescription.cpp b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleDescription.cpp index e15bd5c70b888..eb898318c9709 100644 --- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleDescription.cpp +++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleDescription.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 
2017, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ #include "runtime/javaThread.hpp" #include "utilities/ostream.hpp" -static Symbol* symbol_size = NULL; +static Symbol* symbol_size = nullptr; ObjectDescriptionBuilder::ObjectDescriptionBuilder() { reset(); @@ -78,11 +78,11 @@ void ObjectDescriptionBuilder::print_description(outputStream* out) { const char* ObjectDescriptionBuilder::description() { if (_buffer[0] == '\0') { - return NULL; + return nullptr; } const size_t len = strlen(_buffer); char* copy = NEW_RESOURCE_ARRAY(char, len + 1); - assert(copy != NULL, "invariant"); + assert(copy != nullptr, "invariant"); strncpy(copy, _buffer, len + 1); return copy; } @@ -92,7 +92,7 @@ ObjectSampleDescription::ObjectSampleDescription(oop object) : } void ObjectSampleDescription::ensure_initialized() { - if (symbol_size == NULL) { + if (symbol_size == nullptr) { symbol_size = SymbolTable::new_permanent_symbol("size"); } } @@ -150,13 +150,13 @@ void ObjectSampleDescription::write_object_details() { void ObjectSampleDescription::write_class_name() { assert(_object->is_a(vmClasses::Class_klass()), "invariant"); const Klass* const k = java_lang_Class::as_Klass(_object); - if (k == NULL) { + if (k == nullptr) { // might represent a primitive const Klass* const ak = java_lang_Class::array_klass_acquire(_object); - // If ak is NULL, this is most likely a mirror associated with a + // If ak is null, this is most likely a mirror associated with a // jvmti redefine/retransform scratch klass. We can't get any additional // information from it. 
- if (ak != NULL) { + if (ak != nullptr) { write_text(type2name(java_lang_Class::primitive_type(_object))); } return; @@ -168,7 +168,7 @@ void ObjectSampleDescription::write_class_name() { return; } const Symbol* name = ik->name(); - if (name != NULL) { + if (name != nullptr) { write_text("Class Name: "); write_text(name->as_klass_external_name()); } @@ -178,7 +178,7 @@ void ObjectSampleDescription::write_class_name() { void ObjectSampleDescription::write_thread_group_name() { assert(_object->is_a(vmClasses::ThreadGroup_klass()), "invariant"); const char* tg_name = java_lang_ThreadGroup::name(_object); - if (tg_name != NULL) { + if (tg_name != nullptr) { write_text("Thread Group: "); write_text(tg_name); } @@ -187,9 +187,9 @@ void ObjectSampleDescription::write_thread_group_name() { void ObjectSampleDescription::write_thread_name() { assert(_object->is_a(vmClasses::Thread_klass()), "invariant"); oop name = java_lang_Thread::name(_object); - if (name != NULL) { + if (name != nullptr) { char* p = java_lang_String::as_utf8_string(name); - if (p != NULL) { + if (p != nullptr) { write_text("Thread Name: "); write_text(p); } @@ -208,7 +208,7 @@ bool ObjectSampleDescription::read_int_size(jint* result_size) { Klass* klass = _object->klass(); if (klass->is_instance_klass()) { InstanceKlass* ik = InstanceKlass::cast(klass); - if (ik->find_field(symbol_size, vmSymbols::int_signature(), false, &fd) != NULL) { + if (ik->find_field(symbol_size, vmSymbols::int_signature(), false, &fd) != nullptr) { jint size = _object->int_field(fd.offset()); *result_size = size; return true; diff --git a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp index eeed5c1070979..7c5c88ef6ca62 100644 --- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp +++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -58,7 +58,7 @@ class ObjectSampleFieldInfo : public ResourceObj { public: const Symbol* _field_name_symbol; jshort _field_modifiers; - ObjectSampleFieldInfo() : _field_name_symbol(NULL), _field_modifiers(0) {} + ObjectSampleFieldInfo() : _field_name_symbol(nullptr), _field_modifiers(0) {} }; class ObjectSampleRootDescriptionData { @@ -67,8 +67,8 @@ class ObjectSampleRootDescriptionData { const char* _description; OldObjectRoot::System _system; OldObjectRoot::Type _type; - ObjectSampleRootDescriptionData() : _root_edge(NULL), - _description(NULL), + ObjectSampleRootDescriptionData() : _root_edge(nullptr), + _description(nullptr), _system(OldObjectRoot::_system_undetermined), _type(OldObjectRoot::_type_undetermined) {} }; @@ -94,26 +94,26 @@ class SampleSet : public ResourceObj { private: GrowableArray* _storage; public: - SampleSet() : _storage(NULL) {} + SampleSet() : _storage(nullptr) {} traceid store(Data data) { - assert(data != NULL, "invariant"); - if (_storage == NULL) { + assert(data != nullptr, "invariant"); + if (_storage == nullptr) { _storage = new GrowableArray(initial_storage_size); } - assert(_storage != NULL, "invariant"); + assert(_storage != nullptr, "invariant"); assert(_storage->find(data) == -1, "invariant"); _storage->append(data); return data->_id; } size_t size() const { - return _storage != NULL ? (size_t)_storage->length() : 0; + return _storage != nullptr ? (size_t)_storage->length() : 0; } template void iterate(Functor& functor) { - if (_storage != NULL) { + if (_storage != nullptr) { for (int i = 0; i < _storage->length(); ++i) { functor(_storage->at(i)); } @@ -147,30 +147,30 @@ class FieldTable : public ResourceObj { const ObjectSampleFieldInfo* _lookup; void on_link(FieldInfoEntry* entry) { - assert(entry != NULL, "invariant"); + assert(entry != nullptr, "invariant"); entry->set_id(++_field_id_counter); } bool on_equals(uintptr_t hash, const FieldInfoEntry* entry) { assert(hash == entry->hash(), "invariant"); - assert(_lookup != NULL, "invariant"); + assert(_lookup != nullptr, "invariant"); return entry->literal()->_field_modifiers == _lookup->_field_modifiers; } void on_unlink(FieldInfoEntry* entry) { - assert(entry != NULL, "invariant"); + assert(entry != nullptr, "invariant"); // nothing } public: - FieldTable() : _table(new FieldInfoTable(this)), _lookup(NULL) {} + FieldTable() : _table(new FieldInfoTable(this)), _lookup(nullptr) {} ~FieldTable() { - assert(_table != NULL, "invariant"); + assert(_table != nullptr, "invariant"); delete _table; } traceid store(const ObjectSampleFieldInfo* field_info) { - assert(field_info != NULL, "invariant"); + assert(field_info != nullptr, "invariant"); _lookup = field_info; const FieldInfoEntry& entry = _table->lookup_put(field_info->_field_name_symbol->identity_hash(), field_info); return entry.id(); @@ -193,18 +193,18 @@ typedef SampleSet RefInfo; typedef SampleSet ArrayInfo; typedef SampleSet RootDescriptionInfo; -static SampleInfo* sample_infos = NULL; -static RefInfo* ref_infos = NULL; -static ArrayInfo* array_infos = NULL; -static FieldTable* field_infos = NULL; -static RootDescriptionInfo* root_infos = NULL; +static SampleInfo* sample_infos = nullptr; +static RefInfo* ref_infos = nullptr; +static ArrayInfo* array_infos = nullptr; +static FieldTable* field_infos = nullptr; +static RootDescriptionInfo* root_infos = nullptr; int __write_sample_info__(JfrCheckpointWriter* writer, const void* si) { - assert(writer != NULL, "invariant"); - 
assert(si != NULL, "invariant"); + assert(writer != nullptr, "invariant"); + assert(si != nullptr, "invariant"); const OldObjectSampleInfo* const oosi = (const OldObjectSampleInfo*)si; oop object = oosi->_data._object; - assert(object != NULL, "invariant"); + assert(object != nullptr, "invariant"); writer->write(oosi->_id); writer->write(cast_from_oop(object)); writer->write(const_cast(object->klass())); @@ -218,15 +218,15 @@ typedef JfrTypeWriterImplHost typedef JfrTypeWriterHost SampleWriter; static void write_sample_infos(JfrCheckpointWriter& writer) { - if (sample_infos != NULL) { + if (sample_infos != nullptr) { SampleWriter sw(&writer); sample_infos->iterate(sw); } } int __write_reference_info__(JfrCheckpointWriter* writer, const void* ri) { - assert(writer != NULL, "invariant"); - assert(ri != NULL, "invariant"); + assert(writer != nullptr, "invariant"); + assert(ri != nullptr, "invariant"); const ReferenceInfo* const ref_info = (const ReferenceInfo*)ri; writer->write(ref_info->_id); writer->write(ref_info->_data._array_info_id); @@ -240,15 +240,15 @@ typedef JfrTypeWriterImplHost Re typedef JfrTypeWriterHost ReferenceWriter; static void write_reference_infos(JfrCheckpointWriter& writer) { - if (ref_infos != NULL) { + if (ref_infos != nullptr) { ReferenceWriter rw(&writer); ref_infos->iterate(rw); } } int __write_array_info__(JfrCheckpointWriter* writer, const void* ai) { - assert(writer != NULL, "invariant"); - assert(ai != NULL, "invariant"); + assert(writer != nullptr, "invariant"); + assert(ai != nullptr, "invariant"); const ObjectSampleArrayInfo* const osai = (const ObjectSampleArrayInfo*)ai; writer->write(osai->_id); writer->write(osai->_data._array_size); @@ -260,13 +260,13 @@ static traceid get_array_info_id(const Edge& edge, traceid id) { if (edge.is_root() || !EdgeUtils::is_array_element(edge)) { return 0; } - if (array_infos == NULL) { + if (array_infos == nullptr) { array_infos = new ArrayInfo(); } - assert(array_infos != NULL, "invariant"); + assert(array_infos != nullptr, "invariant"); ObjectSampleArrayInfo* const osai = new ObjectSampleArrayInfo(); - assert(osai != NULL, "invariant"); + assert(osai != nullptr, "invariant"); osai->_id = id; osai->_data._array_size = EdgeUtils::array_size(edge); osai->_data._array_index = EdgeUtils::array_index(edge); @@ -277,15 +277,15 @@ typedef JfrTypeWriterImplHost ArrayWriter; static void write_array_infos(JfrCheckpointWriter& writer) { - if (array_infos != NULL) { + if (array_infos != nullptr) { ArrayWriter aw(&writer); array_infos->iterate(aw); } } int __write_field_info__(JfrCheckpointWriter* writer, const void* fi) { - assert(writer != NULL, "invariant"); - assert(fi != NULL, "invariant"); + assert(writer != nullptr, "invariant"); + assert(fi != nullptr, "invariant"); const FieldTable::FieldInfoEntry* field_info_entry = (const FieldTable::FieldInfoEntry*)fi; writer->write(field_info_entry->id()); const ObjectSampleFieldInfo* const osfi = field_info_entry->literal(); @@ -301,15 +301,15 @@ static traceid get_field_info_id(const Edge& edge) { assert(!EdgeUtils::is_array_element(edge), "invariant"); jshort field_modifiers; const Symbol* const field_name_symbol = EdgeUtils::field_name(edge, &field_modifiers); - if (field_name_symbol == NULL) { + if (field_name_symbol == nullptr) { return 0; } - if (field_infos == NULL) { + if (field_infos == nullptr) { field_infos = new FieldTable(); } - assert(field_infos != NULL, "invariant"); + assert(field_infos != nullptr, "invariant"); ObjectSampleFieldInfo* const osfi = new 
ObjectSampleFieldInfo(); - assert(osfi != NULL, "invariant"); + assert(osfi != nullptr, "invariant"); osfi->_field_name_symbol = field_name_symbol; osfi->_field_modifiers = field_modifiers; return field_infos->store(osfi); @@ -319,17 +319,17 @@ typedef JfrTypeWriterImplHost FieldWriter; static void write_field_infos(JfrCheckpointWriter& writer) { - if (field_infos != NULL) { + if (field_infos != nullptr) { FieldWriter fw(&writer); field_infos->iterate(fw); } } static const char* description(const ObjectSampleRootDescriptionInfo* osdi) { - assert(osdi != NULL, "invariant"); + assert(osdi != nullptr, "invariant"); - if (osdi->_data._description == NULL) { - return NULL; + if (osdi->_data._description == nullptr) { + return nullptr; } ObjectDescriptionBuilder description; @@ -341,8 +341,8 @@ static const char* description(const ObjectSampleRootDescriptionInfo* osdi) { } int __write_root_description_info__(JfrCheckpointWriter* writer, const void* di) { - assert(writer != NULL, "invariant"); - assert(di != NULL, "invariant"); + assert(writer != nullptr, "invariant"); + assert(di != nullptr, "invariant"); const ObjectSampleRootDescriptionInfo* const osdi = (const ObjectSampleRootDescriptionInfo*)di; writer->write(osdi->_id); writer->write(description(osdi)); @@ -353,10 +353,10 @@ int __write_root_description_info__(JfrCheckpointWriter* writer, const void* di) static traceid get_gc_root_description_info_id(const Edge& edge, traceid id) { assert(edge.is_root(), "invariant"); - if (root_infos == NULL) { + if (root_infos == nullptr) { root_infos = new RootDescriptionInfo(); } - assert(root_infos != NULL, "invariant"); + assert(root_infos != nullptr, "invariant"); ObjectSampleRootDescriptionInfo* const oodi = new ObjectSampleRootDescriptionInfo(); oodi->_id = id; oodi->_data._root_edge = &edge; @@ -381,7 +381,7 @@ static int find_sorted(const RootCallbackInfo& callback_info, const GrowableArray* arr, int length, bool& found) { - assert(arr != NULL, "invariant"); + assert(arr != nullptr, "invariant"); assert(length >= 0, "invariant"); assert(length <= arr->length(), "invariant"); @@ -417,14 +417,14 @@ class RootResolutionSet : public ResourceObj, public RootCallback { } bool in_set_address_range(const RootCallbackInfo& callback_info) const { - assert(callback_info._low == NULL, "invariant"); + assert(callback_info._low == nullptr, "invariant"); const uintptr_t addr = (uintptr_t)callback_info._high; return low() <= addr && high() >= addr; } int compare_to_range(const RootCallbackInfo& callback_info) const { - assert(callback_info._high != NULL, "invariant"); - assert(callback_info._low != NULL, "invariant"); + assert(callback_info._high != nullptr, "invariant"); + assert(callback_info._low != nullptr, "invariant"); for (int i = 0; i < _unresolved_roots->length(); ++i) { const uintptr_t ref_addr = _unresolved_roots->at(i)->_data._root_edge->reference().addr(); @@ -436,7 +436,7 @@ class RootResolutionSet : public ResourceObj, public RootCallback { } int exact(const RootCallbackInfo& callback_info) const { - assert(callback_info._high != NULL, "invariant"); + assert(callback_info._high != nullptr, "invariant"); assert(in_set_address_range(callback_info), "invariant"); bool found; @@ -450,7 +450,7 @@ class RootResolutionSet : public ResourceObj, public RootCallback { ObjectSampleRootDescriptionInfo* const desc = const_cast(_unresolved_roots->at(idx)); - assert(desc != NULL, "invariant"); + assert(desc != nullptr, "invariant"); assert((uintptr_t)callback_info._high == 
desc->_data._root_edge->reference().addr(), "invariant"); desc->_data._system = callback_info._system; @@ -458,7 +458,7 @@ class RootResolutionSet : public ResourceObj, public RootCallback { if (callback_info._system == OldObjectRoot::_threads) { const JavaThread* jt = (const JavaThread*)callback_info._context; - assert(jt != NULL, "invariant"); + assert(jt != nullptr, "invariant"); desc->_data._description = jt->name(); } @@ -467,13 +467,13 @@ class RootResolutionSet : public ResourceObj, public RootCallback { } public: - RootResolutionSet(RootDescriptionInfo* info) : _unresolved_roots(NULL) { - assert(info != NULL, "invariant"); + RootResolutionSet(RootDescriptionInfo* info) : _unresolved_roots(nullptr) { + assert(info != nullptr, "invariant"); // construct a sorted copy const GrowableArray& info_storage = info->storage(); const int length = info_storage.length(); _unresolved_roots = new GrowableArray(length); - assert(_unresolved_roots != NULL, "invariant"); + assert(_unresolved_roots != nullptr, "invariant"); for (int i = 0; i < length; ++i) { _unresolved_roots->insert_sorted<_root_desc_compare_>(info_storage.at(i)); @@ -481,14 +481,14 @@ class RootResolutionSet : public ResourceObj, public RootCallback { } bool process(const RootCallbackInfo& callback_info) { - if (NULL == callback_info._low) { + if (nullptr == callback_info._low) { if (in_set_address_range(callback_info)) { const int idx = exact(callback_info); return idx == -1 ? false : resolve_root(callback_info, idx); } return false; } - assert(callback_info._low != NULL, "invariant"); + assert(callback_info._low != nullptr, "invariant"); const int idx = compare_to_range(callback_info); return idx == -1 ? false : resolve_root(callback_info, idx); } @@ -505,7 +505,7 @@ class RootResolutionSet : public ResourceObj, public RootCallback { }; static void write_root_descriptors(JfrCheckpointWriter& writer) { - if (root_infos != NULL) { + if (root_infos != nullptr) { // resolve roots RootResolutionSet rrs(root_infos); RootResolver::resolve(rrs); @@ -516,28 +516,28 @@ static void write_root_descriptors(JfrCheckpointWriter& writer) { } static void add_old_object_sample_info(const StoredEdge* current, traceid id) { - assert(current != NULL, "invariant"); - if (sample_infos == NULL) { + assert(current != nullptr, "invariant"); + if (sample_infos == nullptr) { sample_infos = new SampleInfo(); } - assert(sample_infos != NULL, "invariant"); + assert(sample_infos != nullptr, "invariant"); OldObjectSampleInfo* const oosi = new OldObjectSampleInfo(); - assert(oosi != NULL, "invariant"); + assert(oosi != nullptr, "invariant"); oosi->_id = id; oosi->_data._object = current->pointee(); - oosi->_data._reference_id = current->parent() == NULL ? 0 : id; + oosi->_data._reference_id = current->parent() == nullptr ? 0 : id; sample_infos->store(oosi); } static void add_reference_info(const StoredEdge* current, traceid id, traceid parent_id) { - assert(current != NULL, "invariant"); - if (ref_infos == NULL) { + assert(current != nullptr, "invariant"); + if (ref_infos == nullptr) { ref_infos = new RefInfo(); } - assert(ref_infos != NULL, "invariant"); + assert(ref_infos != nullptr, "invariant"); ReferenceInfo* const ri = new ReferenceInfo(); - assert(ri != NULL, "invariant"); + assert(ri != nullptr, "invariant"); ri->_id = id; ri->_data._array_info_id = current->is_skip_edge() ? 
0 : get_array_info_id(*current, id); @@ -548,22 +548,22 @@ static void add_reference_info(const StoredEdge* current, traceid id, traceid pa } static bool is_gc_root(const StoredEdge* current) { - assert(current != NULL, "invariant"); - return current->parent() == NULL && current->gc_root_id() != 0; + assert(current != nullptr, "invariant"); + return current->parent() == nullptr && current->gc_root_id() != 0; } static traceid add_gc_root_info(const StoredEdge* root, traceid id) { - assert(root != NULL, "invariant"); + assert(root != nullptr, "invariant"); assert(is_gc_root(root), "invariant"); return get_gc_root_description_info_id(*root, id); } void ObjectSampleWriter::write(const StoredEdge* edge) { - assert(edge != NULL, "invariant"); + assert(edge != nullptr, "invariant"); const traceid id = _store->get_id(edge); add_old_object_sample_info(edge, id); const StoredEdge* const parent = edge->parent(); - if (parent != NULL) { + if (parent != nullptr) { add_reference_info(edge, id, _store->get_id(parent)); return; } @@ -609,14 +609,14 @@ static void register_serializers() { ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store) : _writer(writer), _store(store) { - assert(store != NULL, "invariant"); + assert(store != nullptr, "invariant"); assert(!store->is_empty(), "invariant"); register_serializers(); - assert(field_infos == NULL, "Invariant"); - assert(sample_infos == NULL, "Invariant"); - assert(ref_infos == NULL, "Invariant"); - assert(array_infos == NULL, "Invariant"); - assert(root_infos == NULL, "Invariant"); + assert(field_infos == nullptr, "Invariant"); + assert(sample_infos == nullptr, "Invariant"); + assert(ref_infos == nullptr, "Invariant"); + assert(array_infos == nullptr, "Invariant"); + assert(root_infos == nullptr, "Invariant"); } ObjectSampleWriter::~ObjectSampleWriter() { @@ -627,14 +627,14 @@ ObjectSampleWriter::~ObjectSampleWriter() { write_root_descriptors(_writer); // Following are RA allocated, memory will be released automatically. - if (field_infos != NULL) { + if (field_infos != nullptr) { field_infos->~FieldTable(); - field_infos = NULL; + field_infos = nullptr; } - sample_infos = NULL; - ref_infos = NULL; - array_infos = NULL; - root_infos = NULL; + sample_infos = nullptr; + ref_infos = nullptr; + array_infos = nullptr; + root_infos = nullptr; } bool ObjectSampleWriter::operator()(StoredEdge& e) { diff --git a/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp b/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp index eb5c112abef0c..9627259e26473 100644 --- a/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp +++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
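The objectSampleWriter changes above keep the file-scope tables (sample_infos, ref_infos, array_infos, field_infos, root_infos) as null pointers that are allocated on first use and reset when the writer is destroyed. The sketch below shows that lazily created, id-returning table shape with std::vector standing in for GrowableArray; unlike the resource-area allocation noted in the real code, the sketch frees the table explicitly, and all names are placeholders.

#include <cstdint>
#include <iostream>
#include <vector>

using traceid = std::uint64_t;

struct SampleInfo {
  traceid id;
  int payload;
};

// Starts out null and is created on first use, like sample_infos above.
static std::vector<const SampleInfo*>* sample_infos = nullptr;

static traceid store_sample_info(const SampleInfo* info) {
  if (sample_infos == nullptr) {
    sample_infos = new std::vector<const SampleInfo*>();
  }
  sample_infos->push_back(info);
  return info->id;   // callers refer to the stored entry by its id
}

// The writer drains the table on destruction and resets the pointer,
// so the next emission starts from a clean slate.
class SampleWriter {
 public:
  ~SampleWriter() {
    if (sample_infos != nullptr) {
      for (const SampleInfo* info : *sample_infos) {
        std::cout << "write sample info " << info->id << '\n';
      }
      delete sample_infos;
      sample_infos = nullptr;
    }
  }
};

int main() {
  SampleInfo a{1, 100}, b{2, 200};
  {
    SampleWriter writer;
    store_sample_info(&a);
    store_sample_info(&b);
  }                       // table written and reset here
  std::cout << (sample_infos == nullptr ? "table reset\n" : "table leaked\n");
}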
* * This code is free software; you can redistribute it and/or modify it @@ -60,8 +60,8 @@ class ReferenceLocateClosure : public OopClosure { const void* context) : _callback(callback), _info(), _complete(false) { - _info._high = NULL; - _info._low = NULL; + _info._high = nullptr; + _info._low = nullptr; _info._system = system; _info._type = type; _info._context = context; @@ -107,9 +107,9 @@ class ReferenceToRootClosure : public StackObj { ReferenceToRootClosure(RootCallback& callback) : _callback(callback), _info(), _complete(false) { - _info._high = NULL; - _info._low = NULL; - _info._context = NULL; + _info._high = nullptr; + _info._low = nullptr; + _info._context = nullptr; _info._system = OldObjectRoot::_system_undetermined; _info._type = OldObjectRoot::_type_undetermined; @@ -124,7 +124,7 @@ class ReferenceToRootClosure : public StackObj { bool ReferenceToRootClosure::do_cldg_roots() { assert(!complete(), "invariant"); - ReferenceLocateClosure rlc(_callback, OldObjectRoot::_class_loader_data, OldObjectRoot::_type_undetermined, NULL); + ReferenceLocateClosure rlc(_callback, OldObjectRoot::_class_loader_data, OldObjectRoot::_type_undetermined, nullptr); CLDToOopClosure cldt_closure(&rlc, ClassLoaderData::_claim_none); ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure); return rlc.complete(); @@ -139,7 +139,7 @@ bool ReferenceToRootClosure::do_oop_storage_roots() { OldObjectRoot::_global_jni_handle : OldObjectRoot::_global_oop_handle; OldObjectRoot::System system = OldObjectRoot::System(OldObjectRoot::_strong_oop_storage_set_first + Range().index(id)); - ReferenceLocateClosure rlc(_callback, system, type, NULL); + ReferenceLocateClosure rlc(_callback, system, type, nullptr); oop_storage->oops_do(&rlc); if (rlc.complete()) { return true; @@ -195,7 +195,7 @@ class ReferenceToThreadRootClosure : public StackObj { }; bool ReferenceToThreadRootClosure::do_thread_handle_area(JavaThread* jt) { - assert(jt != NULL, "invariant"); + assert(jt != nullptr, "invariant"); assert(!complete(), "invariant"); ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_handle_area, jt); jt->handle_area()->oops_do(&rcl); @@ -203,7 +203,7 @@ bool ReferenceToThreadRootClosure::do_thread_handle_area(JavaThread* jt) { } bool ReferenceToThreadRootClosure::do_thread_jni_handles(JavaThread* jt) { - assert(jt != NULL, "invariant"); + assert(jt != nullptr, "invariant"); assert(!complete(), "invariant"); ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_local_jni_handle, jt); @@ -212,7 +212,7 @@ bool ReferenceToThreadRootClosure::do_thread_jni_handles(JavaThread* jt) { } bool ReferenceToThreadRootClosure::do_thread_stack_fast(JavaThread* jt) { - assert(jt != NULL, "invariant"); + assert(jt != nullptr, "invariant"); assert(!complete(), "invariant"); if (_callback.entries() == 0) { @@ -221,8 +221,8 @@ bool ReferenceToThreadRootClosure::do_thread_stack_fast(JavaThread* jt) { } RootCallbackInfo info; - info._high = NULL; - info._low = NULL; + info._high = nullptr; + info._low = nullptr; info._context = jt; info._system = OldObjectRoot::_threads; info._type = OldObjectRoot::_stack_variable; @@ -242,7 +242,7 @@ bool ReferenceToThreadRootClosure::do_thread_stack_fast(JavaThread* jt) { } bool ReferenceToThreadRootClosure::do_thread_stack_detailed(JavaThread* jt) { - assert(jt != NULL, "invariant"); + assert(jt != nullptr, "invariant"); assert(!complete(), "invariant"); ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_stack_variable, jt); 
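The root resolver above builds a small locating closure per root area (class loader data, the strong OopStorage sets, threads) and stops scanning as soon as one closure reports completion. A loose sketch of that early-exit scan follows, with a plain address comparison standing in for the real callback protocol and all names invented for illustration.

// Sketch of the "locate a reference, then stop" pattern used by the root
// resolver: each root area is scanned with a locator until one scan
// reports completion.
#include <iostream>
#include <vector>

struct Locator {
  const void* wanted;               // address of the reference being resolved
  const char* found_in = nullptr;
  bool complete() const { return found_in != nullptr; }
  void scan(const char* area_name, const std::vector<const void*>& refs) {
    for (const void* ref : refs) {
      if (ref == wanted) {
        found_in = area_name;       // record which root system held it
        return;
      }
    }
  }
};

int main() {
  int slot_a = 0, slot_b = 0;
  std::vector<const void*> cldg_roots   = {&slot_a};
  std::vector<const void*> thread_roots = {&slot_b};

  Locator locator{&slot_b};
  locator.scan("class loader data", cldg_roots);
  if (!locator.complete()) {        // only keep scanning while unresolved
    locator.scan("threads", thread_roots);
  }
  std::cout << "reference found in: "
            << (locator.found_in ? locator.found_in : "unknown") << '\n';
}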
@@ -250,7 +250,7 @@ bool ReferenceToThreadRootClosure::do_thread_stack_detailed(JavaThread* jt) { if (jt->has_last_Java_frame()) { // Traverse the monitor chunks MonitorChunk* chunk = jt->monitor_chunks(); - for (; chunk != NULL; chunk = chunk->next()) { + for (; chunk != nullptr; chunk = chunk->next()) { chunk->oops_do(&rcl); } @@ -260,7 +260,7 @@ bool ReferenceToThreadRootClosure::do_thread_stack_detailed(JavaThread* jt) { // Traverse the execution stack for (StackFrameStream fst(jt, true /* update */, true /* process_frames */); !fst.is_done(); fst.next()) { - fst.current()->oops_do(&rcl, NULL, fst.register_map()); + fst.current()->oops_do(&rcl, nullptr, fst.register_map()); } } // last java frame @@ -270,7 +270,7 @@ bool ReferenceToThreadRootClosure::do_thread_stack_detailed(JavaThread* jt) { } GrowableArrayView* const list = JvmtiDeferredUpdates::deferred_locals(jt); - if (list != NULL) { + if (list != nullptr) { for (int i = 0; i < list->length(); i++) { list->at(i)->oops_do(&rcl); } @@ -290,24 +290,24 @@ bool ReferenceToThreadRootClosure::do_thread_stack_detailed(JavaThread* jt) { */ JvmtiThreadState* const jvmti_thread_state = jt->jvmti_thread_state(); - if (jvmti_thread_state != NULL) { - jvmti_thread_state->oops_do(&rcl, NULL); + if (jvmti_thread_state != nullptr) { + jvmti_thread_state->oops_do(&rcl, nullptr); } return rcl.complete(); } bool ReferenceToThreadRootClosure::do_java_threads_oops(JavaThread* jt) { - assert(jt != NULL, "invariant"); + assert(jt != nullptr, "invariant"); assert(!complete(), "invariant"); ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_global_jni_handle, jt); - jt->oops_do(&rcl, NULL); + jt->oops_do(&rcl, nullptr); return rcl.complete(); } bool ReferenceToThreadRootClosure::do_thread_roots(JavaThread* jt) { - assert(jt != NULL, "invariant"); + assert(jt != nullptr, "invariant"); if (do_thread_stack_fast(jt)) { _complete = true; diff --git a/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp b/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp index 49c8d027168da..895eafc44099e 100644 --- a/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp +++ b/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -84,14 +84,14 @@ void LeakProfiler::emit_events(int64_t cutoff_ticks, bool emit_all, bool skip_bf } // exclusive access to object sampler instance ObjectSampler* const sampler = ObjectSampler::acquire(); - assert(sampler != NULL, "invariant"); + assert(sampler != nullptr, "invariant"); EventEmitter::emit(sampler, cutoff_ticks, emit_all, skip_bfs); ObjectSampler::release(); } void LeakProfiler::sample(HeapWord* object, size_t size, JavaThread* thread) { assert(is_running(), "invariant"); - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); assert(thread->thread_state() == _thread_in_vm, "invariant"); // exclude compiler threads diff --git a/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.cpp b/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.cpp index b9b6aca862acb..492b7fd413161 100644 --- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.cpp +++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,7 +40,7 @@ const oop ObjectSample::object() const { } bool ObjectSample::is_dead() const { - return _object.peek() == NULL; + return _object.peek() == nullptr; } const oop* ObjectSample::object_addr() const { diff --git a/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp b/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp index 359657d60fc89..734aaea3255ce 100644 --- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp +++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -69,8 +69,8 @@ class ObjectSample : public JfrCHeapObj { void reset(); public: - ObjectSample() : _next(NULL), - _previous(NULL), + ObjectSample() : _next(nullptr), + _previous(nullptr), _stacktrace(), _thread(), _type_set(), diff --git a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp index 55002b3ccafec..311c54becd8e0 100644 --- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp +++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,13 +57,13 @@ static bool volatile _dead_samples = false; // The OopStorage instance is used to hold weak references to sampled objects. // It is constructed and registered during VM initialization. This is a singleton // that persist independent of the state of the ObjectSampler. 
-static OopStorage* _oop_storage = NULL; +static OopStorage* _oop_storage = nullptr; OopStorage* ObjectSampler::oop_storage() { return _oop_storage; } // Callback invoked by the GC after an iteration over the oop storage // that may have cleared dead referents. num_dead is the number of entries -// already NULL or cleared by the iteration. +// already nullptr or cleared by the iteration. void ObjectSampler::oop_storage_gc_notification(size_t num_dead) { if (num_dead != 0) { // The ObjectSampler instance may have already been cleaned or a new @@ -76,15 +76,15 @@ void ObjectSampler::oop_storage_gc_notification(size_t num_dead) { bool ObjectSampler::create_oop_storage() { _oop_storage = OopStorageSet::create_weak("Weak JFR Old Object Samples", mtTracing); - assert(_oop_storage != NULL, "invariant"); + assert(_oop_storage != nullptr, "invariant"); _oop_storage->register_num_dead_callback(&oop_storage_gc_notification); return true; } -static ObjectSampler* _instance = NULL; +static ObjectSampler* _instance = nullptr; static ObjectSampler& instance() { - assert(_instance != NULL, "invariant"); + assert(_instance != nullptr, "invariant"); return *_instance; } @@ -100,22 +100,22 @@ ObjectSampler::ObjectSampler(size_t size) : ObjectSampler::~ObjectSampler() { delete _priority_queue; - _priority_queue = NULL; + _priority_queue = nullptr; delete _list; - _list = NULL; + _list = nullptr; } bool ObjectSampler::create(size_t size) { assert(SafepointSynchronize::is_at_safepoint(), "invariant"); - assert(_oop_storage != NULL, "should be already created"); + assert(_oop_storage != nullptr, "should be already created"); ObjectSampleCheckpoint::clear(); - assert(_instance == NULL, "invariant"); + assert(_instance == nullptr, "invariant"); _instance = new ObjectSampler(size); - return _instance != NULL; + return _instance != nullptr; } bool ObjectSampler::is_created() { - return _instance != NULL; + return _instance != nullptr; } ObjectSampler* ObjectSampler::sampler() { @@ -125,9 +125,9 @@ ObjectSampler* ObjectSampler::sampler() { void ObjectSampler::destroy() { assert(SafepointSynchronize::is_at_safepoint(), "invariant"); - if (_instance != NULL) { + if (_instance != nullptr) { ObjectSampler* const sampler = _instance; - _instance = NULL; + _instance = nullptr; delete sampler; } } @@ -145,13 +145,13 @@ void ObjectSampler::release() { } static traceid get_thread_id(JavaThread* thread, bool* virtual_thread) { - assert(thread != NULL, "invariant"); - assert(virtual_thread != NULL, "invariant"); - if (thread->threadObj() == NULL) { + assert(thread != nullptr, "invariant"); + assert(virtual_thread != nullptr, "invariant"); + if (thread->threadObj() == nullptr) { return 0; } const JfrThreadLocal* const tl = thread->jfr_thread_local(); - assert(tl != NULL, "invariant"); + assert(tl != nullptr, "invariant"); if (tl->is_excluded()) { return 0; } @@ -160,9 +160,9 @@ static traceid get_thread_id(JavaThread* thread, bool* virtual_thread) { } static JfrBlobHandle get_thread_blob(JavaThread* thread, traceid tid, bool virtual_thread) { - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); JfrThreadLocal* const tl = thread->jfr_thread_local(); - assert(tl != NULL, "invariant"); + assert(tl != nullptr, "invariant"); assert(!tl->is_excluded(), "invariant"); if (virtual_thread) { // TODO: blob cache for virtual threads @@ -195,7 +195,7 @@ class RecordStackTrace { }; void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) { - assert(thread != NULL, "invariant"); + 
assert(thread != nullptr, "invariant"); assert(is_created(), "invariant"); bool virtual_thread = false; const traceid thread_id = get_thread_id(thread, &virtual_thread); @@ -215,9 +215,9 @@ void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) } void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, bool virtual_thread, const JfrBlobHandle& bh, JavaThread* thread) { - assert(obj != NULL, "invariant"); + assert(obj != nullptr, "invariant"); assert(thread_id != 0, "invariant"); - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); if (Atomic::load(&_dead_samples)) { // There's a small race where a GC scan might reset this to true, potentially @@ -241,7 +241,7 @@ void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, bool sample = _list->get(); } - assert(sample != NULL, "invariant"); + assert(sample != nullptr, "invariant"); sample->set_thread_id(thread_id); if (virtual_thread) { sample->set_thread_is_virtual(); @@ -265,7 +265,7 @@ void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, bool void ObjectSampler::scavenge() { ObjectSample* current = _list->last(); - while (current != NULL) { + while (current != nullptr) { ObjectSample* next = current->next(); if (current->is_dead()) { remove_dead(current); @@ -275,13 +275,13 @@ void ObjectSampler::scavenge() { } void ObjectSampler::remove_dead(ObjectSample* sample) { - assert(sample != NULL, "invariant"); + assert(sample != nullptr, "invariant"); assert(sample->is_dead(), "invariant"); sample->release(); ObjectSample* const previous = sample->prev(); // push span onto previous - if (previous != NULL) { + if (previous != nullptr) { _priority_queue->remove(previous); previous->add_span(sample->span()); _priority_queue->push(previous); diff --git a/src/hotspot/share/jfr/leakprofiler/sampling/sampleList.cpp b/src/hotspot/share/jfr/leakprofiler/sampling/sampleList.cpp index e355015e55410..28cddeee70e85 100644 --- a/src/hotspot/share/jfr/leakprofiler/sampling/sampleList.cpp +++ b/src/hotspot/share/jfr/leakprofiler/sampling/sampleList.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ SampleList::SampleList(size_t limit, size_t cache_size) : _free_list(), _in_use_list(), - _last_resolved(NULL), + _last_resolved(nullptr), _allocated(0), _limit(limit), _cache_size(cache_size) { @@ -59,12 +59,12 @@ void SampleList::set_last_resolved(const ObjectSample* sample) { } void SampleList::link(ObjectSample* sample) { - assert(sample != NULL, "invariant"); + assert(sample != nullptr, "invariant"); _in_use_list.prepend(sample); } void SampleList::unlink(ObjectSample* sample) { - assert(sample != NULL, "invariant"); + assert(sample != nullptr, "invariant"); if (_last_resolved == sample) { _last_resolved = sample->next(); } @@ -72,7 +72,7 @@ void SampleList::unlink(ObjectSample* sample) { } ObjectSample* SampleList::reuse(ObjectSample* sample) { - assert(sample != NULL, "invariant"); + assert(sample != nullptr, "invariant"); unlink(sample); link(sample); return sample; @@ -83,7 +83,7 @@ void SampleList::populate_cache() { const size_t cache_delta = _cache_size - _free_list.count(); for (size_t i = 0; i < cache_delta; ++i) { ObjectSample* sample = newSample(); - if (sample != NULL) { + if (sample != nullptr) { _free_list.append(sample); } } @@ -92,7 +92,7 @@ void SampleList::populate_cache() { ObjectSample* SampleList::newSample() const { if (_limit == _allocated) { - return NULL; + return nullptr; } ++_allocated; return new ObjectSample(); @@ -100,22 +100,22 @@ ObjectSample* SampleList::newSample() const { ObjectSample* SampleList::get() { ObjectSample* sample = _free_list.head(); - if (sample != NULL) { + if (sample != nullptr) { link(_free_list.remove(sample)); } else { sample = newSample(); - if (sample != NULL) { + if (sample != nullptr) { _in_use_list.prepend(sample); } } - if (_cache_size > 0 && sample != NULL) { + if (_cache_size > 0 && sample != nullptr) { populate_cache(); } return sample; } void SampleList::release(ObjectSample* sample) { - assert(sample != NULL, "invariant"); + assert(sample != nullptr, "invariant"); unlink(sample); _free_list.append(sample); } @@ -123,7 +123,7 @@ void SampleList::release(ObjectSample* sample) { void SampleList::deallocate_samples(List& list) { if (list.count() > 0) { ObjectSample* sample = list.head(); - while (sample != NULL) { + while (sample != nullptr) { list.remove(sample); delete sample; sample = list.head(); @@ -133,7 +133,7 @@ void SampleList::deallocate_samples(List& list) { } void SampleList::reset(ObjectSample* sample) { - assert(sample != NULL, "invariant"); + assert(sample != nullptr, "invariant"); sample->reset(); } diff --git a/src/hotspot/share/jfr/leakprofiler/sampling/samplePriorityQueue.cpp b/src/hotspot/share/jfr/leakprofiler/sampling/samplePriorityQueue.cpp index 5760dc7bfbdb3..caa77d2bbb9fd 100644 --- a/src/hotspot/share/jfr/leakprofiler/sampling/samplePriorityQueue.cpp +++ b/src/hotspot/share/jfr/leakprofiler/sampling/samplePriorityQueue.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -38,12 +38,12 @@ SamplePriorityQueue::SamplePriorityQueue(size_t size) : SamplePriorityQueue::~SamplePriorityQueue() { FREE_C_HEAP_ARRAY(ObjectSample*, _items); - _items = NULL; + _items = nullptr; } void SamplePriorityQueue::push(ObjectSample* item) { - assert(item != NULL, "invariant"); - assert(_items[_count] == NULL, "invariant"); + assert(item != nullptr, "invariant"); + assert(_items[_count] == nullptr, "invariant"); _items[_count] = item; _items[_count]->set_index(_count); @@ -58,16 +58,16 @@ size_t SamplePriorityQueue::total() const { ObjectSample* SamplePriorityQueue::pop() { if (_count == 0) { - return NULL; + return nullptr; } ObjectSample* const s = _items[0]; - assert(s != NULL, "invariant"); + assert(s != nullptr, "invariant"); swap(0, _count - 1); _count--; assert(s == _items[_count], "invariant"); // clear from heap - _items[_count] = NULL; + _items[_count] = nullptr; moveDown(0); _total -= s->span(); return s; @@ -128,7 +128,7 @@ void SamplePriorityQueue::moveUp(int i) { } void SamplePriorityQueue::remove(ObjectSample* s) { - assert(s != NULL, "invariant"); + assert(s != nullptr, "invariant"); const size_t realSpan = s->span(); s->set_span(0); moveUp(s->index()); @@ -141,7 +141,7 @@ int SamplePriorityQueue::count() const { } const ObjectSample* SamplePriorityQueue::peek() const { - return _count == 0 ? NULL : _items[0]; + return _count == 0 ? nullptr : _items[0]; } ObjectSample* SamplePriorityQueue::item_at(int index) { diff --git a/src/hotspot/share/jfr/leakprofiler/utilities/rootType.cpp b/src/hotspot/share/jfr/leakprofiler/utilities/rootType.cpp index aefbc59a57273..f066cd5df31f2 100644 --- a/src/hotspot/share/jfr/leakprofiler/utilities/rootType.cpp +++ b/src/hotspot/share/jfr/leakprofiler/utilities/rootType.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,12 +38,12 @@ OopStorage* OldObjectRoot::system_oop_storage(System system) { auto id = static_cast(first + (val - _strong_oop_storage_set_first)); return OopStorageSet::storage(id); } - return NULL; + return nullptr; } const char* OldObjectRoot::system_description(System system) { OopStorage* oop_storage = system_oop_storage(system); - if (oop_storage != NULL) { + if (oop_storage != nullptr) { return oop_storage->name(); } switch (system) { @@ -64,7 +64,7 @@ const char* OldObjectRoot::system_description(System system) { default: ShouldNotReachHere(); } - return NULL; + return nullptr; } const char* OldObjectRoot::type_description(Type type) { @@ -84,5 +84,5 @@ const char* OldObjectRoot::type_description(Type type) { default: ShouldNotReachHere(); } - return NULL; + return nullptr; } diff --git a/src/hotspot/share/jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp b/src/hotspot/share/jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp index d5bac9f5b1343..0ebf23d7246ea 100644 --- a/src/hotspot/share/jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp +++ b/src/hotspot/share/jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -63,7 +63,7 @@ inline bool UnifiedOopRef::is_null() const { template inline UnifiedOopRef create_with_tag(T ref, uintptr_t tag) { - assert(ref != NULL, "invariant"); + assert(ref != nullptr, "invariant"); uintptr_t value = reinterpret_cast(ref); diff --git a/src/hotspot/share/jfr/metadata/metadata.xml b/src/hotspot/share/jfr/metadata/metadata.xml index 4a06f44fe2636..d04e0825d2822 100644 --- a/src/hotspot/share/jfr/metadata/metadata.xml +++ b/src/hotspot/share/jfr/metadata/metadata.xml @@ -333,7 +333,7 @@ - + @@ -539,6 +539,11 @@ + + + + + @@ -1071,6 +1076,15 @@ + + + + + + + + + @@ -1093,9 +1107,10 @@ - + + diff --git a/src/hotspot/share/jfr/periodic/jfrFinalizerStatisticsEvent.cpp b/src/hotspot/share/jfr/periodic/jfrFinalizerStatisticsEvent.cpp index ad1c3043690be..908dae2227739 100644 --- a/src/hotspot/share/jfr/periodic/jfrFinalizerStatisticsEvent.cpp +++ b/src/hotspot/share/jfr/periodic/jfrFinalizerStatisticsEvent.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,17 +36,17 @@ #include "services/finalizerService.hpp" static void send_event(const FinalizerEntry* fe, const InstanceKlass* ik, const JfrTicks& timestamp, Thread* thread) { - assert(ik != NULL, "invariant"); + assert(ik != nullptr, "invariant"); assert(ik->has_finalizer(), "invariant"); - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); const char* const url = fe != nullptr ? fe->codesource() : nullptr; - const traceid url_symbol_id = url != NULL ? JfrSymbolTable::add(url) : 0; + const traceid url_symbol_id = url != nullptr ? JfrSymbolTable::add(url) : 0; EventFinalizerStatistics event(UNTIMED); event.set_starttime(timestamp); event.set_endtime(timestamp); event.set_finalizableClass(ik); event.set_codeSource(url_symbol_id); - if (fe == NULL) { + if (fe == nullptr) { event.set_objects(0); event.set_totalFinalizersRun(0); } else { @@ -75,7 +75,7 @@ class FinalizerStatisticsEventClosure : public FinalizerEntryClosure { public: FinalizerStatisticsEventClosure(Thread* thread) : _thread(thread), _timestamp(JfrTicks::now()) {} virtual bool do_entry(const FinalizerEntry* fe) { - assert(fe != NULL, "invariant"); + assert(fe != nullptr, "invariant"); send_event(fe, fe->klass(), _timestamp, _thread); return true; } diff --git a/src/hotspot/share/jfr/periodic/jfrModuleEvent.cpp b/src/hotspot/share/jfr/periodic/jfrModuleEvent.cpp index ef9ede8bc1d25..643cc0c5a3c63 100644 --- a/src/hotspot/share/jfr/periodic/jfrModuleEvent.cpp +++ b/src/hotspot/share/jfr/periodic/jfrModuleEvent.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -78,23 +78,23 @@ static void write_module_export_event(const void* package, const ModuleEntry* qu void ModuleDependencyClosure::do_module(ModuleEntry* to_module) { assert_locked_or_safepoint(Module_lock); - assert(to_module != NULL, "invariant"); - assert(_module != NULL, "invariant"); - assert(_event_func != NULL, "invariant"); + assert(to_module != nullptr, "invariant"); + assert(_module != nullptr, "invariant"); + assert(_event_func != nullptr, "invariant"); _event_func(_module, to_module); } void ModuleExportClosure::do_module(ModuleEntry* qualified_export) { assert_locked_or_safepoint(Module_lock); - assert(qualified_export != NULL, "invariant"); - assert(_package != NULL, "invariant"); - assert(_event_func != NULL, "invariant"); + assert(qualified_export != nullptr, "invariant"); + assert(_package != nullptr, "invariant"); + assert(_event_func != nullptr, "invariant"); _event_func(_package, qualified_export); } static void module_dependency_event_callback(ModuleEntry* module) { assert_locked_or_safepoint(Module_lock); - assert(module != NULL, "invariant"); + assert(module != nullptr, "invariant"); if (module->has_reads_list()) { // create an individual event for each directed edge ModuleDependencyClosure directed_edges(module, &write_module_dependency_event); @@ -104,7 +104,7 @@ static void module_dependency_event_callback(ModuleEntry* module) { static void module_export_event_callback(PackageEntry* package) { assert_locked_or_safepoint(Module_lock); - assert(package != NULL, "invariant"); + assert(package != nullptr, "invariant"); if (package->is_exported()) { if (package->has_qual_exports_list()) { // package is qualifiedly exported to a set of modules, @@ -116,9 +116,9 @@ static void module_export_event_callback(PackageEntry* package) { assert(!package->is_qual_exported() || package->is_exported_allUnnamed(), "invariant"); // no qualified exports - // only create a single event with NULL + // only create a single event with nullptr // for the qualified_exports module - write_module_export_event(package, NULL); + write_module_export_event(package, nullptr); } } diff --git a/src/hotspot/share/jfr/periodic/jfrNetworkUtilization.cpp b/src/hotspot/share/jfr/periodic/jfrNetworkUtilization.cpp index bf72e368b9656..9d57cddb44808 100644 --- a/src/hotspot/share/jfr/periodic/jfrNetworkUtilization.cpp +++ b/src/hotspot/share/jfr/periodic/jfrNetworkUtilization.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -42,27 +42,27 @@ struct InterfaceEntry {
   mutable bool written;
 };
 
-static GrowableArray<InterfaceEntry>* _interfaces = NULL;
+static GrowableArray<InterfaceEntry>* _interfaces = nullptr;
 
 void JfrNetworkUtilization::destroy() {
-  if (_interfaces != NULL) {
+  if (_interfaces != nullptr) {
     for (int i = 0; i < _interfaces->length(); ++i) {
       FREE_C_HEAP_ARRAY(char, _interfaces->at(i).name);
     }
     delete _interfaces;
-    _interfaces = NULL;
+    _interfaces = nullptr;
   }
 }
 
 static InterfaceEntry& new_entry(const NetworkInterface* iface, GrowableArray<InterfaceEntry>* interfaces) {
-  assert(iface != NULL, "invariant");
-  assert(interfaces != NULL, "invariant");
+  assert(iface != nullptr, "invariant");
+  assert(interfaces != nullptr, "invariant");
   // single threaded premise
   static traceid interface_id = 0;
   const char* name = iface->get_name();
-  assert(name != NULL, "invariant");
+  assert(name != nullptr, "invariant");
   InterfaceEntry entry;
   const size_t length = strlen(name);
@@ -76,7 +76,7 @@ static InterfaceEntry& new_entry(const NetworkInterface* iface, GrowableArray<I
 }
 
 static GrowableArray<InterfaceEntry>* get_interfaces() {
-  if (_interfaces == NULL) {
+  if (_interfaces == nullptr) {
     _interfaces = new (mtTracing) GrowableArray<InterfaceEntry>(10, mtTracing);
   }
   return _interfaces;
@@ -88,7 +88,7 @@ static InterfaceEntry& get_entry(const NetworkInterface* iface) {
   static int saved_index = -1;
   GrowableArray<InterfaceEntry>* interfaces = get_interfaces();
-  assert(interfaces != NULL, "invariant");
+  assert(interfaces != nullptr, "invariant");
   for (int i = 0; i < _interfaces->length(); ++i) {
     saved_index = (saved_index + 1) % _interfaces->length();
     if (strcmp(_interfaces->at(saved_index).name, iface->get_name()) == 0) {
@@ -123,7 +123,7 @@ class JfrNetworkInterfaceName : public JfrSerializer {
 };
 
 static bool register_network_interface_name_serializer() {
-  assert(_interfaces != NULL, "invariant");
+  assert(_interfaces != nullptr, "invariant");
   return JfrSerializer::register_serializer(TYPE_NETWORKINTERFACENAME,
     false, // disallow caching; we want a callback every rotation
     new JfrNetworkInterfaceName());
@@ -160,7 +160,7 @@ void JfrNetworkUtilization::send_events() {
   const JfrTicks cur_time = JfrTicks::now();
   if (cur_time > last_sample_instant) {
     const JfrTickspan interval = cur_time - last_sample_instant;
-    for (NetworkInterface *cur = network_interfaces; cur != NULL; cur = cur->next()) {
+    for (NetworkInterface *cur = network_interfaces; cur != nullptr; cur = cur->next()) {
       InterfaceEntry& entry = get_entry(cur);
       const uint64_t current_bytes_in = cur->get_bytes_in();
       const uint64_t current_bytes_out = cur->get_bytes_out();
diff --git a/src/hotspot/share/jfr/periodic/jfrOSInterface.cpp b/src/hotspot/share/jfr/periodic/jfrOSInterface.cpp
index fda21dc58d2ad..9d9ec751fa642 100644
--- a/src/hotspot/share/jfr/periodic/jfrOSInterface.cpp
+++ b/src/hotspot/share/jfr/periodic/jfrOSInterface.cpp
@@ -35,23 +35,23 @@
 #include <stdlib.h> // for environment variables
 
-static JfrOSInterface* _instance = NULL;
+static JfrOSInterface* _instance = nullptr;
 
 JfrOSInterface& JfrOSInterface::instance() {
   return *_instance;
 }
 
 JfrOSInterface* JfrOSInterface::create() {
-  assert(_instance == NULL, "invariant");
+  assert(_instance == nullptr, "invariant");
   _instance = new JfrOSInterface();
   return _instance;
 }
 
 void JfrOSInterface::destroy() {
   JfrNetworkUtilization::destroy();
-  if (_instance != NULL) {
+  if (_instance != nullptr) {
     delete _instance;
-    _instance = NULL;
+    _instance = nullptr;
   }
 }
 
@@ -91,47 +91,47 @@ class JfrOSInterface::JfrOSInterfaceImpl : public JfrCHeapObj {
   int network_utilization(NetworkInterface** network_interfaces);
 };
 
-JfrOSInterface::JfrOSInterfaceImpl::JfrOSInterfaceImpl() : _cpu_info_interface(NULL),
-                                                           _cpu_perf_interface(NULL),
-                                                           _system_process_interface(NULL),
-                                                           _network_performance_interface(NULL) {}
+JfrOSInterface::JfrOSInterfaceImpl::JfrOSInterfaceImpl() : _cpu_info_interface(nullptr),
+                                                           _cpu_perf_interface(nullptr),
+                                                           _system_process_interface(nullptr),
+                                                           _network_performance_interface(nullptr) {}
 
 template <typename T>
 static T* create_interface() {
   ResourceMark rm;
   T* iface = new T();
-  if (iface != NULL) {
+  if (iface != nullptr) {
     if (!iface->initialize()) {
       delete iface;
-      iface = NULL;
+      iface = nullptr;
     }
   }
   return iface;
 }
 
 CPUInformationInterface* JfrOSInterface::JfrOSInterfaceImpl::cpu_info_interface() {
-  if (_cpu_info_interface == NULL) {
+  if (_cpu_info_interface == nullptr) {
     _cpu_info_interface = create_interface<CPUInformationInterface>();
   }
   return _cpu_info_interface;
 }
 
 CPUPerformanceInterface* JfrOSInterface::JfrOSInterfaceImpl::cpu_perf_interface() {
-  if (_cpu_perf_interface == NULL) {
+  if (_cpu_perf_interface == nullptr) {
     _cpu_perf_interface = create_interface<CPUPerformanceInterface>();
   }
   return _cpu_perf_interface;
 }
 
 SystemProcessInterface* JfrOSInterface::JfrOSInterfaceImpl::system_process_interface() {
-  if (_system_process_interface == NULL) {
+  if (_system_process_interface == nullptr) {
     _system_process_interface = create_interface<SystemProcessInterface>();
   }
   return _system_process_interface;
 }
 
 NetworkPerformanceInterface* JfrOSInterface::JfrOSInterfaceImpl::network_performance_interface() {
-  if (_network_performance_interface == NULL) {
+  if (_network_performance_interface == nullptr) {
     _network_performance_interface = create_interface<NetworkPerformanceInterface>();
   }
   return _network_performance_interface;
@@ -142,67 +142,67 @@ bool JfrOSInterface::JfrOSInterfaceImpl::initialize() {
 }
 
 JfrOSInterface::JfrOSInterfaceImpl::~JfrOSInterfaceImpl(void) {
-  if (_cpu_info_interface != NULL) {
+  if (_cpu_info_interface != nullptr) {
     delete _cpu_info_interface;
-    _cpu_info_interface = NULL;
+    _cpu_info_interface = nullptr;
   }
-  if (_cpu_perf_interface != NULL) {
+  if (_cpu_perf_interface != nullptr) {
     delete _cpu_perf_interface;
-    _cpu_perf_interface = NULL;
+    _cpu_perf_interface = nullptr;
   }
-  if (_system_process_interface != NULL) {
+  if (_system_process_interface != nullptr) {
     delete _system_process_interface;
-    _system_process_interface = NULL;
+    _system_process_interface = nullptr;
   }
-  if (_network_performance_interface != NULL) {
+  if (_network_performance_interface != nullptr) {
     delete _network_performance_interface;
-    _network_performance_interface = NULL;
+    _network_performance_interface = nullptr;
   }
 }
 
 int JfrOSInterface::JfrOSInterfaceImpl::cpu_information(CPUInformation& cpu_info) {
   CPUInformationInterface* const iface = cpu_info_interface();
-  return iface == NULL ? OS_ERR : iface->cpu_information(cpu_info);
+  return iface == nullptr ? OS_ERR : iface->cpu_information(cpu_info);
 }
 
 int JfrOSInterface::JfrOSInterfaceImpl::cpu_load(int which_logical_cpu, double* cpu_load) {
   CPUPerformanceInterface* const iface = cpu_perf_interface();
-  return iface == NULL ? OS_ERR : iface->cpu_load(which_logical_cpu, cpu_load);
+  return iface == nullptr ? OS_ERR : iface->cpu_load(which_logical_cpu, cpu_load);
 }
 
 int JfrOSInterface::JfrOSInterfaceImpl::context_switch_rate(double* rate) {
   CPUPerformanceInterface* const iface = cpu_perf_interface();
-  return iface == NULL ? OS_ERR : iface->context_switch_rate(rate);
+  return iface == nullptr ? OS_ERR : iface->context_switch_rate(rate);
 }
 
 int JfrOSInterface::JfrOSInterfaceImpl::cpu_load_total_process(double* cpu_load) {
   CPUPerformanceInterface* const iface = cpu_perf_interface();
-  return iface == NULL ? OS_ERR : iface->cpu_load_total_process(cpu_load);
+  return iface == nullptr ? OS_ERR : iface->cpu_load_total_process(cpu_load);
 }
 
 int JfrOSInterface::JfrOSInterfaceImpl::cpu_loads_process(double* pjvmUserLoad, double* pjvmKernelLoad, double* psystemTotal) {
   CPUPerformanceInterface* const iface = cpu_perf_interface();
-  return iface == NULL ? OS_ERR : iface->cpu_loads_process(pjvmUserLoad, pjvmKernelLoad, psystemTotal);
+  return iface == nullptr ? OS_ERR : iface->cpu_loads_process(pjvmUserLoad, pjvmKernelLoad, psystemTotal);
 }
 
 int JfrOSInterface::JfrOSInterfaceImpl::system_processes(SystemProcess** system_processes, int* no_of_sys_processes) {
-  assert(system_processes != NULL, "system_processes pointer is NULL!");
-  assert(no_of_sys_processes != NULL, "no_of_sys_processes pointer is NULL!");
+  assert(system_processes != nullptr, "system_processes pointer is null!");
+  assert(no_of_sys_processes != nullptr, "no_of_sys_processes pointer is null!");
   SystemProcessInterface* const iface = system_process_interface();
-  return iface == NULL ? OS_ERR : iface->system_processes(system_processes, no_of_sys_processes);
+  return iface == nullptr ? OS_ERR : iface->system_processes(system_processes, no_of_sys_processes);
 }
 
 int JfrOSInterface::JfrOSInterfaceImpl::network_utilization(NetworkInterface** network_interfaces) {
   NetworkPerformanceInterface* const iface = network_performance_interface();
-  return iface == NULL ? OS_ERR : iface->network_utilization(network_interfaces);
+  return iface == nullptr ? OS_ERR : iface->network_utilization(network_interfaces);
 }
 
 // assigned char* is RESOURCE_HEAP_ALLOCATED
 // caller need to ensure proper ResourceMark placement.
int JfrOSInterface::JfrOSInterfaceImpl::os_version(char** os_version) const { - assert(os_version != NULL, "os_version pointer is NULL!"); + assert(os_version != nullptr, "os_version pointer is null!"); stringStream os_ver_info; os::print_os_info_brief(&os_ver_info); *os_version = os_ver_info.as_string(); @@ -210,18 +210,18 @@ int JfrOSInterface::JfrOSInterfaceImpl::os_version(char** os_version) const { } JfrOSInterface::JfrOSInterface() { - _impl = NULL; + _impl = nullptr; } bool JfrOSInterface::initialize() { _impl = new JfrOSInterface::JfrOSInterfaceImpl(); - return _impl != NULL && _impl->initialize(); + return _impl != nullptr && _impl->initialize(); } JfrOSInterface::~JfrOSInterface() { - if (_impl != NULL) { + if (_impl != nullptr) { delete _impl; - _impl = NULL; + _impl = nullptr; } } @@ -275,17 +275,17 @@ const char* JfrOSInterface::virtualization_name() { } int JfrOSInterface::generate_initial_environment_variable_events() { - if (os::get_environ() == NULL) { + if (os::get_environ() == nullptr) { return OS_ERR; } if (EventInitialEnvironmentVariable::is_enabled()) { // One time stamp for all events, so they can be grouped together JfrTicks time_stamp = JfrTicks::now(); - for (char** p = os::get_environ(); *p != NULL; p++) { + for (char** p = os::get_environ(); *p != nullptr; p++) { char* variable = *p; char* equal_sign = strchr(variable, '='); - if (equal_sign != NULL) { + if (equal_sign != nullptr) { // Extract key/value ResourceMark rm; ptrdiff_t key_length = equal_sign - variable; diff --git a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp index 011dc37be9d16..7ff3b29c2899a 100644 --- a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp +++ b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp @@ -231,7 +231,7 @@ TRACE_REQUEST_FUNC(CPUTimeStampCounter) { TRACE_REQUEST_FUNC(SystemProcess) { char pid_buf[16]; - SystemProcess* processes = NULL; + SystemProcess* processes = nullptr; int num_of_processes = 0; JfrTicks start_time = JfrTicks::now(); int ret_val = JfrOSInterface::system_processes(&processes, &num_of_processes); @@ -245,16 +245,16 @@ TRACE_REQUEST_FUNC(SystemProcess) { } if (ret_val == OS_OK) { // feature is implemented, write real event - while (processes != NULL) { + while (processes != nullptr) { SystemProcess* tmp = processes; const char* info = processes->command_line(); - if (info == NULL) { + if (info == nullptr) { info = processes->path(); } - if (info == NULL) { + if (info == nullptr) { info = processes->name(); } - if (info == NULL) { + if (info == nullptr) { info = "?"; } jio_snprintf(pid_buf, sizeof(pid_buf), "%d", processes->pid()); @@ -333,7 +333,7 @@ TRACE_REQUEST_FUNC(ThreadContextSwitchRate) { #define SEND_FLAGS_OF_TYPE(eventType, flagType) \ do { \ JVMFlag *flag = JVMFlag::flags; \ - while (flag->name() != NULL) { \ + while (flag->name() != nullptr) { \ if (flag->is_ ## flagType()) { \ if (flag->is_unlocked()) { \ Event ## eventType event; \ @@ -379,7 +379,7 @@ TRACE_REQUEST_FUNC(StringFlag) { class VM_GC_SendObjectCountEvent : public VM_GC_HeapInspection { public: - VM_GC_SendObjectCountEvent() : VM_GC_HeapInspection(NULL, true) {} + VM_GC_SendObjectCountEvent() : VM_GC_HeapInspection(nullptr, true) {} virtual void doit() { ObjectCountEventSender::enable_requestable_event(); collect(); @@ -459,7 +459,7 @@ TRACE_REQUEST_FUNC(YoungGenerationConfiguration) { TRACE_REQUEST_FUNC(InitialSystemProperty) { SystemProperty* p = Arguments::system_properties(); JfrTicks time_stamp = JfrTicks::now(); - while (p != NULL) { + 
while (p != nullptr) { if (!p->internal()) { EventInitialSystemProperty event(UNTIMED); event.set_key(p->key()); @@ -481,7 +481,7 @@ TRACE_REQUEST_FUNC(ThreadAllocationStatistics) { JfrJavaThreadIterator iter; while (iter.has_next()) { JavaThread* const jt = iter.next(); - assert(jt != NULL, "invariant"); + assert(jt != nullptr, "invariant"); allocated.append(jt->cooked_allocated_bytes()); thread_ids.append(JFR_JVM_THREAD_ID(jt)); } @@ -539,13 +539,13 @@ TRACE_REQUEST_FUNC(ClassLoadingStatistics) { class JfrClassLoaderStatsClosure : public ClassLoaderStatsClosure { public: - JfrClassLoaderStatsClosure() : ClassLoaderStatsClosure(NULL) {} + JfrClassLoaderStatsClosure() : ClassLoaderStatsClosure(nullptr) {} bool do_entry(oop const& key, ClassLoaderStats const& cls) { - const ClassLoaderData* this_cld = cls._class_loader != NULL ? - java_lang_ClassLoader::loader_data_acquire(cls._class_loader) : NULL; - const ClassLoaderData* parent_cld = cls._parent != NULL ? - java_lang_ClassLoader::loader_data_acquire(cls._parent) : NULL; + const ClassLoaderData* this_cld = cls._class_loader != nullptr ? + java_lang_ClassLoader::loader_data_acquire(cls._class_loader) : nullptr; + const ClassLoaderData* parent_cld = cls._parent != nullptr ? + java_lang_ClassLoader::loader_data_acquire(cls._parent) : nullptr; EventClassLoaderStatistics event; event.set_classLoader(this_cld); event.set_parentClassLoader(parent_cld); @@ -567,7 +567,7 @@ class JfrClassLoaderStatsClosure : public ClassLoaderStatsClosure { class JfrClassLoaderStatsVMOperation : public ClassLoaderStatsVMOperation { public: - JfrClassLoaderStatsVMOperation() : ClassLoaderStatsVMOperation(NULL) { } + JfrClassLoaderStatsVMOperation() : ClassLoaderStatsVMOperation(nullptr) { } void doit() { JfrClassLoaderStatsClosure clsc; diff --git a/src/hotspot/share/jfr/periodic/jfrThreadCPULoadEvent.cpp b/src/hotspot/share/jfr/periodic/jfrThreadCPULoadEvent.cpp index 41dadd0d394c1..fef7bac62e8c6 100644 --- a/src/hotspot/share/jfr/periodic/jfrThreadCPULoadEvent.cpp +++ b/src/hotspot/share/jfr/periodic/jfrThreadCPULoadEvent.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -115,7 +115,7 @@ void JfrThreadCPULoadEvent::send_events() { int number_of_threads = 0; while (iter.has_next()) { JavaThread* const jt = iter.next(); - assert(jt != NULL, "invariant"); + assert(jt != nullptr, "invariant"); ++number_of_threads; EventThreadCPULoad event(UNTIMED); if (JfrThreadCPULoadEvent::update_event(event, jt, cur_wallclock_time, processor_count)) { diff --git a/src/hotspot/share/jfr/periodic/sampling/jfrCallTrace.cpp b/src/hotspot/share/jfr/periodic/sampling/jfrCallTrace.cpp index fb204313a0986..5d12af6d9370e 100644 --- a/src/hotspot/share/jfr/periodic/sampling/jfrCallTrace.cpp +++ b/src/hotspot/share/jfr/periodic/sampling/jfrCallTrace.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ #include "runtime/registerMap.hpp" bool JfrGetCallTrace::find_top_frame(frame& top_frame, Method** method, frame& first_frame) { - assert(top_frame.cb() != NULL, "invariant"); + assert(top_frame.cb() != nullptr, "invariant"); RegisterMap map(_thread, RegisterMap::UpdateMap::skip, RegisterMap::ProcessFrames::skip, @@ -44,7 +44,7 @@ bool JfrGetCallTrace::find_top_frame(frame& top_frame, Method** method, frame& f for (u4 i = 0; i < MAX_STACK_DEPTH * 2; ++i) { if (candidate.is_entry_frame()) { JavaCallWrapper *jcw = candidate.entry_frame_call_wrapper_if_safe(_thread); - if (jcw == NULL || jcw->is_first_frame()) { + if (jcw == nullptr || jcw->is_first_frame()) { return false; } } @@ -75,11 +75,11 @@ bool JfrGetCallTrace::find_top_frame(frame& top_frame, Method** method, frame& f if (_in_java) { PcDesc* pc_desc = nm->pc_desc_near(candidate.pc() + 1); - if (pc_desc == NULL || pc_desc->scope_decode_offset() == DebugInformationRecorder::serialized_null) { + if (pc_desc == nullptr || pc_desc->scope_decode_offset() == DebugInformationRecorder::serialized_null) { return false; } candidate.set_pc(pc_desc->real_pc(nm)); - assert(nm->pc_desc_at(candidate.pc()) != NULL, "invalid pc"); + assert(nm->pc_desc_at(candidate.pc()) != nullptr, "invalid pc"); } first_frame = candidate; return true; @@ -92,7 +92,7 @@ bool JfrGetCallTrace::find_top_frame(frame& top_frame, Method** method, frame& f } candidate = candidate.sender(&map); - if (candidate.cb() == NULL) { + if (candidate.cb() == nullptr) { return false; } } @@ -104,14 +104,14 @@ bool JfrGetCallTrace::get_topframe(void* ucontext, frame& topframe) { return false; } - if (topframe.cb() == NULL) { + if (topframe.cb() == nullptr) { return false; } frame first_java_frame; - Method* method = NULL; + Method* method = nullptr; if (find_top_frame(topframe, &method, first_java_frame)) { - if (method == NULL) { + if (method == nullptr) { return false; } topframe = first_java_frame; diff --git a/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp b/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp index 0b31888f636f2..bda6acbc800bf 100644 --- a/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp +++ b/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -54,7 +54,7 @@ enum JfrSampleType { }; static bool thread_state_in_java(JavaThread* thread) { - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); switch(thread->thread_state()) { case _thread_new: case _thread_uninitialized: @@ -77,7 +77,7 @@ static bool thread_state_in_java(JavaThread* thread) { } static bool thread_state_in_native(JavaThread* thread) { - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); switch(thread->thread_state()) { case _thread_new: case _thread_uninitialized: @@ -241,12 +241,12 @@ void JfrNativeSamplerCallback::call() { frame topframe = _jt->last_frame(); frame first_java_frame; - Method* method = NULL; + Method* method = nullptr; JfrGetCallTrace gct(false, _jt); if (!gct.find_top_frame(topframe, &method, first_java_frame)) { return; } - if (method == NULL) { + if (method == nullptr) { return; } topframe = first_java_frame; @@ -387,7 +387,7 @@ static void clear_transition_block(JavaThread* jt) { } static bool is_excluded(JavaThread* thread) { - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); return thread->is_hidden_from_external_view() || thread->in_deopt_handler() || thread->jfr_thread_local()->is_excluded(); } @@ -418,10 +418,10 @@ bool JfrThreadSampleClosure::do_sample_thread(JavaThread* thread, JfrStackFrame* JfrThreadSampler::JfrThreadSampler(int64_t java_period_millis, int64_t native_period_millis, u4 max_frames) : _sample(), - _sampler_thread(NULL), + _sampler_thread(nullptr), _frames(JfrCHeapObj::new_array(max_frames)), - _last_thread_java(NULL), - _last_thread_native(NULL), + _last_thread_java(nullptr), + _last_thread_native(nullptr), _java_period_millis(java_period_millis), _native_period_millis(native_period_millis), _min_size(max_frames * 2 * wordSize), // each frame tags at most 2 words, min size is a full stacktrace @@ -464,10 +464,10 @@ void JfrThreadSampler::on_javathread_suspend(JavaThread* thread) { } JavaThread* JfrThreadSampler::next_thread(ThreadsList* t_list, JavaThread* first_sampled, JavaThread* current) { - assert(t_list != NULL, "invariant"); + assert(t_list != nullptr, "invariant"); assert(Threads_lock->owned_by_self(), "Holding the thread table lock."); assert(_cur_index >= -1 && (uint)_cur_index + 1 <= t_list->length(), "invariant"); - assert((current == NULL && -1 == _cur_index) || (t_list->find_index_of_JavaThread(current) == _cur_index), "invariant"); + assert((current == nullptr && -1 == _cur_index) || (t_list->find_index_of_JavaThread(current) == _cur_index), "invariant"); if ((uint)_cur_index + 1 == t_list->length()) { // wrap _cur_index = 0; @@ -476,7 +476,7 @@ JavaThread* JfrThreadSampler::next_thread(ThreadsList* t_list, JavaThread* first } assert(_cur_index >= 0 && (uint)_cur_index < t_list->length(), "invariant"); JavaThread* const next = t_list->thread_at(_cur_index); - return next != first_sampled ? next : NULL; + return next != first_sampled ? next : nullptr; } void JfrThreadSampler::start_thread() { @@ -508,7 +508,7 @@ static int64_t get_monotonic_ms() { } void JfrThreadSampler::run() { - assert(_sampler_thread == NULL, "invariant"); + assert(_sampler_thread == nullptr, "invariant"); _sampler_thread = this; @@ -588,7 +588,7 @@ void JfrThreadSampler::task_stacktrace(JfrSampleType type, JavaThread** last_thr const uint sample_limit = JAVA_SAMPLE == type ? 
MAX_NR_OF_JAVA_SAMPLES : MAX_NR_OF_NATIVE_SAMPLES; uint num_samples = 0; - JavaThread* start = NULL; + JavaThread* start = nullptr; { elapsedTimer sample_time; sample_time.start(); @@ -596,9 +596,9 @@ void JfrThreadSampler::task_stacktrace(JfrSampleType type, JavaThread** last_thr MutexLocker tlock(Threads_lock); ThreadsListHandle tlh; // Resolve a sample session relative start position index into the thread list array. - // In cases where the last sampled thread is NULL or not-NULL but stale, find_index() returns -1. + // In cases where the last sampled thread is null or not-null but stale, find_index() returns -1. _cur_index = tlh.list()->find_index_of_JavaThread(*last_thread); - JavaThread* current = _cur_index != -1 ? *last_thread : NULL; + JavaThread* current = _cur_index != -1 ? *last_thread : nullptr; // Explicitly monitor the available space of the thread-local buffer used by the load barrier // for enqueuing klasses as part of tagging methods. We do this because if space becomes sparse, @@ -611,10 +611,10 @@ void JfrThreadSampler::task_stacktrace(JfrSampleType type, JavaThread** last_thr while (num_samples < sample_limit) { current = next_thread(tlh.list(), start, current); - if (current == NULL) { + if (current == nullptr) { break; } - if (start == NULL) { + if (start == nullptr) { start = current; // remember the thread where we started to attempt sampling } if (current->is_Compiler_thread()) { @@ -637,29 +637,29 @@ void JfrThreadSampler::task_stacktrace(JfrSampleType type, JavaThread** last_thr } } -static JfrThreadSampling* _instance = NULL; +static JfrThreadSampling* _instance = nullptr; JfrThreadSampling& JfrThreadSampling::instance() { return *_instance; } JfrThreadSampling* JfrThreadSampling::create() { - assert(_instance == NULL, "invariant"); + assert(_instance == nullptr, "invariant"); _instance = new JfrThreadSampling(); return _instance; } void JfrThreadSampling::destroy() { - if (_instance != NULL) { + if (_instance != nullptr) { delete _instance; - _instance = NULL; + _instance = nullptr; } } -JfrThreadSampling::JfrThreadSampling() : _sampler(NULL) {} +JfrThreadSampling::JfrThreadSampling() : _sampler(nullptr) {} JfrThreadSampling::~JfrThreadSampling() { - if (_sampler != NULL) { + if (_sampler != nullptr) { _sampler->disenroll(); } } @@ -722,7 +722,7 @@ void JfrThreadSampling::set_sampling_period(bool is_java_period, int64_t period_ void JfrThreadSampling::set_java_sample_period(int64_t period_millis) { assert(period_millis >= 0, "invariant"); - if (_instance == NULL && 0 == period_millis) { + if (_instance == nullptr && 0 == period_millis) { return; } instance().set_sampling_period(true, period_millis); @@ -730,7 +730,7 @@ void JfrThreadSampling::set_java_sample_period(int64_t period_millis) { void JfrThreadSampling::set_native_sample_period(int64_t period_millis) { assert(period_millis >= 0, "invariant"); - if (_instance == NULL && 0 == period_millis) { + if (_instance == nullptr && 0 == period_millis) { return; } instance().set_sampling_period(false, period_millis); diff --git a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp index 8fce812b074e6..680e6ba621cbd 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -59,22 +59,22 @@ typedef JfrCheckpointManager::BufferPtr BufferPtr; typedef JfrCheckpointManager::ConstBufferPtr ConstBufferPtr; static JfrSignal _new_checkpoint; -static JfrCheckpointManager* _instance = NULL; +static JfrCheckpointManager* _instance = nullptr; JfrCheckpointManager& JfrCheckpointManager::instance() { return *_instance; } JfrCheckpointManager* JfrCheckpointManager::create(JfrChunkWriter& cw) { - assert(_instance == NULL, "invariant"); + assert(_instance == nullptr, "invariant"); _instance = new JfrCheckpointManager(cw); return _instance; } void JfrCheckpointManager::destroy() { - assert(_instance != NULL, "invariant"); + assert(_instance != nullptr, "invariant"); delete _instance; - _instance = NULL; + _instance = nullptr; } JfrCheckpointManager::JfrCheckpointManager(JfrChunkWriter& cw) : @@ -100,9 +100,9 @@ static const size_t virtual_thread_local_buffer_prealloc_count = 0; static const size_t virtual_thread_local_buffer_size = 4 * K; bool JfrCheckpointManager::initialize() { - assert(_global_mspace == NULL, "invariant"); + assert(_global_mspace == nullptr, "invariant"); _global_mspace = create_mspace(global_buffer_size, 0, 0, false, this); // post-pone preallocation - if (_global_mspace == NULL) { + if (_global_mspace == nullptr) { return false; } // preallocate buffer count to each of the epoch live lists @@ -112,17 +112,17 @@ bool JfrCheckpointManager::initialize() { } assert(_global_mspace->free_list_is_empty(), "invariant"); - assert(_thread_local_mspace == NULL, "invariant"); + assert(_thread_local_mspace == nullptr, "invariant"); _thread_local_mspace = new JfrThreadLocalCheckpointMspace(); - if (_thread_local_mspace == NULL || !_thread_local_mspace->initialize(thread_local_buffer_size, + if (_thread_local_mspace == nullptr || !_thread_local_mspace->initialize(thread_local_buffer_size, thread_local_buffer_prealloc_count, thread_local_buffer_prealloc_count)) { return false; } - assert(_virtual_thread_local_mspace == NULL, "invariant"); + assert(_virtual_thread_local_mspace == nullptr, "invariant"); _virtual_thread_local_mspace = new JfrThreadLocalCheckpointMspace(); - if (_virtual_thread_local_mspace == NULL || !_virtual_thread_local_mspace->initialize(virtual_thread_local_buffer_size, + if (_virtual_thread_local_mspace == nullptr || !_virtual_thread_local_mspace->initialize(virtual_thread_local_buffer_size, JFR_MSPACE_UNLIMITED_CACHE_SIZE, virtual_thread_local_buffer_prealloc_count)) { return false; @@ -140,13 +140,13 @@ static void assert_lease(ConstBufferPtr buffer) { } static void assert_release(ConstBufferPtr buffer) { - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); assert(buffer->lease(), "invariant"); assert(buffer->acquired_by_self(), "invariant"); } static void assert_retired(ConstBufferPtr buffer, Thread* thread) { - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); assert(buffer->acquired_by(thread), "invariant"); assert(buffer->retired(), "invariant"); } @@ -158,28 +158,28 @@ void JfrCheckpointManager::register_full(BufferPtr buffer, Thread* thread) { } static inline bool is_global(ConstBufferPtr buffer) { - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); return buffer->context() == JFR_GLOBAL; } static inline bool is_thread_local(ConstBufferPtr buffer) { - assert(buffer != NULL, "invariant"); + 
assert(buffer != nullptr, "invariant"); return buffer->context() == JFR_THREADLOCAL; } static inline bool is_virtual_thread_local(ConstBufferPtr buffer) { - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); return buffer->context() == JFR_VIRTUAL_THREADLOCAL; } BufferPtr JfrCheckpointManager::lease_global(Thread* thread, bool previous_epoch /* false */, size_t size /* 0 */) { JfrCheckpointMspace* const mspace = instance()._global_mspace; - assert(mspace != NULL, "invariant"); + assert(mspace != nullptr, "invariant"); static const size_t max_elem_size = mspace->min_element_size(); // min is max BufferPtr buffer; if (size <= max_elem_size) { buffer = mspace_acquire_live(size, mspace, thread, previous_epoch); - if (buffer != NULL) { + if (buffer != nullptr) { buffer->set_lease(); DEBUG_ONLY(assert_lease(buffer);) return buffer; @@ -202,13 +202,13 @@ BufferPtr JfrCheckpointManager::lease_thread_local(Thread* thread, size_t size) } BufferPtr JfrCheckpointManager::get_virtual_thread_local(Thread* thread) { - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); return JfrTraceIdEpoch::epoch() ? thread->jfr_thread_local()->_checkpoint_buffer_epoch_1 : thread->jfr_thread_local()->_checkpoint_buffer_epoch_0; } void JfrCheckpointManager::set_virtual_thread_local(Thread* thread, BufferPtr buffer) { - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); if (JfrTraceIdEpoch::epoch()) { thread->jfr_thread_local()->_checkpoint_buffer_epoch_1 = buffer; } else { @@ -239,7 +239,7 @@ BufferPtr JfrCheckpointManager::acquire_virtual_thread_local(Thread* thread, siz } BufferPtr JfrCheckpointManager::renew(ConstBufferPtr old, Thread* thread, size_t size, JfrCheckpointBufferKind kind /* JFR_THREADLOCAL */) { - assert(old != NULL, "invariant"); + assert(old != nullptr, "invariant"); assert(old->acquired_by_self(), "invariant"); if (kind == JFR_GLOBAL) { return lease_global(thread, instance()._global_mspace->in_previous_epoch_list(old), size); @@ -285,14 +285,14 @@ static inline JfrCheckpointBufferKind kind(ConstBufferPtr buffer) { } BufferPtr JfrCheckpointManager::flush(BufferPtr old, size_t used, size_t requested, Thread* thread) { - assert(old != NULL, "invariant"); + assert(old != nullptr, "invariant"); if (0 == requested) { // indicates a lease is being returned assert(old->lease(), "invariant"); release(old); // signal completion of a new checkpoint _new_checkpoint.signal(); - return NULL; + return nullptr; } BufferPtr new_buffer = renew(old, thread, used + requested, kind(old)); if (new_buffer != nullptr) { @@ -353,7 +353,7 @@ static uint64_t calculate_event_size_bytes(JfrChunkWriter& cw, const u1* data, i } static size_t write_checkpoint_event(JfrChunkWriter& cw, const u1* data) { - assert(data != NULL, "invariant"); + assert(data != nullptr, "invariant"); const int64_t event_begin = cw.current_offset(); const int64_t last_checkpoint_event = cw.last_checkpoint_offset(); cw.set_last_checkpoint_offset(event_begin); @@ -373,7 +373,7 @@ static size_t write_checkpoint_event(JfrChunkWriter& cw, const u1* data) { static size_t write_checkpoints(JfrChunkWriter& cw, const u1* data, size_t size) { assert(cw.is_valid(), "invariant"); - assert(data != NULL, "invariant"); + assert(data != nullptr, "invariant"); assert(size > 0, "invariant"); const u1* const limit = data + size; const u1* next = data; @@ -388,7 +388,7 @@ static size_t write_checkpoints(JfrChunkWriter& cw, const u1* data, size_t size) } static size_t 
write_thread_checkpoint_content(JfrChunkWriter& cw, const u1* data) { - assert(data != NULL, "invariant"); + assert(data != nullptr, "invariant"); const size_t size = total_size(data); assert(size > 0, "invariant"); assert(checkpoint_type(data) == THREADS, "invariant"); @@ -400,7 +400,7 @@ static size_t write_thread_checkpoint_content(JfrChunkWriter& cw, const u1* data static size_t write_thread_checkpoint_payloads(JfrChunkWriter& cw, const u1* data, size_t size, u4& elements) { assert(cw.is_valid(), "invariant"); - assert(data != NULL, "invariant"); + assert(data != nullptr, "invariant"); assert(size > 0, "invariant"); const u1* const limit = data + size; const u1* next = data; @@ -528,14 +528,14 @@ size_t JfrCheckpointManager::clear() { } size_t JfrCheckpointManager::write_static_type_set(Thread* thread) { - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); JfrCheckpointWriter writer(true, thread, STATICS); JfrTypeManager::write_static_types(writer); return writer.used_size(); } size_t JfrCheckpointManager::write_threads(JavaThread* thread) { - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); // can safepoint here ThreadInVMfromNative transition(thread); ResourceMark rm(thread); @@ -585,7 +585,7 @@ void JfrCheckpointManager::write_type_set() { ObjectSampleCheckpoint::on_type_set(leakp_writer); } else { JfrCheckpointWriter writer(true, thread); - JfrTypeSet::serialize(&writer, NULL, false, false); + JfrTypeSet::serialize(&writer, nullptr, false, false); } } write(); @@ -601,11 +601,11 @@ void JfrCheckpointManager::on_unloading_classes() { } static size_t flush_type_set(Thread* thread) { - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); JfrCheckpointWriter writer(thread); MutexLocker cld_lock(thread, ClassLoaderDataGraph_lock); MutexLocker module_lock(thread, Module_lock); - return JfrTypeSet::serialize(&writer, NULL, false, true); + return JfrTypeSet::serialize(&writer, nullptr, false, true); } size_t JfrCheckpointManager::flush_type_set() { @@ -636,7 +636,7 @@ size_t JfrCheckpointManager::flush_type_set() { } JfrBlobHandle JfrCheckpointManager::create_thread_blob(JavaThread* jt, traceid tid /* 0 */, oop vthread /* nullptr */) { - assert(jt != NULL, "invariant"); + assert(jt != nullptr, "invariant"); assert(Thread::current() == jt, "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt)); return JfrTypeManager::create_thread_blob(jt, tid, vthread); @@ -649,7 +649,7 @@ void JfrCheckpointManager::write_checkpoint(Thread* thread, traceid tid /* 0 */, class JfrNotifyClosure : public ThreadClosure { public: void do_thread(Thread* thread) { - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); assert_locked_or_safepoint(Threads_lock); JfrJavaEventWriter::notify(JavaThread::cast(thread)); } diff --git a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.hpp b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.hpp index 6b0bd081b9028..b14c336771806 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.hpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -103,8 +103,8 @@ class JfrCheckpointManager : public JfrCHeapObj { void register_full(BufferPtr buffer, Thread* thread); public: - static JfrBlobHandle create_thread_blob(JavaThread* jt, traceid tid = 0, oop vthread = NULL); - static void write_checkpoint(Thread* t, traceid tid = 0, oop vthread = NULL); + static JfrBlobHandle create_thread_blob(JavaThread* jt, traceid tid = 0, oop vthread = nullptr); + static void write_checkpoint(Thread* t, traceid tid = 0, oop vthread = nullptr); size_t flush_type_set(); friend class Jfr; diff --git a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp index 6fea14057f7c6..0cc017b7062c9 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -75,7 +75,7 @@ JfrCheckpointWriter::JfrCheckpointWriter(bool previous_epoch, Thread* thread, Jf } static void write_checkpoint_header(u1* pos, int64_t size, jlong time, u4 checkpoint_type, u4 type_count) { - assert(pos != NULL, "invariant"); + assert(pos != nullptr, "invariant"); JfrBigEndianWriter be_writer(pos, sizeof(JfrCheckpointEntry)); be_writer.write(size); be_writer.write(time); @@ -153,9 +153,9 @@ const u1* JfrCheckpointWriter::session_data(size_t* size, bool move /* false */, assert(this->is_acquired(), "wrong state!"); if (!this->is_valid()) { *size = 0; - return NULL; + return nullptr; } - if (ctx != NULL) { + if (ctx != nullptr) { const u1* session_start_pos = this->start_pos() + ctx->offset; *size = this->current_pos() - session_start_pos; return session_start_pos; @@ -195,7 +195,7 @@ JfrBlobHandle JfrCheckpointWriter::move(const JfrCheckpointContext* ctx /* 0 */) size_t size = 0; const u1* data = session_data(&size, true, ctx); JfrBlobHandle blob = JfrBlob::make(data, size); - if (ctx != NULL) { + if (ctx != nullptr) { const_cast<JfrCheckpointContext*>(ctx)->count = 0; set_context(*ctx); } diff --git a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.hpp b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.hpp index bdbd4182204c9..25c1224fbc42b 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.hpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -67,7 +67,7 @@ class JfrCheckpointWriter : public JfrCheckpointWriterBase { u4 count() const; void set_count(u4 count); void increment(); - const u1* session_data(size_t* size, bool move = false, const JfrCheckpointContext* ctx = NULL); + const u1* session_data(size_t* size, bool move = false, const JfrCheckpointContext* ctx = nullptr); void release(); JfrCheckpointWriter(bool previous_epoch, Thread* thread, JfrCheckpointType type = GENERIC); public: @@ -81,8 +81,8 @@ class JfrCheckpointWriter : public JfrCheckpointWriterBase { const JfrCheckpointContext context() const; void set_context(const JfrCheckpointContext ctx); bool has_data() const; - JfrBlobHandle copy(const JfrCheckpointContext* ctx = NULL); - JfrBlobHandle move(const JfrCheckpointContext* ctx = NULL); + JfrBlobHandle copy(const JfrCheckpointContext* ctx = nullptr); + JfrBlobHandle move(const JfrCheckpointContext* ctx = nullptr); }; #endif // SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTWRITER_HPP diff --git a/src/hotspot/share/jfr/recorder/checkpoint/jfrMetadataEvent.cpp b/src/hotspot/share/jfr/recorder/checkpoint/jfrMetadataEvent.cpp index 17ed26535250c..08f7d2ae87d8f 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/jfrMetadataEvent.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrMetadataEvent.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ #include "runtime/javaThread.hpp" #include "utilities/exceptions.hpp" -static jbyteArray metadata_blob = NULL; +static jbyteArray metadata_blob = nullptr; static u8 metadata_id = 0; static u8 last_metadata_id = 0; @@ -52,14 +52,14 @@ static void check_internal_types() { static void write_metadata_blob(JfrChunkWriter& chunkwriter, JavaThread* thread) { assert(chunkwriter.is_valid(), "invariant"); - assert(thread != NULL, "invariant"); - assert(metadata_blob != NULL, "invariant"); + assert(thread != nullptr, "invariant"); + assert(metadata_blob != nullptr, "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(thread)); const typeArrayOop arr = (typeArrayOop)JfrJavaSupport::resolve_non_null(metadata_blob); - assert(arr != NULL, "invariant"); + assert(arr != nullptr, "invariant"); const int length = arr->length(); const Klass* const k = arr->klass(); - assert(k != NULL && k->is_array_klass(), "invariant"); + assert(k != nullptr && k->is_array_klass(), "invariant"); const TypeArrayKlass* const byte_arr_klass = TypeArrayKlass::cast(k); const jbyte* const data_address = arr->byte_at_addr(0); chunkwriter.write_unbuffered(data_address, length); @@ -93,11 +93,11 @@ void JfrMetadataEvent::write(JfrChunkWriter& chunkwriter) { void JfrMetadataEvent::update(jbyteArray metadata) { JavaThread* thread = JavaThread::current(); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(thread)); - if (metadata_blob != NULL) { + if (metadata_blob != nullptr) { JfrJavaSupport::destroy_global_jni_handle(metadata_blob); } const oop new_desc_oop = JfrJavaSupport::resolve_non_null(metadata); - assert(new_desc_oop != NULL, "invariant"); + assert(new_desc_oop != nullptr, "invariant"); metadata_blob = (jbyteArray)JfrJavaSupport::global_jni_handle(new_desc_oop, thread); ++metadata_id; } diff --git 
a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp index 91f83f1ec496a..615a092d778c7 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,7 +43,7 @@ class ThreadGroupExclusiveAccess : public StackObj { }; Semaphore ThreadGroupExclusiveAccess::_mutex_semaphore(1); -JfrThreadGroup* JfrThreadGroup::_instance = NULL; +JfrThreadGroup* JfrThreadGroup::_instance = nullptr; class JfrThreadGroupPointers : public ResourceObj { private: @@ -71,19 +71,19 @@ jweak JfrThreadGroupPointers::thread_group_weak_ref() const { } oopDesc* const JfrThreadGroupPointers::thread_group_oop() const { - assert(_thread_group_weak_ref == NULL || + assert(_thread_group_weak_ref == nullptr || JNIHandles::resolve_non_null(_thread_group_weak_ref) == _thread_group_handle(), "invariant"); return _thread_group_handle(); } jweak JfrThreadGroupPointers::transfer_weak_global_handle_ownership() { jweak temp = _thread_group_weak_ref; - _thread_group_weak_ref = NULL; + _thread_group_weak_ref = nullptr; return temp; } void JfrThreadGroupPointers::clear_weak_ref() { - if (NULL != _thread_group_weak_ref) { + if (nullptr != _thread_group_weak_ref) { JNIHandles::destroy_weak_global(_thread_group_weak_ref); } } @@ -118,7 +118,7 @@ JfrThreadGroupsHelper::~JfrThreadGroupsHelper() { } JfrThreadGroupPointers& JfrThreadGroupsHelper::at(int index) { - assert(_thread_group_hierarchy != NULL, "invariant"); + assert(_thread_group_hierarchy != nullptr, "invariant"); assert(index > invalid_iterator_pos && index < _thread_group_hierarchy->length(), "invariant"); return *(_thread_group_hierarchy->at(index)); } @@ -128,7 +128,7 @@ bool JfrThreadGroupsHelper::has_next() const { } bool JfrThreadGroupsHelper::is_valid() const { - return (_thread_group_hierarchy != NULL && _thread_group_hierarchy->length() > 0); + return (_thread_group_hierarchy != nullptr && _thread_group_hierarchy->length() > 0); } JfrThreadGroupPointers& JfrThreadGroupsHelper::next() { @@ -147,9 +147,9 @@ JfrThreadGroupPointers& JfrThreadGroupsHelper::next() { * (not here). */ int JfrThreadGroupsHelper::populate_thread_group_hierarchy(const JavaThread* jt, Thread* current) { - assert(jt != NULL && jt->is_Java_thread(), "invariant"); - assert(current != NULL, "invariant"); - assert(_thread_group_hierarchy != NULL, "invariant"); + assert(jt != nullptr && jt->is_Java_thread(), "invariant"); + assert(current != nullptr, "invariant"); + assert(_thread_group_hierarchy != nullptr, "invariant"); oop thread_oop = jt->threadObj(); if (thread_oop == nullptr) { @@ -157,12 +157,12 @@ int JfrThreadGroupsHelper::populate_thread_group_hierarchy(const JavaThread* jt, } // immediate thread group Handle thread_group_handle(current, java_lang_Thread::threadGroup(thread_oop)); - if (thread_group_handle == NULL) { + if (thread_group_handle == nullptr) { return 0; } const bool use_weak_handles = !SafepointSynchronize::is_at_safepoint(); - jweak thread_group_weak_ref = use_weak_handles ? JNIHandles::make_weak_global(thread_group_handle) : NULL; + jweak thread_group_weak_ref = use_weak_handles ? 
JNIHandles::make_weak_global(thread_group_handle) : nullptr; JfrThreadGroupPointers* thread_group_pointers = new JfrThreadGroupPointers(thread_group_handle, thread_group_weak_ref); _thread_group_hierarchy->append(thread_group_pointers); @@ -172,7 +172,7 @@ int JfrThreadGroupsHelper::populate_thread_group_hierarchy(const JavaThread* jt, // and check parents parents... while (parent_thread_group_handle != nullptr) { - const jweak parent_group_weak_ref = use_weak_handles ? JNIHandles::make_weak_global(parent_thread_group_handle) : NULL; + const jweak parent_group_weak_ref = use_weak_handles ? JNIHandles::make_weak_global(parent_thread_group_handle) : nullptr; thread_group_pointers = new JfrThreadGroupPointers(parent_thread_group_handle, parent_group_weak_ref); _thread_group_hierarchy->append(thread_group_pointers); parent_thread_group_obj = java_lang_ThreadGroup::parent(parent_thread_group_handle()); @@ -223,25 +223,25 @@ class JfrThreadGroup::JfrThreadGroupEntry : public JfrCHeapObj { JfrThreadGroup::JfrThreadGroupEntry::JfrThreadGroupEntry(const char* tgname, JfrThreadGroupPointers& ptrs) : _thread_group_id(0), _parent_group_id(0), - _thread_group_name(NULL), - _thread_group_oop(NULL), - _thread_group_weak_ref(NULL) { + _thread_group_name(nullptr), + _thread_group_oop(nullptr), + _thread_group_weak_ref(nullptr) { set_thread_group_name(tgname); set_thread_group(ptrs); } JfrThreadGroup::JfrThreadGroupEntry::~JfrThreadGroupEntry() { - if (_thread_group_name != NULL) { + if (_thread_group_name != nullptr) { JfrCHeapObj::free(_thread_group_name, strlen(_thread_group_name) + 1); } - if (_thread_group_weak_ref != NULL) { + if (_thread_group_weak_ref != nullptr) { JNIHandles::destroy_weak_global(_thread_group_weak_ref); } } void JfrThreadGroup::JfrThreadGroupEntry::set_thread_group_name(const char* tgname) { - assert(_thread_group_name == NULL, "invariant"); - if (tgname != NULL) { + assert(_thread_group_name == nullptr, "invariant"); + if (tgname != nullptr) { size_t len = strlen(tgname); _thread_group_name = JfrCHeapObj::new_array<char>(len + 1); strncpy(_thread_group_name, tgname, len + 1); @@ -249,16 +249,16 @@ void JfrThreadGroup::JfrThreadGroupEntry::set_thread_group_name(const char* tgna } const oop JfrThreadGroup::JfrThreadGroupEntry::thread_group() const { - return _thread_group_weak_ref != NULL ? JNIHandles::resolve(_thread_group_weak_ref) : _thread_group_oop; + return _thread_group_weak_ref != nullptr ?
JNIHandles::resolve(_thread_group_weak_ref) : _thread_group_oop; } void JfrThreadGroup::JfrThreadGroupEntry::set_thread_group(JfrThreadGroupPointers& ptrs) { _thread_group_weak_ref = ptrs.transfer_weak_global_handle_ownership(); - if (_thread_group_weak_ref == NULL) { + if (_thread_group_weak_ref == nullptr) { _thread_group_oop = ptrs.thread_group_oop(); - assert(_thread_group_oop != NULL, "invariant"); + assert(_thread_group_oop != nullptr, "invariant"); } else { - _thread_group_oop = NULL; + _thread_group_oop = nullptr; } } @@ -266,7 +266,7 @@ JfrThreadGroup::JfrThreadGroup() : _list(new (mtTracing) GrowableArray(initial_array_size, mtTracing)) {} JfrThreadGroup::~JfrThreadGroup() { - if (_list != NULL) { + if (_list != nullptr) { for (int i = 0; i < _list->length(); i++) { JfrThreadGroupEntry* e = _list->at(i); delete e; @@ -296,22 +296,22 @@ traceid JfrThreadGroup::thread_group_id(JavaThread* const jt) { traceid JfrThreadGroup::thread_group_id_internal(JfrThreadGroupsHelper& helper) { ThreadGroupExclusiveAccess lock; JfrThreadGroup* tg_instance = instance(); - if (tg_instance == NULL) { + if (tg_instance == nullptr) { tg_instance = new JfrThreadGroup(); - if (tg_instance == NULL) { + if (tg_instance == nullptr) { return 0; } set_instance(tg_instance); } - JfrThreadGroupEntry* tge = NULL; + JfrThreadGroupEntry* tge = nullptr; int parent_thread_group_id = 0; while (helper.has_next()) { JfrThreadGroupPointers& ptrs = helper.next(); tge = tg_instance->find_entry(ptrs); - if (NULL == tge) { + if (nullptr == tge) { tge = tg_instance->new_entry(ptrs); - assert(tge != NULL, "invariant"); + assert(tge != nullptr, "invariant"); tge->set_parent_group_id(parent_thread_group_id); } parent_thread_group_id = tge->thread_group_id(); @@ -332,7 +332,7 @@ JfrThreadGroup::find_entry(const JfrThreadGroupPointers& ptrs) const { return curtge; } } - return (JfrThreadGroupEntry*) NULL; + return (JfrThreadGroupEntry*) nullptr; } // Assumes you already searched for the existence @@ -345,22 +345,22 @@ JfrThreadGroup::new_entry(JfrThreadGroupPointers& ptrs) { } int JfrThreadGroup::add_entry(JfrThreadGroupEntry* tge) { - assert(tge != NULL, "attempting to add a null entry!"); + assert(tge != nullptr, "attempting to add a null entry!"); assert(0 == tge->thread_group_id(), "id must be unassigned!"); tge->set_thread_group_id(next_id()); return _list->append(tge); } void JfrThreadGroup::write_thread_group_entries(JfrCheckpointWriter& writer) const { - assert(_list != NULL && !_list->is_empty(), "should not need be here!"); + assert(_list != nullptr && !_list->is_empty(), "should not need be here!"); const int number_of_tg_entries = _list->length(); writer.write_count(number_of_tg_entries + 1); // + VirtualThread group writer.write_key(1); // 1 is reserved for VirtualThread group writer.write(0); // parent const oop vgroup = java_lang_Thread_Constants::get_VTHREAD_GROUP(); - assert(vgroup != (oop)NULL, "invariant"); + assert(vgroup != (oop)nullptr, "invariant"); const char* const vgroup_name = java_lang_ThreadGroup::name(vgroup); - assert(vgroup_name != NULL, "invariant"); + assert(vgroup_name != nullptr, "invariant"); writer.write(vgroup_name); for (int index = 0; index < number_of_tg_entries; ++index) { const JfrThreadGroupEntry* const curtge = _list->at(index); @@ -371,8 +371,8 @@ void JfrThreadGroup::write_thread_group_entries(JfrCheckpointWriter& writer) con } void JfrThreadGroup::write_selective_thread_group(JfrCheckpointWriter* writer, traceid thread_group_id) const { - assert(writer != NULL, "invariant"); - 
assert(_list != NULL && !_list->is_empty(), "should not need be here!"); + assert(writer != nullptr, "invariant"); + assert(_list != nullptr && !_list->is_empty(), "should not need be here!"); assert(thread_group_id != 1, "should not need be here!"); const int number_of_tg_entries = _list->length(); @@ -404,15 +404,15 @@ void JfrThreadGroup::write_selective_thread_group(JfrCheckpointWriter* writer, t void JfrThreadGroup::serialize(JfrCheckpointWriter& writer) { ThreadGroupExclusiveAccess lock; JfrThreadGroup* tg_instance = instance(); - assert(tg_instance != NULL, "invariant"); + assert(tg_instance != nullptr, "invariant"); tg_instance->write_thread_group_entries(writer); } // for writing a particular thread group void JfrThreadGroup::serialize(JfrCheckpointWriter* writer, traceid thread_group_id) { - assert(writer != NULL, "invariant"); + assert(writer != nullptr, "invariant"); ThreadGroupExclusiveAccess lock; JfrThreadGroup* const tg_instance = instance(); - assert(tg_instance != NULL, "invariant"); + assert(tg_instance != nullptr, "invariant"); tg_instance->write_selective_thread_group(writer, thread_group_id); } diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadState.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadState.cpp index c0379e6137714..08fe29e905ea5 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadState.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadState.cpp @@ -97,13 +97,13 @@ traceid JfrThreadId::id(const Thread* t, oop vthread) { } traceid JfrThreadId::os_id(const Thread* t) { - assert(t != NULL, "invariant"); + assert(t != nullptr, "invariant"); const OSThread* const os_thread = t->osthread(); - return os_thread != NULL ? os_thread->thread_id() : 0; + return os_thread != nullptr ? os_thread->thread_id() : 0; } traceid JfrThreadId::jfr_id(const Thread* t, traceid tid) { - assert(t != NULL, "invariant"); + assert(t != nullptr, "invariant"); return tid != 0 ? tid : JfrThreadLocal::jvm_thread_id(t); } @@ -111,8 +111,8 @@ traceid JfrThreadId::jfr_id(const Thread* t, traceid tid) { const char* get_java_thread_name(const JavaThread* jt, int& length, oop vthread) { assert(jt != nullptr, "invariant"); const char* name_str = ""; - oop thread_obj = vthread != NULL ? vthread : jt->threadObj(); - if (thread_obj == NULL) { + oop thread_obj = vthread != nullptr ? 
vthread : jt->threadObj(); + if (thread_obj == nullptr) { if (jt->is_attaching_via_jni()) { name_str = ""; } diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadState.hpp b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadState.hpp index 633e4061253da..d2e3d1630b239 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadState.hpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadState.hpp @@ -37,7 +37,7 @@ class JfrThreadState : public AllStatic { class JfrThreadId : public AllStatic { public: - static traceid id(const Thread* t, oop vthread = NULL); + static traceid id(const Thread* t, oop vthread = nullptr); static traceid os_id(const Thread* t); static traceid jfr_id(const Thread* t, traceid tid = 0); }; @@ -45,7 +45,7 @@ class JfrThreadId : public AllStatic { class JfrThreadName : public AllStatic { public: // Requires a ResourceMark for get_thread_name/as_utf8 - static const char* name(const Thread* t, int& length, oop vthread = NULL); + static const char* name(const Thread* t, int& length, oop vthread = nullptr); }; #endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADSTATE_HPP diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp index f3a7650968ba9..e7e83131dfd19 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp @@ -90,14 +90,14 @@ class JfrCheckpointThreadClosure : public ThreadClosure { // Only static thread ids, virtual threads are handled dynamically. void JfrCheckpointThreadClosure::do_thread(Thread* t) { - assert(t != NULL, "invariant"); + assert(t != nullptr, "invariant"); ++_count; const traceid tid = JfrThreadId::jfr_id(t); assert(tid != 0, "invariant"); _writer.write_key(tid); int length = -1; const char* const name = JfrThreadName::name(t, length); - assert(name != NULL, "invariant"); + assert(name != nullptr, "invariant"); _writer.write(name); _writer.write(JfrThreadId::os_id(t)); if (!t->is_Java_thread()) { @@ -225,7 +225,7 @@ static const char* reference_type_to_string(ReferenceType rt) { case REF_PHANTOM: return "Phantom reference"; default: ShouldNotReachHere(); - return NULL; + return nullptr; } } @@ -278,7 +278,7 @@ void JfrThreadConstant::write_name(JfrCheckpointWriter& writer, const char* name } void JfrThreadConstant::serialize(JfrCheckpointWriter& writer) { - assert(_thread != NULL, "invariant"); + assert(_thread != nullptr, "invariant"); const bool vthread = _vthread != nullptr; writer.write_key(JfrThreadId::jfr_id(_thread, _tid)); int length = -1; diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.hpp b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.hpp index c26590b6a119b..127db555a6242 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.hpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.hpp @@ -109,7 +109,7 @@ class JfrThreadConstant : public JfrSerializer { oop _vthread; void write_name(JfrCheckpointWriter& writer, const char* name, int length); public: - JfrThreadConstant(Thread* t, traceid tid, oop vthread = NULL) : _thread(t), _tid(tid), _vthread(vthread) {} + JfrThreadConstant(Thread* t, traceid tid, oop vthread = nullptr) : _thread(t), _tid(tid), _vthread(vthread) {} void serialize(JfrCheckpointWriter& writer); }; diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp index 
f1692df537795..6f73fdece3c35 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp @@ -50,7 +50,7 @@ class JfrSerializerRegistration : public JfrCHeapObj { bool _permit_cache; public: JfrSerializerRegistration(JfrTypeId id, bool permit_cache, JfrSerializer* serializer) : - _next(NULL), _serializer(serializer), _cache(), _id(id), _permit_cache(permit_cache) {} + _next(nullptr), _serializer(serializer), _cache(), _id(id), _permit_cache(permit_cache) {} ~JfrSerializerRegistration() { delete _serializer; } @@ -104,7 +104,7 @@ void JfrTypeManager::write_threads(JfrCheckpointWriter& writer) { } JfrBlobHandle JfrTypeManager::create_thread_blob(JavaThread* jt, traceid tid /* 0 */, oop vthread /* nullptr */) { - assert(jt != NULL, "invariant"); + assert(jt != nullptr, "invariant"); ResourceMark rm(jt); JfrCheckpointWriter writer(jt, true, THREADS, JFR_THREADLOCAL); // Thread local lease for blob creation. // TYPE_THREAD and count is written unconditionally for blobs, also for vthreads. @@ -116,9 +116,9 @@ JfrBlobHandle JfrTypeManager::create_thread_blob(JavaThread* jt, traceid tid /* } void JfrTypeManager::write_checkpoint(Thread* t, traceid tid /* 0 */, oop vthread /* nullptr */) { - assert(t != NULL, "invariant"); + assert(t != nullptr, "invariant"); Thread* const current = Thread::current(); // not necessarily the same as t - assert(current != NULL, "invariant"); + assert(current != nullptr, "invariant"); const bool is_vthread = vthread != nullptr; ResourceMark rm(current); JfrCheckpointWriter writer(current, true, THREADS, is_vthread ? JFR_VIRTUAL_THREADLOCAL : JFR_THREADLOCAL); @@ -155,7 +155,7 @@ void JfrTypeManager::destroy() { JfrSerializerRegistration* registration; while (types.is_nonempty()) { registration = types.remove(); - assert(registration != NULL, "invariant"); + assert(registration != nullptr, "invariant"); delete registration; } } @@ -163,7 +163,7 @@ void JfrTypeManager::destroy() { class InvokeOnRotation { public: bool process(const JfrSerializerRegistration* r) { - assert(r != NULL, "invariant"); + assert(r != nullptr, "invariant"); r->on_rotation(); return true; } @@ -182,7 +182,7 @@ class Diversity { public: Diversity(JfrTypeId id) : _id(id) {} bool process(const JfrSerializerRegistration* r) { - assert(r != NULL, "invariant"); + assert(r != nullptr, "invariant"); assert(r->id() != _id, "invariant"); return true; } @@ -195,9 +195,9 @@ static void assert_not_registered_twice(JfrTypeId id, List& list) { #endif static bool register_static_type(JfrTypeId id, bool permit_cache, JfrSerializer* serializer) { - assert(serializer != NULL, "invariant"); + assert(serializer != nullptr, "invariant"); JfrSerializerRegistration* const registration = new JfrSerializerRegistration(id, permit_cache, serializer); - if (registration == NULL) { + if (registration == nullptr) { delete serializer; return false; } @@ -256,7 +256,7 @@ class InvokeSerializer { public: InvokeSerializer(JfrCheckpointWriter& writer) : _writer(writer) {} bool process(const JfrSerializerRegistration* r) { - assert(r != NULL, "invariant"); + assert(r != nullptr, "invariant"); r->invoke(_writer); return true; } diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp index e3e9415104f26..2c0f74d13edde 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSet.cpp 
@@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,10 +58,10 @@ typedef const Symbol* SymbolPtr; typedef const JfrSymbolTable::SymbolEntry* SymbolEntryPtr; typedef const JfrSymbolTable::StringEntry* StringEntryPtr; -static JfrCheckpointWriter* _writer = NULL; -static JfrCheckpointWriter* _leakp_writer = NULL; -static JfrArtifactSet* _artifacts = NULL; -static JfrArtifactClosure* _subsystem_callback = NULL; +static JfrCheckpointWriter* _writer = nullptr; +static JfrCheckpointWriter* _leakp_writer = nullptr; +static JfrArtifactSet* _artifacts = nullptr; +static JfrArtifactClosure* _subsystem_callback = nullptr; static bool _class_unload = false; static bool _flushpoint = false; static bool _initial_type_set = true; @@ -83,11 +83,11 @@ static bool is_complete() { } static traceid mark_symbol(KlassPtr klass, bool leakp) { - return klass != NULL ? _artifacts->mark(klass, leakp) : 0; + return klass != nullptr ? _artifacts->mark(klass, leakp) : 0; } static traceid mark_symbol(Symbol* symbol, bool leakp) { - return symbol != NULL ? _artifacts->mark(symbol, leakp) : 0; + return symbol != nullptr ? _artifacts->mark(symbol, leakp) : 0; } static traceid get_bootstrap_name(bool leakp) { @@ -106,33 +106,33 @@ static const char* primitive_name(KlassPtr type_array_klass) { case JVM_SIGNATURE_DOUBLE: return "double"; } assert(false, "invalid type array klass"); - return NULL; + return nullptr; } static Symbol* primitive_symbol(KlassPtr type_array_klass) { - if (type_array_klass == NULL) { + if (type_array_klass == nullptr) { // void.class static Symbol* const void_class_name = SymbolTable::probe("void", 4); - assert(void_class_name != NULL, "invariant"); + assert(void_class_name != nullptr, "invariant"); return void_class_name; } const char* const primitive_type_str = primitive_name(type_array_klass); - assert(primitive_type_str != NULL, "invariant"); + assert(primitive_type_str != nullptr, "invariant"); Symbol* const primitive_type_sym = SymbolTable::probe(primitive_type_str, (int)strlen(primitive_type_str)); - assert(primitive_type_sym != NULL, "invariant"); + assert(primitive_type_sym != nullptr, "invariant"); return primitive_type_sym; } template <typename T> static traceid artifact_id(const T* ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); return JfrTraceId::load_raw(ptr); } static traceid package_id(KlassPtr klass, bool leakp) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); PkgPtr pkg_entry = klass->package(); - if (pkg_entry == NULL) { + if (pkg_entry == nullptr) { return 0; } if (leakp) { @@ -143,9 +143,9 @@ static traceid package_id(KlassPtr klass, bool leakp) { } static traceid module_id(PkgPtr pkg, bool leakp) { - assert(pkg != NULL, "invariant"); + assert(pkg != nullptr, "invariant"); ModPtr module_entry = pkg->module(); - if (module_entry == NULL) { + if (module_entry == nullptr) { return 0; } if (leakp) { @@ -157,13 +157,13 @@ static traceid module_id(PkgPtr pkg, bool leakp) { } static traceid method_id(KlassPtr klass, MethodPtr method) { - assert(klass != NULL, "invariant"); - assert(method != NULL, "invariant"); + assert(klass != nullptr, "invariant"); + assert(method != nullptr, "invariant"); return METHOD_ID(klass, method); } static traceid cld_id(CldPtr cld, bool
leakp) { - assert(cld != NULL, "invariant"); + assert(cld != nullptr, "invariant"); if (leakp) { SET_LEAKP(cld); } else { @@ -174,7 +174,7 @@ static traceid cld_id(CldPtr cld, bool leakp) { template <typename T> static s4 get_flags(const T* ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); return ptr->access_flags().get_flags(); } @@ -184,17 +184,17 @@ static u4 get_primitive_flags() { } static ClassLoaderData* get_cld(const Klass* klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); if (klass->is_objArray_klass()) { klass = ObjArrayKlass::cast(klass)->bottom_klass(); } - if (klass->is_non_strong_hidden()) return NULL; + if (klass->is_non_strong_hidden()) return nullptr; return klass->class_loader_data(); } template <typename T> static void set_serialized(const T* ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); SET_SERIALIZED(ptr); assert(IS_SERIALIZED(ptr), "invariant"); if (current_epoch()) { @@ -211,12 +211,12 @@ static void set_serialized(const T* ptr) { */ static int write_klass(JfrCheckpointWriter* writer, KlassPtr klass, bool leakp) { - assert(writer != NULL, "invariant"); - assert(_artifacts != NULL, "invariant"); - assert(klass != NULL, "invariant"); + assert(writer != nullptr, "invariant"); + assert(_artifacts != nullptr, "invariant"); + assert(klass != nullptr, "invariant"); writer->write(artifact_id(klass)); ClassLoaderData* cld = get_cld(klass); - writer->write(cld != NULL ? cld_id(cld, leakp) : 0); + writer->write(cld != nullptr ? cld_id(cld, leakp) : 0); writer->write(mark_symbol(klass, leakp)); writer->write(package_id(klass, leakp)); writer->write(get_flags(klass)); @@ -225,34 +225,34 @@ static int write_klass(JfrCheckpointWriter* writer, KlassPtr klass, bool leakp) } int write__klass(JfrCheckpointWriter* writer, const void* k) { - assert(k != NULL, "invariant"); + assert(k != nullptr, "invariant"); KlassPtr klass = (KlassPtr)k; set_serialized(klass); return write_klass(writer, klass, false); } int write__klass__leakp(JfrCheckpointWriter* writer, const void* k) { - assert(k != NULL, "invariant"); + assert(k != nullptr, "invariant"); KlassPtr klass = (KlassPtr)k; CLEAR_LEAKP(klass); return write_klass(writer, klass, true); } static bool is_implied(const Klass* klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); return klass->is_subclass_of(vmClasses::ClassLoader_klass()) || klass == vmClasses::Object_klass(); } static void do_klass(Klass* klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); assert(_flushpoint ? USED_THIS_EPOCH(klass) : USED_PREVIOUS_EPOCH(klass), "invariant"); - assert(_subsystem_callback != NULL, "invariant"); + assert(_subsystem_callback != nullptr, "invariant"); _subsystem_callback->do_artifact(klass); } static traceid primitive_id(KlassPtr array_klass) { - if (array_klass == NULL) { + if (array_klass == nullptr) { // The first klass id is reserved for the void.class.
return LAST_TYPE_ID + 1; } @@ -261,8 +261,8 @@ static traceid primitive_id(KlassPtr array_klass) { } static void write_primitive(JfrCheckpointWriter* writer, KlassPtr type_array_klass) { - assert(writer != NULL, "invariant"); - assert(_artifacts != NULL, "invariant"); + assert(writer != nullptr, "invariant"); + assert(_artifacts != nullptr, "invariant"); writer->write(primitive_id(type_array_klass)); writer->write(cld_id(get_cld(Universe::boolArrayKlassObj()), false)); writer->write(mark_symbol(primitive_symbol(type_array_klass), false)); @@ -272,8 +272,8 @@ static void write_primitive(JfrCheckpointWriter* writer, KlassPtr type_array_kla } static void do_loader_klass(const Klass* klass) { - if (klass != NULL && _artifacts->should_do_loader_klass(klass)) { - if (_leakp_writer != NULL) { + if (klass != nullptr && _artifacts->should_do_loader_klass(klass)) { + if (_leakp_writer != nullptr) { SET_LEAKP(klass); } SET_TRANSIENT(klass); @@ -282,7 +282,7 @@ static void do_loader_klass(const Klass* klass) { } static bool register_klass_unload(Klass* klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); return JfrKlassUnloading::on_unload(klass); } @@ -296,8 +296,8 @@ static size_t register_unloading_klasses() { } static void do_unloading_klass(Klass* klass) { - assert(klass != NULL, "invariant"); - assert(_subsystem_callback != NULL, "invariant"); + assert(klass != nullptr, "invariant"); + assert(_subsystem_callback != nullptr, "invariant"); if (register_klass_unload(klass)) { _subsystem_callback->do_artifact(klass); do_loader_klass(klass->class_loader_data()->class_loader_klass()); @@ -311,7 +311,7 @@ static void do_unloading_klass(Klass* klass) { * trigger initialization. */ static bool is_classloader_klass_allowed(const Klass* k) { - assert(k != NULL, "invariant"); + assert(k != nullptr, "invariant"); return !(k->is_abstract() || k->should_be_initialized()); } @@ -342,7 +342,7 @@ static void do_primitives() { write_primitive(_writer, Universe::longArrayKlassObj()); write_primitive(_writer, Universe::floatArrayKlassObj()); write_primitive(_writer, Universe::doubleArrayKlassObj()); - write_primitive(_writer, NULL); // void.class + write_primitive(_writer, nullptr); // void.class } } @@ -373,7 +373,7 @@ class LeakPredicate { public: LeakPredicate(bool class_unload) {} bool operator()(const Klass* klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); return IS_LEAKP(klass) || is_implied(klass); } }; @@ -388,11 +388,11 @@ typedef JfrArtifactCallbackHost Comp static bool write_klasses() { assert(!_artifacts->has_klass_entries(), "invariant"); - assert(_writer != NULL, "invariant"); + assert(_writer != nullptr, "invariant"); KlassArtifactRegistrator reg(_artifacts); KlassWriter kw(_writer, _class_unload); KlassWriterRegistration kwr(&kw, ®); - if (_leakp_writer == NULL) { + if (_leakp_writer == nullptr) { KlassCallback callback(&_subsystem_callback, &kwr); do_klasses(); } else { @@ -416,8 +416,8 @@ static bool write_klasses() { template static void do_previous_epoch_artifact(JfrArtifactClosure* callback, T* value) { - assert(callback != NULL, "invariant"); - assert(value != NULL, "invariant"); + assert(callback != nullptr, "invariant"); + assert(value != nullptr, "invariant"); if (USED_PREVIOUS_EPOCH(value)) { callback->do_artifact(value); } @@ -430,8 +430,8 @@ static void do_previous_epoch_artifact(JfrArtifactClosure* callback, T* value) { typedef JfrArtifactCallbackHost RegisterKlassCallback; static void register_klass(Klass* klass) 
{ - assert(klass != NULL, "invariant"); - assert(_subsystem_callback != NULL, "invariant"); + assert(klass != nullptr, "invariant"); + assert(_subsystem_callback != nullptr, "invariant"); do_previous_epoch_artifact(_subsystem_callback, klass); } @@ -443,9 +443,9 @@ static void register_klasses() { } static int write_package(JfrCheckpointWriter* writer, PkgPtr pkg, bool leakp) { - assert(writer != NULL, "invariant"); - assert(_artifacts != NULL, "invariant"); - assert(pkg != NULL, "invariant"); + assert(writer != nullptr, "invariant"); + assert(_artifacts != nullptr, "invariant"); + assert(pkg != nullptr, "invariant"); writer->write(artifact_id(pkg)); writer->write(mark_symbol(pkg->name(), leakp)); writer->write(module_id(pkg, leakp)); @@ -454,14 +454,14 @@ static int write_package(JfrCheckpointWriter* writer, PkgPtr pkg, bool leakp) { } int write__package(JfrCheckpointWriter* writer, const void* p) { - assert(p != NULL, "invariant"); + assert(p != nullptr, "invariant"); PkgPtr pkg = (PkgPtr)p; set_serialized(pkg); return write_package(writer, pkg, false); } int write__package__leakp(JfrCheckpointWriter* writer, const void* p) { - assert(p != NULL, "invariant"); + assert(p != nullptr, "invariant"); PkgPtr pkg = (PkgPtr)p; CLEAR_LEAKP(pkg); return write_package(writer, pkg, true); @@ -479,7 +479,7 @@ class PackageFieldSelector { public: typedef PkgPtr TypePtr; static TypePtr select(KlassPtr klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); return klass->package(); } }; @@ -502,7 +502,7 @@ typedef CompositeFunctor > typedef JfrArtifactCallbackHost CompositePackageCallback; static void write_packages() { - assert(_writer != NULL, "invariant"); + assert(_writer != nullptr, "invariant"); PackageWriter pw(_writer, _class_unload); KlassPackageWriter kpw(&pw); if (current_epoch()) { @@ -511,7 +511,7 @@ static void write_packages() { return; } assert(previous_epoch(), "invariant"); - if (_leakp_writer == NULL) { + if (_leakp_writer == nullptr) { _artifacts->iterate_klasses(kpw); ClearArtifact clear; PackageWriterWithClear pwwc(&pw, &clear); @@ -539,8 +539,8 @@ static void clear_packages() { } static int write_module(JfrCheckpointWriter* writer, ModPtr mod, bool leakp) { - assert(mod != NULL, "invariant"); - assert(_artifacts != NULL, "invariant"); + assert(mod != nullptr, "invariant"); + assert(_artifacts != nullptr, "invariant"); writer->write(artifact_id(mod)); writer->write(mark_symbol(mod->name(), leakp)); writer->write(mark_symbol(mod->version(), leakp)); @@ -550,14 +550,14 @@ static int write_module(JfrCheckpointWriter* writer, ModPtr mod, bool leakp) { } int write__module(JfrCheckpointWriter* writer, const void* m) { - assert(m != NULL, "invariant"); + assert(m != nullptr, "invariant"); ModPtr mod = (ModPtr)m; set_serialized(mod); return write_module(writer, mod, false); } int write__module__leakp(JfrCheckpointWriter* writer, const void* m) { - assert(m != NULL, "invariant"); + assert(m != nullptr, "invariant"); ModPtr mod = (ModPtr)m; CLEAR_LEAKP(mod); return write_module(writer, mod, true); @@ -575,9 +575,9 @@ class ModuleFieldSelector { public: typedef ModPtr TypePtr; static TypePtr select(KlassPtr klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); PkgPtr pkg = klass->package(); - return pkg != NULL ? pkg->module() : NULL; + return pkg != nullptr ? 
pkg->module() : nullptr; } }; @@ -598,7 +598,7 @@ typedef CompositeFunctor > typedef JfrArtifactCallbackHost CompositeModuleCallback; static void write_modules() { - assert(_writer != NULL, "invariant"); + assert(_writer != nullptr, "invariant"); ModuleWriter mw(_writer, _class_unload); KlassModuleWriter kmw(&mw); if (current_epoch()) { @@ -607,7 +607,7 @@ static void write_modules() { return; } assert(previous_epoch(), "invariant"); - if (_leakp_writer == NULL) { + if (_leakp_writer == nullptr) { _artifacts->iterate_klasses(kmw); ClearArtifact clear; ModuleWriterWithClear mwwc(&mw, &clear); @@ -635,10 +635,10 @@ static void clear_modules() { } static int write_classloader(JfrCheckpointWriter* writer, CldPtr cld, bool leakp) { - assert(cld != NULL, "invariant"); + assert(cld != nullptr, "invariant"); // class loader type const Klass* class_loader_klass = cld->class_loader_klass(); - if (class_loader_klass == NULL) { + if (class_loader_klass == nullptr) { // (primordial) boot class loader writer->write(artifact_id(cld)); // class loader instance id writer->write((traceid)0); // class loader type id (absence of) @@ -653,14 +653,14 @@ static int write_classloader(JfrCheckpointWriter* writer, CldPtr cld, bool leakp } int write__classloader(JfrCheckpointWriter* writer, const void* c) { - assert(c != NULL, "invariant"); + assert(c != nullptr, "invariant"); CldPtr cld = (CldPtr)c; set_serialized(cld); return write_classloader(writer, cld, false); } int write__classloader__leakp(JfrCheckpointWriter* writer, const void* c) { - assert(c != NULL, "invariant"); + assert(c != nullptr, "invariant"); CldPtr cld = (CldPtr)c; CLEAR_LEAKP(cld); return write_classloader(writer, cld, true); @@ -674,7 +674,7 @@ class KlassCldFieldSelector { public: typedef CldPtr TypePtr; static TypePtr select(KlassPtr klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); return get_cld(klass); } }; @@ -683,9 +683,9 @@ class ModuleCldFieldSelector { public: typedef CldPtr TypePtr; static TypePtr select(KlassPtr klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); ModPtr mod = ModuleFieldSelector::select(klass); - return mod != NULL ? mod->loader_data() : NULL; + return mod != nullptr ? mod->loader_data() : nullptr; } }; @@ -693,7 +693,7 @@ class CLDCallback : public CLDClosure { public: CLDCallback() {} void do_cld(ClassLoaderData* cld) { - assert(cld != NULL, "invariant"); + assert(cld != nullptr, "invariant"); if (cld->has_class_mirror_holder()) { return; } @@ -727,7 +727,7 @@ typedef CompositeFunctor > Com typedef JfrArtifactCallbackHost CompositeCldCallback; static void write_classloaders() { - assert(_writer != NULL, "invariant"); + assert(_writer != nullptr, "invariant"); CldWriter cldw(_writer, _class_unload); KlassCldWriter kcw(&cldw); ModuleCldWriter mcw(&cldw); @@ -738,7 +738,7 @@ static void write_classloaders() { return; } assert(previous_epoch(), "invariant"); - if (_leakp_writer == NULL) { + if (_leakp_writer == nullptr) { _artifacts->iterate_klasses(kmcw); ClearArtifact clear; CldWriterWithClear cldwwc(&cldw, &clear); @@ -768,13 +768,13 @@ static void clear_classloaders() { } static u1 get_visibility(MethodPtr method) { - assert(method != NULL, "invariant"); + assert(method != nullptr, "invariant"); return const_cast(method)->is_hidden() ? 
(u1)1 : (u1)0; } template <> void set_serialized(MethodPtr method) { - assert(method != NULL, "invariant"); + assert(method != nullptr, "invariant"); SET_METHOD_SERIALIZED(method); assert(IS_METHOD_SERIALIZED(method), "invariant"); if (current_epoch()) { @@ -783,11 +783,11 @@ void set_serialized(MethodPtr method) { } static int write_method(JfrCheckpointWriter* writer, MethodPtr method, bool leakp) { - assert(writer != NULL, "invariant"); - assert(method != NULL, "invariant"); - assert(_artifacts != NULL, "invariant"); + assert(writer != nullptr, "invariant"); + assert(method != nullptr, "invariant"); + assert(_artifacts != nullptr, "invariant"); KlassPtr klass = method->method_holder(); - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); writer->write(method_id(klass, method)); writer->write(artifact_id(klass)); writer->write(mark_symbol(method->name(), leakp)); @@ -798,14 +798,14 @@ static int write_method(JfrCheckpointWriter* writer, MethodPtr method, bool leak } int write__method(JfrCheckpointWriter* writer, const void* m) { - assert(m != NULL, "invariant"); + assert(m != nullptr, "invariant"); MethodPtr method = (MethodPtr)m; set_serialized(method); return write_method(writer, method, false); } int write__method__leakp(JfrCheckpointWriter* writer, const void* m) { - assert(m != NULL, "invariant"); + assert(m != nullptr, "invariant"); MethodPtr method = (MethodPtr)m; CLEAR_LEAKP_METHOD(method); return write_method(writer, method, true); @@ -855,7 +855,7 @@ class MethodIteratorHost { bool operator()(KlassPtr klass) { if (_method_used_predicate(klass)) { const InstanceKlass* ik = InstanceKlass::cast(klass); - while (ik != NULL) { + while (ik != nullptr) { const int len = ik->methods()->length(); for (int i = 0; i < len; ++i) { MethodPtr method = ik->methods()->at(i); @@ -905,9 +905,9 @@ typedef MethodIteratorHost CompositeMethodWriter; static void write_methods() { - assert(_writer != NULL, "invariant"); + assert(_writer != nullptr, "invariant"); MethodWriter mw(_writer, current_epoch(), _class_unload); - if (_leakp_writer == NULL) { + if (_leakp_writer == nullptr) { _artifacts->iterate_klasses(mw); } else { LeakMethodWriter lpmw(_leakp_writer, current_epoch(), _class_unload); @@ -919,21 +919,21 @@ static void write_methods() { template <> void set_serialized(SymbolEntryPtr ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); ptr->set_serialized(); assert(ptr->is_serialized(), "invariant"); } template <> void set_serialized(StringEntryPtr ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); ptr->set_serialized(); assert(ptr->is_serialized(), "invariant"); } static int write_symbol(JfrCheckpointWriter* writer, SymbolEntryPtr entry, bool leakp) { - assert(writer != NULL, "invariant"); - assert(entry != NULL, "invariant"); + assert(writer != nullptr, "invariant"); + assert(entry != nullptr, "invariant"); ResourceMark rm; writer->write(entry->id()); writer->write(entry->value()->as_C_string()); @@ -941,35 +941,35 @@ static int write_symbol(JfrCheckpointWriter* writer, SymbolEntryPtr entry, bool } int write__symbol(JfrCheckpointWriter* writer, const void* e) { - assert(e != NULL, "invariant"); + assert(e != nullptr, "invariant"); SymbolEntryPtr entry = (SymbolEntryPtr)e; set_serialized(entry); return write_symbol(writer, entry, false); } int write__symbol__leakp(JfrCheckpointWriter* writer, const void* e) { - assert(e != NULL, "invariant"); + assert(e != nullptr, "invariant"); SymbolEntryPtr entry = 
(SymbolEntryPtr)e; return write_symbol(writer, entry, true); } static int write_string(JfrCheckpointWriter* writer, StringEntryPtr entry, bool leakp) { - assert(writer != NULL, "invariant"); - assert(entry != NULL, "invariant"); + assert(writer != nullptr, "invariant"); + assert(entry != nullptr, "invariant"); writer->write(entry->id()); writer->write(entry->value()); return 1; } int write__string(JfrCheckpointWriter* writer, const void* e) { - assert(e != NULL, "invariant"); + assert(e != nullptr, "invariant"); StringEntryPtr entry = (StringEntryPtr)e; set_serialized(entry); return write_string(writer, entry, false); } int write__string__leakp(JfrCheckpointWriter* writer, const void* e) { - assert(e != NULL, "invariant"); + assert(e != nullptr, "invariant"); StringEntryPtr entry = (StringEntryPtr)e; return write_string(writer, entry, true); } @@ -991,7 +991,7 @@ typedef JfrTypeWriterHost LeakStringEntr typedef CompositeFunctor CompositeStringWriter; static void write_symbols_with_leakp() { - assert(_leakp_writer != NULL, "invariant"); + assert(_leakp_writer != nullptr, "invariant"); SymbolEntryWriter sw(_writer, _class_unload); LeakSymbolEntryWriter lsw(_leakp_writer, _class_unload); CompositeSymbolWriter csw(&lsw, &sw); @@ -1006,8 +1006,8 @@ static void write_symbols_with_leakp() { } static void write_symbols() { - assert(_writer != NULL, "invariant"); - if (_leakp_writer != NULL) { + assert(_writer != nullptr, "invariant"); + if (_leakp_writer != nullptr) { write_symbols_with_leakp(); return; } @@ -1029,7 +1029,7 @@ static void clear_klasses_and_methods() { } static size_t teardown() { - assert(_artifacts != NULL, "invariant"); + assert(_artifacts != nullptr, "invariant"); const size_t total_count = _artifacts->total_count(); if (previous_epoch()) { clear_klasses_and_methods(); @@ -1047,7 +1047,7 @@ static void setup(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer _leakp_writer = leakp_writer; _class_unload = class_unload; _flushpoint = flushpoint; - if (_artifacts == NULL) { + if (_artifacts == nullptr) { _artifacts = new JfrArtifactSet(class_unload); } else { _artifacts->initialize(class_unload); @@ -1055,7 +1055,7 @@ static void setup(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer if (!_class_unload) { JfrKlassUnloading::sort(previous_epoch()); } - assert(_artifacts != NULL, "invariant"); + assert(_artifacts != nullptr, "invariant"); assert(!_artifacts->has_klass_entries(), "invariant"); } @@ -1063,7 +1063,7 @@ static void setup(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer * Write all "tagged" (in-use) constant artifacts and their dependencies. */ size_t JfrTypeSet::serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload, bool flushpoint) { - assert(writer != NULL, "invariant"); + assert(writer != nullptr, "invariant"); ResourceMark rm; setup(writer, leakp_writer, class_unload, flushpoint); // write order is important because an individual write step @@ -1085,10 +1085,10 @@ size_t JfrTypeSet::serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* l void JfrTypeSet::clear() { ResourceMark rm; JfrKlassUnloading::clear(); - if (_artifacts != NULL) { + if (_artifacts != nullptr) { _artifacts->clear(); } - setup(NULL, NULL, false, false); + setup(nullptr, nullptr, false, false); register_klasses(); clear_packages(); clear_modules(); @@ -1102,7 +1102,7 @@ size_t JfrTypeSet::on_unloading_classes(JfrCheckpointWriter* writer) { // happen in arbitrary threads, we invoke it explicitly. 
JfrTraceIdEpoch::has_changed_tag_state_no_reset(); if (JfrRecorder::is_recording()) { - return serialize(writer, NULL, true, false); + return serialize(writer, nullptr, true, false); } return register_unloading_klasses(); } diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp index 19a9fa7516b56..0876281d53f6b 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,22 +30,22 @@ #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" -JfrArtifactSet::JfrArtifactSet(bool class_unload) : _symbol_table(NULL), - _klass_list(NULL), +JfrArtifactSet::JfrArtifactSet(bool class_unload) : _symbol_table(nullptr), + _klass_list(nullptr), _total_count(0) { initialize(class_unload); - assert(_klass_list != NULL, "invariant"); + assert(_klass_list != nullptr, "invariant"); } static const size_t initial_klass_list_size = 256; const int initial_klass_loader_set_size = 64; void JfrArtifactSet::initialize(bool class_unload) { - if (_symbol_table == NULL) { + if (_symbol_table == nullptr) { _symbol_table = JfrSymbolTable::create(); - assert(_symbol_table != NULL, "invariant"); + assert(_symbol_table != nullptr, "invariant"); } - assert(_symbol_table != NULL, "invariant"); + assert(_symbol_table != nullptr, "invariant"); _symbol_table->set_class_unload(class_unload); _total_count = 0; // resource allocation @@ -54,7 +54,7 @@ void JfrArtifactSet::initialize(bool class_unload) { } void JfrArtifactSet::clear() { - if (_symbol_table != NULL) { + if (_symbol_table != nullptr) { _symbol_table->clear(); } } @@ -98,14 +98,14 @@ int JfrArtifactSet::entries() const { } bool JfrArtifactSet::should_do_loader_klass(const Klass* k) { - assert(k != NULL, "invariant"); - assert(_klass_loader_set != NULL, "invariant"); + assert(k != nullptr, "invariant"); + assert(_klass_loader_set != nullptr, "invariant"); return !JfrMutablePredicate::test(_klass_loader_set, k); } void JfrArtifactSet::register_klass(const Klass* k) { - assert(k != NULL, "invariant"); - assert(_klass_list != NULL, "invariant"); + assert(k != nullptr, "invariant"); + assert(_klass_list != nullptr, "invariant"); _klass_list->append(k); } @@ -114,7 +114,7 @@ size_t JfrArtifactSet::total_count() const { } void JfrArtifactSet::increment_checkpoint_id() { - assert(_symbol_table != NULL, "invariant"); + assert(_symbol_table != nullptr, "invariant"); _symbol_table->increment_checkpoint_id(); } diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp index f9cd1d0f690f7..5886ef46ce979 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -42,8 +42,8 @@ class CompositeFunctor { Func2* _g; public: CompositeFunctor(Func1* f, Func2* g) : _f(f), _g(g) { - assert(f != NULL, "invariant"); - assert(g != NULL, "invariant"); + assert(f != nullptr, "invariant"); + assert(g != nullptr, "invariant"); } bool operator()(T const& value) { return (*_f)(value) && (*_g)(value); @@ -63,11 +63,11 @@ class JfrArtifactCallbackHost : public JfrArtifactClosure { public: JfrArtifactCallbackHost(JfrArtifactClosure** subsystem_callback_loc, Callback* callback) : _subsystem_callback_loc(subsystem_callback_loc), _callback(callback) { - assert(*_subsystem_callback_loc == NULL, "Subsystem callback should not be set yet"); + assert(*_subsystem_callback_loc == nullptr, "Subsystem callback should not be set yet"); *_subsystem_callback_loc = this; } ~JfrArtifactCallbackHost() { - *_subsystem_callback_loc = NULL; + *_subsystem_callback_loc = nullptr; } void do_artifact(const void* artifact) { (*_callback)(reinterpret_cast(artifact)); @@ -81,7 +81,7 @@ class KlassToFieldEnvelope { KlassToFieldEnvelope(Letter* letter) : _letter(letter) {} bool operator()(const Klass* klass) { typename FieldSelector::TypePtr t = FieldSelector::select(klass); - return t != NULL ? (*_letter)(t) : true; + return t != nullptr ? (*_letter)(t) : true; } }; @@ -116,7 +116,7 @@ class SerializePredicate { public: SerializePredicate(bool class_unload) : _class_unload(class_unload) {} bool operator()(T const& value) { - assert(value != NULL, "invariant"); + assert(value != nullptr, "invariant"); return _class_unload ? true : IS_NOT_SERIALIZED(value); } }; @@ -127,7 +127,7 @@ class SerializePredicate { public: SerializePredicate(bool class_unload) : _class_unload(class_unload) {} bool operator()(const Method* method) { - assert(method != NULL, "invariant"); + assert(method != nullptr, "invariant"); return _class_unload ? true : METHOD_NOT_SERIALIZED(method); } }; @@ -138,7 +138,7 @@ class SymbolPredicate { public: SymbolPredicate(bool class_unload) : _class_unload(class_unload) {} bool operator()(T const& value) { - assert(value != NULL, "invariant"); + assert(value != nullptr, "invariant"); if (_class_unload) { return leakp ? value->is_leakp() : value->is_unloading(); } @@ -182,7 +182,7 @@ class LeakPredicate { public: LeakPredicate(bool class_unload) {} bool operator()(const Method* method) { - assert(method != NULL, "invariant"); + assert(method != nullptr, "invariant"); return IS_METHOD_LEAKP_USED(method); } }; @@ -266,11 +266,11 @@ class KlassArtifactRegistrator { public: KlassArtifactRegistrator(JfrArtifactSet* artifacts) : _artifacts(artifacts) { - assert(_artifacts != NULL, "invariant"); + assert(_artifacts != nullptr, "invariant"); } bool operator()(const Klass* klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); _artifacts->register_klass(klass); return true; } diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp index 496b1a84d3b31..fd50628602334 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -74,27 +74,27 @@ static bool found_jdk_internal_event_klass = false; static bool found_jdk_jfr_event_klass = false; static void check_klass(const Klass* klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); if (found_jdk_internal_event_klass && found_jdk_jfr_event_klass) { return; } - static const Symbol* jdk_internal_event_sym = NULL; - if (jdk_internal_event_sym == NULL) { + static const Symbol* jdk_internal_event_sym = nullptr; + if (jdk_internal_event_sym == nullptr) { // setup when loading the first TypeArrayKlass (Universe::genesis) hence single threaded invariant jdk_internal_event_sym = SymbolTable::new_permanent_symbol("jdk/internal/event/Event"); } - assert(jdk_internal_event_sym != NULL, "invariant"); + assert(jdk_internal_event_sym != nullptr, "invariant"); - static const Symbol* jdk_jfr_event_sym = NULL; - if (jdk_jfr_event_sym == NULL) { + static const Symbol* jdk_jfr_event_sym = nullptr; + if (jdk_jfr_event_sym == nullptr) { // setup when loading the first TypeArrayKlass (Universe::genesis) hence single threaded invariant jdk_jfr_event_sym = SymbolTable::new_permanent_symbol("jdk/jfr/Event"); } - assert(jdk_jfr_event_sym != NULL, "invariant"); + assert(jdk_jfr_event_sym != nullptr, "invariant"); const Symbol* const klass_name = klass->name(); if (!found_jdk_internal_event_klass) { - if (jdk_internal_event_sym == klass_name && klass->class_loader() == NULL) { + if (jdk_internal_event_sym == klass_name && klass->class_loader() == nullptr) { found_jdk_internal_event_klass = true; JfrTraceId::tag_as_jdk_jfr_event(klass); return; @@ -102,7 +102,7 @@ static void check_klass(const Klass* klass) { } if (!found_jdk_jfr_event_klass) { - if (jdk_jfr_event_sym == klass_name && klass->class_loader() == NULL) { + if (jdk_jfr_event_sym == klass_name && klass->class_loader() == nullptr) { found_jdk_jfr_event_klass = true; JfrTraceId::tag_as_jdk_jfr_event(klass); return; @@ -145,17 +145,17 @@ void JfrTraceId::assign(const Klass* klass) { } void JfrTraceId::assign(const ModuleEntry* module) { - assert(module != NULL, "invariant"); + assert(module != nullptr, "invariant"); module->set_trace_id(next_module_id()); } void JfrTraceId::assign(const PackageEntry* package) { - assert(package != NULL, "invariant"); + assert(package != nullptr, "invariant"); package->set_trace_id(next_package_id()); } void JfrTraceId::assign(const ClassLoaderData* cld) { - assert(cld != NULL, "invariant"); + assert(cld != nullptr, "invariant"); if (cld->has_class_mirror_holder()) { cld->set_trace_id(0); return; @@ -176,7 +176,7 @@ static traceid load_primitive(const oop mirror) { assert(java_lang_Class::is_primitive(mirror), "invariant"); const Klass* const tak = java_lang_Class::array_klass_acquire(mirror); traceid id; - if (tak == NULL) { + if (tak == nullptr) { // The first klass id is reserved for the void.class id = LAST_TYPE_ID + 1; } else { @@ -187,12 +187,12 @@ static traceid load_primitive(const oop mirror) { } traceid JfrTraceId::load(jclass jc, bool raw /* false */) { - assert(jc != NULL, "invariant"); + assert(jc != nullptr, "invariant"); assert(JavaThread::current()->thread_state() == _thread_in_vm, "invariant"); const oop mirror = JNIHandles::resolve(jc); - assert(mirror != NULL, "invariant"); + assert(mirror != nullptr, "invariant"); const Klass* const k = java_lang_Class::as_Klass(mirror); - return k != NULL ? (raw ? load_raw(k) : load(k)) : load_primitive(mirror); + return k != nullptr ? (raw ? 
load_raw(k) : load(k)) : load_primitive(mirror); } traceid JfrTraceId::load_raw(jclass jc) { @@ -202,7 +202,7 @@ traceid JfrTraceId::load_raw(jclass jc) { #if INCLUDE_CDS // used by CDS / APPCDS as part of "remove_unshareable_info" void JfrTraceId::remove(const Klass* k) { - assert(k != NULL, "invariant"); + assert(k != nullptr, "invariant"); // Mask off and store the event flags. // This mechanism will retain the event specific flags // in the archive, allowing for event flag restoration @@ -212,14 +212,14 @@ void JfrTraceId::remove(const Klass* k) { // used by CDS / APPCDS as part of "remove_unshareable_info" void JfrTraceId::remove(const Method* method) { - assert(method != NULL, "invariant"); + assert(method != nullptr, "invariant"); // Clear all bits. method->set_trace_flags(0); } // used by CDS / APPCDS as part of "restore_unshareable_info" void JfrTraceId::restore(const Klass* k) { - assert(k != NULL, "invariant"); + assert(k != nullptr, "invariant"); if (IS_JDK_JFR_EVENT_KLASS(k)) { found_jdk_jfr_event_klass = true; } @@ -234,61 +234,61 @@ void JfrTraceId::restore(const Klass* k) { #endif // INCLUDE_CDS bool JfrTraceId::in_visible_set(const jclass jc) { - assert(jc != NULL, "invariant"); + assert(jc != nullptr, "invariant"); assert(JavaThread::current()->thread_state() == _thread_in_vm, "invariant"); const oop mirror = JNIHandles::resolve(jc); - assert(mirror != NULL, "invariant"); + assert(mirror != nullptr, "invariant"); return in_visible_set(java_lang_Class::as_Klass(mirror)); } bool JfrTraceId::in_jdk_jfr_event_hierarchy(const jclass jc) { - assert(jc != NULL, "invariant"); + assert(jc != nullptr, "invariant"); const oop mirror = JNIHandles::resolve(jc); - assert(mirror != NULL, "invariant"); + assert(mirror != nullptr, "invariant"); return in_jdk_jfr_event_hierarchy(java_lang_Class::as_Klass(mirror)); } bool JfrTraceId::is_jdk_jfr_event_sub(const jclass jc) { - assert(jc != NULL, "invariant"); + assert(jc != nullptr, "invariant"); const oop mirror = JNIHandles::resolve(jc); - assert(mirror != NULL, "invariant"); + assert(mirror != nullptr, "invariant"); return is_jdk_jfr_event_sub(java_lang_Class::as_Klass(mirror)); } bool JfrTraceId::is_jdk_jfr_event(const jclass jc) { - assert(jc != NULL, "invariant"); + assert(jc != nullptr, "invariant"); const oop mirror = JNIHandles::resolve(jc); - assert(mirror != NULL, "invariant"); + assert(mirror != nullptr, "invariant"); return is_jdk_jfr_event(java_lang_Class::as_Klass(mirror)); } bool JfrTraceId::is_event_host(const jclass jc) { - assert(jc != NULL, "invariant"); + assert(jc != nullptr, "invariant"); const oop mirror = JNIHandles::resolve(jc); - assert(mirror != NULL, "invariant"); + assert(mirror != nullptr, "invariant"); return is_event_host(java_lang_Class::as_Klass(mirror)); } void JfrTraceId::tag_as_jdk_jfr_event_sub(const jclass jc) { - assert(jc != NULL, "invariant"); + assert(jc != nullptr, "invariant"); const oop mirror = JNIHandles::resolve(jc); - assert(mirror != NULL, "invariant"); + assert(mirror != nullptr, "invariant"); const Klass* const k = java_lang_Class::as_Klass(mirror); tag_as_jdk_jfr_event_sub(k); assert(IS_JDK_JFR_EVENT_SUBKLASS(k), "invariant"); } void JfrTraceId::tag_as_event_host(const jclass jc) { - assert(jc != NULL, "invariant"); + assert(jc != nullptr, "invariant"); const oop mirror = JNIHandles::resolve(jc); - assert(mirror != NULL, "invariant"); + assert(mirror != nullptr, "invariant"); const Klass* const k = java_lang_Class::as_Klass(mirror); tag_as_event_host(k); assert(IS_EVENT_HOST_KLASS(k), 
"invariant"); } void JfrTraceId::untag_jdk_jfr_event_sub(const Klass* k) { - assert(k != NULL, "invariant"); + assert(k != nullptr, "invariant"); if (JfrTraceId::is_jdk_jfr_event_sub(k)) { CLEAR_JDK_JFR_EVENT_SUBKLASS(k); } diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp index b1b9460cf6265..1593cd7647c6f 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -66,7 +66,7 @@ inline traceid JfrTraceId::load_leakp(const Klass* klass, const Method* method) template inline traceid raw_load(const T* t) { - assert(t != NULL, "invariant"); + assert(t != nullptr, "invariant"); return TRACE_ID(t); } @@ -91,29 +91,29 @@ inline traceid JfrTraceId::load_raw(const ClassLoaderData* cld) { } inline bool JfrTraceId::in_visible_set(const Klass* klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); assert(JavaThread::current()->thread_state() == _thread_in_vm, "invariant"); return (IS_JDK_JFR_EVENT_SUBKLASS(klass) && !klass->is_abstract()) || IS_EVENT_HOST_KLASS(klass); } inline bool JfrTraceId::is_jdk_jfr_event(const Klass* k) { - assert(k != NULL, "invariant"); + assert(k != nullptr, "invariant"); return IS_JDK_JFR_EVENT_KLASS(k); } inline void JfrTraceId::tag_as_jdk_jfr_event(const Klass* klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); SET_JDK_JFR_EVENT_KLASS(klass); assert(IS_JDK_JFR_EVENT_KLASS(klass), "invariant"); } inline bool JfrTraceId::is_jdk_jfr_event_sub(const Klass* k) { - assert(k != NULL, "invariant"); + assert(k != nullptr, "invariant"); return IS_JDK_JFR_EVENT_SUBKLASS(k); } inline void JfrTraceId::tag_as_jdk_jfr_event_sub(const Klass* k) { - assert(k != NULL, "invariant"); + assert(k != nullptr, "invariant"); if (IS_NOT_AN_EVENT_SUB_KLASS(k)) { SET_JDK_JFR_EVENT_SUBKLASS(k); } @@ -121,21 +121,21 @@ inline void JfrTraceId::tag_as_jdk_jfr_event_sub(const Klass* k) { } inline bool JfrTraceId::in_jdk_jfr_event_hierarchy(const Klass* klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); if (is_jdk_jfr_event(klass)) { return true; } const Klass* const super = klass->super(); - return super != NULL ? IS_EVENT_KLASS(super) : false; + return super != nullptr ? 
IS_EVENT_KLASS(super) : false; } inline bool JfrTraceId::is_event_host(const Klass* k) { - assert(k != NULL, "invariant"); + assert(k != nullptr, "invariant"); return IS_EVENT_HOST_KLASS(k); } inline void JfrTraceId::tag_as_event_host(const Klass* k) { - assert(k != NULL, "invariant"); + assert(k != nullptr, "invariant"); SET_EVENT_HOST_KLASS(k); assert(IS_EVENT_HOST_KLASS(k), "invariant"); } diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp index 0b939d63f78ce..7c4ff9c2a409f 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,7 +40,7 @@ const int meta_offset = low_offset - 1; #endif inline jbyte* low_addr(jbyte* addr) { - assert(addr != NULL, "invariant"); + assert(addr != nullptr, "invariant"); return addr + low_offset; } @@ -49,7 +49,7 @@ inline jbyte* low_addr(traceid* addr) { } inline jbyte* meta_addr(jbyte* addr) { - assert(addr != NULL, "invariant"); + assert(addr != nullptr, "invariant"); return addr + meta_offset; } @@ -59,25 +59,25 @@ inline jbyte* meta_addr(traceid* addr) { template inline jbyte* traceid_tag_byte(const T* ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); return low_addr(ptr->trace_id_addr()); } template <> inline jbyte* traceid_tag_byte(const Method* ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); return ptr->trace_flags_addr(); } template inline jbyte* traceid_meta_byte(const T* ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); return meta_addr(ptr->trace_id_addr()); } template <> inline jbyte* traceid_meta_byte(const Method* ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); return ptr->trace_meta_addr(); } @@ -95,14 +95,14 @@ inline jbyte traceid_xor(jbyte bits, jbyte current) { template inline void set_form(jbyte bits, jbyte* dest) { - assert(dest != NULL, "invariant"); + assert(dest != nullptr, "invariant"); *dest = op(bits, *dest); OrderAccess::storestore(); } template inline void set_cas_form(jbyte bits, jbyte volatile* dest) { - assert(dest != NULL, "invariant"); + assert(dest != nullptr, "invariant"); do { const jbyte current = *dest; const jbyte new_value = op(bits, current); @@ -114,24 +114,24 @@ inline void set_cas_form(jbyte bits, jbyte volatile* dest) { template inline void JfrTraceIdBits::cas(jbyte bits, const T* ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); set_cas_form(bits, traceid_tag_byte(ptr)); } template inline traceid JfrTraceIdBits::load(const T* ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); return ptr->trace_id(); } inline void set(jbyte bits, jbyte* dest) { - assert(dest != NULL, "invariant"); + assert(dest != nullptr, "invariant"); set_form(bits, dest); } template inline void JfrTraceIdBits::store(jbyte bits, const T* ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); // gcc12 warns "writing 1 byte into a region of size 0" when T == 
Klass. // The warning seems to be a false positive. And there is no warning for // other types that use the same mechanisms. The warning also sometimes @@ -145,7 +145,7 @@ inline void JfrTraceIdBits::store(jbyte bits, const T* ptr) { template inline void JfrTraceIdBits::meta_store(jbyte bits, const T* ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); set(bits, traceid_meta_byte(ptr)); } @@ -155,13 +155,13 @@ inline void set_mask(jbyte mask, jbyte* dest) { template inline void JfrTraceIdBits::mask_store(jbyte mask, const T* ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); set_mask(mask, traceid_tag_byte(ptr)); } template inline void JfrTraceIdBits::meta_mask_store(jbyte mask, const T* ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); set_mask(mask, traceid_meta_byte(ptr)); } @@ -171,7 +171,7 @@ inline void clear_bits(jbyte bits, jbyte* dest) { template inline void JfrTraceIdBits::clear(jbyte bits, const T* ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); clear_bits(bits, traceid_tag_byte(ptr)); } @@ -181,13 +181,13 @@ inline void clear_bits_cas(jbyte bits, jbyte* dest) { template inline void JfrTraceIdBits::clear_cas(jbyte bits, const T* ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); clear_bits_cas(bits, traceid_tag_byte(ptr)); } template inline void JfrTraceIdBits::meta_clear(jbyte bits, const T* ptr) { - assert(ptr != NULL, "invariant"); + assert(ptr != nullptr, "invariant"); clear_bits(bits, traceid_meta_byte(ptr)); } diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdKlassQueue.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdKlassQueue.cpp index 4e0368fec878c..c76048218e162 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdKlassQueue.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdKlassQueue.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -80,7 +80,7 @@ static bool can_compress_element(traceid id) { } static size_t element_size(const Klass* klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); return element_size(can_compress_element(JfrTraceId::load_raw(klass))); } @@ -117,7 +117,7 @@ static traceid read_uncompressed_element(const u1* pos, const Klass** klass) { } static traceid read_element(const u1* pos, const Klass** klass, bool compressed) { - assert(pos != NULL, "invariant"); + assert(pos != nullptr, "invariant"); return compressed ? 
read_compressed_element(pos, klass) : read_uncompressed_element(pos, klass); } @@ -143,8 +143,8 @@ static void store_uncompressed_element(traceid id, const Klass* klass, u1* pos) } static void store_element(const Klass* klass, u1* pos) { - assert(pos != NULL, "invariant"); - assert(klass != NULL, "invariant"); + assert(pos != nullptr, "invariant"); + assert(klass != nullptr, "invariant"); const traceid id = JfrTraceId::load_raw(klass); if (can_compress_element(id)) { store_compressed_element(id, klass, pos); @@ -171,7 +171,7 @@ static bool _clear = false; template size_t JfrEpochQueueKlassPolicy::operator()(const u1* pos, KlassFunctor& callback, bool previous_epoch) { - assert(pos != NULL, "invariant"); + assert(pos != nullptr, "invariant"); const bool compressed = is_compressed(pos); const size_t size = ::element_size(compressed); if (_clear || is_unloaded(pos)) { @@ -184,35 +184,35 @@ size_t JfrEpochQueueKlassPolicy::operator()(const u1* pos, KlassFunctor& set_unloaded(pos); return size; } - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); callback(const_cast(klass)); return size; } template void JfrEpochQueueKlassPolicy::store_element(const Klass* klass, Buffer* buffer) { - assert(klass != NULL, "invariant"); - assert(buffer != NULL, "invariant"); + assert(klass != nullptr, "invariant"); + assert(buffer != nullptr, "invariant"); assert(buffer->free_size() >= ::element_size(klass), "invariant"); ::store_element(klass, buffer->pos()); } template inline size_t JfrEpochQueueKlassPolicy::element_size(const Klass* klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); return ::element_size(klass); } template inline Buffer* JfrEpochQueueKlassPolicy::thread_local_storage(Thread* thread) const { - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); JfrThreadLocal* tl = thread->jfr_thread_local(); return JfrTraceIdEpoch::epoch() ? 
tl->_load_barrier_buffer_epoch_1 : tl->_load_barrier_buffer_epoch_0; } template inline void JfrEpochQueueKlassPolicy::set_thread_local_storage(Buffer* buffer, Thread* thread) { - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); JfrThreadLocal* tl = thread->jfr_thread_local(); if (JfrTraceIdEpoch::epoch()) { tl->_load_barrier_buffer_epoch_1 = buffer; @@ -228,22 +228,22 @@ JfrTraceIdKlassQueue::~JfrTraceIdKlassQueue() { } bool JfrTraceIdKlassQueue::initialize(size_t min_elem_size, size_t free_list_cache_count_limit, size_t cache_prealloc_count) { - assert(_queue == NULL, "invariant"); + assert(_queue == nullptr, "invariant"); _queue = new JfrEpochQueue(); - return _queue != NULL && _queue->initialize(min_elem_size, free_list_cache_count_limit, cache_prealloc_count); + return _queue != nullptr && _queue->initialize(min_elem_size, free_list_cache_count_limit, cache_prealloc_count); } void JfrTraceIdKlassQueue::clear() { - if (_queue != NULL) { + if (_queue != nullptr) { _clear = true; - KlassFunctor functor(NULL); + KlassFunctor functor(nullptr); _queue->iterate(functor, true); _clear = false; } } void JfrTraceIdKlassQueue::enqueue(const Klass* klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); _queue->enqueue(klass); } diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.inline.hpp b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.inline.hpp index 78ad76269cefc..cc2bb2ab5cd72 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.inline.hpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -45,19 +45,19 @@ inline bool is_not_tagged(traceid value) { template inline bool should_tag(const T* t) { - assert(t != NULL, "invariant"); + assert(t != nullptr, "invariant"); return is_not_tagged(TRACE_ID_RAW(t)); } template <> inline bool should_tag(const Method* method) { - assert(method != NULL, "invariant"); + assert(method != nullptr, "invariant"); return is_not_tagged((traceid)method->trace_flags()); } template inline traceid set_used_and_get(const T* type) { - assert(type != NULL, "invariant"); + assert(type != nullptr, "invariant"); if (should_tag(type)) { SET_USED_THIS_EPOCH(type); JfrTraceIdEpoch::set_changed_tag_state(); @@ -73,7 +73,7 @@ inline void JfrTraceIdLoadBarrier::load_barrier(const Klass* klass) { } inline traceid JfrTraceIdLoadBarrier::load(const Klass* klass) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); if (should_tag(klass)) { load_barrier(klass); } @@ -86,8 +86,8 @@ inline traceid JfrTraceIdLoadBarrier::load(const Method* method) { } inline traceid JfrTraceIdLoadBarrier::load(const Klass* klass, const Method* method) { - assert(klass != NULL, "invariant"); - assert(method != NULL, "invariant"); + assert(klass != nullptr, "invariant"); + assert(method != nullptr, "invariant"); if (should_tag(method)) { SET_METHOD_AND_CLASS_USED_THIS_EPOCH(klass); SET_METHOD_FLAG_USED_THIS_EPOCH(method); @@ -108,7 +108,7 @@ inline traceid JfrTraceIdLoadBarrier::load(const PackageEntry* package) { } inline traceid JfrTraceIdLoadBarrier::load(const ClassLoaderData* cld) { - assert(cld != NULL, "invariant"); + assert(cld != nullptr, "invariant"); if (cld->has_class_mirror_holder()) { return 0; } @@ -120,9 +120,9 @@ inline traceid JfrTraceIdLoadBarrier::load(const ClassLoaderData* cld) { } inline traceid JfrTraceIdLoadBarrier::load_leakp(const Klass* klass, const Method* method) { - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant"); - assert(method != NULL, "invariant"); + assert(method != nullptr, "invariant"); assert(klass == method->method_holder(), "invariant"); if (should_tag(method)) { // the method is already logically tagged, just like the klass, diff --git a/src/hotspot/share/jfr/recorder/jfrRecorder.cpp b/src/hotspot/share/jfr/recorder/jfrRecorder.cpp index f4ee1ea324409..1faf12300680d 100644 --- a/src/hotspot/share/jfr/recorder/jfrRecorder.cpp +++ b/src/hotspot/share/jfr/recorder/jfrRecorder.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -83,7 +83,7 @@ bool JfrRecorder::create_oop_storages() { bool JfrRecorder::on_create_vm_1() { if (!is_disabled()) { - if (FlightRecorder || StartFlightRecording != NULL) { + if (FlightRecorder || StartFlightRecording != nullptr) { enable(); } } @@ -94,16 +94,16 @@ bool JfrRecorder::on_create_vm_1() { return JfrTime::initialize(); } -static GrowableArray* dcmd_recordings_array = NULL; +static GrowableArray* dcmd_recordings_array = nullptr; static void release_recordings() { - if (dcmd_recordings_array != NULL) { + if (dcmd_recordings_array != nullptr) { const int length = dcmd_recordings_array->length(); for (int i = 0; i < length; ++i) { delete dcmd_recordings_array->at(i); } delete dcmd_recordings_array; - dcmd_recordings_array = NULL; + dcmd_recordings_array = nullptr; } } @@ -114,8 +114,8 @@ static void teardown_startup_support() { // Parsing options here to detect errors as soon as possible static bool parse_recording_options(const char* options, JfrStartFlightRecordingDCmd* dcmd_recording, TRAPS) { - assert(options != NULL, "invariant"); - assert(dcmd_recording != NULL, "invariant"); + assert(options != nullptr, "invariant"); + assert(dcmd_recording != nullptr, "invariant"); CmdLine cmdline(options, strlen(options), true); dcmd_recording->parse(&cmdline, ',', THREAD); if (HAS_PENDING_EXCEPTION) { @@ -128,17 +128,17 @@ static bool parse_recording_options(const char* options, JfrStartFlightRecording static bool validate_recording_options(TRAPS) { const GrowableArray* options = JfrOptionSet::start_flight_recording_options(); - if (options == NULL) { + if (options == nullptr) { return true; } const int length = options->length(); assert(length >= 1, "invariant"); - assert(dcmd_recordings_array == NULL, "invariant"); + assert(dcmd_recordings_array == nullptr, "invariant"); dcmd_recordings_array = new (mtTracing) GrowableArray(length, mtTracing); - assert(dcmd_recordings_array != NULL, "invariant"); + assert(dcmd_recordings_array != nullptr, "invariant"); for (int i = 0; i < length; ++i) { JfrStartFlightRecordingDCmd* const dcmd_recording = new (mtTracing) JfrStartFlightRecordingDCmd(tty, true); - assert(dcmd_recording != NULL, "invariant"); + assert(dcmd_recording != nullptr, "invariant"); dcmd_recordings_array->append(dcmd_recording); if (!parse_recording_options(options->at(i), dcmd_recording, THREAD)) { return false; @@ -148,7 +148,7 @@ static bool validate_recording_options(TRAPS) { } static bool launch_recording(JfrStartFlightRecordingDCmd* dcmd_recording, TRAPS) { - assert(dcmd_recording != NULL, "invariant"); + assert(dcmd_recording != nullptr, "invariant"); log_trace(jfr, system)("Starting a recording"); dcmd_recording->execute(DCmd_Source_Internal, THREAD); if (HAS_PENDING_EXCEPTION) { @@ -162,7 +162,7 @@ static bool launch_recording(JfrStartFlightRecordingDCmd* dcmd_recording, TRAPS) static bool launch_command_line_recordings(TRAPS) { bool result = true; - if (dcmd_recordings_array != NULL) { + if (dcmd_recordings_array != nullptr) { const int length = dcmd_recordings_array->length(); assert(length >= 1, "invariant"); for (int i = 0; i < length; ++i) { @@ -185,7 +185,7 @@ static void log_jdk_jfr_module_resolution_error(TRAPS) { static bool is_cds_dump_requested() { // we will not be able to launch recordings on startup if a cds dump is being requested - if (Arguments::is_dumping_archive() && JfrOptionSet::start_flight_recording_options() != NULL) { + if (Arguments::is_dumping_archive() && 
JfrOptionSet::start_flight_recording_options() != nullptr) { warning("JFR will be disabled during CDS dumping"); teardown_startup_support(); return true; @@ -300,14 +300,14 @@ bool JfrRecorder::create_components() { } // subsystems -static JfrPostBox* _post_box = NULL; -static JfrStorage* _storage = NULL; -static JfrCheckpointManager* _checkpoint_manager = NULL; -static JfrRepository* _repository = NULL; +static JfrPostBox* _post_box = nullptr; +static JfrStorage* _storage = nullptr; +static JfrCheckpointManager* _checkpoint_manager = nullptr; +static JfrRepository* _repository = nullptr; static JfrStackTraceRepository* _stack_trace_repository; -static JfrStringPool* _stringpool = NULL; -static JfrOSInterface* _os_interface = NULL; -static JfrThreadSampling* _thread_sampling = NULL; +static JfrStringPool* _stringpool = nullptr; +static JfrOSInterface* _os_interface = nullptr; +static JfrThreadSampling* _thread_sampling = nullptr; bool JfrRecorder::create_java_event_writer() { return JfrJavaEventWriter::initialize(); @@ -318,55 +318,55 @@ bool JfrRecorder::create_jvmti_agent() { } bool JfrRecorder::create_post_box() { - assert(_post_box == NULL, "invariant"); + assert(_post_box == nullptr, "invariant"); _post_box = JfrPostBox::create(); - return _post_box != NULL; + return _post_box != nullptr; } bool JfrRecorder::create_chunk_repository() { - assert(_repository == NULL, "invariant"); - assert(_post_box != NULL, "invariant"); + assert(_repository == nullptr, "invariant"); + assert(_post_box != nullptr, "invariant"); _repository = JfrRepository::create(*_post_box); - return _repository != NULL && _repository->initialize(); + return _repository != nullptr && _repository->initialize(); } bool JfrRecorder::create_os_interface() { - assert(_os_interface == NULL, "invariant"); + assert(_os_interface == nullptr, "invariant"); _os_interface = JfrOSInterface::create(); - return _os_interface != NULL && _os_interface->initialize(); + return _os_interface != nullptr && _os_interface->initialize(); } bool JfrRecorder::create_storage() { - assert(_repository != NULL, "invariant"); - assert(_post_box != NULL, "invariant"); + assert(_repository != nullptr, "invariant"); + assert(_post_box != nullptr, "invariant"); _storage = JfrStorage::create(_repository->chunkwriter(), *_post_box); - return _storage != NULL && _storage->initialize(); + return _storage != nullptr && _storage->initialize(); } bool JfrRecorder::create_checkpoint_manager() { - assert(_checkpoint_manager == NULL, "invariant"); - assert(_repository != NULL, "invariant"); + assert(_checkpoint_manager == nullptr, "invariant"); + assert(_repository != nullptr, "invariant"); _checkpoint_manager = JfrCheckpointManager::create(_repository->chunkwriter()); - return _checkpoint_manager != NULL && _checkpoint_manager->initialize(); + return _checkpoint_manager != nullptr && _checkpoint_manager->initialize(); } bool JfrRecorder::create_stacktrace_repository() { - assert(_stack_trace_repository == NULL, "invariant"); + assert(_stack_trace_repository == nullptr, "invariant"); _stack_trace_repository = JfrStackTraceRepository::create(); - return _stack_trace_repository != NULL && _stack_trace_repository->initialize(); + return _stack_trace_repository != nullptr && _stack_trace_repository->initialize(); } bool JfrRecorder::create_stringpool() { - assert(_stringpool == NULL, "invariant"); - assert(_repository != NULL, "invariant"); + assert(_stringpool == nullptr, "invariant"); + assert(_repository != nullptr, "invariant"); _stringpool = 
JfrStringPool::create(_repository->chunkwriter()); - return _stringpool != NULL && _stringpool->initialize(); + return _stringpool != nullptr && _stringpool->initialize(); } bool JfrRecorder::create_thread_sampling() { - assert(_thread_sampling == NULL, "invariant"); + assert(_thread_sampling == nullptr, "invariant"); _thread_sampling = JfrThreadSampling::create(); - return _thread_sampling != NULL; + return _thread_sampling != nullptr; } bool JfrRecorder::create_event_throttler() { @@ -375,37 +375,37 @@ bool JfrRecorder::create_event_throttler() { void JfrRecorder::destroy_components() { JfrJvmtiAgent::destroy(); - if (_post_box != NULL) { + if (_post_box != nullptr) { JfrPostBox::destroy(); - _post_box = NULL; + _post_box = nullptr; } - if (_repository != NULL) { + if (_repository != nullptr) { JfrRepository::destroy(); - _repository = NULL; + _repository = nullptr; } - if (_storage != NULL) { + if (_storage != nullptr) { JfrStorage::destroy(); - _storage = NULL; + _storage = nullptr; } - if (_checkpoint_manager != NULL) { + if (_checkpoint_manager != nullptr) { JfrCheckpointManager::destroy(); - _checkpoint_manager = NULL; + _checkpoint_manager = nullptr; } - if (_stack_trace_repository != NULL) { + if (_stack_trace_repository != nullptr) { JfrStackTraceRepository::destroy(); - _stack_trace_repository = NULL; + _stack_trace_repository = nullptr; } - if (_stringpool != NULL) { + if (_stringpool != nullptr) { JfrStringPool::destroy(); - _stringpool = NULL; + _stringpool = nullptr; } - if (_os_interface != NULL) { + if (_os_interface != nullptr) { JfrOSInterface::destroy(); - _os_interface = NULL; + _os_interface = nullptr; } - if (_thread_sampling != NULL) { + if (_thread_sampling != nullptr) { JfrThreadSampling::destroy(); - _thread_sampling = NULL; + _thread_sampling = nullptr; } JfrEventThrottler::destroy(); } diff --git a/src/hotspot/share/jfr/recorder/repository/jfrChunk.cpp b/src/hotspot/share/jfr/recorder/repository/jfrChunk.cpp index 5703830c0afd8..6d0ec7773b931 100644 --- a/src/hotspot/share/jfr/recorder/repository/jfrChunk.cpp +++ b/src/hotspot/share/jfr/recorder/repository/jfrChunk.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -58,7 +58,7 @@ static jlong ticks_now() { } JfrChunk::JfrChunk() : - _path(NULL), + _path(nullptr), _start_ticks(0), _previous_start_ticks(invalid_time), _start_nanos(0), @@ -74,9 +74,9 @@ JfrChunk::~JfrChunk() { } void JfrChunk::reset() { - if (_path != NULL) { + if (_path != nullptr) { JfrCHeapObj::free(_path, strlen(_path) + 1); - _path = NULL; + _path = nullptr; } _last_checkpoint_offset = _last_metadata_offset = 0; _generation = 1; @@ -180,7 +180,7 @@ int64_t JfrChunk::last_chunk_duration() const { } static char* copy_path(const char* path) { - assert(path != NULL, "invariant"); + assert(path != nullptr, "invariant"); const size_t path_len = strlen(path); char* new_path = JfrCHeapObj::new_array(path_len + 1); strncpy(new_path, path, path_len + 1); @@ -188,11 +188,11 @@ static char* copy_path(const char* path) { } void JfrChunk::set_path(const char* path) { - if (_path != NULL) { + if (_path != nullptr) { JfrCHeapObj::free(_path, strlen(_path) + 1); - _path = NULL; + _path = nullptr; } - if (path != NULL) { + if (path != nullptr) { _path = copy_path(path); } } diff --git a/src/hotspot/share/jfr/recorder/repository/jfrChunkRotation.cpp b/src/hotspot/share/jfr/recorder/repository/jfrChunkRotation.cpp index 7153fe74bd72a..2940126374117 100644 --- a/src/hotspot/share/jfr/recorder/repository/jfrChunkRotation.cpp +++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkRotation.cpp @@ -30,12 +30,12 @@ #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" -static jobject chunk_monitor = NULL; +static jobject chunk_monitor = nullptr; static int64_t threshold = 0; static bool rotate = false; static jobject install_chunk_monitor(JavaThread* thread) { - assert(chunk_monitor == NULL, "invariant"); + assert(chunk_monitor == nullptr, "invariant"); // read static field HandleMark hm(thread); static const char klass[] = "jdk/jfr/internal/JVM"; @@ -50,7 +50,7 @@ static jobject install_chunk_monitor(JavaThread* thread) { // lazy install static jobject get_chunk_monitor(JavaThread* thread) { - return chunk_monitor != NULL ? chunk_monitor : install_chunk_monitor(thread); + return chunk_monitor != nullptr ? chunk_monitor : install_chunk_monitor(thread); } static void notify() { diff --git a/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp b/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp index 2f0781cdff2d2..6de882b1165f6 100644 --- a/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp +++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,12 +46,12 @@ static const int64_t FLAG_OFFSET = GENERATION_OFFSET + 2; static const int64_t HEADER_SIZE = FLAG_OFFSET + 2; static fio_fd open_chunk(const char* path) { - return path != NULL ? os::open(path, O_CREAT | O_RDWR, S_IREAD | S_IWRITE) : invalid_fd; + return path != nullptr ? 
os::open(path, O_CREAT | O_RDWR, S_IREAD | S_IWRITE) : invalid_fd; } #ifdef ASSERT static void assert_writer_position(JfrChunkWriter* writer, int64_t offset) { - assert(writer != NULL, "invariant"); + assert(writer != nullptr, "invariant"); assert(offset == writer->current_offset(), "invariant"); } #endif @@ -133,7 +133,7 @@ class JfrChunkHeadWriter : public StackObj { void flush(int64_t size, bool finalize) { assert(_writer->is_valid(), "invariant"); - assert(_chunk != NULL, "invariant"); + assert(_chunk != nullptr, "invariant"); DEBUG_ONLY(assert_writer_position(_writer, SIZE_OFFSET);) write_size_to_generation(size, finalize); write_flags(); @@ -142,7 +142,7 @@ class JfrChunkHeadWriter : public StackObj { void initialize() { assert(_writer->is_valid(), "invariant"); - assert(_chunk != NULL, "invariant"); + assert(_chunk != nullptr, "invariant"); DEBUG_ONLY(assert_writer_position(_writer, 0);) write_magic(); write_version(); @@ -153,9 +153,9 @@ class JfrChunkHeadWriter : public StackObj { } JfrChunkHeadWriter(JfrChunkWriter* writer, int64_t offset, bool guard = true) : _writer(writer), _chunk(writer->_chunk) { - assert(_writer != NULL, "invariant"); + assert(_writer != nullptr, "invariant"); assert(_writer->is_valid(), "invariant"); - assert(_chunk != NULL, "invariant"); + assert(_chunk != nullptr, "invariant"); if (0 == _writer->current_offset()) { assert(HEADER_SIZE == offset, "invariant"); initialize(); @@ -213,12 +213,12 @@ int64_t JfrChunkWriter::write_chunk_header_checkpoint(bool flushpoint) { } void JfrChunkWriter::mark_chunk_final() { - assert(_chunk != NULL, "invariant"); + assert(_chunk != nullptr, "invariant"); _chunk->mark_final(); } int64_t JfrChunkWriter::flush_chunk(bool flushpoint) { - assert(_chunk != NULL, "invariant"); + assert(_chunk != nullptr, "invariant"); const int64_t sz_written = write_chunk_header_checkpoint(flushpoint); assert(size_written() == sz_written, "invariant"); JfrChunkHeadWriter head(this, SIZE_OFFSET); @@ -226,20 +226,20 @@ int64_t JfrChunkWriter::flush_chunk(bool flushpoint) { return sz_written; } -JfrChunkWriter::JfrChunkWriter() : JfrChunkWriterBase(NULL), _chunk(new JfrChunk()) {} +JfrChunkWriter::JfrChunkWriter() : JfrChunkWriterBase(nullptr), _chunk(new JfrChunk()) {} JfrChunkWriter::~JfrChunkWriter() { - assert(_chunk != NULL, "invariant"); + assert(_chunk != nullptr, "invariant"); delete _chunk; } void JfrChunkWriter::set_path(const char* path) { - assert(_chunk != NULL, "invariant"); + assert(_chunk != nullptr, "invariant"); _chunk->set_path(path); } void JfrChunkWriter::set_time_stamp() { - assert(_chunk != NULL, "invariant"); + assert(_chunk != nullptr, "invariant"); _chunk->set_time_stamp(); } @@ -248,32 +248,32 @@ int64_t JfrChunkWriter::size_written() const { } int64_t JfrChunkWriter::last_checkpoint_offset() const { - assert(_chunk != NULL, "invariant"); + assert(_chunk != nullptr, "invariant"); return _chunk->last_checkpoint_offset(); } int64_t JfrChunkWriter::current_chunk_start_nanos() const { - assert(_chunk != NULL, "invariant"); + assert(_chunk != nullptr, "invariant"); return _chunk->start_nanos(); } void JfrChunkWriter::set_last_checkpoint_offset(int64_t offset) { - assert(_chunk != NULL, "invariant"); + assert(_chunk != nullptr, "invariant"); _chunk->set_last_checkpoint_offset(offset); } void JfrChunkWriter::set_last_metadata_offset(int64_t offset) { - assert(_chunk != NULL, "invariant"); + assert(_chunk != nullptr, "invariant"); _chunk->set_last_metadata_offset(offset); } bool JfrChunkWriter::has_metadata() const { - 
assert(_chunk != NULL, "invariant"); + assert(_chunk != nullptr, "invariant"); return _chunk->has_metadata(); } bool JfrChunkWriter::open() { - assert(_chunk != NULL, "invariant"); + assert(_chunk != nullptr, "invariant"); JfrChunkWriterBase::reset(open_chunk(_chunk->path())); const bool is_open = this->has_valid_fd(); if (is_open) { diff --git a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp index c93971f1e1616..7b146a2f93c72 100644 --- a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp +++ b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -70,7 +70,7 @@ static bool is_path_empty() { static size_t get_dump_directory() { const char* dump_path = JfrEmergencyDump::get_dump_path(); if (*dump_path == '\0') { - if (os::get_current_directory(_path_buffer, sizeof(_path_buffer)) == NULL) { + if (os::get_current_directory(_path_buffer, sizeof(_path_buffer)) == nullptr) { return 0; } } else { @@ -85,7 +85,7 @@ static size_t get_dump_directory() { } static fio_fd open_exclusivly(const char* path) { - assert((path != NULL) && (*path != '\0'), "invariant"); + assert((path != nullptr) && (*path != '\0'), "invariant"); return os::open(path, O_CREAT | O_RDWR, S_IREAD | S_IWRITE); } @@ -94,7 +94,7 @@ static bool is_emergency_dump_file_open() { } static bool open_emergency_dump_fd(const char* path) { - if (path == NULL) { + if (path == nullptr) { return false; } assert(emergency_fd == invalid_fd, "invariant"); @@ -113,9 +113,9 @@ static const char* create_emergency_dump_path() { const size_t path_len = get_dump_directory(); if (path_len == 0) { - return NULL; + return nullptr; } - const char* filename_fmt = NULL; + const char* filename_fmt = nullptr; // fetch specific error cause switch (JfrJavaSupport::cause()) { case JfrJavaSupport::OUT_OF_MEMORY: @@ -128,7 +128,7 @@ static const char* create_emergency_dump_path() { filename_fmt = vm_error_filename_fmt; } const bool result = Arguments::copy_expand_pid(filename_fmt, strlen(filename_fmt), _path_buffer + path_len, JVM_MAXPATHLEN - path_len); - return result ? _path_buffer : NULL; + return result ? _path_buffer : nullptr; } static bool open_emergency_dump_file() { @@ -149,12 +149,12 @@ static bool open_emergency_dump_file() { } static void report(outputStream* st, bool emergency_file_opened, const char* repository_path) { - assert(st != NULL, "invariant"); + assert(st != nullptr, "invariant"); if (emergency_file_opened) { st->print_raw("# JFR recording file will be written. Location: "); st->print_raw_cr(_path_buffer); st->print_raw_cr("#"); - } else if (repository_path != NULL) { + } else if (repository_path != nullptr) { st->print_raw("# The JFR repository may contain useful JFR files. 
Location: "); st->print_raw_cr(repository_path); st->print_raw_cr("#"); @@ -166,7 +166,7 @@ static void report(outputStream* st, bool emergency_file_opened, const char* rep } void JfrEmergencyDump::set_dump_path(const char* path) { - if (path == NULL || *path == '\0') { + if (path == nullptr || *path == '\0') { os::get_current_directory(_dump_path, sizeof(_dump_path)); } else { if (strlen(path) < JVM_MAXPATHLEN) { @@ -181,24 +181,24 @@ const char* JfrEmergencyDump::get_dump_path() { } void JfrEmergencyDump::on_vm_error_report(outputStream* st, const char* repository_path) { - assert(st != NULL, "invariant"); + assert(st != nullptr, "invariant"); Thread* thread = Thread::current_or_null_safe(); - if (thread != NULL) { + if (thread != nullptr) { report(st, open_emergency_dump_file(), repository_path); - } else if (repository_path != NULL) { + } else if (repository_path != nullptr) { // a non-attached thread will not be able to write anything later report(st, false, repository_path); } } static int file_sort(const char** const file1, const char** file2) { - assert(NULL != *file1 && NULL != *file2, "invariant"); + assert(nullptr != *file1 && nullptr != *file2, "invariant"); int cmp = strncmp(*file1, *file2, iso8601_len); if (0 == cmp) { const char* const dot1 = strchr(*file1, '.'); - assert(NULL != dot1, "invariant"); + assert(nullptr != dot1, "invariant"); const char* const dot2 = strchr(*file2, '.'); - assert(NULL != dot2, "invariant"); + assert(nullptr != dot2, "invariant"); ptrdiff_t file1_len = dot1 - *file1; ptrdiff_t file2_len = dot2 - *file2; if (file1_len < file2_len) { @@ -215,7 +215,7 @@ static int file_sort(const char** const file1, const char** file2) { } static void iso8601_to_date_time(char* iso8601_str) { - assert(iso8601_str != NULL, "invariant"); + assert(iso8601_str != nullptr, "invariant"); assert(strlen(iso8601_str) == iso8601_len, "invariant"); // "YYYY-MM-DDTHH:MM:SS" for (size_t i = 0; i < iso8601_len; ++i) { @@ -231,7 +231,7 @@ static void iso8601_to_date_time(char* iso8601_str) { } static void date_time(char* buffer, size_t buffer_len) { - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); assert(buffer_len >= iso8601_len, "buffer too small"); os::iso8601_time(buffer, buffer_len); assert(strlen(buffer) >= iso8601_len + 1, "invariant"); @@ -264,7 +264,7 @@ class RepositoryIterator : public StackObj { // append the file_name at the _path_buffer_file_name_offset position const char* RepositoryIterator::fully_qualified(const char* file_name) const { - assert(NULL != file_name, "invariant"); + assert(nullptr != file_name, "invariant"); assert(!is_path_empty(), "invariant"); assert(_path_buffer_file_name_offset != 0, "invariant"); @@ -272,13 +272,13 @@ const char* RepositoryIterator::fully_qualified(const char* file_name) const { sizeof(_path_buffer) - _path_buffer_file_name_offset, "%s", file_name); - return result != -1 ? _path_buffer : NULL; + return result != -1 ? 
_path_buffer : nullptr; } // caller responsible for deallocation const char* RepositoryIterator::filter(const char* file_name) const { - if (file_name == NULL) { - return NULL; + if (file_name == nullptr) { + return nullptr; } const size_t len = strlen(file_name); if ((len < chunk_file_extension_length) || @@ -286,36 +286,36 @@ const char* RepositoryIterator::filter(const char* file_name) const { chunk_file_jfr_ext, chunk_file_extension_length) != 0)) { // not a .jfr file - return NULL; + return nullptr; } const char* fqn = fully_qualified(file_name); - if (fqn == NULL) { - return NULL; + if (fqn == nullptr) { + return nullptr; } const fio_fd fd = open_exclusivly(fqn); if (invalid_fd == fd) { - return NULL; + return nullptr; } const int64_t size = file_size(fd); ::close(fd); if (size <= chunk_file_header_size) { - return NULL; + return nullptr; } char* const file_name_copy = (char*)os::malloc(len + 1, mtTracing); - if (file_name_copy == NULL) { + if (file_name_copy == nullptr) { log_error(jfr, system)("Unable to malloc memory during jfr emergency dump"); - return NULL; + return nullptr; } strncpy(file_name_copy, file_name, len + 1); return file_name_copy; } RepositoryIterator::RepositoryIterator(const char* repository_path) : - _file_names(NULL), + _file_names(nullptr), _path_buffer_file_name_offset(0), _iterator(0) { DIR* dirp = os::opendir(repository_path); - if (dirp == NULL) { + if (dirp == nullptr) { log_error(jfr, system)("Unable to open repository %s", repository_path); return; } @@ -329,15 +329,15 @@ RepositoryIterator::RepositoryIterator(const char* repository_path) : return; } _file_names = new (mtTracing) GrowableArray(10, mtTracing); - if (_file_names == NULL) { + if (_file_names == nullptr) { log_error(jfr, system)("Unable to malloc memory during jfr emergency dump"); return; } // iterate files in the repository and append filtered file names to the files array struct dirent* dentry; - while ((dentry = os::readdir(dirp)) != NULL) { + while ((dentry = os::readdir(dirp)) != nullptr) { const char* file_name = filter(dentry->d_name); - if (file_name != NULL) { + if (file_name != nullptr) { _file_names->append(file_name); } } @@ -348,7 +348,7 @@ RepositoryIterator::RepositoryIterator(const char* repository_path) : } RepositoryIterator::~RepositoryIterator() { - if (_file_names != NULL) { + if (_file_names != nullptr) { for (int i = 0; i < _file_names->length(); ++i) { os::free(const_cast(_file_names->at(i))); } @@ -357,11 +357,11 @@ RepositoryIterator::~RepositoryIterator() { } bool RepositoryIterator::has_next() const { - return _file_names != NULL && _iterator < _file_names->length(); + return _file_names != nullptr && _iterator < _file_names->length(); } const char* RepositoryIterator::next() const { - return _iterator >= _file_names->length() ? NULL : fully_qualified(_file_names->at(_iterator++)); + return _iterator >= _file_names->length() ? 
nullptr : fully_qualified(_file_names->at(_iterator++)); } static void write_repository_files(const RepositoryIterator& iterator, char* const copy_block, size_t block_size) { @@ -369,7 +369,7 @@ static void write_repository_files(const RepositoryIterator& iterator, char* con while (iterator.has_next()) { fio_fd current_fd = invalid_fd; const char* const fqn = iterator.next(); - assert(fqn != NULL, "invariant"); + assert(fqn != nullptr, "invariant"); current_fd = open_exclusivly(fqn); if (current_fd != invalid_fd) { const int64_t size = file_size(current_fd); @@ -396,7 +396,7 @@ static void write_repository_files(const RepositoryIterator& iterator, char* con static void write_emergency_dump_file(const RepositoryIterator& iterator) { static const size_t block_size = 1 * M; // 1 mb char* const copy_block = (char*)os::malloc(block_size, mtTracing); - if (copy_block == NULL) { + if (copy_block == nullptr) { log_error(jfr, system)("Unable to malloc memory during jfr emergency dump"); log_error(jfr, system)("Unable to write jfr emergency dump file"); } else { @@ -406,7 +406,7 @@ static void write_emergency_dump_file(const RepositoryIterator& iterator) { } void JfrEmergencyDump::on_vm_error(const char* repository_path) { - assert(repository_path != NULL, "invariant"); + assert(repository_path != nullptr, "invariant"); if (open_emergency_dump_file()) { RepositoryIterator iterator(repository_path); write_emergency_dump_file(iterator); @@ -426,13 +426,13 @@ static const char* create_emergency_chunk_path(const char* repository_path) { os::file_separator(), date_time_buffer, chunk_file_jfr_ext); - return result == -1 ? NULL : _path_buffer; + return result == -1 ? nullptr : _path_buffer; } const char* JfrEmergencyDump::chunk_path(const char* repository_path) { - if (repository_path == NULL) { + if (repository_path == nullptr) { if (!open_emergency_dump_file()) { - return NULL; + return nullptr; } // We can directly use the emergency dump file name as the chunk. // The chunk writer will open its own fd so we close this descriptor. @@ -454,7 +454,7 @@ const char* JfrEmergencyDump::chunk_path(const char* repository_path) { * */ static bool prepare_for_emergency_dump(Thread* thread) { - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); if (thread->is_Watcher_thread()) { // need WatcherThread as a safeguard against potential deadlocks return false; @@ -462,7 +462,7 @@ static bool prepare_for_emergency_dump(Thread* thread) { #ifdef ASSERT Mutex* owned_lock = thread->owned_locks(); - while (owned_lock != NULL) { + while (owned_lock != nullptr) { Mutex* next = owned_lock->next(); owned_lock->unlock(); owned_lock = next; @@ -531,9 +531,9 @@ class JavaThreadInVMAndNative : public StackObj { JavaThreadState _original_state; public: - JavaThreadInVMAndNative(Thread* t) : _jt(t->is_Java_thread() ? JavaThread::cast(t) : NULL), + JavaThreadInVMAndNative(Thread* t) : _jt(t->is_Java_thread() ? 
JavaThread::cast(t) : nullptr), _original_state(_thread_max_state) { - if (_jt != NULL) { + if (_jt != nullptr) { _original_state = _jt->thread_state(); if (_original_state != _thread_in_vm) { _jt->set_thread_state(_thread_in_vm); @@ -548,7 +548,7 @@ class JavaThreadInVMAndNative : public StackObj { } void transition_to_native() { - if (_jt != NULL) { + if (_jt != nullptr) { assert(_jt->thread_state() == _thread_in_vm, "invariant"); _jt->set_thread_state(_thread_in_native); } @@ -575,7 +575,7 @@ void JfrEmergencyDump::on_vm_shutdown(bool exception_handler) { return; } Thread* thread = Thread::current_or_null_safe(); - if (thread == NULL) { + if (thread == nullptr) { return; } // Ensure a JavaThread is _thread_in_vm when we make this call diff --git a/src/hotspot/share/jfr/recorder/repository/jfrRepository.cpp b/src/hotspot/share/jfr/recorder/repository/jfrRepository.cpp index 1bfc1a0242add..76ae5bdba660b 100644 --- a/src/hotspot/share/jfr/recorder/repository/jfrRepository.cpp +++ b/src/hotspot/share/jfr/recorder/repository/jfrRepository.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,53 +36,53 @@ #include "runtime/mutex.hpp" #include "runtime/os.hpp" -static JfrRepository* _instance = NULL; +static JfrRepository* _instance = nullptr; JfrRepository& JfrRepository::instance() { return *_instance; } -static JfrChunkWriter* _chunkwriter = NULL; +static JfrChunkWriter* _chunkwriter = nullptr; JfrChunkWriter& JfrRepository::chunkwriter() { return *_chunkwriter; } -JfrRepository::JfrRepository(JfrPostBox& post_box) : _path(NULL), _post_box(post_box) {} +JfrRepository::JfrRepository(JfrPostBox& post_box) : _path(nullptr), _post_box(post_box) {} bool JfrRepository::initialize() { - assert(_chunkwriter == NULL, "invariant"); + assert(_chunkwriter == nullptr, "invariant"); _chunkwriter = new JfrChunkWriter(); - return _chunkwriter != NULL; + return _chunkwriter != nullptr; } JfrRepository::~JfrRepository() { - if (_path != NULL) { + if (_path != nullptr) { JfrCHeapObj::free(_path, strlen(_path) + 1); - _path = NULL; + _path = nullptr; } - if (_chunkwriter != NULL) { + if (_chunkwriter != nullptr) { delete _chunkwriter; - _chunkwriter = NULL; + _chunkwriter = nullptr; } } JfrRepository* JfrRepository::create(JfrPostBox& post_box) { - assert(_instance == NULL, "invariant"); + assert(_instance == nullptr, "invariant"); _instance = new JfrRepository(post_box); return _instance; } void JfrRepository::destroy() { - assert(_instance != NULL, "invariant"); + assert(_instance != nullptr, "invariant"); delete _instance; - _instance = NULL; + _instance = nullptr; } void JfrRepository::on_vm_error() { - if (_path == NULL) { + if (_path == nullptr) { // completed already return; } @@ -94,14 +94,14 @@ void JfrRepository::on_vm_error_report(outputStream* st) { } bool JfrRepository::set_path(const char* path) { - assert(path != NULL, "trying to set the repository path with a NULL string!"); - if (_path != NULL) { + assert(path != nullptr, "trying to set the repository path with a null string!"); + if (_path != nullptr) { // delete existing JfrCHeapObj::free(_path, strlen(_path) + 1); } const size_t path_len = strlen(path); _path = JfrCHeapObj::new_array(path_len + 1); - if (_path == NULL) { + if (_path == nullptr) { return false; } 
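
As an aside to the `JfrRepository::set_path` hunk around here, the following is a small standalone sketch (not HotSpot code) of the same own-the-copy idiom: free any previously owned buffer, take a private NUL-terminated copy of the caller's string, and report allocation failure to the caller. The names `set_path` and `_path` are reused only for readability, and plain `malloc`/`free` stands in for `JfrCHeapObj`.

```cpp
#include <cstdio>
#include <cstdlib>
#include <cstring>

static char* _path = nullptr;   // hypothetical owned copy, mirrors the _path field

static bool set_path(const char* path) {
  if (path == nullptr) {
    return false;                          // nothing to copy
  }
  if (_path != nullptr) {
    free(_path);                           // drop the previous copy first
    _path = nullptr;
  }
  const size_t len = strlen(path);
  _path = static_cast<char*>(malloc(len + 1));
  if (_path == nullptr) {
    return false;                          // allocation failure reported to caller
  }
  memcpy(_path, path, len + 1);            // copy including the terminating NUL
  return true;
}

int main() {
  if (set_path("/tmp/jfr-repository")) {
    printf("path set to %s\n", _path);
  }
  free(_path);
  return 0;
}
```

The point of the extra length argument (`len + 1`) is that the copy must own its own terminator; the original buffer may be resource-area memory that goes away after the call.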
strncpy(_path, path, path_len + 1); @@ -143,8 +143,8 @@ void JfrRepository::set_chunk_path(jstring path, JavaThread* jt) { DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt)); ResourceMark rm(jt); const char* const canonical_chunk_path = JfrJavaSupport::c_str(path, jt); - if (NULL == canonical_chunk_path && !_chunkwriter->is_valid()) { - // new output is NULL and current output is NULL + if (nullptr == canonical_chunk_path && !_chunkwriter->is_valid()) { + // new output is nullptr and current output is null return; } instance().set_chunk_path(canonical_chunk_path); @@ -155,7 +155,7 @@ void JfrRepository::set_path(jstring location, JavaThread* jt) { DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt)); ResourceMark rm(jt); const char* const path = JfrJavaSupport::c_str(location, jt); - if (path != NULL) { + if (path != nullptr) { instance().set_path(path); } } diff --git a/src/hotspot/share/jfr/recorder/service/jfrEventThrottler.cpp b/src/hotspot/share/jfr/recorder/service/jfrEventThrottler.cpp index e30bdc36e4882..bbca7755836f1 100644 --- a/src/hotspot/share/jfr/recorder/service/jfrEventThrottler.cpp +++ b/src/hotspot/share/jfr/recorder/service/jfrEventThrottler.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, Datadog, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -35,7 +35,7 @@ constexpr static const JfrSamplerParams _disabled_params = { false // reconfigure }; -static JfrEventThrottler* _throttler = NULL; +static JfrEventThrottler* _throttler = nullptr; JfrEventThrottler::JfrEventThrottler(JfrEventId event_id) : JfrAdaptiveSampler(), @@ -48,29 +48,29 @@ JfrEventThrottler::JfrEventThrottler(JfrEventId event_id) : _update(false) {} bool JfrEventThrottler::create() { - assert(_throttler == NULL, "invariant"); + assert(_throttler == nullptr, "invariant"); _throttler = new JfrEventThrottler(JfrObjectAllocationSampleEvent); - return _throttler != NULL && _throttler->initialize(); + return _throttler != nullptr && _throttler->initialize(); } void JfrEventThrottler::destroy() { delete _throttler; - _throttler = NULL; + _throttler = nullptr; } // There is currently only one throttler instance, for the jdk.ObjectAllocationSample event. // When introducing additional throttlers, also add a lookup map keyed by event id. JfrEventThrottler* JfrEventThrottler::for_event(JfrEventId event_id) { - assert(_throttler != NULL, "JfrEventThrottler has not been properly initialized"); + assert(_throttler != nullptr, "JfrEventThrottler has not been properly initialized"); assert(event_id == JfrObjectAllocationSampleEvent, "Event type has an unconfigured throttler"); - return event_id == JfrObjectAllocationSampleEvent ? _throttler : NULL; + return event_id == JfrObjectAllocationSampleEvent ? _throttler : nullptr; } void JfrEventThrottler::configure(JfrEventId event_id, int64_t sample_size, int64_t period_ms) { if (event_id != JfrObjectAllocationSampleEvent) { return; } - assert(_throttler != NULL, "JfrEventThrottler has not been properly initialized"); + assert(_throttler != nullptr, "JfrEventThrottler has not been properly initialized"); _throttler->configure(sample_size, period_ms); } @@ -93,7 +93,7 @@ void JfrEventThrottler::configure(int64_t sample_size, int64_t period_ms) { // Predicate for event selection. 
bool JfrEventThrottler::accept(JfrEventId event_id, int64_t timestamp /* 0 */) { JfrEventThrottler* const throttler = for_event(event_id); - if (throttler == NULL) return true; + if (throttler == nullptr) return true; return _throttler->_disabled ? true : _throttler->sample(timestamp); } @@ -168,8 +168,8 @@ inline void set_sample_points_and_window_duration(JfrSamplerParams& params, int6 * If the input event sample size is large enough, normalize to per 1000 ms */ inline void normalize(int64_t* sample_size, int64_t* period_ms) { - assert(sample_size != NULL, "invariant"); - assert(period_ms != NULL, "invariant"); + assert(sample_size != nullptr, "invariant"); + assert(period_ms != nullptr, "invariant"); if (*period_ms == MILLIUNITS) { return; } @@ -245,7 +245,7 @@ inline double compute_ewma_alpha_coefficient(size_t lookback_count) { * When introducing additional throttlers, also provide a map from the event id to the event name. */ static void log(const JfrSamplerWindow* expired, double* sample_size_ewma) { - assert(sample_size_ewma != NULL, "invariant"); + assert(sample_size_ewma != nullptr, "invariant"); if (log_is_enabled(Debug, jfr, system, throttle)) { *sample_size_ewma = exponentially_weighted_moving_average(expired->sample_size(), compute_ewma_alpha_coefficient(expired->params().window_lookback_count), *sample_size_ewma); log_debug(jfr, system, throttle)("jdk.ObjectAllocationSample: avg.sample size: %0.4f, window set point: %zu, sample size: %zu, population size: %zu, ratio: %.4f, window duration: %zu ms\n", @@ -266,7 +266,7 @@ static void log(const JfrSamplerWindow* expired, double* sample_size_ewma) { * in the process of rotating windows. */ const JfrSamplerParams& JfrEventThrottler::next_window_params(const JfrSamplerWindow* expired) { - assert(expired != NULL, "invariant"); + assert(expired != nullptr, "invariant"); assert(_lock, "invariant"); log(expired, &_sample_size_ewma); if (_update) { diff --git a/src/hotspot/share/jfr/recorder/service/jfrMemorySizer.cpp b/src/hotspot/share/jfr/recorder/service/jfrMemorySizer.cpp index 9551b84e9704a..af17659e2ec36 100644 --- a/src/hotspot/share/jfr/recorder/service/jfrMemorySizer.cpp +++ b/src/hotspot/share/jfr/recorder/service/jfrMemorySizer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -339,7 +339,7 @@ static void assert_post_condition(const JfrMemoryOptions* options) { // MEMORY SIZING ALGORITHM bool JfrMemorySizer::adjust_options(JfrMemoryOptions* options) { - assert(options != NULL, "invariant"); + assert(options != nullptr, "invariant"); enum MemoryOptions { MEMORY_SIZE = 1, @@ -361,7 +361,7 @@ bool JfrMemorySizer::adjust_options(JfrMemoryOptions* options) { // // Unordered selection: // - // C(4, 0) = {} = NULL set = 1 + // C(4, 0) = {} = null set = 1 // C(4, 1) = { (M), (G), (C), (T) } = 4 // C(4, 2) = { (M, G), (M, C), (M, T), (G, C), (G, T), (C, T) } = 6 // C(4, 3) = { (M, G, C), (M, G, T), (M, C, T), (G, C, T) } = 4 diff --git a/src/hotspot/share/jfr/recorder/service/jfrOptionSet.cpp b/src/hotspot/share/jfr/recorder/service/jfrOptionSet.cpp index fbcd28bd56024..a077ec53c9004 100644 --- a/src/hotspot/share/jfr/recorder/service/jfrOptionSet.cpp +++ b/src/hotspot/share/jfr/recorder/service/jfrOptionSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -154,8 +154,8 @@ bool JfrOptionSet::allow_event_retransforms() { } // default options for the dcmd parser -const char* const default_repository = NULL; -const char* const default_dumppath = NULL; +const char* const default_repository = nullptr; +const char* const default_dumppath = nullptr; const char* const default_global_buffer_size = "512k"; const char* const default_num_global_buffers = "20"; const char* const default_memory_size = "10m"; @@ -281,7 +281,7 @@ static void register_parser_options() { } static bool parse_flight_recorder_options_internal(TRAPS) { - if (FlightRecorderOptions == NULL) { + if (FlightRecorderOptions == nullptr) { return true; } const size_t length = strlen((const char*)FlightRecorderOptions); @@ -292,14 +292,14 @@ static bool parse_flight_recorder_options_internal(TRAPS) { ObsoleteOption option = OBSOLETE_OPTIONS[index]; const char* p = strstr((const char*)FlightRecorderOptions, option.name); const size_t option_length = strlen(option.name); - if (p != NULL && p[option_length] == '=') { + if (p != nullptr && p[option_length] == '=') { log_error(arguments) ("-XX:FlightRecorderOptions=%s=... has been removed. 
%s", option.name, option.message); return false; } } ResourceMark rm(THREAD); oop message = java_lang_Throwable::message(PENDING_EXCEPTION); - if (message != NULL) { + if (message != nullptr) { const char* msg = java_lang_String::as_utf8_string(message); log_error(arguments) ("%s", msg); } @@ -336,7 +336,7 @@ bool JfrOptionSet::initialize(JavaThread* thread) { } bool JfrOptionSet::configure(TRAPS) { - if (FlightRecorderOptions == NULL) { + if (FlightRecorderOptions == nullptr) { return true; } ResourceMark rm(THREAD); @@ -345,10 +345,10 @@ bool JfrOptionSet::configure(TRAPS) { JfrConfigureFlightRecorderDCmd configure(&st, false); configure._repository_path.set_is_set(_dcmd_repository.is_set()); char* repo = _dcmd_repository.value(); - if (repo != NULL) { + if (repo != nullptr) { const size_t len = strlen(repo); char* repo_copy = JfrCHeapObj::new_array(len + 1); - if (NULL == repo_copy) { + if (nullptr == repo_copy) { return false; } strncpy(repo_copy, repo, len + 1); @@ -357,10 +357,10 @@ bool JfrOptionSet::configure(TRAPS) { configure._dump_path.set_is_set(_dcmd_dumppath.is_set()); char* dumppath = _dcmd_dumppath.value(); - if (dumppath != NULL) { + if (dumppath != nullptr) { const size_t len = strlen(dumppath); char* dumppath_copy = JfrCHeapObj::new_array(len + 1); - if (NULL == dumppath_copy) { + if (nullptr == dumppath_copy) { return false; } strncpy(dumppath_copy, dumppath, len + 1); @@ -743,9 +743,9 @@ bool JfrOptionSet::adjust_memory_options() { } bool JfrOptionSet::parse_flight_recorder_option(const JavaVMOption** option, char* delimiter) { - assert(option != NULL, "invariant"); - assert(delimiter != NULL, "invariant"); - assert((*option)->optionString != NULL, "invariant"); + assert(option != nullptr, "invariant"); + assert(delimiter != nullptr, "invariant"); + assert((*option)->optionString != nullptr, "invariant"); assert(strncmp((*option)->optionString, "-XX:FlightRecorderOptions", 25) == 0, "invariant"); if (*delimiter == '\0') { // -XX:FlightRecorderOptions without any delimiter and values @@ -757,14 +757,14 @@ bool JfrOptionSet::parse_flight_recorder_option(const JavaVMOption** option, cha return false; } -static GrowableArray* start_flight_recording_options_array = NULL; +static GrowableArray* start_flight_recording_options_array = nullptr; bool JfrOptionSet::parse_start_flight_recording_option(const JavaVMOption** option, char* delimiter) { - assert(option != NULL, "invariant"); - assert(delimiter != NULL, "invariant"); - assert((*option)->optionString != NULL, "invariant"); + assert(option != nullptr, "invariant"); + assert(delimiter != nullptr, "invariant"); + assert((*option)->optionString != nullptr, "invariant"); assert(strncmp((*option)->optionString, "-XX:StartFlightRecording", 24) == 0, "invariant"); - const char* value = NULL; + const char* value = nullptr; if (*delimiter == '\0') { // -XX:StartFlightRecording without any delimiter and values // Add dummy value "dumponexit=false" so -XX:StartFlightRecording can be used without explicit values. 
@@ -777,13 +777,13 @@ bool JfrOptionSet::parse_start_flight_recording_option(const JavaVMOption** opti *delimiter = '='; value = delimiter + 1; } - assert(value != NULL, "invariant"); + assert(value != nullptr, "invariant"); const size_t value_length = strlen(value); - if (start_flight_recording_options_array == NULL) { + if (start_flight_recording_options_array == nullptr) { start_flight_recording_options_array = new (mtTracing) GrowableArray(8, mtTracing); } - assert(start_flight_recording_options_array != NULL, "invariant"); + assert(start_flight_recording_options_array != nullptr, "invariant"); char* const startup_value = NEW_C_HEAP_ARRAY(char, value_length + 1, mtTracing); strncpy(startup_value, value, value_length + 1); assert(strncmp(startup_value, value, value_length) == 0, "invariant"); @@ -796,12 +796,12 @@ const GrowableArray* JfrOptionSet::start_flight_recording_options() } void JfrOptionSet::release_start_flight_recording_options() { - if (start_flight_recording_options_array != NULL) { + if (start_flight_recording_options_array != nullptr) { const int length = start_flight_recording_options_array->length(); for (int i = 0; i < length; ++i) { FREE_C_HEAP_ARRAY(char, start_flight_recording_options_array->at(i)); } delete start_flight_recording_options_array; - start_flight_recording_options_array = NULL; + start_flight_recording_options_array = nullptr; } } diff --git a/src/hotspot/share/jfr/recorder/service/jfrPostBox.cpp b/src/hotspot/share/jfr/recorder/service/jfrPostBox.cpp index c4cb226c31431..00ebd710d30c9 100644 --- a/src/hotspot/share/jfr/recorder/service/jfrPostBox.cpp +++ b/src/hotspot/share/jfr/recorder/service/jfrPostBox.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,22 +38,22 @@ (MSGBIT(MSG_FLUSHPOINT)) \ ) -static JfrPostBox* _instance = NULL; +static JfrPostBox* _instance = nullptr; JfrPostBox& JfrPostBox::instance() { return *_instance; } JfrPostBox* JfrPostBox::create() { - assert(_instance == NULL, "invariant"); + assert(_instance == nullptr, "invariant"); _instance = new JfrPostBox(); return _instance; } void JfrPostBox::destroy() { - assert(_instance != NULL, "invariant"); + assert(_instance != nullptr, "invariant"); delete _instance; - _instance = NULL; + _instance = nullptr; } JfrPostBox::JfrPostBox() : diff --git a/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp b/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp index 4d6e6c9e25aef..5302ae32f7a6e 100644 --- a/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp +++ b/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
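jfrPostBox.cpp above encodes each message kind as one bit (MSGBIT), so a set of pending messages is a single integer. A compact sketch of that encoding with an abbreviated, illustrative message list:

// Sketch only: message names here are hypothetical, not the full JFR set.
enum ToyMsg { MSG_START = 0, MSG_STOP = 1, MSG_ROTATE = 2, MSG_SHUTDOWN = 3, MSG_FLUSHPOINT = 4 };

constexpr int msgbit(ToyMsg m) { return 1 << m; }

static bool has_any(int pending, int mask) { return (pending & mask) != 0; }

// Example of the kind of test the recorder loop macros express, e.g. "rotate or stop".
static bool wants_rotation(int pending) {
  return has_any(pending, msgbit(MSG_ROTATE) | msgbit(MSG_STOP));
}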
* * This code is free software; you can redistribute it and/or modify it @@ -67,7 +67,7 @@ class JfrRotationLock : public StackObj { static bool acquire(Thread* thread) { if (Atomic::cmpxchg(&_lock, 0, 1) == 0) { - assert(_owner_thread == NULL, "invariant"); + assert(_owner_thread == nullptr, "invariant"); _owner_thread = thread; return true; } @@ -86,7 +86,7 @@ class JfrRotationLock : public StackObj { public: JfrRotationLock() : _thread(Thread::current()), _recursive(false) { - assert(_thread != NULL, "invariant"); + assert(_thread != nullptr, "invariant"); if (_thread == _owner_thread) { // Recursive case is not supported. _recursive = true; @@ -103,7 +103,7 @@ class JfrRotationLock : public StackObj { if (_recursive) { return; } - _owner_thread = NULL; + _owner_thread = nullptr; OrderAccess::storestore(); _lock = 0; } @@ -117,7 +117,7 @@ class JfrRotationLock : public StackObj { } }; -const Thread* JfrRotationLock::_owner_thread = NULL; +const Thread* JfrRotationLock::_owner_thread = nullptr; const int JfrRotationLock::retry_wait_millis = 10; volatile int JfrRotationLock::_lock = 0; @@ -589,13 +589,13 @@ void JfrRecorderService::post_safepoint_write() { } static JfrBuffer* thread_local_buffer(Thread* t) { - assert(t != NULL, "invariant"); + assert(t != nullptr, "invariant"); return t->jfr_thread_local()->native_buffer(); } static void reset_buffer(JfrBuffer* buffer, Thread* t) { - assert(buffer != NULL, "invariant"); - assert(t != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); + assert(t != nullptr, "invariant"); assert(buffer == thread_local_buffer(t), "invariant"); buffer->set_pos(const_cast(buffer->top())); } @@ -606,7 +606,7 @@ static void reset_thread_local_buffer(Thread* t) { static void write_thread_local_buffer(JfrChunkWriter& chunkwriter, Thread* t) { JfrBuffer * const buffer = thread_local_buffer(t); - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); if (!buffer->empty()) { chunkwriter.write_unbuffered(buffer->top(), buffer->pos() - buffer->top()); } diff --git a/src/hotspot/share/jfr/recorder/service/jfrRecorderThread.cpp b/src/hotspot/share/jfr/recorder/service/jfrRecorderThread.cpp index 25baddc937230..e1cb8e889d33e 100644 --- a/src/hotspot/share/jfr/recorder/service/jfrRecorderThread.cpp +++ b/src/hotspot/share/jfr/recorder/service/jfrRecorderThread.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,23 +39,23 @@ static Thread* start_thread(instanceHandle thread_oop, ThreadFunction proc, TRAPS) { assert(thread_oop.not_null(), "invariant"); - assert(proc != NULL, "invariant"); + assert(proc != nullptr, "invariant"); JavaThread* new_thread = new JavaThread(proc); // At this point it may be possible that no // osthread was created for the JavaThread due to lack of resources. 
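JfrRotationLock above is a spin lock built from a cmpxchg on an int plus an owner field used to detect (but not support) recursion. A sketch of the same shape using std::atomic rather than HotSpot's Atomic and OrderAccess; class and member names are illustrative:

#include <atomic>
#include <thread>

class ToyRotationLock {
  static std::atomic<int> _lock;
  static std::atomic<std::thread::id> _owner;
  bool _recursive = false;
 public:
  ToyRotationLock() {
    const std::thread::id self = std::this_thread::get_id();
    if (_owner.load(std::memory_order_acquire) == self) {
      _recursive = true;                     // re-entry: do not acquire again
      return;
    }
    int expected = 0;
    while (!_lock.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
      expected = 0;
      std::this_thread::yield();             // stand-in for the timed retry wait
    }
    _owner.store(self, std::memory_order_release);
  }
  ~ToyRotationLock() {
    if (_recursive) return;
    _owner.store(std::thread::id(), std::memory_order_release);
    _lock.store(0, std::memory_order_release);
  }
};

std::atomic<int> ToyRotationLock::_lock{0};
std::atomic<std::thread::id> ToyRotationLock::_owner{std::thread::id()};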
- if (new_thread->osthread() == NULL) { + if (new_thread->osthread() == nullptr) { delete new_thread; JfrJavaSupport::throw_out_of_memory_error("Unable to create native recording thread for JFR", THREAD); - return NULL; + return nullptr; } else { JavaThread::start_internal_daemon(THREAD, new_thread, thread_oop, NormPriority); return new_thread; } } -JfrPostBox* JfrRecorderThread::_post_box = NULL; +JfrPostBox* JfrRecorderThread::_post_box = nullptr; JfrPostBox& JfrRecorderThread::post_box() { return *_post_box; @@ -65,8 +65,8 @@ JfrPostBox& JfrRecorderThread::post_box() { void recorderthread_entry(JavaThread*, JavaThread*); bool JfrRecorderThread::start(JfrCheckpointManager* cp_manager, JfrPostBox* post_box, TRAPS) { - assert(cp_manager != NULL, "invariant"); - assert(post_box != NULL, "invariant"); + assert(cp_manager != nullptr, "invariant"); + assert(post_box != nullptr, "invariant"); _post_box = post_box; static const char klass[] = "jdk/jfr/internal/JVMUpcalls"; diff --git a/src/hotspot/share/jfr/recorder/service/jfrRecorderThreadLoop.cpp b/src/hotspot/share/jfr/recorder/service/jfrRecorderThreadLoop.cpp index b46bae370e3a4..9742ae986de4f 100644 --- a/src/hotspot/share/jfr/recorder/service/jfrRecorderThreadLoop.cpp +++ b/src/hotspot/share/jfr/recorder/service/jfrRecorderThreadLoop.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,7 +40,7 @@ // The recorder thread executes service requests collected from the message system. // void recorderthread_entry(JavaThread* thread, JavaThread* unused) { - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); #define START (msgs & (MSGBIT(MSG_START))) #define SHUTDOWN (msgs & MSGBIT(MSG_SHUTDOWN)) #define ROTATE (msgs & (MSGBIT(MSG_ROTATE)|MSGBIT(MSG_STOP))) diff --git a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp index b82bde135dea7..f4e7c620862a2 100644 --- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp +++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -38,8 +38,8 @@ #include "runtime/vframe.inline.hpp" static void copy_frames(JfrStackFrame** lhs_frames, u4 length, const JfrStackFrame* rhs_frames) { - assert(lhs_frames != NULL, "invariant"); - assert(rhs_frames != NULL, "invariant"); + assert(lhs_frames != nullptr, "invariant"); + assert(rhs_frames != nullptr, "invariant"); if (length > 0) { *lhs_frames = NEW_C_HEAP_ARRAY(JfrStackFrame, length, mtTracing); memcpy(*lhs_frames, rhs_frames, length * sizeof(JfrStackFrame)); @@ -53,7 +53,7 @@ JfrStackFrame::JfrStackFrame(const traceid& id, int bci, int type, int lineno, c _klass(ik), _methodid(id), _line(lineno), _bci(bci), _type(type) {} JfrStackTrace::JfrStackTrace(JfrStackFrame* frames, u4 max_frames) : - _next(NULL), + _next(nullptr), _frames(frames), _id(0), _hash(0), @@ -66,7 +66,7 @@ JfrStackTrace::JfrStackTrace(JfrStackFrame* frames, u4 max_frames) : JfrStackTrace::JfrStackTrace(traceid id, const JfrStackTrace& trace, const JfrStackTrace* next) : _next(next), - _frames(NULL), + _frames(nullptr), _id(id), _hash(trace._hash), _nr_of_frames(trace._nr_of_frames), @@ -230,7 +230,7 @@ static inline bool is_full(const JfrBuffer* enqueue_buffer) { } bool JfrStackTrace::record_async(JavaThread* jt, const frame& frame) { - assert(jt != NULL, "invariant"); + assert(jt != nullptr, "invariant"); assert(!_lineno, "invariant"); Thread* current_thread = Thread::current(); assert(jt != current_thread, "invariant"); @@ -283,7 +283,7 @@ bool JfrStackTrace::record_async(JavaThread* jt, const frame& frame) { } bool JfrStackTrace::record(JavaThread* jt, const frame& frame, int skip) { - assert(jt != NULL, "invariant"); + assert(jt != nullptr, "invariant"); assert(jt == Thread::current(), "invariant"); assert(!_lineno, "invariant"); // Must use ResetNoHandleMark here to bypass if any NoHandleMark exist on stack. @@ -333,7 +333,7 @@ bool JfrStackTrace::record(JavaThread* jt, const frame& frame, int skip) { } bool JfrStackTrace::record(JavaThread* current_thread, int skip) { - assert(current_thread != NULL, "invariant"); + assert(current_thread != nullptr, "invariant"); assert(current_thread == Thread::current(), "invariant"); if (!current_thread->has_last_Java_frame()) { return false; @@ -345,7 +345,7 @@ void JfrStackFrame::resolve_lineno() const { assert(_klass, "no klass pointer"); assert(_line == 0, "already have linenumber"); const Method* const method = JfrMethodLookup::lookup(_klass, _methodid); - assert(method != NULL, "invariant"); + assert(method != nullptr, "invariant"); assert(method->method_holder() == _klass, "invariant"); _line = method->line_number_from_bci(_bci); } diff --git a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp index 162a65376e394..f4f9628d60ae5 100644 --- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp +++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,17 +37,17 @@ * which is a decision postponed and taken during rotation. 
*/ -static JfrStackTraceRepository* _instance = NULL; -static JfrStackTraceRepository* _leak_profiler_instance = NULL; +static JfrStackTraceRepository* _instance = nullptr; +static JfrStackTraceRepository* _leak_profiler_instance = nullptr; static traceid _next_id = 0; JfrStackTraceRepository& JfrStackTraceRepository::instance() { - assert(_instance != NULL, "invariant"); + assert(_instance != nullptr, "invariant"); return *_instance; } static JfrStackTraceRepository& leak_profiler_instance() { - assert(_leak_profiler_instance != NULL, "invariant"); + assert(_leak_profiler_instance != nullptr, "invariant"); return *_leak_profiler_instance; } @@ -56,11 +56,11 @@ JfrStackTraceRepository::JfrStackTraceRepository() : _last_entries(0), _entries( } JfrStackTraceRepository* JfrStackTraceRepository::create() { - assert(_instance == NULL, "invariant"); - assert(_leak_profiler_instance == NULL, "invariant"); + assert(_instance == nullptr, "invariant"); + assert(_leak_profiler_instance == nullptr, "invariant"); _leak_profiler_instance = new JfrStackTraceRepository(); - if (_leak_profiler_instance == NULL) { - return NULL; + if (_leak_profiler_instance == nullptr) { + return nullptr; } _instance = new JfrStackTraceRepository(); return _instance; @@ -86,11 +86,11 @@ bool JfrStackTraceRepository::initialize() { } void JfrStackTraceRepository::destroy() { - assert(_instance != NULL, "invarinat"); + assert(_instance != nullptr, "invarinat"); delete _instance; - _instance = NULL; + _instance = nullptr; delete _leak_profiler_instance; - _leak_profiler_instance = NULL; + _leak_profiler_instance = nullptr; } bool JfrStackTraceRepository::is_modified() const { @@ -106,7 +106,7 @@ size_t JfrStackTraceRepository::write(JfrChunkWriter& sw, bool clear) { int count = 0; for (u4 i = 0; i < TABLE_SIZE; ++i) { JfrStackTrace* stacktrace = _table[i]; - while (stacktrace != NULL) { + while (stacktrace != nullptr) { JfrStackTrace* next = const_cast(stacktrace->next()); if (stacktrace->should_write()) { stacktrace->write(sw); @@ -133,7 +133,7 @@ size_t JfrStackTraceRepository::clear(JfrStackTraceRepository& repo) { } for (u4 i = 0; i < TABLE_SIZE; ++i) { JfrStackTrace* stacktrace = repo._table[i]; - while (stacktrace != NULL) { + while (stacktrace != nullptr) { JfrStackTrace* next = const_cast(stacktrace->next()); delete stacktrace; stacktrace = next; @@ -149,7 +149,7 @@ size_t JfrStackTraceRepository::clear(JfrStackTraceRepository& repo) { traceid JfrStackTraceRepository::record(Thread* current_thread, int skip /* 0 */) { assert(current_thread == Thread::current(), "invariant"); JfrThreadLocal* const tl = current_thread->jfr_thread_local(); - assert(tl != NULL, "invariant"); + assert(tl != nullptr, "invariant"); if (tl->has_cached_stack_trace()) { return tl->cached_stack_trace_id(); } @@ -157,11 +157,11 @@ traceid JfrStackTraceRepository::record(Thread* current_thread, int skip /* 0 */ return 0; } JfrStackFrame* frames = tl->stackframes(); - if (frames == NULL) { + if (frames == nullptr) { // pending oom return 0; } - assert(frames != NULL, "invariant"); + assert(frames != nullptr, "invariant"); assert(tl->stackframes() == frames, "invariant"); return instance().record(JavaThread::cast(current_thread), skip, frames, tl->stackdepth()); } @@ -185,10 +185,10 @@ traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) { } void JfrStackTraceRepository::record_for_leak_profiler(JavaThread* current_thread, int skip /* 0 */) { - assert(current_thread != NULL, "invariant"); + assert(current_thread != nullptr, 
"invariant"); assert(current_thread == Thread::current(), "invariant"); JfrThreadLocal* const tl = current_thread->jfr_thread_local(); - assert(tl != NULL, "invariant"); + assert(tl != nullptr, "invariant"); assert(!tl->has_cached_stack_trace(), "invariant"); JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth()); stacktrace.record(current_thread, skip); @@ -204,7 +204,7 @@ traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) { const size_t index = stacktrace._hash % TABLE_SIZE; const JfrStackTrace* table_entry = _table[index]; - while (table_entry != NULL) { + while (table_entry != nullptr) { if (table_entry->equals(stacktrace)) { return table_entry->id(); } @@ -225,10 +225,10 @@ traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) { const JfrStackTrace* JfrStackTraceRepository::lookup_for_leak_profiler(unsigned int hash, traceid id) { const size_t index = (hash % TABLE_SIZE); const JfrStackTrace* trace = leak_profiler_instance()._table[index]; - while (trace != NULL && trace->id() != id) { + while (trace != nullptr && trace->id() != id) { trace = trace->next(); } - assert(trace != NULL, "invariant"); + assert(trace != nullptr, "invariant"); assert(trace->hash() == hash, "invariant"); assert(trace->id() == id, "invariant"); return trace; diff --git a/src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp b/src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp index 6d8c86a0c65c6..e89147b97df74 100644 --- a/src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp +++ b/src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,12 +26,12 @@ #include "jfr/recorder/storage/jfrBuffer.hpp" #include "runtime/javaThread.hpp" -static const u1* const TOP_CRITICAL_SECTION = NULL; +static const u1* const TOP_CRITICAL_SECTION = nullptr; -JfrBuffer::JfrBuffer() : _next(NULL), - _identity(NULL), - _pos(NULL), - _top(NULL), +JfrBuffer::JfrBuffer() : _next(nullptr), + _identity(nullptr), + _pos(nullptr), + _top(nullptr), _size(0), _header_size(0), _flags(0), @@ -39,8 +39,8 @@ JfrBuffer::JfrBuffer() : _next(NULL), LP64_ONLY(COMMA _pad(0)) {} void JfrBuffer::initialize(size_t header_size, size_t size) { - assert(_next == NULL, "invariant"); - assert(_identity == NULL, "invariant"); + assert(_next == nullptr, "invariant"); + assert(_identity == nullptr, "invariant"); assert(header_size <= max_jushort, "invariant"); _header_size = static_cast(header_size); _size = size; @@ -102,34 +102,34 @@ bool JfrBuffer::acquired_by_self() const { } void JfrBuffer::acquire(const void* id) { - assert(id != NULL, "invariant"); + assert(id != nullptr, "invariant"); const void* current_id; do { current_id = identity(); - } while (current_id != NULL || Atomic::cmpxchg(&_identity, current_id, id) != current_id); + } while (current_id != nullptr || Atomic::cmpxchg(&_identity, current_id, id) != current_id); } bool JfrBuffer::try_acquire(const void* id) { - assert(id != NULL, "invariant"); + assert(id != nullptr, "invariant"); const void* const current_id = identity(); - return current_id == NULL && Atomic::cmpxchg(&_identity, current_id, id) == current_id; + return current_id == nullptr && Atomic::cmpxchg(&_identity, current_id, id) == current_id; } void JfrBuffer::set_identity(const void* id) { - assert(id != NULL, "invariant"); - assert(_identity == NULL, "invariant"); + assert(id != nullptr, "invariant"); + assert(_identity == nullptr, "invariant"); OrderAccess::storestore(); _identity = id; } void JfrBuffer::release() { - assert(identity() != NULL, "invariant"); - Atomic::release_store(&_identity, (const void*)NULL); + assert(identity() != nullptr, "invariant"); + Atomic::release_store(&_identity, (const void*)nullptr); } #ifdef ASSERT static bool validate_to(const JfrBuffer* const to, size_t size) { - assert(to != NULL, "invariant"); + assert(to != nullptr, "invariant"); assert(to->acquired_by_self(), "invariant"); assert(to->free_size() >= size, "invariant"); return true; @@ -178,18 +178,18 @@ enum FLAG { }; inline u1 load(const volatile u1* dest) { - assert(dest != NULL, "invariant"); + assert(dest != nullptr, "invariant"); return Atomic::load_acquire(dest); } inline void set(u1* dest, u1 data) { - assert(dest != NULL, "invariant"); + assert(dest != nullptr, "invariant"); OrderAccess::storestore(); *dest |= data; } inline void clear(u1* dest, u1 data) { - assert(dest != NULL, "invariant"); + assert(dest != nullptr, "invariant"); OrderAccess::storestore(); *dest ^= data; } diff --git a/src/hotspot/share/jfr/recorder/storage/jfrEpochStorage.inline.hpp b/src/hotspot/share/jfr/recorder/storage/jfrEpochStorage.inline.hpp index ff5c6c3ac2bde..ae52f20436d9b 100644 --- a/src/hotspot/share/jfr/recorder/storage/jfrEpochStorage.inline.hpp +++ b/src/hotspot/share/jfr/recorder/storage/jfrEpochStorage.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
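JfrBuffer ownership above hinges on the _identity field: nullptr means free, and a thread claims the buffer by CAS-ing its own id in, releasing with a store back to nullptr. A sketch with std::atomic standing in for Atomic::cmpxchg and release_store:

#include <atomic>
#include <cassert>

class ToyBuffer {
  std::atomic<const void*> _identity{nullptr};   // nullptr means "not owned"
 public:
  bool try_acquire(const void* id) {
    assert(id != nullptr);
    const void* expected = nullptr;
    return _identity.compare_exchange_strong(expected, id, std::memory_order_acquire);
  }
  void acquire(const void* id) {
    while (!try_acquire(id)) { /* spin until the current owner releases */ }
  }
  void release() {
    assert(_identity.load() != nullptr);
    _identity.store(nullptr, std::memory_order_release);   // publish the buffer as free
  }
};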
* * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ #include "logging/log.hpp" template class RetrievalPolicy, bool EagerReclaim> -JfrEpochStorageHost::JfrEpochStorageHost() : _mspace(NULL) {} +JfrEpochStorageHost::JfrEpochStorageHost() : _mspace(nullptr) {} template class RetrievalPolicy, bool EagerReclaim> JfrEpochStorageHost::~JfrEpochStorageHost() { @@ -43,17 +43,17 @@ JfrEpochStorageHost::~JfrEpochStorageHo template class RetrievalPolicy, bool EagerReclaim> bool JfrEpochStorageHost::initialize(size_t min_elem_size, size_t free_list_cache_count_limit, size_t cache_prealloc_count) { - assert(_mspace == NULL, "invariant"); + assert(_mspace == nullptr, "invariant"); _mspace = new EpochMspace(min_elem_size, free_list_cache_count_limit, this); - return _mspace != NULL && _mspace->initialize(cache_prealloc_count); + return _mspace != nullptr && _mspace->initialize(cache_prealloc_count); } template class RetrievalPolicy, bool EagerReclaim> inline NodeType* JfrEpochStorageHost::acquire(size_t size, Thread* thread) { BufferPtr buffer = mspace_acquire_to_live_list(size, _mspace, thread); - if (buffer == NULL) { + if (buffer == nullptr) { log_warning(jfr)("Unable to allocate " SIZE_FORMAT " bytes of %s.", _mspace->min_element_size(), "epoch storage"); - return NULL; + return nullptr; } assert(buffer->acquired_by_self(), "invariant"); return buffer; @@ -61,7 +61,7 @@ inline NodeType* JfrEpochStorageHost::a template class RetrievalPolicy, bool EagerReclaim> void JfrEpochStorageHost::release(NodeType* buffer) { - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); buffer->set_retired(); } @@ -101,7 +101,7 @@ class EmptyVerifier { typedef typename Mspace::NodePtr NodePtr; EmptyVerifier(Mspace* mspace) : _mspace(mspace) {} bool process(NodePtr node) { - assert(node != NULL, "invariant"); + assert(node != nullptr, "invariant"); assert(node->empty(), "invariant"); return true; } diff --git a/src/hotspot/share/jfr/recorder/storage/jfrFullStorage.inline.hpp b/src/hotspot/share/jfr/recorder/storage/jfrFullStorage.inline.hpp index 61b629d37bf25..620c68cd5cc89 100644 --- a/src/hotspot/share/jfr/recorder/storage/jfrFullStorage.inline.hpp +++ b/src/hotspot/share/jfr/recorder/storage/jfrFullStorage.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,7 @@ template class NodeType, typename AllocPolicy> JfrFullStorage -::JfrFullStorage(JfrStorageControl& control) : _control(control), _free_node_list(NULL), _queue(NULL) {} +::JfrFullStorage(JfrStorageControl& control) : _control(control), _free_node_list(nullptr), _queue(nullptr) {} template class NodeType, typename AllocPolicy> JfrFullStorage::~JfrFullStorage() { @@ -52,21 +52,21 @@ JfrFullStorage::~JfrFullStorage() { template class NodeType, typename AllocPolicy> bool JfrFullStorage::initialize(size_t free_list_prealloc_count) { - assert(_free_node_list == NULL, "invariant"); + assert(_free_node_list == nullptr, "invariant"); _free_node_list = new JfrConcurrentQueue(); - if (_free_node_list == NULL || !_free_node_list->initialize()) { + if (_free_node_list == nullptr || !_free_node_list->initialize()) { return false; } for (size_t i = 0; i < free_list_prealloc_count; ++i) { NodePtr node = new Node(); - if (node == NULL) { + if (node == nullptr) { return false; } _free_node_list->add(node); } - assert(_queue == NULL, "invariant"); + assert(_queue == nullptr, "invariant"); _queue = new JfrConcurrentQueue(); - return _queue != NULL && _queue->initialize(); + return _queue != nullptr && _queue->initialize(); } template class NodeType, typename AllocPolicy> @@ -83,21 +83,21 @@ template class NodeType, typename Alloc inline typename JfrFullStorage::NodePtr JfrFullStorage::acquire() { NodePtr node = _free_node_list->remove(); - return node != NULL ? node : new Node(); + return node != nullptr ? node : new Node(); } template class NodeType, typename AllocPolicy> inline void JfrFullStorage ::release(typename JfrFullStorage::NodePtr node) { - assert(node != NULL, "invariant"); + assert(node != nullptr, "invariant"); _free_node_list->add(node); } template class NodeType, typename AllocPolicy> inline bool JfrFullStorage::add(ValueType value) { - assert(value != NULL, "invariant"); + assert(value != nullptr, "invariant"); NodePtr node = acquire(); - assert(node != NULL, "invariant"); + assert(node != nullptr, "invariant"); node->set_value(value); const bool notify = _control.increment_full(); _queue->add(node); @@ -106,9 +106,9 @@ inline bool JfrFullStorage::add(ValueType valu template class NodeType, typename AllocPolicy> inline ValueType JfrFullStorage::remove() { - Value value = NULL; + Value value = nullptr; NodePtr node = _queue->remove(); - if (node != NULL) { + if (node != nullptr) { _control.decrement_full(); value = node->value(); release(node); diff --git a/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp b/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp index 2e33958ace5ce..09a452caaa589 100644 --- a/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp +++ b/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
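JfrFullStorage above keeps a free list of queue nodes so that adding a full buffer does not always allocate, and remove() recycles the node after reading its value. A mutex-based sketch of that recycle flow (the real code uses lock-free JfrConcurrentQueue instances; names here are illustrative):

#include <deque>
#include <mutex>

template <typename T>
class ToyFullStorage {
  struct Node { T value; };
  std::deque<Node*> _free;
  std::deque<Node*> _queue;
  std::mutex _mutex;

  Node* acquire_node() {
    if (!_free.empty()) { Node* n = _free.front(); _free.pop_front(); return n; }
    return new Node();                 // fall back to allocation when the free list is empty
  }
 public:
  void add(T value) {
    std::lock_guard<std::mutex> guard(_mutex);
    Node* const node = acquire_node();
    node->value = value;
    _queue.push_back(node);            // the real code also bumps a "full count" here
  }
  bool remove(T* out) {
    std::lock_guard<std::mutex> guard(_mutex);
    if (_queue.empty()) return false;
    Node* const node = _queue.front();
    _queue.pop_front();
    *out = node->value;
    _free.push_back(node);             // recycle the node instead of deleting it
    return true;
  }
};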
* * This code is free software; you can redistribute it and/or modify it @@ -63,7 +63,7 @@ bool JfrMemorySpace class RetrievalPolicy, typename F inline typename FreeListType::NodePtr JfrMemorySpace::allocate(size_t size) { const size_t aligned_size_bytes = align_allocation_size(size, _min_element_size); if (aligned_size_bytes == 0) { - return NULL; + return nullptr; } void* const allocation = JfrCHeapObj::new_array(aligned_size_bytes + sizeof(Node)); - if (allocation == NULL) { - return NULL; + if (allocation == nullptr) { + return nullptr; } NodePtr node = new (allocation) Node(); - assert(node != NULL, "invariant"); + assert(node != nullptr, "invariant"); node->initialize(sizeof(Node), aligned_size_bytes); return node; } template class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware> inline void JfrMemorySpace::deallocate(typename FreeListType::NodePtr node) { - assert(node != NULL, "invariant"); + assert(node != nullptr, "invariant"); assert(!in_free_list(node), "invariant"); assert(!_live_list_epoch_0.in_list(node), "invariant"); assert(!_live_list_epoch_1.in_list(node), "invariant"); - assert(node != NULL, "invariant"); + assert(node != nullptr, "invariant"); JfrCHeapObj::free(node, node->total_size()); } @@ -247,14 +247,14 @@ inline typename FreeListType::NodePtr JfrMemorySpace class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware> inline void JfrMemorySpace::release(typename FreeListType::NodePtr node) { - assert(node != NULL, "invariant"); + assert(node != nullptr, "invariant"); if (node->transient()) { deallocate(node); return; } assert(node->empty(), "invariant"); assert(!node->retired(), "invariant"); - assert(node->identity() == NULL, "invariant"); + assert(node->identity() == nullptr, "invariant"); if (should_populate_free_list_cache()) { add_to_free_list(node); } else { @@ -264,7 +264,7 @@ inline void JfrMemorySpace class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware> inline void JfrMemorySpace::add_to_free_list(typename FreeListType::NodePtr node) { - assert(node != NULL, "invariant"); + assert(node != nullptr, "invariant"); _free_list.add(node); if (is_free_list_cache_limited()) { Atomic::inc(&_free_list_cache_count); @@ -273,7 +273,7 @@ inline void JfrMemorySpace class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware> inline void JfrMemorySpace::add_to_live_list(typename FreeListType::NodePtr node, bool previous_epoch) { - assert(node != NULL, "invariant"); + assert(node != nullptr, "invariant"); live_list(previous_epoch).add(node); } @@ -308,7 +308,7 @@ inline void JfrMemorySpace static inline Mspace* create_mspace(size_t min_element_size, size_t free_list_cache_count_limit, size_t cache_prealloc_count, bool prealloc_to_free_list, Client* cb) { Mspace* const mspace = new Mspace(min_element_size, free_list_cache_count_limit, cb); - if (mspace != NULL) { + if (mspace != nullptr) { mspace->initialize(cache_prealloc_count, prealloc_to_free_list); } return mspace; @@ -322,7 +322,7 @@ inline typename Mspace::NodePtr mspace_allocate(size_t size, Mspace* mspace) { template inline typename Mspace::NodePtr mspace_allocate_acquired(size_t size, Mspace* mspace, Thread* thread) { typename Mspace::NodePtr node = mspace_allocate(size, mspace); - if (node == NULL) return NULL; + if (node == nullptr) return nullptr; node->set_identity(thread); return node; } @@ -330,7 +330,7 @@ inline typename Mspace::NodePtr mspace_allocate_acquired(size_t size, Mspace* ms 
template inline typename Mspace::NodePtr mspace_allocate_transient(size_t size, Mspace* mspace, Thread* thread) { typename Mspace::NodePtr node = mspace_allocate_acquired(size, mspace, thread); - if (node == NULL) return NULL; + if (node == nullptr) return nullptr; assert(node->acquired_by_self(), "invariant"); node->set_transient(); return node; @@ -339,7 +339,7 @@ inline typename Mspace::NodePtr mspace_allocate_transient(size_t size, Mspace* m template inline typename Mspace::NodePtr mspace_allocate_transient_lease(size_t size, Mspace* mspace, Thread* thread) { typename Mspace::NodePtr node = mspace_allocate_transient(size, mspace, thread); - if (node == NULL) return NULL; + if (node == nullptr) return nullptr; assert(node->transient(), "invariant"); node->set_lease(); return node; @@ -348,7 +348,7 @@ inline typename Mspace::NodePtr mspace_allocate_transient_lease(size_t size, Msp template inline typename Mspace::NodePtr mspace_allocate_transient_lease_to_free(size_t size, Mspace* mspace, Thread* thread) { typename Mspace::NodePtr node = mspace_allocate_transient_lease(size, mspace, thread); - if (node == NULL) return NULL; + if (node == nullptr) return nullptr; assert(node->lease(), "invariant"); mspace->add_to_free_list(node); return node; @@ -364,17 +364,17 @@ inline typename Mspace::NodePtr mspace_acquire_free_with_retry(size_t size, Mspa assert(size <= mspace->min_element_size(), "invariant"); for (size_t i = 0; i < retry_count; ++i) { typename Mspace::NodePtr node = mspace_acquire_free(size, mspace, thread); - if (node != NULL) { + if (node != nullptr) { return node; } } - return NULL; + return nullptr; } template inline typename Mspace::NodePtr mspace_allocate_to_live_list(size_t size, Mspace* mspace, Thread* thread) { typename Mspace::NodePtr node = mspace_allocate_acquired(size, mspace, thread); - if (node == NULL) return NULL; + if (node == nullptr) return nullptr; assert(node->acquired_by_self(), "invariant"); mspace->add_to_live_list(node); return node; @@ -383,7 +383,7 @@ inline typename Mspace::NodePtr mspace_allocate_to_live_list(size_t size, Mspace template inline typename Mspace::NodePtr mspace_allocate_transient_to_live_list(size_t size, Mspace* mspace, Thread* thread, bool previous_epoch = false) { typename Mspace::NodePtr node = mspace_allocate_transient(size, mspace, thread); - if (node == NULL) return NULL; + if (node == nullptr) return nullptr; assert(node->transient(), "invariant"); mspace->add_to_live_list(node, previous_epoch); return node; @@ -392,7 +392,7 @@ inline typename Mspace::NodePtr mspace_allocate_transient_to_live_list(size_t si template inline typename Mspace::NodePtr mspace_allocate_transient_lease_to_live_list(size_t size, Mspace* mspace, Thread* thread, bool previous_epoch = false) { typename Mspace::NodePtr node = mspace_allocate_transient_lease(size, mspace, thread); - if (node == NULL) return NULL; + if (node == nullptr) return nullptr; assert(node->lease(), "invariant"); mspace->add_to_live_list(node, previous_epoch); return node; @@ -402,8 +402,8 @@ template inline typename Mspace::NodePtr mspace_acquire_free_to_live_list(size_t size, Mspace* mspace, Thread* thread, bool previous_epoch = false) { assert(size <= mspace->min_element_size(), "invariant"); typename Mspace::NodePtr node = mspace_acquire_free(size, mspace, thread); - if (node == NULL) { - return NULL; + if (node == nullptr) { + return nullptr; } assert(node->acquired_by_self(), "invariant"); mspace->add_to_live_list(node, previous_epoch); @@ -414,7 +414,7 @@ template inline 
typename Mspace::NodePtr mspace_acquire_to_live_list(size_t size, Mspace* mspace, Thread* thread, bool previous_epoch = false) { if (size <= mspace->min_element_size()) { typename Mspace::NodePtr node = mspace_acquire_free_to_live_list(size, mspace, thread, previous_epoch); - if (node != NULL) { + if (node != nullptr) { return node; } } @@ -431,17 +431,17 @@ inline typename Mspace::NodePtr mspace_acquire_live_with_retry(size_t size, Mspa assert(size <= mspace->min_element_size(), "invariant"); for (size_t i = 0; i < retry_count; ++i) { typename Mspace::NodePtr const node = mspace_acquire_live(size, mspace, thread, previous_epoch); - if (node != NULL) { + if (node != nullptr) { return node; } } - return NULL; + return nullptr; } template inline typename Mspace::NodePtr mspace_acquire_lease_with_retry(size_t size, Mspace* mspace, size_t retry_count, Thread* thread, bool previous_epoch = false) { typename Mspace::NodePtr node = mspace_acquire_live_with_retry(size, mspace, retry_count, thread, previous_epoch); - if (node != NULL) { + if (node != nullptr) { node->set_lease(); } return node; @@ -449,21 +449,21 @@ inline typename Mspace::NodePtr mspace_acquire_lease_with_retry(size_t size, Msp template inline void mspace_release(typename Mspace::NodePtr node, Mspace* mspace) { - assert(node != NULL, "invariant"); + assert(node != nullptr, "invariant"); assert(node->unflushed_size() == 0, "invariant"); - assert(mspace != NULL, "invariant"); + assert(mspace != nullptr, "invariant"); mspace->release(node); } template inline void process_live_list(Callback& callback, Mspace* mspace, bool previous_epoch = false) { - assert(mspace != NULL, "invariant"); + assert(mspace != nullptr, "invariant"); mspace->iterate_live_list(callback, previous_epoch); } template inline void process_free_list(Callback& callback, Mspace* mspace) { - assert(mspace != NULL, "invariant"); + assert(mspace != nullptr, "invariant"); assert(mspace->free_list_is_nonempty(), "invariant"); mspace->iterate_free_list(callback); } @@ -482,7 +482,7 @@ class ReleaseOp : public StackObj { template inline bool ReleaseOp::process(typename Mspace::NodePtr node) { - assert(node != NULL, "invariant"); + assert(node != nullptr, "invariant"); // assumes some means of exclusive access to the node if (node->transient()) { // make sure the transient node is already detached @@ -490,7 +490,7 @@ inline bool ReleaseOp::process(typename Mspace::NodePtr node) { return true; } node->reinitialize(); - if (node->identity() != NULL) { + if (node->identity() != nullptr) { assert(node->empty(), "invariant"); assert(!node->retired(), "invariant"); node->release(); // publish @@ -507,7 +507,7 @@ class ReleaseWithExcisionOp : public ReleaseOp { size_t _amount; public: ReleaseWithExcisionOp(Mspace* mspace, List& list) : - ReleaseOp(mspace), _list(list), _prev(NULL), _count(0), _amount(0) {} + ReleaseOp(mspace), _list(list), _prev(nullptr), _count(0), _amount(0) {} bool process(typename List::NodePtr node); size_t processed() const { return _count; } size_t amount() const { return _amount; } @@ -515,7 +515,7 @@ class ReleaseWithExcisionOp : public ReleaseOp { template inline bool ReleaseWithExcisionOp::process(typename List::NodePtr node) { - assert(node != NULL, "invariant"); + assert(node != nullptr, "invariant"); if (node->transient()) { _prev = _list.excise(_prev, node); } else { @@ -536,7 +536,7 @@ class ScavengingReleaseOp : public StackObj { public: typedef typename List::Node Node; ScavengingReleaseOp(Mspace* mspace, List& list) : - _mspace(mspace), 
_list(list), _prev(NULL), _count(0), _amount(0) {} + _mspace(mspace), _list(list), _prev(nullptr), _count(0), _amount(0) {} bool process(typename List::NodePtr node); size_t processed() const { return _count; } size_t amount() const { return _amount; } @@ -544,7 +544,7 @@ class ScavengingReleaseOp : public StackObj { template inline bool ScavengingReleaseOp::process(typename List::NodePtr node) { - assert(node != NULL, "invariant"); + assert(node != nullptr, "invariant"); assert(!node->transient(), "invariant"); if (node->retired()) { return excise_with_release(node); @@ -555,14 +555,14 @@ inline bool ScavengingReleaseOp::process(typename List::NodePtr no template inline bool ScavengingReleaseOp::excise_with_release(typename List::NodePtr node) { - assert(node != NULL, "invariant"); + assert(node != nullptr, "invariant"); assert(node->retired(), "invariant"); _prev = _list.excise(_prev, node); if (node->transient()) { _mspace->deallocate(node); return true; } - assert(node->identity() != NULL, "invariant"); + assert(node->identity() != nullptr, "invariant"); assert(node->empty(), "invariant"); assert(!node->lease(), "invariant"); ++_count; @@ -583,13 +583,13 @@ class ReleaseRetiredOp : public StackObj { public: typedef typename Mspace::Node Node; ReleaseRetiredOp(Functor& functor, Mspace* mspace, FromList& list) : - _functor(functor), _mspace(mspace), _list(list), _prev(NULL) {} + _functor(functor), _mspace(mspace), _list(list), _prev(nullptr) {} bool process(Node* node); }; template inline bool ReleaseRetiredOp::process(typename Mspace::Node* node) { - assert(node != NULL, "invariant"); + assert(node != nullptr, "invariant"); const bool is_retired = node->retired(); const bool result = _functor.process(node); if (is_retired) { @@ -615,13 +615,13 @@ class ReinitializeAllReleaseRetiredOp : public StackObj { public: typedef typename Mspace::Node Node; ReinitializeAllReleaseRetiredOp(Mspace* mspace, FromList& list) : - _mspace(mspace), _list(list), _prev(NULL) {} + _mspace(mspace), _list(list), _prev(nullptr) {} bool process(Node* node); }; template inline bool ReinitializeAllReleaseRetiredOp::process(typename Mspace::Node* node) { - assert(node != NULL, "invariant"); + assert(node != nullptr, "invariant"); // assumes some means of exclusive access to node const bool retired = node->retired(); node->reinitialize(); @@ -640,8 +640,8 @@ inline bool ReinitializeAllReleaseRetiredOp::process(typename #ifdef ASSERT template inline void assert_migration_state(const Node* old, const Node* new_node, size_t used, size_t requested) { - assert(old != NULL, "invariant"); - assert(new_node != NULL, "invariant"); + assert(old != nullptr, "invariant"); + assert(new_node != nullptr, "invariant"); assert(old->pos() >= old->start(), "invariant"); assert(old->pos() + used <= old->end(), "invariant"); assert(new_node->free_size() >= (used + requested), "invariant"); diff --git a/src/hotspot/share/jfr/recorder/storage/jfrMemorySpaceRetrieval.hpp b/src/hotspot/share/jfr/recorder/storage/jfrMemorySpaceRetrieval.hpp index ba350b3471caf..ce512918e6b19 100644 --- a/src/hotspot/share/jfr/recorder/storage/jfrMemorySpaceRetrieval.hpp +++ b/src/hotspot/share/jfr/recorder/storage/jfrMemorySpaceRetrieval.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -44,7 +44,7 @@ class JfrMspaceRetrieval { private: template static Node* acquire(Mspace* mspace, Iterator& iterator, Thread* thread, size_t size) { - assert(mspace != NULL, "invariant"); + assert(mspace != nullptr, "invariant"); while (iterator.has_next()) { Node* const node = iterator.next(); if (node->retired()) continue; @@ -57,7 +57,7 @@ class JfrMspaceRetrieval { mspace->register_full(node, thread); } } - return NULL; + return nullptr; } }; @@ -70,7 +70,7 @@ class JfrMspaceRemoveRetrieval : AllStatic { if (free_list) { StopOnNullConditionRemoval iterator(mspace->free_list()); Node* const node = acquire(iterator, thread, size); - if (node != NULL) { + if (node != nullptr) { mspace->decrement_free_list_count(); } return node; @@ -83,14 +83,14 @@ class JfrMspaceRemoveRetrieval : AllStatic { static Node* acquire(Iterator& iterator, Thread* thread, size_t size) { while (iterator.has_next()) { Node* const node = iterator.next(); - if (node == NULL) return NULL; + if (node == nullptr) return nullptr; assert(node->free_size() >= size, "invariant"); assert(!node->retired(), "invariant"); - assert(node->identity() == NULL, "invariant"); + assert(node->identity() == nullptr, "invariant"); node->set_identity(thread); return node; } - return NULL; + return nullptr; } }; diff --git a/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp b/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp index f03f0a963d43c..019793d045644 100644 --- a/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp +++ b/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
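JfrMspaceRetrieval::acquire() above scans candidate buffers, skips retired ones, claims one with try_acquire, and keeps it only if it is large enough, otherwise handing it to register_full(). A simplified, single-threaded sketch of that loop with stand-in types:

#include <cstddef>
#include <vector>

struct ToyNode {
  bool retired = false;
  size_t free = 0;
  const void* identity = nullptr;
  bool try_acquire(const void* id) {
    if (identity != nullptr) return false;   // single-threaded stand-in for the CAS claim
    identity = id;
    return true;
  }
};

static ToyNode* acquire(std::vector<ToyNode*>& candidates, const void* thread_id, size_t size) {
  for (ToyNode* node : candidates) {
    if (node->retired) continue;             // retired buffers are awaiting scavenging
    if (node->try_acquire(thread_id)) {
      if (node->free >= size) {
        return node;                         // claimed and large enough
      }
      node->retired = true;                  // approximation: the real code registers it as full
    }
  }
  return nullptr;                            // caller may fall back to a fresh allocation
}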
* * This code is free software; you can redistribute it and/or modify it @@ -46,7 +46,7 @@ typedef JfrStorage::BufferPtr BufferPtr; -static JfrStorage* _instance = NULL; +static JfrStorage* _instance = nullptr; static JfrStorageControl* _control; JfrStorage& JfrStorage::instance() { @@ -54,39 +54,39 @@ JfrStorage& JfrStorage::instance() { } JfrStorage* JfrStorage::create(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) { - assert(_instance == NULL, "invariant"); + assert(_instance == nullptr, "invariant"); _instance = new JfrStorage(chunkwriter, post_box); return _instance; } void JfrStorage::destroy() { - if (_instance != NULL) { + if (_instance != nullptr) { delete _instance; - _instance = NULL; + _instance = nullptr; } } JfrStorage::JfrStorage(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) : - _control(NULL), - _global_mspace(NULL), - _thread_local_mspace(NULL), + _control(nullptr), + _global_mspace(nullptr), + _thread_local_mspace(nullptr), _chunkwriter(chunkwriter), _post_box(post_box) {} JfrStorage::~JfrStorage() { - if (_control != NULL) { + if (_control != nullptr) { delete _control; } - if (_global_mspace != NULL) { + if (_global_mspace != nullptr) { delete _global_mspace; } - if (_thread_local_mspace != NULL) { + if (_thread_local_mspace != nullptr) { delete _thread_local_mspace; } - if (_full_list != NULL) { + if (_full_list != nullptr) { delete _full_list; } - _instance = NULL; + _instance = nullptr; } static const size_t thread_local_cache_count = 8; @@ -94,9 +94,9 @@ static const size_t thread_local_cache_count = 8; static const size_t in_memory_discard_threshold_delta = 2; bool JfrStorage::initialize() { - assert(_control == NULL, "invariant"); - assert(_global_mspace == NULL, "invariant"); - assert(_thread_local_mspace == NULL, "invariant"); + assert(_control == nullptr, "invariant"); + assert(_global_mspace == nullptr, "invariant"); + assert(_thread_local_mspace == nullptr, "invariant"); const size_t num_global_buffers = (size_t)JfrOptionSet::num_global_buffers(); assert(num_global_buffers >= in_memory_discard_threshold_delta, "invariant"); @@ -104,7 +104,7 @@ bool JfrStorage::initialize() { const size_t thread_buffer_size = (size_t)JfrOptionSet::thread_buffer_size(); _control = new JfrStorageControl(num_global_buffers, num_global_buffers - in_memory_discard_threshold_delta); - if (_control == NULL) { + if (_control == nullptr) { return false; } _global_mspace = create_mspace(global_buffer_size, @@ -112,7 +112,7 @@ bool JfrStorage::initialize() { num_global_buffers, // cache_preallocate count false, // preallocate_to_free_list (== preallocate directly to live list) this); - if (_global_mspace == NULL) { + if (_global_mspace == nullptr) { return false; } assert(_global_mspace->live_list_is_nonempty(), "invariant"); @@ -121,13 +121,13 @@ bool JfrStorage::initialize() { thread_local_cache_count, // cache preallocate count true, // preallocate_to_free_list this); - if (_thread_local_mspace == NULL) { + if (_thread_local_mspace == nullptr) { return false; } assert(_thread_local_mspace->free_list_is_nonempty(), "invariant"); // The full list will contain nodes pointing to retired global and transient buffers. 
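JfrStorage::initialize() above wires the control object with the global buffer count and a discard threshold two below it (in_memory_discard_threshold_delta). A toy sketch of that bookkeeping; the notify-on-first-full behaviour is an assumption added for illustration, and all names are hypothetical:

#include <cstddef>

class ToyStorageControl {
  size_t _global_count;
  size_t _discard_threshold;
  size_t _full_count = 0;
 public:
  ToyStorageControl(size_t global_count, size_t discard_threshold)
    : _global_count(global_count), _discard_threshold(discard_threshold) {}
  // True when the number of full buffers warrants discarding the oldest data.
  bool should_discard() const { return _full_count >= _discard_threshold; }
  // Assumption for the sketch: notify the recorder on the first full buffer.
  bool increment_full() { return ++_full_count == 1; }
  void decrement_full() { if (_full_count > 0) --_full_count; }
};

// Usage mirroring the patch defaults: 20 global buffers, discard when 18 are full.
// ToyStorageControl control(20, 20 - 2);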
_full_list = new JfrFullList(*_control); - return _full_list != NULL && _full_list->initialize(num_global_buffers * 2); + return _full_list != nullptr && _full_list->initialize(num_global_buffers * 2); } JfrStorageControl& JfrStorage::control() { @@ -140,9 +140,9 @@ static void log_allocation_failure(const char* msg, size_t size) { BufferPtr JfrStorage::acquire_thread_local(Thread* thread, size_t size /* 0 */) { BufferPtr buffer = mspace_acquire_to_live_list(size, instance()._thread_local_mspace, thread); - if (buffer == NULL) { + if (buffer == nullptr) { log_allocation_failure("thread local_memory", size); - return NULL; + return nullptr; } assert(buffer->acquired_by_self(), "invariant"); return buffer; @@ -150,9 +150,9 @@ BufferPtr JfrStorage::acquire_thread_local(Thread* thread, size_t size /* 0 */) BufferPtr JfrStorage::acquire_transient(size_t size, Thread* thread) { BufferPtr buffer = mspace_allocate_transient_lease(size, instance()._thread_local_mspace, thread); - if (buffer == NULL) { + if (buffer == nullptr) { log_allocation_failure("transient memory", size); - return NULL; + return nullptr; } assert(buffer->acquired_by_self(), "invariant"); assert(buffer->transient(), "invariant"); @@ -164,7 +164,7 @@ static BufferPtr acquire_lease(size_t size, JfrStorageMspace* mspace, JfrStorage assert(size <= mspace->min_element_size(), "invariant"); while (true) { BufferPtr buffer = mspace_acquire_lease_with_retry(size, mspace, retry_count, thread); - if (buffer == NULL && storage_instance.control().should_discard()) { + if (buffer == nullptr && storage_instance.control().should_discard()) { storage_instance.discard_oldest(thread); continue; } @@ -176,7 +176,7 @@ static BufferPtr acquire_promotion_buffer(size_t size, JfrStorageMspace* mspace, assert(size <= mspace->min_element_size(), "invariant"); while (true) { BufferPtr buffer= mspace_acquire_live_with_retry(size, mspace, retry_count, thread); - if (buffer == NULL && storage_instance.control().should_discard()) { + if (buffer == nullptr && storage_instance.control().should_discard()) { storage_instance.discard_oldest(thread); continue; } @@ -192,7 +192,7 @@ BufferPtr JfrStorage::acquire_large(size_t size, Thread* thread) { // if not too large and capacity is still available, ask for a lease from the global system if (size < max_elem_size && storage_instance.control().is_global_lease_allowed()) { BufferPtr const buffer = acquire_lease(size, storage_instance._global_mspace, storage_instance, lease_retry, thread); - if (buffer != NULL) { + if (buffer != nullptr) { assert(buffer->acquired_by_self(), "invariant"); assert(!buffer->transient(), "invariant"); assert(buffer->lease(), "invariant"); @@ -204,7 +204,7 @@ BufferPtr JfrStorage::acquire_large(size_t size, Thread* thread) { } static void write_data_loss_event(JfrBuffer* buffer, u8 unflushed_size, Thread* thread) { - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); assert(buffer->empty(), "invariant"); const u8 total_data_loss = thread->jfr_thread_local()->add_data_lost(unflushed_size); if (EventDataLoss::is_enabled()) { @@ -219,7 +219,7 @@ static void write_data_loss_event(JfrBuffer* buffer, u8 unflushed_size, Thread* } static void write_data_loss(BufferPtr buffer, Thread* thread) { - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); const size_t unflushed_size = buffer->unflushed_size(); buffer->reinitialize(); if (unflushed_size == 0) { @@ -231,7 +231,7 @@ static void write_data_loss(BufferPtr buffer, Thread* thread) { static 
const size_t promotion_retry = 100; bool JfrStorage::flush_regular_buffer(BufferPtr buffer, Thread* thread) { - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); assert(!buffer->lease(), "invariant"); assert(!buffer->transient(), "invariant"); const size_t unflushed_size = buffer->unflushed_size(); @@ -242,7 +242,7 @@ bool JfrStorage::flush_regular_buffer(BufferPtr buffer, Thread* thread) { } BufferPtr const promotion_buffer = acquire_promotion_buffer(unflushed_size, _global_mspace, *this, promotion_retry, thread); - if (promotion_buffer == NULL) { + if (promotion_buffer == nullptr) { write_data_loss(buffer, thread); return false; } @@ -261,7 +261,7 @@ bool JfrStorage::flush_regular_buffer(BufferPtr buffer, Thread* thread) { * and the caller should take means to ensure that it is not referenced any longer. */ void JfrStorage::release_large(BufferPtr buffer, Thread* thread) { - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); assert(buffer->lease(), "invariant"); assert(buffer->acquired_by_self(), "invariant"); buffer->clear_lease(); @@ -275,7 +275,7 @@ void JfrStorage::release_large(BufferPtr buffer, Thread* thread) { } void JfrStorage::register_full(BufferPtr buffer, Thread* thread) { - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); assert(buffer->acquired_by(thread), "invariant"); assert(buffer->retired(), "invariant"); if (_full_list->add(buffer)) { @@ -285,7 +285,7 @@ void JfrStorage::register_full(BufferPtr buffer, Thread* thread) { // don't use buffer on return, it is gone void JfrStorage::release(BufferPtr buffer, Thread* thread) { - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); assert(!buffer->lease(), "invariant"); assert(!buffer->transient(), "invariant"); assert(!buffer->retired(), "invariant"); @@ -295,12 +295,12 @@ void JfrStorage::release(BufferPtr buffer, Thread* thread) { } } assert(buffer->empty(), "invariant"); - assert(buffer->identity() != NULL, "invariant"); + assert(buffer->identity() != nullptr, "invariant"); buffer->set_retired(); } void JfrStorage::release_thread_local(BufferPtr buffer, Thread* thread) { - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); JfrStorage& storage_instance = instance(); storage_instance.release(buffer, thread); } @@ -325,8 +325,8 @@ void JfrStorage::discard_oldest(Thread* thread) { size_t discarded_size = 0; while (_full_list->is_nonempty()) { BufferPtr oldest = _full_list->remove(); - assert(oldest != NULL, "invariant"); - assert(oldest->identity() != NULL, "invariant"); + assert(oldest != nullptr, "invariant"); + assert(oldest->identity() != nullptr, "invariant"); discarded_size += oldest->discard(); assert(oldest->unflushed_size() == 0, "invariant"); if (oldest->transient()) { @@ -347,34 +347,34 @@ void JfrStorage::discard_oldest(Thread* thread) { typedef const BufferPtr ConstBufferPtr; static void assert_flush_precondition(ConstBufferPtr cur, size_t used, bool native, const Thread* t) { - assert(t != NULL, "invariant"); - assert(cur != NULL, "invariant"); + assert(t != nullptr, "invariant"); + assert(cur != nullptr, "invariant"); assert(cur->pos() + used <= cur->end(), "invariant"); assert(native ? 
t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant"); } static void assert_flush_regular_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, const Thread* t) { - assert(t != NULL, "invariant"); - assert(cur != NULL, "invariant"); + assert(t != nullptr, "invariant"); + assert(cur != nullptr, "invariant"); assert(!cur->lease(), "invariant"); - assert(cur_pos != NULL, "invariant"); + assert(cur_pos != nullptr, "invariant"); assert(req >= used, "invariant"); } static void assert_provision_large_precondition(ConstBufferPtr cur, size_t used, size_t req, const Thread* t) { - assert(cur != NULL, "invariant"); - assert(t != NULL, "invariant"); - assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant"); + assert(cur != nullptr, "invariant"); + assert(t != nullptr, "invariant"); + assert(t->jfr_thread_local()->shelved_buffer() != nullptr, "invariant"); assert(req >= used, "invariant"); } static void assert_flush_large_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) { - assert(t != NULL, "invariant"); - assert(cur != NULL, "invariant"); + assert(t != nullptr, "invariant"); + assert(cur != nullptr, "invariant"); assert(cur->lease(), "invariant"); - assert(cur_pos != NULL, "invariant"); + assert(cur_pos != nullptr, "invariant"); assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant"); - assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant"); + assert(t->jfr_thread_local()->shelved_buffer() != nullptr, "invariant"); assert(req >= used, "invariant"); assert(cur != t->jfr_thread_local()->shelved_buffer(), "invariant"); } @@ -408,13 +408,13 @@ BufferPtr JfrStorage::flush_regular(BufferPtr cur, const u1* const cur_pos, size } // Going for a "larger-than-regular" buffer. // Shelve the current buffer to make room for a temporary lease. - assert(t->jfr_thread_local()->shelved_buffer() == NULL, "invariant"); + assert(t->jfr_thread_local()->shelved_buffer() == nullptr, "invariant"); t->jfr_thread_local()->shelve_buffer(cur); return provision_large(cur, cur_pos, used, req, native, t); } static BufferPtr store_buffer_to_thread_local(BufferPtr buffer, JfrThreadLocal* jfr_thread_local, bool native) { - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); if (native) { jfr_thread_local->set_native_buffer(buffer); } else { @@ -426,8 +426,8 @@ static BufferPtr store_buffer_to_thread_local(BufferPtr buffer, JfrThreadLocal* static BufferPtr restore_shelved_buffer(bool native, Thread* t) { JfrThreadLocal* const tl = t->jfr_thread_local(); BufferPtr shelved = tl->shelved_buffer(); - assert(shelved != NULL, "invariant"); - tl->shelve_buffer(NULL); + assert(shelved != nullptr, "invariant"); + tl->shelve_buffer(nullptr); // restore shelved buffer back as primary return store_buffer_to_thread_local(shelved, tl, native); } @@ -436,7 +436,7 @@ BufferPtr JfrStorage::flush_large(BufferPtr cur, const u1* const cur_pos, size_t debug_only(assert_flush_large_precondition(cur, cur_pos, used, req, native, t);) // Can the "regular" buffer (now shelved) accommodate the requested size? 
BufferPtr shelved = t->jfr_thread_local()->shelved_buffer(); - assert(shelved != NULL, "invariant"); + assert(shelved != nullptr, "invariant"); if (shelved->free_size() >= req) { if (req > 0) { memcpy(shelved->pos(), (void*)cur_pos, (size_t)used); @@ -450,8 +450,8 @@ BufferPtr JfrStorage::flush_large(BufferPtr cur, const u1* const cur_pos, size_t } static BufferPtr large_fail(BufferPtr cur, bool native, JfrStorage& storage_instance, Thread* t) { - assert(cur != NULL, "invariant"); - assert(t != NULL, "invariant"); + assert(cur != nullptr, "invariant"); + assert(t != nullptr, "invariant"); if (cur->lease()) { storage_instance.release_large(cur, t); } @@ -464,9 +464,9 @@ static BufferPtr large_fail(BufferPtr cur, bool native, JfrStorage& storage_inst // Caller needs to ensure if the size was successfully accommodated. BufferPtr JfrStorage::provision_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) { debug_only(assert_provision_large_precondition(cur, used, req, t);) - assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant"); + assert(t->jfr_thread_local()->shelved_buffer() != nullptr, "invariant"); BufferPtr const buffer = acquire_large(req, t); - if (buffer == NULL) { + if (buffer == nullptr) { // unable to allocate and serve the request return large_fail(cur, native, *this, t); } @@ -531,12 +531,12 @@ size_t JfrStorage::clear() { template static size_t process_full(Processor& processor, JfrFullList* list, JfrStorageControl& control) { - assert(list != NULL, "invariant"); + assert(list != nullptr, "invariant"); assert(list->is_nonempty(), "invariant"); size_t count = 0; do { BufferPtr full = list->remove(); - if (full == NULL) break; + if (full == nullptr) break; assert(full->retired(), "invariant"); processor.process(full); // at this point, the buffer is already live or destroyed diff --git a/src/hotspot/share/jfr/recorder/storage/jfrStorageUtils.hpp b/src/hotspot/share/jfr/recorder/storage/jfrStorageUtils.hpp index 02d0c7550edae..a7ba65698cb9f 100644 --- a/src/hotspot/share/jfr/recorder/storage/jfrStorageUtils.hpp +++ b/src/hotspot/share/jfr/recorder/storage/jfrStorageUtils.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,18 +52,18 @@ class CompositeOperation { NextOperation* _next; public: CompositeOperation(Operation* op, NextOperation* next) : _op(op), _next(next) { - assert(_op != NULL, "invariant"); + assert(_op != nullptr, "invariant"); } typedef typename Operation::Type Type; bool process(Type* t) { const bool op_result = _op->process(t); - return _next == NULL ? op_result : TruthFunction::evaluate(op_result) ? _next->process(t) : op_result; + return _next == nullptr ? op_result : TruthFunction::evaluate(op_result) ? _next->process(t) : op_result; } size_t elements() const { - return _next == NULL ? _op->elements() : _op->elements() + _next->elements(); + return _next == nullptr ? _op->elements() : _op->elements() + _next->elements(); } size_t size() const { - return _next == NULL ? _op->size() : _op->size() + _next->size(); + return _next == nullptr ? 
_op->size() : _op->size() + _next->size(); } }; @@ -99,7 +99,7 @@ class Retired { public: typedef T Type; bool process(Type* t) { - assert(t != NULL, "invariant"); + assert(t != nullptr, "invariant"); return negation ? !t->retired() : t->retired(); } }; diff --git a/src/hotspot/share/jfr/recorder/storage/jfrStorageUtils.inline.hpp b/src/hotspot/share/jfr/recorder/storage/jfrStorageUtils.inline.hpp index b04abe77f21ef..620d830afef47 100644 --- a/src/hotspot/share/jfr/recorder/storage/jfrStorageUtils.inline.hpp +++ b/src/hotspot/share/jfr/recorder/storage/jfrStorageUtils.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,7 +48,7 @@ inline bool DefaultDiscarder<T>::discard(T* t, const u1* data, size_t size) { template <typename Type> inline size_t get_unflushed_size(const u1* top, Type* t) { - assert(t != NULL, "invariant"); + assert(t != nullptr, "invariant"); return Atomic::load_acquire(t->pos_address()) - top; } @@ -78,7 +78,7 @@ inline bool ConcurrentWriteOp<Operation>::process(typename Operation::Type* t) { template <typename Operation> inline bool MutexedWriteOp<Operation>::process(typename Operation::Type* t) { - assert(t != NULL, "invariant"); + assert(t != nullptr, "invariant"); const u1* const top = t->top(); const size_t unflushed_size = get_unflushed_size(top, t); assert((intptr_t)unflushed_size >= 0, "invariant"); @@ -92,7 +92,7 @@ inline bool MutexedWriteOp<Operation>::process(typename Operation::Type* t) { template <typename Type> static void retired_sensitive_acquire(Type* t, Thread* thread) { - assert(t != NULL, "invariant"); + assert(t != nullptr, "invariant"); assert(thread != nullptr, "invariant"); assert(thread == Thread::current(), "invariant"); if (t->retired()) { @@ -118,7 +118,7 @@ inline bool ExclusiveOp<Operation>::process(typename Operation::Type* t) { template <typename Operation> inline bool DiscardOp<Operation>::process(typename Operation::Type* t) { - assert(t != NULL, "invariant"); + assert(t != nullptr, "invariant"); const u1* const top = _mode == concurrent ? t->acquire_critical_section_top() : t->top(); const size_t unflushed_size = get_unflushed_size(top, t); assert((intptr_t)unflushed_size >= 0, "invariant"); @@ -150,7 +150,7 @@ inline bool ExclusiveDiscardOp<Operation>::process(typename Operation::Type* t) template <typename Operation> inline bool EpochDispatchOp<Operation>::process(typename Operation::Type* t) { - assert(t != NULL, "invariant"); + assert(t != nullptr, "invariant"); const u1* const current_top = _previous_epoch ? t->start() : t->top(); const size_t unflushed_size = Atomic::load_acquire(t->pos_address()) - current_top; assert((intptr_t)unflushed_size >= 0, "invariant"); @@ -164,7 +164,7 @@ inline bool EpochDispatchOp<Operation>::process(typename Operation::Type* t) { template <typename Operation> size_t EpochDispatchOp<Operation>::dispatch(bool previous_epoch, const u1* element, size_t size) { - assert(element != NULL, "invariant"); + assert(element != nullptr, "invariant"); const u1* const limit = element + size; size_t elements = 0; while (element < limit) { diff --git a/src/hotspot/share/jfr/recorder/storage/jfrVirtualMemory.cpp b/src/hotspot/share/jfr/recorder/storage/jfrVirtualMemory.cpp index 6e4ab071469f8..99c980a2d9a56 100644 --- a/src/hotspot/share/jfr/recorder/storage/jfrVirtualMemory.cpp +++ b/src/hotspot/share/jfr/recorder/storage/jfrVirtualMemory.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -90,8 +90,8 @@ class JfrVirtualMemorySegment : public JfrCHeapObj { }; JfrVirtualMemorySegment::JfrVirtualMemorySegment() : - _next(NULL), - _top(NULL), + _next(nullptr), + _top(nullptr), _rs(), _virtual_memory() {} @@ -108,7 +108,7 @@ bool JfrVirtualMemorySegment::initialize(size_t reservation_size_request_bytes) if (!_rs.is_reserved()) { return false; } - assert(_rs.base() != NULL, "invariant"); + assert(_rs.base() != nullptr, "invariant"); assert(_rs.size() != 0, "invariant"); assert(is_aligned(_rs.base(), os::vm_allocation_granularity()), "invariant"); assert(is_aligned(_rs.size(), os::vm_allocation_granularity()), "invariant"); @@ -169,10 +169,10 @@ void* JfrVirtualMemorySegment::take_from_committed(size_t block_size_request_wor assert(_virtual_memory.committed_size() == _virtual_memory.actual_committed_size(), "The committed memory doesn't match the expanded memory."); if (!is_available(block_size_request_words)) { - return NULL; + return nullptr; } void* const block = top(); - assert(block != NULL, "invariant"); + assert(block != nullptr, "invariant"); inc_top(block_size_request_words); return block; } @@ -221,8 +221,8 @@ class JfrVirtualMemoryManager : public JfrCHeapObj { }; JfrVirtualMemoryManager::JfrVirtualMemoryManager() : - _segments(NULL), - _current_segment(NULL), + _segments(nullptr), + _current_segment(nullptr), _reservation_size_request_words(0), _reservation_size_request_limit_words(0), _current_reserved_words(0), @@ -230,7 +230,7 @@ JfrVirtualMemoryManager::JfrVirtualMemoryManager() : JfrVirtualMemoryManager::~JfrVirtualMemoryManager() { JfrVirtualMemorySegment* segment = _segments; - while (segment != NULL) { + while (segment != nullptr) { JfrVirtualMemorySegment* next_segment = segment->next(); delete segment; segment = next_segment; @@ -256,7 +256,7 @@ bool JfrVirtualMemoryManager::new_segment(size_t reservation_size_request_words) assert(reservation_size_request_words > 0, "invariant"); assert(is_aligned(reservation_size_request_words * BytesPerWord, os::vm_allocation_granularity()), "invariant"); Segment* segment = new Segment(); - if (NULL == segment) { + if (nullptr == segment) { return false; } if (!segment->initialize(reservation_size_request_words * BytesPerWord)) { @@ -270,7 +270,7 @@ bool JfrVirtualMemoryManager::new_segment(size_t reservation_size_request_words) } bool JfrVirtualMemoryManager::expand_segment_by(JfrVirtualMemorySegment* segment, size_t block_size_request_words) { - assert(segment != NULL, "invariant"); + assert(segment != nullptr, "invariant"); const size_t before = segment->committed_words(); const bool result = segment->expand_by(block_size_request_words); const size_t after = segment->committed_words(); @@ -324,11 +324,11 @@ bool JfrVirtualMemoryManager::expand_by(size_t block_size_request_words, size_t } void JfrVirtualMemoryManager::link(JfrVirtualMemorySegment* segment) { - assert(segment != NULL, "invariant"); - if (_segments == NULL) { + assert(segment != nullptr, "invariant"); + if (_segments == nullptr) { _segments = segment; } else { - assert(_current_segment != NULL, "invariant"); + assert(_current_segment != nullptr, "invariant"); assert(_segments == _current_segment, "invariant"); _current_segment->set_next(segment); } @@ -340,32 +340,32 @@ void JfrVirtualMemoryManager::link(JfrVirtualMemorySegment* segment) { 
void* JfrVirtualMemoryManager::commit(size_t block_size_request_words) { assert(is_aligned(block_size_request_words * BytesPerWord, os::vm_allocation_granularity()), "invariant"); void* block = current()->commit(block_size_request_words); - if (block != NULL) { + if (block != nullptr) { return block; } - assert(block == NULL, "invariant"); + assert(block == nullptr, "invariant"); if (is_full()) { - return NULL; + return nullptr; } assert(block_size_request_words <= _reservation_size_request_words, "invariant"); if (expand_by(block_size_request_words, _reservation_size_request_words)) { block = current()->commit(block_size_request_words); - assert(block != NULL, "The allocation was expected to succeed after the expansion"); + assert(block != nullptr, "The allocation was expected to succeed after the expansion"); } return block; } JfrVirtualMemory::JfrVirtualMemory() : - _vmm(NULL), + _vmm(nullptr), _reserved_low(), _reserved_high(), - _top(NULL), - _commit_point(NULL), + _top(nullptr), + _commit_point(nullptr), _physical_commit_size_request_words(0), _aligned_datum_size_bytes(0) {} JfrVirtualMemory::~JfrVirtualMemory() { - assert(_vmm != NULL, "invariant"); + assert(_vmm != nullptr, "invariant"); delete _vmm; } @@ -374,9 +374,9 @@ size_t JfrVirtualMemory::aligned_datum_size_bytes() const { } static void adjust_allocation_ratio(size_t* const reservation_size_bytes, size_t* const commit_size_bytes) { - assert(reservation_size_bytes != NULL, "invariant"); + assert(reservation_size_bytes != nullptr, "invariant"); assert(*reservation_size_bytes > 0, "invariant"); - assert(commit_size_bytes != NULL, "invariant"); + assert(commit_size_bytes != nullptr, "invariant"); assert(*commit_size_bytes > 0, "invariant"); assert(*reservation_size_bytes >= *commit_size_bytes, "invariant"); assert(is_aligned(*reservation_size_bytes, os::vm_allocation_granularity()), "invariant"); @@ -409,11 +409,11 @@ static void adjust_allocation_ratio(size_t* const reservation_size_bytes, size_t void* JfrVirtualMemory::initialize(size_t reservation_size_request_bytes, size_t block_size_request_bytes, size_t datum_size_bytes /* 1 */) { - assert(_vmm == NULL, "invariant"); + assert(_vmm == nullptr, "invariant"); _vmm = new JfrVirtualMemoryManager(); - if (_vmm == NULL) { - return NULL; + if (_vmm == nullptr) { + return nullptr; } assert(reservation_size_request_bytes > 0, "invariant"); @@ -440,7 +440,7 @@ void* JfrVirtualMemory::initialize(size_t reservation_size_request_bytes, if (!_vmm->initialize(reservation_size_request_words)) { // is implicitly "full" if reservation fails assert(is_full(), "invariant"); - return NULL; + return nullptr; } _reserved_low = (const u1*)_vmm->reserved_low(); _reserved_high = (const u1*)_vmm->reserved_high(); @@ -454,7 +454,7 @@ void* JfrVirtualMemory::initialize(size_t reservation_size_request_bytes, } void* JfrVirtualMemory::commit(size_t block_size_request_words) { - assert(_vmm != NULL, "invariant"); + assert(_vmm != nullptr, "invariant"); assert(is_aligned(block_size_request_words * BytesPerWord, os::vm_allocation_granularity()), "invariant"); return _vmm->commit(block_size_request_words); } @@ -468,26 +468,26 @@ bool JfrVirtualMemory::is_empty() const { } bool JfrVirtualMemory::commit_memory_block() { - assert(_vmm != NULL, "invariant"); + assert(_vmm != nullptr, "invariant"); assert(!is_full(), "invariant"); void* const block = _vmm->commit(_physical_commit_size_request_words); - if (block != NULL) { + if (block != nullptr) { _commit_point = _vmm->committed_high(); return true; } // all 
reserved virtual memory is committed - assert(block == NULL, "invariant"); + assert(block == nullptr, "invariant"); assert(_vmm->reserved_high() == _vmm->committed_high(), "invariant"); return false; } void* JfrVirtualMemory::new_datum() { - assert(_vmm != NULL, "invariant"); + assert(_vmm != nullptr, "invariant"); assert(!is_full(), "invariant"); if (_top == _commit_point) { if (!commit_memory_block()) { assert(is_full(), "invariant"); - return NULL; + return nullptr; } } assert(_top + _aligned_datum_size_bytes <= _commit_point, "invariant"); diff --git a/src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp b/src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp index fa1471e808b18..6863b71f954c4 100644 --- a/src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp +++ b/src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,25 +47,25 @@ bool JfrStringPool::is_modified() { return _new_string.is_signaled_with_reset(); } -static JfrStringPool* _instance = NULL; +static JfrStringPool* _instance = nullptr; JfrStringPool& JfrStringPool::instance() { return *_instance; } JfrStringPool* JfrStringPool::create(JfrChunkWriter& cw) { - assert(_instance == NULL, "invariant"); + assert(_instance == nullptr, "invariant"); _instance = new JfrStringPool(cw); return _instance; } void JfrStringPool::destroy() { - assert(_instance != NULL, "invariant"); + assert(_instance != nullptr, "invariant"); delete _instance; - _instance = NULL; + _instance = nullptr; } -JfrStringPool::JfrStringPool(JfrChunkWriter& cw) : _mspace(NULL), _chunkwriter(cw) {} +JfrStringPool::JfrStringPool(JfrChunkWriter& cw) : _mspace(nullptr), _chunkwriter(cw) {} JfrStringPool::~JfrStringPool() { delete _mspace; @@ -75,13 +75,13 @@ static const size_t string_pool_cache_count = 2; static const size_t string_pool_buffer_size = 512 * K; bool JfrStringPool::initialize() { - assert(_mspace == NULL, "invariant"); + assert(_mspace == nullptr, "invariant"); _mspace = create_mspace(string_pool_buffer_size, string_pool_cache_count, // cache limit string_pool_cache_count, // cache preallocate count false, // preallocate_to_free_list (== preallocate directly to live list) this); - return _mspace != NULL; + return _mspace != nullptr; } /* @@ -91,7 +91,7 @@ bool JfrStringPool::initialize() { * and the caller should take means to ensure that it is not referenced any longer. 
*/ static void release(BufferPtr buffer, Thread* thread) { - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); assert(buffer->lease(), "invariant"); assert(buffer->acquired_by_self(), "invariant"); buffer->clear_lease(); @@ -103,27 +103,27 @@ static void release(BufferPtr buffer, Thread* thread) { } BufferPtr JfrStringPool::flush(BufferPtr old, size_t used, size_t requested, Thread* thread) { - assert(old != NULL, "invariant"); + assert(old != nullptr, "invariant"); assert(old->lease(), "invariant"); if (0 == requested) { // indicates a lease is being returned release(old, thread); - return NULL; + return nullptr; } // migration of in-flight information BufferPtr const new_buffer = lease(thread, used + requested); - if (new_buffer != NULL) { + if (new_buffer != nullptr) { migrate_outstanding_writes(old, new_buffer, used, requested); } release(old, thread); - return new_buffer; // might be NULL + return new_buffer; // might be nullptr } static const size_t lease_retry = 10; BufferPtr JfrStringPool::lease(Thread* thread, size_t size /* 0 */) { BufferPtr buffer = mspace_acquire_lease_with_retry(size, instance()._mspace, lease_retry, thread); - if (buffer == NULL) { + if (buffer == nullptr) { buffer = mspace_allocate_transient_lease_to_live_list(size, instance()._mspace, thread); } assert(buffer->acquired_by_self(), "invariant"); @@ -132,7 +132,7 @@ BufferPtr JfrStringPool::lease(Thread* thread, size_t size /* 0 */) { } jboolean JfrStringPool::add(jlong id, jstring string, JavaThread* jt) { - assert(jt != NULL, "invariant"); + assert(jt != nullptr, "invariant"); { JfrStringPoolWriter writer(jt); writer.write(id); @@ -209,7 +209,7 @@ size_t JfrStringPool::clear() { void JfrStringPool::register_full(BufferPtr buffer, Thread* thread) { // nothing here at the moment - assert(buffer != NULL, "invariant"); + assert(buffer != nullptr, "invariant"); assert(buffer->acquired_by(thread), "invariant"); assert(buffer->retired(), "invariant"); } diff --git a/src/hotspot/share/jfr/support/jfrAdaptiveSampler.cpp b/src/hotspot/share/jfr/support/jfrAdaptiveSampler.cpp index 2246b8daea19c..c29dc5a8e321b 100644 --- a/src/hotspot/share/jfr/support/jfrAdaptiveSampler.cpp +++ b/src/hotspot/share/jfr/support/jfrAdaptiveSampler.cpp @@ -44,9 +44,9 @@ JfrSamplerWindow::JfrSamplerWindow() : JfrAdaptiveSampler::JfrAdaptiveSampler() : _prng(this), - _window_0(NULL), - _window_1(NULL), - _active_window(NULL), + _window_0(nullptr), + _window_1(nullptr), + _active_window(nullptr), _avg_population_size(0), _ewma_population_size_alpha(0), _acc_debt_carry_limit(0), @@ -59,14 +59,14 @@ JfrAdaptiveSampler::~JfrAdaptiveSampler() { } bool JfrAdaptiveSampler::initialize() { - assert(_window_0 == NULL, "invariant"); + assert(_window_0 == nullptr, "invariant"); _window_0 = new JfrSamplerWindow(); - if (_window_0 == NULL) { + if (_window_0 == nullptr) { return false; } - assert(_window_1 == NULL, "invariant"); + assert(_window_1 == nullptr, "invariant"); _window_1 = new JfrSamplerWindow(); - if (_window_1 == NULL) { + if (_window_1 == nullptr) { return false; } _active_window = _window_0; @@ -102,7 +102,7 @@ inline bool JfrSamplerWindow::is_expired(int64_t timestamp) const { } bool JfrSamplerWindow::sample(int64_t timestamp, bool* expired_window) const { - assert(expired_window != NULL, "invariant"); + assert(expired_window != nullptr, "invariant"); *expired_window = is_expired(timestamp); return *expired_window ? 
false : sample(); } @@ -116,7 +116,7 @@ inline bool JfrSamplerWindow::sample() const { void JfrAdaptiveSampler::rotate_window(int64_t timestamp) { assert(_lock, "invariant"); const JfrSamplerWindow* const current = active_window(); - assert(current != NULL, "invariant"); + assert(current != nullptr, "invariant"); if (!current->is_expired(timestamp)) { // Someone took care of it. return; @@ -229,7 +229,7 @@ JfrSamplerWindow* JfrAdaptiveSampler::set_rate(const JfrSamplerParams& params, c } inline JfrSamplerWindow* JfrAdaptiveSampler::next_window(const JfrSamplerWindow* expired) const { - assert(expired != NULL, "invariant"); + assert(expired != nullptr, "invariant"); return expired == _window_0 ? _window_1 : _window_0; } @@ -257,7 +257,7 @@ size_t JfrAdaptiveSampler::project_sample_size(const JfrSamplerParams& params, c * or 'amortize' debt accumulated by its predecessor(s). */ size_t JfrAdaptiveSampler::amortize_debt(const JfrSamplerWindow* expired) { - assert(expired != NULL, "invariant"); + assert(expired != nullptr, "invariant"); const intptr_t accumulated_debt = expired->accumulated_debt(); assert(accumulated_debt <= 0, "invariant"); if (_acc_debt_carry_count == _acc_debt_carry_limit) { @@ -326,7 +326,7 @@ size_t JfrAdaptiveSampler::derive_sampling_interval(double sample_size, const Jf // The projected population size is an exponentially weighted moving average, a function of the window_lookback_count. inline size_t JfrAdaptiveSampler::project_population_size(const JfrSamplerWindow* expired) { - assert(expired != NULL, "invariant"); + assert(expired != nullptr, "invariant"); _avg_population_size = exponentially_weighted_moving_average(expired->population_size(), _ewma_population_size_alpha, _avg_population_size); return _avg_population_size; } @@ -360,7 +360,7 @@ bool JfrGTestFixedRateSampler::initialize() { * */ static void log(const JfrSamplerWindow* expired, double* sample_size_ewma) { - assert(sample_size_ewma != NULL, "invariant"); + assert(sample_size_ewma != nullptr, "invariant"); if (log_is_enabled(Debug, jfr, system, throttle)) { *sample_size_ewma = exponentially_weighted_moving_average(expired->sample_size(), compute_ewma_alpha_coefficient(expired->params().window_lookback_count), *sample_size_ewma); log_debug(jfr, system, throttle)("JfrGTestFixedRateSampler: avg.sample size: %0.4f, window set point: %zu, sample size: %zu, population size: %zu, ratio: %.4f, window duration: %zu ms\n", @@ -378,7 +378,7 @@ static void log(const JfrSamplerWindow* expired, double* sample_size_ewma) { * parameters, possibly updated, for the engine to apply to the next window. */ const JfrSamplerParams& JfrGTestFixedRateSampler::next_window_params(const JfrSamplerWindow* expired) { - assert(expired != NULL, "invariant"); + assert(expired != nullptr, "invariant"); assert(_lock, "invariant"); log(expired, &_sample_size_ewma); return _params; diff --git a/src/hotspot/share/jfr/support/jfrFlush.cpp b/src/hotspot/share/jfr/support/jfrFlush.cpp index 38c4ec1821b01..42e4f5f45818a 100644 --- a/src/hotspot/share/jfr/support/jfrFlush.cpp +++ b/src/hotspot/share/jfr/support/jfrFlush.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -39,7 +39,7 @@ template class LessThanHalfBufferSize : AllStatic { public: static bool evaluate(T* t) { - assert(t != NULL, "invariant"); + assert(t != nullptr, "invariant"); return t->free_size() < t->size() / 2; } }; @@ -48,7 +48,7 @@ template class LessThanSize : AllStatic { public: static bool evaluate(T* t, size_t size) { - assert(t != NULL, "invariant"); + assert(t != nullptr, "invariant"); return t->free_size() < size; } }; diff --git a/src/hotspot/share/jfr/support/jfrIntrinsics.cpp b/src/hotspot/share/jfr/support/jfrIntrinsics.cpp index 390cc69614996..aaecd4500f29d 100644 --- a/src/hotspot/share/jfr/support/jfrIntrinsics.cpp +++ b/src/hotspot/share/jfr/support/jfrIntrinsics.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ #ifdef ASSERT static void assert_precondition(JavaThread* jt) { - assert(jt != NULL, "invariant"); + assert(jt != nullptr, "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_java(jt);) assert(jt->has_last_Java_frame(), "invariant"); } @@ -72,7 +72,7 @@ void* JfrIntrinsicSupport::write_checkpoint(JavaThread* jt) { } void JfrIntrinsicSupport::load_barrier(const Klass* klass) { - assert(klass != NULL, "sanity"); + assert(klass != nullptr, "sanity"); JfrTraceIdLoadBarrier::load_barrier(klass); } diff --git a/src/hotspot/share/jfr/support/jfrJdkJfrEvent.cpp b/src/hotspot/share/jfr/support/jfrJdkJfrEvent.cpp index fdc217308038c..5388349c3f6bd 100644 --- a/src/hotspot/share/jfr/support/jfrJdkJfrEvent.cpp +++ b/src/hotspot/share/jfr/support/jfrJdkJfrEvent.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,7 @@ #include "runtime/javaThread.hpp" #include "utilities/stack.inline.hpp" -static jobject empty_java_util_arraylist = NULL; +static jobject empty_java_util_arraylist = nullptr; static oop new_java_util_arraylist(TRAPS) { DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); @@ -57,10 +57,10 @@ static GrowableArray* c_heap_allocate_array(int size = initial_array_size) { static bool initialize(TRAPS) { static bool initialized = false; if (!initialized) { - assert(NULL == empty_java_util_arraylist, "invariant"); + assert(nullptr == empty_java_util_arraylist, "invariant"); const oop array_list = new_java_util_arraylist(CHECK_false); empty_java_util_arraylist = JfrJavaSupport::global_jni_handle(array_list, THREAD); - initialized = empty_java_util_arraylist != NULL; + initialized = empty_java_util_arraylist != nullptr; } return initialized; } @@ -72,7 +72,7 @@ static bool initialize(TRAPS) { * trigger initialization. */ static bool is_allowed(const Klass* k) { - assert(k != NULL, "invariant"); + assert(k != nullptr, "invariant"); if (!JfrTraceId::is_jdk_jfr_event_sub(k)) { // Was excluded during initial class load. 
return false; @@ -82,7 +82,7 @@ static bool is_allowed(const Klass* k) { static void fill_klasses(GrowableArray& event_subklasses, const InstanceKlass* event_klass, JavaThread* thread) { assert(event_subklasses.length() == 0, "invariant"); - assert(event_klass != NULL, "invariant"); + assert(event_klass != nullptr, "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(thread)); for (ClassHierarchyIterator iter(const_cast(event_klass)); !iter.done(); iter.next()) { @@ -107,21 +107,21 @@ static void transform_klasses_to_local_jni_handles(GrowableArray& e jobject JdkJfrEvent::get_all_klasses(TRAPS) { DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD)); initialize(THREAD); - assert(empty_java_util_arraylist != NULL, "should have been setup already!"); + assert(empty_java_util_arraylist != nullptr, "should have been setup already!"); static const char jdk_jfr_event_name[] = "jdk/internal/event/Event"; Symbol* const event_klass_name = SymbolTable::probe(jdk_jfr_event_name, sizeof jdk_jfr_event_name - 1); - if (NULL == event_klass_name) { + if (nullptr == event_klass_name) { // not loaded yet return empty_java_util_arraylist; } const Klass* const klass = SystemDictionary::resolve_or_null(event_klass_name, THREAD); - assert(klass != NULL, "invariant"); + assert(klass != nullptr, "invariant"); assert(klass->is_instance_klass(), "invariant"); assert(JdkJfrEvent::is(klass), "invariant"); - if (klass->subklass() == NULL) { + if (klass->subklass() == nullptr) { return empty_java_util_arraylist; } @@ -141,13 +141,12 @@ jobject JdkJfrEvent::get_all_klasses(TRAPS) { static const char add_method_name[] = "add"; static const char add_method_signature[] = "(Ljava/lang/Object;)Z"; const Klass* const array_list_klass = JfrJavaSupport::klass(empty_java_util_arraylist); - assert(array_list_klass != NULL, "invariant"); + assert(array_list_klass != nullptr, "invariant"); const Symbol* const add_method_sym = SymbolTable::new_symbol(add_method_name); - assert(add_method_sym != NULL, "invariant"); + assert(add_method_sym != nullptr, "invariant"); const Symbol* const add_method_sig_sym = SymbolTable::new_symbol(add_method_signature); - assert(add_method_signature != NULL, "invariant"); JavaValue result(T_BOOLEAN); for (int i = 0; i < event_subklasses.length(); ++i) { diff --git a/src/hotspot/share/jfr/support/jfrKlassUnloading.cpp b/src/hotspot/share/jfr/support/jfrKlassUnloading.cpp index bbd4a03065af8..46d9cea90e90c 100644 --- a/src/hotspot/share/jfr/support/jfrKlassUnloading.cpp +++ b/src/hotspot/share/jfr/support/jfrKlassUnloading.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,20 +40,20 @@ static GrowableArray* c_heap_allocate_array(int size = initial_array_size) { } // Track the set of unloaded klasses during a chunk / epoch. 
-static GrowableArray<traceid>* _unload_set_epoch_0 = NULL; -static GrowableArray<traceid>* _unload_set_epoch_1 = NULL; +static GrowableArray<traceid>* _unload_set_epoch_0 = nullptr; +static GrowableArray<traceid>* _unload_set_epoch_1 = nullptr; static s8 event_klass_unloaded_count = 0; static GrowableArray<traceid>* unload_set_epoch_0() { - if (_unload_set_epoch_0 == NULL) { + if (_unload_set_epoch_0 == nullptr) { _unload_set_epoch_0 = c_heap_allocate_array(initial_array_size); } return _unload_set_epoch_0; } static GrowableArray<traceid>* unload_set_epoch_1() { - if (_unload_set_epoch_1 == NULL) { + if (_unload_set_epoch_1 == nullptr) { _unload_set_epoch_1 = c_heap_allocate_array(initial_array_size); } return _unload_set_epoch_1; @@ -72,16 +72,16 @@ static GrowableArray<traceid>* get_unload_set_previous_epoch() { } static void sort_set(GrowableArray<traceid>* set) { - assert(set != NULL, "invariant"); + assert(set != nullptr, "invariant"); assert(set->is_nonempty(), "invariant"); set->sort(sort_traceid); } static bool is_nonempty_set(u1 epoch) { if (epoch == 0) { - return _unload_set_epoch_0 != NULL && _unload_set_epoch_0->is_nonempty(); + return _unload_set_epoch_0 != nullptr && _unload_set_epoch_0->is_nonempty(); } - return _unload_set_epoch_1 != NULL && _unload_set_epoch_1->is_nonempty(); + return _unload_set_epoch_1 != nullptr && _unload_set_epoch_1->is_nonempty(); } void JfrKlassUnloading::sort(bool previous_epoch) { @@ -104,7 +104,7 @@ void JfrKlassUnloading::clear() { static bool add_to_unloaded_klass_set(traceid klass_id, bool current_epoch) { assert_locked_or_safepoint(ClassLoaderDataGraph_lock); GrowableArray<traceid>* const unload_set = current_epoch ? get_unload_set() : get_unload_set_previous_epoch(); - assert(unload_set != NULL, "invariant"); + assert(unload_set != nullptr, "invariant"); assert(unload_set->find(klass_id) == -1, "invariant"); unload_set->append(klass_id); return true; @@ -123,7 +123,7 @@ static void send_finalizer_event(const Klass* k) { #endif bool JfrKlassUnloading::on_unload(const Klass* k) { - assert(k != NULL, "invariant"); + assert(k != nullptr, "invariant"); assert_locked_or_safepoint(ClassLoaderDataGraph_lock); MANAGEMENT_ONLY(send_finalizer_event(k);) if (IS_JDK_JFR_EVENT_SUBKLASS(k)) { diff --git a/src/hotspot/share/jfr/support/jfrMethodLookup.cpp b/src/hotspot/share/jfr/support/jfrMethodLookup.cpp index 920a76cdd47f1..f83fc95d2700b 100644 --- a/src/hotspot/share/jfr/support/jfrMethodLookup.cpp +++ b/src/hotspot/share/jfr/support/jfrMethodLookup.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,11 +32,11 @@ // The InstanceKlass is assumed to be the method holder for the method to be looked up.
static const Method* lookup_method(InstanceKlass* ik, int orig_method_id_num) { - assert(ik != NULL, "invariant"); + assert(ik != nullptr, "invariant"); assert(orig_method_id_num >= 0, "invariant"); assert(orig_method_id_num < ik->methods()->length(), "invariant"); const Method* const m = ik->method_with_orig_idnum(orig_method_id_num); - assert(m != NULL, "invariant"); + assert(m != nullptr, "invariant"); assert(m->orig_method_idnum() == orig_method_id_num, "invariant"); assert(!m->is_obsolete(), "invariant"); assert(ik == m->method_holder(), "invariant"); @@ -44,7 +44,7 @@ static const Method* lookup_method(InstanceKlass* ik, int orig_method_id_num) { } const Method* JfrMethodLookup::lookup(const InstanceKlass* ik, traceid method_id) { - assert(ik != NULL, "invariant"); + assert(ik != nullptr, "invariant"); return lookup_method(const_cast(ik), method_id_num(method_id)); } @@ -53,7 +53,7 @@ int JfrMethodLookup::method_id_num(traceid method_id) { } traceid JfrMethodLookup::method_id(const Method* method) { - assert(method != NULL, "invariant"); + assert(method != nullptr, "invariant"); return METHOD_ID(method->method_holder(), method); } diff --git a/src/hotspot/share/jfr/support/jfrObjectAllocationSample.cpp b/src/hotspot/share/jfr/support/jfrObjectAllocationSample.cpp index fb71d7a3362dd..7c62516a224c9 100644 --- a/src/hotspot/share/jfr/support/jfrObjectAllocationSample.cpp +++ b/src/hotspot/share/jfr/support/jfrObjectAllocationSample.cpp @@ -67,7 +67,7 @@ inline int64_t estimate_tlab_size_bytes(Thread* thread) { } inline int64_t load_allocated_bytes(Thread* thread) { - assert(thread != NULL, "invariant"); + assert(thread != nullptr, "invariant"); const int64_t allocated_bytes = thread->allocated_bytes(); if (allocated_bytes < _last_allocated_bytes) { // A hw thread can detach and reattach to the VM, and when it does, diff --git a/src/hotspot/share/jfr/support/jfrStackTraceMark.cpp b/src/hotspot/share/jfr/support/jfrStackTraceMark.cpp index a5450f61646c8..d1fa55e6dde6f 100644 --- a/src/hotspot/share/jfr/support/jfrStackTraceMark.cpp +++ b/src/hotspot/share/jfr/support/jfrStackTraceMark.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -47,7 +47,7 @@ JfrStackTraceMark::JfrStackTraceMark(Thread* t) : _t(t), _previous_id(0), _previ tl->set_cached_stack_trace_id(JfrStackTraceRepository::record(t)); } -JfrStackTraceMark::JfrStackTraceMark(JfrEventId eventId) : _t(NULL), _previous_id(0), _previous_hash(0) { +JfrStackTraceMark::JfrStackTraceMark(JfrEventId eventId) : _t(nullptr), _previous_id(0), _previous_hash(0) { if (JfrEventSetting::has_stacktrace(eventId)) { _t = Thread::current(); JfrThreadLocal* const tl = _t->jfr_thread_local(); @@ -59,7 +59,7 @@ JfrStackTraceMark::JfrStackTraceMark(JfrEventId eventId) : _t(NULL), _previous_i } } -JfrStackTraceMark::JfrStackTraceMark(JfrEventId eventId, Thread* t) : _t(NULL), _previous_id(0), _previous_hash(0) { +JfrStackTraceMark::JfrStackTraceMark(JfrEventId eventId, Thread* t) : _t(nullptr), _previous_id(0), _previous_hash(0) { if (JfrEventSetting::has_stacktrace(eventId)) { _t = t; JfrThreadLocal* const tl = _t->jfr_thread_local(); @@ -75,7 +75,7 @@ JfrStackTraceMark::~JfrStackTraceMark() { if (_previous_id != 0) { _t->jfr_thread_local()->set_cached_stack_trace_id(_previous_id, _previous_hash); } else { - if (_t != NULL) { + if (_t != nullptr) { _t->jfr_thread_local()->clear_cached_stack_trace(); } } diff --git a/src/hotspot/share/jfr/support/jfrSymbolTable.cpp b/src/hotspot/share/jfr/support/jfrSymbolTable.cpp index 5a580c0c3d3c1..e01168ca7c6b9 100644 --- a/src/hotspot/share/jfr/support/jfrSymbolTable.cpp +++ b/src/hotspot/share/jfr/support/jfrSymbolTable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -46,17 +46,17 @@ static uintptr_t string_hash(const char* str) { return java_lang_String::hash_code(reinterpret_cast<const jbyte*>(str), static_cast<int>(strlen(str))); } -static JfrSymbolTable::StringEntry* bootstrap = NULL; +static JfrSymbolTable::StringEntry* bootstrap = nullptr; -static JfrSymbolTable* _instance = NULL; +static JfrSymbolTable* _instance = nullptr; static JfrSymbolTable& instance() { - assert(_instance != NULL, "invariant"); + assert(_instance != nullptr, "invariant"); return *_instance; } JfrSymbolTable* JfrSymbolTable::create() { - assert(_instance == NULL, "invariant"); + assert(_instance == nullptr, "invariant"); assert_lock_strong(ClassLoaderDataGraph_lock); _instance = new JfrSymbolTable(); return _instance; @@ -64,26 +64,26 @@ JfrSymbolTable* JfrSymbolTable::create() { void JfrSymbolTable::destroy() { assert_lock_strong(ClassLoaderDataGraph_lock); - if (_instance != NULL) { + if (_instance != nullptr) { delete _instance; - _instance = NULL; + _instance = nullptr; } - assert(_instance == NULL, "invariant"); + assert(_instance == nullptr, "invariant"); } JfrSymbolTable::JfrSymbolTable() : _symbols(new Symbols(this)), _strings(new Strings(this)), - _symbol_list(NULL), - _string_list(NULL), - _symbol_query(NULL), - _string_query(NULL), + _symbol_list(nullptr), + _string_list(nullptr), + _symbol_query(nullptr), + _string_query(nullptr), _id_counter(1), _class_unload(false) { - assert(_symbols != NULL, "invariant"); - assert(_strings != NULL, "invariant"); + assert(_symbols != nullptr, "invariant"); + assert(_strings != nullptr, "invariant"); bootstrap = new StringEntry(0, (const char*)&BOOTSTRAP_LOADER_NAME); - assert(bootstrap != NULL, "invariant"); + assert(bootstrap != nullptr, "invariant"); bootstrap->set_id(create_symbol_id(1)); _string_list = bootstrap; } @@ -96,25 +96,25 @@ JfrSymbolTable::~JfrSymbolTable() { } void JfrSymbolTable::clear() { - assert(_symbols != NULL, "invariant"); + assert(_symbols != nullptr, "invariant"); if (_symbols->has_entries()) { _symbols->clear_entries(); } assert(!_symbols->has_entries(), "invariant"); - assert(_strings != NULL, "invariant"); + assert(_strings != nullptr, "invariant"); if (_strings->has_entries()) { _strings->clear_entries(); } assert(!_strings->has_entries(), "invariant"); - _symbol_list = NULL; + _symbol_list = nullptr; _id_counter = 1; - _symbol_query = NULL; - _string_query = NULL; + _symbol_query = nullptr; + _string_query = nullptr; - assert(bootstrap != NULL, "invariant"); + assert(bootstrap != nullptr, "invariant"); bootstrap->reset(); _string_list = bootstrap; } @@ -131,7 +131,7 @@ void JfrSymbolTable::increment_checkpoint_id() { template <typename T> inline void JfrSymbolTable::assign_id(T* entry) { - assert(entry != NULL, "invariant"); + assert(entry != nullptr, "invariant"); assert(entry->id() == 0, "invariant"); entry->set_id(create_symbol_id(++_id_counter)); } @@ -144,22 +144,22 @@ void JfrSymbolTable::on_link(const SymbolEntry* entry) { } bool JfrSymbolTable::on_equals(uintptr_t hash, const SymbolEntry* entry) { - assert(entry != NULL, "invariant"); + assert(entry != nullptr, "invariant"); assert(entry->hash() == hash, "invariant"); - assert(_symbol_query != NULL, "invariant"); + assert(_symbol_query != nullptr, "invariant"); return _symbol_query == entry->literal(); } void JfrSymbolTable::on_unlink(const SymbolEntry* entry) { - assert(entry != NULL, "invariant"); + assert(entry != nullptr, "invariant"); const_cast<Symbol*>(entry->literal())->decrement_refcount(); }
static const char* resource_to_c_heap_string(const char* resource_str) { - assert(resource_str != NULL, "invariant"); + assert(resource_str != nullptr, "invariant"); const size_t length = strlen(resource_str); char* const c_string = JfrCHeapObj::new_array(length + 1); - assert(c_string != NULL, "invariant"); + assert(c_string != nullptr, "invariant"); strncpy(c_string, resource_str, length + 1); return c_string; } @@ -172,26 +172,26 @@ void JfrSymbolTable::on_link(const StringEntry* entry) { } static bool string_compare(const char* query, const char* candidate) { - assert(query != NULL, "invariant"); - assert(candidate != NULL, "invariant"); + assert(query != nullptr, "invariant"); + assert(candidate != nullptr, "invariant"); const size_t length = strlen(query); return strncmp(query, candidate, length) == 0; } bool JfrSymbolTable::on_equals(uintptr_t hash, const StringEntry* entry) { - assert(entry != NULL, "invariant"); + assert(entry != nullptr, "invariant"); assert(entry->hash() == hash, "invariant"); - assert(_string_query != NULL, "invariant"); + assert(_string_query != nullptr, "invariant"); return string_compare(_string_query, entry->literal()); } void JfrSymbolTable::on_unlink(const StringEntry* entry) { - assert(entry != NULL, "invariant"); + assert(entry != nullptr, "invariant"); JfrCHeapObj::free(const_cast(entry->literal()), strlen(entry->literal() + 1)); } traceid JfrSymbolTable::bootstrap_name(bool leakp) { - assert(bootstrap != NULL, "invariant"); + assert(bootstrap != nullptr, "invariant"); if (leakp) { bootstrap->set_leakp(); } @@ -199,13 +199,13 @@ traceid JfrSymbolTable::bootstrap_name(bool leakp) { } traceid JfrSymbolTable::mark(const Symbol* sym, bool leakp /* false */) { - assert(sym != NULL, "invariant"); + assert(sym != nullptr, "invariant"); return mark((uintptr_t)sym->identity_hash(), sym, leakp); } traceid JfrSymbolTable::mark(uintptr_t hash, const Symbol* sym, bool leakp) { - assert(sym != NULL, "invariant"); - assert(_symbols != NULL, "invariant"); + assert(sym != nullptr, "invariant"); + assert(_symbols != nullptr, "invariant"); _symbol_query = sym; const SymbolEntry& entry = _symbols->lookup_put(hash, sym); if (_class_unload) { @@ -222,8 +222,8 @@ traceid JfrSymbolTable::mark(const char* str, bool leakp /* false*/) { } traceid JfrSymbolTable::mark(uintptr_t hash, const char* str, bool leakp) { - assert(str != NULL, "invariant"); - assert(_strings != NULL, "invariant"); + assert(str != nullptr, "invariant"); + assert(_strings != nullptr, "invariant"); _string_query = str; const StringEntry& entry = _strings->lookup_put(hash, str); if (_class_unload) { @@ -244,20 +244,20 @@ traceid JfrSymbolTable::mark(uintptr_t hash, const char* str, bool leakp) { */ uintptr_t JfrSymbolTable::hidden_klass_name_hash(const InstanceKlass* ik) { - assert(ik != NULL, "invariant"); + assert(ik != nullptr, "invariant"); assert(ik->is_hidden(), "invariant"); const oop mirror = ik->java_mirror_no_keepalive(); - assert(mirror != NULL, "invariant"); + assert(mirror != nullptr, "invariant"); return (uintptr_t)mirror->identity_hash(); } static const char* create_hidden_klass_symbol(const InstanceKlass* ik, uintptr_t hash) { - assert(ik != NULL, "invariant"); + assert(ik != nullptr, "invariant"); assert(ik->is_hidden(), "invariant"); assert(hash != 0, "invariant"); - char* hidden_symbol = NULL; + char* hidden_symbol = nullptr; const oop mirror = ik->java_mirror_no_keepalive(); - assert(mirror != NULL, "invariant"); + assert(mirror != nullptr, "invariant"); char hash_buf[40]; 
os::snprintf_checked(hash_buf, sizeof(hash_buf), "/" UINTX_FORMAT, hash); const size_t hash_len = strlen(hash_buf); @@ -271,12 +271,12 @@ static const char* create_hidden_klass_symbol(const InstanceKlass* ik, uintptr_t } bool JfrSymbolTable::is_hidden_klass(const Klass* k) { - assert(k != NULL, "invariant"); + assert(k != nullptr, "invariant"); return k->is_instance_klass() && ((const InstanceKlass*)k)->is_hidden(); } traceid JfrSymbolTable::mark_hidden_klass_name(const InstanceKlass* ik, bool leakp) { - assert(ik != NULL, "invariant"); + assert(ik != nullptr, "invariant"); assert(ik->is_hidden(), "invariant"); const uintptr_t hash = hidden_klass_name_hash(ik); const char* const hidden_symbol = create_hidden_klass_symbol(ik, hash); @@ -284,14 +284,14 @@ traceid JfrSymbolTable::mark_hidden_klass_name(const InstanceKlass* ik, bool lea } traceid JfrSymbolTable::mark(const Klass* k, bool leakp) { - assert(k != NULL, "invariant"); + assert(k != nullptr, "invariant"); traceid symbol_id = 0; if (is_hidden_klass(k)) { assert(k->is_instance_klass(), "invariant"); symbol_id = mark_hidden_klass_name((const InstanceKlass*)k, leakp); } else { Symbol* const sym = k->name(); - if (sym != NULL) { + if (sym != nullptr) { symbol_id = mark(sym, leakp); } } @@ -301,8 +301,8 @@ traceid JfrSymbolTable::mark(const Klass* k, bool leakp) { template traceid JfrSymbolTable::add_impl(const T* sym) { - assert(sym != NULL, "invariant"); - assert(_instance != NULL, "invariant"); + assert(sym != nullptr, "invariant"); + assert(_instance != nullptr, "invariant"); assert_locked_or_safepoint(ClassLoaderDataGraph_lock); return instance().mark(sym); } diff --git a/src/hotspot/share/jfr/support/jfrSymbolTable.hpp b/src/hotspot/share/jfr/support/jfrSymbolTable.hpp index 5a6d9beaa1776..a1951e52cc376 100644 --- a/src/hotspot/share/jfr/support/jfrSymbolTable.hpp +++ b/src/hotspot/share/jfr/support/jfrSymbolTable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -32,10 +32,10 @@ template <typename T> class ListEntry : public JfrHashtableEntry<T> { public: ListEntry(uintptr_t hash, const T& data) : JfrHashtableEntry<T>(hash, data), - _list_next(NULL), _serialized(false), _unloading(false), _leakp(false) {} + _list_next(nullptr), _serialized(false), _unloading(false), _leakp(false) {} const ListEntry* list_next() const { return _list_next; } void reset() const { - _list_next = NULL; _serialized = false; _unloading = false; _leakp = false; + _list_next = nullptr; _serialized = false; _unloading = false; _leakp = false; } void set_list_next(const ListEntry* next) const { _list_next = next; } bool is_serialized() const { return _serialized; } @@ -103,8 +103,8 @@ class JfrSymbolTable : public JfrCHeapObj { traceid bootstrap_name(bool leakp); bool has_entries() const { return has_symbol_entries() || has_string_entries(); } - bool has_symbol_entries() const { return _symbol_list != NULL; } - bool has_string_entries() const { return _string_list != NULL; } + bool has_symbol_entries() const { return _symbol_list != nullptr; } + bool has_string_entries() const { return _string_list != nullptr; } traceid mark_hidden_klass_name(const InstanceKlass* k, bool leakp); bool is_hidden_klass(const Klass* k); @@ -137,7 +137,7 @@ class JfrSymbolTable : public JfrCHeapObj { template <typename Functor, typename T> void iterate(Functor& functor, const T* list) { const T* symbol = list; - while (symbol != NULL) { + while (symbol != nullptr) { const T* next = symbol->list_next(); functor(symbol); symbol = next; diff --git a/src/hotspot/share/jfr/support/jfrThreadLocal.cpp b/src/hotspot/share/jfr/support/jfrThreadLocal.cpp index ff4d255fc9806..9892554a7a9cc 100644 --- a/src/hotspot/share/jfr/support/jfrThreadLocal.cpp +++ b/src/hotspot/share/jfr/support/jfrThreadLocal.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,15 +47,15 @@ #include "utilities/sizes.hpp" JfrThreadLocal::JfrThreadLocal() : - _java_event_writer(NULL), - _java_buffer(NULL), - _native_buffer(NULL), - _shelved_buffer(NULL), - _load_barrier_buffer_epoch_0(NULL), - _load_barrier_buffer_epoch_1(NULL), - _checkpoint_buffer_epoch_0(NULL), - _checkpoint_buffer_epoch_1(NULL), - _stackframes(NULL), + _java_event_writer(nullptr), + _java_buffer(nullptr), + _native_buffer(nullptr), + _shelved_buffer(nullptr), + _load_barrier_buffer_epoch_0(nullptr), + _load_barrier_buffer_epoch_1(nullptr), + _checkpoint_buffer_epoch_0(nullptr), + _checkpoint_buffer_epoch_1(nullptr), + _stackframes(nullptr), _dcmd_arena(nullptr), _thread(), _vthread_id(0), @@ -77,7 +77,7 @@ JfrThreadLocal::JfrThreadLocal() : _vthread(false), _dead(false) { Thread* thread = Thread::current_or_null(); - _parent_trace_id = thread != NULL ? jvm_thread_id(thread) : (traceid)0; + _parent_trace_id = thread != nullptr ?
jvm_thread_id(thread) : (traceid)0;
 }
 
 u8 JfrThreadLocal::add_data_lost(u8 value) {
@@ -99,7 +99,7 @@ const JfrBlobHandle& JfrThreadLocal::thread_blob() const {
 }
 
 static void send_java_thread_start_event(JavaThread* jt) {
-  assert(jt != NULL, "invariant");
+  assert(jt != nullptr, "invariant");
   assert(Thread::current() == jt, "invariant");
   if (!JfrJavaSupport::on_thread_start(jt)) {
     // thread is excluded
@@ -146,35 +146,35 @@ void JfrThreadLocal::release(Thread* t) {
   if (has_java_event_writer()) {
     assert(t->is_Java_thread(), "invariant");
     JfrJavaSupport::destroy_global_jni_handle(java_event_writer());
-    _java_event_writer = NULL;
+    _java_event_writer = nullptr;
   }
   if (has_native_buffer()) {
     JfrStorage::release_thread_local(native_buffer(), t);
-    _native_buffer = NULL;
+    _native_buffer = nullptr;
   }
   if (has_java_buffer()) {
     JfrStorage::release_thread_local(java_buffer(), t);
-    _java_buffer = NULL;
+    _java_buffer = nullptr;
   }
-  if (_stackframes != NULL) {
+  if (_stackframes != nullptr) {
     FREE_C_HEAP_ARRAY(JfrStackFrame, _stackframes);
-    _stackframes = NULL;
+    _stackframes = nullptr;
   }
-  if (_load_barrier_buffer_epoch_0 != NULL) {
+  if (_load_barrier_buffer_epoch_0 != nullptr) {
     _load_barrier_buffer_epoch_0->set_retired();
-    _load_barrier_buffer_epoch_0 = NULL;
+    _load_barrier_buffer_epoch_0 = nullptr;
   }
-  if (_load_barrier_buffer_epoch_1 != NULL) {
+  if (_load_barrier_buffer_epoch_1 != nullptr) {
     _load_barrier_buffer_epoch_1->set_retired();
-    _load_barrier_buffer_epoch_1 = NULL;
+    _load_barrier_buffer_epoch_1 = nullptr;
   }
-  if (_checkpoint_buffer_epoch_0 != NULL) {
+  if (_checkpoint_buffer_epoch_0 != nullptr) {
     _checkpoint_buffer_epoch_0->set_retired();
-    _checkpoint_buffer_epoch_0 = NULL;
+    _checkpoint_buffer_epoch_0 = nullptr;
   }
-  if (_checkpoint_buffer_epoch_1 != NULL) {
+  if (_checkpoint_buffer_epoch_1 != nullptr) {
     _checkpoint_buffer_epoch_1->set_retired();
-    _checkpoint_buffer_epoch_1 = NULL;
+    _checkpoint_buffer_epoch_1 = nullptr;
   }
   if (_dcmd_arena != nullptr) {
     delete _dcmd_arena;
@@ -183,17 +183,17 @@ void JfrThreadLocal::release(Thread* t) {
 }
 
 void JfrThreadLocal::release(JfrThreadLocal* tl, Thread* t) {
-  assert(tl != NULL, "invariant");
-  assert(t != NULL, "invariant");
+  assert(tl != nullptr, "invariant");
+  assert(t != nullptr, "invariant");
   assert(Thread::current() == t, "invariant");
   assert(!tl->is_dead(), "invariant");
-  assert(tl->shelved_buffer() == NULL, "invariant");
+  assert(tl->shelved_buffer() == nullptr, "invariant");
   tl->_dead = true;
   tl->release(t);
 }
 
 static void send_java_thread_end_event(JavaThread* jt, traceid tid) {
-  assert(jt != NULL, "invariant");
+  assert(jt != nullptr, "invariant");
   assert(Thread::current() == jt, "invariant");
   assert(tid != 0, "invariant");
   if (JfrRecorder::is_recording()) {
@@ -205,7 +205,7 @@ static void send_java_thread_end_event(JavaThread* jt, traceid tid) {
 }
 
 void JfrThreadLocal::on_exit(Thread* t) {
-  assert(t != NULL, "invariant");
+  assert(t != nullptr, "invariant");
   JfrThreadLocal * const tl = t->jfr_thread_local();
   assert(!tl->is_dead(), "invariant");
   if (JfrRecorder::is_recording()) {
@@ -237,7 +237,7 @@ JfrBuffer* JfrThreadLocal::install_java_buffer() const {
 }
 
 JfrStackFrame* JfrThreadLocal::install_stackframes() const {
-  assert(_stackframes == NULL, "invariant");
+  assert(_stackframes == nullptr, "invariant");
   _stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, stackdepth(), mtTracing);
   return _stackframes;
 }
@@ -327,14 +327,14 @@ bool JfrThreadLocal::is_impersonating(const Thread* t) {
 }
 
 void JfrThreadLocal::impersonate(const Thread* t, traceid other_thread_id) {
-  assert(t != NULL, "invariant");
+  assert(t != nullptr, "invariant");
   assert(other_thread_id != 0, "invariant");
   JfrThreadLocal* const tl = t->jfr_thread_local();
   tl->_thread_id_alias = other_thread_id;
 }
 
 void JfrThreadLocal::stop_impersonating(const Thread* t) {
-  assert(t != NULL, "invariant");
+  assert(t != nullptr, "invariant");
   JfrThreadLocal* const tl = t->jfr_thread_local();
   if (is_impersonating(t)) {
     tl->_thread_id_alias = max_julong;
@@ -380,7 +380,7 @@ u2 JfrThreadLocal::vthread_epoch(const JavaThread* jt) {
 }
 
 traceid JfrThreadLocal::thread_id(const Thread* t) {
-  assert(t != NULL, "invariant");
+  assert(t != nullptr, "invariant");
   if (is_impersonating(t)) {
     return t->jfr_thread_local()->_thread_id_alias;
   }
@@ -404,7 +404,7 @@ traceid JfrThreadLocal::thread_id(const Thread* t) {
 // When not recording, there is no checkpoint system
 // in place for writing vthread information.
 traceid JfrThreadLocal::external_thread_id(const Thread* t) {
-  assert(t != NULL, "invariant");
+  assert(t != nullptr, "invariant");
   return JfrRecorder::is_recording() ? thread_id(t) : jvm_thread_id(t);
 }
 
diff --git a/src/hotspot/share/jfr/support/jfrThreadLocal.hpp b/src/hotspot/share/jfr/support/jfrThreadLocal.hpp
index 4b44ba5a08ec1..f8bb764ef85fd 100644
--- a/src/hotspot/share/jfr/support/jfrThreadLocal.hpp
+++ b/src/hotspot/share/jfr/support/jfrThreadLocal.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -92,11 +92,11 @@ class JfrThreadLocal {
   JfrThreadLocal();
 
   JfrBuffer* native_buffer() const {
-    return _native_buffer != NULL ? _native_buffer : install_native_buffer();
+    return _native_buffer != nullptr ? _native_buffer : install_native_buffer();
   }
 
   bool has_native_buffer() const {
-    return _native_buffer != NULL;
+    return _native_buffer != nullptr;
   }
 
   void set_native_buffer(JfrBuffer* buffer) {
@@ -104,11 +104,11 @@ class JfrThreadLocal {
   }
 
   JfrBuffer* java_buffer() const {
-    return _java_buffer != NULL ? _java_buffer : install_java_buffer();
+    return _java_buffer != nullptr ? _java_buffer : install_java_buffer();
   }
 
   bool has_java_buffer() const {
-    return _java_buffer != NULL;
+    return _java_buffer != nullptr;
   }
 
   void set_java_buffer(JfrBuffer* buffer) {
@@ -124,7 +124,7 @@ class JfrThreadLocal {
   }
 
   bool has_java_event_writer() const {
-    return _java_event_writer != NULL;
+    return _java_event_writer != nullptr;
   }
 
   jobject java_event_writer() {
@@ -136,7 +136,7 @@ class JfrThreadLocal {
   }
 
   JfrStackFrame* stackframes() const {
-    return _stackframes != NULL ? _stackframes : install_stackframes();
+    return _stackframes != nullptr ? _stackframes : install_stackframes();
   }
 
   void set_stackframes(JfrStackFrame* frames) {
diff --git a/src/hotspot/share/jfr/utilities/jfrAllocation.cpp b/src/hotspot/share/jfr/utilities/jfrAllocation.cpp
index 51361cf50f5a5..a9d164c40b030 100644
--- a/src/hotspot/share/jfr/utilities/jfrAllocation.cpp
+++ b/src/hotspot/share/jfr/utilities/jfrAllocation.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -77,7 +77,7 @@ static void hook_memory_deallocation(size_t dealloc_size) {
 #endif // ASSERT
 
 static void hook_memory_allocation(const char* allocation, size_t alloc_size) {
-  if (NULL == allocation) {
+  if (nullptr == allocation) {
     if (!JfrRecorder::is_created()) {
       log_warning(jfr, system)("Memory allocation failed for size [" SIZE_FORMAT "] bytes", alloc_size);
       return;
diff --git a/src/hotspot/share/jfr/utilities/jfrBigEndian.hpp b/src/hotspot/share/jfr/utilities/jfrBigEndian.hpp
index 34db48f90ccd4..b3a3600deb64e 100644
--- a/src/hotspot/share/jfr/utilities/jfrBigEndian.hpp
+++ b/src/hotspot/share/jfr/utilities/jfrBigEndian.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,7 +84,7 @@ inline u8 JfrBigEndian::read_bytes(const address location) {
 
 template <typename T>
 inline T JfrBigEndian::read_unaligned(const address location) {
-  assert(location != NULL, "just checking");
+  assert(location != nullptr, "just checking");
   switch (sizeof(T)) {
     case sizeof(u1) :
       return read_bytes<u1>(location);
@@ -113,7 +113,7 @@ inline bool JfrBigEndian::platform_supports_unaligned_reads(void) {
 
 template <typename T>
 inline T JfrBigEndian::read(const void* location) {
-  assert(location != NULL, "just checking");
+  assert(location != nullptr, "just checking");
   assert(sizeof(T) <= sizeof(u8), "no support for arbitrary sizes");
   if (sizeof(T) == sizeof(u1)) {
     return *(T*)location;
diff --git a/src/hotspot/share/jfr/utilities/jfrBlob.cpp b/src/hotspot/share/jfr/utilities/jfrBlob.cpp
index 0e58716bdb628..f326ff69d4795 100644
--- a/src/hotspot/share/jfr/utilities/jfrBlob.cpp
+++ b/src/hotspot/share/jfr/utilities/jfrBlob.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@ JfrBlob::JfrBlob(const u1* checkpoint, size_t size) :
   _next(),
   _size(size),
   _written(false) {
-  assert(_data != NULL, "invariant");
+  assert(_data != nullptr, "invariant");
   memcpy(const_cast<u1*>(_data), checkpoint, size);
 }
 
@@ -62,6 +62,6 @@ void JfrBlob::set_next(const JfrBlobHandle& ref) {
 
 JfrBlobHandle JfrBlob::make(const u1* data, size_t size) {
   const JfrBlob* const blob = new JfrBlob(data, size);
-  assert(blob != NULL, "invariant");
+  assert(blob != nullptr, "invariant");
   return JfrBlobReference::make(blob);
 }
diff --git a/src/hotspot/share/jfr/utilities/jfrConcurrentLinkedListHost.hpp b/src/hotspot/share/jfr/utilities/jfrConcurrentLinkedListHost.hpp
index 66fe220354cc7..32562b64a0072 100644
--- a/src/hotspot/share/jfr/utilities/jfrConcurrentLinkedListHost.hpp
+++ b/src/hotspot/share/jfr/utilities/jfrConcurrentLinkedListHost.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -89,7 +89,7 @@ class JfrConcurrentLinkedListHost : public AllocPolicy {
   bool initialize();
   void insert_head(NodePtr node, NodePtr head, ConstNodePtr tail) const;
   void insert_tail(NodePtr node, NodePtr head, NodePtr last, ConstNodePtr tail) const;
-  NodePtr remove(NodePtr head, ConstNodePtr tail, NodePtr last = NULL, bool insert_is_head = true);
+  NodePtr remove(NodePtr head, ConstNodePtr tail, NodePtr last = nullptr, bool insert_is_head = true);
   template <typename Callback>
   void iterate(NodePtr head, ConstNodePtr tail, Callback& cb);
   bool in_list(ConstNodePtr node, NodePtr head, ConstNodePtr tail) const;
diff --git a/src/hotspot/share/jfr/utilities/jfrConcurrentLinkedListHost.inline.hpp b/src/hotspot/share/jfr/utilities/jfrConcurrentLinkedListHost.inline.hpp
index 94780d957048d..142c63c053382 100644
--- a/src/hotspot/share/jfr/utilities/jfrConcurrentLinkedListHost.inline.hpp
+++ b/src/hotspot/share/jfr/utilities/jfrConcurrentLinkedListHost.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,11 +38,11 @@
  */
 template <typename Node>
 inline Node* mark_for_removal(Node* node) {
-  assert(node != NULL, "invariant");
+  assert(node != nullptr, "invariant");
   const Node* next = node->_next;
-  assert(next != NULL, "invariant");
+  assert(next != nullptr, "invariant");
   Node* const unmasked_next = unmask(next);
-  return next == unmasked_next && cas(&node->_next, unmasked_next, set_excision_bit(unmasked_next)) ? unmasked_next : NULL;
+  return next == unmasked_next && cas(&node->_next, unmasked_next, set_excision_bit(unmasked_next)) ? unmasked_next : nullptr;
 }
 
 /*
@@ -51,7 +51,7 @@ inline Node* mark_for_removal(Node* node) {
  */
 template <typename Node>
 inline bool mark_for_insertion(Node* node, const Node* tail) {
-  assert(node != NULL, "invariant");
+  assert(node != nullptr, "invariant");
   return node->_next == tail && cas(&node->_next, const_cast<Node*>(tail), set_insertion_bit(tail));
 }
 /*
@@ -60,16 +60,16 @@ inline bool mark_for_insertion(Node* node, const Node* tail) {
  */
 template <typename Node, typename VersionHandle, template <typename> class SearchPolicy>
 Node* find_adjacent(Node* head, const Node* tail, Node** predecessor, VersionHandle& version_handle, SearchPolicy& predicate) {
-  assert(head != NULL, "invariant");
-  assert(tail != NULL, "invariant");
+  assert(head != nullptr, "invariant");
+  assert(tail != nullptr, "invariant");
   assert(head != tail, "invariant");
-  Node* predecessor_next = NULL;
+  Node* predecessor_next = nullptr;
   while (true) {
     Node* current = head;
     version_handle->checkout();
     Node* next = Atomic::load_acquire(&current->_next);
     do {
-      assert(next != NULL, "invariant");
+      assert(next != nullptr, "invariant");
       Node* const unmasked_next = unmask(next);
       // 1A: Locate the first node to keep as predecessor.
       if (!is_marked_for_removal(next)) {
@@ -133,10 +133,10 @@ void JfrConcurrentLinkedListHost::insert_tail
                                                typename Client::Node* head,
                                                typename Client::Node* last,
                                                const typename Client::Node* tail) const {
-  assert(node != NULL, "invariant");
-  assert(head != NULL, "invariant");
-  assert(last != NULL, "invarinat");
-  assert(tail != NULL, "invariant");
+  assert(node != nullptr, "invariant");
+  assert(head != nullptr, "invariant");
+  assert(last != nullptr, "invarinat");
+  assert(tail != nullptr, "invariant");
   // Mark the new node to be inserted with the insertion marker already.
   node->_next = set_insertion_bit(const_cast(tail));
   // Invariant: [node]--> tail
@@ -188,10 +188,10 @@ void JfrConcurrentLinkedListHost::insert_tail
 template <typename Client, template <typename> class SearchPolicy, typename AllocPolicy>
 typename Client::Node* JfrConcurrentLinkedListHost<Client, SearchPolicy, AllocPolicy>::remove(typename Client::Node* head,
                                                                                               const typename Client::Node* tail,
-                                                                                              typename Client::Node* last /* NULL */,
+                                                                                              typename Client::Node* last /* nullptr */,
                                                                                               bool insert_is_head /* true */) {
-  assert(head != NULL, "invariant");
-  assert(tail != NULL, "invariant");
+  assert(head != nullptr, "invariant");
+  assert(tail != nullptr, "invariant");
   assert(head != tail, "invariant");
   NodePtr predecessor;
   NodePtr successor;
@@ -202,14 +202,14 @@ typename Client::Node* JfrConcurrentLinkedListHost
     successor = find_adjacent(head, tail, &predecessor, version_handle, predicate);
     if (successor == tail) {
-      return NULL;
+      return nullptr;
     }
     // Invariant: predecessor --> successor
     // Invariant (optional: key-based total order): predecessor->key() < key && key <= successor->key()
     // It is the successor node that is to be removed.
     // We first attempt to reserve (logically excise) the successor node.
     successor_next = mark_for_removal(successor);
-    if (successor_next != NULL) {
+    if (successor_next != nullptr) {
       break;
     }
   }
 
@@ -225,7 +225,7 @@ typename Client::Node* JfrConcurrentLinkedListHost
     excise(successor);
     find_adjacent(head, tail, &predecessor, version_handle, excise);
   }
-  if (last != NULL && Atomic::load_acquire(&last->_next) == successor) {
+  if (last != nullptr && Atomic::load_acquire(&last->_next) == successor) {
     guarantee(!insert_is_head, "invariant");
     guarantee(successor_next == tail, "invariant");
     LastNode excise;
@@ -243,8 +243,8 @@ template class SearchPolicy, typename Allo
 bool JfrConcurrentLinkedListHost<Client, SearchPolicy, AllocPolicy>::in_list(const typename Client::Node* node,
                                                                              typename Client::Node* head,
                                                                              const typename Client::Node* tail) const {
-  assert(head != NULL, "invariant");
-  assert(tail != NULL, "invariant");
+  assert(head != nullptr, "invariant");
+  assert(tail != nullptr, "invariant");
   assert(head != tail, "invariant");
   VersionHandle version_handle = _client->get_version_handle();
   const Node* current = head;
@@ -268,8 +268,8 @@ template
 inline void JfrConcurrentLinkedListHost<Client, SearchPolicy, AllocPolicy>::iterate(typename Client::Node* head,
                                                                                     const typename Client::Node* tail,
                                                                                     Callback& cb) {
-  assert(head != NULL, "invariant");
-  assert(tail != NULL, "invariant");
+  assert(head != nullptr, "invariant");
+  assert(tail != nullptr, "invariant");
   assert(head != tail, "invariant");
   VersionHandle version_handle = _client->get_version_handle();
   NodePtr current = head;
diff --git a/src/hotspot/share/jfr/utilities/jfrConcurrentQueue.inline.hpp b/src/hotspot/share/jfr/utilities/jfrConcurrentQueue.inline.hpp
index cd6747d6d2234..11c10ee9002cf 100644
--- a/src/hotspot/share/jfr/utilities/jfrConcurrentQueue.inline.hpp
+++ b/src/hotspot/share/jfr/utilities/jfrConcurrentQueue.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,16 +31,16 @@
 #include "jfr/utilities/jfrVersionSystem.inline.hpp"
 
 template <typename NodeType, typename AllocPolicy>
-JfrConcurrentQueue<NodeType, AllocPolicy>::JfrConcurrentQueue() : _list(NULL), _head(), _last(), _tail(), _version_system() {
+JfrConcurrentQueue<NodeType, AllocPolicy>::JfrConcurrentQueue() : _list(nullptr), _head(), _last(), _tail(), _version_system() {
   _head._next = const_cast(&_tail);
   _last._next = const_cast(&_tail);
 }
 
 template <typename NodeType, typename AllocPolicy>
 bool JfrConcurrentQueue<NodeType, AllocPolicy>::initialize() {
-  assert(_list == NULL, "invariant");
+  assert(_list == nullptr, "invariant");
   _list = new JfrConcurrentLinkedListHost<JfrConcurrentQueue<NodeType, AllocPolicy>, HeadNode, AllocPolicy>(this);
-  return _list != NULL && _list->initialize();
+  return _list != nullptr && _list->initialize();
 }
 
 template <typename NodeType, typename AllocPolicy>
@@ -76,7 +76,7 @@ inline JfrVersionSystem::Handle JfrConcurrentQueue::get_v
 
 template <typename NodeType, typename AllocPolicy>
 bool JfrConcurrentQueue<NodeType, AllocPolicy>::in_list(const NodeType* node) const {
-  assert(node != NULL, "invariant");
+  assert(node != nullptr, "invariant");
   return _list->in_list(node, const_cast(&_head), &_tail);
 }
 
diff --git a/src/hotspot/share/jfr/utilities/jfrDoublyLinkedList.hpp b/src/hotspot/share/jfr/utilities/jfrDoublyLinkedList.hpp
index a0048b1d26e47..4e66af7f47832 100644
--- a/src/hotspot/share/jfr/utilities/jfrDoublyLinkedList.hpp
+++ b/src/hotspot/share/jfr/utilities/jfrDoublyLinkedList.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,7 @@ class JfrDoublyLinkedList {
  public:
   typedef T Node;
-  JfrDoublyLinkedList() : _head(NULL), _tail(NULL), _count(0) {}
+  JfrDoublyLinkedList() : _head(nullptr), _tail(nullptr), _count(0) {}
   T* head() const { return _head; }
   T* tail() const { return _tail; }
   size_t count() const { return _count; }
@@ -54,44 +54,44 @@ class JfrDoublyLinkedList {
 
 template <typename T>
 inline void JfrDoublyLinkedList<T>::prepend(T* const node) {
-  assert(node != NULL, "invariant");
-  node->set_prev(NULL);
+  assert(node != nullptr, "invariant");
+  node->set_prev(nullptr);
   assert(!in_list(node), "already in list error");
   T** lh = list_head();
-  if (*lh != NULL) {
+  if (*lh != nullptr) {
     (*lh)->set_prev(node);
     node->set_next(*lh);
   } else {
     T** lt = list_tail();
-    assert(*lt == NULL, "invariant");
+    assert(*lt == nullptr, "invariant");
     *lt = node;
-    node->set_next(NULL);
+    node->set_next(nullptr);
     assert(tail() == node, "invariant");
-    assert(node->next() == NULL, "invariant");
+    assert(node->next() == nullptr, "invariant");
   }
   *lh = node;
   ++_count;
   assert(head() == node, "head error");
   assert(in_list(node), "not in list error");
-  assert(node->prev() == NULL, "invariant");
+  assert(node->prev() == nullptr, "invariant");
 }
 
 template <typename T>
 void JfrDoublyLinkedList<T>::append(T* const node) {
-  assert(node != NULL, "invariant");
-  node->set_next(NULL);
+  assert(node != nullptr, "invariant");
+  node->set_next(nullptr);
   assert(!in_list(node), "already in list error");
   T** lt = list_tail();
-  if (*lt != NULL) {
+  if (*lt != nullptr) {
     // already an existing tail
     node->set_prev(*lt);
     (*lt)->set_next(node);
   } else {
     // if no tail, also update head
-    assert(*lt == NULL, "invariant");
+    assert(*lt == nullptr, "invariant");
     T** lh = list_head();
-    assert(*lh == NULL, "invariant");
-    node->set_prev(NULL);
+    assert(*lh == nullptr, "invariant");
+    node->set_prev(nullptr);
     *lh = node;
     assert(head() == node, "invariant");
   }
@@ -99,32 +99,32 @@ void JfrDoublyLinkedList::append(T* const node) {
   ++_count;
   assert(tail() == node, "invariant");
   assert(in_list(node), "not in list error");
-  assert(node->next() == NULL, "invariant");
+  assert(node->next() == nullptr, "invariant");
 }
 
 template <typename T>
 T* JfrDoublyLinkedList<T>::remove(T* const node) {
-  assert(node != NULL, "invariant");
+  assert(node != nullptr, "invariant");
   assert(in_list(node), "invariant");
   T* const prev = (T*)node->prev();
   T* const next = (T*)node->next();
-  if (prev == NULL) {
+  if (prev == nullptr) {
     assert(head() == node, "head error");
-    if (next != NULL) {
-      next->set_prev(NULL);
+    if (next != nullptr) {
+      next->set_prev(nullptr);
     } else {
-      assert(next == NULL, "invariant");
+      assert(next == nullptr, "invariant");
       assert(tail() == node, "tail error");
       T** lt = list_tail();
-      *lt = NULL;
-      assert(tail() == NULL, "invariant");
+      *lt = nullptr;
+      assert(tail() == nullptr, "invariant");
     }
     T** lh = list_head();
     *lh = next;
     assert(head() == next, "invariant");
   } else {
-    assert(prev != NULL, "invariant");
-    if (next == NULL) {
+    assert(prev != nullptr, "invariant");
+    if (next == nullptr) {
       assert(tail() == node, "tail error");
       T** lt = list_tail();
       *lt = prev;
@@ -143,19 +143,19 @@ template
 T* JfrDoublyLinkedList<T>::clear(bool return_tail /* false */) {
   T* const node = return_tail ? tail() : head();
   T** l = list_head();
-  *l = NULL;
+  *l = nullptr;
   l = list_tail();
-  *l = NULL;
+  *l = nullptr;
   _count = 0;
-  assert(head() == NULL, "invariant");
-  assert(tail() == NULL, "invariant");
+  assert(head() == nullptr, "invariant");
+  assert(tail() == nullptr, "invariant");
   return node;
 }
 
 template <typename T>
 bool JfrDoublyLinkedList<T>::locate(const T* node, const T* const target) const {
-  assert(target != NULL, "invariant");
-  while (node != NULL) {
+  assert(target != nullptr, "invariant");
+  while (node != nullptr) {
     if (node == target) {
       return true;
     }
@@ -166,13 +166,13 @@ bool JfrDoublyLinkedList::locate(const T* node, const T* const target) const
 
 template <typename T>
 bool JfrDoublyLinkedList<T>::in_list(const T* const target) const {
-  assert(target != NULL, "invariant");
+  assert(target != nullptr, "invariant");
   return locate(head(), target);
 }
 
 template <typename T>
 inline void validate_count_param(T* node, size_t count_param) {
-  assert(node != NULL, "invariant");
+  assert(node != nullptr, "invariant");
   size_t count = 0;
   while (node) {
     ++count;
@@ -183,23 +183,23 @@ inline void validate_count_param(T* node, size_t count_param) {
 
 template <typename T>
 void JfrDoublyLinkedList<T>::append_list(T* const head_node, T* const tail_node, size_t count) {
-  assert(head_node != NULL, "invariant");
+  assert(head_node != nullptr, "invariant");
   assert(!in_list(head_node), "already in list error");
-  assert(tail_node != NULL, "invariant");
+  assert(tail_node != nullptr, "invariant");
   assert(!in_list(tail_node), "already in list error");
-  assert(tail_node->next() == NULL, "invariant");
+  assert(tail_node->next() == nullptr, "invariant");
   // ensure passed in list nodes are connected
   assert(locate(head_node, tail_node), "invariant");
   T** lt = list_tail();
-  if (*lt != NULL) {
+  if (*lt != nullptr) {
     head_node->set_prev(*lt);
     (*lt)->set_next(head_node);
   } else {
     // no head
-    assert(*lt == NULL, "invariant");
+    assert(*lt == nullptr, "invariant");
     T** lh = list_head();
-    assert(*lh == NULL, "invariant");
-    head_node->set_prev(NULL);
+    assert(*lh == nullptr, "invariant");
+    head_node->set_prev(nullptr);
     *lh = head_node;
     assert(head() == head_node, "invariant");
   }
diff --git a/src/hotspot/share/jfr/utilities/jfrEpochQueue.inline.hpp b/src/hotspot/share/jfr/utilities/jfrEpochQueue.inline.hpp
index 562ac0e77b576..cc2274408ec2d 100644
--- a/src/hotspot/share/jfr/utilities/jfrEpochQueue.inline.hpp
+++ b/src/hotspot/share/jfr/utilities/jfrEpochQueue.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@
 #include "runtime/javaThread.hpp"
 
 template